Compare commits

...

27 Commits

Author SHA1 Message Date
Michael Barz
d7fbaa3f30 feat: add release build step 2025-03-18 13:00:36 +01:00
Michael Barz
205e12c516 Merge pull request #417 from opencloud-eu/chore/bump-web-v2.0.0
chore: bump web to v2.0.0
2025-03-18 12:31:59 +01:00
Michael Barz
c6a8dde4c7 Merge pull request #418 from opencloud-eu/optimize-docker-build
feat: optimize docker build
2025-03-18 12:15:36 +01:00
Michael Barz
29f701efa2 feat: optimize docker build 2025-03-18 12:15:16 +01:00
Jannik Stehle
b40f4ba398 chore: bump web to v2.0.0 2025-03-18 10:53:43 +01:00
Viktor Scharf
49a44fd4bc Merge pull request #405 from opencloud-eu/runTestwithPosix
run test with posix
2025-03-18 10:48:36 +01:00
Viktor Scharf
a7b0911363 run test with posix 2025-03-18 09:55:46 +01:00
Artur Neumann
4e8cad8df3 fix finding existing caches in CI (#381) 2025-03-18 13:45:33 +05:45
Ralf Haferkamp
1a48cda106 Merge pull request #410 from opencloud-eu/dependabot/npm_and_yarn/services/idp/babel/core-7.26.10
build(deps-dev): bump @babel/core from 7.26.9 to 7.26.10 in /services/idp
2025-03-18 08:13:19 +01:00
Ralf Haferkamp
d03bcf784a Merge pull request #412 from opencloud-eu/dependabot/go_modules/github.com/open-policy-agent/opa-1.2.0
build(deps): bump github.com/open-policy-agent/opa from 1.1.0 to 1.2.0
2025-03-18 08:12:50 +01:00
Ralf Haferkamp
1f259397bc Merge pull request #374 from opencloud-eu/dependabot/go_modules/github.com/riandyrn/otelchi-0.12.1
build(deps): bump github.com/riandyrn/otelchi from 0.12.0 to 0.12.1
2025-03-18 08:11:16 +01:00
Artur Neumann
3655b04258 fix finding existing caches in CI 2025-03-18 12:52:10 +05:45
Artur Neumann
648a7b8a59 Merge pull request #380 from opencloud-eu/enableMoreE2Etests
[full-ci] run more E2E tests in CI
2025-03-18 09:54:48 +05:45
dependabot[bot]
871fdd15d7 build(deps): bump github.com/open-policy-agent/opa from 1.1.0 to 1.2.0
Bumps [github.com/open-policy-agent/opa](https://github.com/open-policy-agent/opa) from 1.1.0 to 1.2.0.
- [Release notes](https://github.com/open-policy-agent/opa/releases)
- [Changelog](https://github.com/open-policy-agent/opa/blob/main/CHANGELOG.md)
- [Commits](https://github.com/open-policy-agent/opa/compare/v1.1.0...v1.2.0)

---
updated-dependencies:
- dependency-name: github.com/open-policy-agent/opa
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-17 17:29:55 +00:00
dependabot[bot]
4275ba2dcd build(deps-dev): bump @babel/core in /services/idp
Bumps [@babel/core](https://github.com/babel/babel/tree/HEAD/packages/babel-core) from 7.26.9 to 7.26.10.
- [Release notes](https://github.com/babel/babel/releases)
- [Changelog](https://github.com/babel/babel/blob/main/CHANGELOG.md)
- [Commits](https://github.com/babel/babel/commits/v7.26.10/packages/babel-core)

---
updated-dependencies:
- dependency-name: "@babel/core"
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-17 16:43:48 +00:00
Ralf Haferkamp
98bce5e215 Merge pull request #409 from opencloud-eu/dependabot/npm_and_yarn/services/idp/babel-loader-10.0.0
build(deps-dev): bump babel-loader from 9.2.1 to 10.0.0 in /services/idp
2025-03-17 17:42:06 +01:00
dependabot[bot]
0d48832f3c build(deps-dev): bump babel-loader from 9.2.1 to 10.0.0 in /services/idp
Bumps [babel-loader](https://github.com/babel/babel-loader) from 9.2.1 to 10.0.0.
- [Release notes](https://github.com/babel/babel-loader/releases)
- [Changelog](https://github.com/babel/babel-loader/blob/main/CHANGELOG.md)
- [Commits](https://github.com/babel/babel-loader/compare/v9.2.1...v10.0.0)

---
updated-dependencies:
- dependency-name: babel-loader
  dependency-type: direct:development
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-17 15:07:19 +00:00
Ralf Haferkamp
43c3b814cc Merge pull request #401 from rhafer/posixfs-template-userid
posixfs: Use userid as the foldername for personal space
2025-03-17 15:54:50 +01:00
Viktor Scharf
19ccfe2678 Merge pull request #386 from opencloud-eu/removeExpectedToFailDublications
[full-ci] delete duplicated lines from expected-failures
2025-03-17 15:34:44 +01:00
Viktor Scharf
1183f7d274 Merge pull request #407 from opencloud-eu/revaBump2.28
bump reva 2.28
2025-03-17 15:29:09 +01:00
Viktor Scharf
e15989e993 bump reva 2.28 2025-03-17 14:49:17 +01:00
Artur Neumann
e70ff761e6 fix expected to fail files format 2025-03-17 18:56:56 +05:45
Artur Neumann
0d505db241 move tests that fail in both with/without remote.php 2025-03-17 18:39:02 +05:45
Artur Neumann
3a48c54618 adjust / fix lines in expected to fail that were forgotten 2025-03-17 18:36:28 +05:45
Artur Neumann
8ef69ed68b [full-ci] delete duplicated lines from expected-failures 2025-03-17 18:13:13 +05:45
Ralf Haferkamp
90328a7ed1 posixfs: Use userid as the foldername for personal space
This avoids losing the user's personal space after renaming the user.

Closes: #192
2025-03-17 10:54:52 +01:00
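A minimal Go sketch of why that helps (the PersonalSpacePathTemplate change itself appears further down in this diff): rendering the personal-space path from the immutable user id instead of the username keeps the folder stable across renames. The struct layout and example id here are assumptions for illustration, not repo code.

```go
package main

import (
	"fmt"
	"strings"
	"text/template"
)

// User loosely mirrors the fields the path templates reference;
// the exact struct layout is assumed for illustration.
type User struct {
	Username string
	Id       struct{ OpaqueId string }
}

func render(tmpl string, u User) string {
	t := template.Must(template.New("path").Parse(tmpl))
	var b strings.Builder
	_ = t.Execute(&b, struct{ User User }{u}) // templates reference {{.User...}}
	return b.String()
}

func main() {
	u := User{Username: "einstein"}
	u.Id.OpaqueId = "4c510ada-c86b-4815-8820-42cdf82c3d51" // hypothetical id

	fmt.Println(render("users/{{.User.Username}}", u))    // users/einstein (breaks if the user is renamed)
	fmt.Println(render("users/{{.User.Id.OpaqueId}}", u)) // users/4c510ada-... (stable)
}
```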
dependabot[bot]
e4e7dffdb7 build(deps): bump github.com/riandyrn/otelchi from 0.12.0 to 0.12.1
Bumps [github.com/riandyrn/otelchi](https://github.com/riandyrn/otelchi) from 0.12.0 to 0.12.1.
- [Release notes](https://github.com/riandyrn/otelchi/releases)
- [Changelog](https://github.com/riandyrn/otelchi/blob/master/CHANGELOG.md)
- [Commits](https://github.com/riandyrn/otelchi/compare/v0.12.0...v0.12.1)

---
updated-dependencies:
- dependency-name: github.com/riandyrn/otelchi
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-13 14:28:48 +00:00
174 changed files with 8382 additions and 3365 deletions

View File

@@ -1,3 +1,3 @@
# The test runner source for UI tests
WEB_COMMITID=74c8df4f64d9bf957a0652fb92e01529efa3c0b3
WEB_COMMITID=4a2f3a1d14009676a3a9dfef536ed4fd3e7f4c21
WEB_BRANCH=main

View File

@@ -35,7 +35,7 @@ ONLYOFFICE_DOCUMENT_SERVER = "onlyoffice/documentserver:7.5.1"
PLUGINS_CODACY = "plugins/codacy:1"
PLUGINS_DOCKER_BUILDX = "woodpeckerci/plugin-docker-buildx:latest"
PLUGINS_GH_PAGES = "plugins/gh-pages:1"
PLUGINS_GITHUB_RELEASE = "plugins/github-release:1"
PLUGINS_GITHUB_RELEASE = "woodpeckerci/plugin-release"
PLUGINS_GIT_ACTION = "plugins/git-action:1"
PLUGINS_GIT_PUSH = "appleboy/drone-git-push"
PLUGINS_MANIFEST = "plugins/manifest:1"
@@ -53,13 +53,14 @@ dirs = {
"web": "/woodpecker/src/github.com/opencloud-eu/opencloud/webTestRunner",
"zip": "/woodpecker/src/github.com/opencloud-eu/opencloud/zip",
"webZip": "/woodpecker/src/github.com/opencloud-eu/opencloud/zip/web.tar.gz",
"webPnpmZip": "/woodpecker/src/github.com/opencloud-eu/opencloud/zip/pnpm-store.tar.gz",
"webPnpmZip": "/woodpecker/src/github.com/opencloud-eu/opencloud/zip/web-pnpm.tar.gz",
"baseGo": "/go/src/github.com/opencloud-eu/opencloud",
"gobinTar": "go-bin.tar.gz",
"gobinTarPath": "/go/src/github.com/opencloud-eu/opencloud/go-bin.tar.gz",
"opencloudConfig": "tests/config/woodpecker/opencloud-config.json",
"ocis": "/woodpecker/src/github.com/opencloud-eu/opencloud/srv/app/tmp/ocis",
"ocisRevaDataRoot": "/woodpecker/src/github.com/opencloud-eu/opencloud/srv/app/tmp/ocis/owncloud/data",
"multiServiceOcBaseDataPath": "/woodpecker/src/github.com/opencloud-eu/opencloud/multiServiceData",
"ocWrapper": "/woodpecker/src/github.com/opencloud-eu/opencloud/tests/ocwrapper",
"bannedPasswordList": "tests/config/woodpecker/banned-password-list.txt",
"ocmProviders": "tests/config/woodpecker/providers.json",
@@ -310,14 +311,14 @@ config = {
"xsuites": ["search", "app-provider", "app-provider-onlyOffice", "app-store", "keycloak", "oidc", "ocm"], # suites to skip
},
"search": {
"skip": True,
"skip": False,
"suites": ["search"], # suites to run
"tikaNeeded": True,
},
},
"e2eMultiService": {
"testSuites": {
"skip": True,
"skip": False,
"suites": [
"smoke",
"shares",
@@ -344,6 +345,8 @@ config = {
"codestyle": True,
}
GRAPH_AVAILABLE_ROLES = "b1e2218d-eef8-4d4c-b82d-0f1a1b48f3b5,a8d5fe5e-96e3-418d-825b-534dbdf22b99,fb6c3e19-e378-47e5-b277-9732f9de6e21,58c63c02-1d89-4572-916a-870abc5a1b7d,2d00ce52-1fc2-4dbc-8b95-a73b73395f5a,1c996275-f1c9-4e71-abdf-a42f6495e960,312c0871-5ef7-4b3a-85b6-0e4074c64049,aa97fe03-7980-45ac-9e50-b325749fd7e6,63e64e19-8d43-42ec-a738-2b6af2610efa"
# workspace for pipeline to cache Go dependencies between steps of a pipeline
# to be used in combination with stepVolumeGo
workspace = \
@@ -518,11 +521,15 @@ def testPipelines(ctx):
if config["litmus"]:
pipelines += litmus(ctx, "decomposed")
storage = "decomposed"
if "[posix]" in ctx.build.title.lower():
storage = "posix"
if "skip" not in config["cs3ApiTests"] or not config["cs3ApiTests"]["skip"]:
pipelines.append(cs3ApiTests(ctx, "ocis", "default"))
pipelines.append(cs3ApiTests(ctx, storage, "default"))
if "skip" not in config["wopiValidatorTests"] or not config["wopiValidatorTests"]["skip"]:
pipelines.append(wopiValidatorTests(ctx, "ocis", "builtin", "default"))
pipelines.append(wopiValidatorTests(ctx, "ocis", "cs3", "default"))
pipelines.append(wopiValidatorTests(ctx, storage, "builtin", "default"))
pipelines.append(wopiValidatorTests(ctx, storage, "cs3", "default"))
pipelines += localApiTestPipeline(ctx)
@@ -552,6 +559,9 @@ def getGoBinForTesting(ctx):
"exclude": skipIfUnchanged(ctx, "unit-tests"),
},
},
{
"event": "tag",
},
],
"workspace": workspace,
}]
@@ -559,15 +569,8 @@ def getGoBinForTesting(ctx):
def checkGoBinCache():
return [{
"name": "check-go-bin-cache",
"image": OC_UBUNTU,
"environment": {
"CACHE_ENDPOINT": {
"from_secret": "cache_s3_server",
},
"CACHE_BUCKET": {
"from_secret": "cache_s3_bucket",
},
},
"image": MINIO_MC,
"environment": MINIO_MC_ENV,
"commands": [
"bash -x %s/tests/config/woodpecker/check_go_bin_cache.sh %s %s" % (dirs["baseGo"], dirs["baseGo"], dirs["gobinTar"]),
],
@@ -579,6 +582,8 @@ def cacheGoBin():
"name": "bingo-get",
"image": OC_CI_GOLANG,
"commands": [
". ./.env",
"if $BIN_CACHE_FOUND; then exit 0; fi",
"make bingo-update",
],
"environment": CI_HTTP_PROXY_ENV,
@@ -587,6 +592,8 @@ def cacheGoBin():
"name": "archive-go-bin",
"image": OC_UBUNTU,
"commands": [
". ./.env",
"if $BIN_CACHE_FOUND; then exit 0; fi",
"tar -czvf %s /go/bin" % dirs["gobinTarPath"],
],
},
@@ -595,6 +602,8 @@ def cacheGoBin():
"image": MINIO_MC,
"environment": MINIO_MC_ENV,
"commands": [
". ./.env",
"if $BIN_CACHE_FOUND; then exit 0; fi",
# .bingo folder will change after 'bingo-get'
# so get the stored hash of a .bingo folder
"BINGO_HASH=$(cat %s/.bingo_hash)" % dirs["baseGo"],
@@ -886,12 +895,16 @@ def localApiTestPipeline(ctx):
if ctx.build.event == "cron" or "full-ci" in ctx.build.title.lower():
with_remote_php.append(False)
storages = ["decomposed"]
if "[posix]" in ctx.build.title.lower():
storages = ["posix"]
defaults = {
"suites": {},
"skip": False,
"extraEnvironment": {},
"extraServerEnvironment": {},
"storages": ["decomposed"],
"storages": storages,
"accounts_hash_difficulty": 4,
"emailNeeded": False,
"antivirusNeeded": False,
@@ -920,7 +933,7 @@ def localApiTestPipeline(ctx):
(waitForEmailService() if params["emailNeeded"] else []) +
(opencloudServer(storage, params["accounts_hash_difficulty"], deploy_type = "federation", extra_server_environment = params["extraServerEnvironment"]) if params["federationServer"] else []) +
((wopiCollaborationService("fakeoffice") + wopiCollaborationService("collabora") + wopiCollaborationService("onlyoffice")) if params["collaborationServiceNeeded"] else []) +
(ocisHealthCheck("wopi", ["wopi-collabora:9304", "wopi-onlyoffice:9304", "wopi-fakeoffice:9304"]) if params["collaborationServiceNeeded"] else []) +
(openCloudHealthCheck("wopi", ["wopi-collabora:9304", "wopi-onlyoffice:9304", "wopi-fakeoffice:9304"]) if params["collaborationServiceNeeded"] else []) +
localApiTests(ctx, name, params["suites"], storage, params["extraEnvironment"], run_with_remote_php) +
logRequests(),
"services": (emailService() if params["emailNeeded"] else []) +
@@ -945,7 +958,7 @@ def localApiTestPipeline(ctx):
def localApiTests(ctx, name, suites, storage = "decomposed", extra_environment = {}, with_remote_php = False):
test_dir = "%s/tests/acceptance" % dirs["base"]
expected_failures_file = "%s/expected-failures-localAPI-on-%s-storage.md" % (test_dir, storage)
expected_failures_file = "%s/expected-failures-localAPI-on-decomposed-storage.md" % (test_dir)
environment = {
"TEST_SERVER_URL": OC_URL,
@@ -1122,10 +1135,14 @@ def wopiValidatorTests(ctx, storage, wopiServerType, accounts_hash_difficulty =
],
}
def coreApiTests(ctx, part_number = 1, number_of_parts = 1, with_remote_php = False, storage = "ocis", accounts_hash_difficulty = 4):
def coreApiTests(ctx, part_number = 1, number_of_parts = 1, with_remote_php = False, accounts_hash_difficulty = 4):
filterTags = "~@skipOnGraph&&~@skipOnOcis-%s-Storage" % ("OC" if storage == "owncloud" else "OCIS")
test_dir = "%s/tests/acceptance" % dirs["base"]
expected_failures_file = "%s/expected-failures-API-on-%s-storage.md" % (test_dir, storage.upper())
expected_failures_file = "%s/expected-failures-API-on-decomposed-storage.md" % (test_dir)
storage = "decomposed"
if "[posix]" in ctx.build.title.lower():
storage = "posix"
return {
"name": "Core-API-Tests-%s%s" % (part_number, "-withoutRemotePhp" if not with_remote_php else ""),
@@ -1205,9 +1222,8 @@ def e2eTestPipeline(ctx):
extra_server_environment = {
"OC_PASSWORD_POLICY_BANNED_PASSWORDS_LIST": "%s" % dirs["bannedPasswordList"],
"OC_SHOW_USER_EMAIL_IN_RESULTS": True,
"FRONTEND_OCS_ENABLE_DENIALS": True,
# Needed for enabling all roles
"GRAPH_AVAILABLE_ROLES": "b1e2218d-eef8-4d4c-b82d-0f1a1b48f3b5,a8d5fe5e-96e3-418d-825b-534dbdf22b99,fb6c3e19-e378-47e5-b277-9732f9de6e21,58c63c02-1d89-4572-916a-870abc5a1b7d,2d00ce52-1fc2-4dbc-8b95-a73b73395f5a,1c996275-f1c9-4e71-abdf-a42f6495e960,312c0871-5ef7-4b3a-85b6-0e4074c64049,aa97fe03-7980-45ac-9e50-b325749fd7e6,63e64e19-8d43-42ec-a738-2b6af2610efa",
"GRAPH_AVAILABLE_ROLES": "%s" % GRAPH_AVAILABLE_ROLES,
}
e2e_trigger = [
@@ -1235,6 +1251,10 @@ def e2eTestPipeline(ctx):
if (ctx.build.event == "tag"):
return pipelines
storage = "decomposed"
if "[posix]" in ctx.build.title.lower():
storage = "posix"
for name, suite in config["e2eTests"].items():
if "skip" in suite and suite["skip"]:
continue
@@ -1258,7 +1278,7 @@ def e2eTestPipeline(ctx):
restoreWebCache() + \
restoreWebPnpmCache() + \
(tikaService() if params["tikaNeeded"] else []) + \
opencloudServer(extra_server_environment = extra_server_environment, tika_enabled = params["tikaNeeded"])
opencloudServer(storage, extra_server_environment = extra_server_environment, tika_enabled = params["tikaNeeded"])
step_e2e = {
"name": "e2e-tests",
@@ -1315,20 +1335,18 @@ def multiServiceE2ePipeline(ctx):
"tikaNeeded": False,
}
e2e_trigger = {
"when": [
{
"event": ["push", "manual"],
"branch": "main",
e2e_trigger = [
{
"event": ["push", "manual"],
"branch": "main",
},
{
"event": "pull_request",
"path": {
"exclude": skipIfUnchanged(ctx, "e2e-tests"),
},
{
"event": "pull_request",
"path": {
"exclude": skipIfUnchanged(ctx, "e2e-tests"),
},
},
],
}
},
]
if ("skip-e2e" in ctx.build.title.lower()):
return pipelines
@@ -1337,29 +1355,37 @@ def multiServiceE2ePipeline(ctx):
if (not "full-ci" in ctx.build.title.lower() and ctx.build.event != "cron"):
return pipelines
storage = "decomposed"
if "[posix]" in ctx.build.title.lower():
storage = "posix"
extra_server_environment = {
"OCIS_PASSWORD_POLICY_BANNED_PASSWORDS_LIST": "%s" % dirs["bannedPasswordList"],
"OCIS_JWT_SECRET": "some-ocis-jwt-secret",
"OCIS_SERVICE_ACCOUNT_ID": "service-account-id",
"OCIS_SERVICE_ACCOUNT_SECRET": "service-account-secret",
"OCIS_EXCLUDE_RUN_SERVICES": "storage-users",
"OCIS_GATEWAY_GRPC_ADDR": "0.0.0.0:9142",
"OC_PASSWORD_POLICY_BANNED_PASSWORDS_LIST": "%s" % dirs["bannedPasswordList"],
"OC_JWT_SECRET": "some-opencloud-jwt-secret",
"OC_SERVICE_ACCOUNT_ID": "service-account-id",
"OC_SERVICE_ACCOUNT_SECRET": "service-account-secret",
"OC_EXCLUDE_RUN_SERVICES": "storage-users",
"OC_GATEWAY_GRPC_ADDR": "0.0.0.0:9142",
"SETTINGS_GRPC_ADDR": "0.0.0.0:9191",
"GATEWAY_STORAGE_USERS_MOUNT_ID": "storage-users-id",
"OC_SHOW_USER_EMAIL_IN_RESULTS": True,
# Needed for enabling all roles
"GRAPH_AVAILABLE_ROLES": "%s" % GRAPH_AVAILABLE_ROLES,
}
storage_users_environment = {
"OCIS_CORS_ALLOW_ORIGINS": "%s,https://%s:9201" % (OC_URL, OC_SERVER_NAME),
"STORAGE_USERS_JWT_SECRET": "some-ocis-jwt-secret",
"OC_CORS_ALLOW_ORIGINS": "%s,https://%s:9201" % (OC_URL, OC_SERVER_NAME),
"STORAGE_USERS_JWT_SECRET": "some-opencloud-jwt-secret",
"STORAGE_USERS_MOUNT_ID": "storage-users-id",
"STORAGE_USERS_SERVICE_ACCOUNT_ID": "service-account-id",
"STORAGE_USERS_SERVICE_ACCOUNT_SECRET": "service-account-secret",
"STORAGE_USERS_GATEWAY_GRPC_ADDR": "%s:9142" % OC_SERVER_NAME,
"STORAGE_USERS_EVENTS_ENDPOINT": "%s:9233" % OC_SERVER_NAME,
"STORAGE_USERS_DATA_GATEWAY_URL": "%s/data" % OC_URL,
"OCIS_CACHE_STORE": "nats-js-kv",
"OCIS_CACHE_STORE_NODES": "%s:9233" % OC_SERVER_NAME,
"OC_CACHE_STORE": "nats-js-kv",
"OC_CACHE_STORE_NODES": "%s:9233" % OC_SERVER_NAME,
"MICRO_REGISTRY_ADDRESS": "%s:9233" % OC_SERVER_NAME,
"OC_BASE_DATA_PATH": dirs["multiServiceOcBaseDataPath"],
}
storage_users1_environment = {
"STORAGE_USERS_GRPC_ADDR": "storageusers1:9157",
@@ -1379,13 +1405,9 @@ def multiServiceE2ePipeline(ctx):
for item in storage_users_environment:
storage_users2_environment[item] = storage_users_environment[item]
storage_volume = [{
"name": "storage",
"path": "/root/.ocis",
}]
storage_users_services = startOcisService("storage-users", "storageusers1", storage_users1_environment, storage_volume) + \
startOcisService("storage-users", "storageusers2", storage_users2_environment, storage_volume) + \
ocisHealthCheck("storage-users", ["storageusers1:9159", "storageusers2:9159"])
storage_users_services = startOpenCloudService("storage-users", "storageusers1", storage_users1_environment) + \
startOpenCloudService("storage-users", "storageusers2", storage_users2_environment) + \
openCloudHealthCheck("storage-users", ["storageusers1:9159", "storageusers2:9159"])
for _, suite in config["e2eMultiService"].items():
if "skip" in suite and suite["skip"]:
@@ -1408,13 +1430,13 @@ def multiServiceE2ePipeline(ctx):
restoreWebCache() + \
restoreWebPnpmCache() + \
tikaService() + \
opencloudServer(extra_server_environment = extra_server_environment, tika_enabled = params["tikaNeeded"]) + \
opencloudServer(storage, extra_server_environment = extra_server_environment, tika_enabled = params["tikaNeeded"]) + \
storage_users_services + \
[{
"name": "e2e-tests",
"image": OC_CI_NODEJS % DEFAULT_NODEJS_VERSION,
"environment": {
"BASE_URL_OCIS": OC_DOMAIN,
"OC_BASE_URL": OC_DOMAIN,
"HEADLESS": True,
"RETRY": "1",
},
@@ -1422,14 +1444,15 @@ def multiServiceE2ePipeline(ctx):
"cd %s/tests/e2e" % dirs["web"],
"bash run-e2e.sh %s" % e2e_args,
],
}] + logTracingResults()
}]
# + logTracingResults()
# uploadTracingResult(ctx) + \
pipelines.append({
"name": "e2e-tests-multi-service",
"steps": steps,
"depends_on": getPipelineNames(buildOpencloudBinaryForTesting(ctx) + buildWebCache(ctx)),
"workspace": e2e_trigger,
"when": e2e_trigger,
})
return pipelines
@@ -1545,13 +1568,20 @@ def dockerRelease(ctx, repo, build_type):
"image": PLUGINS_DOCKER_BUILDX,
"settings": {
"dry_run": True,
"platforms": "linux/amd64,linux/arm64",
"platforms": "linux/amd64", # do dry run only on the native platform
"repo": "%s,quay.io/%s" % (repo, repo),
"auto_tag": False if build_type == "daily" else True,
"tag": "daily" if build_type == "daily" else "",
"default_tag": "daily",
"dockerfile": "opencloud/docker/Dockerfile.multiarch",
"build_args": build_args,
"pull_image": False,
"http_proxy": {
"from_secret": "ci_http_proxy",
},
"https_proxy": {
"from_secret": "ci_http_proxy",
},
},
"when": [
{
@@ -1564,12 +1594,19 @@ def dockerRelease(ctx, repo, build_type):
"image": PLUGINS_DOCKER_BUILDX,
"settings": {
"repo": "%s,quay.io/%s" % (repo, repo),
"platforms": "linux/amd64,linux/arm64",
"platforms": "linux/amd64,linux/arm64", # we can add remote builders
"auto_tag": False if build_type == "daily" else True,
"tag": "daily" if build_type == "daily" else "",
"default_tag": "daily",
"dockerfile": "opencloud/docker/Dockerfile.multiarch",
"build_args": build_args,
"pull_image": False,
"http_proxy": {
"from_secret": "ci_http_proxy",
},
"https_proxy": {
"from_secret": "ci_http_proxy",
},
"logins": [
{
"registry": "https://index.docker.io/v1/",
@@ -1622,7 +1659,7 @@ def dockerRelease(ctx, repo, build_type):
def binaryReleases(ctx):
pipelines = []
depends_on = getPipelineNames(testOpencloudAndUploadResults(ctx) + testPipelines(ctx))
depends_on = getPipelineNames(getGoBinForTesting(ctx))
for os in config["binaryReleases"]["os"]:
pipelines.append(binaryRelease(ctx, os, depends_on))
@@ -1659,19 +1696,6 @@ def binaryRelease(ctx, arch, depends_on = []):
},
],
},
{
"name": "changelog",
"image": OC_CI_GOLANG,
"environment": CI_HTTP_PROXY_ENV,
"commands": [
"make changelog CHANGELOG_VERSION=%s" % ctx.build.ref.replace("refs/tags/v", ""),
],
"when": [
{
"event": "tag",
},
],
},
{
"name": "release",
"image": PLUGINS_GITHUB_RELEASE,
@@ -1680,11 +1704,8 @@ def binaryRelease(ctx, arch, depends_on = []):
"from_secret": "github_token",
},
"files": [
"ocis/dist/release/*",
"opencloud/dist/release/*",
],
"title": ctx.build.ref.replace("refs/tags/v", ""),
"note": "ocis/dist/CHANGELOG.md",
"overwrite": True,
"prerelease": len(ctx.build.ref.split("-")) > 1,
},
"when": [
@@ -1753,19 +1774,6 @@ def licenseCheck(ctx):
"cd third-party-licenses && tar -czf ../third-party-licenses.tar.gz *",
],
},
{
"name": "changelog",
"image": OC_CI_GOLANG,
"environment": CI_HTTP_PROXY_ENV,
"commands": [
"make changelog CHANGELOG_VERSION=%s" % ctx.build.ref.replace("refs/tags/v", "").split("-")[0],
],
"when": [
{
"event": "tag",
},
],
},
{
"name": "release",
"image": PLUGINS_GITHUB_RELEASE,
@@ -1776,10 +1784,6 @@ def licenseCheck(ctx):
"files": [
"third-party-licenses.tar.gz",
],
"title": ctx.build.ref.replace("refs/tags/v", ""),
"note": "ocis/dist/CHANGELOG.md",
"overwrite": True,
"prerelease": len(ctx.build.ref.split("-")) > 1,
},
"when": [
{
@@ -2060,6 +2064,9 @@ def opencloudServer(storage = "decomposed", accounts_hash_difficulty = 4, volume
"WEBFINGER_DEBUG_ADDR": "0.0.0.0:9279",
}
if storage == "posix":
environment["STORAGE_USERS_ID_CACHE_STORE"] = "nats-js-kv"
if deploy_type == "":
environment["FRONTEND_OCS_ENABLE_DENIALS"] = True
@@ -2146,15 +2153,14 @@ def opencloudServer(storage = "decomposed", accounts_hash_difficulty = 4, volume
return steps
def startOcisService(service = None, name = None, environment = {}, volumes = []):
def startOpenCloudService(service = None, name = None, environment = {}):
"""
Starts an OCIS service in a detached container.
Starts an OpenCloud service in a detached container.
Args:
service (str): The name of the service to start.
name (str): The name of the container.
environment (dict): The environment variables to set in the container.
volumes (list): The volumes to mount in the container.
Returns:
list: A list of pipeline steps to start the service.
@@ -2640,15 +2646,8 @@ def getWoodpeckerEnvAndCheckScript(ctx):
def checkForWebCache(name):
return {
"name": "check-for-%s-cache" % name,
"image": OC_UBUNTU,
"environment": {
"CACHE_ENDPOINT": {
"from_secret": "cache_s3_server",
},
"CACHE_BUCKET": {
"from_secret": "cache_s3_bucket",
},
},
"image": MINIO_MC,
"environment": MINIO_MC_ENV,
"commands": [
"bash -x check_web_cache.sh %s" % name,
],
@@ -2660,6 +2659,7 @@ def cloneWeb():
"image": OC_CI_NODEJS % DEFAULT_NODEJS_VERSION,
"commands": [
". ./.woodpecker.env",
"if $WEB_CACHE_FOUND; then exit 0; fi",
"rm -rf %s" % dirs["web"],
"git clone -b $WEB_BRANCH --single-branch --no-tags https://github.com/opencloud-eu/web.git %s" % dirs["web"],
"cd %s && git checkout $WEB_COMMITID" % dirs["web"],
@@ -2675,6 +2675,8 @@ def generateWebPnpmCache(ctx):
"name": "install-pnpm",
"image": OC_CI_NODEJS % DEFAULT_NODEJS_VERSION,
"commands": [
". ./.woodpecker.env",
"if $WEB_CACHE_FOUND; then exit 0; fi",
"cd %s" % dirs["web"],
'npm install --silent --global --force "$(jq -r ".packageManager" < package.json)"',
"pnpm config set store-dir ./.pnpm-store",
@@ -2685,6 +2687,8 @@ def generateWebPnpmCache(ctx):
"name": "zip-pnpm",
"image": OC_CI_NODEJS % DEFAULT_NODEJS_VERSION,
"commands": [
". ./.woodpecker.env",
"if $WEB_CACHE_FOUND; then exit 0; fi",
# zip the pnpm deps before caching
"if [ ! -d '%s' ]; then mkdir -p %s; fi" % (dirs["zip"], dirs["zip"]),
"cd %s" % dirs["web"],
@@ -2696,7 +2700,8 @@ def generateWebPnpmCache(ctx):
"image": MINIO_MC,
"environment": MINIO_MC_ENV,
"commands": [
"source ./.woodpecker.env",
". ./.woodpecker.env",
"if $WEB_CACHE_FOUND; then exit 0; fi",
# cache using the minio/mc client to the public bucket (long term bucket)
"mc alias set s3 $MC_HOST $AWS_ACCESS_KEY_ID $AWS_SECRET_ACCESS_KEY",
"mc cp -r -a %s s3/$CACHE_BUCKET/opencloud/web-test-runner/$WEB_COMMITID" % dirs["webPnpmZip"],
@@ -2713,6 +2718,8 @@ def generateWebCache(ctx):
"name": "zip-web",
"image": OC_UBUNTU,
"commands": [
". ./.woodpecker.env",
"if $WEB_CACHE_FOUND; then exit 0; fi",
"if [ ! -d '%s' ]; then mkdir -p %s; fi" % (dirs["zip"], dirs["zip"]),
"tar -czvf %s webTestRunner" % dirs["webZip"],
],
@@ -2722,7 +2729,8 @@ def generateWebCache(ctx):
"image": MINIO_MC,
"environment": MINIO_MC_ENV,
"commands": [
"source ./.woodpecker.env",
". ./.woodpecker.env",
"if $WEB_CACHE_FOUND; then exit 0; fi",
# cache using the minio/mc client to the 'owncloud' bucket (long term bucket)
"mc alias set s3 $MC_HOST $AWS_ACCESS_KEY_ID $AWS_SECRET_ACCESS_KEY",
"mc cp -r -a %s s3/$CACHE_BUCKET/opencloud/web-test-runner/$WEB_COMMITID" % dirs["webZip"],
@@ -2758,7 +2766,7 @@ def restoreWebPnpmCache():
"commands": [
"source ./.woodpecker.env",
"mc alias set s3 $MC_HOST $AWS_ACCESS_KEY_ID $AWS_SECRET_ACCESS_KEY",
"mc cp -r -a s3/$CACHE_BUCKET/opencloud/web-test-runner/$WEB_COMMITID/pnpm-store.tar.gz %s" % dirs["zip"],
"mc cp -r -a s3/$CACHE_BUCKET/opencloud/web-test-runner/$WEB_COMMITID/web-pnpm.tar.gz %s" % dirs["zip"],
],
}, {
# we need to install again because the node_modules are not cached
@@ -2851,7 +2859,7 @@ def wopiCollaborationService(name):
environment["COLLABORATION_WOPI_SRC"] = "http://%s:9300" % service_name
return startOcisService("collaboration", service_name, environment)
return startOpenCloudService("collaboration", service_name, environment)
def tikaService():
return [{
@@ -2981,7 +2989,7 @@ def waitForServices(name, services = []):
],
}]
def ocisHealthCheck(name, services = []):
def openCloudHealthCheck(name, services = []):
commands = []
timeout = 300
curl_command = ["timeout %s bash -c 'while [ $(curl -s %s/%s ", "-w %{http_code} -o /dev/null) != 200 ]; do sleep 1; done'"]
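That curl loop polls each service until its health endpoint returns HTTP 200. A rough Go equivalent of the same wait-until-healthy logic (the /healthz path, scheme, and timeout handling are assumptions):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// waitHealthy polls each service's health endpoint until it answers 200 OK
// or the overall deadline passes, mirroring the bash/curl loop above.
func waitHealthy(services []string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for _, svc := range services {
		url := fmt.Sprintf("http://%s/healthz", svc) // endpoint path assumed
		for {
			resp, err := http.Get(url)
			if err == nil {
				resp.Body.Close()
				if resp.StatusCode == http.StatusOK {
					break // healthy, move on to the next service
				}
			}
			if time.Now().After(deadline) {
				return fmt.Errorf("%s not healthy after %s", svc, timeout)
			}
			time.Sleep(time.Second)
		}
	}
	return nil
}

func main() {
	err := waitHealthy([]string{"storageusers1:9159", "storageusers2:9159"}, 300*time.Second)
	if err != nil {
		fmt.Println(err)
	}
}
```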

go.mod
View File

@@ -21,7 +21,7 @@ require (
github.com/egirna/icap-client v0.1.1
github.com/gabriel-vasile/mimetype v1.4.8
github.com/ggwhite/go-masker v1.1.0
github.com/go-chi/chi/v5 v5.2.0
github.com/go-chi/chi/v5 v5.2.1
github.com/go-chi/render v1.0.3
github.com/go-ldap/ldap/v3 v3.4.10
github.com/go-ldap/ldif v0.0.0-20200320164324-fd88d9b715b3
@@ -62,22 +62,22 @@ require (
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.23.0
github.com/onsi/gomega v1.36.2
github.com/open-policy-agent/opa v1.1.0
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250312134906-766c69c5d1be
github.com/open-policy-agent/opa v1.2.0
github.com/opencloud-eu/reva/v2 v2.28.0
github.com/orcaman/concurrent-map v1.0.0
github.com/owncloud/libre-graph-api-go v1.0.5-0.20240829135935-80dc00d6f5ea
github.com/pkg/errors v0.9.1
github.com/pkg/xattr v0.4.10
github.com/prometheus/client_golang v1.21.1
github.com/r3labs/sse/v2 v2.10.0
github.com/riandyrn/otelchi v0.12.0
github.com/riandyrn/otelchi v0.12.1
github.com/rogpeppe/go-internal v1.14.1
github.com/rs/cors v1.11.1
github.com/rs/zerolog v1.33.0
github.com/shamaton/msgpack/v2 v2.2.2
github.com/sirupsen/logrus v1.9.3
github.com/spf13/afero v1.12.0
github.com/spf13/cobra v1.8.1
github.com/spf13/cobra v1.9.1
github.com/stretchr/testify v1.10.0
github.com/test-go/testify v1.1.4
github.com/thejerf/suture/v4 v4.0.6
@@ -88,7 +88,7 @@ require (
github.com/xhit/go-simple-mail/v2 v2.16.0
go-micro.dev/v4 v4.11.0
go.etcd.io/bbolt v1.4.0
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0
go.opentelemetry.io/contrib/zpages v0.57.0
go.opentelemetry.io/otel v1.35.0
@@ -120,10 +120,9 @@ require (
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/sprig v2.22.0+incompatible // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/OneOfOne/xxhash v1.2.8 // indirect
github.com/ProtonMail/go-crypto v1.1.5 // indirect
github.com/RoaringBitmap/roaring v1.9.3 // indirect
github.com/agnivade/levenshtein v1.2.0 // indirect
github.com/agnivade/levenshtein v1.2.1 // indirect
github.com/ajg/form v1.5.1 // indirect
github.com/alexedwards/argon2id v1.0.0 // indirect
github.com/amoghe/go-crypt v0.0.0-20220222110647-20eada5f5964 // indirect
@@ -160,7 +159,7 @@ require (
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cornelk/hashmap v1.0.8 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/crewjam/httperr v0.2.0 // indirect
github.com/crewjam/saml v0.4.14 // indirect
github.com/cyphar/filepath-securejoin v0.3.6 // indirect
@@ -274,7 +273,7 @@ require (
github.com/pjbgf/sha1cd v0.3.2 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/pquerna/cachecontrol v0.2.0 // indirect
github.com/prometheus/alertmanager v0.27.0 // indirect
github.com/prometheus/alertmanager v0.28.1 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
@@ -289,8 +288,8 @@ require (
github.com/sercand/kuberesolver/v5 v5.1.1 // indirect
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
github.com/sethvargo/go-password v0.3.1 // indirect
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 // indirect
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 // indirect
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c // indirect
github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92 // indirect
github.com/skeema/knownhosts v1.3.0 // indirect
github.com/spacewander/go-suffix-tree v0.0.0-20191010040751-0865e368c784 // indirect
github.com/spf13/pflag v1.0.6 // indirect

go.sum
View File

@@ -84,8 +84,6 @@ github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA
github.com/Nerzal/gocloak/v13 v13.9.0 h1:YWsJsdM5b0yhM2Ba3MLydiOlujkBry4TtdzfIzSVZhw=
github.com/Nerzal/gocloak/v13 v13.9.0/go.mod h1:YYuDcXZ7K2zKECyVP7pPqjKxx2AzYSpKDj8d6GuyM10=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/OpenDNS/vegadns2client v0.0.0-20180418235048-a3fa4a771d87/go.mod h1:iGLljf5n9GjT6kc0HBvyI1nOKnGQbNB66VzSNbK5iks=
github.com/ProtonMail/go-crypto v1.1.5 h1:eoAQfK2dwL+tFSFpr7TbOaPNUbPiJj4fLYwwGE1FQO4=
github.com/ProtonMail/go-crypto v1.1.5/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
@@ -93,8 +91,8 @@ github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4
github.com/RoaringBitmap/roaring v1.9.3/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY=
github.com/agnivade/levenshtein v1.2.0/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM=
github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU=
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
github.com/akamai/AkamaiOPEN-edgegrid-golang v1.1.0/go.mod h1:kX6YddBkXqqywAe8c9LyvgTCyFuZCTMF4cRPQhc3Fy8=
@@ -133,9 +131,8 @@ github.com/bbalet/stopwords v1.0.0/go.mod h1:sAWrQoDMfqARGIn4s6dp7OW7ISrshUD8IP2
github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
github.com/beevik/etree v1.5.0 h1:iaQZFSDS+3kYZiGoc9uKeOkUY3nYMXOKLl6KIJxiJWs=
github.com/beevik/etree v1.5.0/go.mod h1:gPNJNaBGVZ9AwsidazFZyygnd+0pAU38N4D+WemwKNs=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -236,9 +233,8 @@ github.com/cornelk/hashmap v1.0.8/go.mod h1:RfZb7JO3RviW/rT6emczVuC/oxpdz4UsSB2L
github.com/cpu/goacmedns v0.1.1/go.mod h1:MuaouqEhPAHxsbqjgnck5zeghuwBP1dLnPoobeGqugQ=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/crewjam/httperr v0.2.0 h1:b2BfXR8U3AlIHwNeFFvZ+BV1LFvKLlzMjzaTnZMybNo=
github.com/crewjam/httperr v0.2.0/go.mod h1:Jlz+Sg/XqBQhyMjdDiC+GNNRzZTD7x39Gu3pglZ5oH4=
@@ -338,8 +334,8 @@ github.com/go-asn1-ber/asn1-ber v1.4.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkPro
github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk=
github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-chi/chi v4.0.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
github.com/go-chi/chi/v5 v5.2.0 h1:Aj1EtB0qR2Rdo2dG4O94RIU35w2lvQSj6BRA4+qwFL0=
github.com/go-chi/chi/v5 v5.2.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/go-chi/chi/v5 v5.2.1 h1:KOIHODQj58PmL80G2Eak4WdvUzjSJSm0vG72crDCqb8=
github.com/go-chi/chi/v5 v5.2.1/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
github.com/go-chi/render v1.0.3 h1:AsXqd2a1/INaIfUSKq3G5uA8weYx20FOsM7uSoCyyt4=
github.com/go-chi/render v1.0.3/go.mod h1:/gr3hVkmYR0YlEy3LxCuVRFzEu9Ruok+gFqbIofjao0=
github.com/go-cmd/cmd v1.0.5/go.mod h1:y8q8qlK5wQibcw63djSl/ntiHUHXHGdCkPk0j4QeW4s=
@@ -863,10 +859,10 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/open-policy-agent/opa v1.1.0 h1:HMz2evdEMTyNqtdLjmu3Vyx06BmhNYAx67Yz3Ll9q2s=
github.com/open-policy-agent/opa v1.1.0/go.mod h1:T1pASQ1/vwfTa+e2fYcfpLCvWgYtqtiUv+IuA/dLPQs=
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250312134906-766c69c5d1be h1:dxKsVUzdKIGf1hfGdr1GH6NFfo0xuIcPma/qjHNG8mU=
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250312134906-766c69c5d1be/go.mod h1:sqlExPoEnEd0KdfoSKogV8PrwEBY3l06icoa4gJnGnU=
github.com/open-policy-agent/opa v1.2.0 h1:88NDVCM0of1eO6Z4AFeL3utTEtMuwloFmWWU7dRV1z0=
github.com/open-policy-agent/opa v1.2.0/go.mod h1:30euUmOvuBoebRCcJ7DMF42bRBOPznvt0ACUMYDUGVY=
github.com/opencloud-eu/reva/v2 v2.28.0 h1:ai7PRIESdw2SiM/MmK8Tc+C/GDHBwzlQp4MwCnqTl5Y=
github.com/opencloud-eu/reva/v2 v2.28.0/go.mod h1:hbCaf73/SzHtbVlmVCU1Eheadds029am/X0Bff5k514=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
@@ -911,8 +907,8 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
github.com/pquerna/cachecontrol v0.2.0 h1:vBXSNuE5MYP9IJ5kjsdo8uq+w41jSPgvba2DEnkRx9k=
github.com/pquerna/cachecontrol v0.2.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
github.com/pquerna/otp v1.3.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
github.com/prometheus/alertmanager v0.27.0 h1:V6nTa2J5V4s8TG4C4HtrBP/WNSebCCTYGGv4qecA/+I=
github.com/prometheus/alertmanager v0.27.0/go.mod h1:8Ia/R3urPmbzJ8OsdvmZvIprDwvwmYCmUbwBL+jlPOE=
github.com/prometheus/alertmanager v0.28.1 h1:BK5pCoAtaKg01BYRUJhEDV1tqJMEtYBGzPw8QdvnnvA=
github.com/prometheus/alertmanager v0.28.1/go.mod h1:0StpPUDDHi1VXeM7p2yYfeZgLVi/PPlt39vo9LQUHxM=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
@@ -969,8 +965,8 @@ github.com/rainycape/memcache v0.0.0-20150622160815-1031fa0ce2f2/go.mod h1:7tZKc
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/riandyrn/otelchi v0.12.0 h1:7aXphKyzut8849DDb/0LWyCPq4mfnikpggEmmW3b38U=
github.com/riandyrn/otelchi v0.12.0/go.mod h1:weZZeUJURvtCcbWsdb7Y6F8KFZGedJlSrgUjq9VirV8=
github.com/riandyrn/otelchi v0.12.1 h1:FdRKK3/RgZ/T+d+qTH5Uw3MFx0KwRF38SkdfTMMq/m8=
github.com/riandyrn/otelchi v0.12.1/go.mod h1:weZZeUJURvtCcbWsdb7Y6F8KFZGedJlSrgUjq9VirV8=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -1010,11 +1006,11 @@ github.com/shamaton/msgpack/v2 v2.2.2 h1:GOIg0c9LV04VwzOOqZSrmsv/JzjNOOMxnS/HvOH
github.com/shamaton/msgpack/v2 v2.2.2/go.mod h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs=
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1qZoYLZzLnBw+QkPP9WZnjlSWihhxAJC1+/M=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 h1:pXY9qYc/MP5zdvqWEUH6SjNiu7VhSjuVFTFiTcphaLU=
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92 h1:OfRzdxCzDhp+rsKWXuOO2I/quKMJ/+TQwVbIP/gltZg=
github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92/go.mod h1:7/OT02F6S6I7v6WXb+IjhMuZEYfH/RJ5RwEWnEo5BMg=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -1041,8 +1037,8 @@ github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -1162,8 +1158,8 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
go.opentelemetry.io/contrib/zpages v0.57.0 h1:mHFZlTkyrUJcuBhpytPSaVPiVkqri96RKUDk01d83eQ=

View File

@@ -105,11 +105,11 @@
"web-vitals": "^3.5.2"
},
"devDependencies": {
"@babel/core": "7.26.9",
"@babel/core": "7.26.10",
"@typescript-eslint/eslint-plugin": "^4.33.0",
"@typescript-eslint/parser": "^4.33.0",
"babel-eslint": "^10.1.0",
"babel-loader": "9.2.1",
"babel-loader": "10.0.0",
"babel-plugin-named-asset-import": "^0.3.8",
"babel-preset-react-app": "^10.1.0",
"case-sensitive-paths-webpack-plugin": "2.4.0",

View File

File diff suppressed because it is too large.

View File

@@ -143,7 +143,7 @@ func DefaultConfig() *config.Config {
UseSpaceGroups: false,
Root: filepath.Join(defaults.BaseDataPath(), "storage", "users"),
PersonalSpaceAliasTemplate: "{{.SpaceType}}/{{.User.Username | lower}}",
PersonalSpacePathTemplate: "users/{{.User.Username}}",
PersonalSpacePathTemplate: "users/{{.User.Id.OpaqueId}}",
GeneralSpaceAliasTemplate: "{{.SpaceType}}/{{.SpaceName | replace \" \" \"-\" | lower}}",
GeneralSpacePathTemplate: "projects/{{.SpaceId}}",
PermissionsEndpoint: "eu.opencloud.api.settings",

View File

@@ -1,6 +1,6 @@
SHELL := bash
NAME := web
WEB_ASSETS_VERSION = v1.0.0
WEB_ASSETS_VERSION = v2.0.0
WEB_ASSETS_BRANCH = main
ifneq (, $(shell command -v go 2> /dev/null)) # suppress `command not found warnings` for non go targets in CI

View File

@@ -36,14 +36,9 @@
- [apiSearch1/search.feature:321](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L321)
- [apiSearch1/search.feature:324](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L324)
- [apiSearch1/search.feature:356](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L356)
- [apiSearch1/search.feature:369](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L369)
- [apiSearch1/search.feature:396](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L396)
- [apiSearch1/search.feature:410](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L410)
- [apiSearch1/search.feature:437](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L437)
- [apiSearch1/search.feature:438](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L438)
- [apiSearch1/search.feature:439](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L439)
- [apiSearch1/search.feature:465](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L465)
- [apiSearch1/search.feature:466](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L466)
- [apiSearch1/search.feature:467](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L467)
- [apiSearch2/mediaTypeSearch.feature:31](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch2/mediaTypeSearch.feature#L31)
- [apiSearch2/mediaTypeSearch.feature:32](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch2/mediaTypeSearch.feature#L32)
- [apiSearch2/mediaTypeSearch.feature:33](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch2/mediaTypeSearch.feature#L33)
@@ -154,7 +149,7 @@
- [apiSpacesShares/shareSubItemOfSpaceViaPublicLink.feature:147](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpacesShares/shareSubItemOfSpaceViaPublicLink.feature#L147)
- [apiSharingNgLinkSharePermission/createLinkShare.feature:473](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSharingNgLinkSharePermission/createLinkShare.feature#L473)
- [apiSharingNgLinkSharePermission/createLinkShare.feature:1208](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSharingNgLinkSharePermission/createLinkShare.feature#L1208)
- [apiSharingNgLinkSharePermission/updateLinkShare.feature:203](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSharingNgLinkSharePermission/updateLinkShare.feature#L203)
- [apiSharingNgLinkSharePermission/updateLinkShare.feature:204](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSharingNgLinkSharePermission/updateLinkShare.feature#L204)
- [apiSharingNgLinkSharePermission/updateLinkShare.feature:282](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSharingNgLinkSharePermission/updateLinkShare.feature#L282)
- [apiSharingNgLinkShareRoot/updateLinkShare.feature:10](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSharingNgLinkShareRoot/updateLinkShare.feature#L10)
- [apiSharingNgLinkShareRoot/updateLinkShare.feature:42](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSharingNgLinkShareRoot/updateLinkShare.feature#L42)
@@ -193,7 +188,7 @@
- [apiSpaces/uploadSpaces.feature:95](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpaces/uploadSpaces.feature#L95)
- [apiSpaces/uploadSpaces.feature:112](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpaces/uploadSpaces.feature#L112)
- [apiSpaces/uploadSpaces.feature:129](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpaces/uploadSpaces.feature#L129)
- [apiSpacesShares/shareSpacesViaLink.feature:61](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpacesShares/shareSpacesViaLink.feature#L61)
- [apiSpacesShares/shareSpacesViaLink.feature:58](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpacesShares/shareSpacesViaLink.feature#L58)
- [apiDepthInfinity/propfind.feature:74](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiDepthInfinity/propfind.feature#L74)
- [apiDepthInfinity/propfind.feature:124](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiDepthInfinity/propfind.feature#L124)
- [apiLocks/lockFiles.feature:490](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L490)
@@ -209,17 +204,11 @@
- [apiLocks/unlockFiles.feature:322](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L322)
- [apiLocks/unlockFiles.feature:323](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L323)
- [apiAntivirus/antivirus.feature:114](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L114)
- [apiAntivirus/antivirus.feature:115](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L115)
- [apiAntivirus/antivirus.feature:116](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L116)
- [apiAntivirus/antivirus.feature:117](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L117)
- [apiAntivirus/antivirus.feature:118](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L118)
- [apiAntivirus/antivirus.feature:119](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L119)
- [apiAntivirus/antivirus.feature:140](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L140)
- [apiAntivirus/antivirus.feature:141](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L141)
- [apiAntivirus/antivirus.feature:142](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L142)
- [apiAntivirus/antivirus.feature:143](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L143)
- [apiAntivirus/antivirus.feature:144](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L144)
- [apiAntivirus/antivirus.feature:145](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L145)
- [apiAntivirus/antivirus.feature:356](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L356)
- [apiAntivirus/antivirus.feature:357](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L357)
- [apiAntivirus/antivirus.feature:358](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L358)
@@ -310,6 +299,8 @@
- [coreApiWebdavOperations/propfind.feature:55](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavOperations/propfind.feature#L55)
- [coreApiWebdavOperations/propfind.feature:69](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavOperations/propfind.feature#L69)
- [coreApiWebdavUpload/uploadFile.feature:376](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUpload/uploadFile.feature#L376)
- [apiActivities/shareActivities.feature:1956](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiActivities/shareActivities.feature#L1956)
- [apiActivities/shareActivities.feature:2095](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiActivities/shareActivities.feature#L2095)
#### [Cannot create new TUS upload resource using /webdav without remote.php - returns html instead](https://github.com/owncloud/ocis/issues/10346)

View File

@@ -15,13 +15,17 @@ BINGO_DIR="$ROOT_PATH/.bingo"
BINGO_HASH=$(cat "$BINGO_DIR"/* | sha256sum | cut -d ' ' -f 1)
URL="$CACHE_ENDPOINT/$CACHE_BUCKET/opencloud/go-bin/$BINGO_HASH/$2"
if curl --output /dev/null --silent --head --fail "$URL"; then
mc alias set s3 "$MC_HOST" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
if mc ls --json s3/"$CACHE_BUCKET"/opencloud/go-bin/"$BINGO_HASH"/$2 | grep "\"status\":\"success\""; then
echo "[INFO] Go bin cache with has '$BINGO_HASH' exists."
# https://discourse.drone.io/t/how-to-exit-a-pipeline-early-without-failing/3951
# exit a Pipeline early without failing
exit 78
ENV="BIN_CACHE_FOUND=true\n"
else
# store the hash of the .bingo folder in the '.bingo_hash' file
echo "$BINGO_HASH" >"$ROOT_PATH/.bingo_hash"
echo "[INFO] Go bin cache with hash '$BINGO_HASH' does not exist."
ENV="BIN_CACHE_FOUND=false\n"
fi
echo -e $ENV >> .env
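Instead of aborting the pipeline with drone's special exit code 78, the rewritten script records the result in a shared .env file; the later steps shown in the .star diff above then run `. ./.env` and `if $BIN_CACHE_FOUND; then exit 0; fi`. A hypothetical Go rendering of the recording side (file name and variable mirror the script; this is a sketch, not project code):

```go
package main

import (
	"fmt"
	"os"
)

// recordCacheResult appends BIN_CACHE_FOUND=<bool> to the step-shared env
// file; subsequent steps source it and exit early when the cache was found.
func recordCacheResult(envFile string, found bool) error {
	f, err := os.OpenFile(envFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = fmt.Fprintf(f, "BIN_CACHE_FOUND=%t\n", found)
	return err
}

func main() {
	// the check step would compute `found` from the mc ls probe shown above
	if err := recordCacheResult(".env", false); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```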

View File

@@ -10,16 +10,15 @@ fi
echo "Checking web version - $WEB_COMMITID in cache"
URL="$CACHE_ENDPOINT/$CACHE_BUCKET/opencloud/web-test-runner/$WEB_COMMITID/$1.tar.gz"
mc alias set s3 "$MC_HOST" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
echo "Checking for the web cache at '$URL'."
if curl --output /dev/null --silent --head --fail "$URL"
if mc ls --json s3/"$CACHE_BUCKET"/opencloud/web-test-runner/"$WEB_COMMITID"/"$1".tar.gz | grep "\"status\":\"success\"";
then
echo "$1 cache with commit id $WEB_COMMITID already available."
# https://discourse.drone.io/t/how-to-exit-a-pipeline-early-without-failing/3951
# exit a Pipeline early without failing
exit 78
ENV="WEB_CACHE_FOUND=true\n"
else
echo "$1 cache with commit id $WEB_COMMITID was not available."
ENV="WEB_CACHE_FOUND=false\n"
fi
echo -e $ENV >> .woodpecker.env

View File

@@ -1,4 +0,0 @@
*.txt
*.pprof
cmap2/
cache/

View File

@@ -1,13 +0,0 @@
language: go
sudo: false
go:
- "1.10"
- "1.11"
- "1.12"
- master
script:
- go test -tags safe ./...
- go test ./...
-

View File

@@ -1,187 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

View File

@@ -1,74 +0,0 @@
# xxhash [![GoDoc](https://godoc.org/github.com/OneOfOne/xxhash?status.svg)](https://godoc.org/github.com/OneOfOne/xxhash) [![Build Status](https://travis-ci.org/OneOfOne/xxhash.svg?branch=master)](https://travis-ci.org/OneOfOne/xxhash) [![Coverage](https://gocover.io/_badge/github.com/OneOfOne/xxhash)](https://gocover.io/github.com/OneOfOne/xxhash)
This is a native Go implementation of the excellent [xxhash](https://github.com/Cyan4973/xxHash)* algorithm, an extremely fast non-cryptographic Hash algorithm, working at speeds close to RAM limits.
* The C implementation is ([Copyright](https://github.com/Cyan4973/xxHash/blob/master/LICENSE) (c) 2012-2014, Yann Collet)
## Install
go get github.com/OneOfOne/xxhash
## Features
* On Go 1.7+ the pure go version is faster than CGO for all inputs.
* Supports ChecksumString{32,64} and xxhash{32,64}.WriteString, which avoid copying when possible and fall back to a copy on appengine.
* The native version falls back to a less optimized version on appengine due to the lack of unsafe.
* Almost as fast as the mostly pure assembly version written by the brilliant [cespare](https://github.com/cespare/xxhash), while also supporting seeds.
* To manually toggle the appengine version, build with `-tags safe`.
## Benchmark
### Core i7-4790 @ 3.60GHz, Linux 4.12.6-1-ARCH (64bit), Go tip (+ff90f4af66 2017-08-19)
```bash
➤ go test -bench '64' -count 5 -tags cespare | benchstat /dev/stdin
name time/op
# https://github.com/cespare/xxhash
XXSum64Cespare/Func-8 160ns ± 2%
XXSum64Cespare/Struct-8 173ns ± 1%
XXSum64ShortCespare/Func-8 6.78ns ± 1%
XXSum64ShortCespare/Struct-8 19.6ns ± 2%
# this package (default mode, using unsafe)
XXSum64/Func-8 170ns ± 1%
XXSum64/Struct-8 182ns ± 1%
XXSum64Short/Func-8 13.5ns ± 3%
XXSum64Short/Struct-8 20.4ns ± 0%
# this package (appengine, *not* using unsafe)
XXSum64/Func-8 241ns ± 5%
XXSum64/Struct-8 243ns ± 6%
XXSum64Short/Func-8 15.2ns ± 2%
XXSum64Short/Struct-8 23.7ns ± 5%
CRC64ISO-8 1.23µs ± 1%
CRC64ISOString-8 2.71µs ± 4%
CRC64ISOShort-8 22.2ns ± 3%
Fnv64-8 2.34µs ± 1%
Fnv64Short-8 74.7ns ± 8%
```
## Usage
```go
h := xxhash.New64()
// r, err := os.Open("......")
// defer r.Close()
r := strings.NewReader("data to hash")
io.Copy(h, r)
fmt.Println("xxhash.Backend:", xxhash.Backend)
fmt.Println("File checksum:", h.Sum64())
```
[<kbd>playground</kbd>](https://play.golang.org/p/wHKBwfu6CPV)
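For one-shot hashing without a streaming writer, the package also exposes checksum helpers (defined in the sources below); a minimal sketch, assuming the import path from the install section:

```go
package main

import (
	"fmt"

	"github.com/OneOfOne/xxhash"
)

func main() {
	data := []byte("hello world")
	// Seed defaults to 0; Checksum64S accepts an explicit seed.
	fmt.Println(xxhash.Checksum64(data))
	fmt.Println(xxhash.ChecksumString64("hello world")) // same digest, no []byte copy
}
```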
## TODO
* Rewrite the 32bit version to be more optimized.
* General cleanup as the Go inliner gets smarter.
## License
This project is released under the Apache v2 license. See [LICENSE](LICENSE) for more details.

View File

@@ -1,294 +0,0 @@
package xxhash
import (
"encoding/binary"
"errors"
"hash"
)
const (
prime32x1 uint32 = 2654435761
prime32x2 uint32 = 2246822519
prime32x3 uint32 = 3266489917
prime32x4 uint32 = 668265263
prime32x5 uint32 = 374761393
prime64x1 uint64 = 11400714785074694791
prime64x2 uint64 = 14029467366897019727
prime64x3 uint64 = 1609587929392839161
prime64x4 uint64 = 9650029242287828579
prime64x5 uint64 = 2870177450012600261
maxInt32 int32 = (1<<31 - 1)
// precomputed zero Vs for seed 0
zero64x1 = 0x60ea27eeadc0b5d6
zero64x2 = 0xc2b2ae3d27d4eb4f
zero64x3 = 0x0
zero64x4 = 0x61c8864e7a143579
)
const (
magic32 = "xxh\x07"
magic64 = "xxh\x08"
marshaled32Size = len(magic32) + 4*7 + 16
marshaled64Size = len(magic64) + 8*6 + 32 + 1
)
func NewHash32() hash.Hash { return New32() }
func NewHash64() hash.Hash { return New64() }
// Checksum32 returns the checksum of the input data with the seed set to 0.
func Checksum32(in []byte) uint32 {
return Checksum32S(in, 0)
}
// ChecksumString32 returns the checksum of the input data, without creating a copy, with the seed set to 0.
func ChecksumString32(s string) uint32 {
return ChecksumString32S(s, 0)
}
type XXHash32 struct {
mem [16]byte
ln, memIdx int32
v1, v2, v3, v4 uint32
seed uint32
}
// Size returns the number of bytes Sum will return.
func (xx *XXHash32) Size() int {
return 4
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (xx *XXHash32) BlockSize() int {
return 16
}
// NewS32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the specific seed.
func NewS32(seed uint32) (xx *XXHash32) {
xx = &XXHash32{
seed: seed,
}
xx.Reset()
return
}
// New32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the seed set to 0.
func New32() *XXHash32 {
return NewS32(0)
}
func (xx *XXHash32) Reset() {
xx.v1 = xx.seed + prime32x1 + prime32x2
xx.v2 = xx.seed + prime32x2
xx.v3 = xx.seed
xx.v4 = xx.seed - prime32x1
xx.ln, xx.memIdx = 0, 0
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (xx *XXHash32) Sum(in []byte) []byte {
s := xx.Sum32()
return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}
// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (xx *XXHash32) MarshalBinary() ([]byte, error) {
b := make([]byte, 0, marshaled32Size)
b = append(b, magic32...)
b = appendUint32(b, xx.v1)
b = appendUint32(b, xx.v2)
b = appendUint32(b, xx.v3)
b = appendUint32(b, xx.v4)
b = appendUint32(b, xx.seed)
b = appendInt32(b, xx.ln)
b = appendInt32(b, xx.memIdx)
b = append(b, xx.mem[:]...)
return b, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (xx *XXHash32) UnmarshalBinary(b []byte) error {
if len(b) < len(magic32) || string(b[:len(magic32)]) != magic32 {
return errors.New("xxhash: invalid hash state identifier")
}
if len(b) != marshaled32Size {
return errors.New("xxhash: invalid hash state size")
}
b = b[len(magic32):]
b, xx.v1 = consumeUint32(b)
b, xx.v2 = consumeUint32(b)
b, xx.v3 = consumeUint32(b)
b, xx.v4 = consumeUint32(b)
b, xx.seed = consumeUint32(b)
b, xx.ln = consumeInt32(b)
b, xx.memIdx = consumeInt32(b)
copy(xx.mem[:], b)
return nil
}
// Checksum64 is an alias for Checksum64S(in, 0).
func Checksum64(in []byte) uint64 {
return Checksum64S(in, 0)
}
// ChecksumString64 returns the checksum of the input data, without creating a copy, with the seed set to 0.
func ChecksumString64(s string) uint64 {
return ChecksumString64S(s, 0)
}
type XXHash64 struct {
v1, v2, v3, v4 uint64
seed uint64
ln uint64
mem [32]byte
memIdx int8
}
// Size returns the number of bytes Sum will return.
func (xx *XXHash64) Size() int {
return 8
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (xx *XXHash64) BlockSize() int {
return 32
}
// NewS64 creates a new hash.Hash64 computing the 64bit xxHash checksum starting with the specific seed.
func NewS64(seed uint64) (xx *XXHash64) {
xx = &XXHash64{
seed: seed,
}
xx.Reset()
return
}
// New64 creates a new hash.Hash64 computing the 64bit xxHash checksum starting with the seed set to 0x0.
func New64() *XXHash64 {
return NewS64(0)
}
func (xx *XXHash64) Reset() {
xx.ln, xx.memIdx = 0, 0
xx.v1, xx.v2, xx.v3, xx.v4 = resetVs64(xx.seed)
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (xx *XXHash64) Sum(in []byte) []byte {
s := xx.Sum64()
return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}
// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (xx *XXHash64) MarshalBinary() ([]byte, error) {
b := make([]byte, 0, marshaled64Size)
b = append(b, magic64...)
b = appendUint64(b, xx.v1)
b = appendUint64(b, xx.v2)
b = appendUint64(b, xx.v3)
b = appendUint64(b, xx.v4)
b = appendUint64(b, xx.seed)
b = appendUint64(b, xx.ln)
b = append(b, byte(xx.memIdx))
b = append(b, xx.mem[:]...)
return b, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (xx *XXHash64) UnmarshalBinary(b []byte) error {
if len(b) < len(magic64) || string(b[:len(magic64)]) != magic64 {
return errors.New("xxhash: invalid hash state identifier")
}
if len(b) != marshaled64Size {
return errors.New("xxhash: invalid hash state size")
}
b = b[len(magic64):]
b, xx.v1 = consumeUint64(b)
b, xx.v2 = consumeUint64(b)
b, xx.v3 = consumeUint64(b)
b, xx.v4 = consumeUint64(b)
b, xx.seed = consumeUint64(b)
b, xx.ln = consumeUint64(b)
xx.memIdx = int8(b[0])
b = b[1:]
copy(xx.mem[:], b)
return nil
}
func appendInt32(b []byte, x int32) []byte { return appendUint32(b, uint32(x)) }
func appendUint32(b []byte, x uint32) []byte {
var a [4]byte
binary.LittleEndian.PutUint32(a[:], x)
return append(b, a[:]...)
}
func appendUint64(b []byte, x uint64) []byte {
var a [8]byte
binary.LittleEndian.PutUint64(a[:], x)
return append(b, a[:]...)
}
func consumeInt32(b []byte) ([]byte, int32) { bn, x := consumeUint32(b); return bn, int32(x) }
func consumeUint32(b []byte) ([]byte, uint32) { x := u32(b); return b[4:], x }
func consumeUint64(b []byte) ([]byte, uint64) { x := u64(b); return b[8:], x }
// force the compiler to use ROTL instructions
func rotl32_1(x uint32) uint32 { return (x << 1) | (x >> (32 - 1)) }
func rotl32_7(x uint32) uint32 { return (x << 7) | (x >> (32 - 7)) }
func rotl32_11(x uint32) uint32 { return (x << 11) | (x >> (32 - 11)) }
func rotl32_12(x uint32) uint32 { return (x << 12) | (x >> (32 - 12)) }
func rotl32_13(x uint32) uint32 { return (x << 13) | (x >> (32 - 13)) }
func rotl32_17(x uint32) uint32 { return (x << 17) | (x >> (32 - 17)) }
func rotl32_18(x uint32) uint32 { return (x << 18) | (x >> (32 - 18)) }
func rotl64_1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) }
func rotl64_7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) }
func rotl64_11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) }
func rotl64_12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) }
func rotl64_18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) }
func rotl64_23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) }
func rotl64_27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) }
func rotl64_31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) }
func mix64(h uint64) uint64 {
h ^= h >> 33
h *= prime64x2
h ^= h >> 29
h *= prime64x3
h ^= h >> 32
return h
}
func resetVs64(seed uint64) (v1, v2, v3, v4 uint64) {
if seed == 0 {
return zero64x1, zero64x2, zero64x3, zero64x4
}
return (seed + prime64x1 + prime64x2), (seed + prime64x2), (seed), (seed - prime64x1)
}
// borrowed from cespare
func round64(h, v uint64) uint64 {
h += v * prime64x2
h = rotl64_31(h)
h *= prime64x1
return h
}
func mergeRound64(h, v uint64) uint64 {
v = round64(0, v)
h ^= v
h = h*prime64x1 + prime64x4
return h
}
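
Since XXHash32 and XXHash64 implement encoding.BinaryMarshaler and encoding.BinaryUnmarshaler (above), hash state can be snapshotted mid-stream and resumed later. A hedged sketch, assuming the import path from the README:

```go
package main

import (
	"fmt"

	"github.com/OneOfOne/xxhash"
)

func main() {
	h := xxhash.New64()
	h.WriteString("hello ")

	state, err := h.MarshalBinary() // snapshot the partial state
	if err != nil {
		panic(err)
	}

	h2 := xxhash.New64()
	if err := h2.UnmarshalBinary(state); err != nil {
		panic(err)
	}

	h.WriteString("world")
	h2.WriteString("world")
	fmt.Println(h.Sum64() == h2.Sum64()) // true: the restored state converges
}
```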

View File

@@ -1,161 +0,0 @@
package xxhash
func u32(in []byte) uint32 {
return uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
}
func u64(in []byte) uint64 {
return uint64(in[0]) | uint64(in[1])<<8 | uint64(in[2])<<16 | uint64(in[3])<<24 | uint64(in[4])<<32 | uint64(in[5])<<40 | uint64(in[6])<<48 | uint64(in[7])<<56
}
// Checksum32S returns the checksum of the input bytes with the specific seed.
func Checksum32S(in []byte, seed uint32) (h uint32) {
var i int
if len(in) > 15 {
var (
v1 = seed + prime32x1 + prime32x2
v2 = seed + prime32x2
v3 = seed + 0
v4 = seed - prime32x1
)
for ; i < len(in)-15; i += 16 {
in := in[i : i+16 : len(in)]
v1 += u32(in[0:4:len(in)]) * prime32x2
v1 = rotl32_13(v1) * prime32x1
v2 += u32(in[4:8:len(in)]) * prime32x2
v2 = rotl32_13(v2) * prime32x1
v3 += u32(in[8:12:len(in)]) * prime32x2
v3 = rotl32_13(v3) * prime32x1
v4 += u32(in[12:16:len(in)]) * prime32x2
v4 = rotl32_13(v4) * prime32x1
}
h = rotl32_1(v1) + rotl32_7(v2) + rotl32_12(v3) + rotl32_18(v4)
} else {
h = seed + prime32x5
}
h += uint32(len(in))
for ; i <= len(in)-4; i += 4 {
in := in[i : i+4 : len(in)]
h += u32(in[0:4:len(in)]) * prime32x3
h = rotl32_17(h) * prime32x4
}
for ; i < len(in); i++ {
h += uint32(in[i]) * prime32x5
h = rotl32_11(h) * prime32x1
}
h ^= h >> 15
h *= prime32x2
h ^= h >> 13
h *= prime32x3
h ^= h >> 16
return
}
func (xx *XXHash32) Write(in []byte) (n int, err error) {
i, ml := 0, int(xx.memIdx)
n = len(in)
xx.ln += int32(n)
if d := 16 - ml; ml > 0 && ml+len(in) > 16 {
xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in[:d]))
ml, in = 16, in[d:len(in):len(in)]
} else if ml+len(in) < 16 {
xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in))
return
}
if ml > 0 {
i += 16 - ml
xx.memIdx += int32(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in))
in := xx.mem[:16:len(xx.mem)]
xx.v1 += u32(in[0:4:len(in)]) * prime32x2
xx.v1 = rotl32_13(xx.v1) * prime32x1
xx.v2 += u32(in[4:8:len(in)]) * prime32x2
xx.v2 = rotl32_13(xx.v2) * prime32x1
xx.v3 += u32(in[8:12:len(in)]) * prime32x2
xx.v3 = rotl32_13(xx.v3) * prime32x1
xx.v4 += u32(in[12:16:len(in)]) * prime32x2
xx.v4 = rotl32_13(xx.v4) * prime32x1
xx.memIdx = 0
}
for ; i <= len(in)-16; i += 16 {
in := in[i : i+16 : len(in)]
xx.v1 += u32(in[0:4:len(in)]) * prime32x2
xx.v1 = rotl32_13(xx.v1) * prime32x1
xx.v2 += u32(in[4:8:len(in)]) * prime32x2
xx.v2 = rotl32_13(xx.v2) * prime32x1
xx.v3 += u32(in[8:12:len(in)]) * prime32x2
xx.v3 = rotl32_13(xx.v3) * prime32x1
xx.v4 += u32(in[12:16:len(in)]) * prime32x2
xx.v4 = rotl32_13(xx.v4) * prime32x1
}
if len(in)-i != 0 {
xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in[i:len(in):len(in)]))
}
return
}
func (xx *XXHash32) Sum32() (h uint32) {
var i int32
if xx.ln > 15 {
h = rotl32_1(xx.v1) + rotl32_7(xx.v2) + rotl32_12(xx.v3) + rotl32_18(xx.v4)
} else {
h = xx.seed + prime32x5
}
h += uint32(xx.ln)
if xx.memIdx > 0 {
for ; i < xx.memIdx-3; i += 4 {
in := xx.mem[i : i+4 : len(xx.mem)]
h += u32(in[0:4:len(in)]) * prime32x3
h = rotl32_17(h) * prime32x4
}
for ; i < xx.memIdx; i++ {
h += uint32(xx.mem[i]) * prime32x5
h = rotl32_11(h) * prime32x1
}
}
h ^= h >> 15
h *= prime32x2
h ^= h >> 13
h *= prime32x3
h ^= h >> 16
return
}
// Checksum64S returns the 64bit xxhash checksum for a single input
func Checksum64S(in []byte, seed uint64) uint64 {
if len(in) == 0 && seed == 0 {
return 0xef46db3751d8e999
}
if len(in) > 31 {
return checksum64(in, seed)
}
return checksum64Short(in, seed)
}
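
The zero-length fast path above hard-codes the canonical xxh64 digest of the empty input, which makes for a quick sanity check:

```go
package main

import (
	"fmt"

	"github.com/OneOfOne/xxhash"
)

func main() {
	// Matches the constant returned by the fast path in Checksum64S.
	fmt.Printf("%#x\n", xxhash.Checksum64(nil)) // 0xef46db3751d8e999
}
```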

View File

@@ -1,183 +0,0 @@
// +build appengine safe ppc64le ppc64be mipsle mips s390x
package xxhash
// Backend returns the current version of xxhash being used.
const Backend = "GoSafe"
func ChecksumString32S(s string, seed uint32) uint32 {
return Checksum32S([]byte(s), seed)
}
func (xx *XXHash32) WriteString(s string) (int, error) {
if len(s) == 0 {
return 0, nil
}
return xx.Write([]byte(s))
}
func ChecksumString64S(s string, seed uint64) uint64 {
return Checksum64S([]byte(s), seed)
}
func (xx *XXHash64) WriteString(s string) (int, error) {
if len(s) == 0 {
return 0, nil
}
return xx.Write([]byte(s))
}
func checksum64(in []byte, seed uint64) (h uint64) {
var (
v1, v2, v3, v4 = resetVs64(seed)
i int
)
for ; i < len(in)-31; i += 32 {
in := in[i : i+32 : len(in)]
v1 = round64(v1, u64(in[0:8:len(in)]))
v2 = round64(v2, u64(in[8:16:len(in)]))
v3 = round64(v3, u64(in[16:24:len(in)]))
v4 = round64(v4, u64(in[24:32:len(in)]))
}
h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)
h = mergeRound64(h, v1)
h = mergeRound64(h, v2)
h = mergeRound64(h, v3)
h = mergeRound64(h, v4)
h += uint64(len(in))
for ; i < len(in)-7; i += 8 {
h ^= round64(0, u64(in[i:len(in):len(in)]))
h = rotl64_27(h)*prime64x1 + prime64x4
}
for ; i < len(in)-3; i += 4 {
h ^= uint64(u32(in[i:len(in):len(in)])) * prime64x1
h = rotl64_23(h)*prime64x2 + prime64x3
}
for ; i < len(in); i++ {
h ^= uint64(in[i]) * prime64x5
h = rotl64_11(h) * prime64x1
}
return mix64(h)
}
func checksum64Short(in []byte, seed uint64) uint64 {
var (
h = seed + prime64x5 + uint64(len(in))
i int
)
for ; i < len(in)-7; i += 8 {
k := u64(in[i : i+8 : len(in)])
h ^= round64(0, k)
h = rotl64_27(h)*prime64x1 + prime64x4
}
for ; i < len(in)-3; i += 4 {
h ^= uint64(u32(in[i:i+4:len(in)])) * prime64x1
h = rotl64_23(h)*prime64x2 + prime64x3
}
for ; i < len(in); i++ {
h ^= uint64(in[i]) * prime64x5
h = rotl64_11(h) * prime64x1
}
return mix64(h)
}
func (xx *XXHash64) Write(in []byte) (n int, err error) {
var (
ml = int(xx.memIdx)
d = 32 - ml
)
n = len(in)
xx.ln += uint64(n)
if ml+len(in) < 32 {
xx.memIdx += int8(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in))
return
}
i, v1, v2, v3, v4 := 0, xx.v1, xx.v2, xx.v3, xx.v4
if ml > 0 && ml+len(in) > 32 {
xx.memIdx += int8(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in[:d:len(in)]))
in = in[d:len(in):len(in)]
in := xx.mem[0:32:len(xx.mem)]
v1 = round64(v1, u64(in[0:8:len(in)]))
v2 = round64(v2, u64(in[8:16:len(in)]))
v3 = round64(v3, u64(in[16:24:len(in)]))
v4 = round64(v4, u64(in[24:32:len(in)]))
xx.memIdx = 0
}
for ; i < len(in)-31; i += 32 {
in := in[i : i+32 : len(in)]
v1 = round64(v1, u64(in[0:8:len(in)]))
v2 = round64(v2, u64(in[8:16:len(in)]))
v3 = round64(v3, u64(in[16:24:len(in)]))
v4 = round64(v4, u64(in[24:32:len(in)]))
}
if len(in)-i != 0 {
xx.memIdx += int8(copy(xx.mem[xx.memIdx:], in[i:len(in):len(in)]))
}
xx.v1, xx.v2, xx.v3, xx.v4 = v1, v2, v3, v4
return
}
func (xx *XXHash64) Sum64() (h uint64) {
var i int
if xx.ln > 31 {
v1, v2, v3, v4 := xx.v1, xx.v2, xx.v3, xx.v4
h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)
h = mergeRound64(h, v1)
h = mergeRound64(h, v2)
h = mergeRound64(h, v3)
h = mergeRound64(h, v4)
} else {
h = xx.seed + prime64x5
}
h += uint64(xx.ln)
if xx.memIdx > 0 {
in := xx.mem[:xx.memIdx]
for ; i < int(xx.memIdx)-7; i += 8 {
in := in[i : i+8 : len(in)]
k := u64(in[0:8:len(in)])
k *= prime64x2
k = rotl64_31(k)
k *= prime64x1
h ^= k
h = rotl64_27(h)*prime64x1 + prime64x4
}
for ; i < int(xx.memIdx)-3; i += 4 {
in := in[i : i+4 : len(in)]
h ^= uint64(u32(in[0:4:len(in)])) * prime64x1
h = rotl64_23(h)*prime64x2 + prime64x3
}
for ; i < int(xx.memIdx); i++ {
h ^= uint64(in[i]) * prime64x5
h = rotl64_11(h) * prime64x1
}
}
return mix64(h)
}
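
Write buffers partial blocks in xx.mem, so feeding the input in arbitrary chunks must converge to the one-shot checksum. A small sketch of that property:

```go
package main

import (
	"fmt"

	"github.com/OneOfOne/xxhash"
)

func main() {
	data := []byte("the quick brown fox jumps over the lazy dog")

	h := xxhash.New64()
	h.Write(data[:13]) // two uneven chunks exercise the mem buffer
	h.Write(data[13:])

	fmt.Println(h.Sum64() == xxhash.Checksum64(data)) // true
}
```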

View File

@@ -1,240 +0,0 @@
// +build !safe
// +build !appengine
// +build !ppc64le
// +build !mipsle
// +build !ppc64be
// +build !mips
// +build !s390x
package xxhash
import (
"reflect"
"unsafe"
)
// Backend returns the current version of xxhash being used.
const Backend = "GoUnsafe"
// ChecksumString32S returns the checksum of the input data, without creating a copy, with the specific seed.
func ChecksumString32S(s string, seed uint32) uint32 {
if len(s) == 0 {
return Checksum32S(nil, seed)
}
ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
return Checksum32S((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)], seed)
}
func (xx *XXHash32) WriteString(s string) (int, error) {
if len(s) == 0 {
return 0, nil
}
ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
return xx.Write((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)])
}
// ChecksumString64S returns the checksum of the input data, without creating a copy, with the specific seed.
func ChecksumString64S(s string, seed uint64) uint64 {
if len(s) == 0 {
return Checksum64S(nil, seed)
}
ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
return Checksum64S((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)], seed)
}
func (xx *XXHash64) WriteString(s string) (int, error) {
if len(s) == 0 {
return 0, nil
}
ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
return xx.Write((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)])
}
//go:nocheckptr
func checksum64(in []byte, seed uint64) uint64 {
var (
wordsLen = len(in) >> 3
words = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen]
v1, v2, v3, v4 = resetVs64(seed)
h uint64
i int
)
for ; i < len(words)-3; i += 4 {
words := (*[4]uint64)(unsafe.Pointer(&words[i]))
v1 = round64(v1, words[0])
v2 = round64(v2, words[1])
v3 = round64(v3, words[2])
v4 = round64(v4, words[3])
}
h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)
h = mergeRound64(h, v1)
h = mergeRound64(h, v2)
h = mergeRound64(h, v3)
h = mergeRound64(h, v4)
h += uint64(len(in))
for _, k := range words[i:] {
h ^= round64(0, k)
h = rotl64_27(h)*prime64x1 + prime64x4
}
if in = in[wordsLen<<3 : len(in) : len(in)]; len(in) > 3 {
words := (*[1]uint32)(unsafe.Pointer(&in[0]))
h ^= uint64(words[0]) * prime64x1
h = rotl64_23(h)*prime64x2 + prime64x3
in = in[4:len(in):len(in)]
}
for _, b := range in {
h ^= uint64(b) * prime64x5
h = rotl64_11(h) * prime64x1
}
return mix64(h)
}
//go:nocheckptr
func checksum64Short(in []byte, seed uint64) uint64 {
var (
h = seed + prime64x5 + uint64(len(in))
i int
)
if len(in) > 7 {
var (
wordsLen = len(in) >> 3
words = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen]
)
for i := range words {
h ^= round64(0, words[i])
h = rotl64_27(h)*prime64x1 + prime64x4
}
i = wordsLen << 3
}
if in = in[i:len(in):len(in)]; len(in) > 3 {
words := (*[1]uint32)(unsafe.Pointer(&in[0]))
h ^= uint64(words[0]) * prime64x1
h = rotl64_23(h)*prime64x2 + prime64x3
in = in[4:len(in):len(in)]
}
for _, b := range in {
h ^= uint64(b) * prime64x5
h = rotl64_11(h) * prime64x1
}
return mix64(h)
}
func (xx *XXHash64) Write(in []byte) (n int, err error) {
mem, idx := xx.mem[:], int(xx.memIdx)
xx.ln, n = xx.ln+uint64(len(in)), len(in)
if idx+len(in) < 32 {
xx.memIdx += int8(copy(mem[idx:len(mem):len(mem)], in))
return
}
var (
v1, v2, v3, v4 = xx.v1, xx.v2, xx.v3, xx.v4
i int
)
if d := 32 - int(idx); d > 0 && int(idx)+len(in) > 31 {
copy(mem[idx:len(mem):len(mem)], in[:len(in):len(in)])
words := (*[4]uint64)(unsafe.Pointer(&mem[0]))
v1 = round64(v1, words[0])
v2 = round64(v2, words[1])
v3 = round64(v3, words[2])
v4 = round64(v4, words[3])
if in, xx.memIdx = in[d:len(in):len(in)], 0; len(in) == 0 {
goto RET
}
}
for ; i < len(in)-31; i += 32 {
words := (*[4]uint64)(unsafe.Pointer(&in[i]))
v1 = round64(v1, words[0])
v2 = round64(v2, words[1])
v3 = round64(v3, words[2])
v4 = round64(v4, words[3])
}
if len(in)-i != 0 {
xx.memIdx += int8(copy(mem[xx.memIdx:len(mem):len(mem)], in[i:len(in):len(in)]))
}
RET:
xx.v1, xx.v2, xx.v3, xx.v4 = v1, v2, v3, v4
return
}
func (xx *XXHash64) Sum64() (h uint64) {
if seed := xx.seed; xx.ln > 31 {
v1, v2, v3, v4 := xx.v1, xx.v2, xx.v3, xx.v4
h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)
h = mergeRound64(h, v1)
h = mergeRound64(h, v2)
h = mergeRound64(h, v3)
h = mergeRound64(h, v4)
} else if seed == 0 {
h = prime64x5
} else {
h = seed + prime64x5
}
h += uint64(xx.ln)
if xx.memIdx == 0 {
return mix64(h)
}
var (
in = xx.mem[:xx.memIdx:xx.memIdx]
wordsLen = len(in) >> 3
words = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen]
)
for _, k := range words {
h ^= round64(0, k)
h = rotl64_27(h)*prime64x1 + prime64x4
}
if in = in[wordsLen<<3 : len(in) : len(in)]; len(in) > 3 {
words := (*[1]uint32)(unsafe.Pointer(&in[0]))
h ^= uint64(words[0]) * prime64x1
h = rotl64_23(h)*prime64x2 + prime64x3
in = in[4:len(in):len(in)]
}
for _, b := range in {
h ^= uint64(b) * prime64x5
h = rotl64_11(h) * prime64x1
}
return mix64(h)
}
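
The unsafe backend reinterprets the string header to hash the string's bytes in place; the result must match hashing an explicit copy. A quick check:

```go
package main

import (
	"fmt"

	"github.com/OneOfOne/xxhash"
)

func main() {
	s := "no copy needed"
	// Zero-copy string hashing agrees with hashing an explicit []byte copy.
	fmt.Println(xxhash.ChecksumString64(s) == xxhash.Checksum64([]byte(s))) // true
}
```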

View File

@@ -104,7 +104,7 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
node.Parent.Prev.Type == blackfriday.Heading &&
node.Parent.Prev.FirstChild != nil &&
bytes.EqualFold(node.Parent.Prev.FirstChild.Literal, []byte("NAME")) {
before, after, found := bytes.Cut(node.Literal, []byte(" - "))
before, after, found := bytesCut(node.Literal, []byte(" - "))
escapeSpecialChars(w, before)
if found {
out(w, ` \- `)
@@ -406,3 +406,12 @@ func escapeSpecialCharsLine(w io.Writer, text []byte) {
w.Write([]byte{'\\', text[i]}) // nolint: errcheck
}
}
// bytesCut is a copy of [bytes.Cut] to provide compatibility with go1.17
// and older. We can remove this once we drop support for go1.17 and older.
func bytesCut(s, sep []byte) (before, after []byte, found bool) {
if i := bytes.Index(s, sep); i >= 0 {
return s[:i], s[i+len(sep):], true
}
return s, nil, false
}
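
As a usage note, a standalone sketch of the cut semantics the renderer relies on for NAME lines (the inputs are hypothetical):

```go
package main

import (
	"bytes"
	"fmt"
)

// bytesCut mirrors the compatibility helper added above.
func bytesCut(s, sep []byte) (before, after []byte, found bool) {
	if i := bytes.Index(s, sep); i >= 0 {
		return s[:i], s[i+len(sep):], true
	}
	return s, nil, false
}

func main() {
	// A man-page NAME line "name - description" splits on " - ".
	before, after, found := bytesCut([]byte("opa - Open Policy Agent"), []byte(" - "))
	fmt.Printf("%q %q %v\n", before, after, found) // "opa" "Open Policy Agent" true
}
```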

View File

@@ -31,3 +31,7 @@ type (
func NewAnnotationsRef(a *Annotations) *AnnotationsRef {
return v1.NewAnnotationsRef(a)
}
func BuildAnnotationSet(modules []*Module) (*AnnotationSet, Errors) {
return v1.BuildAnnotationSet(modules)
}

View File

@@ -5,6 +5,7 @@
package ast
import (
"errors"
"fmt"
v1 "github.com/open-policy-agent/opa/v1/ast"
@@ -279,7 +280,7 @@ func ParseStatement(input string) (Statement, error) {
return nil, err
}
if len(stmts) != 1 {
return nil, fmt.Errorf("expected exactly one statement")
return nil, errors.New("expected exactly one statement")
}
return stmts[0], nil
}
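
This hunk is one instance of a refactor that recurs throughout the diff: fmt.Errorf with a constant message becomes errors.New, which skips format-string parsing; fmt.Errorf remains the right call once values are interpolated or an error is wrapped. A minimal sketch of the distinction:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	errConst := errors.New("expected exactly one statement") // constant message
	errWrap := fmt.Errorf("parse failed: %w", errConst)      // interpolation/wrapping
	fmt.Println(errors.Is(errWrap, errConst))                // true
}
```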

View File

@@ -184,7 +184,7 @@ func RefHead(ref Ref, args ...*Term) *Head {
}
// DocKind represents the collection of document types that can be produced by rules.
type DocKind int
type DocKind = v1.DocKind
const (
// CompleteDoc represents a document that is completely defined by the rule.
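
The switch from a type definition to a type alias (`type DocKind = v1.DocKind`) makes the old and new names interchangeable without conversions. A standalone sketch of the difference, using hypothetical types:

```go
package main

import "fmt"

type Old = int // alias: Old and int are the same type
type New int   // definition: New is a distinct type

func main() {
	var a Old = 1
	var b int = a // fine: alias, no conversion needed
	var c New = 2
	var d int = int(c) // a definition requires an explicit conversion
	fmt.Println(a, b, c, d)
}
```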

View File

File diff suppressed because it is too large

View File

@@ -6,6 +6,7 @@ package bundle
import (
"context"
"errors"
"fmt"
"io"
"os"
@@ -97,7 +98,7 @@ func LoadBundleFromDiskForRegoVersion(regoVersion ast.RegoVersion, path, name st
_, err := os.Stat(bundlePath)
if err == nil {
f, err := os.Open(filepath.Join(bundlePath))
f, err := os.Open(bundlePath)
if err != nil {
return nil, err
}
@@ -132,7 +133,7 @@ func SaveBundleToDisk(path string, raw io.Reader) (string, error) {
}
if raw == nil {
return "", fmt.Errorf("no raw bundle bytes to persist to disk")
return "", errors.New("no raw bundle bytes to persist to disk")
}
dest, err := os.CreateTemp(path, ".bundle.tar.gz.*.tmp")

View File

@@ -114,7 +114,7 @@ func GetAddressRange(ipNet net.IPNet) (net.IP, net.IP) {
copy(lastIPMask, ipNet.Mask)
for i := range lastIPMask {
lastIPMask[len(lastIPMask)-i-1] = ^lastIPMask[len(lastIPMask)-i-1]
lastIP[net.IPv6len-i-1] = lastIP[net.IPv6len-i-1] | lastIPMask[len(lastIPMask)-i-1]
lastIP[net.IPv6len-i-1] |= lastIPMask[len(lastIPMask)-i-1]
}
return firstIP, lastIP
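
The loop above inverts the mask byte by byte and ORs it into the network address, yielding the last address of the range. A simplified sketch, assuming a 4-byte IPv4 representation rather than the function's 16-byte handling:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	_, ipNet, _ := net.ParseCIDR("192.168.1.0/24")
	last := make(net.IP, len(ipNet.IP))
	copy(last, ipNet.IP)
	for i := range last {
		last[i] |= ^ipNet.Mask[i] // OR in the inverted mask bytes
	}
	fmt.Println(last) // 192.168.1.255
}
```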

View File

@@ -5,6 +5,9 @@
package compiler
import (
"errors"
"sync"
"github.com/open-policy-agent/opa/v1/ast"
"github.com/open-policy-agent/opa/v1/schemas"
"github.com/open-policy-agent/opa/v1/util"
@@ -16,12 +19,35 @@ const (
AuthorizationPolicySchema SchemaFile = "authorizationPolicy.json"
)
var schemaDefinitions = map[SchemaFile]interface{}{}
var schemaDefinitions = map[SchemaFile]any{}
var loadOnce = sync.OnceValue(func() error {
cont, err := schemas.FS.ReadFile(string(AuthorizationPolicySchema))
if err != nil {
return err
}
if len(cont) == 0 {
return errors.New("expected authorization policy schema file to be present")
}
var schema any
if err := util.Unmarshal(cont, &schema); err != nil {
return err
}
schemaDefinitions[AuthorizationPolicySchema] = schema
return nil
})
// VerifyAuthorizationPolicySchema performs type checking on rules against the schema for the Authorization Policy
// Input document.
// NOTE: The provided compiler should have already run the compilation process on the input modules
func VerifyAuthorizationPolicySchema(compiler *ast.Compiler, ref ast.Ref) error {
if err := loadOnce(); err != nil {
panic(err)
}
rules := getRulesWithDependencies(compiler, ref)
@@ -67,26 +93,3 @@ func transitiveDependencies(compiler *ast.Compiler, rule *ast.Rule, deps map[*as
transitiveDependencies(compiler, other, deps)
}
}
func loadAuthorizationPolicySchema() {
cont, err := schemas.FS.ReadFile(string(AuthorizationPolicySchema))
if err != nil {
panic(err)
}
if len(cont) == 0 {
panic("expected authorization policy schema file to be present")
}
var schema interface{}
if err := util.Unmarshal(cont, &schema); err != nil {
panic(err)
}
schemaDefinitions[AuthorizationPolicySchema] = schema
}
func init() {
loadAuthorizationPolicySchema()
}
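
The hunk replaces an init-time load with sync.OnceValue, deferring the work to the first verification call while still running it exactly once. A minimal sketch of the pattern:

```go
package main

import (
	"fmt"
	"sync"
)

// The wrapped function runs once; every caller gets the cached result.
var load = sync.OnceValue(func() error {
	fmt.Println("loading schema...") // printed a single time
	return nil
})

func main() {
	fmt.Println(load()) // <nil>
	fmt.Println(load()) // cached; the body does not run again
}
```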

View File

@@ -340,7 +340,7 @@ func (c *Compiler) initModule() error {
// two times. But let's deal with that when it happens.
if _, ok := c.funcs[name]; ok { // already seen
c.debug.Printf("function name duplicate: %s (%d)", name, fn.Index)
name = name + ".1"
name += ".1"
}
c.funcs[name] = fn.Index
}
@@ -348,7 +348,7 @@ func (c *Compiler) initModule() error {
for _, fn := range c.policy.Funcs.Funcs {
params := make([]types.ValueType, len(fn.Params))
for i := 0; i < len(params); i++ {
for i := range params {
params[i] = types.I32
}
@@ -827,7 +827,7 @@ func (c *Compiler) compileFunc(fn *ir.Func) error {
memoize := len(fn.Params) == 2
if len(fn.Params) == 0 {
return fmt.Errorf("illegal function: zero args")
return errors.New("illegal function: zero args")
}
c.nextLocal = 0
@@ -996,12 +996,16 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err
for _, stmt := range block.Stmts {
switch stmt := stmt.(type) {
case *ir.ResultSetAddStmt:
instrs = append(instrs, instruction.GetLocal{Index: c.lrs})
instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Value)})
instrs = append(instrs, instruction.Call{Index: c.function(opaSetAdd)})
instrs = append(instrs,
instruction.GetLocal{Index: c.lrs},
instruction.GetLocal{Index: c.local(stmt.Value)},
instruction.Call{Index: c.function(opaSetAdd)},
)
case *ir.ReturnLocalStmt:
instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Source)})
instrs = append(instrs, instruction.Return{})
instrs = append(instrs,
instruction.GetLocal{Index: c.local(stmt.Source)},
instruction.Return{},
)
case *ir.BlockStmt:
for i := range stmt.Blocks {
block, err := c.compileBlock(stmt.Blocks[i])
@@ -1029,8 +1033,10 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err
return instrs, err
}
case *ir.AssignVarStmt:
instrs = append(instrs, c.instrRead(stmt.Source))
instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)})
instrs = append(instrs,
c.instrRead(stmt.Source),
instruction.SetLocal{Index: c.local(stmt.Target)},
)
case *ir.AssignVarOnceStmt:
instrs = append(instrs, instruction.Block{
Instrs: []instruction.Instruction{
@@ -1360,7 +1366,7 @@ func (c *Compiler) compileUpsert(local ir.Local, path []int, value ir.Operand, _
// Initialize the locals that specify the path of the upsert operation.
lpath := make(map[int]uint32, len(path))
for i := 0; i < len(path); i++ {
for i := range path {
lpath[i] = c.genLocal()
instrs = append(instrs, instruction.I32Const{Value: c.opaStringAddr(path[i])})
instrs = append(instrs, instruction.SetLocal{Index: lpath[i]})
@@ -1369,10 +1375,10 @@ func (c *Compiler) compileUpsert(local ir.Local, path []int, value ir.Operand, _
// Generate a block that traverses the path of the upsert operation,
// shallowing copying values at each step as needed. Stop before the final
// segment that will only be inserted.
var inner []instruction.Instruction
inner := make([]instruction.Instruction, 0, len(path)*21+1)
ltemp := c.genLocal()
for i := 0; i < len(path)-1; i++ {
for i := range len(path) - 1 {
// Lookup the next part of the path.
inner = append(inner, instruction.GetLocal{Index: lcopy})
@@ -1408,10 +1414,10 @@ func (c *Compiler) compileUpsert(local ir.Local, path []int, value ir.Operand, _
inner = append(inner, instruction.Br{Index: uint32(len(path) - 1)})
// Generate blocks that handle missing nodes during traversal.
var block []instruction.Instruction
block := make([]instruction.Instruction, 0, len(path)*10)
lval := c.genLocal()
for i := 0; i < len(path)-1; i++ {
for i := range len(path) - 1 {
block = append(block, instruction.Block{Instrs: inner})
block = append(block, instruction.Call{Index: c.function(opaObject)})
block = append(block, instruction.SetLocal{Index: lval})
@@ -1535,8 +1541,7 @@ func (c *Compiler) compileExternalCall(stmt *ir.CallStmt, ef externalFunc, resul
}
instrs := *result
instrs = append(instrs, instruction.I32Const{Value: ef.ID})
instrs = append(instrs, instruction.I32Const{Value: 0}) // unused context parameter
instrs = append(instrs, instruction.I32Const{Value: ef.ID}, instruction.I32Const{Value: 0}) // unused context parameter
for _, arg := range stmt.Args {
instrs = append(instrs, c.instrRead(arg))
@@ -1545,9 +1550,11 @@ func (c *Compiler) compileExternalCall(stmt *ir.CallStmt, ef externalFunc, resul
instrs = append(instrs, instruction.Call{Index: c.function(builtinDispatchers[len(stmt.Args)])})
if ef.Decl.Result() != nil {
instrs = append(instrs, instruction.TeeLocal{Index: c.local(stmt.Result)})
instrs = append(instrs, instruction.I32Eqz{})
instrs = append(instrs, instruction.BrIf{Index: 0})
instrs = append(instrs,
instruction.TeeLocal{Index: c.local(stmt.Result)},
instruction.I32Eqz{},
instruction.BrIf{Index: 0},
)
} else {
instrs = append(instrs, instruction.Drop{})
}
@@ -1678,7 +1685,7 @@ func (c *Compiler) genLocal() uint32 {
func (c *Compiler) function(name string) uint32 {
fidx, ok := c.funcs[name]
if !ok {
panic(fmt.Sprintf("function not found: %s", name))
panic("function not found: " + name)
}
return fidx
}
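
Several hunks here (and in the files below) rewrite counting loops as Go 1.22 integer ranges. A minimal sketch of the form:

```go
package main

import "fmt"

func main() {
	// "range over int" iterates 0..n-1, replacing `for i := 0; i < n; i++`.
	for i := range 3 {
		fmt.Println(i) // 0, 1, 2
	}
}
```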

View File

@@ -36,7 +36,7 @@ func (vector *BitVector) Length() int {
// position of the last byte in the slice.
// This returns the bit that was shifted off of the last byte.
func shiftLower(bit byte, b []byte) byte {
bit = bit << 7
bit <<= 7
for i := len(b) - 1; i >= 0; i-- {
newByte := b[i] >> 1
newByte |= bit

View File

@@ -146,6 +146,7 @@
package edittree
import (
"errors"
"fmt"
"math/big"
"sort"
@@ -335,13 +336,13 @@ func (e *EditTree) deleteChildValue(hash int) {
// Insert creates a new child of e, and returns the new child EditTree node.
func (e *EditTree) Insert(key, value *ast.Term) (*EditTree, error) {
if e.value == nil {
return nil, fmt.Errorf("deleted node encountered during insert operation")
return nil, errors.New("deleted node encountered during insert operation")
}
if key == nil {
return nil, fmt.Errorf("nil key provided for insert operation")
return nil, errors.New("nil key provided for insert operation")
}
if value == nil {
return nil, fmt.Errorf("nil value provided for insert operation")
return nil, errors.New("nil value provided for insert operation")
}
switch x := e.value.Value.(type) {
@@ -367,7 +368,7 @@ func (e *EditTree) Insert(key, value *ast.Term) (*EditTree, error) {
return nil, err
}
if idx < 0 || idx > e.insertions.Length() {
return nil, fmt.Errorf("index for array insertion out of bounds")
return nil, errors.New("index for array insertion out of bounds")
}
return e.unsafeInsertArray(idx, value), nil
default:
@@ -457,10 +458,10 @@ func (e *EditTree) unsafeInsertArray(idx int, value *ast.Term) *EditTree {
// already present in e. It then returns the deleted child EditTree node.
func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) {
if e.value == nil {
return nil, fmt.Errorf("deleted node encountered during delete operation")
return nil, errors.New("deleted node encountered during delete operation")
}
if key == nil {
return nil, fmt.Errorf("nil key provided for delete operation")
return nil, errors.New("nil key provided for delete operation")
}
switch e.value.Value.(type) {
@@ -531,7 +532,7 @@ func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) {
return nil, err
}
if idx < 0 || idx > e.insertions.Length()-1 {
return nil, fmt.Errorf("index for array delete out of bounds")
return nil, errors.New("index for array delete out of bounds")
}
// Collect insertion indexes above the delete site for rewriting.
@@ -552,14 +553,14 @@ func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) {
}
// Do rewrites to clear out the newly-removed element.
e.deleteChildValue(idx)
for i := 0; i < len(rewritesScalars); i++ {
for i := range rewritesScalars {
originalIdx := rewritesScalars[i]
rewriteIdx := rewritesScalars[i] - 1
v := e.childScalarValues[originalIdx]
e.deleteChildValue(originalIdx)
e.setChildScalarValue(rewriteIdx, v)
}
for i := 0; i < len(rewritesComposites); i++ {
for i := range rewritesComposites {
originalIdx := rewritesComposites[i]
rewriteIdx := rewritesComposites[i] - 1
v := e.childCompositeValues[originalIdx]
@@ -591,7 +592,7 @@ func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) {
//gcassert:inline
func sumZeroesBelowIndex(index int, bv *bitvector.BitVector) int {
zeroesSeen := 0
for i := 0; i < index; i++ {
for i := range index {
if bv.Element(i) == 0 {
zeroesSeen++
}
@@ -601,7 +602,7 @@ func sumZeroesBelowIndex(index int, bv *bitvector.BitVector) int {
func findIndexOfNthZero(n int, bv *bitvector.BitVector) (int, bool) {
zeroesSeen := 0
for i := 0; i < bv.Length(); i++ {
for i := range bv.Length() {
if bv.Element(i) == 0 {
zeroesSeen++
}
@@ -637,7 +638,7 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) {
}
// 1+ path segment case.
if e.value == nil {
return nil, fmt.Errorf("nil value encountered where composite value was expected")
return nil, errors.New("nil value encountered where composite value was expected")
}
// Switch behavior based on types.
@@ -831,7 +832,7 @@ func (e *EditTree) Render() *ast.Term {
// original array. We build a new Array with modified/deleted keys.
out := make([]*ast.Term, 0, e.insertions.Length())
eIdx := 0
for i := 0; i < e.insertions.Length(); i++ {
for i := range e.insertions.Length() {
// If the index == 0, that indicates we should look up the next
// surviving original element.
// If the index == 1, that indicates we should look up that
@@ -879,7 +880,7 @@ func (e *EditTree) Render() *ast.Term {
// Returns the inserted EditTree node.
func (e *EditTree) InsertAtPath(path ast.Ref, value *ast.Term) (*EditTree, error) {
if value == nil {
return nil, fmt.Errorf("cannot insert nil value into EditTree")
return nil, errors.New("cannot insert nil value into EditTree")
}
if len(path) == 0 {
@@ -910,7 +911,7 @@ func (e *EditTree) DeleteAtPath(path ast.Ref) (*EditTree, error) {
// Root document case:
if len(path) == 0 {
if e.value == nil {
return nil, fmt.Errorf("deleted node encountered during delete operation")
return nil, errors.New("deleted node encountered during delete operation")
}
e.value = nil
e.childKeys = nil
@@ -1046,7 +1047,7 @@ func toIndex(arrayLength int, term *ast.Term) (int, error) {
switch v := term.Value.(type) {
case ast.Number:
if i, ok = v.Int(); !ok {
return 0, fmt.Errorf("invalid number type for indexing")
return 0, errors.New("invalid number type for indexing")
}
case ast.String:
if v == "-" {
@@ -1054,13 +1055,13 @@ func toIndex(arrayLength int, term *ast.Term) (int, error) {
}
num := ast.Number(v)
if i, ok = num.Int(); !ok {
return 0, fmt.Errorf("invalid string for indexing")
return 0, errors.New("invalid string for indexing")
}
if v != "0" && strings.HasPrefix(string(v), "0") {
return 0, fmt.Errorf("leading zeros are not allowed in JSON paths")
return 0, errors.New("leading zeros are not allowed in JSON paths")
}
default:
return 0, fmt.Errorf("invalid type for indexing")
return 0, errors.New("invalid type for indexing")
}
return i, nil
@@ -1179,5 +1180,5 @@ func (e *EditTree) Filter(paths []ast.Ref) *ast.Term {
type termSlice []*ast.Term
func (s termSlice) Less(i, j int) bool { return ast.Compare(s[i].Value, s[j].Value) < 0 }
func (s termSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x }
func (s termSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s termSlice) Len() int { return len(s) }

View File

@@ -19,12 +19,14 @@ func FilterFutureImports(imps []*ast.Import) []*ast.Import {
return ret
}
var keywordsTerm = ast.StringTerm("keywords")
// IsAllFutureKeywords returns true if the passed *ast.Import is `future.keywords`
func IsAllFutureKeywords(imp *ast.Import) bool {
path := imp.Path.Value.(ast.Ref)
return len(path) == 2 &&
ast.FutureRootDocument.Equal(path[0]) &&
path[1].Equal(ast.StringTerm("keywords"))
path[1].Equal(keywordsTerm)
}
// IsFutureKeyword returns true if the passed *ast.Import is `future.keywords.{kw}`
@@ -32,7 +34,7 @@ func IsFutureKeyword(imp *ast.Import, kw string) bool {
path := imp.Path.Value.(ast.Ref)
return len(path) == 3 &&
ast.FutureRootDocument.Equal(path[0]) &&
path[1].Equal(ast.StringTerm("keywords")) &&
path[1].Equal(keywordsTerm) &&
path[2].Equal(ast.StringTerm(kw))
}
@@ -40,7 +42,7 @@ func WhichFutureKeyword(imp *ast.Import) (string, bool) {
path := imp.Path.Value.(ast.Ref)
if len(path) == 3 &&
ast.FutureRootDocument.Equal(path[0]) &&
path[1].Equal(ast.StringTerm("keywords")) {
path[1].Equal(keywordsTerm) {
if str, ok := path[2].Value.(ast.String); ok {
return string(str), true
}

View File

@@ -5,6 +5,7 @@
package future
import (
"errors"
"fmt"
"github.com/open-policy-agent/opa/v1/ast"
@@ -33,7 +34,7 @@ func ParserOptionsFromFutureImports(imports []*ast.Import) (ast.ParserOptions, e
}
if len(path) == 3 {
if imp.Alias != "" {
return popts, fmt.Errorf("alias not supported")
return popts, errors.New("alias not supported")
}
popts.FutureKeywords = append(popts.FutureKeywords, string(path[2].Value.(ast.String)))
}

View File

@@ -40,10 +40,10 @@ func (d *dumper) dump(v reflect.Value) {
d.WriteString("false")
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
d.WriteString(fmt.Sprintf("%d", v.Int()))
d.WriteString(strconv.FormatInt(v.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
d.WriteString(fmt.Sprintf("%d", v.Uint()))
d.WriteString(strconv.FormatUint(v.Uint(), 10))
case reflect.Float32, reflect.Float64:
d.WriteString(fmt.Sprintf("%.2f", v.Float()))
@@ -88,7 +88,7 @@ func typeName(t reflect.Type) string {
func (d *dumper) dumpArray(v reflect.Value) {
d.WriteString("[" + typeName(v.Type().Elem()) + "]")
for i := 0; i < v.Len(); i++ {
for i := range v.Len() {
d.nl()
d.WriteString("- ")
d.indent++
@@ -102,7 +102,7 @@ func (d *dumper) dumpStruct(v reflect.Value) {
d.indent++
typ := v.Type()
for i := 0; i < v.NumField(); i++ {
for i := range v.NumField() {
f := v.Field(i)
if typ.Field(i).Tag.Get("dump") == "-" {
continue
@@ -132,13 +132,13 @@ func isZero(v reflect.Value) bool {
return true
}
z := true
for i := 0; i < v.Len(); i++ {
for i := range v.Len() {
z = z && isZero(v.Index(i))
}
return z
case reflect.Struct:
z := true
for i := 0; i < v.NumField(); i++ {
for i := range v.NumField() {
z = z && isZero(v.Field(i))
}
return z
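
The strconv substitutions above sidestep fmt's verb parsing and interface boxing for plain integer formatting. A quick sketch:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	fmt.Println(strconv.FormatInt(-42, 10)) // "-42"
	fmt.Println(strconv.FormatUint(42, 10)) // "42", same output as fmt.Sprintf("%d", ...)
}
```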

View File

@@ -51,7 +51,7 @@ func init() {
}
var via string
if len(fragmentNames) != 0 {
via = fmt.Sprintf(" via %s", strings.Join(fragmentNames, ", "))
via = " via " + strings.Join(fragmentNames, ", ")
}
addError(
Message(`Cannot spread fragment "%s" within itself%s.`, spreadName, via),

View File

@@ -159,8 +159,6 @@ func unexpectedTypeMessageOnly(v *ast.Value) ErrorOption {
return Message(`Float cannot represent non numeric value: %s`, v.String())
case "ID", "ID!":
return Message(`ID cannot represent a non-string and non-integer value: %s`, v.String())
//case "Enum":
// return Message(`Enum "%s" cannot represent non-enum value: %s`, v.ExpectedType.String(), v.String())
default:
if v.Definition.Kind == ast.Enum {
return Message(`Enum "%s" cannot represent non-enum value: %s.`, v.ExpectedType.String(), v.String())

View File

@@ -2,6 +2,7 @@ package validator
import (
"encoding/json"
"errors"
"fmt"
"reflect"
"strconv"
@@ -11,7 +12,7 @@ import (
"github.com/open-policy-agent/opa/internal/gqlparser/gqlerror"
)
var ErrUnexpectedType = fmt.Errorf("Unexpected Type")
var ErrUnexpectedType = errors.New("Unexpected Type")
// VariableValues coerces and validates variable values
func VariableValues(schema *ast.Schema, op *ast.OperationDefinition, variables map[string]interface{}) (map[string]interface{}, error) {
@@ -106,7 +107,7 @@ func (v *varValidator) validateVarType(typ *ast.Type, val reflect.Value) (reflec
slc = reflect.Append(slc, val)
val = slc
}
for i := 0; i < val.Len(); i++ {
for i := range val.Len() {
resetPath()
v.path = append(v.path, ast.PathIndex(i))
field := val.Index(i)
@@ -222,7 +223,7 @@ func (v *varValidator) validateVarType(typ *ast.Type, val reflect.Value) (reflec
if fieldDef.Type.NonNull && field.IsNil() {
return val, gqlerror.ErrorPathf(v.path, "cannot be null")
}
//allow null object field and skip it
// allow null object field and skip it
if !fieldDef.Type.NonNull && field.IsNil() {
continue
}

View File

@@ -37,8 +37,8 @@ func ParsePatchPathEscaped(str string) (path storage.Path, ok bool) {
// the substitutions in this order, an implementation avoids the error of
// turning '~01' first into '~1' and then into '/', which would be
// incorrect (the string '~01' correctly becomes '~1' after transformation)."
path[i] = strings.Replace(path[i], "~1", "/", -1)
path[i] = strings.Replace(path[i], "~0", "~", -1)
path[i] = strings.ReplaceAll(path[i], "~1", "/")
path[i] = strings.ReplaceAll(path[i], "~0", "~")
}
return
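
The RFC 6901 ordering in the comment is easy to verify: replacing "~1" before "~0" keeps the escape "~01" intact, while the reverse order corrupts it. A worked check:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	seg := "~01"
	s := strings.ReplaceAll(seg, "~1", "/") // no match yet
	s = strings.ReplaceAll(s, "~0", "~")    // "~01" -> "~1" (correct)
	fmt.Println(s)

	wrong := strings.ReplaceAll(seg, "~0", "~")  // "~01" -> "~1"
	wrong = strings.ReplaceAll(wrong, "~1", "/") // then "~1" -> "/" (incorrect)
	fmt.Println(wrong)
}
```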

View File

@@ -114,7 +114,7 @@ func parse(jwkSrc string) (*Set, error) {
// ParseBytes parses JWK from the incoming byte buffer.
func ParseBytes(buf []byte) (*Set, error) {
return parse(string(buf[:]))
return parse(string(buf))
}
// ParseString parses JWK from the incoming string.

View File

@@ -2,6 +2,7 @@ package jwk
import (
"encoding/json"
"errors"
"fmt"
)
@@ -53,12 +54,12 @@ func (keyOperationList *KeyOperationList) UnmarshalJSON(data []byte) error {
var tempKeyOperationList []string
err := json.Unmarshal(data, &tempKeyOperationList)
if err != nil {
return fmt.Errorf("invalid key operation")
return errors.New("invalid key operation")
}
for _, value := range tempKeyOperationList {
_, ok := keyOps[value]
if !ok {
return fmt.Errorf("unknown key operation")
return errors.New("unknown key operation")
}
*keyOperationList = append(*keyOperationList, KeyOperation(value))
}

View File

@@ -111,7 +111,7 @@ func Verify(buf []byte, alg jwa.SignatureAlgorithm, key interface{}) (ret []byte
return nil, errors.New(`attempt to verify empty buffer`)
}
parts, err := SplitCompact(string(buf[:]))
parts, err := SplitCompact(string(buf))
if err != nil {
return nil, fmt.Errorf("failed extract from compact serialization format: %w", err)
}
@@ -164,7 +164,7 @@ func VerifyWithJWKSet(buf []byte, keyset *jwk.Set) (payload []byte, err error) {
// ParseByte parses a JWS value serialized via compact serialization and provided as []byte.
func ParseByte(jwsCompact []byte) (m *Message, err error) {
return parseCompact(string(jwsCompact[:]))
return parseCompact(string(jwsCompact))
}
// ParseString parses a JWS value serialized via compact serialization and provided as string.

View File

@@ -3,6 +3,7 @@ package sign
import (
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"github.com/open-policy-agent/opa/internal/jwx/jwa"
@@ -30,7 +31,7 @@ func GetSigningKey(key string, alg jwa.SignatureAlgorithm) (interface{}, error)
case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512:
block, _ := pem.Decode([]byte(key))
if block == nil {
return nil, fmt.Errorf("failed to parse PEM block containing the key")
return nil, errors.New("failed to parse PEM block containing the key")
}
priv, err := x509.ParsePKCS1PrivateKey(block.Bytes)
@@ -45,7 +46,7 @@ func GetSigningKey(key string, alg jwa.SignatureAlgorithm) (interface{}, error)
case jwa.ES256, jwa.ES384, jwa.ES512:
block, _ := pem.Decode([]byte(key))
if block == nil {
return nil, fmt.Errorf("failed to parse PEM block containing the key")
return nil, errors.New("failed to parse PEM block containing the key")
}
priv, err := x509.ParseECPrivateKey(block.Bytes)

View File

@@ -5,6 +5,7 @@ import (
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"github.com/open-policy-agent/opa/internal/jwx/jwa"
@@ -33,7 +34,7 @@ func GetSigningKey(key string, alg jwa.SignatureAlgorithm) (interface{}, error)
case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512, jwa.ES256, jwa.ES384, jwa.ES512:
block, _ := pem.Decode([]byte(key))
if block == nil {
return nil, fmt.Errorf("failed to parse PEM block containing the key")
return nil, errors.New("failed to parse PEM block containing the key")
}
pub, err := x509.ParsePKIXPublicKey(block.Bytes)

View File

@@ -223,7 +223,7 @@ func (p *Planner) planRules(rules []*ast.Rule) (string, error) {
}
// Initialize parameters for functions.
for i := 0; i < len(rules[0].Head.Args); i++ {
for range len(rules[0].Head.Args) {
fn.Params = append(fn.Params, p.newLocal())
}
@@ -385,7 +385,7 @@ func (p *Planner) planRules(rules []*ast.Rule) (string, error) {
return nil
})
default:
return fmt.Errorf("illegal rule kind")
return errors.New("illegal rule kind")
}
})
})
@@ -497,7 +497,6 @@ func (p *Planner) planDotOr(obj ir.Local, key ir.Operand, or stmtFactory, iter p
func (p *Planner) planNestedObjects(obj ir.Local, ref ast.Ref, iter planLocalIter) error {
if len(ref) == 0 {
//return fmt.Errorf("nested object construction didn't create object")
return iter(obj)
}
@@ -991,8 +990,7 @@ func (p *Planner) planExprCall(e *ast.Expr, iter planiter) error {
op := e.Operator()
if replacement := p.mocks.Lookup(operator); replacement != nil {
switch r := replacement.Value.(type) {
case ast.Ref:
if r, ok := replacement.Value.(ast.Ref); ok {
if !r.HasPrefix(ast.DefaultRootRef) && !r.HasPrefix(ast.InputRootRef) {
// replacement is builtin
operator = r.String()
@@ -1147,7 +1145,7 @@ func (p *Planner) planExprCallFunc(name string, arity int, void bool, operands [
})
default:
return fmt.Errorf("impossible replacement, arity mismatch")
return errors.New("impossible replacement, arity mismatch")
}
}
@@ -1173,7 +1171,7 @@ func (p *Planner) planExprCallValue(value *ast.Term, arity int, operands []*ast.
})
})
default:
return fmt.Errorf("impossible replacement, arity mismatch")
return errors.New("impossible replacement, arity mismatch")
}
}
@@ -1750,7 +1748,7 @@ func (p *Planner) planRef(ref ast.Ref, iter planiter) error {
head, ok := ref[0].Value.(ast.Var)
if !ok {
return fmt.Errorf("illegal ref: non-var head")
return errors.New("illegal ref: non-var head")
}
if head.Compare(ast.DefaultRootDocument.Value) == 0 {
@@ -1767,7 +1765,7 @@ func (p *Planner) planRef(ref ast.Ref, iter planiter) error {
p.ltarget, ok = p.vars.GetOp(head)
if !ok {
return fmt.Errorf("illegal ref: unsafe head")
return errors.New("illegal ref: unsafe head")
}
return p.planRefRec(ref, 1, iter)

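The planner hunks above also adopt Go 1.22's range-over-integer loops in place of three-clause counting loops, which presumably means the module now assumes a Go 1.22+ language version. A small sketch of both forms:

```go
package main

import "fmt"

func main() {
	params := []string{"a", "b", "c"}

	// Go 1.22+: ranging over an integer iterates 0..n-1.
	for i := range len(params) {
		fmt.Println(i, params[i])
	}

	// When the index itself is unused, drop it entirely.
	for range len(params) {
		fmt.Print("*")
	}
	fmt.Println()
}
```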

@@ -111,7 +111,7 @@ func (t *ruletrie) Rules() []*ast.Rule {
func (t *ruletrie) Push(key ast.Ref) {
node := t
for i := 0; i < len(key)-1; i++ {
for i := range len(key) - 1 {
node = node.Get(key[i].Value)
if node == nil {
return
@@ -123,7 +123,7 @@ func (t *ruletrie) Push(key ast.Ref) {
func (t *ruletrie) Pop(key ast.Ref) {
node := t
for i := 0; i < len(key)-1; i++ {
for i := range len(key) - 1 {
node = node.Get(key[i].Value)
if node == nil {
return


@@ -1,6 +1,6 @@
package crypto
import "fmt"
import "errors"
// ConstantTimeByteCompare is a constant-time byte comparison of x and y. This function performs an absolute comparison
// of the two byte slices, assuming they represent a big-endian number.
@@ -11,7 +11,7 @@ import "fmt"
// +1 if x > y
func ConstantTimeByteCompare(x, y []byte) (int, error) {
if len(x) != len(y) {
return 0, fmt.Errorf("slice lengths do not match")
return 0, errors.New("slice lengths do not match")
}
xLarger, yLarger := 0, 0


@@ -7,6 +7,7 @@ import (
"crypto/hmac"
"encoding/asn1"
"encoding/binary"
"errors"
"fmt"
"hash"
"math"
@@ -82,7 +83,7 @@ func HMACKeyDerivation(hash func() hash.Hash, bitLen int, key []byte, label, con
// verify the requested bit length is not larger than the length encoding size
if int64(bitLen) > 0x7FFFFFFF {
return nil, fmt.Errorf("bitLen is greater than 32-bits")
return nil, errors.New("bitLen is greater than 32-bits")
}
fixedInput := bytes.NewBuffer(nil)


@@ -8,6 +8,7 @@ import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
@@ -189,7 +190,7 @@ func SignV4(headers map[string][]string, method string, theURL *url.URL, body []
authHeader := "AWS4-HMAC-SHA256 Credential=" + awsCreds.AccessKey + "/" + dateNow
authHeader += "/" + awsCreds.RegionName + "/" + service + "/aws4_request,"
authHeader += "SignedHeaders=" + headerList + ","
authHeader += "Signature=" + fmt.Sprintf("%x", signature)
authHeader += "Signature=" + hex.EncodeToString(signature)
return authHeader, awsHeaders
}

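Beyond the import cleanup, the SigV4 hunk replaces `fmt.Sprintf("%x", signature)` with `hex.EncodeToString(signature)`: the output is byte-for-byte identical lowercase hex, but the direct call avoids format-string parsing and reflection. A standalone check:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	sig := sha256.Sum256([]byte("payload"))

	viaSprintf := fmt.Sprintf("%x", sig[:])
	viaHex := hex.EncodeToString(sig[:]) // same lowercase hex, no fmt machinery

	fmt.Println(viaHex == viaSprintf) // true
}
```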

@@ -9,7 +9,7 @@ import (
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"fmt"
"errors"
"hash"
"io"
"math/big"
@@ -107,7 +107,7 @@ func deriveKeyFromAccessKeyPair(accessKey, secretKey string) (*ecdsa.PrivateKey,
counter++
if counter > 0xFF {
return nil, fmt.Errorf("exhausted single byte external counter")
return nil, errors.New("exhausted single byte external counter")
}
}
d = d.Add(d, one)
@@ -146,7 +146,7 @@ func retrievePrivateKey(symmetric Credentials) (v4aCredentials, error) {
privateKey, err := deriveKeyFromAccessKeyPair(symmetric.AccessKey, symmetric.SecretKey)
if err != nil {
return v4aCredentials{}, fmt.Errorf("failed to derive asymmetric key from credentials")
return v4aCredentials{}, errors.New("failed to derive asymmetric key from credentials")
}
creds := v4aCredentials{
@@ -216,7 +216,7 @@ func (s *httpSigner) Build() (signedRequest, error) {
signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength)
rawQuery := strings.Replace(query.Encode(), "+", "%20", -1)
rawQuery := strings.ReplaceAll(query.Encode(), "+", "%20")
canonicalURI := v4Internal.GetURIPath(req.URL)
@@ -314,7 +314,7 @@ func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, he
var canonicalHeaders strings.Builder
n := len(headers)
const colon = ':'
for i := 0; i < n; i++ {
for i := range n {
if headers[i] == hostHeader {
canonicalHeaders.WriteString(hostHeader)
canonicalHeaders.WriteRune(colon)

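The v4a signer swaps `strings.Replace(s, old, new, -1)` for `strings.ReplaceAll(s, old, new)` — the two are defined to be equivalent; `ReplaceAll` simply names the intent. A sketch in the same canonical-query context (values are hypothetical):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	q := url.Values{"key": {"a b+c"}}
	enc := q.Encode() // url.Values encodes spaces as '+'

	// ReplaceAll(s, old, new) == Replace(s, old, new, -1).
	fmt.Println(strings.ReplaceAll(enc, "+", "%20")) // key=a%20b%2Bc
}
```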

@@ -57,7 +57,7 @@ func TruncateFilePaths(maxIdealWidth, maxWidth int, path ...string) (map[string]
}
// Drop the overall length down to match our substitution
longestLocation = longestLocation - (len(lcs) - 3)
longestLocation -= (len(lcs) - 3)
}
return result, longestLocation


@@ -148,8 +148,6 @@ func (t *parser) key(data map[string]interface{}) error {
return err
}
return fmt.Errorf("key %q has no value", string(k))
//set(data, string(k), "")
//return err
case last == '[':
// We are in a list index context, so we need to set an index.
i, err := t.keyIndex()
@@ -168,7 +166,7 @@ func (t *parser) key(data map[string]interface{}) error {
set(data, kk, list)
return err
case last == '=':
//End of key. Consume =, Get value.
// End of key. Consume =, Get value.
// FIXME: Get value list first
vl, e := t.valList()
switch e {


@@ -7,6 +7,7 @@ package encoding
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
@@ -105,7 +106,7 @@ func readMagic(r io.Reader) error {
if err := binary.Read(r, binary.LittleEndian, &v); err != nil {
return err
} else if v != constant.Magic {
return fmt.Errorf("illegal magic value")
return errors.New("illegal magic value")
}
return nil
}
@@ -115,7 +116,7 @@ func readVersion(r io.Reader) error {
if err := binary.Read(r, binary.LittleEndian, &v); err != nil {
return err
} else if v != constant.Version {
return fmt.Errorf("illegal wasm version")
return errors.New("illegal wasm version")
}
return nil
}
@@ -199,7 +200,7 @@ func readSections(r io.Reader, m *module.Module) error {
return fmt.Errorf("code section: %w", err)
}
default:
return fmt.Errorf("illegal section id")
return errors.New("illegal section id")
}
}
}
@@ -269,7 +270,7 @@ func readNameMap(r io.Reader) ([]module.NameMap, error) {
return nil, err
}
nm := make([]module.NameMap, n)
for i := uint32(0); i < n; i++ {
for i := range n {
var name string
id, err := leb128.ReadVarUint32(r)
if err != nil {
@@ -289,7 +290,7 @@ func readNameSectionLocals(r io.Reader, s *module.NameSection) error {
if err != nil {
return err
}
for i := uint32(0); i < n; i++ {
for range n {
id, err := leb128.ReadVarUint32(r) // func index
if err != nil {
return err
@@ -326,7 +327,7 @@ func readTypeSection(r io.Reader, s *module.TypeSection) error {
return err
}
for i := uint32(0); i < n; i++ {
for range n {
var ftype module.FunctionType
if err := readFunctionType(r, &ftype); err != nil {
@@ -346,7 +347,7 @@ func readImportSection(r io.Reader, s *module.ImportSection) error {
return err
}
for i := uint32(0); i < n; i++ {
for range n {
var imp module.Import
@@ -367,14 +368,14 @@ func readTableSection(r io.Reader, s *module.TableSection) error {
return err
}
for i := uint32(0); i < n; i++ {
for range n {
var table module.Table
if elem, err := readByte(r); err != nil {
return err
} else if elem != constant.ElementTypeAnyFunc {
return fmt.Errorf("illegal element type")
return errors.New("illegal element type")
}
table.Type = types.Anyfunc
@@ -396,7 +397,7 @@ func readMemorySection(r io.Reader, s *module.MemorySection) error {
return err
}
for i := uint32(0); i < n; i++ {
for range n {
var mem module.Memory
@@ -417,7 +418,7 @@ func readGlobalSection(r io.Reader, s *module.GlobalSection) error {
return err
}
for i := uint32(0); i < n; i++ {
for range n {
var global module.Global
@@ -442,7 +443,7 @@ func readExportSection(r io.Reader, s *module.ExportSection) error {
return err
}
for i := uint32(0); i < n; i++ {
for range n {
var exp module.Export
@@ -463,7 +464,7 @@ func readElementSection(r io.Reader, s *module.ElementSection) error {
return err
}
for i := uint32(0); i < n; i++ {
for range n {
var seg module.ElementSegment
@@ -484,7 +485,7 @@ func readDataSection(r io.Reader, s *module.DataSection) error {
return err
}
for i := uint32(0); i < n; i++ {
for range n {
var seg module.DataSegment
@@ -505,7 +506,7 @@ func readRawCodeSection(r io.Reader, s *module.RawCodeSection) error {
return err
}
for i := uint32(0); i < n; i++ {
for range n {
var seg module.RawCodeSegment
if err := readRawCodeSegment(r, &seg); err != nil {
@@ -547,7 +548,7 @@ func readGlobal(r io.Reader, global *module.Global) error {
if b == 1 {
global.Mutable = true
} else if b != 0 {
return fmt.Errorf("illegal mutability flag")
return errors.New("illegal mutability flag")
}
return readConstantExpr(r, &global.Init)
@@ -584,7 +585,7 @@ func readImport(r io.Reader, imp *module.Import) error {
if elem, err := readByte(r); err != nil {
return err
} else if elem != constant.ElementTypeAnyFunc {
return fmt.Errorf("illegal element type")
return errors.New("illegal element type")
}
desc := module.TableImport{
Type: types.Anyfunc,
@@ -617,12 +618,12 @@ func readImport(r io.Reader, imp *module.Import) error {
if b == 1 {
desc.Mutable = true
} else if b != 0 {
return fmt.Errorf("illegal mutability flag")
return errors.New("illegal mutability flag")
}
return nil
}
return fmt.Errorf("illegal import descriptor type")
return errors.New("illegal import descriptor type")
}
func readExport(r io.Reader, exp *module.Export) error {
@@ -646,7 +647,7 @@ func readExport(r io.Reader, exp *module.Export) error {
case constant.ExportDescGlobal:
exp.Descriptor.Type = module.GlobalExportType
default:
return fmt.Errorf("illegal export descriptor type")
return errors.New("illegal export descriptor type")
}
exp.Descriptor.Index, err = leb128.ReadVarUint32(r)
@@ -727,7 +728,7 @@ func readExpr(r io.Reader, expr *module.Expr) (err error) {
case error:
err = r
default:
err = fmt.Errorf("unknown panic")
err = errors.New("unknown panic")
}
}
}()
@@ -823,7 +824,7 @@ func readLimits(r io.Reader, l *module.Limit) error {
}
l.Max = &maxLim
} else if b != 0 {
return fmt.Errorf("illegal limit flag")
return errors.New("illegal limit flag")
}
return nil
@@ -838,7 +839,7 @@ func readLocals(r io.Reader, locals *[]module.LocalDeclaration) error {
ret := make([]module.LocalDeclaration, n)
for i := uint32(0); i < n; i++ {
for i := range n {
if err := readVarUint32(r, &ret[i].Count); err != nil {
return err
}
@@ -888,7 +889,7 @@ func readVarUint32Vector(r io.Reader, v *[]uint32) error {
ret := make([]uint32, n)
for i := uint32(0); i < n; i++ {
for i := range n {
if err := readVarUint32(r, &ret[i]); err != nil {
return err
}
@@ -907,7 +908,7 @@ func readValueTypeVector(r io.Reader, v *[]types.ValueType) error {
ret := make([]types.ValueType, n)
for i := uint32(0); i < n; i++ {
for i := range n {
if err := readValueType(r, &ret[i]); err != nil {
return err
}

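The wasm reader's loops range over counts read as `uint32`; with range-over-integer, the loop variable takes the integer's own type, so `for i := range n` keeps `i` a `uint32` exactly like the old `for i := uint32(0); i < n; i++` form. A minimal check:

```go
package main

import "fmt"

func main() {
	var n uint32 = 3

	ret := make([]uint32, n)
	for i := range n {
		// i is uint32 here, matching the old three-clause loop.
		ret[i] = i * 10
	}
	fmt.Println(ret) // [0 10 20]
}
```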

@@ -7,6 +7,7 @@ package encoding
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
@@ -260,7 +261,7 @@ func writeTableSection(w io.Writer, s module.TableSection) error {
return err
}
default:
return fmt.Errorf("illegal table element type")
return errors.New("illegal table element type")
}
if err := writeLimits(&buf, table.Lim); err != nil {
return err
@@ -588,7 +589,7 @@ func writeImport(w io.Writer, imp module.Import) error {
}
return writeByte(w, constant.Const)
default:
return fmt.Errorf("illegal import descriptor type")
return errors.New("illegal import descriptor type")
}
}


@@ -8,7 +8,7 @@ import (
"encoding/json"
"fmt"
"net/url"
"sort"
"slices"
"strings"
"github.com/open-policy-agent/opa/internal/deepcopy"
@@ -18,12 +18,32 @@ import (
const (
annotationScopePackage = "package"
annotationScopeImport = "import"
annotationScopeRule = "rule"
annotationScopeDocument = "document"
annotationScopeSubpackages = "subpackages"
)
var (
scopeTerm = StringTerm("scope")
titleTerm = StringTerm("title")
entrypointTerm = StringTerm("entrypoint")
descriptionTerm = StringTerm("description")
organizationsTerm = StringTerm("organizations")
authorsTerm = StringTerm("authors")
relatedResourcesTerm = StringTerm("related_resources")
schemasTerm = StringTerm("schemas")
customTerm = StringTerm("custom")
refTerm = StringTerm("ref")
nameTerm = StringTerm("name")
emailTerm = StringTerm("email")
schemaTerm = StringTerm("schema")
definitionTerm = StringTerm("definition")
documentTerm = StringTerm(annotationScopeDocument)
packageTerm = StringTerm(annotationScopePackage)
ruleTerm = StringTerm(annotationScopeRule)
subpackagesTerm = StringTerm(annotationScopeSubpackages)
)
type (
// Annotations represents metadata attached to other AST nodes such as rules.
Annotations struct {
@@ -291,7 +311,6 @@ func (ar *AnnotationsRef) MarshalJSON() ([]byte, error) {
}
func scopeCompare(s1, s2 string) int {
o1 := scopeOrder(s1)
o2 := scopeOrder(s2)
@@ -311,8 +330,7 @@ func scopeCompare(s1, s2 string) int {
}
func scopeOrder(s string) int {
switch s {
case annotationScopeRule:
if s == annotationScopeRule {
return 1
}
return 0
@@ -325,7 +343,7 @@ func compareAuthors(a, b []*AuthorAnnotation) int {
return -1
}
for i := 0; i < len(a); i++ {
for i := range a {
if cmp := a[i].Compare(b[i]); cmp != 0 {
return cmp
}
@@ -341,8 +359,8 @@ func compareRelatedResources(a, b []*RelatedResourceAnnotation) int {
return -1
}
for i := 0; i < len(a); i++ {
if cmp := strings.Compare(a[i].String(), b[i].String()); cmp != 0 {
for i := range a {
if cmp := a[i].Compare(b[i]); cmp != 0 {
return cmp
}
}
@@ -356,7 +374,7 @@ func compareSchemas(a, b []*SchemaAnnotation) int {
maxLen = len(b)
}
for i := 0; i < maxLen; i++ {
for i := range maxLen {
if cmp := a[i].Compare(b[i]); cmp != 0 {
return cmp
}
@@ -378,7 +396,7 @@ func compareStringLists(a, b []string) int {
return -1
}
for i := 0; i < len(a); i++ {
for i := range a {
if cmp := strings.Compare(a[i], b[i]); cmp != 0 {
return cmp
}
@@ -409,7 +427,9 @@ func (a *Annotations) Copy(node Node) *Annotations {
cpy.Schemas[i] = a.Schemas[i].Copy()
}
cpy.Custom = deepcopy.Map(a.Custom)
if a.Custom != nil {
cpy.Custom = deepcopy.Map(a.Custom)
}
cpy.node = node
@@ -425,19 +445,30 @@ func (a *Annotations) toObject() (*Object, *Error) {
}
if len(a.Scope) > 0 {
obj.Insert(StringTerm("scope"), StringTerm(a.Scope))
switch a.Scope {
case annotationScopeDocument:
obj.Insert(scopeTerm, documentTerm)
case annotationScopePackage:
obj.Insert(scopeTerm, packageTerm)
case annotationScopeRule:
obj.Insert(scopeTerm, ruleTerm)
case annotationScopeSubpackages:
obj.Insert(scopeTerm, subpackagesTerm)
default:
obj.Insert(scopeTerm, StringTerm(a.Scope))
}
}
if len(a.Title) > 0 {
obj.Insert(StringTerm("title"), StringTerm(a.Title))
obj.Insert(titleTerm, StringTerm(a.Title))
}
if a.Entrypoint {
obj.Insert(StringTerm("entrypoint"), BooleanTerm(true))
obj.Insert(entrypointTerm, InternedBooleanTerm(true))
}
if len(a.Description) > 0 {
obj.Insert(StringTerm("description"), StringTerm(a.Description))
obj.Insert(descriptionTerm, StringTerm(a.Description))
}
if len(a.Organizations) > 0 {
@@ -445,19 +476,19 @@ func (a *Annotations) toObject() (*Object, *Error) {
for _, org := range a.Organizations {
orgs = append(orgs, StringTerm(org))
}
obj.Insert(StringTerm("organizations"), ArrayTerm(orgs...))
obj.Insert(organizationsTerm, ArrayTerm(orgs...))
}
if len(a.RelatedResources) > 0 {
rrs := make([]*Term, 0, len(a.RelatedResources))
for _, rr := range a.RelatedResources {
rrObj := NewObject(Item(StringTerm("ref"), StringTerm(rr.Ref.String())))
rrObj := NewObject(Item(refTerm, StringTerm(rr.Ref.String())))
if len(rr.Description) > 0 {
rrObj.Insert(StringTerm("description"), StringTerm(rr.Description))
rrObj.Insert(descriptionTerm, StringTerm(rr.Description))
}
rrs = append(rrs, NewTerm(rrObj))
}
obj.Insert(StringTerm("related_resources"), ArrayTerm(rrs...))
obj.Insert(relatedResourcesTerm, ArrayTerm(rrs...))
}
if len(a.Authors) > 0 {
@@ -465,14 +496,14 @@ func (a *Annotations) toObject() (*Object, *Error) {
for _, author := range a.Authors {
aObj := NewObject()
if len(author.Name) > 0 {
aObj.Insert(StringTerm("name"), StringTerm(author.Name))
aObj.Insert(nameTerm, StringTerm(author.Name))
}
if len(author.Email) > 0 {
aObj.Insert(StringTerm("email"), StringTerm(author.Email))
aObj.Insert(emailTerm, StringTerm(author.Email))
}
as = append(as, NewTerm(aObj))
}
obj.Insert(StringTerm("authors"), ArrayTerm(as...))
obj.Insert(authorsTerm, ArrayTerm(as...))
}
if len(a.Schemas) > 0 {
@@ -480,21 +511,21 @@ func (a *Annotations) toObject() (*Object, *Error) {
for _, s := range a.Schemas {
sObj := NewObject()
if len(s.Path) > 0 {
sObj.Insert(StringTerm("path"), NewTerm(s.Path.toArray()))
sObj.Insert(pathTerm, NewTerm(s.Path.toArray()))
}
if len(s.Schema) > 0 {
sObj.Insert(StringTerm("schema"), NewTerm(s.Schema.toArray()))
sObj.Insert(schemaTerm, NewTerm(s.Schema.toArray()))
}
if s.Definition != nil {
def, err := InterfaceToValue(s.Definition)
if err != nil {
return nil, NewError(CompileErr, a.Location, "invalid definition in schema annotation: %s", err.Error())
}
sObj.Insert(StringTerm("definition"), NewTerm(def))
sObj.Insert(definitionTerm, NewTerm(def))
}
ss = append(ss, NewTerm(sObj))
}
obj.Insert(StringTerm("schemas"), ArrayTerm(ss...))
obj.Insert(schemasTerm, ArrayTerm(ss...))
}
if len(a.Custom) > 0 {
@@ -502,7 +533,7 @@ func (a *Annotations) toObject() (*Object, *Error) {
if err != nil {
return nil, NewError(CompileErr, a.Location, "invalid custom annotation %s", err.Error())
}
obj.Insert(StringTerm("custom"), NewTerm(c))
obj.Insert(customTerm, NewTerm(c))
}
return &obj, nil
@@ -563,7 +594,11 @@ func attachAnnotationsNodes(mod *Module) Errors {
case *Package:
a.Scope = annotationScopePackage
case *Import:
a.Scope = annotationScopeImport
// Note that this isn't a valid scope, but set here so that the
// validate function called below can print an error message with
// a context that makes sense ("invalid scope: 'import'" instead of
// "invalid scope: '')
a.Scope = "import"
}
}
@@ -681,7 +716,6 @@ func (s *SchemaAnnotation) Copy() *SchemaAnnotation {
// Compare returns an integer indicating if s is less than, equal to, or greater
// than other.
func (s *SchemaAnnotation) Compare(other *SchemaAnnotation) int {
if cmp := s.Path.Compare(other.Path); cmp != 0 {
return cmp
}
@@ -819,9 +853,7 @@ func (as *AnnotationSet) Flatten() FlatAnnotationsRefSet {
}
// Sort by path, then annotation location, for stable output
sort.SliceStable(refs, func(i, j int) bool {
return refs[i].Compare(refs[j]) < 0
})
slices.SortStableFunc(refs, (*AnnotationsRef).Compare)
return refs
}
@@ -853,8 +885,8 @@ func (as *AnnotationSet) Chain(rule *Rule) AnnotationsRefSet {
if len(refs) > 1 {
// Sort by annotation location; chain must start with annotations declared closest to rule, then going outward
sort.SliceStable(refs, func(i, j int) bool {
return refs[i].Annotations.Location.Compare(refs[j].Annotations.Location) > 0
slices.SortStableFunc(refs, func(a, b *AnnotationsRef) int {
return -a.Annotations.Location.Compare(b.Annotations.Location)
})
}

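Two patterns in the annotations change are worth a note. First, terms that were rebuilt on every `toObject` call (`StringTerm("scope")` and friends) are hoisted into package-level vars, so each key term is allocated once and shared. A toy sketch of the hoisting — the `Term` type here is invented, not `ast.Term`:

```go
package main

import "fmt"

// Term is a stand-in for ast.Term; the real type is much richer.
type Term struct{ s string }

func StringTerm(s string) *Term { return &Term{s: s} }

// Allocated once at package init, reused by every call below.
var (
	scopeTerm = StringTerm("scope")
	titleTerm = StringTerm("title")
)

func toObject(scope, title string) map[*Term]string {
	obj := map[*Term]string{}
	obj[scopeTerm] = scope // no per-call StringTerm allocation
	obj[titleTerm] = title
	return obj
}

func main() {
	fmt.Println(len(toObject("rule", "example"))) // 2
}
```

Second, `sort.SliceStable` becomes `slices.SortStableFunc(refs, (*AnnotationsRef).Compare)`: a method expression turns the existing three-way `Compare` method directly into the comparator, and negating the result (as in the `Chain` hunk) flips the order to descending.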

@@ -299,6 +299,9 @@ var DefaultBuiltins = [...]*Builtin{
// Printing
Print,
InternalPrint,
// Testing
InternalTestCase,
}
// BuiltinMap provides a convenient mapping of built-in names to
@@ -486,10 +489,10 @@ var Minus = &Builtin{
Description: "Minus subtracts the second number from the first number or computes the difference between two sets.",
Decl: types.NewFunction(
types.Args(
types.Named("x", types.NewAny(types.N, types.NewSet(types.A))),
types.Named("y", types.NewAny(types.N, types.NewSet(types.A))),
types.Named("x", types.NewAny(types.N, types.SetOfAny)),
types.Named("y", types.NewAny(types.N, types.SetOfAny)),
),
types.Named("z", types.NewAny(types.N, types.NewSet(types.A))).Description("the difference of `x` and `y`"),
types.Named("z", types.NewAny(types.N, types.SetOfAny)).Description("the difference of `x` and `y`"),
),
Categories: category("sets", "numbers"),
}
@@ -671,10 +674,10 @@ var And = &Builtin{
Description: "Returns the intersection of two sets.",
Decl: types.NewFunction(
types.Args(
types.Named("x", types.NewSet(types.A)).Description("the first set"),
types.Named("y", types.NewSet(types.A)).Description("the second set"),
types.Named("x", types.SetOfAny).Description("the first set"),
types.Named("y", types.SetOfAny).Description("the second set"),
),
types.Named("z", types.NewSet(types.A)).Description("the intersection of `x` and `y`"),
types.Named("z", types.SetOfAny).Description("the intersection of `x` and `y`"),
),
Categories: sets,
}
@@ -686,10 +689,10 @@ var Or = &Builtin{
Description: "Returns the union of two sets.",
Decl: types.NewFunction(
types.Args(
types.Named("x", types.NewSet(types.A)),
types.Named("y", types.NewSet(types.A)),
types.Named("x", types.SetOfAny),
types.Named("y", types.SetOfAny),
),
types.Named("z", types.NewSet(types.A)).Description("the union of `x` and `y`"),
types.Named("z", types.SetOfAny).Description("the union of `x` and `y`"),
),
Categories: sets,
}
@@ -699,9 +702,9 @@ var Intersection = &Builtin{
Description: "Returns the intersection of the given input sets.",
Decl: types.NewFunction(
types.Args(
types.Named("xs", types.NewSet(types.NewSet(types.A))).Description("set of sets to intersect"),
types.Named("xs", types.NewSet(types.SetOfAny)).Description("set of sets to intersect"),
),
types.Named("y", types.NewSet(types.A)).Description("the intersection of all `xs` sets"),
types.Named("y", types.SetOfAny).Description("the intersection of all `xs` sets"),
),
Categories: sets,
}
@@ -711,9 +714,9 @@ var Union = &Builtin{
Description: "Returns the union of the given input sets.",
Decl: types.NewFunction(
types.Args(
types.Named("xs", types.NewSet(types.NewSet(types.A))).Description("set of sets to merge"),
types.Named("xs", types.NewSet(types.SetOfAny)).Description("set of sets to merge"),
),
types.Named("y", types.NewSet(types.A)).Description("the union of all `xs` sets"),
types.Named("y", types.SetOfAny).Description("the union of all `xs` sets"),
),
Categories: sets,
}
@@ -730,7 +733,7 @@ var Count = &Builtin{
Decl: types.NewFunction(
types.Args(
types.Named("collection", types.NewAny(
types.NewSet(types.A),
types.SetOfAny,
types.NewArray(nil, types.A),
types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
types.S,
@@ -747,7 +750,7 @@ var Sum = &Builtin{
Decl: types.NewFunction(
types.Args(
types.Named("collection", types.NewAny(
types.NewSet(types.N),
types.SetOfNum,
types.NewArray(nil, types.N),
)).Description("the set or array of numbers to sum"),
),
@@ -762,7 +765,7 @@ var Product = &Builtin{
Decl: types.NewFunction(
types.Args(
types.Named("collection", types.NewAny(
types.NewSet(types.N),
types.SetOfNum,
types.NewArray(nil, types.N),
)).Description("the set or array of numbers to multiply"),
),
@@ -777,7 +780,7 @@ var Max = &Builtin{
Decl: types.NewFunction(
types.Args(
types.Named("collection", types.NewAny(
types.NewSet(types.A),
types.SetOfAny,
types.NewArray(nil, types.A),
)).Description("the set or array to be searched"),
),
@@ -792,7 +795,7 @@ var Min = &Builtin{
Decl: types.NewFunction(
types.Args(
types.Named("collection", types.NewAny(
types.NewSet(types.A),
types.SetOfAny,
types.NewArray(nil, types.A),
)).Description("the set or array to be searched"),
),
@@ -812,7 +815,7 @@ var Sort = &Builtin{
types.Args(
types.Named("collection", types.NewAny(
types.NewArray(nil, types.A),
types.NewSet(types.A),
types.SetOfAny,
)).Description("the array or set to be sorted"),
),
types.Named("n", types.NewArray(nil, types.A)).Description("the sorted array"),
@@ -842,8 +845,8 @@ var ArraySlice = &Builtin{
Decl: types.NewFunction(
types.Args(
types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be sliced"),
types.Named("start", types.NewNumber()).Description("the start index of the returned slice; if less than zero, it's clamped to 0"),
types.Named("stop", types.NewNumber()).Description("the stop index of the returned slice; if larger than `count(arr)`, it's clamped to `count(arr)`"),
types.Named("start", types.N).Description("the start index of the returned slice; if less than zero, it's clamped to 0"),
types.Named("stop", types.N).Description("the stop index of the returned slice; if larger than `count(arr)`, it's clamped to `count(arr)`"),
),
types.Named("slice", types.NewArray(nil, types.A)).Description("the subslice of `array`, from `start` to `end`, including `arr[start]`, but excluding `arr[end]`"),
),
@@ -993,12 +996,12 @@ var AnyPrefixMatch = &Builtin{
types.Args(
types.Named("search", types.NewAny(
types.S,
types.NewSet(types.S),
types.SetOfStr,
types.NewArray(nil, types.S),
)).Description("search string(s)"),
types.Named("base", types.NewAny(
types.S,
types.NewSet(types.S),
types.SetOfStr,
types.NewArray(nil, types.S),
)).Description("base string(s)"),
),
@@ -1014,12 +1017,12 @@ var AnySuffixMatch = &Builtin{
types.Args(
types.Named("search", types.NewAny(
types.S,
types.NewSet(types.S),
types.SetOfStr,
types.NewArray(nil, types.S),
)).Description("search string(s)"),
types.Named("base", types.NewAny(
types.S,
types.NewSet(types.S),
types.SetOfStr,
types.NewArray(nil, types.S),
)).Description("base string(s)"),
),
@@ -1035,7 +1038,7 @@ var Concat = &Builtin{
types.Args(
types.Named("delimiter", types.S).Description("string to use as a delimiter"),
types.Named("collection", types.NewAny(
types.NewSet(types.S),
types.SetOfStr,
types.NewArray(nil, types.S),
)).Description("strings to join"),
),
@@ -1597,13 +1600,13 @@ var ObjectSubset = &Builtin{
types.Named("super", types.NewAny(types.NewObject(
nil,
types.NewDynamicProperty(types.A, types.A),
), types.NewSet(types.A),
), types.SetOfAny,
types.NewArray(nil, types.A),
)).Description("object to test if sub is a subset of"),
types.Named("sub", types.NewAny(types.NewObject(
nil,
types.NewDynamicProperty(types.A, types.A),
), types.NewSet(types.A),
), types.SetOfAny,
types.NewArray(nil, types.A),
)).Description("object to test if super is a superset of"),
),
@@ -1656,7 +1659,7 @@ var ObjectRemove = &Builtin{
)).Description("object to remove keys from"),
types.Named("keys", types.NewAny(
types.NewArray(nil, types.A),
types.NewSet(types.A),
types.SetOfAny,
types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
)).Description("keys to remove from x"),
),
@@ -1676,7 +1679,7 @@ var ObjectFilter = &Builtin{
)).Description("object to filter keys"),
types.Named("keys", types.NewAny(
types.NewArray(nil, types.A),
types.NewSet(types.A),
types.SetOfAny,
types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
)).Description("keys to keep in `object`"),
),
@@ -1707,7 +1710,7 @@ var ObjectKeys = &Builtin{
types.Args(
types.Named("object", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("object to get keys from"),
),
types.Named("value", types.NewSet(types.A)).Description("set of `object`'s keys"),
types.Named("value", types.SetOfAny).Description("set of `object`'s keys"),
),
}
@@ -1881,7 +1884,8 @@ var URLQueryEncodeObject = &Builtin{
types.NewAny(
types.S,
types.NewArray(nil, types.S),
types.NewSet(types.S)),
types.SetOfStr,
),
),
),
).Description("the object to encode"),
@@ -2572,13 +2576,13 @@ var ReachableBuiltin = &Builtin{
types.NewDynamicProperty(
types.A,
types.NewAny(
types.NewSet(types.A),
types.SetOfAny,
types.NewArray(nil, types.A)),
)),
).Description("object containing a set or array of neighboring vertices"),
types.Named("initial", types.NewAny(types.NewSet(types.A), types.NewArray(nil, types.A))).Description("set or array of root vertices"),
types.Named("initial", types.NewAny(types.SetOfAny, types.NewArray(nil, types.A))).Description("set or array of root vertices"),
),
types.Named("output", types.NewSet(types.A)).Description("set of vertices reachable from the `initial` vertices in the directed `graph`"),
types.Named("output", types.SetOfAny).Description("set of vertices reachable from the `initial` vertices in the directed `graph`"),
),
}
@@ -2592,11 +2596,11 @@ var ReachablePathsBuiltin = &Builtin{
types.NewDynamicProperty(
types.A,
types.NewAny(
types.NewSet(types.A),
types.SetOfAny,
types.NewArray(nil, types.A)),
)),
).Description("object containing a set or array of root vertices"),
types.Named("initial", types.NewAny(types.NewSet(types.A), types.NewArray(nil, types.A))).Description("initial paths"), // TODO(sr): copied. is that correct?
types.Named("initial", types.NewAny(types.SetOfAny, types.NewArray(nil, types.A))).Description("initial paths"), // TODO(sr): copied. is that correct?
),
types.Named("output", types.NewSet(types.NewArray(nil, types.A))).Description("paths reachable from the `initial` vertices in the directed `graph`"),
),
@@ -3027,7 +3031,7 @@ var NetCIDRExpand = &Builtin{
types.Args(
types.Named("cidr", types.S).Description("CIDR to expand"),
),
types.Named("hosts", types.NewSet(types.S)).Description("set of IP addresses the CIDR `cidr` expands to"),
types.Named("hosts", types.SetOfStr).Description("set of IP addresses the CIDR `cidr` expands to"),
),
}
@@ -3065,10 +3069,10 @@ Supports both IPv4 and IPv6 notations. IPv6 inputs need a prefix length (e.g. "/
types.Args(
types.Named("addrs", types.NewAny(
types.NewArray(nil, types.NewAny(types.S)),
types.NewSet(types.S),
types.SetOfStr,
)).Description("CIDRs or IP addresses"),
),
types.Named("output", types.NewSet(types.S)).Description("smallest possible set of CIDRs obtained after merging the provided list of IP addresses and subnets in `addrs`"),
types.Named("output", types.SetOfStr).Description("smallest possible set of CIDRs obtained after merging the provided list of IP addresses and subnets in `addrs`"),
),
}
@@ -3110,7 +3114,7 @@ var NetLookupIPAddr = &Builtin{
types.Args(
types.Named("name", types.S).Description("domain name to resolve"),
),
types.Named("addrs", types.NewSet(types.S)).Description("IP addresses (v4 and v6) that `name` resolves to"),
types.Named("addrs", types.SetOfStr).Description("IP addresses (v4 and v6) that `name` resolves to"),
),
Nondeterministic: true,
}
@@ -3160,7 +3164,12 @@ var Print = &Builtin{
// The compiler rewrites print() calls to refer to the internal implementation.
var InternalPrint = &Builtin{
Name: "internal.print",
Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.NewSet(types.A))}, nil),
Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.SetOfAny)}, nil),
}
var InternalTestCase = &Builtin{
Name: "internal.test_case",
Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.A)}, nil),
}
/**
@@ -3172,10 +3181,10 @@ var SetDiff = &Builtin{
Name: "set_diff",
Decl: types.NewFunction(
types.Args(
types.NewSet(types.A),
types.NewSet(types.A),
types.SetOfAny,
types.SetOfAny,
),
types.NewSet(types.A),
types.SetOfAny,
),
deprecated: true,
}
@@ -3212,7 +3221,7 @@ var CastSet = &Builtin{
Name: "cast_set",
Decl: types.NewFunction(
types.Args(types.A),
types.NewSet(types.A),
types.SetOfAny,
),
deprecated: true,
}
@@ -3278,7 +3287,7 @@ var All = &Builtin{
Decl: types.NewFunction(
types.Args(
types.NewAny(
types.NewSet(types.A),
types.SetOfAny,
types.NewArray(nil, types.A),
),
),
@@ -3294,7 +3303,7 @@ var Any = &Builtin{
Decl: types.NewFunction(
types.Args(
types.NewAny(
types.NewSet(types.A),
types.SetOfAny,
types.NewArray(nil, types.A),
),
),
@@ -3392,7 +3401,7 @@ func (b *Builtin) IsTargetPos(i int) bool {
func init() {
BuiltinMap = map[string]*Builtin{}
for _, b := range DefaultBuiltins {
for _, b := range &DefaultBuiltins {
RegisterBuiltin(b)
}
}

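The `init` hunk ranges over `&DefaultBuiltins` rather than `DefaultBuiltins`: the variable is an array (`[...]*Builtin`), and ranging over an array value copies the whole array first, while ranging over a pointer to it iterates in place. With an array of pointers the saving is small, but it is free to take:

```go
package main

import "fmt"

var values = [...]int{1, 2, 3}

func main() {
	// Ranging over the array value copies all of `values` first.
	sum := 0
	for _, v := range values {
		sum += v
	}

	// Ranging over a pointer to the array iterates without the copy.
	sumNoCopy := 0
	for _, v := range &values {
		sumNoCopy += v
	}

	fmt.Println(sum, sumNoCopy) // 6 6
}
```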

@@ -11,6 +11,7 @@ import (
"fmt"
"io"
"os"
"slices"
"sort"
"strings"
@@ -116,8 +117,9 @@ func CapabilitiesForThisVersion(opts ...CapabilitiesOption) *Capabilities {
f.Builtins = make([]*Builtin, len(Builtins))
copy(f.Builtins, Builtins)
sort.Slice(f.Builtins, func(i, j int) bool {
return f.Builtins[i].Name < f.Builtins[j].Name
slices.SortFunc(f.Builtins, func(a, b *Builtin) int {
return strings.Compare(a.Name, b.Name)
})
if co.regoVersion == RegoV0 || co.regoVersion == RegoV0CompatV1 {
@@ -243,12 +245,7 @@ func (c *Capabilities) MinimumCompatibleVersion() (string, bool) {
}
func (c *Capabilities) ContainsFeature(feature string) bool {
for _, f := range c.Features {
if f == feature {
return true
}
}
return false
return slices.Contains(c.Features, feature)
}
// addBuiltinSorted inserts a built-in into c in sorted order. An existing built-in with the same name

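`ContainsFeature` collapses to `slices.Contains`, which replaces the hand-written loop one-for-one for any comparable element type:

```go
package main

import (
	"fmt"
	"slices"
)

func containsFeature(features []string, feature string) bool {
	return slices.Contains(features, feature) // same semantics as the old loop
}

func main() {
	fmt.Println(containsFeature([]string{"rego_v1", "wasm"}, "wasm")) // true
	fmt.Println(containsFeature(nil, "wasm"))                         // false
}
```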

@@ -276,7 +276,7 @@ func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) {
if len(rule.Head.Args) > 0 {
// If args are not referred to in body, infer as any.
WalkVars(rule.Head.Args, func(v Var) bool {
if cpy.Get(v) == nil {
if cpy.GetByValue(v) == nil {
cpy.tree.PutOne(v, types.A)
}
return false
@@ -284,8 +284,8 @@ func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) {
// Construct function type.
args := make([]types.Type, len(rule.Head.Args))
for i := 0; i < len(rule.Head.Args); i++ {
args[i] = cpy.Get(rule.Head.Args[i])
for i := range len(rule.Head.Args) {
args[i] = cpy.GetByValue(rule.Head.Args[i].Value)
}
f := types.NewFunction(args, cpy.Get(rule.Head.Value))
@@ -294,7 +294,7 @@ func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) {
} else {
switch rule.Head.RuleKind() {
case SingleValue:
typeV := cpy.Get(rule.Head.Value)
typeV := cpy.GetByValue(rule.Head.Value.Value)
if !path.IsGround() {
// e.g. store object[string: whatever] at data.p.q.r, not data.p.q.r[x] or data.p.q.r[x].y[z]
objPath := path.DynamicSuffix()
@@ -306,13 +306,11 @@ func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) {
tc.err([]*Error{NewError(TypeErr, rule.Head.Location, err.Error())}) //nolint:govet
tpe = nil
}
} else {
if typeV != nil {
tpe = typeV
}
} else if typeV != nil {
tpe = typeV
}
case MultiValue:
typeK := cpy.Get(rule.Head.Key)
typeK := cpy.GetByValue(rule.Head.Key.Value)
if typeK != nil {
tpe = types.NewSet(typeK)
}
@@ -341,7 +339,7 @@ func nestedObject(env *TypeEnv, path Ref, tpe types.Type) (types.Type, error) {
}
var dynamicProperty *types.DynamicProperty
typeK := env.Get(k)
typeK := env.GetByValue(k.Value)
if typeK == nil {
return nil, nil
}
@@ -391,7 +389,7 @@ func (tc *typeChecker) checkExprBuiltin(env *TypeEnv, expr *Expr) *Error {
// type checker relies on reordering (in particular for references to local
// vars).
name := expr.Operator()
tpe := env.Get(name)
tpe := env.GetByRef(name)
if tpe == nil {
if tc.allowUndefinedFuncs {
@@ -431,7 +429,7 @@ func (tc *typeChecker) checkExprBuiltin(env *TypeEnv, expr *Expr) *Error {
if !unify1(env, args[i], fargs.Arg(i), false) {
post := make([]types.Type, len(args))
for i := range args {
post[i] = env.Get(args[i])
post[i] = env.GetByValue(args[i].Value)
}
return newArgError(expr.Location, name, "invalid argument(s)", post, namedFargs)
}
@@ -453,7 +451,7 @@ func checkExprEq(env *TypeEnv, expr *Expr) *Error {
}
a, b := expr.Operand(0), expr.Operand(1)
typeA, typeB := env.Get(a), env.Get(b)
typeA, typeB := env.GetByValue(a.Value), env.GetByValue(b.Value)
if !unify2(env, a, typeA, b, typeB) {
err := NewError(TypeErr, expr.Location, "match error")
@@ -473,7 +471,7 @@ func (tc *typeChecker) checkExprWith(env *TypeEnv, expr *Expr, i int) *Error {
}
target, value := expr.With[i].Target, expr.With[i].Value
targetType, valueType := env.Get(target), env.Get(value)
targetType, valueType := env.GetByValue(target.Value), env.GetByValue(value.Value)
if t, ok := targetType.(*types.Function); ok { // built-in function replacement
switch v := valueType.(type) {
@@ -509,7 +507,7 @@ func unify2(env *TypeEnv, a *Term, typeA types.Type, b *Term, typeB types.Type)
case Var:
switch b.Value.(type) {
case Var:
return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false)
return unify1(env, a, types.A, false) && unify1(env, b, env.GetByValue(a.Value), false)
case *Array:
return unify2Array(env, b, a)
case *object:
@@ -525,15 +523,15 @@ func unify2Array(env *TypeEnv, a *Term, b *Term) bool {
switch bv := b.Value.(type) {
case *Array:
if arr.Len() == bv.Len() {
for i := 0; i < arr.Len(); i++ {
if !unify2(env, arr.Elem(i), env.Get(arr.Elem(i)), bv.Elem(i), env.Get(bv.Elem(i))) {
for i := range arr.Len() {
if !unify2(env, arr.Elem(i), env.GetByValue(arr.Elem(i).Value), bv.Elem(i), env.GetByValue(bv.Elem(i).Value)) {
return false
}
}
return true
}
case Var:
return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false)
return unify1(env, a, types.A, false) && unify1(env, b, env.GetByValue(a.Value), false)
}
return false
}
@@ -545,14 +543,14 @@ func unify2Object(env *TypeEnv, a *Term, b *Term) bool {
cv := obj.Intersect(bv)
if obj.Len() == bv.Len() && bv.Len() == len(cv) {
for i := range cv {
if !unify2(env, cv[i][1], env.Get(cv[i][1]), cv[i][2], env.Get(cv[i][2])) {
if !unify2(env, cv[i][1], env.GetByValue(cv[i][1].Value), cv[i][2], env.GetByValue(cv[i][2].Value)) {
return false
}
}
return true
}
case Var:
return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false)
return unify1(env, a, types.A, false) && unify1(env, b, env.GetByValue(a.Value), false)
}
return false
}
@@ -565,7 +563,7 @@ func unify1(env *TypeEnv, term *Term, tpe types.Type, union bool) bool {
return unify1Array(env, v, tpe, union)
case types.Any:
if types.Compare(tpe, types.A) == 0 {
for i := 0; i < v.Len(); i++ {
for i := range v.Len() {
unify1(env, v.Elem(i), types.A, true)
}
return true
@@ -615,22 +613,22 @@ func unify1(env *TypeEnv, term *Term, tpe types.Type, union bool) bool {
}
return false
case Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension:
return unifies(env.Get(v), tpe)
return unifies(env.GetByValue(v), tpe)
case Var:
if !union {
if exist := env.Get(v); exist != nil {
if exist := env.GetByValue(v); exist != nil {
return unifies(exist, tpe)
}
env.tree.PutOne(term.Value, tpe)
} else {
env.tree.PutOne(term.Value, types.Or(env.Get(v), tpe))
env.tree.PutOne(term.Value, types.Or(env.GetByValue(v), tpe))
}
return true
default:
if !IsConstant(v) {
panic("unreachable")
}
return unifies(env.Get(term), tpe)
return unifies(env.GetByValue(term.Value), tpe)
}
}
@@ -638,7 +636,7 @@ func unify1Array(env *TypeEnv, val *Array, tpe *types.Array, union bool) bool {
if val.Len() != tpe.Len() && tpe.Dynamic() == nil {
return false
}
for i := 0; i < val.Len(); i++ {
for i := range val.Len() {
if !unify1(env, val.Elem(i), tpe.Select(i), union) {
return false
}
@@ -732,8 +730,8 @@ func (rc *refChecker) Visit(x interface{}) bool {
}
func (rc *refChecker) checkApply(curr *TypeEnv, ref Ref) *Error {
switch tpe := curr.Get(ref).(type) {
case *types.Function: // NOTE(sr): We don't support first-class functions, except for `with`.
if tpe, ok := curr.GetByRef(ref).(*types.Function); ok {
// NOTE(sr): We don't support first-class functions, except for `with`.
return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), len(ref)-1, tpe)
}
@@ -755,19 +753,19 @@ func (rc *refChecker) checkRef(curr *TypeEnv, node *typeTreeNode, ref Ref, idx i
switch head.Value.(type) {
case Var, String: // OK
default:
have := rc.env.Get(head.Value)
have := rc.env.GetByValue(head.Value)
return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, have, types.S, getOneOfForNode(node))
}
}
if v, ok := head.Value.(Var); ok && idx != 0 {
if _, ok := head.Value.(Var); ok && idx != 0 {
tpe := types.Keys(rc.env.getRefRecExtent(node))
if exist := rc.env.Get(v); exist != nil {
if exist := rc.env.GetByValue(head.Value); exist != nil {
if !unifies(tpe, exist) {
return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, tpe, getOneOfForNode(node))
}
} else {
rc.env.tree.PutOne(v, tpe)
rc.env.tree.PutOne(head.Value, tpe)
}
}
@@ -781,8 +779,8 @@ func (rc *refChecker) checkRef(curr *TypeEnv, node *typeTreeNode, ref Ref, idx i
case RootDocumentNames.Contains(ref[0]):
if idx != 0 {
node.Children().Iter(func(_, child util.T) bool {
_ = rc.checkRef(curr, child.(*typeTreeNode), ref, idx+1) // ignore error
node.Children().Iter(func(_ Value, child *typeTreeNode) bool {
_ = rc.checkRef(curr, child, ref, idx+1) // ignore error
return false
})
return nil
@@ -817,7 +815,7 @@ func (rc *refChecker) checkRefLeaf(tpe types.Type, ref Ref, idx int) *Error {
switch value := head.Value.(type) {
case Var:
if exist := rc.env.Get(value); exist != nil {
if exist := rc.env.GetByValue(value); exist != nil {
if !unifies(exist, keys) {
return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe))
}
@@ -948,7 +946,7 @@ func unifiesArrays(a, b *types.Array) bool {
func unifiesArraysStatic(a, b *types.Array) bool {
if a.Len() != 0 {
for i := 0; i < a.Len(); i++ {
for i := range a.Len() {
if !unifies(a.Select(i), b.Select(i)) {
return false
}
@@ -1003,7 +1001,7 @@ type ArgErrDetail struct {
func (d *ArgErrDetail) Lines() []string {
lines := make([]string, 2)
lines[0] = "have: " + formatArgs(d.Have)
lines[1] = "want: " + fmt.Sprint(d.Want)
lines[1] = "want: " + d.Want.String()
return lines
}
@@ -1069,7 +1067,7 @@ func (r *RefErrInvalidDetail) Lines() []string {
lines := []string{r.Ref.String()}
offset := len(r.Ref[:r.Pos].String()) + 1
pad := strings.Repeat(" ", offset)
lines = append(lines, fmt.Sprintf("%s^", pad))
lines = append(lines, pad+"^")
if r.Have != nil {
lines = append(lines, fmt.Sprintf("%shave (type): %v", pad, r.Have))
} else {
@@ -1127,8 +1125,8 @@ func newArgError(loc *Location, builtinName Ref, msg string, have []types.Type,
}
func getOneOfForNode(node *typeTreeNode) (result []Value) {
node.Children().Iter(func(k, _ util.T) bool {
result = append(result, k.(Value))
node.Children().Iter(func(k Value, _ *typeTreeNode) bool {
result = append(result, k)
return false
})

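The type checker migrates from `env.Get(x interface{})` to the typed `GetByValue`/`GetByRef` accessors, so hot paths skip the interface boxing and runtime type switch while the old signature survives as a deprecated shim. A compressed sketch of that API split's shape — the types here are stand-ins, not OPA's:

```go
package main

import "fmt"

type Value interface{ fmt.Stringer }

type Str string

func (s Str) String() string { return string(s) }

type Env struct{ types map[string]string }

// Get keeps the permissive signature for existing callers.
// Deprecated: use GetByValue, which avoids boxing and the type switch.
func (e *Env) Get(x interface{}) string {
	if v, ok := x.(Value); ok {
		return e.GetByValue(v)
	}
	panic("unreachable")
}

// GetByValue is the typed hot-path accessor.
func (e *Env) GetByValue(v Value) string { return e.types[v.String()] }

func main() {
	env := &Env{types: map[string]string{"x": "string"}}
	fmt.Println(env.Get(Str("x")))        // legacy path
	fmt.Println(env.GetByValue(Str("x"))) // direct typed path
}
```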

@@ -236,7 +236,7 @@ func Compare(a, b interface{}) int {
type termSlice []*Term
func (s termSlice) Less(i, j int) bool { return Compare(s[i].Value, s[j].Value) < 0 }
func (s termSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x }
func (s termSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s termSlice) Len() int { return len(s) }
func sortOrder(x interface{}) int {
@@ -300,7 +300,7 @@ func importsCompare(a, b []*Import) int {
if len(b) < minLen {
minLen = len(b)
}
for i := 0; i < minLen; i++ {
for i := range minLen {
if cmp := a[i].Compare(b[i]); cmp != 0 {
return cmp
}
@@ -319,7 +319,7 @@ func annotationsCompare(a, b []*Annotations) int {
if len(b) < minLen {
minLen = len(b)
}
for i := 0; i < minLen; i++ {
for i := range minLen {
if cmp := a[i].Compare(b[i]); cmp != 0 {
return cmp
}
@@ -338,7 +338,7 @@ func rulesCompare(a, b []*Rule) int {
if len(b) < minLen {
minLen = len(b)
}
for i := 0; i < minLen; i++ {
for i := range minLen {
if cmp := a[i].Compare(b[i]); cmp != 0 {
return cmp
}
@@ -357,7 +357,7 @@ func termSliceCompare(a, b []*Term) int {
if len(b) < minLen {
minLen = len(b)
}
for i := 0; i < minLen; i++ {
for i := range minLen {
if cmp := Compare(a[i], b[i]); cmp != 0 {
return cmp
}
@@ -375,7 +375,7 @@ func withSliceCompare(a, b []*With) int {
if len(b) < minLen {
minLen = len(b)
}
for i := 0; i < minLen; i++ {
for i := range minLen {
if cmp := Compare(a[i], b[i]); cmp != 0 {
return cmp
}
@@ -402,6 +402,10 @@ func TermValueCompare(a, b *Term) int {
return a.Value.Compare(b.Value)
}
func TermValueEqual(a, b *Term) bool {
return ValueEqual(a.Value, b.Value)
}
func ValueEqual(a, b Value) bool {
// TODO(ae): why doesn't this work the same?
//

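The `termSlice.Swap` rewrite uses Go's parallel assignment, which swaps two elements without the explicit temporary:

```go
package main

import "fmt"

func main() {
	s := []int{1, 2}
	s[0], s[1] = s[1], s[0] // both right-hand sides evaluate before assignment
	fmt.Println(s)          // [2 1]
}
```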

@@ -124,7 +124,7 @@ type Compiler struct {
localvargen *localVarGenerator
moduleLoader ModuleLoader
ruleIndices *util.HashMap
ruleIndices *util.HasherMap[Ref, RuleIndex]
stages []stage
maxErrs int
sorted []string // list of sorted module names
@@ -303,15 +303,10 @@ type stage struct {
func NewCompiler() *Compiler {
c := &Compiler{
Modules: map[string]*Module{},
RewrittenVars: map[Var]Var{},
Required: &Capabilities{},
ruleIndices: util.NewHashMap(func(a, b util.T) bool {
r1, r2 := a.(Ref), b.(Ref)
return r1.Equal(r2)
}, func(x util.T) int {
return x.(Ref).Hash()
}),
Modules: map[string]*Module{},
RewrittenVars: map[Var]Var{},
Required: &Capabilities{},
ruleIndices: util.NewHasherMap[Ref, RuleIndex](RefEqual),
maxErrs: CompileErrorLimitDefault,
after: map[string][]CompilerStageDefinition{},
unsafeBuiltinsMap: map[string]struct{}{},
@@ -825,7 +820,7 @@ func (c *Compiler) RuleIndex(path Ref) RuleIndex {
if !ok {
return nil
}
return r.(RuleIndex)
return r
}
// PassesTypeCheck determines whether the given body passes type checking
@@ -1114,7 +1109,7 @@ func (c *Compiler) checkRuleConflicts() {
for _, rule := range node.Values {
r := rule.(*Rule)
ref := r.Ref()
name = rw(ref.Copy()).String() // varRewriter operates in-place
name = rw(ref.CopyNonGround()).String() // varRewriter operates in-place
kinds[r.Head.RuleKind()] = struct{}{}
arities[len(r.Head.Args)] = struct{}{}
if r.Default {
@@ -1156,7 +1151,7 @@ func (c *Compiler) checkRuleConflicts() {
// data.p.q[r][s] { r := input.r; s := input.s }
// data.p[q].r.s { q := input.q }
if r.Ref().IsGround() && len(node.Children) > 0 {
if ref.IsGround() && len(node.Children) > 0 {
conflicts = node.flattenChildren()
}
@@ -1351,7 +1346,7 @@ func compileSchema(goSchema interface{}, allowNet []string) (*gojsonschema.Schem
if goSchema != nil {
refLoader = gojsonschema.NewGoLoader(goSchema)
} else {
return nil, fmt.Errorf("no schema as input to compile")
return nil, errors.New("no schema as input to compile")
}
schemasCompiled, err := sl.Compile(refLoader)
if err != nil {
@@ -1370,13 +1365,13 @@ func mergeSchemas(schemas ...*gojsonschema.SubSchema) (*gojsonschema.SubSchema,
if len(schemas[i].PropertiesChildren) > 0 {
if !schemas[i].Types.Contains("object") {
if err := schemas[i].Types.Add("object"); err != nil {
return nil, fmt.Errorf("unable to set the type in schemas")
return nil, errors.New("unable to set the type in schemas")
}
}
} else if len(schemas[i].ItemsChildren) > 0 {
if !schemas[i].Types.Contains("array") {
if err := schemas[i].Types.Add("array"); err != nil {
return nil, fmt.Errorf("unable to set the type in schemas")
return nil, errors.New("unable to set the type in schemas")
}
}
}
@@ -1388,12 +1383,12 @@ func mergeSchemas(schemas ...*gojsonschema.SubSchema) (*gojsonschema.SubSchema,
} else if result.Types.Contains("object") && len(result.PropertiesChildren) > 0 && schemas[i].Types.Contains("object") && len(schemas[i].PropertiesChildren) > 0 {
result.PropertiesChildren = append(result.PropertiesChildren, schemas[i].PropertiesChildren...)
} else if result.Types.Contains("array") && len(result.ItemsChildren) > 0 && schemas[i].Types.Contains("array") && len(schemas[i].ItemsChildren) > 0 {
for j := 0; j < len(schemas[i].ItemsChildren); j++ {
for j := range len(schemas[i].ItemsChildren) {
if len(result.ItemsChildren)-1 < j && !(len(schemas[i].ItemsChildren)-1 < j) {
result.ItemsChildren = append(result.ItemsChildren, schemas[i].ItemsChildren[j])
}
if result.ItemsChildren[j].Types.String() != schemas[i].ItemsChildren[j].Types.String() {
return nil, fmt.Errorf("unable to merge these schemas")
return nil, errors.New("unable to merge these schemas")
}
}
}
@@ -1482,7 +1477,7 @@ func (parser *schemaParser) parseSchemaWithPropertyKey(schema interface{}, prope
}
return parser.parseSchema(objectOrArrayResult)
} else if subSchema.Types.String() != allOfResult.Types.String() {
return nil, fmt.Errorf("unable to merge these schemas")
return nil, errors.New("unable to merge these schemas")
}
}
return parser.parseSchema(allOfResult)
@@ -1738,13 +1733,9 @@ func (c *Compiler) err(err *Error) {
c.Errors = append(c.Errors, err)
}
func (c *Compiler) getExports() *util.HashMap {
func (c *Compiler) getExports() *util.HasherMap[Ref, []Ref] {
rules := util.NewHashMap(func(a, b util.T) bool {
return a.(Ref).Equal(b.(Ref))
}, func(v util.T) int {
return v.(Ref).Hash()
})
rules := util.NewHasherMap[Ref, []Ref](RefEqual)
for _, name := range c.sorted {
mod := c.Modules[name]
@@ -1757,18 +1748,30 @@ func (c *Compiler) getExports() *util.HashMap {
return rules
}
func hashMapAdd(rules *util.HashMap, pkg, rule Ref) {
func refSliceEqual(a, b []Ref) bool {
if len(a) != len(b) {
return false
}
for i := range a {
if !a[i].Equal(b[i]) {
return false
}
}
return true
}
func hashMapAdd(rules *util.HasherMap[Ref, []Ref], pkg, rule Ref) {
prev, ok := rules.Get(pkg)
if !ok {
rules.Put(pkg, []Ref{rule})
return
}
for _, p := range prev.([]Ref) {
for _, p := range prev {
if p.Equal(rule) {
return
}
}
rules.Put(pkg, append(prev.([]Ref), rule))
rules.Put(pkg, append(prev, rule))
}
func (c *Compiler) GetAnnotationSet() *AnnotationSet {
@@ -1867,7 +1870,7 @@ func (c *Compiler) resolveAllRefs() {
var ruleExports []Ref
if x, ok := rules.Get(mod.Package.Path); ok {
ruleExports = x.([]Ref)
ruleExports = x
}
globals := getGlobals(mod.Package, ruleExports, mod.Imports)
@@ -3014,7 +3017,7 @@ func (qc *queryCompiler) resolveRefs(qctx *QueryContext, body Body) (Body, error
var ruleExports []Ref
rules := qc.compiler.getExports()
if exist, ok := rules.Get(pkg.Path); ok {
ruleExports = exist.([]Ref)
ruleExports = exist
}
globals = getGlobals(qctx.Package, ruleExports, qctx.Imports)
@@ -3542,10 +3545,8 @@ func (n *TreeNode) add(path Ref, rule *Rule) {
}
node.Children[sub.Key] = sub
node.Sorted = append(node.Sorted, sub.Key)
} else {
if rule != nil {
node.Values = append(node.Values, rule)
}
} else if rule != nil {
node.Values = append(node.Values, rule)
}
}
@@ -4231,6 +4232,9 @@ func (f *equalityFactory) Generate(other *Term) *Expr {
return expr
}
// TODO: Move to internal package?
const LocalVarPrefix = "__local"
type localVarGenerator struct {
exclude VarSet
suffix string
@@ -4255,7 +4259,7 @@ func newLocalVarGenerator(suffix string, node interface{}) *localVarGenerator {
func (l *localVarGenerator) Generate() Var {
for {
result := Var("__local" + l.suffix + strconv.Itoa(l.next) + "__")
result := Var(LocalVarPrefix + l.suffix + strconv.Itoa(l.next) + "__")
l.next++
if !l.exclude.Contains(result) {
return result
@@ -4411,7 +4415,7 @@ func resolveRefsInExpr(globals map[Var]*usedRef, ignore *declaredVarStack, expr
cpy.Terms = resolveRefsInTerm(globals, ignore, ts)
case []*Term:
buf := make([]*Term, len(ts))
for i := 0; i < len(ts); i++ {
for i := range ts {
buf[i] = resolveRefsInTerm(globals, ignore, ts[i])
}
cpy.Terms = buf
@@ -4516,7 +4520,7 @@ func resolveRefsInTerm(globals map[Var]*usedRef, ignore *declaredVarStack, term
func resolveRefsInTermArray(globals map[Var]*usedRef, ignore *declaredVarStack, terms *Array) []*Term {
cpy := make([]*Term, terms.Len())
for i := 0; i < terms.Len(); i++ {
for i := range terms.Len() {
cpy[i] = resolveRefsInTerm(globals, ignore, terms.Elem(i))
}
return cpy
@@ -4524,7 +4528,7 @@ func resolveRefsInTermArray(globals map[Var]*usedRef, ignore *declaredVarStack,
func resolveRefsInTermSlice(globals map[Var]*usedRef, ignore *declaredVarStack, terms []*Term) []*Term {
cpy := make([]*Term, len(terms))
for i := 0; i < len(terms); i++ {
for i := range terms {
cpy[i] = resolveRefsInTerm(globals, ignore, terms[i])
}
return cpy
@@ -4798,7 +4802,7 @@ func rewriteDynamicsOne(original *Expr, f *equalityFactory, term *Term, result B
connectGeneratedExprs(original, generated)
return result, result[len(result)-1].Operand(0)
case *Array:
for i := 0; i < v.Len(); i++ {
for i := range v.Len() {
var t *Term
result, t = rewriteDynamicsOne(original, f, v.Elem(i), result)
v.set(i, t)
@@ -4875,7 +4879,7 @@ func rewriteExprTermsInHead(gen *localVarGenerator, rule *Rule) {
func rewriteExprTermsInBody(gen *localVarGenerator, body Body) Body {
cpy := make(Body, 0, len(body))
for i := 0; i < len(body); i++ {
for i := range body {
for _, expr := range expandExpr(gen, body[i]) {
cpy.Append(expr)
}
@@ -5028,7 +5032,7 @@ func expandExprRef(gen *localVarGenerator, v []*Term) (support []*Expr) {
}
func expandExprTermArray(gen *localVarGenerator, arr *Array) (support []*Expr) {
for i := 0; i < arr.Len(); i++ {
for i := range arr.Len() {
extras, v := expandExprTerm(gen, arr.Elem(i))
arr.set(i, v)
support = append(support, extras...)
@@ -5710,7 +5714,7 @@ func validateWith(c *Compiler, unsafeBuiltinsMap map[string]struct{}, expr *Expr
case isDataRef(target):
ref := target.Value.(Ref)
targetNode := c.RuleTree
for i := 0; i < len(ref)-1; i++ {
for i := range len(ref) - 1 {
child := targetNode.Child(ref[i].Value)
if child == nil {
break

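compile.go replaces the untyped `util.HashMap` (with `util.T` keys/values and caller-supplied eq/hash closures) by a generic `util.HasherMap[Ref, RuleIndex]`, which is why the `.(RuleIndex)` and `.([]Ref)` assertions disappear from the lookups. The real `util.HasherMap` isn't shown in this diff, so the following is only a toy generic map under assumed semantics — keys hash themselves, equality is injected:

```go
package main

import "fmt"

// Hasher is the assumed key constraint: keys can hash themselves.
type Hasher interface{ Hash() int }

type entry[K Hasher, V any] struct {
	key K
	val V
}

// HasherMap is a toy bucketed map; OPA's util.HasherMap will differ.
type HasherMap[K Hasher, V any] struct {
	eq      func(K, K) bool
	buckets map[int][]entry[K, V]
}

func NewHasherMap[K Hasher, V any](eq func(K, K) bool) *HasherMap[K, V] {
	return &HasherMap[K, V]{eq: eq, buckets: map[int][]entry[K, V]{}}
}

func (m *HasherMap[K, V]) Put(k K, v V) {
	h := k.Hash()
	for i, e := range m.buckets[h] {
		if m.eq(e.key, k) {
			m.buckets[h][i].val = v
			return
		}
	}
	m.buckets[h] = append(m.buckets[h], entry[K, V]{k, v})
}

// Get returns a typed V — callers need no .(V) assertion.
func (m *HasherMap[K, V]) Get(k K) (V, bool) {
	for _, e := range m.buckets[k.Hash()] {
		if m.eq(e.key, k) {
			return e.val, true
		}
	}
	var zero V
	return zero, false
}

type Ref string

func (r Ref) Hash() int { return len(r) }

func main() {
	m := NewHasherMap[Ref, []Ref](func(a, b Ref) bool { return a == b })
	m.Put("data.p", []Ref{"data.p.q"})
	if v, ok := m.Get("data.p"); ok {
		fmt.Println(v) // [data.p.q]
	}
}
```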

@@ -29,29 +29,38 @@ func newTypeEnv(f func() *typeChecker) *TypeEnv {
}
// Get returns the type of x.
// Deprecated: Use GetByValue or GetByRef instead, as they are more efficient.
func (env *TypeEnv) Get(x interface{}) types.Type {
if term, ok := x.(*Term); ok {
x = term.Value
}
switch x := x.(type) {
if v, ok := x.(Value); ok {
return env.GetByValue(v)
}
panic("unreachable")
}
// GetByValue returns the type of v.
func (env *TypeEnv) GetByValue(v Value) types.Type {
switch x := v.(type) {
// Scalars.
case Null:
return types.NewNull()
return types.Nl
case Boolean:
return types.NewBoolean()
return types.B
case Number:
return types.NewNumber()
return types.N
case String:
return types.NewString()
return types.S
// Composites.
case *Array:
static := make([]types.Type, x.Len())
for i := range static {
tpe := env.Get(x.Elem(i).Value)
tpe := env.GetByValue(x.Elem(i).Value)
static[i] = tpe
}
@@ -63,7 +72,7 @@ func (env *TypeEnv) Get(x interface{}) types.Type {
return types.NewArray(static, dynamic)
case *lazyObj:
return env.Get(x.force())
return env.GetByValue(x.force())
case *object:
static := []*types.StaticProperty{}
var dynamic *types.DynamicProperty
@@ -72,14 +81,14 @@ func (env *TypeEnv) Get(x interface{}) types.Type {
if IsConstant(k.Value) {
kjson, err := JSON(k.Value)
if err == nil {
tpe := env.Get(v)
tpe := env.GetByValue(v.Value)
static = append(static, types.NewStaticProperty(kjson, tpe))
return
}
}
// Can't handle it as a static property, fallback to dynamic
typeK := env.Get(k.Value)
typeV := env.Get(v.Value)
typeK := env.GetByValue(k.Value)
typeV := env.GetByValue(v.Value)
dynamic = types.NewDynamicProperty(typeK, typeV)
})
@@ -92,8 +101,7 @@ func (env *TypeEnv) Get(x interface{}) types.Type {
case Set:
var tpe types.Type
x.Foreach(func(elem *Term) {
other := env.Get(elem.Value)
tpe = types.Or(tpe, other)
tpe = types.Or(tpe, env.GetByValue(elem.Value))
})
if tpe == nil {
tpe = types.A
@@ -104,47 +112,46 @@ func (env *TypeEnv) Get(x interface{}) types.Type {
case *ArrayComprehension:
cpy, errs := env.newChecker().CheckBody(env, x.Body)
if len(errs) == 0 {
return types.NewArray(nil, cpy.Get(x.Term))
return types.NewArray(nil, cpy.GetByValue(x.Term.Value))
}
return nil
case *ObjectComprehension:
cpy, errs := env.newChecker().CheckBody(env, x.Body)
if len(errs) == 0 {
return types.NewObject(nil, types.NewDynamicProperty(cpy.Get(x.Key), cpy.Get(x.Value)))
return types.NewObject(nil, types.NewDynamicProperty(cpy.GetByValue(x.Key.Value), cpy.GetByValue(x.Value.Value)))
}
return nil
case *SetComprehension:
cpy, errs := env.newChecker().CheckBody(env, x.Body)
if len(errs) == 0 {
return types.NewSet(cpy.Get(x.Term))
return types.NewSet(cpy.GetByValue(x.Term.Value))
}
return nil
// Refs.
case Ref:
return env.getRef(x)
return env.GetByRef(x)
// Vars.
case Var:
if node := env.tree.Child(x); node != nil {
if node := env.tree.Child(v); node != nil {
return node.Value()
}
if env.next != nil {
return env.next.Get(x)
return env.next.GetByValue(v)
}
return nil
// Calls.
case Call:
return nil
default:
panic("unreachable")
}
return env.Get(v)
}
func (env *TypeEnv) getRef(ref Ref) types.Type {
// GetByRef returns the type of the value referred to by ref.
func (env *TypeEnv) GetByRef(ref Ref) types.Type {
node := env.tree.Child(ref[0].Value)
if node == nil {
return env.getRefFallback(ref)
@@ -156,7 +163,7 @@ func (env *TypeEnv) getRef(ref Ref) types.Type {
func (env *TypeEnv) getRefFallback(ref Ref) types.Type {
if env.next != nil {
return env.next.Get(ref)
return env.next.GetByRef(ref)
}
if RootDocumentNames.Contains(ref[0]) {
@@ -200,10 +207,7 @@ func (env *TypeEnv) getRefRecExtent(node *typeTreeNode) types.Type {
children := []*types.StaticProperty{}
node.Children().Iter(func(k, v util.T) bool {
key := k.(Value)
child := v.(*typeTreeNode)
node.Children().Iter(func(key Value, child *typeTreeNode) bool {
tpe := env.getRefRecExtent(child)
// NOTE(sr): Converting to Golang-native types here is an extension of what we did
@@ -237,14 +241,14 @@ func (env *TypeEnv) wrap() *TypeEnv {
type typeTreeNode struct {
key Value
value types.Type
children *util.HashMap
children *util.HasherMap[Value, *typeTreeNode]
}
func newTypeTree() *typeTreeNode {
return &typeTreeNode{
key: nil,
value: nil,
children: util.NewHashMap(valueEq, valueHash),
children: util.NewHasherMap[Value, *typeTreeNode](ValueEqual),
}
}
@@ -253,10 +257,10 @@ func (n *typeTreeNode) Child(key Value) *typeTreeNode {
if !ok {
return nil
}
return value.(*typeTreeNode)
return value
}
func (n *typeTreeNode) Children() *util.HashMap {
func (n *typeTreeNode) Children() *util.HasherMap[Value, *typeTreeNode] {
return n.children
}
@@ -267,7 +271,7 @@ func (n *typeTreeNode) Get(path Ref) types.Type {
if !ok {
return nil
}
curr = child.(*typeTreeNode)
curr = child
}
return curr.Value()
}
@@ -285,7 +289,7 @@ func (n *typeTreeNode) PutOne(key Value, tpe types.Type) {
child.key = key
n.children.Put(key, child)
} else {
child = c.(*typeTreeNode)
child = c
}
child.value = tpe
@@ -302,7 +306,7 @@ func (n *typeTreeNode) Put(path Ref, tpe types.Type) {
child.key = term.Value
curr.children.Put(child.key, child)
} else {
child = c.(*typeTreeNode)
child = c
}
curr = child
@@ -324,8 +328,7 @@ func (n *typeTreeNode) Insert(path Ref, tpe types.Type, env *TypeEnv) {
child.key = term.Value
curr.children.Put(child.key, child)
} else {
child = c.(*typeTreeNode)
child = c
if child.value != nil && i+1 < len(path) {
// If child has an object value, merge the new value into it.
if o, ok := child.value.(*types.Object); ok {
@@ -426,13 +429,12 @@ func (n *typeTreeNode) String() string {
b.WriteString(v.String())
}
n.children.Iter(func(_, v util.T) bool {
if child, ok := v.(*typeTreeNode); ok {
b.WriteString("\n\t+ ")
s := child.String()
s = strings.ReplaceAll(s, "\n", "\n\t")
b.WriteString(s)
}
n.children.Iter(func(_ Value, child *typeTreeNode) bool {
b.WriteString("\n\t+ ")
s := child.String()
s = strings.ReplaceAll(s, "\n", "\n\t")
b.WriteString(s)
return false
})
@@ -444,7 +446,7 @@ func insertIntoObject(o *types.Object, path Ref, tpe types.Type, env *TypeEnv) (
return o, nil
}
key := env.Get(path[0].Value)
key := env.GetByValue(path[0].Value)
if len(path) == 1 {
var dynamicProps *types.DynamicProperty
@@ -472,8 +474,8 @@ func insertIntoObject(o *types.Object, path Ref, tpe types.Type, env *TypeEnv) (
func (n *typeTreeNode) Leafs() map[*Ref]types.Type {
leafs := map[*Ref]types.Type{}
n.children.Iter(func(_, v util.T) bool {
collectLeafs(v.(*typeTreeNode), nil, leafs)
n.children.Iter(func(_ Value, v *typeTreeNode) bool {
collectLeafs(v, nil, leafs)
return false
})
return leafs
@@ -485,8 +487,8 @@ func collectLeafs(n *typeTreeNode, path Ref, leafs map[*Ref]types.Type) {
leafs[&nPath] = n.Value()
return
}
n.children.Iter(func(_, v util.T) bool {
collectLeafs(v.(*typeTreeNode), nPath, leafs)
n.children.Iter(func(_ Value, v *typeTreeNode) bool {
collectLeafs(v, nPath, leafs)
return false
})
}


@@ -6,7 +6,8 @@ package ast
import (
"fmt"
"sort"
"slices"
"strconv"
"strings"
)
@@ -35,15 +36,12 @@ func (e Errors) Error() string {
// Sort sorts the error slice by location. If the locations are equal then the
// error message is compared.
func (e Errors) Sort() {
sort.Slice(e, func(i, j int) bool {
a := e[i]
b := e[j]
slices.SortFunc(e, func(a, b *Error) int {
if cmp := a.Location.Compare(b.Location); cmp != 0 {
return cmp < 0
return cmp
}
return a.Error() < b.Error()
return strings.Compare(a.Error(), b.Error())
})
}
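
sort.Slice takes a less(i, j int) bool closure that indexes back into the slice, while slices.SortFunc (Go 1.21+) takes a three-way comparator over the elements themselves, so multi-key ordering falls out of chained Compare calls. The same pattern as Errors.Sort, on a stand-in type:

package main

import (
	"fmt"
	"slices"
	"strings"
)

type entry struct{ file, msg string }

func main() {
	errs := []entry{{"b.rego", "y"}, {"a.rego", "z"}, {"a.rego", "x"}}
	slices.SortFunc(errs, func(a, b entry) int {
		// Primary key first; a non-zero comparison decides immediately,
		// a zero result falls through to the secondary key.
		if c := strings.Compare(a.file, b.file); c != 0 {
			return c
		}
		return strings.Compare(a.msg, b.msg)
	})
	fmt.Println(errs) // [{a.rego x} {a.rego z} {b.rego y}]
}
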
@@ -92,9 +90,9 @@ func (e *Error) Error() string {
if e.Location != nil {
if len(e.Location.File) > 0 {
prefix += e.Location.File + ":" + fmt.Sprint(e.Location.Row)
prefix += e.Location.File + ":" + strconv.Itoa(e.Location.Row)
} else {
prefix += fmt.Sprint(e.Location.Row) + ":" + fmt.Sprint(e.Location.Col)
prefix += strconv.Itoa(e.Location.Row) + ":" + strconv.Itoa(e.Location.Col)
}
}
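
fmt.Sprint of an int goes through reflection-based formatting and boxes the argument into an interface, while strconv.Itoa converts directly; the substitution is safe anywhere the operand is statically an int. For example:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	row, col := 12, 7
	// Equivalent output, but strconv.Itoa skips fmt's reflection machinery.
	slow := fmt.Sprint(row) + ":" + fmt.Sprint(col)
	fast := strconv.Itoa(row) + ":" + strconv.Itoa(col)
	fmt.Println(slow == fast) // true
}
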


@@ -6,6 +6,7 @@ package ast
import (
"fmt"
"slices"
"sort"
"strings"
"sync"
@@ -33,10 +34,10 @@ type RuleIndex interface {
// IndexResult contains the result of an index lookup.
type IndexResult struct {
Kind RuleKind
Rules []*Rule
Else map[*Rule][]*Rule
Default *Rule
Kind RuleKind
EarlyExit bool
OnlyGroundRefs bool
}
@@ -45,7 +46,6 @@ type IndexResult struct {
func NewIndexResult(kind RuleKind) *IndexResult {
return &IndexResult{
Kind: kind,
Else: map[*Rule][]*Rule{},
}
}
@@ -55,7 +55,6 @@ func (ir *IndexResult) Empty() bool {
}
type baseDocEqIndex struct {
skipIndexing Set
isVirtual func(Ref) bool
root *trieNode
defaultRule *Rule
@@ -64,15 +63,17 @@ type baseDocEqIndex struct {
}
var (
equalityRef = Equality.Ref()
equalRef = Equal.Ref()
globMatchRef = GlobMatch.Ref()
internalPrintRef = InternalPrint.Ref()
equalityRef = Equality.Ref()
equalRef = Equal.Ref()
globMatchRef = GlobMatch.Ref()
internalPrintRef = InternalPrint.Ref()
internalTestCaseRef = InternalTestCase.Ref()
skipIndexing = NewSet(NewTerm(internalPrintRef), NewTerm(internalTestCaseRef))
)
func newBaseDocEqIndex(isVirtual func(Ref) bool) *baseDocEqIndex {
return &baseDocEqIndex{
skipIndexing: NewSet(NewTerm(internalPrintRef)),
isVirtual: isVirtual,
root: newTrieNodeImpl(),
onlyGroundRefs: true,
@@ -98,15 +99,15 @@ func (i *baseDocEqIndex) Build(rules []*Rule) bool {
i.onlyGroundRefs = rule.Head.Reference.IsGround()
}
var skip bool
for _, expr := range rule.Body {
if op := expr.OperatorTerm(); op != nil && i.skipIndexing.Contains(op) {
for i := range rule.Body {
if op := rule.Body[i].OperatorTerm(); op != nil && skipIndexing.Contains(op) {
skip = true
break
}
}
if !skip {
for _, expr := range rule.Body {
indices.Update(rule, expr)
for i := range rule.Body {
indices.Update(rule, rule.Body[i])
}
}
return false
@@ -143,7 +144,8 @@ func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) {
defer func() {
clear(tr.unordered)
tr.ordering = tr.ordering[:0]
tr.values.clear()
tr.multiple = false
tr.exist = nil
ttrPool.Put(tr)
}()
@@ -153,20 +155,33 @@ func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) {
return nil, err
}
result := NewIndexResult(i.kind)
result := IndexResultPool.Get()
result.Kind = i.kind
result.Default = i.defaultRule
result.OnlyGroundRefs = i.onlyGroundRefs
result.Rules = make([]*Rule, 0, len(tr.ordering))
if result.Rules == nil {
result.Rules = make([]*Rule, 0, len(tr.ordering))
} else {
result.Rules = result.Rules[:0]
}
clear(result.Else)
for _, pos := range tr.ordering {
sort.Slice(tr.unordered[pos], func(i, j int) bool {
return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1]
slices.SortFunc(tr.unordered[pos], func(a, b *ruleNode) int {
return a.prio[1] - b.prio[1]
})
nodes := tr.unordered[pos]
root := nodes[0].rule
result.Rules = append(result.Rules, root)
if len(nodes) > 1 {
if result.Else == nil {
result.Else = map[*Rule][]*Rule{}
}
result.Else[root] = make([]*Rule, len(nodes)-1)
for i := 1; i < len(nodes); i++ {
result.Else[root][i-1] = nodes[i].rule
@@ -174,7 +189,26 @@ func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) {
}
}
result.EarlyExit = tr.values.Len() == 1 && tr.values.Slice()[0].IsGround()
if !tr.multiple {
// even when the indexer hasn't seen multiple values, the rule itself could be one
// where early exit shouldn't be applied.
var lastValue Value
for i := range result.Rules {
if result.Rules[i].Head.DocKind() != CompleteDoc {
tr.multiple = true
break
}
if result.Rules[i].Head.Value != nil {
if lastValue != nil && !ValueEqual(lastValue, result.Rules[i].Head.Value.Value) {
tr.multiple = true
break
}
lastValue = result.Rules[i].Head.Value.Value
}
}
}
result.EarlyExit = !tr.multiple
return result, nil
}
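
Lookup now draws its *IndexResult from a pool (IndexResultPool, defined later in this diff), so every mutable field has to be reset on the way in: slices are resliced to length zero to keep their backing arrays, and the Else map is emptied with the clear builtin. A generic sketch of that reuse pattern, independent of OPA's types:

package main

import (
	"fmt"
	"sync"
)

type result struct {
	rules []string
	els   map[string][]string
}

var pool = sync.Pool{New: func() any { return &result{} }}

func lookup(names []string) *result {
	r := pool.Get().(*result)
	// Reset pooled state: reslicing keeps the backing array's capacity,
	// and clear (Go 1.21+) empties the map in place (no-op on nil).
	r.rules = r.rules[:0]
	clear(r.els)
	r.rules = append(r.rules, names...)
	return r
}

func main() {
	r := lookup([]string{"p", "q"})
	fmt.Println(r.rules) // [p q]
	pool.Put(r)          // hand the object back once the caller is done
}

The lazy `if result.Else == nil` allocation above serves the same goal: rules without else chains never pay for the map at all.
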
@@ -192,13 +226,17 @@ func (i *baseDocEqIndex) AllRules(_ ValueResolver) (*IndexResult, error) {
result.Rules = make([]*Rule, 0, len(tr.ordering))
for _, pos := range tr.ordering {
sort.Slice(tr.unordered[pos], func(i, j int) bool {
return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1]
slices.SortFunc(tr.unordered[pos], func(a, b *ruleNode) int {
return a.prio[1] - b.prio[1]
})
nodes := tr.unordered[pos]
root := nodes[0].rule
result.Rules = append(result.Rules, root)
if len(nodes) > 1 {
if result.Else == nil {
result.Else = map[*Rule][]*Rule{}
}
result.Else[root] = make([]*Rule, len(nodes)-1)
for i := 1; i < len(nodes); i++ {
result.Else[root][i-1] = nodes[i].rule
@@ -206,7 +244,7 @@ func (i *baseDocEqIndex) AllRules(_ ValueResolver) (*IndexResult, error) {
}
}
result.EarlyExit = tr.values.Len() == 1 && tr.values.Slice()[0].IsGround()
result.EarlyExit = !tr.multiple
return result, nil
}
@@ -235,7 +273,7 @@ type refindex struct {
type refindices struct {
isVirtual func(Ref) bool
rules map[*Rule][]*refindex
frequency *util.HashMap
frequency *util.HasherMap[Ref, int]
sorted []Ref
}
@@ -243,12 +281,7 @@ func newrefindices(isVirtual func(Ref) bool) *refindices {
return &refindices{
isVirtual: isVirtual,
rules: map[*Rule][]*refindex{},
frequency: util.NewHashMap(func(a, b util.T) bool {
r1, r2 := a.(Ref), b.(Ref)
return r1.Equal(r2)
}, func(x util.T) int {
return x.(Ref).Hash()
}),
frequency: util.NewHasherMap[Ref, int](RefEqual),
}
}
@@ -296,9 +329,9 @@ func (i *refindices) Sorted() []Ref {
counts := make([]int, 0, i.frequency.Len())
i.sorted = make([]Ref, 0, i.frequency.Len())
i.frequency.Iter(func(k, v util.T) bool {
counts = append(counts, v.(int))
i.sorted = append(i.sorted, k.(Ref))
i.frequency.Iter(func(k Ref, v int) bool {
counts = append(counts, v)
i.sorted = append(i.sorted, k)
return false
})
@@ -399,7 +432,7 @@ func (i *refindices) insert(rule *Rule, index *refindex) {
count = 0
}
i.frequency.Put(index.Ref, count.(int)+1)
i.frequency.Put(index.Ref, count+1)
for pos, other := range i.rules[rule] {
if other.Ref.Equal(index.Ref) {
@@ -427,7 +460,8 @@ type trieWalker interface {
type trieTraversalResult struct {
unordered map[int][]*ruleNode
ordering []int
values *set
exist *Term
multiple bool
}
var ttrPool = sync.Pool{
@@ -439,10 +473,6 @@ var ttrPool = sync.Pool{
func newTrieTraversalResult() *trieTraversalResult {
return &trieTraversalResult{
unordered: map[int][]*ruleNode{},
// Number 3 is arbitrary, but seemed to be the most common number of values
// stored when benchmarking the trie traversal against a large policy library
// (Regal).
values: newset(3),
}
}
@@ -455,21 +485,30 @@ func (tr *trieTraversalResult) Add(t *trieNode) {
}
tr.unordered[root] = append(nodes, node)
}
if t.values != nil {
t.values.Foreach(tr.values.insertNoGuard)
if t.multiple {
tr.multiple = true
}
if tr.multiple || t.value == nil {
return
}
if t.value.IsGround() && tr.exist == nil || tr.exist.Equal(t.value) {
tr.exist = t.value
return
}
tr.multiple = true
}
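
The traversal result previously accumulated every rule value into a set and then asked set.Len() == 1; it now tracks only the first ground value seen (exist) plus a multiple flag that latches once a second distinct value shows up, which is O(1) space. The same idiom in isolation (simplified: the real code leans on *Term.Equal tolerating a nil receiver):

package main

import "fmt"

// tracker answers "did the traversal see exactly one ground value?" in O(1)
// space: exist remembers the first ground value, multiple latches on conflict.
type tracker struct {
	exist    string
	seen     bool
	multiple bool
}

func (t *tracker) add(v string, ground bool) {
	if t.multiple {
		return
	}
	if ground && (!t.seen || t.exist == v) {
		t.exist, t.seen = v, true
		return
	}
	t.multiple = true
}

func main() {
	t := &tracker{}
	t.add("allow", true)
	t.add("allow", true)
	fmt.Println(!t.multiple) // true: early exit is safe
}
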
type trieNode struct {
ref Ref
values Set
mappers []*valueMapper
next *trieNode
any *trieNode
undefined *trieNode
scalars *util.HashMap
scalars *util.HasherMap[Value, *trieNode]
array *trieNode
rules []*ruleNode
value *Term
multiple bool
}
func (node *trieNode) String() string {
@@ -492,9 +531,7 @@ func (node *trieNode) String() string {
}
if node.scalars.Len() > 0 {
buf := make([]string, 0, node.scalars.Len())
node.scalars.Iter(func(k, v util.T) bool {
key := k.(Value)
val := v.(*trieNode)
node.scalars.Iter(func(key Value, val *trieNode) bool {
buf = append(buf, fmt.Sprintf("scalar(%v):%p", key, val))
return false
})
@@ -507,10 +544,8 @@ func (node *trieNode) String() string {
if len(node.mappers) > 0 {
flags = append(flags, fmt.Sprintf("%d mapper(s)", len(node.mappers)))
}
if node.values != nil {
if l := node.values.Len(); l > 0 {
flags = append(flags, fmt.Sprintf("%d value(s)", l))
}
if node.value != nil {
flags = append(flags, "value exists")
}
return strings.Join(flags, " ")
}
@@ -518,13 +553,12 @@ func (node *trieNode) String() string {
func (node *trieNode) append(prio [2]int, rule *Rule) {
node.rules = append(node.rules, &ruleNode{prio, rule})
if node.values != nil && rule.Head.Value != nil {
node.values.Add(rule.Head.Value)
return
if node.value != nil && rule.Head.Value != nil && !node.value.Equal(rule.Head.Value) {
node.multiple = true
}
if node.values == nil && rule.Head.DocKind() == CompleteDoc {
node.values = NewSet(rule.Head.Value)
if node.value == nil && rule.Head.DocKind() == CompleteDoc {
node.value = rule.Head.Value
}
}
@@ -535,7 +569,7 @@ type ruleNode struct {
func newTrieNodeImpl() *trieNode {
return &trieNode{
scalars: util.NewHashMap(valueEq, valueHash),
scalars: util.NewHasherMap[Value, *trieNode](ValueEqual),
}
}
@@ -551,8 +585,7 @@ func (node *trieNode) Do(walker trieWalker) {
node.undefined.Do(next)
}
node.scalars.Iter(func(_, v util.T) bool {
child := v.(*trieNode)
node.scalars.Iter(func(_ Value, child *trieNode) bool {
child.Do(next)
return false
})
@@ -618,7 +651,7 @@ func (node *trieNode) insertValue(value Value) *trieNode {
child = newTrieNodeImpl()
node.scalars.Put(value, child)
}
return child.(*trieNode)
return child
case *Array:
if node.array == nil {
node.array = newTrieNodeImpl()
@@ -647,7 +680,7 @@ func (node *trieNode) insertArray(arr *Array) *trieNode {
child = newTrieNodeImpl()
node.scalars.Put(head, child)
}
return child.(*trieNode).insertArray(arr.Slice(1, -1))
return child.insertArray(arr.Slice(1, -1))
}
panic("illegal value")
@@ -712,7 +745,7 @@ func (node *trieNode) traverseValue(resolver ValueResolver, tr *trieTraversalRes
if !ok {
return nil
}
return child.(*trieNode).Traverse(resolver, tr)
return child.Traverse(resolver, tr)
}
return nil
@@ -737,11 +770,16 @@ func (node *trieNode) traverseArray(resolver ValueResolver, tr *trieTraversalRes
return nil
}
child, ok := node.scalars.Get(head)
if !ok {
return nil
switch head := head.(type) {
case Null, Boolean, Number, String:
child, ok := node.scalars.Get(head)
if !ok {
return nil
}
return child.traverseArray(resolver, tr, arr.Slice(1, -1))
}
return child.(*trieNode).traverseArray(resolver, tr, arr.Slice(1, -1))
panic("illegal value")
}
func (node *trieNode) traverseUnknown(resolver ValueResolver, tr *trieTraversalResult) error {
@@ -767,12 +805,8 @@ func (node *trieNode) traverseUnknown(resolver ValueResolver, tr *trieTraversalR
}
var iterErr error
node.scalars.Iter(func(_, v util.T) bool {
child := v.(*trieNode)
if iterErr = child.traverseUnknown(resolver, tr); iterErr != nil {
return true
}
return false
node.scalars.Iter(func(_ Value, child *trieNode) bool {
return child.traverseUnknown(resolver, tr) != nil
})
return iterErr
@@ -786,7 +820,7 @@ func eqOperandsToRefAndValue(isVirtual func(Ref) bool, args []*Term, a, b *Term)
switch v := a.Value.(type) {
case Var:
for i, arg := range args {
if arg.Value.Compare(v) == 0 {
if arg.Value.Compare(a.Value) == 0 {
if bval, ok := indexValue(b); ok {
return &refindex{Ref: Ref{FunctionArgRootDocument, InternedIntNumberTerm(i)}, Value: bval}, true
}
@@ -849,7 +883,7 @@ func globDelimiterToString(delim *Term) (string, bool) {
if arr.Len() == 0 {
result = "."
} else {
for i := 0; i < arr.Len(); i++ {
for i := range arr.Len() {
term := arr.Elem(i)
s, ok := term.Value.(String)
if !ok {
@@ -862,6 +896,8 @@ func globDelimiterToString(delim *Term) (string, bool) {
return result, true
}
var globwildcard = VarTerm("$globwildcard")
func globPatternToArray(pattern *Term, delim string) *Term {
s, ok := pattern.Value.(String)
@@ -874,7 +910,7 @@ func globPatternToArray(pattern *Term, delim string) *Term {
for i := range parts {
if parts[i] == "*" {
arr[i] = VarTerm("$globwildcard")
arr[i] = globwildcard
} else {
var escaped bool
for _, c := range parts[i] {


@@ -9,9 +9,9 @@ import (
"io"
"unicode"
"unicode/utf8"
"unsafe"
"github.com/open-policy-agent/opa/v1/ast/internal/tokens"
"github.com/open-policy-agent/opa/v1/util"
)
const bom = 0xFEFF
@@ -101,8 +101,8 @@ func (s *Scanner) Keyword(lit string) tokens.Token {
func (s *Scanner) AddKeyword(kw string, tok tokens.Token) {
s.keywords[kw] = tok
switch tok {
case tokens.Every: // importing 'every' means also importing 'in'
if tok == tokens.Every {
// importing 'every' means also importing 'in'
s.keywords["in"] = tokens.In
}
}
@@ -165,7 +165,21 @@ func (s *Scanner) Scan() (tokens.Token, Position, string, []Error) {
var lit string
if s.isWhitespace() {
lit = string(s.curr)
// string(rune) is an unnecessary heap allocation in this case as we know all
// the possible whitespace values, and can simply translate to string ourselves
switch s.curr {
case ' ':
lit = " "
case '\t':
lit = "\t"
case '\n':
lit = "\n"
case '\r':
lit = "\r"
default:
// unreachable unless isWhitespace changes
lit = string(s.curr)
}
s.next()
tok = tokens.Whitespace
} else if isLetter(s.curr) {
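
string(s.curr) for a rune the scanner already knows to be one of four whitespace characters would heap-allocate a fresh one-byte string; returning a constant string literal instead costs nothing, since constants live in static memory. A self-contained version of that mapping, with an allocation check:

package main

import (
	"fmt"
	"testing"
)

// whitespaceLit maps a known whitespace rune to a constant string, avoiding
// the allocation string(r) performs for a non-constant rune.
func whitespaceLit(r rune) string {
	switch r {
	case ' ':
		return " "
	case '\t':
		return "\t"
	case '\n':
		return "\n"
	case '\r':
		return "\r"
	}
	return string(r) // fallback, allocates
}

func main() {
	allocs := testing.AllocsPerRun(1000, func() { _ = whitespaceLit('\n') })
	fmt.Println(allocs) // 0
}
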
@@ -272,7 +286,7 @@ func (s *Scanner) scanIdentifier() string {
s.next()
}
return byteSliceToString(s.bs[start : s.offset-1])
return util.ByteSliceToString(s.bs[start : s.offset-1])
}
func (s *Scanner) scanNumber() string {
@@ -323,7 +337,7 @@ func (s *Scanner) scanNumber() string {
}
}
return byteSliceToString(s.bs[start : s.offset-1])
return util.ByteSliceToString(s.bs[start : s.offset-1])
}
func (s *Scanner) scanString() string {
@@ -357,7 +371,7 @@ func (s *Scanner) scanString() string {
}
}
return byteSliceToString(s.bs[start : s.offset-1])
return util.ByteSliceToString(s.bs[start : s.offset-1])
}
func (s *Scanner) scanRawString() string {
@@ -373,7 +387,7 @@ func (s *Scanner) scanRawString() string {
}
}
return byteSliceToString(s.bs[start : s.offset-1])
return util.ByteSliceToString(s.bs[start : s.offset-1])
}
func (s *Scanner) scanComment() string {
@@ -384,10 +398,10 @@ func (s *Scanner) scanComment() string {
end := s.offset - 1
// Trim carriage returns that precede the newline
if s.offset > 1 && s.bs[s.offset-2] == '\r' {
end = end - 1
end -= 1
}
return byteSliceToString(s.bs[start:end])
return util.ByteSliceToString(s.bs[start:end])
}
func (s *Scanner) next() {
@@ -457,7 +471,3 @@ func (s *Scanner) error(reason string) {
Col: s.col,
}, Message: reason})
}
func byteSliceToString(bs []byte) string {
return unsafe.String(unsafe.SliceData(bs), len(bs))
}
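
The deleted byteSliceToString helper moved into the shared util package as util.ByteSliceToString; the conversion itself is the standard zero-copy reinterpretation and is only sound while the caller never mutates the byte slice afterwards, because the string aliases the same memory. A sketch demonstrating both the conversion and the hazard:

package main

import (
	"fmt"
	"unsafe"
)

// byteSliceToString reinterprets bs as a string without copying.
// Contract: bs must not be modified for as long as the string is alive.
func byteSliceToString(bs []byte) string {
	return unsafe.String(unsafe.SliceData(bs), len(bs))
}

func main() {
	buf := []byte("package example")
	s := byteSliceToString(buf)
	fmt.Println(s) // package example

	buf[0] = 'P'
	fmt.Println(s) // Package example: the string sees the mutation!
}
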


@@ -4,12 +4,14 @@
package tokens
import "maps"
// Token represents a single Rego source code token
// for use by the Parser.
type Token int
type Token uint8
func (t Token) String() string {
if t < 0 || int(t) >= len(strings) {
if int(t) >= len(strings) {
return "unknown"
}
return strings[t]
@@ -137,11 +139,7 @@ var keywords = map[string]Token{
// Keywords returns a copy of the default string -> Token keyword map.
func Keywords() map[string]Token {
cpy := make(map[string]Token, len(keywords))
for k, v := range keywords {
cpy[k] = v
}
return cpy
return maps.Clone(keywords)
}
// IsKeyword returns if a token is a keyword
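
Two small cleanups here: Keywords uses maps.Clone (Go 1.21+) instead of a manual copy loop, and Token shrinks from int to uint8, which makes the old t < 0 bound check impossible by construction. maps.Clone is a shallow copy, which is exactly right for a map of value types:

package main

import (
	"fmt"
	"maps"
)

func main() {
	keywords := map[string]int{"if": 1, "in": 2}
	cpy := maps.Clone(keywords)
	cpy["every"] = 3 // mutating the clone leaves the original intact
	fmt.Println(len(keywords), len(cpy)) // 2 3
}
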


@@ -17,6 +17,9 @@ var (
minusOneTerm = &Term{Value: Number("-1")}
InternedNullTerm = &Term{Value: Null{}}
InternedEmptyString = StringTerm("")
InternedEmptyObject = ObjectTerm()
)
// InternedBooleanTerm returns an interned term with the given boolean value.
@@ -60,6 +63,29 @@ func HasInternedIntNumberTerm(i int) bool {
return i >= -1 && i < len(intNumberTerms)
}
func InternedStringTerm(s string) *Term {
if term, ok := internedStringTerms[s]; ok {
return term
}
return StringTerm(s)
}
var internedStringTerms = map[string]*Term{
"": InternedEmptyString,
"0": StringTerm("0"),
"1": StringTerm("1"),
"2": StringTerm("2"),
"3": StringTerm("3"),
"4": StringTerm("4"),
"5": StringTerm("5"),
"6": StringTerm("6"),
"7": StringTerm("7"),
"8": StringTerm("8"),
"9": StringTerm("9"),
"10": StringTerm("10"),
}
var stringToIntNumberTermMap = map[string]*Term{
"-1": minusOneTerm,
"0": intNumberTerms[0],
@@ -1092,7 +1118,3 @@ var intNumberTerms = [...]*Term{
{Value: Number("511")},
{Value: Number("512")},
}
var InternedEmptyString = StringTerm("")
var InternedEmptyObject = ObjectTerm()
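
Interning hands out one shared, immutable *Term per common string, so hot paths stop allocating fresh terms for "", "0", "1", and so on, while anything outside the table still falls back to a normal allocation. The pattern in miniature (illustrative types, not OPA's real Term):

package main

import "fmt"

type Term struct{ Value string }

// interned holds one shared instance per common string. Callers must treat
// these as immutable, since every caller receives the same pointer.
var interned = map[string]*Term{
	"":  {Value: ""},
	"0": {Value: "0"},
	"1": {Value: "1"},
}

func internedStringTerm(s string) *Term {
	if t, ok := interned[s]; ok {
		return t
	}
	return &Term{Value: s} // not in the table: allocate as before
}

func main() {
	fmt.Println(internedStringTerm("0") == internedStringTerm("0"))   // true: same pointer
	fmt.Println(internedStringTerm("42") == internedStringTerm("42")) // false: fresh each time
}
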


@@ -13,15 +13,14 @@ import (
// ValueMap represents a key/value map between AST term values. Any type of term
// can be used as a key in the map.
type ValueMap struct {
hashMap *util.HashMap
hashMap *util.TypedHashMap[Value, Value]
}
// NewValueMap returns a new ValueMap.
func NewValueMap() *ValueMap {
vs := &ValueMap{
hashMap: util.NewHashMap(valueEq, valueHash),
return &ValueMap{
hashMap: util.NewTypedHashMap(ValueEqual, ValueEqual, Value.Hash, Value.Hash, nil),
}
return vs
}
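
util.NewTypedHashMap takes equality and hash functions for both keys and values; Value.Hash is a method expression, i.e. the method lifted into a plain func(Value) int with the receiver as its first argument. That device in isolation:

package main

import "fmt"

type Value string

func (v Value) Hash() int { return len(v) }

func main() {
	// Method expression: the receiver becomes the first parameter.
	var hash func(Value) int = Value.Hash
	fmt.Println(hash("abc")) // 3
}
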
// MarshalJSON provides a custom marshaller for the ValueMap which
@@ -39,16 +38,6 @@ func (vs *ValueMap) MarshalJSON() ([]byte, error) {
return json.Marshal(tmp)
}
// Copy returns a shallow copy of the ValueMap.
func (vs *ValueMap) Copy() *ValueMap {
if vs == nil {
return nil
}
cpy := NewValueMap()
cpy.hashMap = vs.hashMap.Copy()
return cpy
}
// Equal returns true if this ValueMap equals the other.
func (vs *ValueMap) Equal(other *ValueMap) bool {
if vs == nil {
@@ -72,7 +61,7 @@ func (vs *ValueMap) Len() int {
func (vs *ValueMap) Get(k Value) Value {
if vs != nil {
if v, ok := vs.hashMap.Get(k); ok {
return v.(Value)
return v
}
}
return nil
@@ -92,11 +81,7 @@ func (vs *ValueMap) Iter(iter func(Value, Value) bool) bool {
if vs == nil {
return false
}
return vs.hashMap.Iter(func(kt, vt util.T) bool {
k := kt.(Value)
v := vt.(Value)
return iter(k, v)
})
return vs.hashMap.Iter(iter)
}
// Put inserts a key k into the map with value v.
@@ -121,13 +106,3 @@ func (vs *ValueMap) String() string {
}
return vs.hashMap.String()
}
func valueHash(v util.T) int {
return v.(Value).Hash()
}
func valueEq(a, b util.T) bool {
av := a.(Value)
bv := b.(Value)
return av.Compare(bv) == 0
}


@@ -7,6 +7,7 @@ package ast
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"math/big"
@@ -133,7 +134,7 @@ func (c parsedTermCache) String() string {
s.WriteRune('{')
var e *parsedTermCacheItem
for e = c.m; e != nil; e = e.next {
s.WriteString(fmt.Sprintf("%v", e))
s.WriteString(e.String())
}
s.WriteRune('}')
return s.String()
@@ -517,7 +518,7 @@ func parseAnnotations(comments []*Comment) ([]*Annotations, Errors) {
var curr *metadataParser
var blocks []*metadataParser
for i := 0; i < len(comments); i++ {
for i := range comments {
if curr != nil {
if comments[i].Location.Row == comments[i-1].Location.Row+1 && comments[i].Location.Col == 1 {
curr.Append(comments[i])
@@ -725,7 +726,9 @@ func (p *Parser) parseRules() []*Rule {
// p[x] if ... becomes a single-value rule p[x]
if hasIf && !usesContains && len(rule.Head.Ref()) == 2 {
if !rule.Head.Ref()[1].IsGround() && len(rule.Head.Args) == 0 {
v := rule.Head.Ref()[1]
_, isRef := v.Value.(Ref)
if (!v.IsGround() || isRef) && len(rule.Head.Args) == 0 {
rule.Head.Key = rule.Head.Ref()[1]
}
@@ -1638,6 +1641,10 @@ func (p *Parser) parseNumber() *Term {
func (p *Parser) parseString() *Term {
if p.s.lit[0] == '"' {
if p.s.lit == "\"\"" {
return NewTerm(InternedEmptyString.Value).SetLocation(p.s.Loc())
}
var s string
err := json.Unmarshal([]byte(p.s.lit), &s)
if err != nil {
@@ -2060,7 +2067,7 @@ func (p *Parser) parseTermPairList(end tokens.Token, r [][2]*Term) [][2]*Term {
func (p *Parser) parseTermOp(values ...tokens.Token) *Term {
for i := range values {
if p.s.tok == values[i] {
r := RefTerm(VarTerm(fmt.Sprint(p.s.tok)).SetLocation(p.s.Loc())).SetLocation(p.s.Loc())
r := RefTerm(VarTerm(p.s.tok.String()).SetLocation(p.s.Loc())).SetLocation(p.s.Loc())
p.scan()
return r
}
@@ -2354,7 +2361,7 @@ func (b *metadataParser) Parse() (*Annotations, error) {
var raw rawAnnotation
if len(bytes.TrimSpace(b.buf.Bytes())) == 0 {
return nil, fmt.Errorf("expected METADATA block, found whitespace")
return nil, errors.New("expected METADATA block, found whitespace")
}
if err := yaml.Unmarshal(b.buf.Bytes(), &raw); err != nil {
@@ -2403,7 +2410,7 @@ func (b *metadataParser) Parse() (*Annotations, error) {
a.Path, err = ParseRef(k)
if err != nil {
return nil, fmt.Errorf("invalid document reference")
return nil, errors.New("invalid document reference")
}
switch v := v.(type) {
@@ -2503,7 +2510,7 @@ func unwrapPair(pair map[string]interface{}) (string, interface{}) {
return "", nil
}
var errInvalidSchemaRef = fmt.Errorf("invalid schema reference")
var errInvalidSchemaRef = errors.New("invalid schema reference")
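
The blanket fmt.Errorf → errors.New substitution repeated through the rest of this bump applies wherever the message contains no format verbs: errors.New skips the formatting pass, and for package-level sentinels like errInvalidSchemaRef it yields a stable value that works with errors.Is. A sketch of both halves:

package main

import (
	"errors"
	"fmt"
)

var errInvalid = errors.New("invalid schema reference") // sentinel, comparable via errors.Is

func check(ok bool) error {
	if !ok {
		return errInvalid
	}
	return nil
}

func main() {
	err := fmt.Errorf("loading schema: %w", check(false)) // fmt.Errorf only where wrapping/formatting is needed
	fmt.Println(errors.Is(err, errInvalid))               // true
}
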
// NOTE(tsandall): 'schema' is not registered as a root because it's not
// supported by the compiler or evaluator today. Once we fix that, we can remove
@@ -2542,7 +2549,7 @@ func parseRelatedResource(rr interface{}) (*RelatedResourceAnnotation, error) {
}
return &RelatedResourceAnnotation{Ref: *u}, nil
}
return nil, fmt.Errorf("ref URL may not be empty string")
return nil, errors.New("ref URL may not be empty string")
case map[string]interface{}:
description := strings.TrimSpace(getSafeString(rr, "description"))
ref := strings.TrimSpace(getSafeString(rr, "ref"))
@@ -2553,10 +2560,10 @@ func parseRelatedResource(rr interface{}) (*RelatedResourceAnnotation, error) {
}
return &RelatedResourceAnnotation{Description: description, Ref: *u}, nil
}
return nil, fmt.Errorf("'ref' value required in object")
return nil, errors.New("'ref' value required in object")
}
return nil, fmt.Errorf("invalid value type, must be string or map")
return nil, errors.New("invalid value type, must be string or map")
}
func parseAuthor(a interface{}) (*AuthorAnnotation, error) {
@@ -2574,10 +2581,10 @@ func parseAuthor(a interface{}) (*AuthorAnnotation, error) {
if len(name) > 0 || len(email) > 0 {
return &AuthorAnnotation{name, email}, nil
}
return nil, fmt.Errorf("'name' and/or 'email' values required in object")
return nil, errors.New("'name' and/or 'email' values required in object")
}
return nil, fmt.Errorf("invalid value type, must be string or map")
return nil, errors.New("invalid value type, must be string or map")
}
func getSafeString(m map[string]interface{}, k string) string {
@@ -2599,7 +2606,7 @@ func parseAuthorString(s string) (*AuthorAnnotation, error) {
parts := strings.Fields(s)
if len(parts) == 0 {
return nil, fmt.Errorf("author is an empty string")
return nil, errors.New("author is an empty string")
}
namePartCount := len(parts)
@@ -2609,7 +2616,7 @@ func parseAuthorString(s string) (*AuthorAnnotation, error) {
strings.HasSuffix(trailing, emailSuffix) {
email = trailing[len(emailPrefix):]
email = email[0 : len(email)-len(emailSuffix)]
namePartCount = namePartCount - 1
namePartCount -= 1
}
name := strings.Join(parts[0:namePartCount], " ")
@@ -2635,7 +2642,7 @@ func convertYAMLMapKeyTypes(x any, path []string) (any, error) {
return result, nil
case []any:
for i := range x {
x[i], err = convertYAMLMapKeyTypes(x[i], append(path, fmt.Sprintf("%d", i)))
x[i], err = convertYAMLMapKeyTypes(x[i], append(path, strconv.Itoa(i)))
if err != nil {
return nil, err
}
@@ -2681,7 +2688,7 @@ func IsFutureKeywordForRegoVersion(s string, v RegoVersion) bool {
func (p *Parser) futureImport(imp *Import, allowedFutureKeywords map[string]tokens.Token) {
path := imp.Path.Value.(Ref)
if len(path) == 1 || !path[1].Equal(StringTerm("keywords")) {
if len(path) == 1 || !path[1].Equal(keywordsTerm) {
p.errorf(imp.Path.Location, "invalid import, must be `future.keywords`")
return
}


@@ -155,7 +155,7 @@ func MustParseTerm(input string) *Term {
func ParseRuleFromBody(module *Module, body Body) (*Rule, error) {
if len(body) != 1 {
return nil, fmt.Errorf("multiple expressions cannot be used for rule head")
return nil, errors.New("multiple expressions cannot be used for rule head")
}
return ParseRuleFromExpr(module, body[0])
@@ -166,11 +166,11 @@ func ParseRuleFromBody(module *Module, body Body) (*Rule, error) {
func ParseRuleFromExpr(module *Module, expr *Expr) (*Rule, error) {
if len(expr.With) > 0 {
return nil, fmt.Errorf("expressions using with keyword cannot be used for rule head")
return nil, errors.New("expressions using with keyword cannot be used for rule head")
}
if expr.Negated {
return nil, fmt.Errorf("negated expressions cannot be used for rule head")
return nil, errors.New("negated expressions cannot be used for rule head")
}
if _, ok := expr.Terms.(*SomeDecl); ok {
@@ -207,7 +207,7 @@ func ParseRuleFromExpr(module *Module, expr *Expr) (*Rule, error) {
}
if _, ok := BuiltinMap[expr.Operator().String()]; ok {
return nil, fmt.Errorf("rule name conflicts with built-in function")
return nil, errors.New("rule name conflicts with built-in function")
}
return ParseRuleFromCallExpr(module, expr.Terms.([]*Term))
@@ -272,7 +272,7 @@ func ParseCompleteDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, erro
}
head = RefHead(r)
if len(r) > 1 && !r[len(r)-1].IsGround() {
return nil, fmt.Errorf("ref not ground")
return nil, errors.New("ref not ground")
}
} else {
return nil, fmt.Errorf("%v cannot be used for rule name", ValueName(lhs.Value))
@@ -387,7 +387,7 @@ func ParseRuleFromCallEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
call, ok := lhs.Value.(Call)
if !ok {
return nil, fmt.Errorf("must be call")
return nil, errors.New("must be call")
}
ref, ok := call[0].Value.(Ref)
@@ -419,7 +419,7 @@ func ParseRuleFromCallEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
func ParseRuleFromCallExpr(module *Module, terms []*Term) (*Rule, error) {
if len(terms) <= 1 {
return nil, fmt.Errorf("rule argument list must take at least one argument")
return nil, errors.New("rule argument list must take at least one argument")
}
loc := terms[0].Location
@@ -600,7 +600,7 @@ func ParseStatement(input string) (Statement, error) {
return nil, err
}
if len(stmts) != 1 {
return nil, fmt.Errorf("expected exactly one statement")
return nil, errors.New("expected exactly one statement")
}
return stmts[0], nil
}
@@ -611,7 +611,7 @@ func ParseStatementWithOpts(input string, popts ParserOptions) (Statement, error
return nil, err
}
if len(stmts) != 1 {
return nil, fmt.Errorf("expected exactly one statement")
return nil, errors.New("expected exactly one statement")
}
return stmts[0], nil
}


@@ -8,21 +8,14 @@ import (
"bytes"
"encoding/json"
"fmt"
"math/rand"
"slices"
"strings"
"time"
"github.com/open-policy-agent/opa/v1/ast/internal/tokens"
astJSON "github.com/open-policy-agent/opa/v1/ast/json"
"github.com/open-policy-agent/opa/v1/util"
)
// Initialize seed for term hashing. This is intentionally placed before the
// root document sets are constructed to ensure they use the same hash seed as
// subsequent lookups. If the hash seeds are out of sync, lookups will fail.
var hashSeed = rand.New(rand.NewSource(time.Now().UnixNano()))
var hashSeed0 = (uint64(hashSeed.Uint32()) << 32) | uint64(hashSeed.Uint32())
// DefaultRootDocument is the default root document.
//
// All package directives inside source files are implicitly prefixed with the
@@ -502,7 +495,7 @@ func (c *Comment) Equal(other *Comment) bool {
// Compare returns an integer indicating whether pkg is less than, equal to,
// or greater than other.
func (pkg *Package) Compare(other *Package) int {
return Compare(pkg.Path, other.Path)
return termSliceCompare(pkg.Path, other.Path)
}
// Copy returns a deep copy of pkg.
@@ -594,7 +587,8 @@ func (imp *Import) Compare(other *Import) int {
if cmp := Compare(imp.Path, other.Path); cmp != 0 {
return cmp
}
return Compare(imp.Alias, other.Alias)
return VarCompare(imp.Alias, other.Alias)
}
// Copy returns a deep copy of imp.
@@ -644,7 +638,7 @@ func (imp *Import) Name() Var {
func (imp *Import) String() string {
buf := []string{"import", imp.Path.String()}
if len(imp.Alias) > 0 {
buf = append(buf, "as "+imp.Alias.String())
buf = append(buf, "as", imp.Alias.String())
}
return strings.Join(buf, " ")
}
@@ -681,8 +675,11 @@ func (rule *Rule) Compare(other *Rule) int {
if cmp := rule.Head.Compare(other.Head); cmp != 0 {
return cmp
}
if cmp := util.Compare(rule.Default, other.Default); cmp != 0 {
return cmp
if rule.Default != other.Default {
if !rule.Default {
return -1
}
return 1
}
if cmp := rule.Body.Compare(other.Body); cmp != 0 {
return cmp
@@ -701,9 +698,11 @@ func (rule *Rule) Copy() *Rule {
cpy.Head = rule.Head.Copy()
cpy.Body = rule.Body.Copy()
cpy.Annotations = make([]*Annotations, len(rule.Annotations))
for i, a := range rule.Annotations {
cpy.Annotations[i] = a.Copy(&cpy)
if len(cpy.Annotations) > 0 {
cpy.Annotations = make([]*Annotations, len(rule.Annotations))
for i, a := range rule.Annotations {
cpy.Annotations[i] = a.Copy(&cpy)
}
}
if cpy.Else != nil {
@@ -780,9 +779,7 @@ func (rule *Rule) stringWithOpts(opts toStringOpts) string {
case RegoV1, RegoV0CompatV1:
buf = append(buf, "if")
}
buf = append(buf, "{")
buf = append(buf, rule.Body.String())
buf = append(buf, "}")
buf = append(buf, "{", rule.Body.String(), "}")
}
if rule.Else != nil {
buf = append(buf, rule.Else.elseString(opts))
@@ -828,8 +825,7 @@ func (rule *Rule) elseString(opts toStringOpts) string {
value := rule.Head.Value
if value != nil {
buf = append(buf, "=")
buf = append(buf, value.String())
buf = append(buf, "=", value.String())
}
switch opts.RegoVersion() {
@@ -837,9 +833,7 @@ func (rule *Rule) elseString(opts toStringOpts) string {
buf = append(buf, "if")
}
buf = append(buf, "{")
buf = append(buf, rule.Body.String())
buf = append(buf, "}")
buf = append(buf, "{", rule.Body.String(), "}")
if rule.Else != nil {
buf = append(buf, rule.Else.elseString(opts))
@@ -892,7 +886,7 @@ func RefHead(ref Ref, args ...*Term) *Head {
}
// DocKind represents the collection of document types that can be produced by rules.
type DocKind int
type DocKind byte
const (
// CompleteDoc represents a document that is completely defined by the rule.
@@ -912,11 +906,13 @@ func (head *Head) DocKind() DocKind {
return PartialObjectDoc
}
return PartialSetDoc
} else if head.HasDynamicRef() {
return PartialObjectDoc
}
return CompleteDoc
}
type RuleKind int
type RuleKind byte
const (
SingleValue = iota
@@ -973,7 +969,7 @@ func (head *Head) Compare(other *Head) int {
if cmp := Compare(head.Reference, other.Reference); cmp != 0 {
return cmp
}
if cmp := Compare(head.Name, other.Name); cmp != 0 {
if cmp := VarCompare(head.Name, other.Name); cmp != 0 {
return cmp
}
if cmp := Compare(head.Key, other.Key); cmp != 0 {
@@ -1091,8 +1087,7 @@ func (head *Head) SetLoc(loc *Location) {
func (head *Head) HasDynamicRef() bool {
pos := head.Reference.Dynamic()
// Ref is dynamic if it has one non-constant term that isn't the first or last term or if it's a partial set rule.
return pos > 0 && (pos < len(head.Reference)-1 || head.RuleKind() == MultiValue)
return pos > 0 && (pos < len(head.Reference))
}
// Copy returns a deep copy of a.
@@ -1177,7 +1172,7 @@ func (body Body) Compare(other Body) int {
if len(other) < minLen {
minLen = len(other)
}
for i := 0; i < minLen; i++ {
for i := range minLen {
if cmp := body[i].Compare(other[i]); cmp != 0 {
return cmp
}
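
Loops of the form `for i := 0; i < n; i++` are rewritten throughout this bump to Go 1.22's range-over-int, which iterates 0 through n-1 with the bound evaluated once. The two spellings are equivalent:

package main

import "fmt"

func main() {
	minLen := 3
	for i := 0; i < minLen; i++ { // pre-1.22 spelling
		fmt.Print(i, " ")
	}
	fmt.Println()
	for i := range minLen { // Go 1.22+: same iterations, bound evaluated once
		fmt.Print(i, " ")
	}
	fmt.Println()
}
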
@@ -1202,12 +1197,7 @@ func (body Body) Copy() Body {
// Contains returns true if this body contains the given expression.
func (body Body) Contains(x *Expr) bool {
for _, e := range body {
if e.Equal(x) {
return true
}
}
return false
return slices.ContainsFunc(body, x.Equal)
}
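
slices.ContainsFunc combined with the method value x.Equal (a bound method usable as a func(*Expr) bool) replaces the hand-rolled loop. The same idiom generically:

package main

import (
	"fmt"
	"slices"
)

type expr struct{ op string }

func (e *expr) Equal(o *expr) bool { return e.op == o.op }

func main() {
	body := []*expr{{"eq"}, {"gt"}}
	x := &expr{"gt"}
	// x.Equal is a method value: a func(*expr) bool with x bound as receiver.
	fmt.Println(slices.ContainsFunc(body, x.Equal)) // true
}
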
// Equal returns true if this Body is equal to the other Body.
@@ -1406,11 +1396,7 @@ func (expr *Expr) Copy() *Expr {
case *SomeDecl:
cpy.Terms = ts.Copy()
case []*Term:
cpyTs := make([]*Term, len(ts))
for i := range ts {
cpyTs[i] = ts[i].Copy()
}
cpy.Terms = cpyTs
cpy.Terms = termSliceCopy(ts)
case *Term:
cpy.Terms = ts.Copy()
case *Every:


@@ -13,41 +13,32 @@ import (
// SchemaSet holds a map from a path to a schema.
type SchemaSet struct {
m *util.HashMap
m *util.HasherMap[Ref, any]
}
// NewSchemaSet returns an empty SchemaSet.
func NewSchemaSet() *SchemaSet {
eqFunc := func(a, b util.T) bool {
return a.(Ref).Equal(b.(Ref))
}
hashFunc := func(x util.T) int { return x.(Ref).Hash() }
return &SchemaSet{
m: util.NewHashMap(eqFunc, hashFunc),
m: util.NewHasherMap[Ref, any](RefEqual),
}
}
// Put inserts a raw schema into the set.
func (ss *SchemaSet) Put(path Ref, raw interface{}) {
func (ss *SchemaSet) Put(path Ref, raw any) {
ss.m.Put(path, raw)
}
// Get returns the raw schema identified by the path.
func (ss *SchemaSet) Get(path Ref) interface{} {
if ss == nil {
return nil
func (ss *SchemaSet) Get(path Ref) any {
if ss != nil {
if x, ok := ss.m.Get(path); ok {
return x
}
}
x, ok := ss.m.Get(path)
if !ok {
return nil
}
return x
return nil
}
func loadSchema(raw interface{}, allowNet []string) (types.Type, error) {
func loadSchema(raw any, allowNet []string) (types.Type, error) {
jsonSchema, err := compileSchema(raw, allowNet)
if err != nil {


@@ -0,0 +1,69 @@
package ast
import (
"strings"
"sync"
)
type termPtrPool struct {
pool sync.Pool
}
type stringBuilderPool struct {
pool sync.Pool
}
type indexResultPool struct {
pool sync.Pool
}
func (p *termPtrPool) Get() *Term {
return p.pool.Get().(*Term)
}
func (p *termPtrPool) Put(t *Term) {
p.pool.Put(t)
}
func (p *stringBuilderPool) Get() *strings.Builder {
return p.pool.Get().(*strings.Builder)
}
func (p *stringBuilderPool) Put(sb *strings.Builder) {
sb.Reset()
p.pool.Put(sb)
}
func (p *indexResultPool) Get() *IndexResult {
return p.pool.Get().(*IndexResult)
}
func (p *indexResultPool) Put(x *IndexResult) {
if x != nil {
p.pool.Put(x)
}
}
var TermPtrPool = &termPtrPool{
pool: sync.Pool{
New: func() any {
return &Term{}
},
},
}
var sbPool = &stringBuilderPool{
pool: sync.Pool{
New: func() any {
return &strings.Builder{}
},
},
}
var IndexResultPool = &indexResultPool{
pool: sync.Pool{
New: func() any {
return &IndexResult{}
},
},
}
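
This new file hides sync.Pool, which traffics in `any`, behind small typed wrappers so call sites get *Term, *strings.Builder, or *IndexResult back without assertions, and reset logic such as sb.Reset() lives in one place. Since Go 1.18 the same thing can be written once generically; a sketch of that alternative, not what OPA actually does:

package main

import (
	"fmt"
	"strings"
	"sync"
)

// pool is a type-safe wrapper over sync.Pool.
type pool[T any] struct {
	p     sync.Pool
	reset func(T) // optional per-type reset applied on Put
}

func newPool[T any](newFn func() T, reset func(T)) *pool[T] {
	return &pool[T]{p: sync.Pool{New: func() any { return newFn() }}, reset: reset}
}

func (p *pool[T]) Get() T { return p.p.Get().(T) }

func (p *pool[T]) Put(v T) {
	if p.reset != nil {
		p.reset(v)
	}
	p.p.Put(v)
}

func main() {
	sbPool := newPool(func() *strings.Builder { return &strings.Builder{} },
		func(sb *strings.Builder) { sb.Reset() })
	sb := sbPool.Get()
	sb.WriteString("hello")
	fmt.Println(sb.String())
	sbPool.Put(sb) // reset happens here, ready for reuse
}
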


@@ -8,6 +8,7 @@ package ast
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"math"
@@ -19,14 +20,14 @@ import (
"strings"
"sync"
"github.com/OneOfOne/xxhash"
"github.com/cespare/xxhash/v2"
astJSON "github.com/open-policy-agent/opa/v1/ast/json"
"github.com/open-policy-agent/opa/v1/ast/location"
"github.com/open-policy-agent/opa/v1/util"
)
var errFindNotFound = fmt.Errorf("find: not found")
var errFindNotFound = errors.New("find: not found")
// Location records a position in source code.
type Location = location.Location
@@ -55,13 +56,12 @@ type Value interface {
// InterfaceToValue converts a native Go value x to a Value.
func InterfaceToValue(x interface{}) (Value, error) {
switch x := x.(type) {
case Value:
return x, nil
case nil:
return NullValue, nil
case bool:
if x {
return InternedBooleanTerm(true).Value, nil
}
return InternedBooleanTerm(false).Value, nil
return InternedBooleanTerm(x).Value, nil
case json.Number:
if interned := InternedIntNumberTermFromString(string(x)); interned != nil {
return interned.Value, nil
@@ -87,6 +87,12 @@ func InterfaceToValue(x interface{}) (Value, error) {
r[i].Value = e
}
return NewArray(r...), nil
case []string:
r := util.NewPtrSlice[Term](len(x))
for i, e := range x {
r[i].Value = String(e)
}
return NewArray(r...), nil
case map[string]any:
kvs := util.NewPtrSlice[Term](len(x) * 2)
idx := 0
@@ -182,7 +188,7 @@ func valueToInterface(v Value, resolver Resolver, opt JSONOpt) (interface{}, err
return string(v), nil
case *Array:
buf := []interface{}{}
for i := 0; i < v.Len(); i++ {
for i := range v.Len() {
x1, err := valueToInterface(v.Elem(i).Value, resolver, opt)
if err != nil {
return nil, err
@@ -618,10 +624,7 @@ func (bol Boolean) Compare(other Value) int {
// Find returns the current value or a not found error.
func (bol Boolean) Find(path Ref) (Value, error) {
if len(path) == 0 {
if bol {
return InternedBooleanTerm(true).Value, nil
}
return InternedBooleanTerm(false).Value, nil
return InternedBooleanTerm(bool(bol)).Value, nil
}
return nil, errFindNotFound
}
@@ -718,7 +721,7 @@ func (num Number) Hash() int {
f, err := json.Number(num).Float64()
if err != nil {
bs := []byte(num)
h := xxhash.Checksum64(bs)
h := xxhash.Sum64(bs)
return int(h)
}
return int(f)
@@ -834,8 +837,7 @@ func (str String) String() string {
// Hash returns the hash code for the Value.
func (str String) Hash() int {
h := xxhash.ChecksumString64S(string(str), hashSeed0)
return int(h)
return int(xxhash.Sum64String(string(str)))
}
// Var represents a variable as defined by the language.
@@ -876,8 +878,7 @@ func (v Var) Find(path Ref) (Value, error) {
// Hash returns the hash code for the Value.
func (v Var) Hash() int {
h := xxhash.ChecksumString64S(string(v), hashSeed0)
return int(h)
return int(xxhash.Sum64String(string(v)))
}
// IsGround always returns false.
@@ -1014,6 +1015,25 @@ func (ref Ref) Copy() Ref {
return termSliceCopy(ref)
}
// CopyNonGround returns a new ref with deep copies of the non-ground parts and shallow
// copies of the ground parts. This is a *much* cheaper operation than Copy for operations
// that only intend to modify (e.g. plug) the non-ground parts. The head element of the ref
// is always shallow copied.
func (ref Ref) CopyNonGround() Ref {
cpy := make(Ref, len(ref))
cpy[0] = ref[0]
for i := 1; i < len(ref); i++ {
if ref[i].Value.IsGround() {
cpy[i] = ref[i]
} else {
cpy[i] = ref[i].Copy()
}
}
return cpy
}
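
CopyNonGround is a cheaper alternative to Copy for plug-style rewrites that only ever mutate non-ground (variable-containing) terms: ground terms are treated as immutable in that context, so sharing them is safe and skips most of the deep copy. A toy version of the shallow/deep hybrid:

package main

import "fmt"

type term struct {
	val    string
	ground bool
}

// copyNonGround deep-copies only the terms that may later be mutated (the
// non-ground ones) and shares the rest; the head element is always shared.
func copyNonGround(ref []*term) []*term {
	cpy := make([]*term, len(ref))
	cpy[0] = ref[0]
	for i := 1; i < len(ref); i++ {
		if ref[i].ground {
			cpy[i] = ref[i] // shared: safe because ground terms won't be plugged
		} else {
			c := *ref[i]
			cpy[i] = &c // copied (a real Term would copy recursively)
		}
	}
	return cpy
}

func main() {
	ref := []*term{{"data", true}, {"x", false}, {"y", true}}
	cpy := copyNonGround(ref)
	fmt.Println(cpy[2] == ref[2], cpy[1] == ref[1]) // true false
}
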
// Equal returns true if ref is equal to other.
func (ref Ref) Equal(other Value) bool {
switch o := other.(type) {
@@ -1143,7 +1163,7 @@ func (ref Ref) Ptr() (string, error) {
if str, ok := term.Value.(String); ok {
parts = append(parts, url.PathEscape(string(str)))
} else {
return "", fmt.Errorf("invalid path value type")
return "", errors.New("invalid path value type")
}
}
return strings.Join(parts, "/"), nil
@@ -1155,20 +1175,12 @@ func IsVarCompatibleString(s string) bool {
return varRegexp.MatchString(s)
}
var sbPool = sync.Pool{
New: func() any {
return &strings.Builder{}
},
}
func (ref Ref) String() string {
if len(ref) == 0 {
return ""
}
sb := sbPool.Get().(*strings.Builder)
sb.Reset()
sb := sbPool.Get()
defer sbPool.Put(sb)
sb.Grow(10 * len(ref))
@@ -1311,7 +1323,15 @@ func (arr *Array) Find(path Ref) (Value, error) {
if i < 0 || i >= arr.Len() {
return nil, errFindNotFound
}
return arr.Elem(i).Value.Find(path[1:])
term := arr.Elem(i)
// Using Find on scalar values costs an allocation (type -> Value conversion)
// and since we already have the Value here, we can avoid that.
if len(path) == 1 && IsScalar(term.Value) {
return term.Value, nil
}
return term.Value.Find(path[1:])
}
// Get returns the element at pos or nil if not possible.
@@ -1366,20 +1386,19 @@ func (arr *Array) MarshalJSON() ([]byte, error) {
}
func (arr *Array) String() string {
sb := sbPool.Get().(*strings.Builder)
sb.Reset()
sb := sbPool.Get()
sb.Grow(len(arr.elems) * 16)
defer sbPool.Put(sb)
sb.WriteRune('[')
sb.WriteByte('[')
for i, e := range arr.elems {
if i > 0 {
sb.WriteString(", ")
}
sb.WriteString(e.String())
}
sb.WriteRune(']')
sb.WriteByte(']')
return sb.String()
}
@@ -1565,20 +1584,19 @@ func (s *set) String() string {
return "set()"
}
sb := sbPool.Get().(*strings.Builder)
sb.Reset()
sb := sbPool.Get()
sb.Grow(s.Len() * 16)
defer sbPool.Put(sb)
sb.WriteRune('{')
sb.WriteByte('{')
for i := range s.sortedKeys() {
if i > 0 {
sb.WriteString(", ")
}
sb.WriteString(s.keys[i].Value.String())
}
sb.WriteRune('}')
sb.WriteByte('}')
return sb.String()
}
@@ -1748,20 +1766,6 @@ func (s *set) Slice() []*Term {
return s.sortedKeys()
}
// Internal method to use for cases where a set may be reused in favor
// of creating a new one (with the associated allocations).
func (s *set) clear() {
clear(s.elems)
s.keys = s.keys[:0]
s.hash = 0
s.ground = true
s.sortGuard = sync.Once{}
}
func (s *set) insertNoGuard(x *Term) {
s.insert(x, false)
}
// NOTE(philipc): We assume a many-readers, single-writer model here.
// This method should NOT be used concurrently, or else we risk data races.
func (s *set) insert(x *Term, resetSortGuard bool) {
@@ -2213,7 +2217,7 @@ type objectElem struct {
type objectElemSlice []*objectElem
func (s objectElemSlice) Less(i, j int) bool { return Compare(s[i].key.Value, s[j].key.Value) < 0 }
func (s objectElemSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x }
func (s objectElemSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s objectElemSlice) Len() int { return len(s) }
// Item is a helper for constructing an tuple containing two Terms
@@ -2253,7 +2257,7 @@ func (obj *object) Compare(other Value) int {
if len(b.keys) < len(akeys) {
minLen = len(bkeys)
}
for i := 0; i < minLen; i++ {
for i := range minLen {
keysCmp := Compare(akeys[i].key, bkeys[i].key)
if keysCmp < 0 {
return -1
@@ -2282,11 +2286,17 @@ func (obj *object) Find(path Ref) (Value, error) {
if len(path) == 0 {
return obj, nil
}
value := obj.Get(path[0])
if value == nil {
term := obj.Get(path[0])
if term == nil {
return nil, errFindNotFound
}
return value.Value.Find(path[1:])
// Using Find on scalar values costs an allocation (type -> Value conversion)
// and since we already have the Value here, we can avoid that.
if len(path) == 1 && IsScalar(term.Value) {
return term.Value, nil
}
return term.Value.Find(path[1:])
}
func (obj *object) Insert(k, v *Term) {
@@ -2375,7 +2385,8 @@ func (obj *object) Foreach(f func(*Term, *Term)) {
}
// Map returns a new Object constructed by mapping each element in the object
// using the function f.
// using the function f. If f returns an error, the error is returned by Map.
// If f returns a nil key, the element is skipped.
func (obj *object) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, error) {
cpy := newobject(obj.Len())
for _, node := range obj.sortedKeys() {
@@ -2383,7 +2394,9 @@ func (obj *object) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, erro
if err != nil {
return nil, err
}
cpy.insert(k, v, false)
if k != nil {
cpy.insert(k, v, false)
}
}
return cpy, nil
}
@@ -2484,13 +2497,12 @@ func (obj *object) Len() int {
}
func (obj *object) String() string {
sb := sbPool.Get().(*strings.Builder)
sb.Reset()
sb := sbPool.Get()
sb.Grow(obj.Len() * 32)
defer sbPool.Put(sb)
sb.WriteRune('{')
sb.WriteByte('{')
for i, elem := range obj.sortedKeys() {
if i > 0 {
@@ -2500,7 +2512,7 @@ func (obj *object) String() string {
sb.WriteString(": ")
sb.WriteString(elem.value.String())
}
sb.WriteRune('}')
sb.WriteByte('}')
return sb.String()
}
@@ -2750,7 +2762,7 @@ func filterObject(o Value, filter Value) (Value, error) {
return o, nil
case *Array:
values := NewArray()
for i := 0; i < v.Len(); i++ {
for i := range v.Len() {
subFilter := filteredObj.Get(StringTerm(strconv.Itoa(i)))
if subFilter != nil {
filteredValue, err := filterObject(v.Elem(i).Value, subFilter.Value)
@@ -3054,14 +3066,10 @@ func (c Call) String() string {
func termSliceCopy(a []*Term) []*Term {
cpy := make([]*Term, len(a))
termSliceCopyTo(a, cpy)
return cpy
}
func termSliceCopyTo(src, dst []*Term) {
for i := range src {
dst[i] = src[i].Copy()
for i := range a {
cpy[i] = a[i].Copy()
}
return cpy
}
func termSliceEqual(a, b []*Term) bool {
@@ -3115,7 +3123,7 @@ func unmarshalBody(b []interface{}) (Body, error) {
}
return buf, nil
unmarshal_error:
return nil, fmt.Errorf("ast: unable to unmarshal body")
return nil, errors.New("ast: unable to unmarshal body")
}
func unmarshalExpr(expr *Expr, v map[string]interface{}) error {
@@ -3252,7 +3260,7 @@ func unmarshalTermSlice(s []interface{}) ([]*Term, error) {
}
return nil, err
}
return nil, fmt.Errorf("ast: unable to unmarshal term")
return nil, errors.New("ast: unable to unmarshal term")
}
return buf, nil
}
@@ -3261,7 +3269,7 @@ func unmarshalTermSliceValue(d map[string]interface{}) ([]*Term, error) {
if s, ok := d["value"].([]interface{}); ok {
return unmarshalTermSlice(s)
}
return nil, fmt.Errorf(`ast: unable to unmarshal term (expected {"value": [...], "type": ...} where type is one of: ref, array, or set)`)
return nil, errors.New(`ast: unable to unmarshal term (expected {"value": [...], "type": ...} where type is one of: ref, array, or set)`)
}
func unmarshalWith(i interface{}) (*With, error) {
@@ -3281,7 +3289,7 @@ func unmarshalWith(i interface{}) (*With, error) {
}
return nil, err
}
return nil, fmt.Errorf(`ast: unable to unmarshal with modifier (expected {"target": {...}, "value": {...}})`)
return nil, errors.New(`ast: unable to unmarshal with modifier (expected {"target": {...}, "value": {...}})`)
}
func unmarshalValue(d map[string]interface{}) (Value, error) {
@@ -3399,5 +3407,5 @@ func unmarshalValue(d map[string]interface{}) (Value, error) {
}
}
unmarshal_error:
return nil, fmt.Errorf("ast: unable to unmarshal term")
return nil, errors.New("ast: unable to unmarshal term")
}


@@ -234,7 +234,7 @@ func Transform(t Transformer, x interface{}) (interface{}, error) {
return k, v, nil
})
case *Array:
for i := 0; i < y.Len(); i++ {
for i := range y.Len() {
v, err := transformTerm(t, y.Elem(i))
if err != nil {
return nil, err


@@ -135,7 +135,7 @@ func (u *unifier) unify(a *Term, b *Term) {
}
case *Array:
if a.Len() == b.Len() {
for i := 0; i < a.Len(); i++ {
for i := range a.Len() {
u.unify(a.Elem(i), b.Elem(i))
}
}


@@ -16,13 +16,18 @@ type VarSet map[Var]struct{}
// NewVarSet returns a new VarSet containing the specified variables.
func NewVarSet(vs ...Var) VarSet {
s := VarSet{}
s := make(VarSet, len(vs))
for _, v := range vs {
s.Add(v)
}
return s
}
// NewVarSetOfSize returns a new empty VarSet preallocated for the given number of variables.
func NewVarSetOfSize(size int) VarSet {
return make(VarSet, size)
}
// Add updates the set to include the variable "v".
func (s VarSet) Add(v Var) {
s[v] = struct{}{}
@@ -36,7 +41,7 @@ func (s VarSet) Contains(v Var) bool {
// Copy returns a shallow copy of the VarSet.
func (s VarSet) Copy() VarSet {
cpy := VarSet{}
cpy := NewVarSetOfSize(len(s))
for v := range s {
cpy.Add(v)
}
@@ -45,7 +50,13 @@ func (s VarSet) Copy() VarSet {
// Diff returns a VarSet containing variables in s that are not in vs.
func (s VarSet) Diff(vs VarSet) VarSet {
r := VarSet{}
i := 0
for v := range s {
if !vs.Contains(v) {
i++
}
}
r := NewVarSetOfSize(i)
for v := range s {
if !vs.Contains(v) {
r.Add(v)
@@ -56,15 +67,26 @@ func (s VarSet) Diff(vs VarSet) VarSet {
// Equal returns true if s contains exactly the same elements as vs.
func (s VarSet) Equal(vs VarSet) bool {
if len(s.Diff(vs)) > 0 {
if len(s) != len(vs) {
return false
}
return len(vs.Diff(s)) == 0
for v := range s {
if !vs.Contains(v) {
return false
}
}
return true
}
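
Diff and Intersect now run a counting pass before allocating, so the result map is created at its final size and never grows mid-fill, while Equal drops its two Diff allocations for a length check plus a membership scan. The count-then-allocate pattern on its own:

package main

import "fmt"

func diff(s, vs map[string]struct{}) map[string]struct{} {
	// Pass 1: count survivors so the result map is sized exactly once.
	n := 0
	for v := range s {
		if _, ok := vs[v]; !ok {
			n++
		}
	}
	// Pass 2: fill. No rehashing or growth happens during inserts.
	r := make(map[string]struct{}, n)
	for v := range s {
		if _, ok := vs[v]; !ok {
			r[v] = struct{}{}
		}
	}
	return r
}

func main() {
	s := map[string]struct{}{"x": {}, "y": {}, "z": {}}
	vs := map[string]struct{}{"y": {}}
	fmt.Println(len(diff(s, vs))) // 2
}
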
// Intersect returns a VarSet containing variables in s that are in vs.
func (s VarSet) Intersect(vs VarSet) VarSet {
r := VarSet{}
i := 0
for v := range s {
if vs.Contains(v) {
i++
}
}
r := NewVarSetOfSize(i)
for v := range s {
if vs.Contains(v) {
r.Add(v)
@@ -73,7 +95,7 @@ func (s VarSet) Intersect(vs VarSet) VarSet {
return r
}
// Sorted returns a sorted slice of vars from s.
// Sorted returns a new sorted slice of vars from s.
func (s VarSet) Sorted() []Var {
sorted := make([]Var, 0, len(s))
for v := range s {


@@ -497,6 +497,13 @@
"PreRelease": "",
"Metadata": ""
},
"internal.test_case": {
"Major": 1,
"Minor": 2,
"Patch": 0,
"PreRelease": "",
"Metadata": ""
},
"intersection": {
"Major": 0,
"Minor": 17,


@@ -362,7 +362,7 @@ func (vis *GenericVisitor) Walk(x interface{}) {
vis.Walk(x.Get(k))
}
case *Array:
for i := 0; i < x.Len(); i++ {
for i := range x.Len() {
vis.Walk(x.Elem(i))
}
case Set:


@@ -267,7 +267,7 @@ func (m Manifest) equalWasmResolversAndRoots(other Manifest) bool {
return false
}
for i := 0; i < len(m.WasmResolvers); i++ {
for i := range len(m.WasmResolvers) {
if !m.WasmResolvers[i].Equal(&other.WasmResolvers[i]) {
return false
}
@@ -298,7 +298,7 @@ func (wr *WasmResolver) Equal(other *WasmResolver) bool {
return false
}
for i := 0; i < annotLen; i++ {
for i := range annotLen {
if wr.Annotations[i].Compare(other.Annotations[i]) != 0 {
return false
}
@@ -333,7 +333,7 @@ func (m *Manifest) validateAndInjectDefaults(b Bundle) error {
roots[i] = strings.Trim(roots[i], "/")
}
for i := 0; i < len(roots)-1; i++ {
for i := range len(roots) - 1 {
for j := i + 1; j < len(roots); j++ {
if RootPathsOverlap(roots[i], roots[j]) {
return fmt.Errorf("manifest has overlapped roots: '%v' and '%v'", roots[i], roots[j])
@@ -715,8 +715,11 @@ func (r *Reader) Read() (Bundle, error) {
popts.RegoVersion = bundle.RegoVersion(popts.EffectiveRegoVersion())
for _, mf := range modules {
modulePopts := popts
if modulePopts.RegoVersion, err = bundle.RegoVersionForFile(mf.RelativePath, popts.EffectiveRegoVersion()); err != nil {
if regoVersion, err := bundle.RegoVersionForFile(mf.RelativePath, popts.EffectiveRegoVersion()); err != nil {
return bundle, err
} else if regoVersion != ast.RegoUndefined {
// We don't expect ast.RegoUndefined here, but if we do get it, don't override the configured rego-version, just to be extra protective
modulePopts.RegoVersion = regoVersion
}
r.metrics.Timer(metrics.RegoModuleParse).Start()
mf.Parsed, err = ast.ParseModuleWithOpts(mf.Path, string(mf.Raw), modulePopts)
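
The rewritten loop only overrides modulePopts.RegoVersion when the per-file lookup returns something other than ast.RegoUndefined, leaving the globally configured version intact otherwise; it leans on the fact that variables declared in an if initializer stay in scope through the whole if/else chain. Schematically (0 standing in for ast.RegoUndefined):

package main

import (
	"errors"
	"fmt"
)

// versionFor is a stand-in for bundle.RegoVersionForFile.
func versionFor(path string) (int, error) {
	if path == "bad" {
		return 0, errors.New("no version for file")
	}
	return 0, nil
}

func main() {
	configured := 1
	effective := configured
	// v and err are scoped to the whole if/else chain, so the configured
	// version is only overridden when the lookup yields a real version.
	if v, err := versionFor("mod.rego"); err != nil {
		fmt.Println("error:", err)
	} else if v != 0 {
		effective = v
	}
	fmt.Println(effective) // 1: configured version preserved
}
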
@@ -729,19 +732,19 @@ func (r *Reader) Read() (Bundle, error) {
if bundle.Type() == DeltaBundleType {
if len(bundle.Data) != 0 {
return bundle, fmt.Errorf("delta bundle expected to contain only patch file but data files found")
return bundle, errors.New("delta bundle expected to contain only patch file but data files found")
}
if len(bundle.Modules) != 0 {
return bundle, fmt.Errorf("delta bundle expected to contain only patch file but policy files found")
return bundle, errors.New("delta bundle expected to contain only patch file but policy files found")
}
if len(bundle.WasmModules) != 0 {
return bundle, fmt.Errorf("delta bundle expected to contain only patch file but wasm files found")
return bundle, errors.New("delta bundle expected to contain only patch file but wasm files found")
}
if r.persist {
return bundle, fmt.Errorf("'persist' property is true in config. persisting delta bundle to disk is not supported")
return bundle, errors.New("'persist' property is true in config. persisting delta bundle to disk is not supported")
}
}
@@ -763,7 +766,7 @@ func (r *Reader) Read() (Bundle, error) {
for _, r := range bundle.Manifest.WasmResolvers {
epMap[r.Module] = append(epMap[r.Module], r.Entrypoint)
}
for i := 0; i < len(bundle.WasmModules); i++ {
for i := range len(bundle.WasmModules) {
entrypoints := epMap[bundle.WasmModules[i].Path]
for _, entrypoint := range entrypoints {
ref, err := ast.PtrRef(ast.DefaultRootDocument, entrypoint)
@@ -816,12 +819,12 @@ func (r *Reader) checkSignaturesAndDescriptors(signatures SignaturesConfig) erro
}
if signatures.isEmpty() && r.verificationConfig != nil && r.verificationConfig.KeyID != "" {
return fmt.Errorf("bundle missing .signatures.json file")
return errors.New("bundle missing .signatures.json file")
}
if !signatures.isEmpty() {
if r.verificationConfig == nil {
return fmt.Errorf("verification key not provided")
return errors.New("verification key not provided")
}
// verify the JWT signatures included in the `.signatures.json` file
@@ -1204,10 +1207,6 @@ func (b *Bundle) SetRegoVersion(v ast.RegoVersion) {
// If there is no defined version for the given path, the default version def is returned.
// If the version does not correspond to ast.RegoV0 or ast.RegoV1, an error is returned.
func (b *Bundle) RegoVersionForFile(path string, def ast.RegoVersion) (ast.RegoVersion, error) {
if def == ast.RegoUndefined {
def = ast.DefaultRegoVersion
}
version, err := b.Manifest.numericRegoVersionForFile(path)
if err != nil {
return def, err
@@ -1354,7 +1353,7 @@ func (b *Bundle) readData(key []string) *interface{} {
node := b.Data
for i := 0; i < len(key)-1; i++ {
for i := range len(key) - 1 {
child, ok := node[key[i]]
if !ok {
@@ -1390,7 +1389,7 @@ func mktree(path []string, value interface{}) (map[string]interface{}, error) {
// For 0 length path the value is the full tree.
obj, ok := value.(map[string]interface{})
if !ok {
return nil, fmt.Errorf("root value must be object")
return nil, errors.New("root value must be object")
}
return obj, nil
}
@@ -1513,7 +1512,7 @@ func bundleRegoVersions(bundle *Bundle, regoVersion ast.RegoVersion, usePath boo
return nil, err
}
// only record the rego version if it's different from one applied globally to the result bundle
if v != regoVersion {
if regoVersion != ast.RegoUndefined && v != regoVersion {
// We store the rego version by the absolute path to the bundle root, as this will be the - possibly new - path
// to the module inside the merged bundle.
fileRegoVersions[bundleAbsolutePath(m, usePath)] = v.Int()


@@ -101,11 +101,9 @@ func generatePayload(files []FileInfo, sc *SigningConfig, keyID string) ([]byte,
for claim, value := range claims {
payload[claim] = value
}
} else {
if keyID != "" {
// keyid claim is deprecated but include it for backwards compatibility.
payload["keyid"] = keyID
}
} else if keyID != "" {
// keyid claim is deprecated but include it for backwards compatibility.
payload["keyid"] = keyID
}
return json.Marshal(payload)
}


@@ -8,6 +8,7 @@ import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"path/filepath"
"strings"
@@ -94,7 +95,7 @@ func ReadBundleNamesFromStore(ctx context.Context, store storage.Store, txn stor
bundleMap, ok := value.(map[string]interface{})
if !ok {
return nil, fmt.Errorf("corrupt manifest roots")
return nil, errors.New("corrupt manifest roots")
}
bundles := make([]string, len(bundleMap))
@@ -196,14 +197,14 @@ func ReadWasmMetadataFromStore(ctx context.Context, store storage.Store, txn sto
bs, err := json.Marshal(value)
if err != nil {
return nil, fmt.Errorf("corrupt wasm manifest data")
return nil, errors.New("corrupt wasm manifest data")
}
var wasmMetadata []WasmResolver
err = util.UnmarshalJSON(bs, &wasmMetadata)
if err != nil {
return nil, fmt.Errorf("corrupt wasm manifest data")
return nil, errors.New("corrupt wasm manifest data")
}
return wasmMetadata, nil
@@ -219,14 +220,14 @@ func ReadWasmModulesFromStore(ctx context.Context, store storage.Store, txn stor
encodedModules, ok := value.(map[string]interface{})
if !ok {
return nil, fmt.Errorf("corrupt wasm modules")
return nil, errors.New("corrupt wasm modules")
}
rawModules := map[string][]byte{}
for path, enc := range encodedModules {
encStr, ok := enc.(string)
if !ok {
return nil, fmt.Errorf("corrupt wasm modules")
return nil, errors.New("corrupt wasm modules")
}
bs, err := base64.StdEncoding.DecodeString(encStr)
if err != nil {
@@ -248,7 +249,7 @@ func ReadBundleRootsFromStore(ctx context.Context, store storage.Store, txn stor
sl, ok := value.([]interface{})
if !ok {
return nil, fmt.Errorf("corrupt manifest roots")
return nil, errors.New("corrupt manifest roots")
}
roots := make([]string, len(sl))
@@ -256,7 +257,7 @@ func ReadBundleRootsFromStore(ctx context.Context, store storage.Store, txn stor
for i := range sl {
roots[i], ok = sl[i].(string)
if !ok {
return nil, fmt.Errorf("corrupt manifest root")
return nil, errors.New("corrupt manifest root")
}
}
@@ -278,7 +279,7 @@ func readRevisionFromStore(ctx context.Context, store storage.Store, txn storage
str, ok := value.(string)
if !ok {
return "", fmt.Errorf("corrupt manifest revision")
return "", errors.New("corrupt manifest revision")
}
return str, nil
@@ -299,7 +300,7 @@ func readMetadataFromStore(ctx context.Context, store storage.Store, txn storage
data, ok := value.(map[string]interface{})
if !ok {
return nil, fmt.Errorf("corrupt manifest metadata")
return nil, errors.New("corrupt manifest metadata")
}
return data, nil
@@ -320,7 +321,7 @@ func readEtagFromStore(ctx context.Context, store storage.Store, txn storage.Tra
str, ok := value.(string)
if !ok {
return "", fmt.Errorf("corrupt bundle etag")
return "", errors.New("corrupt bundle etag")
}
return str, nil
@@ -446,7 +447,7 @@ func activateBundles(opts *ActivateOpts) error {
p := getNormalizedPath(path)
if len(p) == 0 {
return fmt.Errorf("root value must be object")
return errors.New("root value must be object")
}
// verify valid YAML or JSON value
@@ -716,7 +717,7 @@ func readModuleInfoFromStore(ctx context.Context, store storage.Store, txn stora
if vs, ok := ver.(json.Number); ok {
i, err := vs.Int64()
if err != nil {
return nil, fmt.Errorf("corrupt rego version")
return nil, errors.New("corrupt rego version")
}
versions[k] = moduleInfo{RegoVersion: ast.RegoVersionFromInt(int(i))}
}
@@ -726,7 +727,7 @@ func readModuleInfoFromStore(ctx context.Context, store storage.Store, txn stora
return versions, nil
}
return nil, fmt.Errorf("corrupt rego version")
return nil, errors.New("corrupt rego version")
}
func erasePolicies(ctx context.Context, store storage.Store, txn storage.Transaction, parserOpts ast.ParserOptions, roots map[string]struct{}) (map[string]*ast.Module, []string, error) {
@@ -826,7 +827,7 @@ func writeModuleRegoVersionToStore(ctx context.Context, store storage.Store, txn
if regoVersion == ast.RegoUndefined {
var err error
-regoVersion, err = b.RegoVersionForFile(mf.Path, ast.RegoUndefined)
+regoVersion, err = b.RegoVersionForFile(mf.Path, runtimeRegoVersion)
if err != nil {
return fmt.Errorf("failed to get rego version for module '%s' in bundle: %w", mf.Path, err)
}
@@ -1019,7 +1020,7 @@ func lookup(path storage.Path, data map[string]interface{}) (interface{}, bool)
if len(path) == 0 {
return data, true
}
-for i := 0; i < len(path)-1; i++ {
+for i := range len(path) - 1 {
value, ok := data[path[i]]
if !ok {
return nil, false
@@ -1093,7 +1094,7 @@ func applyPatches(ctx context.Context, store storage.Store, txn storage.Transact
// construct patch path
path, ok := patch.ParsePatchPathEscaped("/" + strings.Trim(pat.Path, "/"))
if !ok {
return fmt.Errorf("error parsing patch path")
return errors.New("error parsing patch path")
}
var op storage.PatchOp
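The fmt.Errorf-to-errors.New swaps running through this file follow the usual rule: errors.New for constant messages, fmt.Errorf only where formatting verbs or %w wrapping are needed. For example:

```go
package main

import (
	"errors"
	"fmt"
)

var errCorruptRoots = errors.New("corrupt manifest roots") // constant message

func main() {
	// fmt.Errorf still earns its keep when wrapping or formatting:
	err := fmt.Errorf("failed to activate bundle %q: %w", "b1", errCorruptRoots)
	fmt.Println(err, errors.Is(err, errCorruptRoots)) // ... true
}
```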


@@ -10,6 +10,7 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"github.com/open-policy-agent/opa/internal/jwx/jwa"
@@ -60,11 +61,11 @@ func (*DefaultVerifier) VerifyBundleSignature(sc SignaturesConfig, bvc *Verifica
files := make(map[string]FileInfo)
if len(sc.Signatures) == 0 {
return files, fmt.Errorf(".signatures.json: missing JWT (expected exactly one)")
return files, errors.New(".signatures.json: missing JWT (expected exactly one)")
}
if len(sc.Signatures) > 1 {
return files, fmt.Errorf(".signatures.json: multiple JWTs not supported (expected exactly one)")
return files, errors.New(".signatures.json: multiple JWTs not supported (expected exactly one)")
}
for _, token := range sc.Signatures {
@@ -120,7 +121,7 @@ func verifyJWTSignature(token string, bvc *VerificationConfig) (*DecodedSignatur
}
if keyID == "" {
return nil, fmt.Errorf("verification key ID is empty")
return nil, errors.New("verification key ID is empty")
}
// now that we have the keyID, fetch the actual key
@@ -148,7 +149,7 @@ func verifyJWTSignature(token string, bvc *VerificationConfig) (*DecodedSignatur
}
if ds.Scope != scope {
return nil, fmt.Errorf("scope mismatch")
return nil, errors.New("scope mismatch")
}
return &ds, nil
}


@@ -7,6 +7,7 @@ package config
import (
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
@@ -98,7 +99,7 @@ func (c Config) PluginNames() (result []string) {
// PluginsEnabled returns true if one or more plugin features are enabled.
//
-// Deprecated. Use PluginNames instead.
+// Deprecated: Use PluginNames instead.
func (c Config) PluginsEnabled() bool {
return c.Bundle != nil || c.Bundles != nil || c.DecisionLogs != nil || c.Status != nil || len(c.Plugins) > 0
}
@@ -243,7 +244,7 @@ func removeCryptoKeys(x interface{}) error {
func removeKey(x interface{}, keys ...string) error {
val, ok := x.(map[string]interface{})
if !ok {
return fmt.Errorf("type assertion error")
return errors.New("type assertion error")
}
for _, key := range keys {


@@ -9,6 +9,7 @@ import (
"bytes"
"fmt"
"regexp"
"slices"
"sort"
"strings"
"unicode"
@@ -62,12 +63,10 @@ func SourceWithOpts(filename string, src []byte, opts Opts) ([]byte, error) {
var parserOpts ast.ParserOptions
if opts.ParserOptions != nil {
parserOpts = *opts.ParserOptions
-} else {
-if regoVersion == ast.RegoV1 {
-// If the rego version is V1, we need to parse it as such, to allow for future keywords not being imported.
-// Otherwise, we'll default to the default rego-version.
-parserOpts.RegoVersion = ast.RegoV1
-}
+} else if regoVersion == ast.RegoV1 {
+// If the rego version is V1, we need to parse it as such, to allow for future keywords not being imported.
+// Otherwise, we'll default to the default rego-version.
+parserOpts.RegoVersion = ast.RegoV1
}
if parserOpts.RegoVersion == ast.RegoUndefined {
@@ -179,6 +178,9 @@ func AstWithOpts(x interface{}, opts Opts) ([]byte, error) {
o.contains = true
}
+memberRef := ast.Member.Ref()
+memberWithKeyRef := ast.MemberWithKey.Ref()
// Preprocess the AST. Set any required defaults and calculate
// values required for printing the formatted output.
ast.WalkNodes(x, func(x ast.Node) bool {
@@ -192,7 +194,7 @@ func AstWithOpts(x interface{}, opts Opts) ([]byte, error) {
case *ast.Expr:
switch {
-case n.IsCall() && ast.Member.Ref().Equal(n.Operator()) || ast.MemberWithKey.Ref().Equal(n.Operator()):
+case n.IsCall() && memberRef.Equal(n.Operator()) || memberWithKeyRef.Equal(n.Operator()):
extraFutureKeywordImports["in"] = struct{}{}
case n.IsEvery():
extraFutureKeywordImports["every"] = struct{}{}
@@ -421,7 +423,7 @@ func (w *writer) writePackage(pkg *ast.Package, comments []*ast.Comment) []*ast.
}
func (w *writer) writeComments(comments []*ast.Comment) {
-for i := 0; i < len(comments); i++ {
+for i := range comments {
if i > 0 && locCmp(comments[i], comments[i-1]) > 1 {
w.blankLine()
}
@@ -438,6 +440,8 @@ func (w *writer) writeRules(rules []*ast.Rule, comments []*ast.Comment) []*ast.C
return comments
}
+var expandedConst = ast.NewBody(ast.NewExpr(ast.InternedBooleanTerm(true)))
func (w *writer) writeRule(rule *ast.Rule, isElse bool, comments []*ast.Comment) []*ast.Comment {
if rule == nil {
return comments
@@ -455,7 +459,7 @@ func (w *writer) writeRule(rule *ast.Rule, isElse bool, comments []*ast.Comment)
// `foo = {"a": "b"} { true }` in the AST. We want to preserve that notation
// in the formatted code instead of expanding the bodies into rules, so we
// pretend that the rule has no body in this case.
-isExpandedConst := rule.Body.Equal(ast.NewBody(ast.NewExpr(ast.BooleanTerm(true)))) && rule.Else == nil
+isExpandedConst := rule.Body.Equal(expandedConst) && rule.Else == nil
comments = w.writeHead(rule.Head, rule.Default, isExpandedConst, comments)
@@ -508,6 +512,8 @@ func (w *writer) writeRule(rule *ast.Rule, isElse bool, comments []*ast.Comment)
return comments
}
+var elseVar ast.Value = ast.Var("else")
func (w *writer) writeElse(rule *ast.Rule, comments []*ast.Comment) []*ast.Comment {
// If there was nothing else on the line before the "else" starts
// then preserve this style of else block, otherwise it will be
@@ -554,7 +560,7 @@ func (w *writer) writeElse(rule *ast.Rule, comments []*ast.Comment) []*ast.Comme
rule.Else.Head.Name = "else" // NOTE(sr): whaaat
-elseHeadReference := ast.VarTerm("else") // construct a reference for the term
+elseHeadReference := ast.NewTerm(elseVar) // construct a reference for the term
elseHeadReference.Location = rule.Else.Head.Location // and set the location to match the rule location
rule.Else.Head.Reference = ast.Ref{elseHeadReference}
@@ -612,7 +618,7 @@ func (w *writer) writeHead(head *ast.Head, isDefault, isExpandedConst bool, comm
}
if head.Value != nil &&
-(head.Key != nil || ast.Compare(head.Value, ast.BooleanTerm(true)) != 0 || isExpandedConst || isDefault) {
+(head.Key != nil || !ast.InternedBooleanTerm(true).Equal(head.Value) || isExpandedConst || isDefault) {
// in rego v1, explicitly print value for ref-head constants that aren't partial set assignments, e.g.:
// * a -> parser error, won't reach here
@@ -623,7 +629,7 @@ func (w *writer) writeHead(head *ast.Head, isDefault, isExpandedConst bool, comm
if head.Location == head.Value.Location &&
head.Name != "else" &&
-ast.Compare(head.Value, ast.BooleanTerm(true)) == 0 &&
+ast.InternedBooleanTerm(true).Equal(head.Value) &&
!isRegoV1RefConst {
// If the value location is the same as the location of the head,
// we know that the value is generated, i.e. f(1)
@@ -1115,11 +1121,7 @@ func (w *writer) writeImports(imports []*ast.Import, comments []*ast.Comment) []
comments = w.insertComments(comments, group[0].Loc())
// Sort imports within a newline grouping.
-sort.Slice(group, func(i, j int) bool {
-a := group[i]
-b := group[j]
-return a.Compare(b) < 0
-})
+slices.SortFunc(group, (*ast.Import).Compare)
for _, i := range group {
w.startLine()
w.writeImport(i)
@@ -1277,9 +1279,8 @@ func groupIterable(elements []interface{}, last *ast.Location) [][]interface{} {
return [][]interface{}{elements}
}
}
-sort.Slice(elements, func(i, j int) bool {
-return locLess(elements[i], elements[j])
-})
+slices.SortFunc(elements, locCmp)
var lines [][]interface{}
cur := make([]interface{}, 0, len(elements))
@@ -1351,7 +1352,30 @@ func groupImports(imports []*ast.Import) [][]*ast.Import {
return groups
}
-func partitionComments(comments []*ast.Comment, l *ast.Location) (before []*ast.Comment, at *ast.Comment, after []*ast.Comment) {
+func partitionComments(comments []*ast.Comment, l *ast.Location) ([]*ast.Comment, *ast.Comment, []*ast.Comment) {
+if len(comments) == 0 {
+return nil, nil, nil
+}
+numBefore, numAfter := 0, 0
+for _, c := range comments {
+switch cmp := c.Location.Row - l.Row; {
+case cmp < 0:
+numBefore++
+case cmp > 0:
+numAfter++
+}
+}
+if numAfter == len(comments) {
+return nil, nil, comments
+}
+var at *ast.Comment
+before := make([]*ast.Comment, 0, numBefore)
+after := comments[0 : 0 : len(comments)-numBefore]
for _, c := range comments {
switch cmp := c.Location.Row - l.Row; {
case cmp < 0:
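The rewritten partitionComments preallocates via a full slice expression, which is easy to misread. A small illustration with plain strings of what s[low:high:max] does:

```go
package main

import "fmt"

func main() {
	comments := []string{"c1", "c2", "c3", "c4"}
	// s[low:high:max] yields len 0 and cap max-low, aliasing the original
	// backing array. Appends reuse that storage without allocating, which
	// is safe in partitionComments because a slot is only overwritten
	// after the loop has already read it.
	after := comments[0:0:3]
	fmt.Println(len(after), cap(after)) // 0 3
	after = append(after, comments[3])
	fmt.Println(after, comments) // [c4] [c4 c2 c3 c4]
}
```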
@@ -1430,6 +1454,8 @@ func getLoc(x interface{}) *ast.Location {
}
}
+var negativeRow = &ast.Location{Row: -1}
func closingLoc(skipOpen, skipClose, openChar, closeChar byte, loc *ast.Location) *ast.Location {
i, offset := 0, 0
@@ -1445,14 +1471,14 @@ func closingLoc(skipOpen, skipClose, openChar, closeChar byte, loc *ast.Location
}
if i >= len(loc.Text) {
-return &ast.Location{Row: -1}
+return negativeRow
}
state := 1
for state > 0 {
i++
if i >= len(loc.Text) {
-return &ast.Location{Row: -1}
+return negativeRow
}
switch loc.Text[i] {
@@ -1500,7 +1526,7 @@ func skipPast(openChar, closeChar byte, loc *ast.Location) (int, int) {
// startLine begins a line with the current indentation level.
func (w *writer) startLine() {
w.inline = true
-for i := 0; i < w.level; i++ {
+for range w.level {
w.write(w.indent)
}
}
@@ -1636,7 +1662,7 @@ func ArityFormatMismatchError(operands []*ast.Term, operator string, loc *ast.Lo
}
have := make([]string, len(operands))
-for i := 0; i < len(operands); i++ {
+for i := range operands {
have[i] = ast.ValueName(operands[i].Value)
}
err := ast.NewError(ast.TypeErr, loc, "%s: %s", operator, "arity mismatch")
@@ -1650,8 +1676,8 @@ func ArityFormatMismatchError(operands []*ast.Term, operator string, loc *ast.Lo
// Lines returns the string representation of the detail.
func (d *ArityFormatErrDetail) Lines() []string {
return []string{
"have: " + "(" + strings.Join(d.Have, ",") + ")",
"want: " + "(" + strings.Join(d.Want, ",") + ")",
"have: (" + strings.Join(d.Have, ",") + ")",
"want: (" + strings.Join(d.Want, ",") + ")",
}
}
@@ -1664,10 +1690,12 @@ func moduleIsRegoV1Compatible(m *ast.Module) bool {
return false
}
+var v1StringTerm = ast.StringTerm("v1")
// isRegoV1Compatible returns true if the passed *ast.Import is `rego.v1`
func isRegoV1Compatible(imp *ast.Import) bool {
path := imp.Path.Value.(ast.Ref)
return len(path) == 2 &&
ast.RegoRootDocument.Equal(path[0]) &&
-path[1].Equal(ast.StringTerm("v1"))
+path[1].Equal(v1StringTerm)
}
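expandedConst, elseVar, negativeRow and v1StringTerm all apply one pattern: hoist an invariant value to package scope so it is built once rather than on every call. Schematically (term here is a stand-in, not ast.Term):

```go
package main

import "fmt"

type term struct{ s string }

func (t term) equal(o term) bool { return t.s == o.s }

// Built once at package init rather than per isV1 call.
var v1Term = term{"v1"}

func isV1(t term) bool { return t.equal(v1Term) }

func main() {
	fmt.Println(isV1(term{"v1"}), isV1(term{"v0"})) // true false
}
```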


@@ -8,7 +8,7 @@ package metrics
import (
"encoding/json"
"fmt"
"sort"
"slices"
"strings"
"sync"
"sync/atomic"
@@ -94,8 +94,8 @@ func (m *metrics) String() string {
})
}
-sort.Slice(sorted, func(i, j int) bool {
-return sorted[i].Key < sorted[j].Key
+slices.SortFunc(sorted, func(a, b metric) int {
+return strings.Compare(a.Key, b.Key)
})
buf := make([]string, len(sorted))
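sort.Slice's index-based closures give way to slices.SortFunc with a three-way comparator, as in the metrics sort above:

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

type metric struct{ Key, Value string }

func main() {
	sorted := []metric{{"b", "2"}, {"a", "1"}}
	// The comparator receives elements, not indices, and returns <0, 0 or >0.
	slices.SortFunc(sorted, func(a, b metric) int {
		return strings.Compare(a.Key, b.Key)
	})
	fmt.Println(sorted) // [{a 1} {b 2}]
}
```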


@@ -448,6 +448,11 @@ func New(raw []byte, id string, store storage.Store, opts ...func(*Manager)) (*M
f(m)
}
+if m.parserOptions.RegoVersion == ast.RegoUndefined {
+// Default to v1 if rego-version is not set through options
+m.parserOptions.RegoVersion = ast.DefaultRegoVersion
+}
if m.logger == nil {
m.logger = logging.Get()
}
@@ -480,13 +485,7 @@ func New(raw []byte, id string, store storage.Store, opts ...func(*Manager)) (*M
return nil, err
}
-serviceOpts := cfg.ServiceOptions{
-Raw: parsedConfig.Services,
-AuthPlugin: m.AuthPlugin,
-Keys: m.keys,
-Logger: m.logger,
-DistributedTacingOpts: m.distributedTacingOpts,
-}
+serviceOpts := m.DefaultServiceOpts(parsedConfig)
m.services, err = cfg.ParseServicesConfig(serviceOpts)
if err != nil {
@@ -502,8 +501,8 @@ func New(raw []byte, id string, store storage.Store, opts ...func(*Manager)) (*M
m.reporter.RegisterGatherer("min_compatible_version", func(_ context.Context) (any, error) {
var minimumCompatibleVersion string
-if m.compiler != nil && m.compiler.Required != nil {
-minimumCompatibleVersion, _ = m.compiler.Required.MinimumCompatibleVersion()
+if c := m.GetCompiler(); c != nil && c.Required != nil {
+minimumCompatibleVersion, _ = c.Required.MinimumCompatibleVersion()
}
return minimumCompatibleVersion, nil
})
@@ -755,14 +754,19 @@ func (m *Manager) Stop(ctx context.Context) {
}
}
-// Reconfigure updates the configuration on the manager.
-func (m *Manager) Reconfigure(config *config.Config) error {
-opts := cfg.ServiceOptions{
+func (m *Manager) DefaultServiceOpts(config *config.Config) cfg.ServiceOptions {
+return cfg.ServiceOptions{
Raw: config.Services,
AuthPlugin: m.AuthPlugin,
Logger: m.logger,
Keys: m.keys,
DistributedTacingOpts: m.distributedTacingOpts,
}
+}
+// Reconfigure updates the configuration on the manager.
+func (m *Manager) Reconfigure(config *config.Config) error {
+opts := m.DefaultServiceOpts(config)
keys, err := keys.ParseKeysConfig(config.Keys)
if err != nil {
@@ -799,7 +803,7 @@ func (m *Manager) Reconfigure(config *config.Config) error {
m.Config = config
m.interQueryBuiltinCacheConfig = interQueryBuiltinCacheConfig
-for name, client := range services {
+for name, client := range services { //nolint:gocritic
m.services[name] = client
}
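Extracting DefaultServiceOpts removes the cfg.ServiceOptions literal that New and Reconfigure previously each built by hand. The shape of the refactor, reduced to placeholder types:

```go
package main

import "fmt"

type config struct{ services []string }

type serviceOptions struct {
	raw    []string
	logger string
}

type manager struct{ logger string }

// One method now owns the config-to-options mapping; both the
// constructor and the reconfigure path call it.
func (m *manager) defaultServiceOpts(c *config) serviceOptions {
	return serviceOptions{raw: c.services, logger: m.logger}
}

func main() {
	m := &manager{logger: "std"}
	fmt.Println(m.defaultServiceOpts(&config{services: []string{"s1"}}))
}
```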


@@ -126,10 +126,14 @@ type bearerAuthPlugin struct {
// encode is set to true for the OCIDownloader because
// it expects tokens in plain text but needs them in base64.
encode bool
+logger logging.Logger
}
func (ap *bearerAuthPlugin) NewClient(c Config) (*http.Client, error) {
t, err := DefaultTLSConfig(c)
+ap.logger = c.logger
if err != nil {
return nil, err
}
@@ -153,6 +157,9 @@ func (ap *bearerAuthPlugin) NewClient(c Config) (*http.Client, error) {
func (ap *bearerAuthPlugin) Prepare(req *http.Request) error {
token := ap.Token
+if ap.logger == nil {
+ap.logger = logging.Get()
+}
if ap.TokenPath != "" {
bytes, err := os.ReadFile(ap.TokenPath)
@@ -166,7 +173,12 @@ func (ap *bearerAuthPlugin) Prepare(req *http.Request) error {
token = base64.StdEncoding.EncodeToString([]byte(token))
}
req.Header.Add("Authorization", fmt.Sprintf("%v %v", ap.Scheme, token))
if req.Response != nil && (req.Response.StatusCode == http.StatusPermanentRedirect || req.Response.StatusCode == http.StatusTemporaryRedirect) {
ap.logger.Debug("not attaching authorization header as the response contains a redirect")
} else {
ap.logger.Debug("attaching authorization header")
req.Header.Add("Authorization", fmt.Sprintf("%v %v", ap.Scheme, token))
}
return nil
}
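The bearer plugin now withholds credentials when the outgoing request is a redirect follow-up. A self-contained sketch of that check (prepare stands in for the plugin's Prepare method):

```go
package main

import (
	"fmt"
	"net/http"
)

// prepare attaches the Authorization header unless the request carries a
// 307/308 response, i.e. it is being replayed toward a redirect target.
func prepare(req *http.Request, scheme, token string) {
	if req.Response != nil &&
		(req.Response.StatusCode == http.StatusPermanentRedirect ||
			req.Response.StatusCode == http.StatusTemporaryRedirect) {
		return // don't leak credentials across a redirect
	}
	req.Header.Add("Authorization", fmt.Sprintf("%v %v", scheme, token))
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
	prepare(req, "Bearer", "secret")
	fmt.Println(req.Header.Get("Authorization")) // Bearer secret
}
```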
@@ -194,7 +206,7 @@ func convertSignatureToBase64(alg string, der []byte) (string, error) {
return signatureData, nil
}
-func pointsFromDER(der []byte) (R, S *big.Int, err error) {
+func pointsFromDER(der []byte) (R, S *big.Int, err error) { //nolint:gocritic
R, S = &big.Int{}, &big.Int{}
data := asn1.RawValue{}
if _, err := asn1.Unmarshal(der, &data); err != nil {
@@ -382,12 +394,7 @@ func (ap *oauth2ClientCredentialsAuthPlugin) SignWithKMS(ctx context.Context, pa
encodedHdr := base64.RawURLEncoding.EncodeToString(hdrBuf)
encodedPayload := base64.RawURLEncoding.EncodeToString(payload)
-input := strings.Join(
-[]string{
-encodedHdr,
-encodedPayload,
-}, ".",
-)
+input := encodedHdr + "." + encodedPayload
digest, err := messageDigest([]byte(input), ap.AWSKmsKey.Algorithm)
if err != nil {
return nil, err
@@ -616,7 +623,7 @@ func (ap *oauth2ClientCredentialsAuthPlugin) requestToken(ctx context.Context) (
return nil, err
}
-if strings.ToLower(tokenResponse.TokenType) != "bearer" {
+if !strings.EqualFold(tokenResponse.TokenType, "bearer") {
return nil, errors.New("unknown token type returned from token endpoint")
}


@@ -678,7 +678,7 @@ func (ap *ecrAuthPlugin) Prepare(r *http.Request) error {
ap.logger.Debug("Signing request with ECR authorization token")
r.Header.Set("Authorization", fmt.Sprintf("Basic %s", ap.token.AuthorizationToken))
r.Header.Set("Authorization", "Basic "+ap.token.AuthorizationToken)
return nil
}


@@ -12,6 +12,7 @@ import (
"errors"
"fmt"
"io"
"maps"
"net/http"
"net/http/httputil"
"reflect"
@@ -94,7 +95,7 @@ func (c *Config) AuthPlugin(lookup AuthPluginLookupFunc) (HTTPAuthPlugin, error)
}
// reflection avoids need for this code to change as auth plugins are added
s := reflect.ValueOf(c.Credentials)
-for i := 0; i < s.NumField(); i++ {
+for i := range s.NumField() {
if s.Field(i).IsNil() {
continue
}
@@ -293,7 +294,7 @@ func (c Client) Do(ctx context.Context, method, path string) (*http.Response, er
}
url := c.config.URL + "/" + path
-req, err := http.NewRequest(method, url, body)
+req, err := http.NewRequestWithContext(ctx, method, url, body)
if err != nil {
return nil, err
}
@@ -303,23 +304,16 @@ func (c Client) Do(ctx context.Context, method, path string) (*http.Response, er
}
// Copy custom headers from config.
-for key, value := range c.config.Headers {
-headers[key] = value
-}
+maps.Copy(headers, c.config.Headers)
// Overwrite with headers set directly on client.
-for key, value := range c.headers {
-headers[key] = value
-}
+maps.Copy(headers, c.headers)
for key, value := range headers {
req.Header.Add(key, value)
}
-req = req.WithContext(ctx)
-err = c.config.authPrepare(req, c.authPluginLookup)
-if err != nil {
+if err = c.config.authPrepare(req, c.authPluginLookup); err != nil {
return nil, err
}
@@ -347,7 +341,7 @@ func (c Client) Do(ctx context.Context, method, path string) (*http.Response, er
return nil, err
}
-if len(string(dump)) < defaultResponseSizeLimitBytes {
+if len(dump) < defaultResponseSizeLimitBytes {
c.loggerFields["response"] = string(dump)
} else {
c.loggerFields["response"] = fmt.Sprintf("%v...", string(dump[:defaultResponseSizeLimitBytes]))


@@ -11,6 +11,7 @@ import (
"errors"
"fmt"
"io"
"maps"
"strings"
"time"
@@ -78,12 +79,8 @@ func (pr PartialResult) Rego(options ...func(*Rego)) *Rego {
r := New(options...)
// Propagate any custom builtins.
-for k, v := range pr.builtinDecls {
-r.builtinDecls[k] = v
-}
-for k, v := range pr.builtinFuncs {
-r.builtinFuncs[k] = v
-}
+maps.Copy(r.builtinDecls, pr.builtinDecls)
+maps.Copy(r.builtinFuncs, pr.builtinFuncs)
return r
}
@@ -128,6 +125,7 @@ type EvalContext struct {
capabilities *ast.Capabilities
strictBuiltinErrors bool
virtualCache topdown.VirtualCache
+baseCache topdown.BaseCache
}
func (e *EvalContext) RawInput() *interface{} {
@@ -365,14 +363,22 @@ func EvalPrintHook(ph print.Hook) EvalOption {
}
}
-// EvalVirtualCache sets the topdown.VirtualCache to use for evaluation. This is
-// optional, and if not set, the default cache is used.
+// EvalVirtualCache sets the topdown.VirtualCache to use for evaluation.
+// This is optional, and if not set, the default cache is used.
func EvalVirtualCache(vc topdown.VirtualCache) EvalOption {
return func(e *EvalContext) {
e.virtualCache = vc
}
}
+// EvalBaseCache sets the topdown.BaseCache to use for evaluation.
+// This is optional, and if not set, the default cache is used.
+func EvalBaseCache(bc topdown.BaseCache) EvalOption {
+return func(e *EvalContext) {
+e.baseCache = bc
+}
+}
// EvalNondeterministicBuiltins causes non-deterministic builtins to be evalued
// during partial evaluation. This is needed to pull in external data, or validate
// a JWT, during PE, so that the result informs what queries are returned.
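EvalBaseCache slots into the existing functional-options machinery: every EvalOption is a closure over the EvalContext, so a new knob needs no signature changes elsewhere. A minimal sketch of the pattern (types simplified):

```go
package main

import "fmt"

type evalContext struct{ baseCache any }

// An option is just a function that mutates the context.
type evalOption func(*evalContext)

func evalBaseCache(bc any) evalOption {
	return func(e *evalContext) { e.baseCache = bc }
}

func main() {
	e := &evalContext{}
	for _, opt := range []evalOption{evalBaseCache("my-cache")} {
		opt(e)
	}
	fmt.Println(e.baseCache) // my-cache
}
```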
@@ -825,7 +831,7 @@ func memoize(decl *Function, bctx BuiltinContext, terms []*ast.Term, ifEmpty fun
// The term slice _may_ include an output term depending on how the caller
// referred to the built-in function. Only use the arguments as the cache
// key. Unification ensures we don't get false positive matches.
-for i := 0; i < decl.Decl.Arity(); i++ {
+for i := range decl.Decl.Arity() {
if _, err := b.WriteString(terms[i].String()); err != nil {
return nil, err
}
@@ -1570,7 +1576,7 @@ func (r *Rego) Compile(ctx context.Context, opts ...CompileOption) (*CompileResu
}
if tgt := r.targetPlugin(r.target); tgt != nil {
return nil, fmt.Errorf("unsupported for rego target plugins")
return nil, errors.New("unsupported for rego target plugins")
}
return r.compileWasm(modules, queries, compileQueryType) // TODO(sr) control flow is funky here
@@ -1630,10 +1636,9 @@ func WithNoInline(paths []string) PrepareOption {
func WithBuiltinFuncs(bis map[string]*topdown.Builtin) PrepareOption {
return func(p *PrepareConfig) {
if p.builtinFuncs == nil {
-p.builtinFuncs = make(map[string]*topdown.Builtin, len(bis))
-}
-for k, v := range bis {
-p.builtinFuncs[k] = v
+p.builtinFuncs = maps.Clone(bis)
+} else {
+maps.Copy(p.builtinFuncs, bis)
}
}
}
@@ -1648,7 +1653,7 @@ func (p *PrepareConfig) BuiltinFuncs() map[string]*topdown.Builtin {
// of evaluating them.
func (r *Rego) PrepareForEval(ctx context.Context, opts ...PrepareOption) (PreparedEvalQuery, error) {
if !r.hasQuery() {
-return PreparedEvalQuery{}, fmt.Errorf("cannot evaluate empty query")
+return PreparedEvalQuery{}, errors.New("cannot evaluate empty query")
}
pCfg := &PrepareConfig{}
@@ -1702,7 +1707,7 @@ func (r *Rego) PrepareForEval(ctx context.Context, opts ...PrepareOption) (Prepa
if r.hasWasmModule() {
_ = txnClose(ctx, err) // Ignore error
return PreparedEvalQuery{}, fmt.Errorf("wasm target not supported")
return PreparedEvalQuery{}, errors.New("wasm target not supported")
}
var modules []*ast.Module
@@ -1767,7 +1772,7 @@ func (r *Rego) PrepareForEval(ctx context.Context, opts ...PrepareOption) (Prepa
// of partially evaluating them.
func (r *Rego) PrepareForPartial(ctx context.Context, opts ...PrepareOption) (PreparedPartialQuery, error) {
if !r.hasQuery() {
-return PreparedPartialQuery{}, fmt.Errorf("cannot evaluate empty query")
+return PreparedPartialQuery{}, errors.New("cannot evaluate empty query")
}
pCfg := &PrepareConfig{}
@@ -2183,7 +2188,8 @@ func (r *Rego) eval(ctx context.Context, ectx *EvalContext) (ResultSet, error) {
WithSeed(ectx.seed).
WithPrintHook(ectx.printHook).
WithDistributedTracingOpts(r.distributedTacingOpts).
-WithVirtualCache(ectx.virtualCache)
+WithVirtualCache(ectx.virtualCache).
+WithBaseCache(ectx.baseCache)
if !ectx.time.IsZero() {
q = q.WithTime(ectx.time)
@@ -2270,7 +2276,7 @@ func (r *Rego) evalWasm(ctx context.Context, ectx *EvalContext) (ResultSet, erro
func (r *Rego) valueToQueryResult(res ast.Value, ectx *EvalContext) (ResultSet, error) {
resultSet, ok := res.(ast.Set)
if !ok {
return nil, fmt.Errorf("illegal result type")
return nil, errors.New("illegal result type")
}
if resultSet.Len() == 0 {
@@ -2281,7 +2287,7 @@ func (r *Rego) valueToQueryResult(res ast.Value, ectx *EvalContext) (ResultSet,
err := resultSet.Iter(func(term *ast.Term) error {
obj, ok := term.Value.(ast.Object)
if !ok {
return fmt.Errorf("illegal result type")
return errors.New("illegal result type")
}
qr := topdown.QueryResult{}
obj.Foreach(func(k, v *ast.Term) {
@@ -2391,7 +2397,7 @@ func (r *Rego) partialResult(ctx context.Context, pCfg *PrepareConfig) (PartialR
module, err := ast.ParseModuleWithOpts(id, "package "+ectx.partialNamespace,
ast.ParserOptions{RegoVersion: r.regoVersion})
if err != nil {
-return PartialResult{}, fmt.Errorf("bad partial namespace")
+return PartialResult{}, errors.New("bad partial namespace")
}
module.Rules = make([]*ast.Rule, len(pq.Queries))
@@ -2611,12 +2617,12 @@ func (r *Rego) rewriteQueryToCaptureValue(_ ast.QueryCompiler, query ast.Body) (
func (r *Rego) rewriteQueryForPartialEval(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) {
if len(query) != 1 {
return nil, fmt.Errorf("partial evaluation requires single ref (not multiple expressions)")
return nil, errors.New("partial evaluation requires single ref (not multiple expressions)")
}
term, ok := query[0].Terms.(*ast.Term)
if !ok {
return nil, fmt.Errorf("partial evaluation requires ref (not expression)")
return nil, errors.New("partial evaluation requires ref (not expression)")
}
ref, ok := term.Value.(ast.Ref)
@@ -2625,7 +2631,7 @@ func (r *Rego) rewriteQueryForPartialEval(_ ast.QueryCompiler, query ast.Body) (
}
if !ref.IsGround() {
return nil, fmt.Errorf("partial evaluation requires ground ref")
return nil, errors.New("partial evaluation requires ground ref")
}
return ast.NewBody(ast.Equality.Expr(ast.Wildcard, term)), nil
@@ -2844,17 +2850,26 @@ func parseStringsToRefs(s []string) ([]ast.Ref, error) {
func finishFunction(name string, bctx topdown.BuiltinContext, result *ast.Term, err error, iter func(*ast.Term) error) error {
if err != nil {
var e *HaltError
+sb := strings.Builder{}
if errors.As(err, &e) {
+sb.Grow(len(name) + len(e.Error()) + 2)
+sb.WriteString(name)
+sb.WriteString(": ")
+sb.WriteString(e.Error())
tdErr := &topdown.Error{
Code: topdown.BuiltinErr,
Message: fmt.Sprintf("%v: %v", name, e.Error()),
Message: sb.String(),
Location: bctx.Location,
}
return topdown.Halt{Err: tdErr.Wrap(e)}
}
+sb.Grow(len(name) + len(err.Error()) + 2)
+sb.WriteString(name)
+sb.WriteString(": ")
+sb.WriteString(err.Error())
tdErr := &topdown.Error{
Code: topdown.BuiltinErr,
Message: fmt.Sprintf("%v: %v", name, err.Error()),
Message: sb.String(),
Location: bctx.Location,
}
return tdErr.Wrap(err)
@@ -2895,14 +2910,8 @@ func (r *Rego) planQuery(queries []ast.Body, evalQueryType queryType) (*ir.Polic
}
decls := make(map[string]*ast.Builtin, len(r.builtinDecls)+len(ast.BuiltinMap))
-for k, v := range ast.BuiltinMap {
-decls[k] = v
-}
-for k, v := range r.builtinDecls {
-decls[k] = v
-}
+maps.Copy(decls, ast.BuiltinMap)
+maps.Copy(decls, r.builtinDecls)
const queryName = "eval" // NOTE(tsandall): the query name is arbitrary


@@ -6,6 +6,7 @@ package wasm
import (
"context"
"errors"
"fmt"
"strconv"
@@ -144,7 +145,7 @@ func getResult(evalResult *opa.Result) (ast.Value, error) {
resultSet, ok := parsed.Value.(ast.Set)
if !ok {
return nil, fmt.Errorf("illegal result type")
return nil, errors.New("illegal result type")
}
if resultSet.Len() == 0 {
@@ -152,14 +153,14 @@ func getResult(evalResult *opa.Result) (ast.Value, error) {
}
if resultSet.Len() > 1 {
return nil, fmt.Errorf("illegal result type")
return nil, errors.New("illegal result type")
}
var obj ast.Object
err = resultSet.Iter(func(term *ast.Term) error {
obj, ok = term.Value.(ast.Object)
if !ok || obj.Len() != 1 {
return fmt.Errorf("illegal result type")
return errors.New("illegal result type")
}
return nil
})


@@ -56,8 +56,7 @@ func (err *Error) Error() string {
// IsNotFound returns true if this error is a NotFoundErr.
func IsNotFound(err error) bool {
-switch err := err.(type) {
-case *Error:
+if err, ok := err.(*Error); ok {
return err.Code == NotFoundErr
}
return false


@@ -101,8 +101,7 @@ func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, i
return nil, invalidPatchError("%v: invalid patch path", path)
}
-cpy := data.Copy()
-cpy = cpy.Append(ast.NewTerm(value))
+cpy := data.Append(ast.NewTerm(value))
return &updateAST{path[:len(path)-1], false, cpy}, nil
}
@@ -114,7 +113,7 @@ func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, i
switch op {
case storage.AddOp:
var results []*ast.Term
-for i := 0; i < data.Len(); i++ {
+for i := range data.Len() {
if i == pos {
results = append(results, ast.NewTerm(value))
}
@@ -125,7 +124,7 @@ func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, i
case storage.RemoveOp:
var results []*ast.Term
-for i := 0; i < data.Len(); i++ {
+for i := range data.Len() {
if i != pos {
results = append(results, data.Elem(i))
}
@@ -134,7 +133,7 @@ func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, i
default:
var results []*ast.Term
-for i := 0; i < data.Len(); i++ {
+for i := range data.Len() {
if i == pos {
results = append(results, ast.NewTerm(value))
} else {
@@ -296,7 +295,7 @@ func removeInAstArray(arr *ast.Array, path storage.Path) (ast.Value, error) {
if len(path) == 1 {
var elems []*ast.Term
// Note: possibly expensive operation for large data.
-for i := 0; i < arr.Len(); i++ {
+for i := range arr.Len() {
if i == idx {
continue
}


@@ -185,7 +185,9 @@ func (db *store) Truncate(ctx context.Context, txn storage.Transaction, params s
}
}
-if err != nil && err != io.EOF {
+// err is known not to be nil at this point, as it getting assigned
+// a non-nil value is the only way the loop above can exit.
+if err != io.EOF {
return err
}
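The new comment in Truncate records why the err != nil guard could go: the read loop's only exit assigns a non-nil error. The same pattern in miniature:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

func main() {
	r := strings.NewReader("x")
	buf := make([]byte, 1)
	var err error
	for {
		if _, err = r.Read(buf); err != nil {
			break // the only exit, so err is non-nil below
		}
	}
	if !errors.Is(err, io.EOF) {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Println("done")
}
```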
@@ -442,7 +444,7 @@ func lookup(path storage.Path, data map[string]interface{}) (interface{}, bool)
if len(path) == 0 {
return data, true
}
-for i := 0; i < len(path)-1; i++ {
+for i := range len(path) - 1 {
value, ok := data[path[i]]
if !ok {
return nil, false


@@ -20,7 +20,11 @@ func NewNotFoundError(path storage.Path) *storage.Error {
}
func NewNotFoundErrorWithHint(path storage.Path, hint string) *storage.Error {
-return NewNotFoundErrorf("%v: %v", path.String(), hint)
+message := path.String() + ": " + hint
+return &storage.Error{
+Code: storage.NotFoundErr,
+Message: message,
+}
}
func NewNotFoundErrorf(f string, a ...interface{}) *storage.Error {


@@ -43,8 +43,15 @@ func ValuePtr(data ast.Value, path storage.Path) (ast.Value, error) {
key := path[i]
switch curr := node.(type) {
case ast.Object:
-keyTerm := ast.StringTerm(key)
+// This term is only created for the lookup, which is not.. ideal.
+// By using the pool, we can at least avoid allocating the term itself,
+// while still having to pay 1 allocation for the value. A better solution
+// would be dynamically interned string terms.
+keyTerm := ast.TermPtrPool.Get()
+keyTerm.Value = ast.String(key)
val := curr.Get(keyTerm)
+ast.TermPtrPool.Put(keyTerm)
if val == nil {
return nil, errors.NewNotFoundError(path)
}
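The pooled lookup key in ValuePtr trades a per-call term allocation for pool traffic. The idea, sketched with sync.Pool standing in for ast.TermPtrPool and simplified types:

```go
package main

import (
	"fmt"
	"sync"
)

type term struct{ value string }

// object's Get takes a *term key, loosely mirroring ast.Object in OPA.
type object map[string]int

func (o object) Get(t *term) (int, bool) { v, ok := o[t.value]; return v, ok }

var termPool = sync.Pool{New: func() any { return new(term) }}

// lookup wraps the key in a pooled term for the duration of the call,
// avoiding a fresh term allocation per lookup.
func lookup(o object, key string) (int, bool) {
	t := termPool.Get().(*term)
	t.value = key
	v, ok := o.Get(t)
	termPool.Put(t)
	return v, ok
}

func main() {
	fmt.Println(lookup(object{"a": 1}, "a")) // 1 true
}
```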


@@ -5,6 +5,7 @@
package storage
import (
"errors"
"fmt"
"net/url"
"strconv"
@@ -50,7 +51,7 @@ func ParsePathEscaped(str string) (path Path, ok bool) {
func NewPathForRef(ref ast.Ref) (path Path, err error) {
if len(ref) == 0 {
return nil, fmt.Errorf("empty reference (indicates error in caller)")
return nil, errors.New("empty reference (indicates error in caller)")
}
if len(ref) == 1 {
@@ -84,7 +85,7 @@ func NewPathForRef(ref ast.Ref) (path Path, err error) {
// is less than other, 0 if p is equal to other, or 1 if p is greater than
// other.
func (p Path) Compare(other Path) (cmp int) {
-for i := 0; i < min(len(p), len(other)); i++ {
+for i := range min(len(p), len(other)) {
if cmp := strings.Compare(p[i], other[i]); cmp != 0 {
return cmp
}
@@ -132,11 +133,22 @@ func (p Path) Ref(head *ast.Term) (ref ast.Ref) {
}
func (p Path) String() string {
-buf := make([]string, len(p))
-for i := range buf {
-buf[i] = url.PathEscape(p[i])
+if len(p) == 0 {
+return "/"
}
return "/" + strings.Join(buf, "/")
l := 0
for i := range p {
l += len(p[i]) + 1
}
sb := strings.Builder{}
sb.Grow(l)
for i := range p {
sb.WriteByte('/')
sb.WriteString(url.PathEscape(p[i]))
}
return sb.String()
}
// MustParsePath returns a new Path for s. If s cannot be parsed, this function
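Path.String now sizes a strings.Builder from the unescaped lengths up front, replacing the intermediate []string and strings.Join. Reproduced as a standalone function:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func pathString(p []string) string {
	if len(p) == 0 {
		return "/"
	}
	l := 0
	for i := range p {
		l += len(p[i]) + 1 // one byte per '/' separator
	}
	sb := strings.Builder{}
	// Grow covers the common case; escaped characters may extend it.
	sb.Grow(l)
	for i := range p {
		sb.WriteByte('/')
		sb.WriteString(url.PathEscape(p[i]))
	}
	return sb.String()
}

func main() {
	fmt.Println(pathString([]string{"a", "b c"})) // /a/b%20c
}
```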


@@ -230,7 +230,7 @@ func builtinMember(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term)
case ast.Set:
return iter(ast.InternedBooleanTerm(c.Contains(containee)))
case *ast.Array:
-for i := 0; i < c.Len(); i++ {
+for i := range c.Len() {
if c.Elem(i).Value.Compare(containee.Value) == 0 {
return iter(ast.InternedBooleanTerm(true))
}


@@ -5,7 +5,7 @@
package topdown
import (
"fmt"
"errors"
"math/big"
"github.com/open-policy-agent/opa/v1/ast"
@@ -116,14 +116,14 @@ func arithMultiply(a, b *big.Float) (*big.Float, error) {
func arithDivide(a, b *big.Float) (*big.Float, error) {
i, acc := b.Int64()
if acc == big.Exact && i == 0 {
return nil, fmt.Errorf("divide by zero")
return nil, errors.New("divide by zero")
}
return new(big.Float).Quo(a, b), nil
}
func arithRem(a, b *big.Int) (*big.Int, error) {
if b.Int64() == 0 {
return nil, fmt.Errorf("modulo by zero")
return nil, errors.New("modulo by zero")
}
return new(big.Int).Rem(a, b), nil
}
@@ -210,7 +210,7 @@ func builtinRem(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err
if okx && oky && inSmallIntRange(x) && inSmallIntRange(y) {
if y == 0 {
return fmt.Errorf("modulo by zero")
return errors.New("modulo by zero")
}
return iter(ast.InternedIntNumberTerm(x % y))
@@ -220,7 +220,7 @@ func builtinRem(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err
op2, err2 := builtins.NumberToInt(n2)
if err1 != nil || err2 != nil {
return fmt.Errorf("modulo on floating-point number")
return errors.New("modulo on floating-point number")
}
i, err := arithRem(op1, op2)

View File

@@ -91,7 +91,7 @@ func builtinArrayReverse(_ BuiltinContext, operands []*ast.Term, iter func(*ast.
length := arr.Len()
reversedArr := make([]*ast.Term, length)
-for index := 0; index < length; index++ {
+for index := range length {
reversedArr[index] = arr.Elem(length - index - 1)
}


@@ -6,6 +6,7 @@ package topdown
import (
"fmt"
"strconv"
"strings"
"github.com/open-policy-agent/opa/v1/ast"
@@ -184,7 +185,7 @@ func (u *bindings) namespaceVar(v *ast.Term, caller *bindings) *ast.Term {
// Root documents (i.e., data, input) should never be namespaced because they
// are globally unique.
if !ast.RootDocumentNames.Contains(v) {
-return ast.NewTerm(ast.Var(string(name) + fmt.Sprint(u.id)))
+return ast.NewTerm(ast.Var(string(name) + strconv.FormatUint(u.id, 10)))
}
}
return v
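Replacing fmt.Sprint with strconv.FormatUint avoids routing a plain integer through reflection-based formatting. The same swap in isolation:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	var id uint64 = 42
	// strconv.FormatUint formats the integer directly, skipping
	// fmt.Sprint's interface{} handling, as namespaceVar now does.
	fmt.Println("v" + strconv.FormatUint(id, 10)) // v42
}
```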
@@ -313,12 +314,12 @@ func (b *bindingsArrayHashmap) Put(key *ast.Term, value value) {
if b.a == nil {
b.a = new([maxLinearScan]bindingArrayKeyValue)
} else if i := b.find(key); i >= 0 {
-(*b.a)[i].value = value
+b.a[i].value = value
return
}
if b.n < maxLinearScan {
-(*b.a)[b.n] = bindingArrayKeyValue{key, value}
+b.a[b.n] = bindingArrayKeyValue{key, value}
b.n++
return
}
@@ -341,7 +342,7 @@ func (b *bindingsArrayHashmap) Put(key *ast.Term, value value) {
func (b *bindingsArrayHashmap) Get(key *ast.Term) (value, bool) {
if b.m == nil {
if i := b.find(key); i >= 0 {
-return (*b.a)[i].value, true
+return b.a[i].value, true
}
return value{}, false
@@ -360,7 +361,7 @@ func (b *bindingsArrayHashmap) Delete(key *ast.Term) {
if i := b.find(key); i >= 0 {
n := b.n - 1
if i < n {
-(*b.a)[i] = (*b.a)[n]
+b.a[i] = b.a[n]
}
b.n = n
@@ -373,8 +374,8 @@ func (b *bindingsArrayHashmap) Delete(key *ast.Term) {
func (b *bindingsArrayHashmap) Iter(f func(k *ast.Term, v value) bool) {
if b.m == nil {
-for i := 0; i < b.n; i++ {
-if f((*b.a)[i].key, (*b.a)[i].value) {
+for i := range b.n {
+if f(b.a[i].key, b.a[i].value) {
return
}
}
@@ -390,8 +391,8 @@ func (b *bindingsArrayHashmap) Iter(f func(k *ast.Term, v value) bool) {
func (b *bindingsArrayHashmap) find(key *ast.Term) int {
v := key.Value.(ast.Var)
-for i := 0; i < b.n; i++ {
-if (*b.a)[i].key.Value.(ast.Var) == v {
+for i := range b.n {
+if b.a[i].key.Value.(ast.Var) == v {
return i
}
}


@@ -7,6 +7,7 @@ package builtins
import (
"encoding/json"
"errors"
"fmt"
"math/big"
"strings"
@@ -97,7 +98,7 @@ func (c *NDBCache) UnmarshalJSON(data []byte) error {
out[string(k.Value.(ast.String))] = obj
return nil
}
return fmt.Errorf("expected Object, got other Value type in conversion")
return errors.New("expected Object, got other Value type in conversion")
})
if err != nil {
return err
@@ -262,7 +263,7 @@ func NumberToInt(n ast.Number) (*big.Int, error) {
f := NumberToFloat(n)
r, accuracy := f.Int(nil)
if accuracy != big.Exact {
return nil, fmt.Errorf("illegal value")
return nil, errors.New("illegal value")
}
return r, nil
}
@@ -309,7 +310,7 @@ func RuneSliceOperand(x ast.Value, pos int) ([]rune, error) {
}
var f = make([]rune, a.Len())
-for k := 0; k < a.Len(); k++ {
+for k := range a.Len() {
b := a.Elem(k)
c, ok := b.Value.(ast.String)
if !ok {

Some files were not shown because too many files have changed in this diff.