Mirror of https://github.com/opencloud-eu/opencloud.git (synced 2025-12-25 15:19:48 -05:00)

Compare commits: localApiTe...fixTransla (24 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 903346a712 | |
| | 5d577930ae | |
| | c897ec321f | |
| | 9f91963036 | |
| | d772aea84a | |
| | b6db5f7677 | |
| | 747df432d0 | |
| | b56c74cfea | |
| | 3ecb89733d | |
| | fde6a96045 | |
| | 38d3c7b826 | |
| | 202b7cc7e6 | |
| | 7de08f90b3 | |
| | 8fd3c1fb10 | |
| | 322f395b39 | |
| | dfa782cccc | |
| | a000dd9612 | |
| | b0cc4e59fa | |
| | 314a79710c | |
| | 90cdca03f1 | |
| | f413c618e5 | |
| | 7cfc6eb429 | |
| | bb55aa8405 | |
| | 0834c15c87 | |
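The mirror preserved only the SHA1 column; the author and date cells did not survive extraction. A hedged way to recover them from a local clone — this assumes the table lists commits newest-first, i.e. 0834c15c87 is the oldest and 903346a712 the newest of the 24:

```sh
git clone https://github.com/opencloud-eu/opencloud.git
cd opencloud
# ~1 widens the range so the oldest commit itself is included
git log --format='%h  %an  %ad  %s' 0834c15c87~1..903346a712
```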
.woodpecker.star (284 changed lines)

@@ -24,7 +24,7 @@ OC_CI_BAZEL_BUILDIFIER = "owncloudci/bazel-buildifier:latest"
  OC_CI_CLAMAVD = "owncloudci/clamavd"
  OC_CI_DRONE_ANSIBLE = "owncloudci/drone-ansible:latest"
  OC_CI_DRONE_SKIP_PIPELINE = "owncloudci/drone-skip-pipeline"
- OC_CI_GOLANG = "owncloudci/golang:1.24"
+ OC_CI_GOLANG = "docker.io/golang:1.24"
  OC_CI_NODEJS = "owncloudci/nodejs:%s"
  OC_CI_PHP = "owncloudci/php:%s"
  OC_CI_WAIT_FOR = "owncloudci/wait-for:latest"
@@ -56,12 +56,12 @@ dirs = {
  "baseGo": "/go/src/github.com/opencloud-eu/opencloud",
  "gobinTar": "go-bin.tar.gz",
  "gobinTarPath": "/go/src/github.com/opencloud-eu/opencloud/go-bin.tar.gz",
- "opencloudConfig": "tests/config/drone/opencloud-config.json",
+ "opencloudConfig": "tests/config/woodpecker/opencloud-config.json",
  "ocis": "/woodpecker/src/github.com/opencloud-eu/opencloud/srv/app/tmp/ocis",
  "ocisRevaDataRoot": "/woodpecker/src/github.com/opencloud-eu/opencloud/srv/app/tmp/ocis/owncloud/data",
  "ocWrapper": "/woodpecker/src/github.com/opencloud-eu/opencloud/tests/ocwrapper",
- "bannedPasswordList": "tests/config/drone/banned-password-list.txt",
- "ocmProviders": "tests/config/drone/providers.json",
+ "bannedPasswordList": "tests/config/woodpecker/banned-password-list.txt",
+ "ocmProviders": "tests/config/woodpecker/providers.json",
  "opencloudBinPath": "opencloud/bin",
  "opencloudBin": "opencloud/bin/opencloud",
  "opencloudBinArtifact": "opencloud-binary-amd64",
@@ -78,13 +78,13 @@ OC_FED_DOMAIN = "%s:10200" % FED_OC_SERVER_NAME
  # configuration
  config = {
  "cs3ApiTests": {
- "skip": False,
+ "skip": True,
  },
  "wopiValidatorTests": {
- "skip": False,
+ "skip": True,
  },
  "k6LoadTests": {
- "skip": False,
+ "skip": True,
  },
  "localApiTests": {
  "basic": {
@@ -98,13 +98,13 @@ config = {
  "apiLocks",
  "apiActivities",
  ],
- "skip": False,
+ "skip": True,
  },
  "settings": {
  "suites": [
  "apiSettings",
  ],
- "skip": False,
+ "skip": True,
  "withRemotePhp": [True],
  "emailNeeded": True,
  "extraEnvironment": {
@@ -125,27 +125,27 @@ config = {
  "apiGraph",
  "apiServiceAvailability",
  ],
- "skip": False,
+ "skip": True,
  "withRemotePhp": [True],
  },
  "graphUserGroup": {
  "suites": [
  "apiGraphUserGroup",
  ],
- "skip": False,
+ "skip": True,
  "withRemotePhp": [True],
  },
  "spaces": {
  "suites": [
  "apiSpaces",
  ],
- "skip": False,
+ "skip": True,
  },
  "spacesShares": {
  "suites": [
  "apiSpacesShares",
  ],
- "skip": False,
+ "skip": True,
  },
  "spacesDavOperation": {
  "suites": [
@@ -157,13 +157,13 @@ config = {
  "suites": [
  "apiSearch1",
  ],
- "skip": False,
+ "skip": True,
  },
  "search2": {
  "suites": [
  "apiSearch2",
  ],
- "skip": False,
+ "skip": True,
  },
  "sharingNg": {
  "suites": [
@@ -171,23 +171,23 @@ config = {
  "apiSharingNg1",
  "apiSharingNg2",
  ],
- "skip": False,
+ "skip": True,
  },
  "sharingNgShareInvitation": {
  "suites": [
  "apiSharingNgShareInvitation",
  ],
- "skip": False,
+ "skip": True,
  },
  "sharingNgLinkShare": {
  "suites": [
  "apiSharingNgLinkSharePermission",
  "apiSharingNgLinkShareRoot",
  ],
- "skip": False,
+ "skip": True,
  },
  "accountsHashDifficulty": {
- "skip": False,
+ "skip": True,
  "suites": [
  "apiAccountsHashDifficulty",
  ],
@@ -197,7 +197,7 @@ config = {
  "suites": [
  "apiNotification",
  ],
- "skip": False,
+ "skip": True,
  "withRemotePhp": [True],
  "emailNeeded": True,
  "extraEnvironment": {
@@ -217,7 +217,7 @@ config = {
  "suites": [
  "apiAntivirus",
  ],
- "skip": False,
+ "skip": True,
  "antivirusNeeded": True,
  "extraServerEnvironment": {
  "ANTIVIRUS_SCANNER_TYPE": "clamav",
@@ -232,14 +232,14 @@ config = {
  "suites": [
  "apiSearchContent",
  ],
- "skip": False,
+ "skip": True,
  "tikaNeeded": True,
  },
  "ocm": {
  "suites": [
  "apiOcm",
  ],
- "skip": False,
+ "skip": True,
  "withRemotePhp": [True],
  "federationServer": True,
  "emailNeeded": True,
@@ -265,7 +265,7 @@ config = {
  "suites": [
  "apiCollaboration",
  ],
- "skip": False,
+ "skip": True,
  "collaborationServiceNeeded": True,
  "extraServerEnvironment": {
  "GATEWAY_GRPC_ADDR": "0.0.0.0:9142",
@@ -275,7 +275,7 @@ config = {
  "suites": [
  "apiAuthApp",
  ],
- "skip": False,
+ "skip": True,
  "withRemotePhp": [True],
  "extraServerEnvironment": {
  "OCIS_ADD_RUN_SERVICES": "auth-app",
@@ -286,7 +286,7 @@ config = {
  "suites": [
  "cliCommands",
  ],
- "skip": False,
+ "skip": True,
  "withRemotePhp": [True],
  "antivirusNeeded": True,
  "extraServerEnvironment": {
@@ -299,24 +299,24 @@ config = {
  },
  "apiTests": {
  "numberOfParts": 7,
- "skip": False,
+ "skip": True,
  "skipExceptParts": [],
  },
  "e2eTests": {
  "part": {
- "skip": False,
+ "skip": True,
  "totalParts": 4, # divide and run all suites in parts (divide pipelines)
  "xsuites": ["search", "app-provider", "oidc", "ocm"], # suites to skip
  },
  "search": {
- "skip": False,
+ "skip": True,
  "suites": ["search"], # suites to run
  "tikaNeeded": True,
  },
  },
  "e2eMultiService": {
  "testSuites": {
- "skip": False,
+ "skip": True,
  "suites": [
  "smoke",
  "shares",
@@ -433,7 +433,6 @@ def main(ctx):
  testOpencloudAndUploadResults(ctx) + \
  testPipelines(ctx)

- # testPipelines(ctx)
  # build_release_pipelines = \
  # dockerReleases(ctx) + \
  # binaryReleases(ctx)
@@ -511,7 +510,7 @@ def testPipelines(ctx):
  pipelines = []

  if config["litmus"]:
- pipelines += litmus(ctx, "ocis")
+ pipelines += litmus(ctx, "decomposed")

  if "skip" not in config["cs3ApiTests"] or not config["cs3ApiTests"]["skip"]:
  pipelines.append(cs3ApiTests(ctx, "ocis", "default"))
@@ -564,7 +563,7 @@ def checkGoBinCache():
  },
  },
  "commands": [
- "bash -x %s/tests/config/drone/check_go_bin_cache.sh %s %s" % (dirs["baseGo"], dirs["baseGo"], dirs["gobinTar"]),
+ "bash -x %s/tests/config/woodpecker/check_go_bin_cache.sh %s %s" % (dirs["baseGo"], dirs["baseGo"], dirs["gobinTar"]),
  ],
  }]

@@ -652,7 +651,7 @@ def testOpencloud(ctx):
  },
  "bucket": "cache",
  "source": "cache/**/*",
- "target": "%s/%s" % (repo_slug, ctx.build.commit + "-${DRONE_BUILD_NUMBER}"),
+ "target": "%s/%s" % (repo_slug, ctx.build.commit + "-${CI_PIPELINE_NUMBER}"),
  "path_style": True,
  "access_key": {
  "from_secret": "cache_s3_access_key",
@@ -940,7 +939,7 @@ def localApiTestPipeline(ctx):

  def localApiTests(ctx, name, suites, storage = "decomposed", extra_environment = {}, with_remote_php = False):
  test_dir = "%s/tests/acceptance" % dirs["base"]
- expected_failures_file = "%s/expected-failures-localAPI-on-%s-storage.md" % (test_dir, storage.upper())
+ expected_failures_file = "%s/expected-failures-localAPI-on-%s-storage.md" % (test_dir, storage)

  environment = {
  "TEST_SERVER_URL": OC_URL,
@@ -949,10 +948,10 @@ def localApiTests(ctx, name, suites, storage = "decomposed", extra_environment =
  "SEND_SCENARIO_LINE_REFERENCES": True,
  "STORAGE_DRIVER": storage,
  "BEHAT_SUITES": ",".join(suites),
- "BEHAT_FILTER_TAGS": "~@skip&&~@skipOnGraph&&~@skipOnOcis-%s-Storage" % ("OC" if storage == "owncloud" else "OCIS"),
+ "BEHAT_FILTER_TAGS": "~@skip&&~@skipOnGraph&&~@skipOnOpencloud-%s-Storage" % storage,
  "EXPECTED_FAILURES_FILE": expected_failures_file,
  "UPLOAD_DELETE_WAIT_TIME": "1" if storage == "owncloud" else 0,
- "OCIS_WRAPPER_URL": "http://%s:5200" % OC_SERVER_NAME,
+ "OC_WRAPPER_URL": "http://%s:5200" % OC_SERVER_NAME,
  "WITH_REMOTE_PHP": with_remote_php,
  "COLLABORATION_SERVICE_URL": "http://wopi-fakeoffice:9300",
  }
@@ -975,7 +974,7 @@ def cs3ApiTests(ctx, storage, accounts_hash_difficulty = 4):
  return {
  "name": "cs3ApiTests",
  "steps": restoreBuildArtifactCache(ctx, dirs["opencloudBinArtifact"], dirs["opencloudBinPath"]) +
- ocisServer(storage, accounts_hash_difficulty, [], [], "cs3api_validator") +
+ opencloudServer(storage, accounts_hash_difficulty, [], [], "cs3api_validator") +
  [
  {
  "name": "cs3ApiTests",
@@ -1029,7 +1028,7 @@ def wopiValidatorTests(ctx, storage, wopiServerType, accounts_hash_difficulty =
  "image": "cs3org/wopiserver:v10.4.0",
  "detach": True,
  "commands": [
- "cp %s/tests/config/drone/wopiserver.conf /etc/wopi/wopiserver.conf" % (dirs["base"]),
+ "cp %s/tests/config/woodpecker/wopiserver.conf /etc/wopi/wopiserver.conf" % (dirs["base"]),
  "echo 123 > /etc/wopi/wopisecret",
  "/app/wopiserver.py",
  ],
@@ -1079,7 +1078,7 @@ def wopiValidatorTests(ctx, storage, wopiServerType, accounts_hash_difficulty =
  "steps": restoreBuildArtifactCache(ctx, dirs["opencloudBinArtifact"], dirs["opencloudBinPath"]) +
  fakeOffice() +
  waitForServices("fake-office", ["fakeoffice:8080"]) +
- ocisServer(storage, accounts_hash_difficulty, deploy_type = "wopi_validator", extra_server_environment = extra_server_environment) +
+ opencloudServer(storage, accounts_hash_difficulty, deploy_type = "wopi_validator", extra_server_environment = extra_server_environment) +
  wopiServer +
  waitForServices("wopi-fakeoffice", ["wopi-fakeoffice:9300"]) +
  [
@@ -1088,10 +1087,10 @@ def wopiValidatorTests(ctx, storage, wopiServerType, accounts_hash_difficulty =
  "image": OC_CI_ALPINE,
  "environment": {},
  "commands": [
- "curl -v -X PUT '%s/remote.php/webdav/test.wopitest' -k --fail --retry-connrefused --retry 7 --retry-all-errors -u admin:admin -D headers.txt" % OCIS_URL,
+ "curl -v -X PUT '%s/remote.php/webdav/test.wopitest' -k --fail --retry-connrefused --retry 7 --retry-all-errors -u admin:admin -D headers.txt" % OC_URL,
  "cat headers.txt",
  "export FILE_ID=$(cat headers.txt | sed -n -e 's/^.*Oc-Fileid: //p')",
- "export URL=\"%s/app/open?app_name=FakeOffice&file_id=$FILE_ID\"" % OCIS_URL,
+ "export URL=\"%s/app/open?app_name=FakeOffice&file_id=$FILE_ID\"" % OC_URL,
  "export URL=$(echo $URL | tr -d '[:cntrl:]')",
  "curl -v -X POST \"$URL\" -k --fail --retry-connrefused --retry 7 --retry-all-errors -u admin:admin > open.json",
  "cat open.json",
@@ -1126,13 +1125,13 @@ def coreApiTests(ctx, part_number = 1, number_of_parts = 1, with_remote_php = Fa
  return {
  "name": "Core-API-Tests-%s%s" % (part_number, "-withoutRemotePhp" if not with_remote_php else ""),
  "steps": restoreBuildArtifactCache(ctx, dirs["opencloudBinArtifact"], dirs["opencloudBinPath"]) +
- ocisServer(storage, accounts_hash_difficulty, with_wrapper = True) +
+ opencloudServer(storage, accounts_hash_difficulty, with_wrapper = True) +
  [
  {
  "name": "oC10ApiTests-%s" % part_number,
  "image": OC_CI_PHP % DEFAULT_PHP_VERSION,
  "environment": {
- "TEST_SERVER_URL": OCIS_URL,
+ "TEST_SERVER_URL": OC_URL,
  "OCIS_REVA_DATA_ROOT": "%s" % (dirs["ocisRevaDataRoot"] if storage == "owncloud" else ""),
  "SEND_SCENARIO_LINE_REFERENCES": True,
  "STORAGE_DRIVER": storage,
@@ -1250,16 +1249,16 @@ def e2eTestPipeline(ctx):
  restoreWebCache() + \
  restoreWebPnpmCache() + \
  (tikaService() if params["tikaNeeded"] else []) + \
- ocisServer(extra_server_environment = extra_server_environment, tika_enabled = params["tikaNeeded"])
+ opencloudServer(extra_server_environment = extra_server_environment, tika_enabled = params["tikaNeeded"])

  step_e2e = {
  "name": "e2e-tests",
  "image": OC_CI_NODEJS % DEFAULT_NODEJS_VERSION,
  "environment": {
- "BASE_URL_OCIS": OCIS_DOMAIN,
+ "BASE_URL_OCIS": OC_DOMAIN,
  "HEADLESS": True,
  "RETRY": "1",
- "WEB_UI_CONFIG_FILE": "%s/%s" % (dirs["base"], dirs["ocisConfig"]),
+ "WEB_UI_CONFIG_FILE": "%s/%s" % (dirs["base"], dirs["opencloudConfig"]),
  "LOCAL_UPLOAD_DIR": "/uploads",
  },
  "commands": [
@@ -1340,14 +1339,14 @@ def multiServiceE2ePipeline(ctx):
  }

  storage_users_environment = {
- "OCIS_CORS_ALLOW_ORIGINS": "%s,https://%s:9201" % (OCIS_URL, OC_SERVER_NAME),
+ "OCIS_CORS_ALLOW_ORIGINS": "%s,https://%s:9201" % (OC_URL, OC_SERVER_NAME),
  "STORAGE_USERS_JWT_SECRET": "some-ocis-jwt-secret",
  "STORAGE_USERS_MOUNT_ID": "storage-users-id",
  "STORAGE_USERS_SERVICE_ACCOUNT_ID": "service-account-id",
  "STORAGE_USERS_SERVICE_ACCOUNT_SECRET": "service-account-secret",
  "STORAGE_USERS_GATEWAY_GRPC_ADDR": "%s:9142" % OC_SERVER_NAME,
  "STORAGE_USERS_EVENTS_ENDPOINT": "%s:9233" % OC_SERVER_NAME,
- "STORAGE_USERS_DATA_GATEWAY_URL": "%s/data" % OCIS_URL,
+ "STORAGE_USERS_DATA_GATEWAY_URL": "%s/data" % OC_URL,
  "OCIS_CACHE_STORE": "nats-js-kv",
  "OCIS_CACHE_STORE_NODES": "%s:9233" % OC_SERVER_NAME,
  "MICRO_REGISTRY_ADDRESS": "%s:9233" % OC_SERVER_NAME,
@@ -1399,13 +1398,13 @@ def multiServiceE2ePipeline(ctx):
  restoreWebCache() + \
  restoreWebPnpmCache() + \
  tikaService() + \
- ocisServer(extra_server_environment = extra_server_environment, tika_enabled = params["tikaNeeded"]) + \
+ opencloudServer(extra_server_environment = extra_server_environment, tika_enabled = params["tikaNeeded"]) + \
  storage_users_services + \
  [{
  "name": "e2e-tests",
  "image": OC_CI_NODEJS % DEFAULT_NODEJS_VERSION,
  "environment": {
- "BASE_URL_OCIS": OCIS_DOMAIN,
+ "BASE_URL_OCIS": OC_DOMAIN,
  "HEADLESS": True,
  "RETRY": "1",
  },
@@ -1438,7 +1437,7 @@ def uploadTracingResult(ctx):
  "path_style": True,
  "source": "webTestRunner/reports/e2e/playwright/tracing/**/*",
  "strip_prefix": "webTestRunner/reports/e2e/playwright/tracing",
- "target": "/${DRONE_REPO}/${DRONE_BUILD_NUMBER}/tracing",
+ "target": "/${DRONE_REPO}/${CI_PIPELINE_NUMBER}/tracing",
  },
  "environment": {
  "AWS_ACCESS_KEY_ID": {
@@ -1466,7 +1465,7 @@ def logTracingResults():
  "commands": [
  "cd %s/reports/e2e/playwright/tracing/" % dirs["web"],
  'echo "To see the trace, please open the following link in the console"',
- 'for f in *.zip; do echo "npx playwright show-trace https://cache.owncloud.com/public/${DRONE_REPO}/${DRONE_BUILD_NUMBER}/tracing/$f \n"; done',
+ 'for f in *.zip; do echo "npx playwright show-trace https://cache.owncloud.com/public/${DRONE_REPO}/${CI_PIPELINE_NUMBER}/tracing/$f \n"; done',
  ],
  "when": {
  "status": [
@@ -1658,25 +1657,6 @@ def binaryReleases(ctx):
  return pipelines

  def binaryRelease(ctx, arch, build_type, target, depends_on = []):
- settings = {
- "endpoint": {
- "from_secret": "upload_s3_endpoint",
- },
- "access_key": {
- "from_secret": "upload_s3_access_key",
- },
- "secret_key": {
- "from_secret": "upload_s3_secret_key",
- },
- "bucket": {
- "from_secret": "upload_s3_bucket",
- },
- "path_style": True,
- "strip_prefix": "ocis/dist/release/",
- "source": "ocis/dist/release/*",
- "target": target,
- }
-
  return {
  "name": "binaries-%s-%s" % (arch, build_type),
  "steps": makeNodeGenerate("") +
@@ -1707,20 +1687,6 @@ def binaryRelease(ctx, arch, build_type, target, depends_on = []):
  },
  ],
  },
- {
- "name": "upload",
- "image": PLUGINS_S3,
- "settings": settings,
- "when": [
- {
- "event": ["push", "manual"],
- "branch": "main",
- },
- {
- "event": "tag",
- },
- ],
- },
  {
  "name": "changelog",
  "image": OC_CI_GOLANG,
@@ -1786,24 +1752,6 @@ def licenseCheck(ctx):
  folder = "testing"
  target = "/ocis/%s/%s/%s" % (ctx.repo.name.replace("ocis-", ""), folder, buildref)

- settings = {
- "endpoint": {
- "from_secret": "upload_s3_endpoint",
- },
- "access_key": {
- "from_secret": "upload_s3_access_key",
- },
- "secret_key": {
- "from_secret": "upload_s3_secret_key",
- },
- "bucket": {
- "from_secret": "upload_s3_bucket",
- },
- "path_style": True,
- "source": "third-party-licenses.tar.gz",
- "target": target,
- }
-
  return [{
  "name": "check-licenses",
  "steps": [
@@ -1844,20 +1792,6 @@ def licenseCheck(ctx):
  "cd third-party-licenses && tar -czf ../third-party-licenses.tar.gz *",
  ],
  },
- {
- "name": "upload",
- "image": PLUGINS_S3,
- "settings": settings,
- "when": [
- {
- "event": "tag",
- },
- {
- "event": ["push", "manual"],
- "branch": "main",
- },
- ],
- },
  {
  "name": "changelog",
  "image": OC_CI_GOLANG,
@@ -2005,7 +1939,7 @@ def changelog():
  "push",
  ],
  "message": "Automated changelog update [skip ci]",
- "branch": "master",
+ "branch": "main",
  "author_email": "devops@opencloud.eu",
  "author_name": "openclouders",
  "netrc_machine": "github.com",
@@ -2108,7 +2042,7 @@ def makeNodeGenerate(module):
  },
  "commands": [
  "pnpm config set store-dir ./.pnpm-store",
- "retry -t 3 '%s node-generate-prod'" % (make),
+ "for i in $(seq 3); do %s node-generate-prod && break || sleep 1; done" % (make),
  ],
  },
  ]
@@ -2123,7 +2057,7 @@ def makeGoGenerate(module):
  "name": "generate go",
  "image": OC_CI_GOLANG,
  "commands": [
- "retry -t 3 '%s go-generate'" % (make),
+ "for i in $(seq 3); do %s go-generate && break || sleep 1; done" % (make),
  ],
  "environment": CI_HTTP_PROXY_ENV,
  },
@@ -2154,13 +2088,12 @@ def notify(ctx):
  {
  "event": ["push", "manual"],
  "branch": ["main", "release-*"],
  "status": status,
  },
  {
  "event": "tag",
  "status": status,
  },
  ],
- "runs_on": status,
  }

  def opencloudServer(storage = "decomposed", accounts_hash_difficulty = 4, volumes = [], depends_on = [], deploy_type = "", extra_server_environment = {}, with_wrapper = False, tika_enabled = False):
@@ -2172,17 +2105,17 @@ def opencloudServer(storage = "decomposed", accounts_hash_difficulty = 4, volume
  "STORAGE_USERS_DRIVER": "%s" % (storage),
  "PROXY_ENABLE_BASIC_AUTH": True,
  "WEB_UI_CONFIG_FILE": "%s/%s" % (dirs["base"], dirs["opencloudConfig"]),
- "OCIS_LOG_LEVEL": "error",
+ "OC_LOG_LEVEL": "error",
  "IDM_CREATE_DEMO_USERS": True, # needed for litmus and cs3api-validator tests
- "IDM_ADMIN_PASSWORD": "admin", # override the random admin password from `ocis init`
+ "IDM_ADMIN_PASSWORD": "admin", # override the random admin password from `opencloud init`
  "FRONTEND_SEARCH_MIN_LENGTH": "2",
- "OCIS_ASYNC_UPLOADS": True,
- "OCIS_EVENTS_ENABLE_TLS": False,
+ "OC_ASYNC_UPLOADS": True,
+ "OC_EVENTS_ENABLE_TLS": False,
  "NATS_NATS_HOST": "0.0.0.0",
  "NATS_NATS_PORT": 9233,
- "OCIS_JWT_SECRET": "some-ocis-jwt-secret",
+ "OC_JWT_SECRET": "some-opencloud-jwt-secret",
  "EVENTHISTORY_STORE": "memory",
- "OCIS_TRANSLATION_PATH": "%s/tests/config/translations" % dirs["base"],
+ "OC_TRANSLATION_PATH": "%s/tests/config/translations" % dirs["base"],
  # debug addresses required for running services health tests
  "ACTIVITYLOG_DEBUG_ADDR": "0.0.0.0:9197",
  "APP_PROVIDER_DEBUG_ADDR": "0.0.0.0:9165",
@@ -2225,11 +2158,11 @@ def opencloudServer(storage = "decomposed", accounts_hash_difficulty = 4, volume
  environment["FRONTEND_OCS_ENABLE_DENIALS"] = True

  # fonts map for txt thumbnails (including unicode support)
- environment["THUMBNAILS_TXT_FONTMAP_FILE"] = "%s/tests/config/drone/fontsMap.json" % (dirs["base"])
+ environment["THUMBNAILS_TXT_FONTMAP_FILE"] = "%s/tests/config/woodpecker/fontsMap.json" % (dirs["base"])

  if deploy_type == "cs3api_validator":
  environment["GATEWAY_GRPC_ADDR"] = "0.0.0.0:9142" # make gateway available to cs3api-validator
- environment["OCIS_SHARING_PUBLIC_SHARE_MUST_HAVE_PASSWORD"] = False
+ environment["OC_SHARING_PUBLIC_SHARE_MUST_HAVE_PASSWORD"] = False

  if deploy_type == "wopi_validator":
  environment["GATEWAY_GRPC_ADDR"] = "0.0.0.0:9142" # make gateway available to wopi server
@@ -2239,10 +2172,10 @@ def opencloudServer(storage = "decomposed", accounts_hash_difficulty = 4, volume
  environment["APP_PROVIDER_WOPI_APP_URL"] = "http://fakeoffice:8080"
  environment["APP_PROVIDER_WOPI_INSECURE"] = True
  environment["APP_PROVIDER_WOPI_WOPI_SERVER_EXTERNAL_URL"] = "http://wopi-fakeoffice:9300"
- environment["APP_PROVIDER_WOPI_FOLDER_URL_BASE_URL"] = OCIS_URL
+ environment["APP_PROVIDER_WOPI_FOLDER_URL_BASE_URL"] = OC_URL

  if deploy_type == "federation":
- environment["OCIS_URL"] = OC_FED_URL
+ environment["OC_URL"] = OC_FED_URL
  environment["PROXY_HTTP_ADDR"] = OC_FED_DOMAIN
  container_name = FED_OC_SERVER_NAME

@@ -2253,7 +2186,7 @@ def opencloudServer(storage = "decomposed", accounts_hash_difficulty = 4, volume
  environment["SEARCH_EXTRACTOR_CS3SOURCE_INSECURE"] = True

  # Pass in "default" accounts_hash_difficulty to not set this environment variable.
- # That will allow OCIS to use whatever its built-in default is.
+ # That will allow OpenCloud to use whatever its built-in default is.
  # Otherwise pass in a value from 4 to about 11 or 12 (default 4, for making regular tests fast)
  # The high values cause lots of CPU to be used when hashing passwords, and really slow down the tests.
  if (accounts_hash_difficulty != "default"):
@@ -2267,39 +2200,46 @@ def opencloudServer(storage = "decomposed", accounts_hash_difficulty = 4, volume
  "%s/bin/ocwrapper serve --bin %s --url %s --admin-username admin --admin-password admin" % (dirs["ocWrapper"], dirs["opencloudBin"], environment["OC_URL"]),
  ]

- wait_for_ocis = {
+ wait_for_opencloud = {
  "name": "wait-for-%s" % (container_name),
  "image": OC_CI_ALPINE,
  "commands": [
- # wait for ocis-server to be ready (5 minutes)
+ # wait for opencloud-server to be ready (5 minutes)
  "timeout 300 bash -c 'while [ $(curl -sk -uadmin:admin " +
- "%s/graph/v1.0/users/admin " % environment["OCIS_URL"] +
+ "%s/graph/v1.0/users/admin " % environment["OC_URL"] +
  "-w %{http_code} -o /dev/null) != 200 ]; do sleep 1; done'",
  ],
- "depends_on": depends_on,
  }

- return [
- {
- "name": container_name,
- "image": OC_CI_GOLANG,
- "detach": True,
- "environment": environment,
- "backend_options": {
- "docker": {
- "user": user,
- },
+ opencloud_server = {
+ "name": container_name,
+ "image": OC_CI_GOLANG,
+ "detach": True,
+ "environment": environment,
+ "backend_options": {
+ "docker": {
+ "user": user,
+ },
- "commands": [
- "%s init --insecure true" % dirs["opencloudBin"],
- "cat $OCIS_CONFIG_DIR/ocis.yaml",
- "cp tests/config/drone/app-registry.yaml /root/.ocis/config/app-registry.yaml",
- ] + (wrapper_commands),
- "depends_on": depends_on,
- },
- wait_for_ocis,
+ "commands": [
+ "%s init --insecure true" % dirs["opencloudBin"],
+ "cat $OC_CONFIG_DIR/opencloud.yaml",
+ "cp tests/config/woodpecker/app-registry.yaml $OC_CONFIG_DIR/app-registry.yaml",
+ ] + (wrapper_commands),
+ }
+
+ steps = [
+ opencloud_server,
+ wait_for_opencloud,
+ ]
+
+ # empty depends_on list makes steps to run in parallel, what we don't want
+ if depends_on:
+ steps[0]["depends_on"] = depends_on
+ steps[1]["depends_on"] = depends_on
+
+ return steps

  def startOcisService(service = None, name = None, environment = {}, volumes = []):
  """
  Starts an OCIS service in a detached container.
@@ -2351,7 +2291,7 @@ def build():
  "name": "build",
  "image": OC_CI_GOLANG,
  "commands": [
- "retry -t 3 'make -C opencloud build'",
+ "for i in $(seq 3); do make -C opencloud build && break || sleep 1; done",
  ],
  "environment": CI_HTTP_PROXY_ENV,
  },
@@ -2578,7 +2518,7 @@ def genericCachePurge(flush_path):

  def genericBuildArtifactCache(ctx, name, action, path):
  if action == "rebuild" or action == "restore":
- cache_path = "%s/%s/%s" % ("cache", repo_slug, ctx.build.commit + "-${DRONE_BUILD_NUMBER}")
+ cache_path = "%s/%s/%s" % ("cache", repo_slug, ctx.build.commit + "-${CI_PIPELINE_NUMBER}")
  name = "%s_build_artifact_cache" % (name)
  return genericCache(name, action, [path], cache_path)

@@ -2680,7 +2620,7 @@ def litmus(ctx, storage):
  result = {
  "name": "litmus",
  "steps": restoreBuildArtifactCache(ctx, dirs["opencloudBinArtifact"], dirs["opencloudBinPath"]) +
- ocisServer(storage) +
+ opencloudServer(storage) +
  setupForLitmus() +
  [
  {
@@ -2689,7 +2629,7 @@ def litmus(ctx, storage):
  "environment": environment,
  "commands": [
  "source .env",
- 'export LITMUS_URL="%s/remote.php/webdav"' % OCIS_URL,
+ 'export LITMUS_URL="%s/remote.php/webdav"' % OC_URL,
  litmusCommand,
  ],
  },
@@ -2699,7 +2639,7 @@ def litmus(ctx, storage):
  "environment": environment,
  "commands": [
  "source .env",
- 'export LITMUS_URL="%s/remote.php/dav/files/admin"' % OCIS_URL,
+ 'export LITMUS_URL="%s/remote.php/dav/files/admin"' % OC_URL,
  litmusCommand,
  ],
  },
@@ -2709,7 +2649,7 @@ def litmus(ctx, storage):
  "environment": environment,
  "commands": [
  "source .env",
- 'export LITMUS_URL="%s/remote.php/dav/files/admin/Shares/new_folder/"' % OCIS_URL,
+ 'export LITMUS_URL="%s/remote.php/dav/files/admin/Shares/new_folder/"' % OC_URL,
  litmusCommand,
  ],
  },
@@ -2719,7 +2659,7 @@ def litmus(ctx, storage):
  "environment": environment,
  "commands": [
  "source .env",
- 'export LITMUS_URL="%s/remote.php/webdav/Shares/new_folder/"' % OCIS_URL,
+ 'export LITMUS_URL="%s/remote.php/webdav/Shares/new_folder/"' % OC_URL,
  litmusCommand,
  ],
  },
@@ -2743,7 +2683,7 @@ def litmus(ctx, storage):
  "environment": environment,
  "commands": [
  "source .env",
- "export LITMUS_URL='%s/remote.php/dav/spaces/'$SPACE_ID" % OCIS_URL,
+ "export LITMUS_URL='%s/remote.php/dav/spaces/'$SPACE_ID" % OC_URL,
  litmusCommand,
  ],
  },
@@ -2772,10 +2712,10 @@ def setupForLitmus():
  "name": "setup-for-litmus",
  "image": OC_UBUNTU,
  "environment": {
- "TEST_SERVER_URL": OCIS_URL,
+ "TEST_SERVER_URL": OC_URL,
  },
  "commands": [
- "bash ./tests/config/drone/setup-for-litmus.sh",
+ "bash ./tests/config/woodpecker/setup-for-litmus.sh",
  "cat .env",
  ],
  }]
@@ -2783,7 +2723,7 @@
  def getDroneEnvAndCheckScript(ctx):
  ocis_git_base_url = "https://raw.githubusercontent.com/opencloud-eu/opencloud"
  path_to_drone_env = "%s/%s/.drone.env" % (ocis_git_base_url, ctx.build.commit)
- path_to_check_script = "%s/%s/tests/config/drone/check_web_cache.sh" % (ocis_git_base_url, ctx.build.commit)
+ path_to_check_script = "%s/%s/tests/config/woodpecker/check_web_cache.sh" % (ocis_git_base_url, ctx.build.commit)
  return {
  "name": "get-drone-env-and-check-script",
  "image": OC_UBUNTU,
@@ -2834,7 +2774,7 @@ def generateWebPnpmCache(ctx):
  "cd %s" % dirs["web"],
  'npm install --silent --global --force "$(jq -r ".packageManager" < package.json)"',
  "pnpm config set store-dir ./.pnpm-store",
- "retry -t 3 'pnpm install'",
+ "for i in $(seq 3); do pnpm install && break || sleep 1; done",
  ],
  },
  {
@@ -2926,7 +2866,7 @@ def restoreWebPnpmCache():
  "tar -xvf %s" % dirs["webPnpmZip"],
  'npm install --silent --global --force "$(jq -r ".packageManager" < package.json)"',
  "pnpm config set store-dir ./.pnpm-store",
- "retry -t 3 'pnpm install'",
+ "for i in $(seq 3); do pnpm install && break || sleep 1; done",
  ],
  }]

@@ -2968,7 +2908,7 @@ def fakeOffice():
  "detach": True,
  "environment": {},
  "commands": [
- "sh %s/tests/config/drone/serve-hosting-discovery.sh" % (dirs["base"]),
+ "sh %s/tests/config/woodpecker/serve-hosting-discovery.sh" % (dirs["base"]),
  ],
  },
  ]
@@ -3071,7 +3011,7 @@ def k6LoadTests(ctx):
  return []

  ocis_git_base_url = "https://raw.githubusercontent.com/opencloud-eu/opencloud"
- script_link = "%s/%s/tests/config/drone/run_k6_tests.sh" % (ocis_git_base_url, ctx.build.commit)
+ script_link = "%s/%s/tests/config/woodpecker/run_k6_tests.sh" % (ocis_git_base_url, ctx.build.commit)

  event_array = ["cron"]

@@ -3161,7 +3101,7 @@ def collaboraService():
  "detach": True,
  "environment": {
  "DONT_GEN_SSL_CERT": "set",
- "extra_params": "--o:ssl.enable=true --o:ssl.termination=true --o:welcome.enable=false --o:net.frame_ancestors=%s" % OCIS_URL,
+ "extra_params": "--o:ssl.enable=true --o:ssl.termination=true --o:welcome.enable=false --o:net.frame_ancestors=%s" % OC_URL,
  },
  "commands": [
  "coolconfig generate-proof-key",
@@ -3182,7 +3122,7 @@ def onlyofficeService():
  "USE_UNAUTHORIZED_STORAGE": True, # self signed certificates
  },
  "commands": [
- "cp %s/tests/config/drone/only-office.json /etc/onlyoffice/documentserver/local.json" % dirs["base"],
+ "cp %s/tests/config/woodpecker/only-office.json /etc/onlyoffice/documentserver/local.json" % dirs["base"],
  "openssl req -x509 -newkey rsa:4096 -keyout onlyoffice.key -out onlyoffice.crt -sha256 -days 365 -batch -nodes",
  "mkdir -p /var/www/onlyoffice/Data/certs",
  "cp onlyoffice.key /var/www/onlyoffice/Data/certs/",
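Taken together, the .woodpecker.star changes fall into three mechanical patterns: Drone leftovers are renamed to their Woodpecker equivalents (`${DRONE_BUILD_NUMBER}` → `${CI_PIPELINE_NUMBER}`, `tests/config/drone/` → `tests/config/woodpecker/`, and `retry -t 3 '<cmd>'` → `for i in $(seq 3); do <cmd> && break || sleep 1; done`), `OCIS_*`-prefixed names become `OC_*`, and every suite's `"skip"` flag is flipped from False to True. Those flags feed guards like the one visible in testPipelines above; a minimal sketch of that gating pattern (plain Python standing in for Starlark — the `is_enabled` helper and the `someOtherSuite` entry are illustrative, not from the file):

```python
config = {
    "cs3ApiTests": {"skip": True},         # flipped from False in this compare
    "wopiValidatorTests": {"skip": True},
    "someOtherSuite": {},                  # hypothetical entry with no "skip" key
}

def is_enabled(name):
    # mirrors `if "skip" not in config[name] or not config[name]["skip"]`:
    # a suite runs when it has no "skip" key, or when the key is False
    return "skip" not in config[name] or not config[name]["skip"]

pipelines = [name for name in config if is_enabled(name)]
print(pipelines)  # ['someOtherSuite'] -- everything with skip=True stays out
```

With every flag set to True, testPipelines generates no pipelines for these suites.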
go.mod (50 changed lines)

@@ -1,6 +1,6 @@
  module github.com/opencloud-eu/opencloud

- go 1.24.0
+ go 1.24.1

  require (
  dario.cat/mergo v1.0.1
@@ -42,14 +42,14 @@ require (
  github.com/google/uuid v1.6.0
  github.com/gookit/config/v2 v2.2.5
  github.com/gorilla/mux v1.8.1
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1
  github.com/invopop/validation v0.8.0
  github.com/jellydator/ttlcache/v2 v2.11.1
  github.com/jellydator/ttlcache/v3 v3.3.0
  github.com/jinzhu/now v1.1.5
  github.com/justinas/alice v1.2.0
  github.com/kovidgoyal/imaging v1.6.3
- github.com/leonelquinteros/gotext v1.7.0
+ github.com/leonelquinteros/gotext v1.7.1
  github.com/libregraph/idm v0.5.0
  github.com/libregraph/lico v0.65.1
  github.com/mitchellh/mapstructure v1.5.0
@@ -63,7 +63,7 @@ require (
  github.com/onsi/ginkgo/v2 v2.23.0
  github.com/onsi/gomega v1.36.2
  github.com/open-policy-agent/opa v1.1.0
- github.com/opencloud-eu/reva/v2 v2.27.3-0.20250304172421-22b1ead80cdd
+ github.com/opencloud-eu/reva/v2 v2.27.3-0.20250312134906-766c69c5d1be
  github.com/orcaman/concurrent-map v1.0.0
  github.com/owncloud/libre-graph-api-go v1.0.5-0.20240829135935-80dc00d6f5ea
  github.com/pkg/errors v0.9.1
@@ -82,7 +82,7 @@ require (
  github.com/test-go/testify v1.1.4
  github.com/thejerf/suture/v4 v4.0.6
  github.com/tidwall/gjson v1.18.0
- github.com/tus/tusd/v2 v2.6.0
+ github.com/tus/tusd/v2 v2.7.1
  github.com/unrolled/secure v1.16.0
  github.com/urfave/cli/v2 v2.27.5
  github.com/xhit/go-simple-mail/v2 v2.16.0
@@ -93,20 +93,20 @@ require (
  go.opentelemetry.io/contrib/zpages v0.57.0
  go.opentelemetry.io/otel v1.35.0
  go.opentelemetry.io/otel/exporters/jaeger v1.17.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0
  go.opentelemetry.io/otel/sdk v1.35.0
  go.opentelemetry.io/otel/trace v1.35.0
- golang.org/x/crypto v0.34.0
- golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c
+ golang.org/x/crypto v0.36.0
+ golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac
  golang.org/x/image v0.24.0
- golang.org/x/net v0.35.0
- golang.org/x/oauth2 v0.26.0
- golang.org/x/sync v0.11.0
- golang.org/x/term v0.29.0
- golang.org/x/text v0.22.0
- google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f
- google.golang.org/grpc v1.70.0
- google.golang.org/protobuf v1.36.3
+ golang.org/x/net v0.37.0
+ golang.org/x/oauth2 v0.28.0
+ golang.org/x/sync v0.12.0
+ golang.org/x/term v0.30.0
+ golang.org/x/text v0.23.0
+ google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a
+ google.golang.org/grpc v1.71.0
+ google.golang.org/protobuf v1.36.5
  gopkg.in/yaml.v2 v2.4.0
  gotest.tools/v3 v3.5.2
  stash.kopano.io/kgol/rndm v1.1.2
@@ -308,24 +308,24 @@ require (
  github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
  github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
  github.com/yashtewari/glob-intersection v0.2.0 // indirect
- go.etcd.io/etcd/api/v3 v3.5.18 // indirect
- go.etcd.io/etcd/client/pkg/v3 v3.5.18 // indirect
- go.etcd.io/etcd/client/v3 v3.5.18 // indirect
+ go.etcd.io/etcd/api/v3 v3.5.19 // indirect
+ go.etcd.io/etcd/client/pkg/v3 v3.5.19 // indirect
+ go.etcd.io/etcd/client/v3 v3.5.19 // indirect
  go.opencensus.io v0.24.0 // indirect
  go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect
  go.opentelemetry.io/otel/metric v1.35.0 // indirect
  go.opentelemetry.io/proto/otlp v1.5.0 // indirect
  go.uber.org/atomic v1.11.0 // indirect
- go.uber.org/multierr v1.9.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
  go.uber.org/zap v1.23.0 // indirect
- golang.org/x/mod v0.23.0 // indirect
- golang.org/x/sys v0.30.0 // indirect
+ golang.org/x/mod v0.24.0 // indirect
+ golang.org/x/sys v0.31.0 // indirect
  golang.org/x/time v0.10.0 // indirect
- golang.org/x/tools v0.30.0 // indirect
+ golang.org/x/tools v0.31.0 // indirect
  golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
  google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250219182151-9fdb1cabc7b2 // indirect
  gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect
  gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
  gopkg.in/warnings.v0 v0.1.2 // indirect
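The go.mod section is routine dependency maintenance plus a toolchain bump (go 1.24.0 → 1.24.1). A hedged sketch of reproducing one of the bumps, tusd, in a checkout — `go get` rewrites go.mod and go.sum together:

```sh
# bump a single module to the version shown in this compare,
# then let the go tool reconcile the require blocks and go.sum
go get github.com/tus/tusd/v2@v2.7.1
go mod tidy
```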
go.sum (96 changed lines)

@@ -581,8 +581,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vb
  github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
  github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
  github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 h1:VD1gqscl4nYs1YxVuSdemTrSgTKrwOWDK0FVFMqm+Cg=
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0/go.mod h1:4EgsQoS4TOhJizV+JTFg40qx1Ofh3XmXEQNBpgvNT40=
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA=
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M=
  github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
  github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
  github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
@@ -721,8 +721,8 @@ github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvf
  github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
  github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
  github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
- github.com/leonelquinteros/gotext v1.7.0 h1:jcJmF4AXqyamP7vuw2MMIKs+O3jAEmvrc5JQiI8Ht/8=
- github.com/leonelquinteros/gotext v1.7.0/go.mod h1:qJdoQuERPpccw7L70uoU+K/BvTfRBHYsisCQyFLXyvw=
+ github.com/leonelquinteros/gotext v1.7.1 h1:/JNPeE3lY5JeVYv2+KBpz39994W3W9fmZCGq3eO9Ri8=
+ github.com/leonelquinteros/gotext v1.7.1/go.mod h1:I0WoFDn9u2D3VbPnnDPT8mzZu0iSXG8iih+AH2fHHqg=
  github.com/libregraph/idm v0.5.0 h1:tDMwKbAOZzdeDYMxVlY5PbSqRKO7dbAW9KT42A51WSk=
  github.com/libregraph/idm v0.5.0/go.mod h1:BGMwIQ/6orJSPVzJ1x6kgG2JyG9GY05YFmbsnaD80k0=
  github.com/libregraph/lico v0.65.1 h1:7ENAoAgbetZkJSwa1dMMP5WvXMTQ5E/3LI4uRDhwjEk=
@@ -865,8 +865,8 @@ github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
  github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
  github.com/open-policy-agent/opa v1.1.0 h1:HMz2evdEMTyNqtdLjmu3Vyx06BmhNYAx67Yz3Ll9q2s=
  github.com/open-policy-agent/opa v1.1.0/go.mod h1:T1pASQ1/vwfTa+e2fYcfpLCvWgYtqtiUv+IuA/dLPQs=
- github.com/opencloud-eu/reva/v2 v2.27.3-0.20250304172421-22b1ead80cdd h1:n4w8SHHuk74HOHy7ZEsL6zw2N/SEarqr482jAh5C/p4=
- github.com/opencloud-eu/reva/v2 v2.27.3-0.20250304172421-22b1ead80cdd/go.mod h1:Zi6h/WupAKzY/umvteGJCY3q4GOvTyrlm4JZfiuHeds=
+ github.com/opencloud-eu/reva/v2 v2.27.3-0.20250312134906-766c69c5d1be h1:dxKsVUzdKIGf1hfGdr1GH6NFfo0xuIcPma/qjHNG8mU=
+ github.com/opencloud-eu/reva/v2 v2.27.3-0.20250312134906-766c69c5d1be/go.mod h1:sqlExPoEnEd0KdfoSKogV8PrwEBY3l06icoa4gJnGnU=
  github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
  github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
  github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
@@ -1097,8 +1097,8 @@ github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208/go.mod h1:BzWtXXrXz
  github.com/transip/gotransip/v6 v6.2.0/go.mod h1:pQZ36hWWRahCUXkFWlx9Hs711gLd8J4qdgLdRzmtY+g=
  github.com/trustelem/zxcvbn v1.0.1 h1:mp4JFtzdDYGj9WYSD3KQSkwwUumWNFzXaAjckaTYpsc=
  github.com/trustelem/zxcvbn v1.0.1/go.mod h1:zonUyKeh7sw6psPf/e3DtRqkRyZvAbOfjNz/aO7YQ5s=
- github.com/tus/tusd/v2 v2.6.0 h1:Je243QDKnFTvm/WkLH2bd1oQ+7trolrflRWyuI0PdWI=
- github.com/tus/tusd/v2 v2.6.0/go.mod h1:1Eb1lBoSRBfYJ/mQfFVjyw8ZdNMdBqW17vgQKl3Ah9g=
+ github.com/tus/tusd/v2 v2.7.1 h1:TGJjhv9RYXDmsTz8ug/qSd9vQpmD0Ik0G0IPo80Qmc0=
+ github.com/tus/tusd/v2 v2.7.1/go.mod h1:PLdIMQ/ge+5ADgGKcL3FgTaPs+7wB0JIiI5HQXAiJE8=
  github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
  github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
  github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
@@ -1144,12 +1144,12 @@ github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQ
  go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
  go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk=
  go.etcd.io/bbolt v1.4.0/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk=
- go.etcd.io/etcd/api/v3 v3.5.18 h1:Q4oDAKnmwqTo5lafvB+afbgCDF7E35E4EYV2g+FNGhs=
- go.etcd.io/etcd/api/v3 v3.5.18/go.mod h1:uY03Ob2H50077J7Qq0DeehjM/A9S8PhVfbQ1mSaMopU=
- go.etcd.io/etcd/client/pkg/v3 v3.5.18 h1:mZPOYw4h8rTk7TeJ5+3udUkfVGBqc+GCjOJYd68QgNM=
- go.etcd.io/etcd/client/pkg/v3 v3.5.18/go.mod h1:BxVf2o5wXG9ZJV+/Cu7QNUiJYk4A29sAhoI5tIRsCu4=
- go.etcd.io/etcd/client/v3 v3.5.18 h1:nvvYmNHGumkDjZhTHgVU36A9pykGa2K4lAJ0yY7hcXA=
- go.etcd.io/etcd/client/v3 v3.5.18/go.mod h1:kmemwOsPU9broExyhYsBxX4spCTDX3yLgPMWtpBXG6E=
+ go.etcd.io/etcd/api/v3 v3.5.19 h1:w3L6sQZGsWPuBxRQ4m6pPP3bVUtV8rjW033EGwlr0jw=
+ go.etcd.io/etcd/api/v3 v3.5.19/go.mod h1:QqKGViq4KTgOG43dr/uH0vmGWIaoJY3ggFi6ZH0TH/U=
+ go.etcd.io/etcd/client/pkg/v3 v3.5.19 h1:9VsyGhg0WQGjDWWlDI4VuaS9PZJGNbPkaHEIuLwtixk=
+ go.etcd.io/etcd/client/pkg/v3 v3.5.19/go.mod h1:qaOi1k4ZA9lVLejXNvyPABrVEe7VymMF2433yyRQ7O0=
+ go.etcd.io/etcd/client/v3 v3.5.19 h1:+4byIz6ti3QC28W0zB0cEZWwhpVHXdrKovyycJh1KNo=
+ go.etcd.io/etcd/client/v3 v3.5.19/go.mod h1:FNzyinmMIl0oVsty1zA3hFeUrxXI/JpEnz4sG+POzjU=
  go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
  go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
  go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -1172,10 +1172,10 @@ go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
  go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
  go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4=
  go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI=
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60=
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM=
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U=
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE=
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI=
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo=
  go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
  go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
  go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
@@ -1196,8 +1196,8 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
  go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
  go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
  go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
- go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
- go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
+ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
  go.uber.org/ratelimit v0.0.0-20180316092928-c15da0234277/go.mod h1:2X8KaoNd1J0lZV+PxJk/5+DGbO/tpwLR1m++a7FnB/Y=
  go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
  go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
@@ -1226,8 +1226,8 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf
  golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
  golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
  golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
- golang.org/x/crypto v0.34.0 h1:+/C6tk6rf/+t5DhUketUbD1aNGqiSX3j15Z6xuIDlBA=
- golang.org/x/crypto v0.34.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
+ golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
+ golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
  golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
  golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
  golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1238,8 +1238,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
  golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
  golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
  golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
- golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY=
- golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8=
+ golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac h1:l5+whBCLH3iH2ZNHYLbAe58bo7yrN4mVcnkHDYz5vvs=
+ golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac/go.mod h1:hH+7mtFmImwwcMvScyxUhjuVHR3HGaDPMn9rMSUUbxo=
  golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
  golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
  golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E=
@@ -1269,8 +1269,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
  golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
  golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
  golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
- golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
- golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+ golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
+ golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
  golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
  golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
  golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1327,8 +1327,8 @@ golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
  golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
  golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
  golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
- golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
- golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
+ golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
+ golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
  golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
  golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
  golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1336,8 +1336,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr
  golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
  golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
  golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
- golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE=
- golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+ golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
+ golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
  golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
  golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
  golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1356,8 +1356,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
  golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
  golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
  golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
- golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
- golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+ golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+ golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
  golang.org/x/sys v0.0.0-20180622082034-63fc586f45fe/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
  golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
  golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1439,8 +1439,8 @@ golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
  golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
  golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
  golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
- golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
- golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+ golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
+ golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
  golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
  golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
  golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1453,8 +1453,8 @@ golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
  golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
  golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
  golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
- golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
- golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
+ golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
+ golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
  golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
  golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
  golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1471,8 +1471,8 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
  golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
  golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
  golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
- golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
- golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
+ golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+ golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
  golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
  golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
  golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1535,8 +1535,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
  golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
  golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
  golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
- golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
- golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
+ golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
+ golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
  golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
  golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
  golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1598,10 +1598,10 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D
  google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
  google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk=
  google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc=
- google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA=
- google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o=
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI=
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50=
+ google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0=
+ google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU=
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250219182151-9fdb1cabc7b2 h1:DMTIbak9GhdaSxEjvVzAeNZvyc03I61duqNbnm3SU0M=
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250219182151-9fdb1cabc7b2/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
  google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
  google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
  google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -1617,8 +1617,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
  google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
  google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
  google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
- google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ=
- google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw=
+ google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg=
+ google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
  google.golang.org/grpc/examples v0.0.0-20211102180624-670c133e568e h1:m7aQHHqd0q89mRwhwS9Bx2rjyl/hsFAeta+uGrHsQaU=
  google.golang.org/grpc/examples v0.0.0-20211102180624-670c133e568e/go.mod h1:gID3PKrg7pWKntu9Ss6zTLJ0ttC0X9IHgREOCZwbCVU=
  google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
@@ -1635,8 +1635,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU=
|
||||
google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
|
||||
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y=
|
||||
gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UDAkHu8BrjI=
|
||||
|
||||
@@ -313,9 +313,9 @@ func setCmd(cfg *config.Config) *cli.Command {
func backend(root, backend string) metadata.Backend {
switch backend {
case "xattrs":
return metadata.NewXattrsBackend(root, cache.Config{})
return metadata.NewXattrsBackend(cache.Config{})
case "mpk":
return metadata.NewMessagePackBackend(root, cache.Config{})
return metadata.NewMessagePackBackend(cache.Config{})
}
return metadata.NullBackend{}
}

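The hunk above drops the root-path argument from the metadata backend constructors: the backends now derive file locations from the nodes they are handed instead of being bound to a storage root at construction time. A minimal before/after sketch of a call site, assuming only the constructor signatures visible in this diff:

// Before the change, a backend was bound to a storage root up front:
//   b := metadata.NewMessagePackBackend("/var/lib/opencloud/storage", cache.Config{})
// After it, only the cache configuration is passed:
func newBackend() metadata.Backend {
	return metadata.NewMessagePackBackend(cache.Config{})
}
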
@@ -376,7 +376,7 @@ func composeMessage(nt NotificationTemplate, locale, defaultLocale, path string,

func loadTemplates(nt NotificationTemplate, locale, defaultLocale, path string) (string, string) {
t := l10n.NewTranslatorFromCommonConfig(defaultLocale, _domain, path, _translationFS, "l10n/locale").Locale(locale)
return t.Get("%s", nt.Subject), t.Get("%s", nt.Message)
return t.Get("%s", nt.Subject), t.Get(nt.Message)
}

func executeTemplate(raw string, vars map[string]interface{}) (string, error) {

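This translation fix is easy to misread: t.Get("%s", nt.Message) uses the literal string "%s" as the msgid, so the catalog lookup can never match the notification body and it stays untranslated; t.Get(nt.Message) makes the message itself the lookup key. A runnable stand-in showing the difference (the map-based catalog is an assumption for illustration; the real translator is built by the l10n package above):

package main

import "fmt"

// catalog stands in for a gotext translation catalog.
var catalog = map[string]string{"Space shared": "Space geteilt"}

// get mimics a gotext-style Get: look up msgid, fall back to msgid itself.
func get(msgid string, vars ...interface{}) string {
	tr, ok := catalog[msgid]
	if !ok {
		tr = msgid
	}
	return fmt.Sprintf(tr, vars...)
}

func main() {
	msg := "Space shared"
	fmt.Println(get("%s", msg)) // "%s" has no catalog entry: prints the untranslated message
	fmt.Println(get(msg))       // the message is the msgid: prints "Space geteilt"
}
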
16
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
generated
vendored
@@ -126,6 +126,15 @@ func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []strin
}
}

// Check if oneof already set
if of := fieldDescriptor.ContainingOneof(); of != nil && !of.IsSynthetic() {
if f := msgValue.WhichOneof(of); f != nil {
if fieldDescriptor.Message() == nil || fieldDescriptor.FullName() != f.FullName() {
return fmt.Errorf("field already set for oneof %q", of.FullName().Name())
}
}
}

// If this is the last element, we're done
if i == len(fieldPath)-1 {
break
@@ -140,13 +149,6 @@ func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []strin
msgValue = msgValue.Mutable(fieldDescriptor).Message()
}

// Check if oneof already set
if of := fieldDescriptor.ContainingOneof(); of != nil && !of.IsSynthetic() {
if f := msgValue.WhichOneof(of); f != nil {
return fmt.Errorf("field already set for oneof %q", of.FullName().Name())
}
}

switch {
case fieldDescriptor.IsList():
return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values)

@@ -53,7 +53,7 @@ func New(root string) (*Blobstore, error) {
}

// Upload stores some data in the blobstore under the given key
func (bs *Blobstore) Upload(node *node.Node, source string) error {
func (bs *Blobstore) Upload(node *node.Node, source, _copyTarget string) error {
if node.BlobID == "" {
return ErrBlobIDEmpty
}

@@ -77,7 +77,7 @@ func New(endpoint, region, bucket, accessKey, secretKey string, defaultPutOption
}

// Upload stores some data in the blobstore under the given key
func (bs *Blobstore) Upload(node *node.Node, source string) error {
func (bs *Blobstore) Upload(node *node.Node, source, _copyTarget string) error {
reader, err := os.Open(source)
if err != nil {
return errors.Wrap(err, "can not open source file to upload")

6
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/blobstore/blobstore.go
generated
vendored
@@ -81,7 +81,11 @@ func (bs *Blobstore) Upload(node *node.Node, source, copyTarget string) error {
return err
}

file.Seek(0, 0)
_, err = file.Seek(0, 0)
if err != nil {
return err
}

copyFile, err := os.OpenFile(copyTarget, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
if err != nil {
return errors.Wrapf(err, "could not open copy target '%s' for writing", copyTarget)

5
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options/options.go
generated
vendored
@@ -59,6 +59,11 @@ func New(m map[string]interface{}) (*Options, error) {
m["metadata_backend"] = "hybrid"
}

// debounced scan delay
if o.ScanDebounceDelay == 0 {
o.ScanDebounceDelay = 10 * time.Millisecond
}

do, err := decomposedoptions.New(m)
if err != nil {
return nil, err

2
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/posix.go
generated
vendored
@@ -78,7 +78,7 @@ func New(m map[string]interface{}, stream events.Stream, log *zerolog.Logger) (s
var lu *lookup.Lookup
switch o.MetadataBackend {
case "xattrs":
lu = lookup.New(metadata.NewXattrsBackend(o.Root, o.FileMetadataCache), um, o, &timemanager.Manager{})
lu = lookup.New(metadata.NewXattrsBackend(o.FileMetadataCache), um, o, &timemanager.Manager{})
case "hybrid":
lu = lookup.New(metadata.NewHybridBackend(1024, // start offloading grants after 1KB
func(n metadata.MetadataNode) string {

43
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/trashbin/trashbin.go
generated
vendored
@@ -259,56 +259,71 @@ func (tb *Trashbin) ListRecycle(ctx context.Context, spaceID string, key, relati
}

// RestoreRecycleItem restores the specified item
func (tb *Trashbin) RestoreRecycleItem(ctx context.Context, spaceID string, key, relativePath string, restoreRef *provider.Reference) error {
func (tb *Trashbin) RestoreRecycleItem(ctx context.Context, spaceID string, key, relativePath string, restoreRef *provider.Reference) (*node.Node, error) {
_, span := tracer.Start(ctx, "RestoreRecycleItem")
defer span.End()

trashRoot := filepath.Join(tb.lu.InternalPath(spaceID, spaceID), ".Trash")
trashPath := filepath.Clean(filepath.Join(trashRoot, "files", key+".trashitem", relativePath))

restorePath := ""
// TODO why can we not use NodeFromResource here? It will use walk path. Do trashed items have a problem with that?
restoreBaseNode, err := tb.lu.NodeFromID(ctx, restoreRef.GetResourceId())
if err != nil {
return err
if restoreRef != nil {
restoreBaseNode, err := tb.lu.NodeFromID(ctx, restoreRef.GetResourceId())
if err != nil {
return nil, err
}
restorePath = filepath.Join(restoreBaseNode.InternalPath(), restoreRef.GetPath())
} else {
originalPath, _, err := tb.readInfoFile(trashRoot, key)
if err != nil {
return nil, err
}
restorePath = filepath.Join(tb.lu.InternalPath(spaceID, spaceID), originalPath, relativePath)
}
restorePath := filepath.Join(restoreBaseNode.InternalPath(), restoreRef.GetPath())
// TODO the decomposed trash also checks the permissions on the restore node

_, id, _, err := tb.lu.MetadataBackend().IdentifyPath(ctx, trashPath)
if err != nil {
return err
return nil, err
}

// update parent id in case it was restored to a different location
_, parentID, _, err := tb.lu.MetadataBackend().IdentifyPath(ctx, filepath.Dir(restorePath))
if err != nil {
return err
return nil, err
}
if len(parentID) == 0 {
return fmt.Errorf("trashbin: parent id not found for %s", restorePath)
return nil, fmt.Errorf("trashbin: parent id not found for %s", restorePath)
}

trashNode := &trashNode{spaceID: spaceID, id: id, path: trashPath}
err = tb.lu.MetadataBackend().Set(ctx, trashNode, prefixes.ParentidAttr, []byte(parentID))
if err != nil {
return err
return nil, err
}

// restore the item
err = os.Rename(trashPath, restorePath)
if err != nil {
return err
return nil, err
}
if err := tb.lu.CacheID(ctx, spaceID, string(id), restorePath); err != nil {
tb.log.Error().Err(err).Str("spaceID", spaceID).Str("id", string(id)).Str("path", restorePath).Msg("trashbin: error caching id")
if err := tb.lu.CacheID(ctx, spaceID, id, restorePath); err != nil {
tb.log.Error().Err(err).Str("spaceID", spaceID).Str("id", id).Str("path", restorePath).Msg("trashbin: error caching id")
}

restoredNode, err := tb.lu.NodeFromID(ctx, &provider.ResourceId{SpaceId: spaceID, OpaqueId: id})
if err != nil {
return nil, err
}

// cleanup trash info
if relativePath == "." || relativePath == "/" {
return os.Remove(filepath.Join(trashRoot, "info", key+".trashinfo"))
return restoredNode, os.Remove(filepath.Join(trashRoot, "info", key+".trashinfo"))
} else {
return nil
return restoredNode, nil
}

}

// PurgeRecycleItem purges the specified item, all its children and all their revisions

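Two things change in RestoreRecycleItem above: a nil restoreRef is now allowed, in which case the original location is read back from the item's trash info file, and the restored node is returned so callers can propagate the restored size up the tree (decomposedfs does exactly that further down). A compile-only sketch of a caller, with stand-in types instead of reva's *node.Node and *provider.Reference:

package sketch

import "context"

// Node stands in for reva's *node.Node for illustration.
type Node struct{ Blobsize int64 }

// Trashbin mirrors the new method shape from this diff.
type Trashbin interface {
	RestoreRecycleItem(ctx context.Context, spaceID, key, relativePath string, restoreRef interface{}) (*Node, error)
}

// restoreAndMeasure restores an item to its original path (nil restoreRef)
// and reports the size the caller would propagate afterwards.
func restoreAndMeasure(ctx context.Context, tb Trashbin, spaceID, key string) (int64, error) {
	n, err := tb.RestoreRecycleItem(ctx, spaceID, key, ".", nil)
	if err != nil {
		return 0, err
	}
	return n.Blobsize, nil
}
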
33
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/assimilation.go
generated
vendored
@@ -197,7 +197,7 @@ func (t *Tree) Scan(path string, action EventAction, isDir bool) error {
})
}
if err := t.setDirty(filepath.Dir(path), true); err != nil {
return err
t.log.Error().Err(err).Str("path", path).Bool("isDir", isDir).Msg("failed to mark directory as dirty")
}
t.scanDebouncer.Debounce(scanItem{
Path: filepath.Dir(path),
@@ -208,7 +208,7 @@ func (t *Tree) Scan(path string, action EventAction, isDir bool) error {
// 2. New directory
// -> scan directory
if err := t.setDirty(path, true); err != nil {
return err
t.log.Error().Err(err).Str("path", path).Bool("isDir", isDir).Msg("failed to mark directory as dirty")
}
t.scanDebouncer.Debounce(scanItem{
Path: path,
@@ -244,8 +244,13 @@ func (t *Tree) Scan(path string, action EventAction, isDir bool) error {
t.log.Debug().Str("path", path).Bool("isDir", isDir).Msg("scanning path (ActionMoveFrom)")
// 6. file/directory moved out of the watched directory
// -> update directory
if err := t.setDirty(filepath.Dir(path), true); err != nil {
return err
err := t.HandleFileDelete(path)
if err != nil {
t.log.Error().Err(err).Str("path", path).Bool("isDir", isDir).Msg("failed to handle deleted item")
}
err = t.setDirty(filepath.Dir(path), true)
if err != nil {
t.log.Error().Err(err).Str("path", path).Bool("isDir", isDir).Msg("failed to mark directory as dirty")
}

go func() { _ = t.WarmupIDCache(filepath.Dir(path), false, true) }()
@@ -258,7 +263,7 @@ func (t *Tree) Scan(path string, action EventAction, isDir bool) error {

err := t.HandleFileDelete(path)
if err != nil {
return err
t.log.Error().Err(err).Str("path", path).Bool("isDir", isDir).Msg("failed to handle deleted item")
}

t.scanDebouncer.Debounce(scanItem{
@@ -342,7 +347,7 @@ func (t *Tree) findSpaceId(path string) (string, node.Attributes, error) {
}
}

return string(spaceID), spaceAttrs, nil
return spaceID, spaceAttrs, nil
}
spaceCandidate = filepath.Dir(spaceCandidate)
}
@@ -387,7 +392,7 @@ func (t *Tree) assimilate(item scanItem) error {
// the file has an id set, we already know it from the past
n := node.NewBaseNode(spaceID, id, t.lookup)

previousPath, ok := t.lookup.GetCachedID(context.Background(), spaceID, string(id))
previousPath, ok := t.lookup.GetCachedID(context.Background(), spaceID, id)
previousParentID, _ := t.lookup.MetadataBackend().Get(context.Background(), n, prefixes.ParentidAttr)

// compare metadata mtime with actual mtime. if it matches AND the path hasn't changed (move operation)
@@ -418,10 +423,10 @@ func (t *Tree) assimilate(item scanItem) error {
// this is a move
t.log.Debug().Str("path", item.Path).Msg("move detected")

if err := t.lookup.CacheID(context.Background(), spaceID, string(id), item.Path); err != nil {
t.log.Error().Err(err).Str("spaceID", spaceID).Str("id", string(id)).Str("path", item.Path).Msg("could not cache id")
if err := t.lookup.CacheID(context.Background(), spaceID, id, item.Path); err != nil {
t.log.Error().Err(err).Str("spaceID", spaceID).Str("id", id).Str("path", item.Path).Msg("could not cache id")
}
_, attrs, err := t.updateFile(item.Path, string(id), spaceID)
_, attrs, err := t.updateFile(item.Path, id, spaceID)
if err != nil {
return err
}
@@ -471,11 +476,11 @@ func (t *Tree) assimilate(item scanItem) error {
} else {
// This item had already been assimilated in the past. Update the path
t.log.Debug().Str("path", item.Path).Msg("updating cached path")
if err := t.lookup.CacheID(context.Background(), spaceID, string(id), item.Path); err != nil {
t.log.Error().Err(err).Str("spaceID", spaceID).Str("id", string(id)).Str("path", item.Path).Msg("could not cache id")
if err := t.lookup.CacheID(context.Background(), spaceID, id, item.Path); err != nil {
t.log.Error().Err(err).Str("spaceID", spaceID).Str("id", id).Str("path", item.Path).Msg("could not cache id")
}

_, _, err := t.updateFile(item.Path, string(id), spaceID)
_, _, err := t.updateFile(item.Path, id, spaceID)
if err != nil {
return err
}
@@ -753,7 +758,7 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error {
}

spaceID, _, _, err = t.lookup.MetadataBackend().IdentifyPath(context.Background(), spaceCandidate)
if err == nil {
if err == nil && len(spaceID) > 0 {
err = scopeSpace(path)
if err != nil {
return err

49
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/revisions.go
generated
vendored
@@ -223,44 +223,6 @@ func (tp *Tree) DownloadRevision(ctx context.Context, ref *provider.Reference, r
return ri, reader, nil
}

func (tp *Tree) getRevisionNode(ctx context.Context, ref *provider.Reference, revisionKey string, hasPermission func(*provider.ResourcePermissions) bool) (*node.Node, error) {
_, span := tracer.Start(ctx, "getRevisionNode")
defer span.End()
log := appctx.GetLogger(ctx)

// verify revision key format
kp := strings.SplitN(revisionKey, node.RevisionIDDelimiter, 2)
if len(kp) != 2 {
log.Error().Str("revisionKey", revisionKey).Msg("malformed revisionKey")
return nil, errtypes.NotFound(revisionKey)
}
log.Debug().Str("revisionKey", revisionKey).Msg("DownloadRevision")

spaceID := ref.ResourceId.SpaceId
// check if the node is available and has not been deleted
n, err := node.ReadNode(ctx, tp.lookup, spaceID, kp[0], false, nil, false)
if err != nil {
return nil, err
}
if !n.Exists {
err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name))
return nil, err
}

p, err := tp.permissions.AssemblePermissions(ctx, n)
switch {
case err != nil:
return nil, err
case !hasPermission(p):
return nil, errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name))
}

// Set space owner in context
storagespace.ContextSendSpaceOwnerID(ctx, n.SpaceOwnerOrManager(ctx))

return n, nil
}

func (tp *Tree) RestoreRevision(ctx context.Context, srcNode, targetNode metadata.MetadataNode) error {
source := srcNode.InternalPath()
target := targetNode.InternalPath()
@@ -275,7 +237,10 @@ func (tp *Tree) RestoreRevision(ctx context.Context, srcNode, targetNode metadat
return err
}
defer wf.Close()
wf.Truncate(0)
err = wf.Truncate(0)
if err != nil {
return err
}

if _, err := io.Copy(wf, rf); err != nil {
return err
@@ -293,7 +258,11 @@ func (tp *Tree) RestoreRevision(ctx context.Context, srcNode, targetNode metadat

// always set the node mtime to the current time
mtime := time.Now()
os.Chtimes(target, mtime, mtime)
err = os.Chtimes(target, mtime, mtime)
if err != nil {
return errtypes.InternalError("failed to update times:" + err.Error())
}

err = tp.lookup.MetadataBackend().SetMultiple(ctx, targetNode,
map[string][]byte{
prefixes.MTimeAttr: []byte(mtime.UTC().Format(time.RFC3339Nano)),

27
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go
generated
vendored
@@ -26,7 +26,6 @@ import (
"io/fs"
"os"
"path/filepath"
"regexp"
"strings"
"time"

@@ -62,13 +61,6 @@ func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/pkg/decomposedfs/tree")
}

// Blobstore defines an interface for storing blobs in a blobstore
type Blobstore interface {
Upload(node *node.Node, source, copyTarget string) error
Download(node *node.Node) (io.ReadCloser, error)
Delete(node *node.Node) error
}

type Watcher interface {
Watch(path string)
}
@@ -82,7 +74,7 @@ type scanItem struct {
// Tree manages a hierarchical tree
type Tree struct {
lookup *lookup.Lookup
blobstore Blobstore
blobstore node.Blobstore
trashbin *trashbin.Trashbin
propagator propagator.Propagator
permissions permissions.Permissions
@@ -103,7 +95,7 @@ type Tree struct {
type PermissionCheckFunc func(rp *provider.ResourcePermissions) bool

// New returns a new instance of Tree
func New(lu node.PathLookup, bs Blobstore, um usermapper.Mapper, trashbin *trashbin.Trashbin, permissions permissions.Permissions, o *options.Options, es events.Stream, cache store.Store, log *zerolog.Logger) (*Tree, error) {
func New(lu node.PathLookup, bs node.Blobstore, um usermapper.Mapper, trashbin *trashbin.Trashbin, permissions permissions.Permissions, o *options.Options, es events.Stream, cache store.Store, log *zerolog.Logger) (*Tree, error) {
scanQueue := make(chan scanItem)
t := &Tree{
lookup: lu.(*lookup.Lookup),
@@ -242,7 +234,10 @@ func (t *Tree) TouchFile(ctx context.Context, n *node.Node, markprocessing bool,
if err != nil {
return err
}
t.lookup.TimeManager().OverrideMtime(ctx, n, &attributes, nodeMTime)
err = t.lookup.TimeManager().OverrideMtime(ctx, n, &attributes, nodeMTime)
if err != nil {
return err
}
} else {
fi, err := f.Stat()
if err != nil {
@@ -572,13 +567,13 @@ func (t *Tree) DeleteBlob(node *node.Node) error {
}

// BuildSpaceIDIndexEntry returns the entry for the space id index
func (t *Tree) BuildSpaceIDIndexEntry(spaceID, nodeID string) string {
return nodeID
func (t *Tree) BuildSpaceIDIndexEntry(spaceID string) string {
return spaceID
}

// ResolveSpaceIDIndexEntry returns the node id for the space id index entry
func (t *Tree) ResolveSpaceIDIndexEntry(spaceid, entry string) (string, string, error) {
return spaceid, entry, nil
func (t *Tree) ResolveSpaceIDIndexEntry(spaceID string) (string, error) {
return spaceID, nil
}

// InitNewNode initializes a new node
@@ -652,8 +647,6 @@ func (t *Tree) createDirNode(ctx context.Context, n *node.Node) (err error) {
return n.SetXattrsWithContext(ctx, attributes, false)
}

var nodeIDRegep = regexp.MustCompile(`.*/nodes/([^.]*).*`)

func (t *Tree) isIgnored(path string) bool {
return isLockFile(path) || isTrash(path) || t.isUpload(path) || t.isInternal(path)
}

24
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/decomposedfs.go
generated
vendored
@@ -130,7 +130,7 @@ type Decomposedfs struct {
}

// NewDefault returns an instance with default components
func NewDefault(m map[string]interface{}, bs tree.Blobstore, es events.Stream, log *zerolog.Logger) (storage.FS, error) {
func NewDefault(m map[string]interface{}, bs node.Blobstore, es events.Stream, log *zerolog.Logger) (storage.FS, error) {
if log == nil {
log = &zerolog.Logger{}
}
@@ -143,9 +143,9 @@ func NewDefault(m map[string]interface{}, bs tree.Blobstore, es events.Stream, l
var lu *lookup.Lookup
switch o.MetadataBackend {
case "xattrs":
lu = lookup.New(metadata.NewXattrsBackend(o.Root, o.FileMetadataCache), o, &timemanager.Manager{})
lu = lookup.New(metadata.NewXattrsBackend(o.FileMetadataCache), o, &timemanager.Manager{})
case "messagepack":
lu = lookup.New(metadata.NewMessagePackBackend(o.Root, o.FileMetadataCache), o, &timemanager.Manager{})
lu = lookup.New(metadata.NewMessagePackBackend(o.FileMetadataCache), o, &timemanager.Manager{})
default:
return nil, fmt.Errorf("unknown metadata backend %s, only 'messagepack' or 'xattrs' (default) supported", o.MetadataBackend)
}
@@ -1292,7 +1292,23 @@ func (fs *Decomposedfs) RestoreRecycleItem(ctx context.Context, space *provider.
return errtypes.NotFound(key)
}

return fs.trashbin.RestoreRecycleItem(ctx, spaceID, key, relativePath, restoreRef)
restoredNode, err := fs.trashbin.RestoreRecycleItem(ctx, spaceID, key, relativePath, restoreRef)
if err != nil {
return err
}

var sizeDiff int64
if restoredNode.IsDir(ctx) {
treeSize, err := restoredNode.GetTreeSize(ctx)
if err != nil {
return err
}
sizeDiff = int64(treeSize)
} else {
sizeDiff = restoredNode.Blobsize
}

return fs.tp.Propagate(ctx, restoredNode, sizeDiff)
}

func (fs *Decomposedfs) PurgeRecycleItem(ctx context.Context, space *provider.Reference, key, relativePath string) error {

@@ -40,7 +40,6 @@ import (

// MessagePackBackend persists the attributes in messagepack format inside the file
type MessagePackBackend struct {
rootPath string
metaCache cache.FileMetadataCache
}

@@ -51,9 +50,8 @@ type readWriteCloseSeekTruncater interface {
}

// NewMessagePackBackend returns a new MessagePackBackend instance
func NewMessagePackBackend(rootPath string, o cache.Config) MessagePackBackend {
func NewMessagePackBackend(o cache.Config) MessagePackBackend {
return MessagePackBackend{
rootPath: filepath.Clean(rootPath),
metaCache: cache.GetFileMetadataCache(o),
}
}
@@ -63,7 +61,7 @@ func (MessagePackBackend) Name() string { return "messagepack" }

// IdentifyPath returns the id and mtime of a file
func (b MessagePackBackend) IdentifyPath(_ context.Context, path string) (string, string, time.Time, error) {
metaPath := filepath.Join(path + ".mpk")
metaPath := filepath.Clean(path + ".mpk")
source, err := os.Open(metaPath)
// // No cached entry found. Read from storage and store in cache
if err != nil {

@@ -37,12 +37,11 @@ import (

// XattrsBackend stores the file attributes in extended attributes
type XattrsBackend struct {
rootPath string
metaCache cache.FileMetadataCache
}

// NewXattrsBackend returns a new XattrsBackend instance
func NewXattrsBackend(rootPath string, o cache.Config) XattrsBackend {
func NewXattrsBackend(o cache.Config) XattrsBackend {
return XattrsBackend{
metaCache: cache.GetFileMetadataCache(o),
}

11
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go
generated
vendored
@@ -86,6 +86,13 @@ const (
ProcessingStatus = "processing:"
)

// Blobstore defines an interface for storing blobs in a blobstore
type Blobstore interface {
Upload(node *Node, source, copyTarget string) error
Download(node *Node) (io.ReadCloser, error)
Delete(node *Node) error
}

type TimeManager interface {
// OverrideMTime overrides the mtime of the node, either on the node itself or in the given attributes, depending on the implementation
OverrideMtime(ctx context.Context, n *Node, attrs *Attributes, mtime time.Time) error
@@ -129,8 +136,8 @@ type Tree interface {
ReadBlob(node *Node) (io.ReadCloser, error)
DeleteBlob(node *Node) error

BuildSpaceIDIndexEntry(spaceID, nodeID string) string
ResolveSpaceIDIndexEntry(spaceID, entry string) (string, string, error)
BuildSpaceIDIndexEntry(spaceID string) string
ResolveSpaceIDIndexEntry(spaceID string) (string, error)

CreateRevision(ctx context.Context, n *Node, version string, f *lockedfile.File) (string, error)
ListRevisions(ctx context.Context, ref *provider.Reference) ([]*provider.FileVersion, error)

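With this hunk, node.Blobstore becomes the single blobstore contract; the per-package Blobstore interfaces elsewhere in this diff are deleted in its favour. The copyTarget parameter exists for implementations that consume the source file and still need to leave a copy behind, as the posix blobstore above does; stores that do not need it simply ignore it. A minimal filesystem-backed sketch under those assumptions, with a stand-in Node type instead of reva's *node.Node:

package sketch

import (
	"io"
	"os"
	"path/filepath"
)

// Node stands in for reva's *node.Node.
type Node struct{ SpaceID, BlobID string }

type fsBlobstore struct{ root string }

// path derives a blob location; the real stores shard the id into
// subdirectories, this flat layout is only for illustration.
func (bs fsBlobstore) path(n *Node) string {
	return filepath.Join(bs.root, n.SpaceID, n.BlobID)
}

// Upload copies the source file into the blobstore. copyTarget is ignored
// here, mirroring the decomposed and s3 stores.
func (bs fsBlobstore) Upload(n *Node, source, _copyTarget string) error {
	in, err := os.Open(source)
	if err != nil {
		return err
	}
	defer in.Close()
	if err := os.MkdirAll(filepath.Dir(bs.path(n)), 0700); err != nil {
		return err
	}
	out, err := os.OpenFile(bs.path(n), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	defer out.Close()
	_, err = io.Copy(out, in)
	return err
}

func (bs fsBlobstore) Download(n *Node) (io.ReadCloser, error) { return os.Open(bs.path(n)) }

func (bs fsBlobstore) Delete(n *Node) error { return os.Remove(bs.path(n)) }
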
20
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/recycle.go
generated
vendored
@@ -129,7 +129,7 @@ func (tb *DecomposedfsTrashbin) ListRecycle(ctx context.Context, spaceID string,
}
item := &provider.RecycleItem{
Type: provider.ResourceType(typeInt),
Size: uint64(size),
Size: size,
Key: filepath.Join(key, relativePath),
DeletionTime: deletionTime,
Ref: &provider.Reference{
@@ -345,7 +345,7 @@ func (tb *DecomposedfsTrashbin) listTrashRoot(ctx context.Context, spaceID strin
}

// RestoreRecycleItem restores the specified item
func (tb *DecomposedfsTrashbin) RestoreRecycleItem(ctx context.Context, spaceID string, key, relativePath string, restoreRef *provider.Reference) error {
func (tb *DecomposedfsTrashbin) RestoreRecycleItem(ctx context.Context, spaceID string, key, relativePath string, restoreRef *provider.Reference) (*node.Node, error) {
_, span := tracer.Start(ctx, "RestoreRecycleItem")
defer span.End()

@@ -353,7 +353,7 @@ func (tb *DecomposedfsTrashbin) RestoreRecycleItem(ctx context.Context, spaceID
if restoreRef != nil {
tn, err := tb.fs.lu.NodeFromResource(ctx, restoreRef)
if err != nil {
return err
return nil, err
}

targetNode = tn
@@ -361,19 +361,19 @@ func (tb *DecomposedfsTrashbin) RestoreRecycleItem(ctx context.Context, spaceID

rn, parent, restoreFunc, err := tb.fs.tp.(*tree.Tree).RestoreRecycleItemFunc(ctx, spaceID, key, relativePath, targetNode)
if err != nil {
return err
return nil, err
}

// check permissions of deleted node
rp, err := tb.fs.p.AssembleTrashPermissions(ctx, rn)
switch {
case err != nil:
return err
return nil, err
case !rp.RestoreRecycleItem:
if rp.Stat {
return errtypes.PermissionDenied(key)
return nil, errtypes.PermissionDenied(key)
}
return errtypes.NotFound(key)
return nil, errtypes.NotFound(key)
}

// Set space owner in context
@@ -383,13 +383,13 @@ func (tb *DecomposedfsTrashbin) RestoreRecycleItem(ctx context.Context, spaceID
pp, err := tb.fs.p.AssemblePermissions(ctx, parent)
switch {
case err != nil:
return err
return nil, err
case !pp.InitiateFileUpload:
// share receiver cannot restore to a shared resource to which she does not have write permissions.
if rp.Stat {
return errtypes.PermissionDenied(key)
return nil, errtypes.PermissionDenied(key)
}
return errtypes.NotFound(key)
return nil, errtypes.NotFound(key)
}

// Run the restore func

58
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/spaces.go
generated
vendored
@@ -30,6 +30,8 @@ import (
"sync/atomic"
"time"

"maps"

userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
v1beta11 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
@@ -248,7 +250,7 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide

var (
spaceID = spaceIDAny
nodeID = spaceIDAny
entry = spaceIDAny
requestedUserID *userv1beta1.UserId
)

@@ -266,8 +268,8 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
spaceTypes[filter[i].GetSpaceType()] = struct{}{}
}
case provider.ListStorageSpacesRequest_Filter_TYPE_ID:
_, spaceID, nodeID, _ = storagespace.SplitID(filter[i].GetId().OpaqueId)
if strings.Contains(nodeID, "/") {
_, spaceID, entry, _ = storagespace.SplitID(filter[i].GetId().OpaqueId)
if strings.Contains(entry, "/") {
return []*provider.StorageSpace{}, nil
}
case provider.ListStorageSpacesRequest_Filter_TYPE_USER:
@@ -296,11 +298,11 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
// /path/to/root/spaces/personal/nodeid
// /path/to/root/spaces/shared/nodeid

if spaceID != spaceIDAny && nodeID != spaceIDAny {
if spaceID != spaceIDAny && entry != spaceIDAny {
// try directly reading the node
n, err := node.ReadNode(ctx, fs.lu, spaceID, nodeID, true, nil, false) // permission to read disabled space is checked later
n, err := node.ReadNode(ctx, fs.lu, spaceID, entry, true, nil, false) // permission to read disabled space is checked later
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("id", nodeID).Msg("could not read node")
appctx.GetLogger(ctx).Error().Err(err).Str("id", entry).Msg("could not read node")
return nil, err
}
if !n.Exists {
@@ -332,12 +334,10 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
return nil, errors.Wrap(err, "error reading user index")
}

if nodeID == spaceIDAny {
for spaceID, nodeID := range allMatches {
matches[spaceID] = nodeID
}
if entry == spaceIDAny {
maps.Copy(matches, allMatches)
} else {
matches[allMatches[nodeID]] = allMatches[nodeID]
matches[allMatches[entry]] = allMatches[entry]
}

// get Groups for userid
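In all three index-lookup branches of this file, a hand-written copy loop is replaced by maps.Copy from the standard library (available since Go 1.21, hence the new "maps" import at the top of this hunk). The two forms are equivalent, as this runnable snippet shows:

package main

import (
	"fmt"
	"maps" // standard library since Go 1.21
)

func main() {
	matches := map[string]string{"a": "1"}
	allMatches := map[string]string{"b": "2", "c": "3"}

	// Equivalent to: for k, v := range allMatches { matches[k] = v }
	maps.Copy(matches, allMatches)

	fmt.Println(len(matches)) // 3
}
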
@@ -359,12 +359,10 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
return nil, errors.Wrap(err, "error reading group index")
}

if nodeID == spaceIDAny {
for spaceID, nodeID := range allMatches {
matches[spaceID] = nodeID
}
if entry == spaceIDAny {
maps.Copy(matches, allMatches)
} else {
matches[allMatches[nodeID]] = allMatches[nodeID]
matches[allMatches[entry]] = allMatches[entry]
}
}

@@ -389,12 +387,10 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
return nil, errors.Wrap(err, "error reading type index")
}

if nodeID == spaceIDAny {
for spaceID, nodeID := range allMatches {
matches[spaceID] = nodeID
}
if entry == spaceIDAny {
maps.Copy(matches, allMatches)
} else {
matches[allMatches[nodeID]] = allMatches[nodeID]
matches[allMatches[entry]] = allMatches[entry]
}
}
}
@@ -417,9 +413,9 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
// Distribute work
errg.Go(func() error {
defer close(work)
for spaceID, nodeID := range matches {
for spaceID, entry := range matches {
select {
case work <- []string{spaceID, nodeID}:
case work <- []string{spaceID, entry}:
case <-ctx.Done():
return ctx.Err()
}
@@ -435,15 +431,15 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
for i := 0; i < numWorkers; i++ {
errg.Go(func() error {
for match := range work {
spaceID, nodeID, err := fs.tp.ResolveSpaceIDIndexEntry(match[0], match[1])
spaceID, err := fs.tp.ResolveSpaceIDIndexEntry(match[1])
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("id", nodeID).Msg("resolve space id index entry, skipping")
appctx.GetLogger(ctx).Error().Err(err).Str("id", spaceID).Msg("resolve space id index entry, skipping")
continue
}

n, err := node.ReadNode(ctx, fs.lu, spaceID, nodeID, true, nil, true)
n, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID, true, nil, true)
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("id", nodeID).Msg("could not read node, skipping")
appctx.GetLogger(ctx).Error().Err(err).Str("id", spaceID).Msg("could not read node, skipping")
continue
}

@@ -459,7 +455,7 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
case errtypes.NotFound:
// ok
default:
appctx.GetLogger(ctx).Error().Err(err).Str("id", nodeID).Msg("could not convert to storage space")
appctx.GetLogger(ctx).Error().Err(err).Str("id", spaceID).Msg("could not convert to storage space")
}
continue
}
@@ -497,9 +493,9 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
}

// if there are no matches (or they happened to be spaces for the owner) and the node is a child return a space
if int64(len(matches)) <= numShares.Load() && nodeID != spaceID {
if int64(len(matches)) <= numShares.Load() && entry != spaceID {
// try node id
n, err := node.ReadNode(ctx, fs.lu, spaceID, nodeID, true, nil, false) // permission to read disabled space is checked in storageSpaceFromNode
n, err := node.ReadNode(ctx, fs.lu, spaceID, entry, true, nil, false) // permission to read disabled space is checked in storageSpaceFromNode
if err != nil {
return nil, err
}
@@ -817,7 +813,7 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De
// - for decomposedfs/decomposeds3 it is the relative link to the space root
// - for the posixfs it is the node id
func (fs *Decomposedfs) updateIndexes(ctx context.Context, grantee *provider.Grantee, spaceType, spaceID, nodeID string) error {
target := fs.tp.BuildSpaceIDIndexEntry(spaceID, nodeID)
target := fs.tp.BuildSpaceIDIndexEntry(spaceID)
err := fs.linkStorageSpaceType(ctx, spaceType, spaceID, target)
if err != nil {
return err

@@ -23,13 +23,14 @@ import (

provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"github.com/opencloud-eu/reva/v2/pkg/storage"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
)

type Trashbin interface {
Setup(storage.FS) error

ListRecycle(ctx context.Context, spaceID, key, relativePath string) ([]*provider.RecycleItem, error)
RestoreRecycleItem(ctx context.Context, spaceID, key, relativePath string, restoreRef *provider.Reference) error
RestoreRecycleItem(ctx context.Context, spaceID, key, relativePath string, restoreRef *provider.Reference) (*node.Node, error)
PurgeRecycleItem(ctx context.Context, spaceID, key, relativePath string) error
EmptyRecycle(ctx context.Context, spaceID string) error
}

53
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/tree.go
generated
vendored
@@ -56,17 +56,10 @@ func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree")
}

// Blobstore defines an interface for storing blobs in a blobstore
type Blobstore interface {
Upload(node *node.Node, source string) error
Download(node *node.Node) (io.ReadCloser, error)
Delete(node *node.Node) error
}

// Tree manages a hierarchical tree
type Tree struct {
lookup node.PathLookup
blobstore Blobstore
blobstore node.Blobstore
propagator propagator.Propagator
permissions permissions.Permissions

@@ -79,7 +72,7 @@ type Tree struct {
type PermissionCheckFunc func(rp *provider.ResourcePermissions) bool

// New returns a new instance of Tree
func New(lu node.PathLookup, bs Blobstore, o *options.Options, p permissions.Permissions, cache store.Store, log *zerolog.Logger) *Tree {
func New(lu node.PathLookup, bs node.Blobstore, o *options.Options, p permissions.Permissions, cache store.Store, log *zerolog.Logger) *Tree {
return &Tree{
lookup: lu,
blobstore: bs,
@@ -524,7 +517,7 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) {
}

// RestoreRecycleItemFunc returns a node and a function to restore it from the trash.
func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPath string, targetNode *node.Node) (*node.Node, *node.Node, func() error, error) {
func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPath string, targetNode *node.Node) (*node.Node, *node.Node, func() (*node.Node, error), error) {
_, span := tracer.Start(ctx, "RestoreRecycleItemFunc")
defer span.End()
logger := appctx.GetLogger(ctx)
@@ -555,9 +548,9 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa
return nil, nil, nil, err
}

fn := func() error {
fn := func() (*node.Node, error) {
if targetNode.Exists {
return errtypes.AlreadyExists("origin already exists")
return nil, errtypes.AlreadyExists("origin already exists")
}

parts := strings.SplitN(recycleNode.ID, node.TrashIDDelimiter, 2)
@@ -567,18 +560,18 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa
// add the entry for the parent dir
err = os.Symlink("../../../../../"+lookup.Pathify(originalId, 4, 2), filepath.Join(targetNode.ParentPath(), targetNode.Name))
if err != nil {
return err
return nil, err
}

// attempt to rename only if we're not in a subfolder
if recycleNode.ID != restoreNode.ID {
err = os.Rename(recycleNode.InternalPath(), restoreNode.InternalPath())
if err != nil {
return err
return nil, err
}
err = t.lookup.MetadataBackend().Rename(recycleNode, restoreNode)
if err != nil {
return err
return nil, err
}
}

@@ -590,7 +583,7 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa
attrs.SetString(prefixes.ParentidAttr, targetNode.ParentID)

if err = t.lookup.MetadataBackend().SetMultiple(ctx, restoreNode, map[string][]byte(attrs), true); err != nil {
return errors.Wrap(err, "Decomposedfs: could not update recycle node")
return nil, errors.Wrap(err, "Decomposedfs: could not update recycle node")
}

// delete item link in trash
@@ -598,7 +591,7 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa
if trashPath != "" && trashPath != "/" {
resolvedTrashRoot, err := filepath.EvalSymlinks(trashItem)
if err != nil {
return errors.Wrap(err, "Decomposedfs: could not resolve trash root")
return nil, errors.Wrap(err, "Decomposedfs: could not resolve trash root")
}
deletePath = filepath.Join(resolvedTrashRoot, trashPath)
if err = os.Remove(deletePath); err != nil {
@@ -609,18 +602,11 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa
logger.Error().Err(err).Str("trashItem", trashItem).Str("deletePath", deletePath).Str("trashPath", trashPath).Msg("error recursively deleting trash item")
}
}

var sizeDiff int64
if recycleNode.IsDir(ctx) {
treeSize, err := recycleNode.GetTreeSize(ctx)
if err != nil {
return err
}
sizeDiff = int64(treeSize)
} else {
sizeDiff = recycleNode.Blobsize
}
return t.Propagate(ctx, targetNode, sizeDiff)
rn := node.New(restoreNode.SpaceID, restoreNode.ID, targetNode.ParentID, targetNode.Name, recycleNode.Blobsize, recycleNode.BlobID, recycleNode.Type(ctx), recycleNode.Owner(), t.lookup)
rn.SpaceRoot = targetNode.SpaceRoot
rn.Exists = true
// the recycle node has an id with the trash timestamp, but the propagation is only interested in the parent id
return rn, nil
}
return recycleNode, parent, fn, nil
}
@@ -801,7 +787,7 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err

// WriteBlob writes a blob to the blobstore
func (t *Tree) WriteBlob(node *node.Node, source string) error {
return t.blobstore.Upload(node, source)
return t.blobstore.Upload(node, source, "")
}

// ReadBlob reads a blob from the blobstore
@@ -826,13 +812,14 @@ func (t *Tree) DeleteBlob(node *node.Node) error {
}

// BuildSpaceIDIndexEntry returns the entry for the space id index
func (t *Tree) BuildSpaceIDIndexEntry(spaceID, nodeID string) string {
func (t *Tree) BuildSpaceIDIndexEntry(spaceID string) string {
return "../../../spaces/" + lookup.Pathify(spaceID, 1, 2) + "/nodes/" + lookup.Pathify(spaceID, 4, 2)
}

// ResolveSpaceIDIndexEntry returns the node id for the space id index entry
func (t *Tree) ResolveSpaceIDIndexEntry(_, entry string) (string, string, error) {
return ReadSpaceAndNodeFromIndexLink(entry)
func (t *Tree) ResolveSpaceIDIndexEntry(entry string) (string, error) {
spaceID, _, err := ReadSpaceAndNodeFromIndexLink(entry)
return spaceID, err
}

// ReadSpaceAndNodeFromIndexLink reads a symlink and parses space and node id if the link has the correct format, eg:

2
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/store.go
generated
vendored
@@ -125,7 +125,7 @@ func (store DecomposedFsStore) List(ctx context.Context) ([]*DecomposedFsSession
func (store DecomposedFsStore) Get(ctx context.Context, id string) (*DecomposedFsSession, error) {
sessionPath := sessionPath(store.root, id)
match := _idRegexp.FindStringSubmatch(sessionPath)
if match == nil || len(match) < 2 {
if len(match) < 2 {
return nil, fmt.Errorf("invalid upload path")
}


@@ -125,7 +125,7 @@ func (store OcisStore) List(ctx context.Context) ([]*OcisSession, error) {
func (store OcisStore) Get(ctx context.Context, id string) (*OcisSession, error) {
sessionPath := sessionPath(store.root, id)
match := _idRegexp.FindStringSubmatch(sessionPath)
if match == nil || len(match) < 2 {
if len(match) < 2 {
return nil, fmt.Errorf("invalid upload path")
}


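The dropped match == nil test in both Get implementations is redundant: FindStringSubmatch returns nil when nothing matches, and the length of a nil slice is 0, so len(match) < 2 already covers that case. A runnable demonstration (the pattern is illustrative, not the real _idRegexp):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`sessions/([^/]+)\.info`)
	match := re.FindStringSubmatch("no-match-here")
	fmt.Println(match == nil)   // true
	fmt.Println(len(match) < 2) // true: len of a nil slice is 0, so the nil check added nothing
}
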
@@ -16,7 +16,7 @@
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

// Code generated by mockery v2.40.2. DO NOT EDIT.
// Code generated by mockery v2.53.2. DO NOT EDIT.

package mocks


20
vendor/github.com/opencloud-eu/reva/v2/tests/cs3mocks/mocks/GatewayAPIClient.go
generated
vendored
@@ -1,4 +1,22 @@
// Code generated by mockery v2.46.3. DO NOT EDIT.
// Copyright 2018-2022 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

// Code generated by mockery v2.53.2. DO NOT EDIT.

package mocks


7
vendor/github.com/tus/tusd/v2/pkg/handler/composer.go
generated
vendored
@@ -14,6 +14,8 @@ type StoreComposer struct {
Concater ConcaterDataStore
UsesLengthDeferrer bool
LengthDeferrer LengthDeferrerDataStore
ContentServer ContentServerDataStore
UsesContentServer bool
}

// NewStoreComposer creates a new and empty store composer.
@@ -85,3 +87,8 @@ func (store *StoreComposer) UseLengthDeferrer(ext LengthDeferrerDataStore) {
store.UsesLengthDeferrer = ext != nil
store.LengthDeferrer = ext
}

func (store *StoreComposer) UseContentServer(ext ContentServerDataStore) {
store.UsesContentServer = ext != nil
store.ContentServer = ext
}

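UseContentServer follows the same pattern as the other capability setters on StoreComposer. A sketch of how a store that can also serve content directly would be wired in; the store value and its remaining capabilities are assumptions here:

package sketch

import tusd "github.com/tus/tusd/v2/pkg/handler"

// newComposer registers a store that implements ContentServerDataStore so
// that GET requests are delegated to it instead of going through GetReader.
func newComposer(store tusd.ContentServerDataStore) *tusd.StoreComposer {
	composer := tusd.NewStoreComposer()
	// ... register the store's other capabilities here ...
	composer.UseContentServer(store) // mirrors UseLengthDeferrer and friends above
	return composer
}
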
7
vendor/github.com/tus/tusd/v2/pkg/handler/config.go
generated
vendored
@@ -75,6 +75,13 @@ type Config struct {
// If the error is non-nil, the error will be forwarded to the client. Furthermore,
// HTTPResponse will be ignored and the error value can contain values for the HTTP response.
PreFinishResponseCallback func(hook HookEvent) (HTTPResponse, error)
// PreUploadTerminateCallback will be invoked on DELETE requests before an upload is terminated,
// giving the application the opportunity to reject the termination. For example, to ensure resources
// used by other services are not deleted.
// If the callback returns no error, optional values from HTTPResponse will be contained in the HTTP response.
// If the error is non-nil, the error will be forwarded to the client. Furthermore,
// HTTPResponse will be ignored and the error value can contain values for the HTTP response.
PreUploadTerminateCallback func(hook HookEvent) (HTTPResponse, error)
// GracefulRequestCompletionTimeout is the timeout for operations to complete after an HTTP
// request has ended (successfully or by error). For example, if an HTTP request is interrupted,
// instead of stopping immediately, the handler and data store will be given some additional

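Per the new doc comment, a callback that returns a non-nil error rejects the DELETE request, while a nil error lets termination proceed. A sketch of a termination guard; the "locked" metadata key is invented for illustration and is not part of tusd:

package sketch

import (
	"errors"

	tusd "github.com/tus/tusd/v2/pkg/handler"
)

func config() tusd.Config {
	return tusd.Config{
		PreUploadTerminateCallback: func(hook tusd.HookEvent) (tusd.HTTPResponse, error) {
			// Reject termination while another service still references the upload.
			if hook.Upload.MetaData["locked"] == "true" {
				// A non-nil error rejects the termination and is forwarded to the client.
				return tusd.HTTPResponse{}, errors.New("upload is referenced by another service")
			}
			return tusd.HTTPResponse{}, nil
		},
	}
}
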
19
vendor/github.com/tus/tusd/v2/pkg/handler/datastore.go
generated
vendored
@@ -3,6 +3,7 @@ package handler
import (
"context"
"io"
"net/http"
)

type MetaData map[string]string
@@ -191,3 +192,21 @@ type Lock interface {
// Unlock releases an existing lock for the given upload.
Unlock() error
}

type ServableUpload interface {
// ServeContent serves the uploaded data as specified by the GET request.
// It allows data stores to delegate the handling of range requests and conditional
// requests to their underlying providers.
// The tusd handler will set the Content-Type and Content-Disposition headers
// before calling ServeContent, but the implementation can override them.
// After calling ServeContent, the handler will not take any further action
// other than handling a potential error.
ServeContent(ctx context.Context, w http.ResponseWriter, r *http.Request) error
}

// ContentServerDataStore is the interface for DataStores that can serve content directly.
// When the handler serves a GET request, it will pass the request to ServeContent
// and delegate its handling to the DataStore, instead of using GetReader to obtain the content.
type ContentServerDataStore interface {
AsServableUpload(upload Upload) ServableUpload
}

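For a filesystem-backed store, the natural ServableUpload implementation delegates to http.ServeContent, which provides exactly the range and conditional request handling the interface comment describes. A compile-only sketch with a stand-in upload type, not the real tusd filestore:

package sketch

import (
	"context"
	"net/http"
	"os"
)

// fileUpload stands in for a store's upload handle; binPath points at the
// uploaded data on disk.
type fileUpload struct{ binPath string }

// ServeContent satisfies the ServableUpload interface from the hunk above.
// http.ServeContent handles Range and If-Modified-Since/If-Range headers,
// which is the delegation the new interface is designed for.
func (u fileUpload) ServeContent(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	f, err := os.Open(u.binPath)
	if err != nil {
		return err
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		return err
	}
	http.ServeContent(w, r, fi.Name(), fi.ModTime().UTC(), f)
	return nil
}
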
108
vendor/github.com/tus/tusd/v2/pkg/handler/unrouted_handler.go
generated
vendored
@@ -60,6 +60,7 @@ var (
ErrInvalidUploadDeferLength = NewError("ERR_INVALID_UPLOAD_LENGTH_DEFER", "invalid Upload-Defer-Length header", http.StatusBadRequest)
ErrUploadStoppedByServer = NewError("ERR_UPLOAD_STOPPED", "upload has been stopped by server", http.StatusBadRequest)
ErrUploadRejectedByServer = NewError("ERR_UPLOAD_REJECTED", "upload creation has been rejected by server", http.StatusBadRequest)
ErrUploadTerminationRejected = NewError("ERR_UPLOAD_TERMINATION_REJECTED", "upload termination has been rejected by server", http.StatusBadRequest)
ErrUploadInterrupted = NewError("ERR_UPLOAD_INTERRUPTED", "upload has been interrupted by another request for this upload resource", http.StatusBadRequest)
ErrServerShutdown = NewError("ERR_SERVER_SHUTDOWN", "request has been interrupted because the server is shutting down", http.StatusServiceUnavailable)
ErrOriginNotAllowed = NewError("ERR_ORIGIN_NOT_ALLOWED", "request origin is not allowed", http.StatusForbidden)
@@ -177,10 +178,10 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
// We also update the write deadline, but makes sure that it is larger than the read deadline, so we
// can still write a response in the case of a read timeout.
if err := c.resC.SetReadDeadline(time.Now().Add(handler.config.NetworkTimeout)); err != nil {
c.log.Warn("NetworkControlError", "error", err)
c.log.WarnContext(c, "NetworkControlError", "error", err)
}
if err := c.resC.SetWriteDeadline(time.Now().Add(2 * handler.config.NetworkTimeout)); err != nil {
c.log.Warn("NetworkControlError", "error", err)
c.log.WarnContext(c, "NetworkControlError", "error", err)
}

// Allow overriding the HTTP method. The reason for this is
@@ -190,7 +191,7 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
r.Method = newMethod
}

c.log.Info("RequestIncoming")
c.log.InfoContext(c, "RequestIncoming")

handler.Metrics.incRequestsTotal(r.Method)

@@ -405,7 +406,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)

handler.Metrics.incUploadsCreated()
c.log = c.log.With("id", id)
c.log.Info("UploadCreated", "size", size, "url", url)
c.log.InfoContext(c, "UploadCreated", "size", size, "url", url)

if handler.config.NotifyCreatedUploads {
handler.CreatedUploads <- newHookEvent(c, info)
@@ -572,7 +573,7 @@ func (handler *UnroutedHandler) PostFileV2(w http.ResponseWriter, r *http.Reques

handler.Metrics.incUploadsCreated()
c.log = c.log.With("id", id)
c.log.Info("UploadCreated", "size", info.Size, "url", url)
c.log.InfoContext(c, "UploadCreated", "size", info.Size, "url", url)

if handler.config.NotifyCreatedUploads {
handler.CreatedUploads <- newHookEvent(c, info)
@@ -891,7 +892,7 @@ func (handler *UnroutedHandler) writeChunk(c *httpContext, resp HTTPResponse, up
maxSize = length
}

c.log.Info("ChunkWriteStart", "maxSize", maxSize, "offset", offset)
c.log.InfoContext(c, "ChunkWriteStart", "maxSize", maxSize, "offset", offset)

var bytesWritten int64
var err error
@@ -907,12 +908,12 @@ func (handler *UnroutedHandler) writeChunk(c *httpContext, resp HTTPResponse, up
// Update the read deadline for every successful read operation. This ensures that the request handler
// keeps going while data is transmitted but that dead connections can also time out and be cleaned up.
if err := c.resC.SetReadDeadline(time.Now().Add(handler.config.NetworkTimeout)); err != nil {
c.log.Warn("NetworkTimeoutError", "error", err)
c.log.WarnContext(c, "NetworkTimeoutError", "error", err)
}

// The write deadline is updated accordingly to ensure that we can also write responses.
if err := c.resC.SetWriteDeadline(time.Now().Add(2 * handler.config.NetworkTimeout)); err != nil {
c.log.Warn("NetworkTimeoutError", "error", err)
c.log.WarnContext(c, "NetworkTimeoutError", "error", err)
}
}

@@ -935,7 +936,7 @@ func (handler *UnroutedHandler) writeChunk(c *httpContext, resp HTTPResponse, up
// it in the response, if the store did not also return an error.
bodyErr := c.body.hasError()
if bodyErr != nil {
c.log.Error("BodyReadError", "error", bodyErr.Error())
c.log.ErrorContext(c, "BodyReadError", "error", bodyErr.Error())
if err == nil {
err = bodyErr
}
|
||||
@@ -947,12 +948,12 @@ func (handler *UnroutedHandler) writeChunk(c *httpContext, resp HTTPResponse, up
|
||||
if terminateErr := handler.terminateUpload(c, upload, info); terminateErr != nil {
|
||||
// We only log this error and not show it to the user since this
|
||||
// termination error is not relevant to the uploading client
|
||||
c.log.Error("UploadStopTerminateError", "error", terminateErr.Error())
|
||||
c.log.ErrorContext(c, "UploadStopTerminateError", "error", terminateErr.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
c.log.Info("ChunkWriteComplete", "bytesWritten", bytesWritten)
|
||||
c.log.InfoContext(c, "ChunkWriteComplete", "bytesWritten", bytesWritten)
|
||||
|
||||
// Send new offset to client
|
||||
newOffset := offset + bytesWritten
|
||||
@@ -1003,7 +1004,7 @@ func (handler *UnroutedHandler) emitFinishEvents(c *httpContext, resp HTTPRespon
|
||||
resp = resp.MergeWith(resp2)
|
||||
}
|
||||
|
||||
c.log.Info("UploadFinished", "size", info.Size)
|
||||
c.log.InfoContext(c, "UploadFinished", "size", info.Size)
|
||||
handler.Metrics.incUploadsFinished()
|
||||
|
||||
if handler.config.NotifyCompleteUploads {
|
||||
@@ -1047,6 +1048,7 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
|
||||
return
|
||||
}
|
||||
|
||||
// Fall back to the existing GetReader implementation if ContentServerDataStore is not implemented
|
||||
contentType, contentDisposition := filterContentType(info)
|
||||
resp := HTTPResponse{
|
||||
StatusCode: http.StatusOK,
|
||||
@@ -1058,6 +1060,27 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
|
||||
Body: "", // Body is intentionally left empty, and we copy it manually in later.
|
||||
}
|
||||
|
||||
// If the data store implements ContentServerDataStore, use delegate the handling
|
||||
// of GET requests to the data store.
|
||||
// Otherwise, we will use the existing GetReader implementation.
|
||||
if handler.composer.UsesContentServer {
|
||||
servableUpload := handler.composer.ContentServer.AsServableUpload(upload)
|
||||
|
||||
// Pass file type and name to the implementation, but it may override them.
|
||||
w.Header().Set("Content-Type", resp.Header["Content-Type"])
|
||||
w.Header().Set("Content-Disposition", resp.Header["Content-Disposition"])
|
||||
|
||||
// Use loggingResponseWriter to get the ResponseOutgoing log entry that
|
||||
// normally handler.sendResp would produce.
|
||||
loggingW := &loggingResponseWriter{ResponseWriter: w, logger: c.log}
|
||||
|
||||
err = servableUpload.ServeContent(c, loggingW, r)
|
||||
if err != nil {
|
||||
handler.sendError(c, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// If no data has been uploaded yet, respond with an empty "204 No Content" status.
|
||||
if info.Offset == 0 {
|
||||
resp.StatusCode = http.StatusNoContent
|
||||
@@ -1065,6 +1088,15 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
|
||||
return
|
||||
}
|
||||
|
||||
if handler.composer.UsesContentServer {
|
||||
servableUpload := handler.composer.ContentServer.AsServableUpload(upload)
|
||||
err = servableUpload.ServeContent(c, w, r)
|
||||
if err != nil {
|
||||
handler.sendError(c, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
src, err := upload.GetReader(c)
|
||||
if err != nil {
|
||||
handler.sendError(c, err)
|
||||
@@ -1172,7 +1204,7 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
|
||||
var info FileInfo
|
||||
if handler.config.NotifyTerminatedUploads {
|
||||
if handler.config.NotifyTerminatedUploads || handler.config.PreUploadTerminateCallback != nil {
|
||||
info, err = upload.GetInfo(c)
|
||||
if err != nil {
|
||||
handler.sendError(c, err)
|
||||
@@ -1180,15 +1212,26 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
}
|
||||
|
||||
resp := HTTPResponse{
|
||||
StatusCode: http.StatusNoContent,
|
||||
}
|
||||
|
||||
if handler.config.PreUploadTerminateCallback != nil {
|
||||
resp2, err := handler.config.PreUploadTerminateCallback(newHookEvent(c, info))
|
||||
if err != nil {
|
||||
handler.sendError(c, err)
|
||||
return
|
||||
}
|
||||
resp = resp.MergeWith(resp2)
|
||||
}
|
||||
|
||||
err = handler.terminateUpload(c, upload, info)
|
||||
if err != nil {
|
||||
handler.sendError(c, err)
|
||||
return
|
||||
}
|
||||
|
||||
handler.sendResp(c, HTTPResponse{
|
||||
StatusCode: http.StatusNoContent,
|
||||
})
|
||||
handler.sendResp(c, resp)
|
||||
}
|
||||
|
||||
// terminateUpload passes a given upload to the DataStore's Terminater,
|
||||
@@ -1208,7 +1251,7 @@ func (handler *UnroutedHandler) terminateUpload(c *httpContext, upload Upload, i
|
||||
handler.TerminatedUploads <- newHookEvent(c, info)
|
||||
}
|
||||
|
||||
c.log.Info("UploadTerminated")
|
||||
c.log.InfoContext(c, "UploadTerminated")
|
||||
handler.Metrics.incUploadsTerminated()
|
||||
|
||||
return nil
|
||||
@@ -1222,7 +1265,7 @@ func (handler *UnroutedHandler) sendError(c *httpContext, err error) {
|
||||
var detailedErr Error
|
||||
|
||||
if !errors.As(err, &detailedErr) {
|
||||
c.log.Error("InternalServerError", "message", err.Error())
|
||||
c.log.ErrorContext(c, "InternalServerError", "message", err.Error())
|
||||
detailedErr = NewError("ERR_INTERNAL_SERVER_ERROR", err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
@@ -1240,7 +1283,7 @@ func (handler *UnroutedHandler) sendError(c *httpContext, err error) {
|
||||
func (handler *UnroutedHandler) sendResp(c *httpContext, resp HTTPResponse) {
|
||||
resp.writeTo(c.res)
|
||||
|
||||
c.log.Info("ResponseOutgoing", "status", resp.StatusCode, "body", resp.Body)
|
||||
c.log.InfoContext(c, "ResponseOutgoing", "status", resp.StatusCode, "body", resp.Body)
|
||||
}
|
||||
|
||||
// Make an absolute URLs to the given upload id. If the base path is absolute
|
||||
@@ -1323,6 +1366,14 @@ func getHostAndProtocol(r *http.Request, allowForwarded bool) (host, proto strin
|
||||
}
|
||||
}
|
||||
|
||||
// Remove default ports
|
||||
if proto == "http" {
|
||||
host = strings.TrimSuffix(host, ":80")
|
||||
}
|
||||
if proto == "https" {
|
||||
host = strings.TrimSuffix(host, ":443")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1393,7 +1444,7 @@ func (handler *UnroutedHandler) lockUpload(c *httpContext, id string) (Lock, err
|
||||
|
||||
// No need to wrap this in a sync.OnceFunc because c.cancel will be a noop after the first call.
|
||||
releaseLock := func() {
|
||||
c.log.Info("UploadInterrupted")
|
||||
c.log.InfoContext(c, "UploadInterrupted")
|
||||
c.cancel(ErrUploadInterrupted)
|
||||
}
|
||||
|
||||
@@ -1671,3 +1722,20 @@ func validateUploadId(newId string) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// loggingResponseWriter is a wrapper around http.ResponseWriter that logs the
|
||||
// final status code similar to UnroutedHandler.sendResp.
|
||||
type loggingResponseWriter struct {
|
||||
http.ResponseWriter
|
||||
logger *slog.Logger
|
||||
}
|
||||
|
||||
func (w *loggingResponseWriter) WriteHeader(statusCode int) {
|
||||
if statusCode >= 200 {
|
||||
w.logger.Info("ResponseOutgoing", "status", statusCode)
|
||||
}
|
||||
w.ResponseWriter.WriteHeader(statusCode)
|
||||
}
|
||||
|
||||
// Unwrap provides access to the underlying http.ResponseWriter.
|
||||
func (w *loggingResponseWriter) Unwrap() http.ResponseWriter { return w.ResponseWriter }
|
||||
|
||||
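The DelFile changes make the new PreUploadTerminateCallback usable as a veto hook for DELETE requests. Below is a minimal wiring sketch against tusd's public API; the policy shown (refusing to terminate uploads that already finished) and the paths and port are invented for illustration.

```go
package main

import (
	"net/http"

	"github.com/tus/tusd/v2/pkg/filestore"
	"github.com/tus/tusd/v2/pkg/handler"
)

func main() {
	store := filestore.New("./uploads")
	composer := handler.NewStoreComposer()
	store.UseIn(composer)

	h, err := handler.NewHandler(handler.Config{
		BasePath:      "/files/",
		StoreComposer: composer,
		// Veto DELETE requests for uploads that are already complete;
		// the returned error becomes the client-facing response.
		PreUploadTerminateCallback: func(hook handler.HookEvent) (handler.HTTPResponse, error) {
			if hook.Upload.Offset == hook.Upload.Size {
				return handler.HTTPResponse{}, handler.ErrUploadTerminationRejected
			}
			return handler.HTTPResponse{}, nil
		},
	})
	if err != nil {
		panic(err)
	}
	http.Handle("/files/", http.StripPrefix("/files/", h))
	_ = http.ListenAndServe(":8080", nil)
}
```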
vendor/go.etcd.io/etcd/api/v3/version/version.go: 2 changes (generated, vendored)

@@ -26,7 +26,7 @@ import (
 var (
 	// MinClusterVersion is the min cluster version this etcd binary is compatible with.
 	MinClusterVersion = "3.0.0"
-	Version           = "3.5.18"
+	Version           = "3.5.19"
 	APIVersion        = "unknown"

 	// Git SHA Value will be set during build
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go: 2 changes (generated, vendored)

@@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"

 // Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
 func Version() string {
-	return "1.34.0"
+	return "1.35.0"
 }
vendor/go.uber.org/multierr/CHANGELOG.md: 15 changes (generated, vendored)

@@ -1,6 +1,21 @@
 Releases
 ========

+v1.11.0 (2023-03-28)
+====================
+- `Errors` now supports any error that implements multiple-error
+  interface.
+- Add `Every` function to allow checking if all errors in the chain
+  satisfies `errors.Is` against the target error.
+
+v1.10.0 (2023-03-08)
+====================
+
+- Comply with Go 1.20's multiple-error interface.
+- Drop Go 1.18 support.
+  Per the support policy, only Go 1.19 and 1.20 are supported now.
+- Drop all non-test external dependencies.
+
 v1.9.0 (2022-12-12)
 ===================
vendor/go.uber.org/multierr/README.md: 22 changes (generated, vendored)

@@ -2,9 +2,29 @@

 `multierr` allows combining one or more Go `error`s together.

+## Features
+
+- **Idiomatic**:
+  multierr follows best practices in Go, and keeps your code idiomatic.
+    - It keeps the underlying error type hidden,
+      allowing you to deal in `error` values exclusively.
+    - It provides APIs to safely append into an error from a `defer` statement.
+- **Performant**:
+  multierr is optimized for performance:
+    - It avoids allocations where possible.
+    - It utilizes slice resizing semantics to optimize common cases
+      like appending into the same error object from a loop.
+- **Interoperable**:
+  multierr interoperates with the Go standard library's error APIs seamlessly:
+    - The `errors.Is` and `errors.As` functions *just work*.
+- **Lightweight**:
+  multierr comes with virtually no dependencies.
+
 ## Installation

-    go get -u go.uber.org/multierr
+```bash
+go get -u go.uber.org/multierr@latest
+```

 ## Status
vendor/go.uber.org/multierr/error.go: 63 changes (generated, vendored)

@@ -1,4 +1,4 @@
-// Copyright (c) 2017-2021 Uber Technologies, Inc.
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -147,8 +147,7 @@ import (
 	"io"
 	"strings"
 	"sync"
-
-	"go.uber.org/atomic"
+	"sync/atomic"
 )

 var (
@@ -196,23 +195,7 @@ type errorGroup interface {
 //
 // Callers of this function are free to modify the returned slice.
 func Errors(err error) []error {
-	if err == nil {
-		return nil
-	}
-
-	// Note that we're casting to multiError, not errorGroup. Our contract is
-	// that returned errors MAY implement errorGroup. Errors, however, only
-	// has special behavior for multierr-specific error objects.
-	//
-	// This behavior can be expanded in the future but I think it's prudent to
-	// start with as little as possible in terms of contract and possibility
-	// of misuse.
-	eg, ok := err.(*multiError)
-	if !ok {
-		return []error{err}
-	}
-
-	return append(([]error)(nil), eg.Errors()...)
+	return extractErrors(err)
 }

 // multiError is an error that holds one or more errors.
@@ -227,8 +210,6 @@ type multiError struct {
 	errors []error
 }

-var _ errorGroup = (*multiError)(nil)
-
 // Errors returns the list of underlying errors.
 //
 // This slice MUST NOT be modified.
@@ -239,33 +220,6 @@ func (merr *multiError) Errors() []error {
 	return merr.errors
 }

-// As attempts to find the first error in the error list that matches the type
-// of the value that target points to.
-//
-// This function allows errors.As to traverse the values stored on the
-// multierr error.
-func (merr *multiError) As(target interface{}) bool {
-	for _, err := range merr.Errors() {
-		if errors.As(err, target) {
-			return true
-		}
-	}
-	return false
-}
-
-// Is attempts to match the provided error against errors in the error list.
-//
-// This function allows errors.Is to traverse the values stored on the
-// multierr error.
-func (merr *multiError) Is(target error) bool {
-	for _, err := range merr.Errors() {
-		if errors.Is(err, target) {
-			return true
-		}
-	}
-	return false
-}
-
 func (merr *multiError) Error() string {
 	if merr == nil {
 		return ""
@@ -281,6 +235,17 @@ func (merr *multiError) Error() string {
 	return result
 }

+// Every compares every error in the given err against the given target error
+// using [errors.Is], and returns true only if every comparison returned true.
+func Every(err error, target error) bool {
+	for _, e := range extractErrors(err) {
+		if !errors.Is(e, target) {
+			return false
+		}
+	}
+	return true
+}
+
 func (merr *multiError) Format(f fmt.State, c rune) {
 	if c == 'v' && f.Flag('+') {
 		merr.writeMultiline(f)
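The new Every helper answers a question errors.Is alone cannot: whether all of the combined errors match a target. A small usage sketch; the retryable-shard scenario is made up for illustration.

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

var errRetryable = errors.New("retryable")

func main() {
	err := multierr.Combine(
		fmt.Errorf("fetch shard 1: %w", errRetryable),
		fmt.Errorf("fetch shard 2: %w", errRetryable),
	)
	// true: every combined error satisfies errors.Is(e, errRetryable).
	fmt.Println(multierr.Every(err, errRetryable))

	err = multierr.Append(err, errors.New("disk full"))
	// false: one of the three errors does not match the target.
	fmt.Println(multierr.Every(err, errRetryable))
}
```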
vendor/go.uber.org/multierr/error_post_go120.go: 48 changes (generated, vendored, new file)

@@ -0,0 +1,48 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.20
+// +build go1.20
+
+package multierr
+
+// Unwrap returns a list of errors wrapped by this multierr.
+func (merr *multiError) Unwrap() []error {
+	return merr.Errors()
+}
+
+type multipleErrors interface {
+	Unwrap() []error
+}
+
+func extractErrors(err error) []error {
+	if err == nil {
+		return nil
+	}
+
+	// check if the given err is an Unwrapable error that
+	// implements multipleErrors interface.
+	eg, ok := err.(multipleErrors)
+	if !ok {
+		return []error{err}
+	}
+
+	return append(([]error)(nil), eg.Unwrap()...)
+}
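On Go 1.20 and later, the Unwrap() []error method above is all that is needed for standard-library interop: errors.Is and errors.As traverse any type exposing it. A standalone sketch of that convention, with a hypothetical errList type standing in for multiError:

```go
package main

import (
	"errors"
	"fmt"
)

// errList is a made-up aggregate error implementing the Go 1.20
// multiple-error convention via Unwrap() []error.
type errList struct{ errs []error }

func (e *errList) Error() string   { return fmt.Sprintf("%d errors", len(e.errs)) }
func (e *errList) Unwrap() []error { return e.errs }

var errNotFound = errors.New("not found")

func main() {
	err := &errList{errs: []error{errors.New("timeout"), errNotFound}}
	// true: the stdlib walks the Unwrap() []error list; no custom
	// Is/As methods are required on Go 1.20+.
	fmt.Println(errors.Is(err, errNotFound))
}
```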
vendor/go.uber.org/multierr/error_pre_go120.go: 79 changes (generated, vendored, new file)

@@ -0,0 +1,79 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build !go1.20
+// +build !go1.20
+
+package multierr
+
+import "errors"
+
+// Versions of Go before 1.20 did not support the Unwrap() []error method.
+// This provides a similar behavior by implementing the Is(..) and As(..)
+// methods.
+// See the errors.Join proposal for details:
+// https://github.com/golang/go/issues/53435
+
+// As attempts to find the first error in the error list that matches the type
+// of the value that target points to.
+//
+// This function allows errors.As to traverse the values stored on the
+// multierr error.
+func (merr *multiError) As(target interface{}) bool {
+	for _, err := range merr.Errors() {
+		if errors.As(err, target) {
+			return true
+		}
+	}
+	return false
+}
+
+// Is attempts to match the provided error against errors in the error list.
+//
+// This function allows errors.Is to traverse the values stored on the
+// multierr error.
+func (merr *multiError) Is(target error) bool {
+	for _, err := range merr.Errors() {
+		if errors.Is(err, target) {
+			return true
+		}
+	}
+	return false
+}
+
+func extractErrors(err error) []error {
+	if err == nil {
+		return nil
+	}
+
+	// Note that we're casting to multiError, not errorGroup. Our contract is
+	// that returned errors MAY implement errorGroup. Errors, however, only
+	// has special behavior for multierr-specific error objects.
+	//
+	// This behavior can be expanded in the future but I think it's prudent to
+	// start with as little as possible in terms of contract and possibility
+	// of misuse.
+	eg, ok := err.(*multiError)
+	if !ok {
+		return []error{err}
+	}
+
+	return append(([]error)(nil), eg.Errors()...)
+}
vendor/go.uber.org/multierr/glide.yaml: 8 changes (generated, vendored, deleted)

@@ -1,8 +0,0 @@
-package: go.uber.org/multierr
-import:
-- package: go.uber.org/atomic
-  version: ^1
-testImport:
-- package: github.com/stretchr/testify
-  subpackages:
-  - assert
vendor/golang.org/x/crypto/ssh/handshake.go: 47 changes (generated, vendored)

@@ -25,6 +25,11 @@ const debugHandshake = false
 // quickly.
 const chanSize = 16

+// maxPendingPackets sets the maximum number of packets to queue while waiting
+// for KEX to complete. This limits the total pending data to maxPendingPackets
+// * maxPacket bytes, which is ~16.8MB.
+const maxPendingPackets = 64
+
 // keyingTransport is a packet based transport that supports key
 // changes. It need not be thread-safe. It should pass through
 // msgNewKeys in both directions.
@@ -73,11 +78,19 @@ type handshakeTransport struct {
 	incoming  chan []byte
 	readError error

-	mu             sync.Mutex
-	writeError     error
-	sentInitPacket []byte
-	sentInitMsg    *kexInitMsg
-	pendingPackets [][]byte // Used when a key exchange is in progress.
+	mu sync.Mutex
+	// Condition for the above mutex. It is used to notify a completed key
+	// exchange or a write failure. Writes can wait for this condition while a
+	// key exchange is in progress.
+	writeCond      *sync.Cond
+	writeError     error
+	sentInitPacket []byte
+	sentInitMsg    *kexInitMsg
+	// Used to queue writes when a key exchange is in progress. The length is
+	// limited by pendingPacketsSize. Once full, writes will block until the key
+	// exchange is completed or an error occurs. If not empty, it is emptied
+	// all at once when the key exchange is completed in kexLoop.
+	pendingPackets [][]byte
 	writePacketsLeft uint32
 	writeBytesLeft   int64
 	userAuthComplete bool // whether the user authentication phase is complete
@@ -134,6 +147,7 @@ func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion,

 		config: config,
 	}
+	t.writeCond = sync.NewCond(&t.mu)
 	t.resetReadThresholds()
 	t.resetWriteThresholds()

@@ -260,6 +274,7 @@ func (t *handshakeTransport) recordWriteError(err error) {
 	defer t.mu.Unlock()
 	if t.writeError == nil && err != nil {
 		t.writeError = err
+		t.writeCond.Broadcast()
 	}
 }

@@ -363,6 +378,8 @@ write:
 		}
 	}
 	t.pendingPackets = t.pendingPackets[:0]
+	// Unblock writePacket if waiting for KEX.
+	t.writeCond.Broadcast()
 	t.mu.Unlock()
 }

@@ -577,11 +594,20 @@ func (t *handshakeTransport) writePacket(p []byte) error {
 	}

 	if t.sentInitMsg != nil {
-		// Copy the packet so the writer can reuse the buffer.
-		cp := make([]byte, len(p))
-		copy(cp, p)
-		t.pendingPackets = append(t.pendingPackets, cp)
-		return nil
+		if len(t.pendingPackets) < maxPendingPackets {
+			// Copy the packet so the writer can reuse the buffer.
+			cp := make([]byte, len(p))
+			copy(cp, p)
+			t.pendingPackets = append(t.pendingPackets, cp)
+			return nil
+		}
+		for t.sentInitMsg != nil {
+			// Block and wait for KEX to complete or an error.
+			t.writeCond.Wait()
+			if t.writeError != nil {
+				return t.writeError
+			}
+		}
 	}

 	if t.writeBytesLeft > 0 {
@@ -598,6 +624,7 @@ func (t *handshakeTransport) writePacket(p []byte) error {

 	if err := t.pushPacket(p); err != nil {
 		t.writeError = err
+		t.writeCond.Broadcast()
 	}

 	return nil
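The handshake change is an instance of a general backpressure pattern: a bounded pending queue guarded by a mutex, with a sync.Cond that blocks writers once the limit is hit and wakes them when the queue drains. A self-contained sketch of just that pattern; boundedQueue, its limit, and the explicit flush are simplified stand-ins for pendingPackets, maxPendingPackets, and the drain in kexLoop.

```go
package main

import (
	"fmt"
	"sync"
)

type boundedQueue struct {
	mu      sync.Mutex
	cond    *sync.Cond
	pending [][]byte
	limit   int
	err     error
}

func newBoundedQueue(limit int) *boundedQueue {
	q := &boundedQueue{limit: limit}
	q.cond = sync.NewCond(&q.mu)
	return q
}

// push queues p, blocking while the queue is full, and fails fast if an
// error was recorded in the meantime (mirroring writeError + writeCond).
func (q *boundedQueue) push(p []byte) error {
	q.mu.Lock()
	defer q.mu.Unlock()
	for len(q.pending) >= q.limit {
		q.cond.Wait()
		if q.err != nil {
			return q.err
		}
	}
	cp := make([]byte, len(p)) // copy so the caller may reuse its buffer
	copy(cp, p)
	q.pending = append(q.pending, cp)
	return nil
}

// flush empties the queue all at once and unblocks any waiting pushers,
// as kexLoop does when a key exchange completes.
func (q *boundedQueue) flush() [][]byte {
	q.mu.Lock()
	defer q.mu.Unlock()
	out := q.pending
	q.pending = nil
	q.cond.Broadcast()
	return out
}

func main() {
	q := newBoundedQueue(2)
	_ = q.push([]byte("a"))
	_ = q.push([]byte("b"))
	done := make(chan struct{})
	go func() {
		fmt.Println("flushed", len(q.flush()))
		close(done)
	}()
	_ = q.push([]byte("c")) // blocks until the flush drains the queue
	<-done
}
```

The bound matters for the reason the new comment gives: without it, a peer that stalls key exchange could force the other side to buffer writes without limit.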
vendor/golang.org/x/crypto/ssh/messages.go: 2 changes (generated, vendored)

@@ -818,6 +818,8 @@ func decode(packet []byte) (interface{}, error) {
 		return new(userAuthSuccessMsg), nil
 	case msgUserAuthFailure:
 		msg = new(userAuthFailureMsg)
+	case msgUserAuthBanner:
+		msg = new(userAuthBannerMsg)
 	case msgUserAuthPubKeyOk:
 		msg = new(userAuthPubKeyOkMsg)
 	case msgGlobalRequest:
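The decode addition above concerns SSH_MSG_USERAUTH_BANNER messages. On the consuming side, x/crypto/ssh surfaces such banners through ClientConfig.BannerCallback, as in this sketch; the host, user, and password are placeholders.

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	cfg := &ssh.ClientConfig{
		User: "demo",
		Auth: []ssh.AuthMethod{ssh.Password("secret")},
		// Print any pre-authentication banner the server sends.
		BannerCallback: func(message string) error {
			fmt.Print(message)
			return nil
		},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // do not use in production
	}
	client, err := ssh.Dial("tcp", "example.com:22", cfg)
	if err != nil {
		panic(err)
	}
	defer client.Close()
}
```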
vendor/golang.org/x/crypto/ssh/tcpip.go: 2 changes (generated, vendored)

@@ -459,7 +459,7 @@ func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel
 		return nil, err
 	}
 	go DiscardRequests(in)
-	return ch, err
+	return ch, nil
 }

 type tcpChan struct {
vendor/golang.org/x/exp/constraints/constraints.go: 50 changes (generated, vendored, deleted)

@@ -1,50 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package constraints defines a set of useful constraints to be used
-// with type parameters.
-package constraints
-
-// Signed is a constraint that permits any signed integer type.
-// If future releases of Go add new predeclared signed integer types,
-// this constraint will be modified to include them.
-type Signed interface {
-	~int | ~int8 | ~int16 | ~int32 | ~int64
-}
-
-// Unsigned is a constraint that permits any unsigned integer type.
-// If future releases of Go add new predeclared unsigned integer types,
-// this constraint will be modified to include them.
-type Unsigned interface {
-	~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
-}
-
-// Integer is a constraint that permits any integer type.
-// If future releases of Go add new predeclared integer types,
-// this constraint will be modified to include them.
-type Integer interface {
-	Signed | Unsigned
-}
-
-// Float is a constraint that permits any floating-point type.
-// If future releases of Go add new predeclared floating-point types,
-// this constraint will be modified to include them.
-type Float interface {
-	~float32 | ~float64
-}
-
-// Complex is a constraint that permits any complex numeric type.
-// If future releases of Go add new predeclared complex numeric types,
-// this constraint will be modified to include them.
-type Complex interface {
-	~complex64 | ~complex128
-}
-
-// Ordered is a constraint that permits any ordered type: any type
-// that supports the operators < <= >= >.
-// If future releases of Go add new ordered types,
-// this constraint will be modified to include them.
-type Ordered interface {
-	Integer | Float | ~string
-}
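The deleted constraints live on in the standard library: cmp.Ordered (Go 1.21+) is the drop-in replacement for constraints.Ordered, as in this short sketch.

```go
package main

import (
	"cmp"
	"fmt"
)

// maxOf works for any type satisfying cmp.Ordered, exactly where
// constraints.Ordered was used before.
func maxOf[T cmp.Ordered](a, b T) T {
	if a > b {
		return a
	}
	return b
}

func main() {
	fmt.Println(maxOf(3, 7))     // 7
	fmt.Println(maxOf("a", "b")) // b
}
```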
vendor/golang.org/x/exp/maps/maps.go: 58 changes (generated, vendored)

@@ -5,9 +5,20 @@
 // Package maps defines various functions useful with maps of any type.
 package maps

+import "maps"
+
+// TODO(adonovan): when https://go.dev/issue/32816 is accepted, all of
+// these functions except Keys and Values should be annotated
+// (provisionally with "//go:fix inline") so that tools can safely and
+// automatically replace calls to exp/maps with calls to std maps by
+// inlining them.
+
 // Keys returns the keys of the map m.
 // The keys will be in an indeterminate order.
 func Keys[M ~map[K]V, K comparable, V any](m M) []K {
+	// The simplest true equivalent using std is:
+	// return slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m)).
+
 	r := make([]K, 0, len(m))
 	for k := range m {
 		r = append(r, k)
@@ -18,6 +29,9 @@ func Keys[M ~map[K]V, K comparable, V any](m M) []K {
 // Values returns the values of the map m.
 // The values will be in an indeterminate order.
 func Values[M ~map[K]V, K comparable, V any](m M) []V {
+	// The simplest true equivalent using std is:
+	// return slices.AppendSeq(make([]V, 0, len(m)), maps.Values(m)).
+
 	r := make([]V, 0, len(m))
 	for _, v := range m {
 		r = append(r, v)
@@ -28,50 +42,24 @@ func Values[M ~map[K]V, K comparable, V any](m M) []V {
 // Equal reports whether two maps contain the same key/value pairs.
 // Values are compared using ==.
 func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool {
-	if len(m1) != len(m2) {
-		return false
-	}
-	for k, v1 := range m1 {
-		if v2, ok := m2[k]; !ok || v1 != v2 {
-			return false
-		}
-	}
-	return true
+	return maps.Equal(m1, m2)
 }

 // EqualFunc is like Equal, but compares values using eq.
 // Keys are still compared with ==.
 func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool {
-	if len(m1) != len(m2) {
-		return false
-	}
-	for k, v1 := range m1 {
-		if v2, ok := m2[k]; !ok || !eq(v1, v2) {
-			return false
-		}
-	}
-	return true
+	return maps.EqualFunc(m1, m2, eq)
 }

 // Clear removes all entries from m, leaving it empty.
 func Clear[M ~map[K]V, K comparable, V any](m M) {
-	for k := range m {
-		delete(m, k)
-	}
+	clear(m)
 }

 // Clone returns a copy of m. This is a shallow clone:
 // the new keys and values are set using ordinary assignment.
 func Clone[M ~map[K]V, K comparable, V any](m M) M {
-	// Preserve nil in case it matters.
-	if m == nil {
-		return nil
-	}
-	r := make(M, len(m))
-	for k, v := range m {
-		r[k] = v
-	}
-	return r
+	return maps.Clone(m)
 }

 // Copy copies all key/value pairs in src adding them to dst.
@@ -79,16 +67,10 @@ func Clone[M ~map[K]V, K comparable, V any](m M) M {
 // the value in dst will be overwritten by the value associated
 // with the key in src.
 func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) {
-	for k, v := range src {
-		dst[k] = v
-	}
+	maps.Copy(dst, src)
 }

 // DeleteFunc deletes any key/value pairs from m for which del returns true.
 func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) {
-	for k, v := range m {
-		if del(k, v) {
-			delete(m, k)
-		}
-	}
+	maps.DeleteFunc(m, del)
 }
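Since the rewritten functions are now thin wrappers, calling the standard library maps package directly is equivalent; a short sketch:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	a := map[string]int{"x": 1, "y": 2}
	b := maps.Clone(a) // shallow copy
	maps.Copy(b, map[string]int{"z": 3})
	maps.DeleteFunc(b, func(k string, v int) bool { return v > 2 })
	fmt.Println(maps.Equal(a, b)) // true: b is back to {x:1 y:2}
}
```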
vendor/golang.org/x/exp/slices/cmp.go: 44 changes (generated, vendored, deleted)

@@ -1,44 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slices
-
-import "golang.org/x/exp/constraints"
-
-// min is a version of the predeclared function from the Go 1.21 release.
-func min[T constraints.Ordered](a, b T) T {
-	if a < b || isNaN(a) {
-		return a
-	}
-	return b
-}
-
-// max is a version of the predeclared function from the Go 1.21 release.
-func max[T constraints.Ordered](a, b T) T {
-	if a > b || isNaN(a) {
-		return a
-	}
-	return b
-}
-
-// cmpLess is a copy of cmp.Less from the Go 1.21 release.
-func cmpLess[T constraints.Ordered](x, y T) bool {
-	return (isNaN(x) && !isNaN(y)) || x < y
-}
-
-// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
-func cmpCompare[T constraints.Ordered](x, y T) int {
-	xNaN := isNaN(x)
-	yNaN := isNaN(y)
-	if xNaN && yNaN {
-		return 0
-	}
-	if xNaN || x < y {
-		return -1
-	}
-	if yNaN || x > y {
-		return +1
-	}
-	return 0
-}
vendor/golang.org/x/exp/slices/slices.go: 416 changes (generated, vendored)

@@ -6,26 +6,22 @@
 package slices

 import (
-	"unsafe"
-
-	"golang.org/x/exp/constraints"
+	"cmp"
+	"slices"
 )

+// TODO(adonovan): when https://go.dev/issue/32816 is accepted, all of
+// these functions should be annotated (provisionally with "//go:fix
+// inline") so that tools can safely and automatically replace calls
+// to exp/slices with calls to std slices by inlining them.
+
 // Equal reports whether two slices are equal: the same length and all
 // elements equal. If the lengths are different, Equal returns false.
 // Otherwise, the elements are compared in increasing index order, and the
 // comparison stops at the first unequal pair.
 // Floating point NaNs are not considered equal.
 func Equal[S ~[]E, E comparable](s1, s2 S) bool {
-	if len(s1) != len(s2) {
-		return false
-	}
-	for i := range s1 {
-		if s1[i] != s2[i] {
-			return false
-		}
-	}
-	return true
+	return slices.Equal(s1, s2)
 }

 // EqualFunc reports whether two slices are equal using an equality
@@ -34,16 +30,7 @@ func Equal[S ~[]E, E comparable](s1, s2 S) bool {
 // increasing index order, and the comparison stops at the first index
 // for which eq returns false.
 func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
-	if len(s1) != len(s2) {
-		return false
-	}
-	for i, v1 := range s1 {
-		v2 := s2[i]
-		if !eq(v1, v2) {
-			return false
-		}
-	}
-	return true
+	return slices.EqualFunc(s1, s2, eq)
 }

 // Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
@@ -53,20 +40,8 @@ func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) boo
 // If both slices are equal until one of them ends, the shorter slice is
 // considered less than the longer one.
 // The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
-func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
-	for i, v1 := range s1 {
-		if i >= len(s2) {
-			return +1
-		}
-		v2 := s2[i]
-		if c := cmpCompare(v1, v2); c != 0 {
-			return c
-		}
-	}
-	if len(s1) < len(s2) {
-		return -1
-	}
-	return 0
+func Compare[S ~[]E, E cmp.Ordered](s1, s2 S) int {
+	return slices.Compare(s1, s2)
 }

 // CompareFunc is like [Compare] but uses a custom comparison function on each
@@ -75,52 +50,30 @@ func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
 // returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
 // and +1 if len(s1) > len(s2).
 func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
-	for i, v1 := range s1 {
-		if i >= len(s2) {
-			return +1
-		}
-		v2 := s2[i]
-		if c := cmp(v1, v2); c != 0 {
-			return c
-		}
-	}
-	if len(s1) < len(s2) {
-		return -1
-	}
-	return 0
+	return slices.CompareFunc(s1, s2, cmp)
 }

 // Index returns the index of the first occurrence of v in s,
 // or -1 if not present.
 func Index[S ~[]E, E comparable](s S, v E) int {
-	for i := range s {
-		if v == s[i] {
-			return i
-		}
-	}
-	return -1
+	return slices.Index(s, v)
 }

 // IndexFunc returns the first index i satisfying f(s[i]),
 // or -1 if none do.
 func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
-	for i := range s {
-		if f(s[i]) {
-			return i
-		}
-	}
-	return -1
+	return slices.IndexFunc(s, f)
 }

 // Contains reports whether v is present in s.
 func Contains[S ~[]E, E comparable](s S, v E) bool {
-	return Index(s, v) >= 0
+	return slices.Contains(s, v)
 }

 // ContainsFunc reports whether at least one
 // element e of s satisfies f(e).
 func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
-	return IndexFunc(s, f) >= 0
+	return slices.ContainsFunc(s, f)
 }

 // Insert inserts the values v... into s at index i,
@@ -131,92 +84,7 @@ func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
 // Insert panics if i is out of range.
 // This function is O(len(s) + len(v)).
 func Insert[S ~[]E, E any](s S, i int, v ...E) S {
-	m := len(v)
-	if m == 0 {
-		return s
-	}
-	n := len(s)
-	if i == n {
-		return append(s, v...)
-	}
-	if n+m > cap(s) {
-		// Use append rather than make so that we bump the size of
-		// the slice up to the next storage class.
-		// This is what Grow does but we don't call Grow because
-		// that might copy the values twice.
-		s2 := append(s[:i], make(S, n+m-i)...)
-		copy(s2[i:], v)
-		copy(s2[i+m:], s[i:])
-		return s2
-	}
-	s = s[:n+m]
-
-	// before:
-	// s: aaaaaaaabbbbccccccccdddd
-	//            ^   ^       ^   ^
-	//            i  i+m      n  n+m
-	// after:
-	// s: aaaaaaaavvvvbbbbcccccccc
-	//            ^   ^       ^   ^
-	//            i  i+m      n  n+m
-	//
-	// a are the values that don't move in s.
-	// v are the values copied in from v.
-	// b and c are the values from s that are shifted up in index.
-	// d are the values that get overwritten, never to be seen again.
-
-	if !overlaps(v, s[i+m:]) {
-		// Easy case - v does not overlap either the c or d regions.
-		// (It might be in some of a or b, or elsewhere entirely.)
-		// The data we copy up doesn't write to v at all, so just do it.
-
-		copy(s[i+m:], s[i:])
-
-		// Now we have
-		// s: aaaaaaaabbbbbbbbcccccccc
-		//            ^   ^   ^   ^
-		//            i  i+m  n  n+m
-		// Note the b values are duplicated.
-
-		copy(s[i:], v)
-
-		// Now we have
-		// s: aaaaaaaavvvvbbbbcccccccc
-		//            ^   ^   ^   ^
-		//            i  i+m  n  n+m
-		// That's the result we want.
-		return s
-	}
-
-	// The hard case - v overlaps c or d. We can't just shift up
-	// the data because we'd move or clobber the values we're trying
-	// to insert.
-	// So instead, write v on top of d, then rotate.
-	copy(s[n:], v)
-
-	// Now we have
-	// s: aaaaaaaabbbbccccccccvvvv
-	//            ^   ^       ^   ^
-	//            i  i+m      n  n+m
-
-	rotateRight(s[i:], m)
-
-	// Now we have
-	// s: aaaaaaaavvvvbbbbcccccccc
-	//            ^   ^       ^   ^
-	//            i  i+m      n  n+m
-	// That's the result we want.
-	return s
-}
-
-// clearSlice sets all elements up to the length of s to the zero value of E.
-// We may use the builtin clear func instead, and remove clearSlice, when upgrading
-// to Go 1.21+.
-func clearSlice[S ~[]E, E any](s S) {
-	var zero E
-	for i := range s {
-		s[i] = zero
-	}
+	return slices.Insert(s, i, v...)
 }

 // Delete removes the elements s[i:j] from s, returning the modified slice.
@@ -225,135 +93,27 @@ func clearSlice[S ~[]E, E any](s S) {
 // make a single call deleting them all together than to delete one at a time.
 // Delete zeroes the elements s[len(s)-(j-i):len(s)].
 func Delete[S ~[]E, E any](s S, i, j int) S {
-	_ = s[i:j:len(s)] // bounds check
-
-	if i == j {
-		return s
-	}
-
-	oldlen := len(s)
-	s = append(s[:i], s[j:]...)
-	clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC
-	return s
+	return slices.Delete(s, i, j)
 }

 // DeleteFunc removes any elements from s for which del returns true,
 // returning the modified slice.
 // DeleteFunc zeroes the elements between the new length and the original length.
 func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
-	i := IndexFunc(s, del)
-	if i == -1 {
-		return s
-	}
-	// Don't start copying elements until we find one to delete.
-	for j := i + 1; j < len(s); j++ {
-		if v := s[j]; !del(v) {
-			s[i] = v
-			i++
-		}
-	}
-	clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
-	return s[:i]
+	return slices.DeleteFunc(s, del)
 }

 // Replace replaces the elements s[i:j] by the given v, and returns the
 // modified slice. Replace panics if s[i:j] is not a valid slice of s.
 // When len(v) < (j-i), Replace zeroes the elements between the new length and the original length.
 func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
-	_ = s[i:j] // verify that i:j is a valid subslice
-
-	if i == j {
-		return Insert(s, i, v...)
-	}
-	if j == len(s) {
-		return append(s[:i], v...)
-	}
-
-	tot := len(s[:i]) + len(v) + len(s[j:])
-	if tot > cap(s) {
-		// Too big to fit, allocate and copy over.
-		s2 := append(s[:i], make(S, tot-i)...) // See Insert
-		copy(s2[i:], v)
-		copy(s2[i+len(v):], s[j:])
-		return s2
-	}
-
-	r := s[:tot]
-
-	if i+len(v) <= j {
-		// Easy, as v fits in the deleted portion.
-		copy(r[i:], v)
-		if i+len(v) != j {
-			copy(r[i+len(v):], s[j:])
-		}
-		clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC
-		return r
-	}
-
-	// We are expanding (v is bigger than j-i).
-	// The situation is something like this:
-	// (example has i=4,j=8,len(s)=16,len(v)=6)
-	// s: aaaaxxxxbbbbbbbbyy
-	//        ^   ^       ^ ^
-	//        i   j  len(s) tot
-	// a: prefix of s
-	// x: deleted range
-	// b: more of s
-	// y: area to expand into
-
-	if !overlaps(r[i+len(v):], v) {
-		// Easy, as v is not clobbered by the first copy.
-		copy(r[i+len(v):], s[j:])
-		copy(r[i:], v)
-		return r
-	}
-
-	// This is a situation where we don't have a single place to which
-	// we can copy v. Parts of it need to go to two different places.
-	// We want to copy the prefix of v into y and the suffix into x, then
-	// rotate |y| spots to the right.
-	//
-	//        v[2:]         v[:2]
-	//         |             |
-	// s: aaaavvvvbbbbbbbbvv
-	//        ^   ^       ^ ^
-	//        i   j  len(s) tot
-	//
-	// If either of those two destinations don't alias v, then we're good.
-	y := len(v) - (j - i) // length of y portion
-
-	if !overlaps(r[i:j], v) {
-		copy(r[i:j], v[y:])
-		copy(r[len(s):], v[:y])
-		rotateRight(r[i:], y)
-		return r
-	}
-	if !overlaps(r[len(s):], v) {
-		copy(r[len(s):], v[:y])
-		copy(r[i:j], v[y:])
-		rotateRight(r[i:], y)
-		return r
-	}
-
-	// Now we know that v overlaps both x and y.
-	// That means that the entirety of b is *inside* v.
-	// So we don't need to preserve b at all; instead we
-	// can copy v first, then copy the b part of v out of
-	// v to the right destination.
-	k := startIdx(v, s[j:])
-	copy(r[i:], v)
-	copy(r[i+len(v):], r[i+k:])
-	return r
+	return slices.Replace(s, i, j, v...)
 }

 // Clone returns a copy of the slice.
 // The elements are copied using assignment, so this is a shallow clone.
 func Clone[S ~[]E, E any](s S) S {
-	// Preserve nil in case it matters.
-	if s == nil {
-		return nil
-	}
-	return append(S([]E{}), s...)
+	return slices.Clone(s)
 }

 // Compact replaces consecutive runs of equal elements with a single copy.
@@ -362,40 +122,14 @@ func Clone[S ~[]E, E any](s S) S {
 // which may have a smaller length.
 // Compact zeroes the elements between the new length and the original length.
 func Compact[S ~[]E, E comparable](s S) S {
-	if len(s) < 2 {
-		return s
-	}
-	i := 1
-	for k := 1; k < len(s); k++ {
-		if s[k] != s[k-1] {
-			if i != k {
-				s[i] = s[k]
-			}
-			i++
-		}
-	}
-	clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
-	return s[:i]
+	return slices.Compact(s)
 }

 // CompactFunc is like [Compact] but uses an equality function to compare elements.
 // For runs of elements that compare equal, CompactFunc keeps the first one.
 // CompactFunc zeroes the elements between the new length and the original length.
 func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
-	if len(s) < 2 {
-		return s
-	}
-	i := 1
-	for k := 1; k < len(s); k++ {
-		if !eq(s[k], s[k-1]) {
-			if i != k {
-				s[i] = s[k]
-			}
-			i++
-		}
-	}
-	clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
-	return s[:i]
+	return slices.CompactFunc(s, eq)
 }

 // Grow increases the slice's capacity, if necessary, to guarantee space for
@@ -403,113 +137,15 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
 // to the slice without another allocation. If n is negative or too large to
 // allocate the memory, Grow panics.
 func Grow[S ~[]E, E any](s S, n int) S {
-	if n < 0 {
-		panic("cannot be negative")
-	}
-	if n -= cap(s) - len(s); n > 0 {
-		// TODO(https://go.dev/issue/53888): Make using []E instead of S
-		// to workaround a compiler bug where the runtime.growslice optimization
-		// does not take effect. Revert when the compiler is fixed.
-		s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
-	}
-	return s
+	return slices.Grow(s, n)
 }

 // Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
 func Clip[S ~[]E, E any](s S) S {
-	return s[:len(s):len(s)]
-}
-
-// Rotation algorithm explanation:
-//
-//	rotate left by 2
-//	start with
-//		0123456789
-//	split up like this
-//		01 234567 89
-//	swap first 2 and last 2
-//		89 234567 01
-//	join first parts
-//		89234567 01
-//	recursively rotate first left part by 2
-//		23456789 01
-//	join at the end
-//		2345678901
-//
-//	rotate left by 8
-//	start with
-//		0123456789
-//	split up like this
-//		01 234567 89
-//	swap first 2 and last 2
-//		89 234567 01
-//	join last parts
-//		89 23456701
-//	recursively rotate second part left by 6
-//		89 01234567
-//	join at the end
-//		8901234567
-
-// TODO: There are other rotate algorithms.
-// This algorithm has the desirable property that it moves each element exactly twice.
-// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
-// The follow-cycles algorithm can be 1-write but it is not very cache friendly.
-
-// rotateLeft rotates b left by n spaces.
-// s_final[i] = s_orig[i+r], wrapping around.
-func rotateLeft[E any](s []E, r int) {
-	for r != 0 && r != len(s) {
-		if r*2 <= len(s) {
-			swap(s[:r], s[len(s)-r:])
-			s = s[:len(s)-r]
-		} else {
-			swap(s[:len(s)-r], s[r:])
-			s, r = s[len(s)-r:], r*2-len(s)
-		}
-	}
-}
-func rotateRight[E any](s []E, r int) {
-	rotateLeft(s, len(s)-r)
-}
-
-// swap swaps the contents of x and y. x and y must be equal length and disjoint.
-func swap[E any](x, y []E) {
-	for i := 0; i < len(x); i++ {
-		x[i], y[i] = y[i], x[i]
-	}
-}
-
-// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
-func overlaps[E any](a, b []E) bool {
-	if len(a) == 0 || len(b) == 0 {
-		return false
-	}
-	elemSize := unsafe.Sizeof(a[0])
-	if elemSize == 0 {
-		return false
-	}
-	// TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
-	// Also see crypto/internal/alias/alias.go:AnyOverlap
-	return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
-		uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
-}
-
-// startIdx returns the index in haystack where the needle starts.
-// prerequisite: the needle must be aliased entirely inside the haystack.
-func startIdx[E any](haystack, needle []E) int {
-	p := &needle[0]
-	for i := range haystack {
-		if p == &haystack[i] {
-			return i
-		}
-	}
-	// TODO: what if the overlap is by a non-integral number of Es?
-	panic("needle not found")
+	return slices.Clip(s)
 }

 // Reverse reverses the elements of the slice in place.
 func Reverse[S ~[]E, E any](s S) {
-	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
-		s[i], s[j] = s[j], s[i]
-	}
+	slices.Reverse(s)
 }
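The same applies to exp/slices; a short sketch of the standard slices calls the wrappers now forward to:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	s := []int{3, 1, 2, 2}
	s = slices.Insert(s, 1, 9) // [3 9 1 2 2]
	s = slices.Delete(s, 0, 1) // [9 1 2 2]
	slices.Sort(s)             // [1 2 2 9]
	s = slices.Compact(s)      // [1 2 9], adjacent duplicates removed
	fmt.Println(s, slices.Contains(s, 9)) // [1 2 9] true
}
```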
143
vendor/golang.org/x/exp/slices/sort.go
generated
vendored
143
vendor/golang.org/x/exp/slices/sort.go
generated
vendored
@@ -2,21 +2,20 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp
|
||||
|
||||
package slices
|
||||
|
||||
import (
|
||||
"math/bits"
|
||||
|
||||
"golang.org/x/exp/constraints"
|
||||
"cmp"
|
||||
"slices"
|
||||
)
|
||||
|
||||
// TODO(adonovan): add a "//go:fix inline" annotation to each function
|
||||
// in this file; see https://go.dev/issue/32816.
|
||||
|
||||
// Sort sorts a slice of any ordered type in ascending order.
|
||||
 // When sorting floating-point numbers, NaNs are ordered before other values.
-func Sort[S ~[]E, E constraints.Ordered](x S) {
-    n := len(x)
-    pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
+func Sort[S ~[]E, E cmp.Ordered](x S) {
+    slices.Sort(x)
 }

 // SortFunc sorts the slice x in ascending order as determined by the cmp
@@ -29,118 +28,60 @@ func Sort[S ~[]E, E constraints.Ordered](x S) {
 // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
 // To indicate 'uncomparable', return 0 from the function.
 func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
-    n := len(x)
-    pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
+    slices.SortFunc(x, cmp)
 }

 // SortStableFunc sorts the slice x while keeping the original order of equal
 // elements, using cmp to compare elements in the same way as [SortFunc].
 func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
-    stableCmpFunc(x, len(x), cmp)
+    slices.SortStableFunc(x, cmp)
 }

 // IsSorted reports whether x is sorted in ascending order.
-func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
-    for i := len(x) - 1; i > 0; i-- {
-        if cmpLess(x[i], x[i-1]) {
-            return false
-        }
-    }
-    return true
+func IsSorted[S ~[]E, E cmp.Ordered](x S) bool {
+    return slices.IsSorted(x)
 }

 // IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
 // comparison function as defined by [SortFunc].
 func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
-    for i := len(x) - 1; i > 0; i-- {
-        if cmp(x[i], x[i-1]) < 0 {
-            return false
-        }
-    }
-    return true
+    return slices.IsSortedFunc(x, cmp)
 }

 // Min returns the minimal value in x. It panics if x is empty.
 // For floating-point numbers, Min propagates NaNs (any NaN value in x
 // forces the output to be NaN).
-func Min[S ~[]E, E constraints.Ordered](x S) E {
-    if len(x) < 1 {
-        panic("slices.Min: empty list")
-    }
-    m := x[0]
-    for i := 1; i < len(x); i++ {
-        m = min(m, x[i])
-    }
-    return m
+func Min[S ~[]E, E cmp.Ordered](x S) E {
+    return slices.Min(x)
 }

 // MinFunc returns the minimal value in x, using cmp to compare elements.
 // It panics if x is empty. If there is more than one minimal element
 // according to the cmp function, MinFunc returns the first one.
 func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
-    if len(x) < 1 {
-        panic("slices.MinFunc: empty list")
-    }
-    m := x[0]
-    for i := 1; i < len(x); i++ {
-        if cmp(x[i], m) < 0 {
-            m = x[i]
-        }
-    }
-    return m
+    return slices.MinFunc(x, cmp)
 }

 // Max returns the maximal value in x. It panics if x is empty.
 // For floating-point E, Max propagates NaNs (any NaN value in x
 // forces the output to be NaN).
-func Max[S ~[]E, E constraints.Ordered](x S) E {
-    if len(x) < 1 {
-        panic("slices.Max: empty list")
-    }
-    m := x[0]
-    for i := 1; i < len(x); i++ {
-        m = max(m, x[i])
-    }
-    return m
+func Max[S ~[]E, E cmp.Ordered](x S) E {
+    return slices.Max(x)
 }

 // MaxFunc returns the maximal value in x, using cmp to compare elements.
 // It panics if x is empty. If there is more than one maximal element
 // according to the cmp function, MaxFunc returns the first one.
 func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
-    if len(x) < 1 {
-        panic("slices.MaxFunc: empty list")
-    }
-    m := x[0]
-    for i := 1; i < len(x); i++ {
-        if cmp(x[i], m) > 0 {
-            m = x[i]
-        }
-    }
-    return m
+    return slices.MaxFunc(x, cmp)
 }

 // BinarySearch searches for target in a sorted slice and returns the position
 // where target is found, or the position where target would appear in the
 // sort order; it also returns a bool saying whether the target is really found
 // in the slice. The slice must be sorted in increasing order.
-func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
-    // Inlining is faster than calling BinarySearchFunc with a lambda.
-    n := len(x)
-    // Define x[-1] < target and x[n] >= target.
-    // Invariant: x[i-1] < target, x[j] >= target.
-    i, j := 0, n
-    for i < j {
-        h := int(uint(i+j) >> 1) // avoid overflow when computing h
-        // i ≤ h < j
-        if cmpLess(x[h], target) {
-            i = h + 1 // preserves x[i-1] < target
-        } else {
-            j = h // preserves x[j] >= target
-        }
-    }
-    // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
-    return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
+func BinarySearch[S ~[]E, E cmp.Ordered](x S, target E) (int, bool) {
+    return slices.BinarySearch(x, target)
 }

 // BinarySearchFunc works like [BinarySearch], but uses a custom comparison
@@ -151,47 +92,5 @@ func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
 // cmp must implement the same ordering as the slice, such that if
 // cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
 func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
-    n := len(x)
-    // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
-    // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
-    i, j := 0, n
-    for i < j {
-        h := int(uint(i+j) >> 1) // avoid overflow when computing h
-        // i ≤ h < j
-        if cmp(x[h], target) < 0 {
-            i = h + 1 // preserves cmp(x[i - 1], target) < 0
-        } else {
-            j = h // preserves cmp(x[j], target) >= 0
-        }
-    }
-    // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
-    return i, i < n && cmp(x[i], target) == 0
-}
-
-type sortedHint int // hint for pdqsort when choosing the pivot
-
-const (
-    unknownHint sortedHint = iota
-    increasingHint
-    decreasingHint
-)
-
-// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
-type xorshift uint64
-
-func (r *xorshift) Next() uint64 {
-    *r ^= *r << 13
-    *r ^= *r >> 17
-    *r ^= *r << 5
-    return uint64(*r)
-}
-
-func nextPowerOfTwo(length int) uint {
-    return 1 << bits.Len(uint(length))
-}
-
-// isNaN reports whether x is a NaN without requiring the math package.
-// This will always return false if T is not floating-point.
-func isNaN[T constraints.Ordered](x T) bool {
-    return x != x
+    return slices.BinarySearchFunc(x, target, cmp)
 }

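The net effect of this hunk is that golang.org/x/exp/slices becomes a thin forwarding layer over the standard library's slices and cmp packages (available since Go 1.21), so both import paths run the same stdlib pdqsort and binary search. A minimal sketch of what callers see after the update; the demo values are invented for illustration:

package main

import (
    "fmt"
    "slices" // standard library, Go 1.21+

    xslices "golang.org/x/exp/slices" // now forwards to the stdlib
)

func main() {
    // Both packages sort with the same stdlib implementation after this change.
    a := []int{3, 1, 2}
    xslices.Sort(a) // equivalent to slices.Sort(a)

    // Binary search also forwards; identical results either way.
    i, ok := xslices.BinarySearch(a, 2)
    j, ok2 := slices.BinarySearch(a, 2)
    fmt.Println(a, i, ok, j, ok2) // [1 2 3] 1 true 1 true
}

Because the wrappers delegate, behavioral details such as NaN ordering and panic messages are now whatever the stdlib provides.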
479 vendor/golang.org/x/exp/slices/zsortanyfunc.go (generated, vendored)
@@ -1,479 +0,0 @@
// Code generated by gen_sort_variants.go; DO NOT EDIT.

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package slices

// insertionSortCmpFunc sorts data[a:b] using insertion sort.
func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
    for i := a + 1; i < b; i++ {
        for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
            data[j], data[j-1] = data[j-1], data[j]
        }
    }
}

// siftDownCmpFunc implements the heap property on data[lo:hi].
// first is an offset into the array where the root of the heap lies.
func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
    root := lo
    for {
        child := 2*root + 1
        if child >= hi {
            break
        }
        if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
            child++
        }
        if !(cmp(data[first+root], data[first+child]) < 0) {
            return
        }
        data[first+root], data[first+child] = data[first+child], data[first+root]
        root = child
    }
}

func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
    first := a
    lo := 0
    hi := b - a

    // Build heap with greatest element at top.
    for i := (hi - 1) / 2; i >= 0; i-- {
        siftDownCmpFunc(data, i, hi, first, cmp)
    }

    // Pop elements, largest first, into end of data.
    for i := hi - 1; i >= 0; i-- {
        data[first], data[first+i] = data[first+i], data[first]
        siftDownCmpFunc(data, lo, i, first, cmp)
    }
}

// pdqsortCmpFunc sorts data[a:b].
// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
// C++ implementation: https://github.com/orlp/pdqsort
// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
    const maxInsertion = 12

    var (
        wasBalanced    = true // whether the last partitioning was reasonably balanced
        wasPartitioned = true // whether the slice was already partitioned
    )

    for {
        length := b - a

        if length <= maxInsertion {
            insertionSortCmpFunc(data, a, b, cmp)
            return
        }

        // Fall back to heapsort if too many bad choices were made.
        if limit == 0 {
            heapSortCmpFunc(data, a, b, cmp)
            return
        }

        // If the last partitioning was imbalanced, we need to breaking patterns.
        if !wasBalanced {
            breakPatternsCmpFunc(data, a, b, cmp)
            limit--
        }

        pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
        if hint == decreasingHint {
            reverseRangeCmpFunc(data, a, b, cmp)
            // The chosen pivot was pivot-a elements after the start of the array.
            // After reversing it is pivot-a elements before the end of the array.
            // The idea came from Rust's implementation.
            pivot = (b - 1) - (pivot - a)
            hint = increasingHint
        }

        // The slice is likely already sorted.
        if wasBalanced && wasPartitioned && hint == increasingHint {
            if partialInsertionSortCmpFunc(data, a, b, cmp) {
                return
            }
        }

        // Probably the slice contains many duplicate elements, partition the slice into
        // elements equal to and elements greater than the pivot.
        if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
            mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
            a = mid
            continue
        }

        mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
        wasPartitioned = alreadyPartitioned

        leftLen, rightLen := mid-a, b-mid
        balanceThreshold := length / 8
        if leftLen < rightLen {
            wasBalanced = leftLen >= balanceThreshold
            pdqsortCmpFunc(data, a, mid, limit, cmp)
            a = mid + 1
        } else {
            wasBalanced = rightLen >= balanceThreshold
            pdqsortCmpFunc(data, mid+1, b, limit, cmp)
            b = mid
        }
    }
}

// partitionCmpFunc does one quicksort partition.
// Let p = data[pivot]
// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
// On return, data[newpivot] = p
func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
    data[a], data[pivot] = data[pivot], data[a]
    i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

    for i <= j && (cmp(data[i], data[a]) < 0) {
        i++
    }
    for i <= j && !(cmp(data[j], data[a]) < 0) {
        j--
    }
    if i > j {
        data[j], data[a] = data[a], data[j]
        return j, true
    }
    data[i], data[j] = data[j], data[i]
    i++
    j--

    for {
        for i <= j && (cmp(data[i], data[a]) < 0) {
            i++
        }
        for i <= j && !(cmp(data[j], data[a]) < 0) {
            j--
        }
        if i > j {
            break
        }
        data[i], data[j] = data[j], data[i]
        i++
        j--
    }
    data[j], data[a] = data[a], data[j]
    return j, false
}

// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
    data[a], data[pivot] = data[pivot], data[a]
    i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

    for {
        for i <= j && !(cmp(data[a], data[i]) < 0) {
            i++
        }
        for i <= j && (cmp(data[a], data[j]) < 0) {
            j--
        }
        if i > j {
            break
        }
        data[i], data[j] = data[j], data[i]
        i++
        j--
    }
    return i
}

// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
    const (
        maxSteps         = 5  // maximum number of adjacent out-of-order pairs that will get shifted
        shortestShifting = 50 // don't shift any elements on short arrays
    )
    i := a + 1
    for j := 0; j < maxSteps; j++ {
        for i < b && !(cmp(data[i], data[i-1]) < 0) {
            i++
        }

        if i == b {
            return true
        }

        if b-a < shortestShifting {
            return false
        }

        data[i], data[i-1] = data[i-1], data[i]

        // Shift the smaller one to the left.
        if i-a >= 2 {
            for j := i - 1; j >= 1; j-- {
                if !(cmp(data[j], data[j-1]) < 0) {
                    break
                }
                data[j], data[j-1] = data[j-1], data[j]
            }
        }
        // Shift the greater one to the right.
        if b-i >= 2 {
            for j := i + 1; j < b; j++ {
                if !(cmp(data[j], data[j-1]) < 0) {
                    break
                }
                data[j], data[j-1] = data[j-1], data[j]
            }
        }
    }
    return false
}

// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
// that might cause imbalanced partitions in quicksort.
func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
    length := b - a
    if length >= 8 {
        random := xorshift(length)
        modulus := nextPowerOfTwo(length)

        for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
            other := int(uint(random.Next()) & (modulus - 1))
            if other >= length {
                other -= length
            }
            data[idx], data[a+other] = data[a+other], data[idx]
        }
    }
}

// choosePivotCmpFunc chooses a pivot in data[a:b].
//
// [0,8): chooses a static pivot.
// [8,shortestNinther): uses the simple median-of-three method.
// [shortestNinther,∞): uses the Tukey ninther method.
func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
    const (
        shortestNinther = 50
        maxSwaps        = 4 * 3
    )

    l := b - a

    var (
        swaps int
        i     = a + l/4*1
        j     = a + l/4*2
        k     = a + l/4*3
    )

    if l >= 8 {
        if l >= shortestNinther {
            // Tukey ninther method, the idea came from Rust's implementation.
            i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
            j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
            k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
        }
        // Find the median among i, j, k and stores it into j.
        j = medianCmpFunc(data, i, j, k, &swaps, cmp)
    }

    switch swaps {
    case 0:
        return j, increasingHint
    case maxSwaps:
        return j, decreasingHint
    default:
        return j, unknownHint
    }
}

// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
    if cmp(data[b], data[a]) < 0 {
        *swaps++
        return b, a
    }
    return a, b
}

// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
    a, b = order2CmpFunc(data, a, b, swaps, cmp)
    b, c = order2CmpFunc(data, b, c, swaps, cmp)
    a, b = order2CmpFunc(data, a, b, swaps, cmp)
    return b
}

// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
    return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
}

func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
    i := a
    j := b - 1
    for i < j {
        data[i], data[j] = data[j], data[i]
        i++
        j--
    }
}

func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
    for i := 0; i < n; i++ {
        data[a+i], data[b+i] = data[b+i], data[a+i]
    }
}

func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
    blockSize := 20 // must be > 0
    a, b := 0, blockSize
    for b <= n {
        insertionSortCmpFunc(data, a, b, cmp)
        a = b
        b += blockSize
    }
    insertionSortCmpFunc(data, a, n, cmp)

    for blockSize < n {
        a, b = 0, 2*blockSize
        for b <= n {
            symMergeCmpFunc(data, a, a+blockSize, b, cmp)
            a = b
            b += 2 * blockSize
        }
        if m := a + blockSize; m < n {
            symMergeCmpFunc(data, a, m, n, cmp)
        }
        blockSize *= 2
    }
}

// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
// Computer Science, pages 714-723. Springer, 2004.
//
// Let M = m-a and N = b-n. Wolog M < N.
// The recursion depth is bound by ceil(log(N+M)).
// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
//
// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
// in the paper carries through for Swap operations, especially as the block
// swapping rotate uses only O(M+N) Swaps.
//
// symMerge assumes non-degenerate arguments: a < m && m < b.
// Having the caller check this condition eliminates many leaf recursion calls,
// which improves performance.
func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
    // Avoid unnecessary recursions of symMerge
    // by direct insertion of data[a] into data[m:b]
    // if data[a:m] only contains one element.
    if m-a == 1 {
        // Use binary search to find the lowest index i
        // such that data[i] >= data[a] for m <= i < b.
        // Exit the search loop with i == b in case no such index exists.
        i := m
        j := b
        for i < j {
            h := int(uint(i+j) >> 1)
            if cmp(data[h], data[a]) < 0 {
                i = h + 1
            } else {
                j = h
            }
        }
        // Swap values until data[a] reaches the position before i.
        for k := a; k < i-1; k++ {
            data[k], data[k+1] = data[k+1], data[k]
        }
        return
    }

    // Avoid unnecessary recursions of symMerge
    // by direct insertion of data[m] into data[a:m]
    // if data[m:b] only contains one element.
    if b-m == 1 {
        // Use binary search to find the lowest index i
        // such that data[i] > data[m] for a <= i < m.
        // Exit the search loop with i == m in case no such index exists.
        i := a
        j := m
        for i < j {
            h := int(uint(i+j) >> 1)
            if !(cmp(data[m], data[h]) < 0) {
                i = h + 1
            } else {
                j = h
            }
        }
        // Swap values until data[m] reaches the position i.
        for k := m; k > i; k-- {
            data[k], data[k-1] = data[k-1], data[k]
        }
        return
    }

    mid := int(uint(a+b) >> 1)
    n := mid + m
    var start, r int
    if m > mid {
        start = n - b
        r = mid
    } else {
        start = a
        r = m
    }
    p := n - 1

    for start < r {
        c := int(uint(start+r) >> 1)
        if !(cmp(data[p-c], data[c]) < 0) {
            start = c + 1
        } else {
            r = c
        }
    }

    end := n - start
    if start < m && m < end {
        rotateCmpFunc(data, start, m, end, cmp)
    }
    if a < start && start < mid {
        symMergeCmpFunc(data, a, start, mid, cmp)
    }
    if mid < end && end < b {
        symMergeCmpFunc(data, mid, end, b, cmp)
    }
}

// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// Data of the form 'x u v y' is changed to 'x v u y'.
// rotate performs at most b-a many calls to data.Swap,
// and it assumes non-degenerate arguments: a < m && m < b.
func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
    i := m - a
    j := b - m

    for i != j {
        if i > j {
            swapRangeCmpFunc(data, m-i, m, j, cmp)
            i -= j
        } else {
            swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
            j -= i
        }
    }
    // i == j
    swapRangeCmpFunc(data, m-i, m, i, cmp)
}
481 vendor/golang.org/x/exp/slices/zsortordered.go (generated, vendored)
@@ -1,481 +0,0 @@
// Code generated by gen_sort_variants.go; DO NOT EDIT.

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package slices

import "golang.org/x/exp/constraints"

// insertionSortOrdered sorts data[a:b] using insertion sort.
func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
    for i := a + 1; i < b; i++ {
        for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
            data[j], data[j-1] = data[j-1], data[j]
        }
    }
}

// siftDownOrdered implements the heap property on data[lo:hi].
// first is an offset into the array where the root of the heap lies.
func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
    root := lo
    for {
        child := 2*root + 1
        if child >= hi {
            break
        }
        if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
            child++
        }
        if !cmpLess(data[first+root], data[first+child]) {
            return
        }
        data[first+root], data[first+child] = data[first+child], data[first+root]
        root = child
    }
}

func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
    first := a
    lo := 0
    hi := b - a

    // Build heap with greatest element at top.
    for i := (hi - 1) / 2; i >= 0; i-- {
        siftDownOrdered(data, i, hi, first)
    }

    // Pop elements, largest first, into end of data.
    for i := hi - 1; i >= 0; i-- {
        data[first], data[first+i] = data[first+i], data[first]
        siftDownOrdered(data, lo, i, first)
    }
}

// pdqsortOrdered sorts data[a:b].
// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
// C++ implementation: https://github.com/orlp/pdqsort
// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
    const maxInsertion = 12

    var (
        wasBalanced    = true // whether the last partitioning was reasonably balanced
        wasPartitioned = true // whether the slice was already partitioned
    )

    for {
        length := b - a

        if length <= maxInsertion {
            insertionSortOrdered(data, a, b)
            return
        }

        // Fall back to heapsort if too many bad choices were made.
        if limit == 0 {
            heapSortOrdered(data, a, b)
            return
        }

        // If the last partitioning was imbalanced, we need to breaking patterns.
        if !wasBalanced {
            breakPatternsOrdered(data, a, b)
            limit--
        }

        pivot, hint := choosePivotOrdered(data, a, b)
        if hint == decreasingHint {
            reverseRangeOrdered(data, a, b)
            // The chosen pivot was pivot-a elements after the start of the array.
            // After reversing it is pivot-a elements before the end of the array.
            // The idea came from Rust's implementation.
            pivot = (b - 1) - (pivot - a)
            hint = increasingHint
        }

        // The slice is likely already sorted.
        if wasBalanced && wasPartitioned && hint == increasingHint {
            if partialInsertionSortOrdered(data, a, b) {
                return
            }
        }

        // Probably the slice contains many duplicate elements, partition the slice into
        // elements equal to and elements greater than the pivot.
        if a > 0 && !cmpLess(data[a-1], data[pivot]) {
            mid := partitionEqualOrdered(data, a, b, pivot)
            a = mid
            continue
        }

        mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
        wasPartitioned = alreadyPartitioned

        leftLen, rightLen := mid-a, b-mid
        balanceThreshold := length / 8
        if leftLen < rightLen {
            wasBalanced = leftLen >= balanceThreshold
            pdqsortOrdered(data, a, mid, limit)
            a = mid + 1
        } else {
            wasBalanced = rightLen >= balanceThreshold
            pdqsortOrdered(data, mid+1, b, limit)
            b = mid
        }
    }
}

// partitionOrdered does one quicksort partition.
// Let p = data[pivot]
// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
// On return, data[newpivot] = p
func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
    data[a], data[pivot] = data[pivot], data[a]
    i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

    for i <= j && cmpLess(data[i], data[a]) {
        i++
    }
    for i <= j && !cmpLess(data[j], data[a]) {
        j--
    }
    if i > j {
        data[j], data[a] = data[a], data[j]
        return j, true
    }
    data[i], data[j] = data[j], data[i]
    i++
    j--

    for {
        for i <= j && cmpLess(data[i], data[a]) {
            i++
        }
        for i <= j && !cmpLess(data[j], data[a]) {
            j--
        }
        if i > j {
            break
        }
        data[i], data[j] = data[j], data[i]
        i++
        j--
    }
    data[j], data[a] = data[a], data[j]
    return j, false
}

// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
    data[a], data[pivot] = data[pivot], data[a]
    i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

    for {
        for i <= j && !cmpLess(data[a], data[i]) {
            i++
        }
        for i <= j && cmpLess(data[a], data[j]) {
            j--
        }
        if i > j {
            break
        }
        data[i], data[j] = data[j], data[i]
        i++
        j--
    }
    return i
}

// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
    const (
        maxSteps         = 5  // maximum number of adjacent out-of-order pairs that will get shifted
        shortestShifting = 50 // don't shift any elements on short arrays
    )
    i := a + 1
    for j := 0; j < maxSteps; j++ {
        for i < b && !cmpLess(data[i], data[i-1]) {
            i++
        }

        if i == b {
            return true
        }

        if b-a < shortestShifting {
            return false
        }

        data[i], data[i-1] = data[i-1], data[i]

        // Shift the smaller one to the left.
        if i-a >= 2 {
            for j := i - 1; j >= 1; j-- {
                if !cmpLess(data[j], data[j-1]) {
                    break
                }
                data[j], data[j-1] = data[j-1], data[j]
            }
        }
        // Shift the greater one to the right.
        if b-i >= 2 {
            for j := i + 1; j < b; j++ {
                if !cmpLess(data[j], data[j-1]) {
                    break
                }
                data[j], data[j-1] = data[j-1], data[j]
            }
        }
    }
    return false
}

// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
// that might cause imbalanced partitions in quicksort.
func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
    length := b - a
    if length >= 8 {
        random := xorshift(length)
        modulus := nextPowerOfTwo(length)

        for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
            other := int(uint(random.Next()) & (modulus - 1))
            if other >= length {
                other -= length
            }
            data[idx], data[a+other] = data[a+other], data[idx]
        }
    }
}

// choosePivotOrdered chooses a pivot in data[a:b].
//
// [0,8): chooses a static pivot.
// [8,shortestNinther): uses the simple median-of-three method.
// [shortestNinther,∞): uses the Tukey ninther method.
func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
    const (
        shortestNinther = 50
        maxSwaps        = 4 * 3
    )

    l := b - a

    var (
        swaps int
        i     = a + l/4*1
        j     = a + l/4*2
        k     = a + l/4*3
    )

    if l >= 8 {
        if l >= shortestNinther {
            // Tukey ninther method, the idea came from Rust's implementation.
            i = medianAdjacentOrdered(data, i, &swaps)
            j = medianAdjacentOrdered(data, j, &swaps)
            k = medianAdjacentOrdered(data, k, &swaps)
        }
        // Find the median among i, j, k and stores it into j.
        j = medianOrdered(data, i, j, k, &swaps)
    }

    switch swaps {
    case 0:
        return j, increasingHint
    case maxSwaps:
        return j, decreasingHint
    default:
        return j, unknownHint
    }
}

// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
    if cmpLess(data[b], data[a]) {
        *swaps++
        return b, a
    }
    return a, b
}

// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
    a, b = order2Ordered(data, a, b, swaps)
    b, c = order2Ordered(data, b, c, swaps)
    a, b = order2Ordered(data, a, b, swaps)
    return b
}

// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
    return medianOrdered(data, a-1, a, a+1, swaps)
}

func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
    i := a
    j := b - 1
    for i < j {
        data[i], data[j] = data[j], data[i]
        i++
        j--
    }
}

func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
    for i := 0; i < n; i++ {
        data[a+i], data[b+i] = data[b+i], data[a+i]
    }
}

func stableOrdered[E constraints.Ordered](data []E, n int) {
    blockSize := 20 // must be > 0
    a, b := 0, blockSize
    for b <= n {
        insertionSortOrdered(data, a, b)
        a = b
        b += blockSize
    }
    insertionSortOrdered(data, a, n)

    for blockSize < n {
        a, b = 0, 2*blockSize
        for b <= n {
            symMergeOrdered(data, a, a+blockSize, b)
            a = b
            b += 2 * blockSize
        }
        if m := a + blockSize; m < n {
            symMergeOrdered(data, a, m, n)
        }
        blockSize *= 2
    }
}

// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
// Computer Science, pages 714-723. Springer, 2004.
//
// Let M = m-a and N = b-n. Wolog M < N.
// The recursion depth is bound by ceil(log(N+M)).
// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
//
// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
// in the paper carries through for Swap operations, especially as the block
// swapping rotate uses only O(M+N) Swaps.
//
// symMerge assumes non-degenerate arguments: a < m && m < b.
// Having the caller check this condition eliminates many leaf recursion calls,
// which improves performance.
func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
    // Avoid unnecessary recursions of symMerge
    // by direct insertion of data[a] into data[m:b]
    // if data[a:m] only contains one element.
    if m-a == 1 {
        // Use binary search to find the lowest index i
        // such that data[i] >= data[a] for m <= i < b.
        // Exit the search loop with i == b in case no such index exists.
        i := m
        j := b
        for i < j {
            h := int(uint(i+j) >> 1)
            if cmpLess(data[h], data[a]) {
                i = h + 1
            } else {
                j = h
            }
        }
        // Swap values until data[a] reaches the position before i.
        for k := a; k < i-1; k++ {
            data[k], data[k+1] = data[k+1], data[k]
        }
        return
    }

    // Avoid unnecessary recursions of symMerge
    // by direct insertion of data[m] into data[a:m]
    // if data[m:b] only contains one element.
    if b-m == 1 {
        // Use binary search to find the lowest index i
        // such that data[i] > data[m] for a <= i < m.
        // Exit the search loop with i == m in case no such index exists.
        i := a
        j := m
        for i < j {
            h := int(uint(i+j) >> 1)
            if !cmpLess(data[m], data[h]) {
                i = h + 1
            } else {
                j = h
            }
        }
        // Swap values until data[m] reaches the position i.
        for k := m; k > i; k-- {
            data[k], data[k-1] = data[k-1], data[k]
        }
        return
    }

    mid := int(uint(a+b) >> 1)
    n := mid + m
    var start, r int
    if m > mid {
        start = n - b
        r = mid
    } else {
        start = a
        r = m
    }
    p := n - 1

    for start < r {
        c := int(uint(start+r) >> 1)
        if !cmpLess(data[p-c], data[c]) {
            start = c + 1
        } else {
            r = c
        }
    }

    end := n - start
    if start < m && m < end {
        rotateOrdered(data, start, m, end)
    }
    if a < start && start < mid {
        symMergeOrdered(data, a, start, mid)
    }
    if mid < end && end < b {
        symMergeOrdered(data, mid, end, b)
    }
}

// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// Data of the form 'x u v y' is changed to 'x v u y'.
// rotate performs at most b-a many calls to data.Swap,
// and it assumes non-degenerate arguments: a < m && m < b.
func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
    i := m - a
    j := b - m

    for i != j {
        if i > j {
            swapRangeOrdered(data, m-i, m, j)
            i -= j
        } else {
            swapRangeOrdered(data, m-i, m+j-i, i)
            j -= i
        }
    }
    // i == j
    swapRangeOrdered(data, m-i, m, i)
}
112 vendor/golang.org/x/net/context/context.go (generated, vendored)
@@ -3,29 +3,31 @@
 // license that can be found in the LICENSE file.

 // Package context defines the Context type, which carries deadlines,
-// cancelation signals, and other request-scoped values across API boundaries
+// cancellation signals, and other request-scoped values across API boundaries
 // and between processes.
 // As of Go 1.7 this package is available in the standard library under the
-// name context. https://golang.org/pkg/context.
+// name [context], and migrating to it can be done automatically with [go fix].
 //
-// Incoming requests to a server should create a Context, and outgoing calls to
-// servers should accept a Context. The chain of function calls between must
-// propagate the Context, optionally replacing it with a modified copy created
-// using WithDeadline, WithTimeout, WithCancel, or WithValue.
+// Incoming requests to a server should create a [Context], and outgoing
+// calls to servers should accept a Context. The chain of function
+// calls between them must propagate the Context, optionally replacing
+// it with a derived Context created using [WithCancel], [WithDeadline],
+// [WithTimeout], or [WithValue].
 //
 // Programs that use Contexts should follow these rules to keep interfaces
 // consistent across packages and enable static analysis tools to check context
 // propagation:
 //
 // Do not store Contexts inside a struct type; instead, pass a Context
-// explicitly to each function that needs it. The Context should be the first
+// explicitly to each function that needs it. This is discussed further in
+// https://go.dev/blog/context-and-structs. The Context should be the first
 // parameter, typically named ctx:
 //
 //	func DoSomething(ctx context.Context, arg Arg) error {
 //		// ... use ctx ...
 //	}
 //
-// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO]
 // if you are unsure about which Context to use.
 //
 // Use context Values only for request-scoped data that transits processes and
@@ -34,9 +36,30 @@
 // The same Context may be passed to functions running in different goroutines;
 // Contexts are safe for simultaneous use by multiple goroutines.
 //
-// See http://blog.golang.org/context for example code for a server that uses
+// See https://go.dev/blog/context for example code for a server that uses
 // Contexts.
-package context // import "golang.org/x/net/context"
+//
+// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs
+package context
+
+import (
+	"context" // standard library's context, as of Go 1.7
+	"time"
+)
+
+// A Context carries a deadline, a cancellation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context = context.Context
+
+// Canceled is the error returned by [Context.Err] when the context is canceled
+// for some reason other than its deadline passing.
+var Canceled = context.Canceled
+
+// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled
+// due to its deadline passing.
+var DeadlineExceeded = context.DeadlineExceeded
+
 // Background returns a non-nil, empty Context. It is never canceled, has no
 // values, and has no deadline. It is typically used by the main function,
@@ -49,8 +72,73 @@ func Background() Context {
 // TODO returns a non-nil, empty Context. Code should use context.TODO when
 // it's unclear which Context to use or it is not yet available (because the
 // surrounding function has not yet been extended to accept a Context
-// parameter). TODO is recognized by static analysis tools that determine
-// whether Contexts are propagated correctly in a program.
+// parameter).
 func TODO() Context {
 	return todo
 }

+var (
+	background = context.Background()
+	todo       = context.TODO()
+)
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// A CancelFunc may be called by multiple goroutines simultaneously.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc = context.CancelFunc
+
+// WithCancel returns a derived context that points to the parent context
+// but has a new Done channel. The returned context's Done channel is closed
+// when the returned cancel function is called or when the parent context's
+// Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this [Context] complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+	return context.WithCancel(parent)
+}
+
+// WithDeadline returns a derived context that points to the parent context
+// but has the deadline adjusted to be no later than d. If the parent's
+// deadline is already earlier than d, WithDeadline(parent, d) is semantically
+// equivalent to parent. The returned [Context.Done] channel is closed when
+// the deadline expires, when the returned cancel function is called,
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this [Context] complete.
+func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) {
+	return context.WithDeadline(parent, d)
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this [Context] complete:
+//
+//	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+//		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+//		defer cancel() // releases resources if slowOperation completes before timeout elapses
+//		return slowOperation(ctx)
+//	}
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+	return context.WithTimeout(parent, timeout)
+}
+
+// WithValue returns a derived context that points to the parent Context.
+// In the derived context, the value associated with key is val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The provided key must be comparable and should not be of type
+// string or any other built-in type to avoid collisions between
+// packages using context. Users of WithValue should define their own
+// types for keys. To avoid allocating when assigning to an
+// interface{}, context keys often have concrete type
+// struct{}. Alternatively, exported context key variables' static
+// type should be a pointer or interface.
+func WithValue(parent Context, key, val interface{}) Context {
+	return context.WithValue(parent, key, val)
+}

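After this rewrite, Context and CancelFunc in golang.org/x/net/context are type aliases for the standard library's types, so values from either import path are interchangeable. A minimal sketch of the interop (function and variable names here are invented for illustration):

package main

import (
    stdctx "context"
    "fmt"
    "time"

    netctx "golang.org/x/net/context" // Context is now an alias for context.Context
)

// takesNetContext accepts the x/net/context type; because of the alias it is
// exactly the standard library's context.Context.
func takesNetContext(ctx netctx.Context) error {
    select {
    case <-ctx.Done():
        return ctx.Err()
    case <-time.After(10 * time.Millisecond):
        return nil
    }
}

func main() {
    // A stdlib context can be passed where the x/net type is expected;
    // cancel functions are interchangeable as well.
    ctx, cancel := stdctx.WithTimeout(stdctx.Background(), time.Second)
    defer cancel()
    fmt.Println(takesNetContext(ctx)) // <nil>
}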
2 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go (generated, vendored)
@@ -3,7 +3,7 @@
 // license that can be found in the LICENSE file.

 // Package ctxhttp provides helper functions for performing context-aware HTTP requests.
-package ctxhttp // import "golang.org/x/net/context/ctxhttp"
+package ctxhttp

 import (
 	"context"

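The only change to ctxhttp is dropping the legacy import comment; its API is untouched. For reference, a typical context-aware request with this package looks as follows; the URL is a placeholder for illustration, and per the package docs a nil client means http.DefaultClient:

package main

import (
    "context"
    "fmt"
    "net/http"
    "time"

    "golang.org/x/net/context/ctxhttp"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()

    // ctxhttp.Get issues the request bound to ctx; it is canceled if the
    // context's deadline expires before the response arrives.
    resp, err := ctxhttp.Get(ctx, http.DefaultClient, "https://example.com/")
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.Status)
}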
72 vendor/golang.org/x/net/context/go17.go (generated, vendored)
@@ -1,72 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.7

package context

import (
    "context" // standard library's context, as of Go 1.7
    "time"
)

var (
    todo       = context.TODO()
    background = context.Background()
)

// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = context.Canceled

// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = context.DeadlineExceeded

// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
    ctx, f := context.WithCancel(parent)
    return ctx, f
}

// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
    ctx, f := context.WithDeadline(parent, deadline)
    return ctx, f
}

// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// 	ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// 	defer cancel() // releases resources if slowOperation completes before timeout elapses
// 	return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
    return WithDeadline(parent, time.Now().Add(timeout))
}

// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
    return context.WithValue(parent, key, val)
}
20 vendor/golang.org/x/net/context/go19.go (generated, vendored)
@@ -1,20 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.9

package context

import "context" // standard library's context, as of Go 1.7

// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context = context.Context

// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc = context.CancelFunc
300 vendor/golang.org/x/net/context/pre_go17.go (generated, vendored)
@@ -1,300 +0,0 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.7
|
||||
|
||||
package context
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
|
||||
// struct{}, since vars of this type must have distinct addresses.
|
||||
type emptyCtx int
|
||||
|
||||
func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
|
||||
return
|
||||
}
|
||||
|
||||
func (*emptyCtx) Done() <-chan struct{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*emptyCtx) Err() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*emptyCtx) Value(key interface{}) interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *emptyCtx) String() string {
|
||||
switch e {
|
||||
case background:
|
||||
return "context.Background"
|
||||
case todo:
|
||||
return "context.TODO"
|
||||
}
|
||||
return "unknown empty Context"
|
||||
}
|
||||
|
||||
var (
|
||||
background = new(emptyCtx)
|
||||
todo = new(emptyCtx)
|
||||
)
|
||||
|
||||
// Canceled is the error returned by Context.Err when the context is canceled.
|
||||
var Canceled = errors.New("context canceled")
|
||||
|
||||
// DeadlineExceeded is the error returned by Context.Err when the context's
|
||||
// deadline passes.
|
||||
var DeadlineExceeded = errors.New("context deadline exceeded")
|
||||
|
||||
// WithCancel returns a copy of parent with a new Done channel. The returned
|
||||
// context's Done channel is closed when the returned cancel function is called
|
||||
// or when the parent context's Done channel is closed, whichever happens first.
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete.
|
||||
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
|
||||
c := newCancelCtx(parent)
|
||||
propagateCancel(parent, c)
|
||||
return c, func() { c.cancel(true, Canceled) }
|
||||
}
|
||||
|
||||
// newCancelCtx returns an initialized cancelCtx.
|
||||
func newCancelCtx(parent Context) *cancelCtx {
|
||||
return &cancelCtx{
|
||||
Context: parent,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// propagateCancel arranges for child to be canceled when parent is.
|
||||
func propagateCancel(parent Context, child canceler) {
|
||||
if parent.Done() == nil {
|
||||
return // parent is never canceled
|
||||
}
|
||||
if p, ok := parentCancelCtx(parent); ok {
|
||||
p.mu.Lock()
|
||||
if p.err != nil {
|
||||
// parent has already been canceled
|
||||
child.cancel(false, p.err)
|
||||
} else {
|
||||
if p.children == nil {
|
||||
p.children = make(map[canceler]bool)
|
||||
}
|
||||
p.children[child] = true
|
||||
}
|
||||
p.mu.Unlock()
|
||||
} else {
|
||||
go func() {
|
||||
select {
|
||||
case <-parent.Done():
|
||||
child.cancel(false, parent.Err())
|
||||
case <-child.Done():
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// parentCancelCtx follows a chain of parent references until it finds a
|
||||
// *cancelCtx. This function understands how each of the concrete types in this
|
||||
// package represents its parent.
|
||||
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
|
||||
for {
|
||||
switch c := parent.(type) {
|
||||
case *cancelCtx:
|
||||
return c, true
|
||||
case *timerCtx:
|
||||
return c.cancelCtx, true
|
||||
case *valueCtx:
|
||||
parent = c.Context
|
||||
default:
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// removeChild removes a context from its parent.
|
||||
func removeChild(parent Context, child canceler) {
|
||||
p, ok := parentCancelCtx(parent)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
p.mu.Lock()
|
||||
if p.children != nil {
|
||||
delete(p.children, child)
|
||||
}
|
||||
p.mu.Unlock()
|
||||
}
|
||||
|
||||
// A canceler is a context type that can be canceled directly. The
|
||||
// implementations are *cancelCtx and *timerCtx.
|
||||
type canceler interface {
|
||||
cancel(removeFromParent bool, err error)
|
||||
Done() <-chan struct{}
|
||||
}
|
||||
|
||||
// A cancelCtx can be canceled. When canceled, it also cancels any children
|
||||
// that implement canceler.
|
||||
type cancelCtx struct {
|
||||
Context
|
||||
|
||||
done chan struct{} // closed by the first cancel call.
|
||||
|
||||
mu sync.Mutex
|
||||
children map[canceler]bool // set to nil by the first cancel call
|
||||
err error // set to non-nil by the first cancel call
|
||||
}
|
||||
|
||||
func (c *cancelCtx) Done() <-chan struct{} {
|
||||
return c.done
|
||||
}
|
||||
|
||||
func (c *cancelCtx) Err() error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.err
|
||||
}
|
||||
|
||||
func (c *cancelCtx) String() string {
|
||||
return fmt.Sprintf("%v.WithCancel", c.Context)
|
||||
}
|
||||
|
||||
// cancel closes c.done, cancels each of c's children, and, if
|
||||
// removeFromParent is true, removes c from its parent's children.
|
||||
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
|
||||
if err == nil {
|
||||
panic("context: internal error: missing cancel error")
|
||||
}
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return // already canceled
	}
	c.err = err
	close(c.done)
	for child := range c.children {
		// NOTE: acquiring the child's lock while holding parent's lock.
		child.cancel(false, err)
	}
	c.children = nil
	c.mu.Unlock()

	if removeFromParent {
		removeChild(c.Context, c)
	}
}

// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
	if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
		// The current deadline is already sooner than the new one.
		return WithCancel(parent)
	}
	c := &timerCtx{
		cancelCtx: newCancelCtx(parent),
		deadline:  deadline,
	}
	propagateCancel(parent, c)
	d := deadline.Sub(time.Now())
	if d <= 0 {
		c.cancel(true, DeadlineExceeded) // deadline has already passed
		return c, func() { c.cancel(true, Canceled) }
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.err == nil {
		c.timer = time.AfterFunc(d, func() {
			c.cancel(true, DeadlineExceeded)
		})
	}
	return c, func() { c.cancel(true, Canceled) }
}

// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
// implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel.
type timerCtx struct {
	*cancelCtx
	timer *time.Timer // Under cancelCtx.mu.

	deadline time.Time
}

func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
	return c.deadline, true
}

func (c *timerCtx) String() string {
	return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
}

func (c *timerCtx) cancel(removeFromParent bool, err error) {
	c.cancelCtx.cancel(false, err)
	if removeFromParent {
		// Remove this timerCtx from its parent cancelCtx's children.
		removeChild(c.cancelCtx.Context, c)
	}
	c.mu.Lock()
	if c.timer != nil {
		c.timer.Stop()
		c.timer = nil
	}
	c.mu.Unlock()
}

// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
//	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
//		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
//		defer cancel() // releases resources if slowOperation completes before timeout elapses
//		return slowOperation(ctx)
//	}
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
	return WithDeadline(parent, time.Now().Add(timeout))
}

// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
	return &valueCtx{parent, key, val}
}

// A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context.
type valueCtx struct {
	Context
	key, val interface{}
}

func (c *valueCtx) String() string {
	return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
}

func (c *valueCtx) Value(key interface{}) interface{} {
	if c.key == key {
		return c.val
	}
	return c.Context.Value(key)
}
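The clamping rule at the top of WithDeadline is easy to miss: if the parent already expires sooner, the child is just a cancelable wrapper around the parent. A minimal sketch using the standard library's context package, whose semantics this vendored copy mirrors:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Parent already expires in 50ms; asking for a deadline 1s out is
	// clamped, so the child behaves like WithCancel of the parent.
	parent, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	child, childCancel := context.WithDeadline(parent, time.Now().Add(1*time.Second))
	defer childCancel()

	<-child.Done()
	fmt.Println(child.Err()) // context.DeadlineExceeded, after ~50ms not 1s
}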
109 vendor/golang.org/x/net/context/pre_go19.go (generated, vendored)
@@ -1,109 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !go1.9

package context

import "time"

// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
	// Deadline returns the time when work done on behalf of this context
	// should be canceled. Deadline returns ok==false when no deadline is
	// set. Successive calls to Deadline return the same results.
	Deadline() (deadline time.Time, ok bool)

	// Done returns a channel that's closed when work done on behalf of this
	// context should be canceled. Done may return nil if this context can
	// never be canceled. Successive calls to Done return the same value.
	//
	// WithCancel arranges for Done to be closed when cancel is called;
	// WithDeadline arranges for Done to be closed when the deadline
	// expires; WithTimeout arranges for Done to be closed when the timeout
	// elapses.
	//
	// Done is provided for use in select statements:
	//
	//	// Stream generates values with DoSomething and sends them to out
	//	// until DoSomething returns an error or ctx.Done is closed.
	//	func Stream(ctx context.Context, out chan<- Value) error {
	//		for {
	//			v, err := DoSomething(ctx)
	//			if err != nil {
	//				return err
	//			}
	//			select {
	//			case <-ctx.Done():
	//				return ctx.Err()
	//			case out <- v:
	//			}
	//		}
	//	}
	//
	// See http://blog.golang.org/pipelines for more examples of how to use
	// a Done channel for cancelation.
	Done() <-chan struct{}

	// Err returns a non-nil error value after Done is closed. Err returns
	// Canceled if the context was canceled or DeadlineExceeded if the
	// context's deadline passed. No other values for Err are defined.
	// After Done is closed, successive calls to Err return the same value.
	Err() error

	// Value returns the value associated with this context for key, or nil
	// if no value is associated with key. Successive calls to Value with
	// the same key return the same result.
	//
	// Use context values only for request-scoped data that transits
	// processes and API boundaries, not for passing optional parameters to
	// functions.
	//
	// A key identifies a specific value in a Context. Functions that wish
	// to store values in Context typically allocate a key in a global
	// variable then use that key as the argument to context.WithValue and
	// Context.Value. A key can be any type that supports equality;
	// packages should define keys as an unexported type to avoid
	// collisions.
	//
	// Packages that define a Context key should provide type-safe accessors
	// for the values stored using that key:
	//
	//	// Package user defines a User type that's stored in Contexts.
	//	package user
	//
	//	import "golang.org/x/net/context"
	//
	//	// User is the type of value stored in the Contexts.
	//	type User struct {...}
	//
	//	// key is an unexported type for keys defined in this package.
	//	// This prevents collisions with keys defined in other packages.
	//	type key int
	//
	//	// userKey is the key for user.User values in Contexts. It is
	//	// unexported; clients use user.NewContext and user.FromContext
	//	// instead of using this key directly.
	//	var userKey key = 0
	//
	//	// NewContext returns a new Context that carries value u.
	//	func NewContext(ctx context.Context, u *User) context.Context {
	//		return context.WithValue(ctx, userKey, u)
	//	}
	//
	//	// FromContext returns the User value stored in ctx, if any.
	//	func FromContext(ctx context.Context) (*User, bool) {
	//		u, ok := ctx.Value(userKey).(*User)
	//		return u, ok
	//	}
	Value(key interface{}) interface{}
}

// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()
121 vendor/golang.org/x/net/http2/server.go (generated, vendored)
@@ -2233,25 +2233,25 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
	sc.serveG.check()

	rp := requestParam{
		method:    f.PseudoValue("method"),
		scheme:    f.PseudoValue("scheme"),
		authority: f.PseudoValue("authority"),
		path:      f.PseudoValue("path"),
		protocol:  f.PseudoValue("protocol"),
	rp := httpcommon.ServerRequestParam{
		Method:    f.PseudoValue("method"),
		Scheme:    f.PseudoValue("scheme"),
		Authority: f.PseudoValue("authority"),
		Path:      f.PseudoValue("path"),
		Protocol:  f.PseudoValue("protocol"),
	}

	// extended connect is disabled, so we should not see :protocol
	if disableExtendedConnectProtocol && rp.protocol != "" {
	if disableExtendedConnectProtocol && rp.Protocol != "" {
		return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
	}

	isConnect := rp.method == "CONNECT"
	isConnect := rp.Method == "CONNECT"
	if isConnect {
		if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") {
		if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") {
			return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
		}
	} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
	} else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") {
		// See 8.1.2.6 Malformed Requests and Responses:
		//
		// Malformed requests or responses that are detected
@@ -2265,15 +2265,16 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
		return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
	}

	rp.header = make(http.Header)
	header := make(http.Header)
	rp.Header = header
	for _, hf := range f.RegularFields() {
		rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
		header.Add(sc.canonicalHeader(hf.Name), hf.Value)
	}
	if rp.authority == "" {
		rp.authority = rp.header.Get("Host")
	if rp.Authority == "" {
		rp.Authority = header.Get("Host")
	}
	if rp.protocol != "" {
		rp.header.Set(":protocol", rp.protocol)
	if rp.Protocol != "" {
		header.Set(":protocol", rp.Protocol)
	}

	rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
@@ -2282,7 +2283,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
	}
	bodyOpen := !f.StreamEnded()
	if bodyOpen {
		if vv, ok := rp.header["Content-Length"]; ok {
		if vv, ok := rp.Header["Content-Length"]; ok {
			if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
				req.ContentLength = int64(cl)
			} else {
@@ -2298,84 +2299,38 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
	return rw, req, nil
}

type requestParam struct {
	method                  string
	scheme, authority, path string
	protocol                string
	header                  http.Header
}

func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {
func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) {
	sc.serveG.check()

	var tlsState *tls.ConnectionState // nil if not scheme https
	if rp.scheme == "https" {
	if rp.Scheme == "https" {
		tlsState = sc.tlsState
	}

	needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue")
	if needsContinue {
		rp.header.Del("Expect")
	}
	// Merge Cookie headers into one "; "-delimited value.
	if cookies := rp.header["Cookie"]; len(cookies) > 1 {
		rp.header.Set("Cookie", strings.Join(cookies, "; "))
	}

	// Setup Trailers
	var trailer http.Header
	for _, v := range rp.header["Trailer"] {
		for _, key := range strings.Split(v, ",") {
			key = http.CanonicalHeaderKey(textproto.TrimString(key))
			switch key {
			case "Transfer-Encoding", "Trailer", "Content-Length":
				// Bogus. (copy of http1 rules)
				// Ignore.
			default:
				if trailer == nil {
					trailer = make(http.Header)
				}
				trailer[key] = nil
			}
		}
	}
	delete(rp.header, "Trailer")

	var url_ *url.URL
	var requestURI string
	if rp.method == "CONNECT" && rp.protocol == "" {
		url_ = &url.URL{Host: rp.authority}
		requestURI = rp.authority // mimic HTTP/1 server behavior
	} else {
		var err error
		url_, err = url.ParseRequestURI(rp.path)
		if err != nil {
			return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol))
		}
		requestURI = rp.path
	res := httpcommon.NewServerRequest(rp)
	if res.InvalidReason != "" {
		return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol))
	}

	body := &requestBody{
		conn:          sc,
		stream:        st,
		needsContinue: needsContinue,
		needsContinue: res.NeedsContinue,
	}
	req := &http.Request{
		Method: rp.method,
		URL:    url_,
	req := (&http.Request{
		Method:     rp.Method,
		URL:        res.URL,
		RemoteAddr: sc.remoteAddrStr,
		Header:     rp.header,
		RequestURI: requestURI,
		Header:     rp.Header,
		RequestURI: res.RequestURI,
		Proto:      "HTTP/2.0",
		ProtoMajor: 2,
		ProtoMinor: 0,
		TLS:        tlsState,
		Host:       rp.authority,
		Host:       rp.Authority,
		Body:       body,
		Trailer:    trailer,
	}
	req = req.WithContext(st.ctx)

		Trailer: res.Trailer,
	}).WithContext(st.ctx)
	rw := sc.newResponseWriter(st, req)
	return rw, req, nil
}
@@ -3270,12 +3225,12 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
	// we start in "half closed (remote)" for simplicity.
	// See further comments at the definition of stateHalfClosedRemote.
	promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
	rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{
		method:    msg.method,
		scheme:    msg.url.Scheme,
		authority: msg.url.Host,
		path:      msg.url.RequestURI(),
		header:    cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
	rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{
		Method:    msg.method,
		Scheme:    msg.url.Scheme,
		Authority: msg.url.Host,
		Path:      msg.url.RequestURI(),
		Header:    cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
	})
	if err != nil {
		// Should not happen, since we've already validated msg.url.
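The refactor above moves pseudo-header collection into httpcommon.ServerRequestParam and centralizes validation in httpcommon.NewServerRequest. Since that package is internal to golang.org/x/net, here is a minimal standalone sketch of the CONNECT rule it preserves; serverRequestParam and malformedConnect are hypothetical stand-ins, not the importable types:

package main

import "fmt"

type serverRequestParam struct {
	Method, Scheme, Authority, Path, Protocol string
}

// malformedConnect reports whether a non-extended CONNECT request carries
// pseudo-headers it must not have (or is missing :authority), mirroring
// the check in newWriterAndRequest above.
func malformedConnect(rp serverRequestParam) bool {
	return rp.Method == "CONNECT" && rp.Protocol == "" &&
		(rp.Path != "" || rp.Scheme != "" || rp.Authority == "")
}

func main() {
	fmt.Println(malformedConnect(serverRequestParam{Method: "CONNECT", Authority: "example.com:443"})) // false
	fmt.Println(malformedConnect(serverRequestParam{Method: "CONNECT", Path: "/"}))                     // true
}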
52 vendor/golang.org/x/net/http2/transport.go (generated, vendored)
@@ -1286,6 +1286,19 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration {
	return 0
}

// actualContentLength returns a sanitized version of
// req.ContentLength, where 0 actually means zero (not unknown) and -1
// means unknown.
func actualContentLength(req *http.Request) int64 {
	if req.Body == nil || req.Body == http.NoBody {
		return 0
	}
	if req.ContentLength != 0 {
		return req.ContentLength
	}
	return -1
}

func (cc *ClientConn) decrStreamReservations() {
	cc.mu.Lock()
	defer cc.mu.Unlock()
@@ -1310,7 +1323,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
		reqCancel:            req.Cancel,
		isHead:               req.Method == "HEAD",
		reqBody:              req.Body,
		reqBodyContentLength: httpcommon.ActualContentLength(req),
		reqBodyContentLength: actualContentLength(req),
		trace:                httptrace.ContextClientTrace(ctx),
		peerClosed:           make(chan struct{}),
		abort:                make(chan struct{}),
@@ -1318,7 +1331,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
		donec:                make(chan struct{}),
	}

	cs.requestedGzip = httpcommon.IsRequestGzip(req, cc.t.disableCompression())
	cs.requestedGzip = httpcommon.IsRequestGzip(req.Method, req.Header, cc.t.disableCompression())

	go cs.doRequest(req, streamf)

@@ -1349,7 +1362,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
	}
	res.Request = req
	res.TLS = cc.tlsState
	if res.Body == noBody && httpcommon.ActualContentLength(req) == 0 {
	if res.Body == noBody && actualContentLength(req) == 0 {
		// If there isn't a request or response body still being
		// written, then wait for the stream to be closed before
		// RoundTrip returns.
@@ -1596,12 +1609,7 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error {
	// sent by writeRequestBody below, along with any Trailers,
	// again in form HEADERS{1}, CONTINUATION{0,})
	cc.hbuf.Reset()
	res, err := httpcommon.EncodeHeaders(httpcommon.EncodeHeadersParam{
		Request:               req,
		AddGzipHeader:         cs.requestedGzip,
		PeerMaxHeaderListSize: cc.peerMaxHeaderListSize,
		DefaultUserAgent:      defaultUserAgent,
	}, func(name, value string) {
	res, err := encodeRequestHeaders(req, cs.requestedGzip, cc.peerMaxHeaderListSize, func(name, value string) {
		cc.writeHeader(name, value)
	})
	if err != nil {
@@ -1617,6 +1625,22 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error {
	return err
}

func encodeRequestHeaders(req *http.Request, addGzipHeader bool, peerMaxHeaderListSize uint64, headerf func(name, value string)) (httpcommon.EncodeHeadersResult, error) {
	return httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{
		Request: httpcommon.Request{
			Header:              req.Header,
			Trailer:             req.Trailer,
			URL:                 req.URL,
			Host:                req.Host,
			Method:              req.Method,
			ActualContentLength: actualContentLength(req),
		},
		AddGzipHeader:         addGzipHeader,
		PeerMaxHeaderListSize: peerMaxHeaderListSize,
		DefaultUserAgent:      defaultUserAgent,
	}, headerf)
}

// cleanupWriteRequest performs post-request tasks.
//
// If err (the result of writeRequest) is non-nil and the stream is not closed,
@@ -2186,6 +2210,13 @@ func (rl *clientConnReadLoop) cleanup() {
	}
	cc.cond.Broadcast()
	cc.mu.Unlock()

	if !cc.seenSettings {
		// If we have a pending request that wants extended CONNECT,
		// let it continue and fail with the connection error.
		cc.extendedConnectAllowed = true
		close(cc.seenSettingsChan)
	}
}

// countReadFrameError calls Transport.CountError with a string
@@ -2278,9 +2309,6 @@ func (rl *clientConnReadLoop) run() error {
			if VerboseLogs {
				cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
			}
			if !cc.seenSettings {
				close(cc.seenSettingsChan)
			}
			return err
		}
	}
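actualContentLength distinguishes "definitely empty" (0) from "body present, length unknown" (-1). A small self-contained check of that rule; the helper is copied here so the example compiles outside the http2 package:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func actualContentLength(req *http.Request) int64 {
	if req.Body == nil || req.Body == http.NoBody {
		return 0 // no body object at all: definitely empty
	}
	if req.ContentLength != 0 {
		return req.ContentLength
	}
	return -1 // body present but length unknown
}

func main() {
	r1, _ := http.NewRequest("GET", "https://example.com", nil)
	r2, _ := http.NewRequest("POST", "https://example.com", strings.NewReader("hi"))
	fmt.Println(actualContentLength(r1), actualContentLength(r2)) // 0 2
}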
6 vendor/golang.org/x/net/internal/httpcommon/headermap.go (generated, vendored)
@@ -5,7 +5,7 @@
package httpcommon

import (
	"net/http"
	"net/textproto"
	"sync"
)

@@ -82,7 +82,7 @@ func buildCommonHeaderMaps() {
	commonLowerHeader = make(map[string]string, len(common))
	commonCanonHeader = make(map[string]string, len(common))
	for _, v := range common {
		chk := http.CanonicalHeaderKey(v)
		chk := textproto.CanonicalMIMEHeaderKey(v)
		commonLowerHeader[chk] = v
		commonCanonHeader[v] = chk
	}
@@ -104,7 +104,7 @@ func CanonicalHeader(v string) string {
	if s, ok := commonCanonHeader[v]; ok {
		return s
	}
	return http.CanonicalHeaderKey(v)
	return textproto.CanonicalMIMEHeaderKey(v)
}

// CachedCanonicalHeader returns the canonical form of a well-known header name.
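textproto.CanonicalMIMEHeaderKey and http.CanonicalHeaderKey produce the same canonical form (the net/http version delegates to textproto), so the swap simply lets httpcommon drop its net/http import. A quick check:

package main

import (
	"fmt"
	"net/http"
	"net/textproto"
)

func main() {
	fmt.Println(textproto.CanonicalMIMEHeaderKey("content-type")) // Content-Type
	fmt.Println(http.CanonicalHeaderKey("content-type"))          // Content-Type
}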
166 vendor/golang.org/x/net/internal/httpcommon/request.go (generated, vendored)
@@ -5,10 +5,12 @@
package httpcommon

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"net/http/httptrace"
	"net/textproto"
	"net/url"
	"sort"
	"strconv"
	"strings"
@@ -21,9 +23,21 @@ var (
	ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit")
)

// Request is a subset of http.Request.
// It'd be simpler to pass an *http.Request, of course, but we can't depend on net/http
// without creating a dependency cycle.
type Request struct {
	URL                 *url.URL
	Method              string
	Host                string
	Header              map[string][]string
	Trailer             map[string][]string
	ActualContentLength int64 // 0 means 0, -1 means unknown
}

// EncodeHeadersParam is parameters to EncodeHeaders.
type EncodeHeadersParam struct {
	Request *http.Request
	Request Request

	// AddGzipHeader indicates that an "accept-encoding: gzip" header should be
	// added to the request.
@@ -47,11 +61,11 @@ type EncodeHeadersResult struct {
// It validates a request and calls headerf with each pseudo-header and header
// for the request.
// The headerf function is called with the validated, canonicalized header name.
func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) {
func EncodeHeaders(ctx context.Context, param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) {
	req := param.Request

	// Check for invalid connection-level headers.
	if err := checkConnHeaders(req); err != nil {
	if err := checkConnHeaders(req.Header); err != nil {
		return res, err
	}

@@ -73,7 +87,10 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (

	// isNormalConnect is true if this is a non-extended CONNECT request.
	isNormalConnect := false
	protocol := req.Header.Get(":protocol")
	var protocol string
	if vv := req.Header[":protocol"]; len(vv) > 0 {
		protocol = vv[0]
	}
	if req.Method == "CONNECT" && protocol == "" {
		isNormalConnect = true
	} else if protocol != "" && req.Method != "CONNECT" {
@@ -107,9 +124,7 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (
		return res, fmt.Errorf("invalid HTTP trailer %s", err)
	}

	contentLength := ActualContentLength(req)

	trailers, err := commaSeparatedTrailers(req)
	trailers, err := commaSeparatedTrailers(req.Trailer)
	if err != nil {
		return res, err
	}
@@ -123,7 +138,7 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (
		f(":authority", host)
		m := req.Method
		if m == "" {
			m = http.MethodGet
			m = "GET"
		}
		f(":method", m)
		if !isNormalConnect {
@@ -198,8 +213,8 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (
			f(k, v)
		}
	}
	if shouldSendReqContentLength(req.Method, contentLength) {
		f("content-length", strconv.FormatInt(contentLength, 10))
	if shouldSendReqContentLength(req.Method, req.ActualContentLength) {
		f("content-length", strconv.FormatInt(req.ActualContentLength, 10))
	}
	if param.AddGzipHeader {
		f("accept-encoding", "gzip")
@@ -225,7 +240,7 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (
		}
	}

	trace := httptrace.ContextClientTrace(req.Context())
	trace := httptrace.ContextClientTrace(ctx)

	// Header list size is ok. Write the headers.
	enumerateHeaders(func(name, value string) {
@@ -243,19 +258,19 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (
		}
	})

	res.HasBody = contentLength != 0
	res.HasBody = req.ActualContentLength != 0
	res.HasTrailers = trailers != ""
	return res, nil
}

// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header
// for a request.
func IsRequestGzip(req *http.Request, disableCompression bool) bool {
func IsRequestGzip(method string, header map[string][]string, disableCompression bool) bool {
	// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
	if !disableCompression &&
		req.Header.Get("Accept-Encoding") == "" &&
		req.Header.Get("Range") == "" &&
		req.Method != "HEAD" {
		len(header["Accept-Encoding"]) == 0 &&
		len(header["Range"]) == 0 &&
		method != "HEAD" {
		// Request gzip only, not deflate. Deflate is ambiguous and
		// not as universally supported anyway.
		// See: https://zlib.net/zlib_faq.html#faq39
@@ -280,22 +295,22 @@ func IsRequestGzip(req *http.Request, disableCompression bool) bool {
//
// Certain headers are special-cased as okay but not transmitted later.
// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding.
func checkConnHeaders(req *http.Request) error {
	if v := req.Header.Get("Upgrade"); v != "" {
		return fmt.Errorf("invalid Upgrade request header: %q", req.Header["Upgrade"])
func checkConnHeaders(h map[string][]string) error {
	if vv := h["Upgrade"]; len(vv) > 0 && (vv[0] != "" && vv[0] != "chunked") {
		return fmt.Errorf("invalid Upgrade request header: %q", vv)
	}
	if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
	if vv := h["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
		return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv)
	}
	if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
	if vv := h["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
		return fmt.Errorf("invalid Connection request header: %q", vv)
	}
	return nil
}

func commaSeparatedTrailers(req *http.Request) (string, error) {
	keys := make([]string, 0, len(req.Trailer))
	for k := range req.Trailer {
func commaSeparatedTrailers(trailer map[string][]string) (string, error) {
	keys := make([]string, 0, len(trailer))
	for k := range trailer {
		k = CanonicalHeader(k)
		switch k {
		case "Transfer-Encoding", "Trailer", "Content-Length":
@@ -310,19 +325,6 @@ func commaSeparatedTrailers(req *http.Request) (string, error) {
	return "", nil
}

// ActualContentLength returns a sanitized version of
// req.ContentLength, where 0 actually means zero (not unknown) and -1
// means unknown.
func ActualContentLength(req *http.Request) int64 {
	if req.Body == nil || req.Body == http.NoBody {
		return 0
	}
	if req.ContentLength != 0 {
		return req.ContentLength
	}
	return -1
}

// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
@@ -340,7 +342,7 @@ func validPseudoPath(v string) bool {
	return (len(v) > 0 && v[0] == '/') || v == "*"
}

func validateHeaders(hdrs http.Header) string {
func validateHeaders(hdrs map[string][]string) string {
	for k, vv := range hdrs {
		if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
			return fmt.Sprintf("name %q", k)
@@ -377,3 +379,89 @@ func shouldSendReqContentLength(method string, contentLength int64) bool {
		return false
	}
}

// ServerRequestParam is parameters to NewServerRequest.
type ServerRequestParam struct {
	Method                  string
	Scheme, Authority, Path string
	Protocol                string
	Header                  map[string][]string
}

// ServerRequestResult is the result of NewServerRequest.
type ServerRequestResult struct {
	// Various http.Request fields.
	URL        *url.URL
	RequestURI string
	Trailer    map[string][]string

	NeedsContinue bool // client provided an "Expect: 100-continue" header

	// If the request should be rejected, this is a short string suitable for passing
	// to the http2 package's CountError function.
	// It might be a bit odd to return errors this way rather than returning an error,
	// but this ensures we don't forget to include a CountError reason.
	InvalidReason string
}

func NewServerRequest(rp ServerRequestParam) ServerRequestResult {
	needsContinue := httpguts.HeaderValuesContainsToken(rp.Header["Expect"], "100-continue")
	if needsContinue {
		delete(rp.Header, "Expect")
	}
	// Merge Cookie headers into one "; "-delimited value.
	if cookies := rp.Header["Cookie"]; len(cookies) > 1 {
		rp.Header["Cookie"] = []string{strings.Join(cookies, "; ")}
	}

	// Setup Trailers
	var trailer map[string][]string
	for _, v := range rp.Header["Trailer"] {
		for _, key := range strings.Split(v, ",") {
			key = textproto.CanonicalMIMEHeaderKey(textproto.TrimString(key))
			switch key {
			case "Transfer-Encoding", "Trailer", "Content-Length":
				// Bogus. (copy of http1 rules)
				// Ignore.
			default:
				if trailer == nil {
					trailer = make(map[string][]string)
				}
				trailer[key] = nil
			}
		}
	}
	delete(rp.Header, "Trailer")

	// "':authority' MUST NOT include the deprecated userinfo subcomponent
	// for "http" or "https" schemed URIs."
	// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.3.1-2.3.8
	if strings.IndexByte(rp.Authority, '@') != -1 && (rp.Scheme == "http" || rp.Scheme == "https") {
		return ServerRequestResult{
			InvalidReason: "userinfo_in_authority",
		}
	}

	var url_ *url.URL
	var requestURI string
	if rp.Method == "CONNECT" && rp.Protocol == "" {
		url_ = &url.URL{Host: rp.Authority}
		requestURI = rp.Authority // mimic HTTP/1 server behavior
	} else {
		var err error
		url_, err = url.ParseRequestURI(rp.Path)
		if err != nil {
			return ServerRequestResult{
				InvalidReason: "bad_path",
			}
		}
		requestURI = rp.Path
	}

	return ServerRequestResult{
		URL:           url_,
		NeedsContinue: needsContinue,
		RequestURI:    requestURI,
		Trailer:       trailer,
	}
}
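IsRequestGzip now takes the method and header map directly instead of an *http.Request, which is what lets the http2 Transport above call it without building a synthetic request. Because httpcommon is internal and cannot be imported, this standalone mirror of its decision logic (isRequestGzip is a hypothetical copy, not the importable function) shows the same behavior:

package main

import "fmt"

func isRequestGzip(method string, header map[string][]string, disableCompression bool) bool {
	// Only auto-request gzip when the caller hasn't set Accept-Encoding
	// or Range, and the method is not HEAD.
	return !disableCompression &&
		len(header["Accept-Encoding"]) == 0 &&
		len(header["Range"]) == 0 &&
		method != "HEAD"
}

func main() {
	fmt.Println(isRequestGzip("GET", map[string][]string{}, false))  // true
	fmt.Println(isRequestGzip("HEAD", map[string][]string{}, false)) // false
}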
8 vendor/golang.org/x/net/proxy/per_host.go (generated, vendored)
@@ -7,6 +7,7 @@ package proxy
import (
	"context"
	"net"
	"net/netip"
	"strings"
)

@@ -57,7 +58,8 @@ func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.
}

func (p *PerHost) dialerForRequest(host string) Dialer {
	if ip := net.ParseIP(host); ip != nil {
	if nip, err := netip.ParseAddr(host); err == nil {
		ip := net.IP(nip.AsSlice())
		for _, net := range p.bypassNetworks {
			if net.Contains(ip) {
				return p.bypass
@@ -108,8 +110,8 @@ func (p *PerHost) AddFromString(s string) {
			}
			continue
		}
		if ip := net.ParseIP(host); ip != nil {
			p.AddIP(ip)
		if nip, err := netip.ParseAddr(host); err == nil {
			p.AddIP(net.IP(nip.AsSlice()))
			continue
		}
		if strings.HasPrefix(host, "*.") {
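The per_host change swaps net.ParseIP for netip.ParseAddr, converting back with AsSlice where a net.IP is still needed by the surrounding code. A small round-trip demo:

package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	// netip.Addr is a comparable value type; AsSlice converts back to
	// the byte-slice form the older net.IP APIs expect.
	nip, err := netip.ParseAddr("192.168.0.1")
	if err != nil {
		panic(err)
	}
	ip := net.IP(nip.AsSlice())
	fmt.Println(ip) // 192.168.0.1
}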
26 vendor/golang.org/x/net/publicsuffix/list.go (generated, vendored)
@@ -88,7 +88,7 @@ func PublicSuffix(domain string) (publicSuffix string, icann bool) {
	s, suffix, icannNode, wildcard := domain, len(domain), false, false
loop:
	for {
		dot := strings.LastIndex(s, ".")
		dot := strings.LastIndexByte(s, '.')
		if wildcard {
			icann = icannNode
			suffix = 1 + dot
@@ -129,7 +129,7 @@ loop:
	}
	if suffix == len(domain) {
		// If no rules match, the prevailing rule is "*".
		return domain[1+strings.LastIndex(domain, "."):], icann
		return domain[1+strings.LastIndexByte(domain, '.'):], icann
	}
	return domain[suffix:], icann
}
@@ -178,26 +178,28 @@ func EffectiveTLDPlusOne(domain string) (string, error) {
	if domain[i] != '.' {
		return "", fmt.Errorf("publicsuffix: invalid public suffix %q for domain %q", suffix, domain)
	}
	return domain[1+strings.LastIndex(domain[:i], "."):], nil
	return domain[1+strings.LastIndexByte(domain[:i], '.'):], nil
}

type uint32String string

func (u uint32String) get(i uint32) uint32 {
	off := i * 4
	return (uint32(u[off])<<24 |
		uint32(u[off+1])<<16 |
		uint32(u[off+2])<<8 |
		uint32(u[off+3]))
	u = u[off:] // help the compiler reduce bounds checks
	return uint32(u[3]) |
		uint32(u[2])<<8 |
		uint32(u[1])<<16 |
		uint32(u[0])<<24
}

type uint40String string

func (u uint40String) get(i uint32) uint64 {
	off := uint64(i * (nodesBits / 8))
	return uint64(u[off])<<32 |
		uint64(u[off+1])<<24 |
		uint64(u[off+2])<<16 |
		uint64(u[off+3])<<8 |
		uint64(u[off+4])
	u = u[off:] // help the compiler reduce bounds checks
	return uint64(u[4]) |
		uint64(u[3])<<8 |
		uint64(u[2])<<16 |
		uint64(u[1])<<24 |
		uint64(u[0])<<32
}
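The u = u[off:] reslice is a bounds-check-reduction idiom: after the reslice, indexing u[3] first lets the compiler prove u[0..2] are in range from that single check. The accessor below is copied verbatim with a tiny driver (the sample string is made up):

package main

import "fmt"

type uint32String string

func (u uint32String) get(i uint32) uint32 {
	off := i * 4
	u = u[off:] // help the compiler reduce bounds checks
	return uint32(u[3]) |
		uint32(u[2])<<8 |
		uint32(u[1])<<16 |
		uint32(u[0])<<24
}

func main() {
	// Two big-endian uint32 values: 42 and 256.
	s := uint32String("\x00\x00\x00\x2a\x00\x00\x01\x00")
	fmt.Println(s.get(0), s.get(1)) // 42 256
}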
8 vendor/golang.org/x/oauth2/oauth2.go (generated, vendored)
@@ -288,7 +288,7 @@ func (tf *tokenRefresher) Token() (*Token, error) {
	if tf.refreshToken != tk.RefreshToken {
		tf.refreshToken = tk.RefreshToken
	}
	return tk, err
	return tk, nil
}

// reuseTokenSource is a TokenSource that holds a single token in memory
@@ -356,11 +356,15 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client {
	if src == nil {
		return internal.ContextClient(ctx)
	}
	cc := internal.ContextClient(ctx)
	return &http.Client{
		Transport: &Transport{
			Base:   internal.ContextClient(ctx).Transport,
			Base:   cc.Transport,
			Source: ReuseTokenSource(nil, src),
		},
		CheckRedirect: cc.CheckRedirect,
		Jar:           cc.Jar,
		Timeout:       cc.Timeout,
	}
}
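With this NewClient change, the base client's CheckRedirect, Jar, and Timeout survive the OAuth2 wrapping instead of being silently dropped. A sketch of supplying that base client through the context; the token value and timeout are placeholders:

package main

import (
	"context"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	// oauth2.NewClient picks up a base *http.Client stored under the
	// oauth2.HTTPClient context key.
	base := &http.Client{Timeout: 10 * time.Second}
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, base)

	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "placeholder-token"})
	client := oauth2.NewClient(ctx, src)
	_ = client // after this fix, client.Timeout is 10s, not zero
}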
4 vendor/golang.org/x/oauth2/pkce.go (generated, vendored)
@@ -21,7 +21,7 @@ const (
//
// A fresh verifier should be generated for each authorization.
// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL
// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange
// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange
// (or Config.DeviceAccessToken).
func GenerateVerifier() string {
	// "RECOMMENDED that the output of a suitable random number generator be
@@ -51,7 +51,7 @@ func S256ChallengeFromVerifier(verifier string) string {
}

// S256ChallengeOption derives a PKCE code challenge derived from verifier with
// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess
// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth
// only.
func S256ChallengeOption(verifier string) AuthCodeOption {
	return challengeOption{
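The corrected comments describe the standard PKCE pairing: the challenge goes to the authorization URL, the verifier to the code exchange. A compact flow sketch with placeholder endpoints, credentials, and authorization code:

package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID: "client-id", // placeholder values for illustration
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://auth.example.com/authorize",
			TokenURL: "https://auth.example.com/token",
		},
		RedirectURL: "https://app.example.com/callback",
	}

	verifier := oauth2.GenerateVerifier()
	// The S256 challenge accompanies the authorization request...
	url := conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier))
	fmt.Println(url)
	// ...and the raw verifier accompanies the code exchange.
	_, _ = conf.Exchange(context.Background(), "auth-code", oauth2.VerifierOption(verifier))
}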
2 vendor/golang.org/x/sync/errgroup/errgroup.go (generated, vendored)
@@ -46,7 +46,7 @@ func (g *Group) done() {
// returns a non-nil error or the first time Wait returns, whichever occurs
// first.
func WithContext(ctx context.Context) (*Group, context.Context) {
	ctx, cancel := withCancelCause(ctx)
	ctx, cancel := context.WithCancelCause(ctx)
	return &Group{cancel: cancel}, ctx
}
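After this change errgroup calls context.WithCancelCause directly, which is why the go1.20 build-tag shims in the two files deleted below are no longer needed. Typical usage is unchanged:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	for i := 0; i < 3; i++ {
		g.Go(func() error {
			select {
			case <-ctx.Done():
				return ctx.Err() // another goroutine failed first
			default:
				return nil
			}
		})
	}
	fmt.Println(g.Wait()) // <nil>; the first non-nil error would cancel ctx
}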
13 vendor/golang.org/x/sync/errgroup/go120.go (generated, vendored)
@@ -1,13 +0,0 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.20

package errgroup

import "context"

func withCancelCause(parent context.Context) (context.Context, func(error)) {
	return context.WithCancelCause(parent)
}
14 vendor/golang.org/x/sync/errgroup/pre_go120.go (generated, vendored)
@@ -1,14 +0,0 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !go1.20

package errgroup

import "context"

func withCancelCause(parent context.Context) (context.Context, func(error)) {
	ctx, cancel := context.WithCancel(parent)
	return ctx, func(error) { cancel() }
}
2 vendor/golang.org/x/text/language/parse.go (generated, vendored)
@@ -59,7 +59,7 @@ func (c CanonType) Parse(s string) (t Tag, err error) {
	if changed {
		tt.RemakeString()
	}
	return makeTag(tt), err
	return makeTag(tt), nil
}

// Compose creates a Tag from individual parts, which may be of type Tag, Base,
2 vendor/golang.org/x/tools/go/ast/astutil/rewrite.go (generated, vendored)
@@ -183,7 +183,7 @@ type application struct {
func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) {
	// convert typed nil into untyped nil
	if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() {
	if v := reflect.ValueOf(n); v.Kind() == reflect.Pointer && v.IsNil() {
		n = nil
	}
2 vendor/golang.org/x/tools/go/ast/astutil/util.go (generated, vendored)
@@ -8,4 +8,6 @@ import "go/ast"
// Unparen returns e with any enclosing parentheses stripped.
// Deprecated: use [ast.Unparen].
//
//go:fix inline
func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) }
12 vendor/golang.org/x/tools/go/packages/packages.go (generated, vendored)
@@ -141,6 +141,8 @@ const (
	LoadAllSyntax = LoadSyntax | NeedDeps

	// Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile.
	//
	//go:fix inline
	NeedExportsFile = NeedExportFile
)

@@ -161,7 +163,7 @@ type Config struct {
	// If the user provides a logger, debug logging is enabled.
	// If the GOPACKAGESDEBUG environment variable is set to true,
	// but the logger is nil, default to log.Printf.
	Logf func(format string, args ...interface{})
	Logf func(format string, args ...any)

	// Dir is the directory in which to run the build system's query tool
	// that provides information about the packages.
@@ -564,13 +566,13 @@ type ModuleError struct {
}

func init() {
	packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError {
	packagesinternal.GetDepsErrors = func(p any) []*packagesinternal.PackageError {
		return p.(*Package).depsErrors
	}
	packagesinternal.SetModFile = func(config interface{}, value string) {
	packagesinternal.SetModFile = func(config any, value string) {
		config.(*Config).modFile = value
	}
	packagesinternal.SetModFlag = func(config interface{}, value string) {
	packagesinternal.SetModFlag = func(config any, value string) {
		config.(*Config).modFlag = value
	}
	packagesinternal.TypecheckCgo = int(typecheckCgo)
@@ -739,7 +741,7 @@ func newLoader(cfg *Config) *loader {
		if debug {
			ld.Config.Logf = log.Printf
		} else {
			ld.Config.Logf = func(format string, args ...interface{}) {}
			ld.Config.Logf = func(format string, args ...any) {}
		}
	}
	if ld.Config.Mode == 0 {
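Config.Logf now uses any rather than interface{}, which is purely cosmetic for callers since the two are aliases. A minimal load with debug logging wired up (the package pattern is chosen arbitrarily):

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName,
		// The any-based signature matches func literals written either way.
		Logf: func(format string, args ...any) { log.Printf(format, args...) },
	}
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkgs[0].Name) // fmt
}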
9 vendor/golang.org/x/tools/go/types/typeutil/map.go (generated, vendored)
@@ -389,8 +389,13 @@ func (hasher) hashTypeName(tname *types.TypeName) uint32 {
	// path, and whether or not it is a package-level typename. It
	// is rare for a package to define multiple local types with
	// the same name.)
	hash := uintptr(unsafe.Pointer(tname))
	return uint32(hash ^ (hash >> 32))
	ptr := uintptr(unsafe.Pointer(tname))
	if unsafe.Sizeof(ptr) == 8 {
		hash := uint64(ptr)
		return uint32(hash ^ (hash >> 32))
	} else {
		return uint32(ptr)
	}
}

// shallowHash computes a hash of t without looking at any of its
6 vendor/golang.org/x/tools/internal/event/keys/keys.go (generated, vendored)
@@ -32,7 +32,7 @@ func (k *Value) Format(w io.Writer, buf []byte, l label.Label) {
}

// Get can be used to get a label for the key from a label.Map.
func (k *Value) Get(lm label.Map) interface{} {
func (k *Value) Get(lm label.Map) any {
	if t := lm.Find(k); t.Valid() {
		return k.From(t)
	}
@@ -40,10 +40,10 @@ func (k *Value) Get(lm label.Map) interface{} {
}

// From can be used to get a value from a Label.
func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() }
func (k *Value) From(t label.Label) any { return t.UnpackValue() }

// Of creates a new Label with this key and the supplied value.
func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) }
func (k *Value) Of(value any) label.Label { return label.OfValue(k, value) }

// Tag represents a key for tagging labels that have no value.
// These are used when the existence of the label is the entire information it
6 vendor/golang.org/x/tools/internal/event/label/label.go (generated, vendored)
@@ -32,7 +32,7 @@ type Key interface {
type Label struct {
	key     Key
	packed  uint64
	untyped interface{}
	untyped any
}

// Map is the interface to a collection of Labels indexed by key.
@@ -76,13 +76,13 @@ type mapChain struct {
// OfValue creates a new label from the key and value.
// This method is for implementing new key types, label creation should
// normally be done with the Of method of the key.
func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} }
func OfValue(k Key, value any) Label { return Label{key: k, untyped: value} }

// UnpackValue assumes the label was built using LabelOfValue and returns the value
// that was passed to that constructor.
// This method is for implementing new key types, for type safety normal
// access should be done with the From method of the key.
func (t Label) UnpackValue() interface{} { return t.untyped }
func (t Label) UnpackValue() any { return t.untyped }

// Of64 creates a new label from a key and a uint64. This is often
// used for non uint64 values that can be packed into a uint64.
2 vendor/golang.org/x/tools/internal/gcimporter/bimport.go (generated, vendored)
@@ -14,7 +14,7 @@ import (
	"sync"
)

func errorf(format string, args ...interface{}) {
func errorf(format string, args ...any) {
	panic(fmt.Sprintf(format, args...))
}
6 vendor/golang.org/x/tools/internal/gcimporter/iexport.go (generated, vendored)
@@ -310,7 +310,7 @@ func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byt
}

// ReportFunc is the type of a function used to report formatted bugs.
type ReportFunc = func(string, ...interface{})
type ReportFunc = func(string, ...any)

// Current bundled export format version. Increase with each format change.
// 0: initial implementation
@@ -597,7 +597,7 @@ type filePositions struct {
	needed []uint64 // unordered list of needed file offsets
}

func (p *iexporter) trace(format string, args ...interface{}) {
func (p *iexporter) trace(format string, args ...any) {
	if !trace {
		// Call sites should also be guarded, but having this check here allows
		// easily enabling/disabling debug trace statements.
@@ -1583,6 +1583,6 @@ func (e internalError) Error() string { return "gcimporter: " + string(e) }
// "internalErrorf" as the former is used for bugs, whose cause is
// internal inconsistency, whereas the latter is used for ordinary
// situations like bad input, whose cause is external.
func internalErrorf(format string, args ...interface{}) error {
func internalErrorf(format string, args ...any) error {
	return internalError(fmt.Sprintf(format, args...))
}
2 vendor/golang.org/x/tools/internal/gcimporter/iimport.go (generated, vendored)
@@ -400,7 +400,7 @@ type iimporter struct {
	indent int // for tracing support
}

func (p *iimporter) trace(format string, args ...interface{}) {
func (p *iimporter) trace(format string, args ...any) {
	if !trace {
		// Call sites should also be guarded, but having this check here allows
		// easily enabling/disabling debug trace statements.
2 vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go (generated, vendored)
@@ -574,7 +574,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
	recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named)
	typesinternal.SetVarKind(recv, typesinternal.RecvVar)
	methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic()))
	methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignatureType(recv, nil, nil, sig.Params(), sig.Results(), sig.Variadic()))
}

embeds := make([]types.Type, iface.NumEmbeddeds())
4 vendor/golang.org/x/tools/internal/gopathwalk/walk.go (generated, vendored)
@@ -22,7 +22,7 @@ import (
// Options controls the behavior of a Walk call.
type Options struct {
	// If Logf is non-nil, debug logging is enabled through this function.
	Logf func(format string, args ...interface{})
	Logf func(format string, args ...any)

	// Search module caches. Also disables legacy goimports ignore rules.
	ModulesEnabled bool
@@ -81,7 +81,7 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root
// walkDir creates a walker and starts fastwalk with this walker.
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) {
	if opts.Logf == nil {
		opts.Logf = func(format string, args ...interface{}) {}
		opts.Logf = func(format string, args ...any) {}
	}
	if _, err := os.Stat(root.Path); os.IsNotExist(err) {
		opts.Logf("skipping nonexistent directory: %v", root.Path)
4 vendor/golang.org/x/tools/internal/imports/fix.go (generated, vendored)
@@ -559,7 +559,7 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P
		return err
	}
	apply(fset, f, fixes)
	return err
	return nil
}

// getFixes gets the import fixes that need to be made to f in order to fix the imports.
@@ -1030,7 +1030,7 @@ func (e *ProcessEnv) GetResolver() (Resolver, error) {
	//
	// For gopls, we can optionally explicitly choose a resolver type, since we
	// already know the view type.
	if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 {
	if e.Env["GOMOD"] == "" && (e.Env["GOWORK"] == "" || e.Env["GOWORK"] == "off") {
		e.resolver = newGopathResolver(e)
		e.logf("created gopath resolver")
	} else if r, err := newModuleResolver(e, e.ModCache); err != nil {
2 vendor/golang.org/x/tools/internal/packagesinternal/packages.go (generated, vendored)
@@ -17,4 +17,4 @@ var TypecheckCgo int
var DepsErrors int // must be set as a LoadMode to call GetDepsErrors

var SetModFlag = func(config any, value string) {}
var SetModFile = func(config interface{}, value string) {}
var SetModFile = func(config any, value string) {}
359 vendor/golang.org/x/tools/internal/stdlib/deps.go (generated, vendored, new file)
@@ -0,0 +1,359 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Code generated by generate.go. DO NOT EDIT.

package stdlib

type pkginfo struct {
	name string
	deps string // list of indices of dependencies, as varint-encoded deltas
}

var deps = [...]pkginfo{
	{"archive/tar", "\x03k\x03E5\x01\v\x01#\x01\x01\x02\x05\t\x02\x01\x02\x02\v"},
	{"archive/zip", "\x02\x04a\a\x16\x0205\x01+\x05\x01\x10\x03\x02\r\x04"},
	{"bufio", "\x03k}E\x13"},
	{"bytes", "n+R\x03\fG\x02\x02"},
	{"cmp", ""},
	{"compress/bzip2", "\x02\x02\xe7\x01B"},
	{"compress/flate", "\x02l\x03z\r\x024\x01\x03"},
	{"compress/gzip", "\x02\x04a\a\x03\x15eT"},
	{"compress/lzw", "\x02l\x03z"},
	{"compress/zlib", "\x02\x04a\a\x03\x13\x01f"},
	{"container/heap", "\xae\x02"},
	{"container/list", ""},
	{"container/ring", ""},
	{"context", "n\\h\x01\f"},
	{"crypto", "\x84\x01gD"},
	{"crypto/aes", "\x10\n\a\x8e\x02"},
	{"crypto/cipher", "\x03\x1e\x01\x01\x1d\x11\x1d,Q"},
	{"crypto/des", "\x10\x13\x1d.,\x95\x01\x03"},
	{"crypto/dsa", "@\x04*}\x0e"},
	{"crypto/ecdh", "\x03\v\f\x0e\x04\x14\x04\r\x1d}"},
	{"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\x16\x01\x04\f\x01\x1d}\x0e\x04K\x01"},
	{"crypto/ed25519", "\x0e\x1c\x16\n\a\x1d}D"},
	{"crypto/elliptic", "0>}\x0e9"},
	{"crypto/fips140", " \x05\x91\x01"},
	{"crypto/hkdf", "-\x12\x01.\x16"},
	{"crypto/hmac", "\x1a\x14\x11\x01\x113"},
	{"crypto/internal/boring", "\x0e\x02\rg"},
	{"crypto/internal/boring/bbig", "\x1a\xdf\x01L"},
	{"crypto/internal/boring/bcache", "\xb3\x02\x12"},
	{"crypto/internal/boring/sig", ""},
	{"crypto/internal/cryptotest", "\x03\r\n)\x0e\x1a\x06\x13\x12#\a\t\x11\x11\x11\x1b\x01\f\f\x05\n"},
	{"crypto/internal/entropy", "E"},
	{"crypto/internal/fips140", ">0}9\f\x15"},
	{"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x04\x01\x01\x05+\x8c\x015"},
	{"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x04\x01\x06+\x8a\x01"},
	{"crypto/internal/fips140/alias", "\xc5\x02"},
	{"crypto/internal/fips140/bigmod", "%\x17\x01\x06+\x8c\x01"},
	{"crypto/internal/fips140/check", " \x0e\x06\b\x02\xad\x01Z"},
	{"crypto/internal/fips140/check/checktest", "%\xff\x01!"},
	{"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x04\b\x01)}\x0f8"},
	{"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\f2}\x0f8"},
	{"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x068}G"},
	{"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v8\xc1\x01\x03"},
	{"crypto/internal/fips140/edwards25519", "%\a\f\x042\x8c\x018"},
	{"crypto/internal/fips140/edwards25519/field", "%\x13\x042\x8c\x01"},
	{"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x06:"},
	{"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x018"},
	{"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x042"},
	{"crypto/internal/fips140/nistec", "%\f\a\x042\x8c\x01*\x0e\x13"},
	{"crypto/internal/fips140/nistec/fiat", "%\x136\x8c\x01"},
	{"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x06:"},
	{"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x026}G"},
	{"crypto/internal/fips140/sha256", "\x03\x1d\x1c\x01\x06+\x8c\x01"},
	{"crypto/internal/fips140/sha3", "\x03\x1d\x18\x04\x011\x8c\x01K"},
	{"crypto/internal/fips140/sha512", "\x03\x1d\x1c\x01\x06+\x8c\x01"},
	{"crypto/internal/fips140/ssh", " \x05"},
	{"crypto/internal/fips140/subtle", "#\x19\xbe\x01"},
	{"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x028"},
	{"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\b2"},
	{"crypto/internal/fips140deps", ""},
	{"crypto/internal/fips140deps/byteorder", "\x9a\x01"},
	{"crypto/internal/fips140deps/cpu", "\xae\x01\a"},
	{"crypto/internal/fips140deps/godebug", "\xb6\x01"},
	{"crypto/internal/fips140hash", "5\x1a5\xc1\x01"},
	{"crypto/internal/fips140only", "'\r\x01\x01N25"},
	{"crypto/internal/fips140test", ""},
	{"crypto/internal/hpke", "\x0e\x01\x01\x03\x1a\x1d$,`M"},
	{"crypto/internal/impl", "\xb0\x02"},
	{"crypto/internal/randutil", "\xeb\x01\x12"},
	{"crypto/internal/sysrand", "\xd7\x01@\x1b\x01\f\x06"},
	{"crypto/internal/sysrand/internal/seccomp", "n"},
	{"crypto/md5", "\x0e2.\x16\x16`"},
	{"crypto/mlkem", "/"},
	{"crypto/pbkdf2", "2\r\x01.\x16"},
	{"crypto/rand", "\x1a\x06\a\x19\x04\x01)}\x0eL"},
	{"crypto/rc4", "#\x1d.\xc1\x01"},
	{"crypto/rsa", "\x0e\f\x01\t\x0f\f\x01\x04\x06\a\x1d\x03\x1325\r\x01"},
	{"crypto/sha1", "\x0e\f&.\x16\x16\x14L"},
	{"crypto/sha256", "\x0e\f\x1aP"},
	{"crypto/sha3", "\x0e'O\xc1\x01"},
	{"crypto/sha512", "\x0e\f\x1cN"},
	{"crypto/subtle", "8\x98\x01T"},
	{"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x03\x01\a\x01\v\x02\n\x01\b\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x18\x02\x03\x13\x16\x14\b5\x16\x16\r\t\x01\x01\x01\x02\x01\f\x06\x02\x01"},
	{"crypto/tls/internal/fips140tls", " \x93\x02"},
	{"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x011\x03\x02\x01\x01\x02\x05\x01\x0e\x06\x02\x02\x03E5\x03\t\x01\x01\x01\a\x10\x05\t\x05\v\x01\x02\r\x02\x01\x01\x02\x03\x01"},
	{"crypto/x509/internal/macos", "\x03k'\x8f\x01\v\x10\x06"},
	{"crypto/x509/pkix", "d\x06\a\x88\x01F"},
	{"database/sql", "\x03\nK\x16\x03z\f\x06\"\x05\t\x02\x03\x01\f\x02\x02\x02"},
	{"database/sql/driver", "\ra\x03\xae\x01\x10\x10"},
	{"debug/buildinfo", "\x03X\x02\x01\x01\b\a\x03`\x18\x02\x01+\x10\x1e"},
	{"debug/dwarf", "\x03d\a\x03z1\x12\x01\x01"},
	{"debug/elf", "\x03\x06Q\r\a\x03`\x19\x01,\x18\x01\x15"},
	{"debug/gosym", "\x03d\n\xbd\x01\x01\x01\x02"},
	{"debug/macho", "\x03\x06Q\r\n`\x1a,\x18\x01"},
	{"debug/pe", "\x03\x06Q\r\a\x03`\x1a,\x18\x01\x15"},
	{"debug/plan9obj", "g\a\x03`\x1a,"},
	{"embed", "n+:\x18\x01S"},
	{"embed/internal/embedtest", ""},
	{"encoding", ""},
	{"encoding/ascii85", "\xeb\x01D"},
	{"encoding/asn1", "\x03k\x03\x87\x01\x01&\x0e\x02\x01\x0f\x03\x01"},
	{"encoding/base32", "\xeb\x01B\x02"},
	{"encoding/base64", "\x9a\x01QB\x02"},
	{"encoding/binary", "n}\r'\x0e\x05"},
	{"encoding/csv", "\x02\x01k\x03zE\x11\x02"},
	{"encoding/gob", "\x02`\x05\a\x03`\x1a\f\x01\x02\x1d\b\x13\x01\x0e\x02"},
	{"encoding/hex", "n\x03zB\x03"},
	{"encoding/json", "\x03\x01^\x04\b\x03z\r'\x0e\x02\x01\x02\x0f\x01\x01\x02"},
	{"encoding/pem", "\x03c\b}B\x03"},
	{"encoding/xml", "\x02\x01_\f\x03z4\x05\v\x01\x02\x0f\x02"},
	{"errors", "\xca\x01{"},
	{"expvar", "kK9\t\n\x15\r\t\x02\x03\x01\x10"},
	{"flag", "b\f\x03z,\b\x05\t\x02\x01\x0f"},
	{"fmt", "nE8\r\x1f\b\x0e\x02\x03\x11"},
	{"go/ast", "\x03\x01m\x0f\x01j\x03)\b\x0e\x02\x01"},
	{"go/ast/internal/tests", ""},
	{"go/build", "\x02\x01k\x03\x01\x03\x02\a\x02\x01\x17\x1e\x04\x02\t\x14\x12\x01+\x01\x04\x01\a\t\x02\x01\x11\x02\x02"},
	{"go/build/constraint", "n\xc1\x01\x01\x11\x02"},
	{"go/constant", "q\x10w\x01\x015\x01\x02\x11"},
	{"go/doc", "\x04m\x01\x06\t=-1\x11\x02\x01\x11\x02"},
	{"go/doc/comment", "\x03n\xbc\x01\x01\x01\x01\x11\x02"},
	{"go/format", "\x03n\x01\f\x01\x02jE"},
	{"go/importer", "t\a\x01\x01\x04\x01i9"},
	{"go/internal/gccgoimporter", "\x02\x01X\x13\x03\x05\v\x01g\x02,\x01\x05\x12\x01\v\b"},
	{"go/internal/gcimporter", "\x02o\x10\x01/\x05\x0e',\x16\x03\x02"},
	{"go/internal/srcimporter", "q\x01\x02\n\x03\x01i,\x01\x05\x13\x02\x13"},
	{"go/parser", "\x03k\x03\x01\x03\v\x01j\x01+\x06\x13"},
	{"go/printer", "q\x01\x03\x03\tj\r\x1f\x16\x02\x01\x02\n\x05\x02"},
	{"go/scanner", "\x03n\x10j2\x11\x01\x12\x02"},
	{"go/token", "\x04m\xbc\x01\x02\x03\x01\x0e\x02"},
	{"go/types", "\x03\x01\x06d\x03\x01\x04\b\x03\x02\x15\x1e\x06+\x04\x03\n%\a\t\x01\x01\x01\x02\x01\x0e\x02\x02"},
	{"go/version", "\xbb\x01u"},
	{"hash", "\xeb\x01"},
	{"hash/adler32", "n\x16\x16"},
	{"hash/crc32", "n\x16\x16\x14\x84\x01\x01"},
	{"hash/crc64", "n\x16\x16\x98\x01"},
	{"hash/fnv", "n\x16\x16`"},
	{"hash/maphash", "\x95\x01\x05\x1b\x03@M"},
	{"html", "\xb0\x02\x02\x11"},
	{"html/template", "\x03h\x06\x19,5\x01\v \x05\x01\x02\x03\r\x01\x02\v\x01\x03\x02"},
	{"image", "\x02l\x1f^\x0f5\x03\x01"},
	{"image/color", ""},
	{"image/color/palette", "\x8d\x01"},
	{"image/draw", "\x8c\x01\x01\x04"},
	{"image/gif", "\x02\x01\x05f\x03\x1b\x01\x01\x01\vQ"},
	{"image/internal/imageutil", "\x8c\x01"},
	{"image/jpeg", "\x02l\x1e\x01\x04Z"},
	{"image/png", "\x02\a^\n\x13\x02\x06\x01^D"},
	{"index/suffixarray", "\x03d\a}\r*\v\x01"},
	{"internal/abi", "\xb5\x01\x90\x01"},
	{"internal/asan", "\xc5\x02"},
	{"internal/bisect", "\xa4\x02\x0e\x01"},
	{"internal/buildcfg", "qG_\x06\x02\x05\v\x01"},
	{"internal/bytealg", "\xae\x01\x97\x01"},
	{"internal/byteorder", ""},
	{"internal/cfg", ""},
	{"internal/chacha8rand", "\x9a\x01\x1b\x90\x01"},
	{"internal/copyright", ""},
	{"internal/coverage", ""},
	{"internal/coverage/calloc", ""},
	{"internal/coverage/cfile", "k\x06\x17\x16\x01\x02\x01\x01\x01\x01\x01\x01\x01$\x01\x1e,\x06\a\v\x01\x03\f\x06"},
	{"internal/coverage/cformat", "\x04m-\x04I\f6\x01\x02\f"},
	{"internal/coverage/cmerge", "q-Z"},
	{"internal/coverage/decodecounter", "g\n-\v\x02@,\x18\x16"},
	{"internal/coverage/decodemeta", "\x02e\n\x17\x16\v\x02@,"},
	{"internal/coverage/encodecounter", "\x02e\n-\f\x01\x02>\f \x16"},
	{"internal/coverage/encodemeta", "\x02\x01d\n\x13\x04\x16\r\x02>,."},
	{"internal/coverage/pods", "\x04m-y\x06\x05\v\x02\x01"},
	{"internal/coverage/rtcov", "\xc5\x02"},
	{"internal/coverage/slicereader", "g\nzZ"},
	{"internal/coverage/slicewriter", "qz"},
	{"internal/coverage/stringtab", "q8\x04>"},
	{"internal/coverage/test", ""},
	{"internal/coverage/uleb128", ""},
	{"internal/cpu", "\xc5\x02"},
	{"internal/dag", "\x04m\xbc\x01\x03"},
	{"internal/diff", "\x03n\xbd\x01\x02"},
	{"internal/exportdata", "\x02\x01k\x03\x03]\x1a,\x01\x05\x12\x01\x02"},
{"internal/filepathlite", "n+:\x19A"},
|
||||
{"internal/fmtsort", "\x04\x9b\x02\x0e"},
|
||||
{"internal/fuzz", "\x03\nA\x19\x04\x03\x03\x01\f\x0355\r\x02\x1d\x01\x05\x02\x05\v\x01\x02\x01\x01\v\x04\x02"},
|
||||
{"internal/goarch", ""},
|
||||
{"internal/godebug", "\x97\x01 {\x01\x12"},
|
||||
{"internal/godebugs", ""},
|
||||
{"internal/goexperiment", ""},
|
||||
{"internal/goos", ""},
|
||||
{"internal/goroot", "\x97\x02\x01\x05\x13\x02"},
|
||||
{"internal/gover", "\x04"},
|
||||
{"internal/goversion", ""},
|
||||
{"internal/itoa", ""},
|
||||
{"internal/lazyregexp", "\x97\x02\v\x0e\x02"},
|
||||
{"internal/lazytemplate", "\xeb\x01,\x19\x02\v"},
|
||||
{"internal/msan", "\xc5\x02"},
|
||||
{"internal/nettrace", ""},
|
||||
{"internal/obscuretestdata", "f\x85\x01,"},
|
||||
{"internal/oserror", "n"},
|
||||
{"internal/pkgbits", "\x03K\x19\a\x03\x05\vj\x0e\x1e\r\v\x01"},
|
||||
{"internal/platform", ""},
|
||||
{"internal/poll", "nO\x1a\x149\x0e\x01\x01\v\x06"},
|
||||
{"internal/profile", "\x03\x04g\x03z7\f\x01\x01\x0f"},
|
||||
{"internal/profilerecord", ""},
|
||||
{"internal/race", "\x95\x01\xb0\x01"},
|
||||
{"internal/reflectlite", "\x95\x01 3<!"},
|
||||
{"internal/routebsd", "n,w\x13\x10\x11"},
|
||||
{"internal/runtime/atomic", "\xae\x01\x97\x01"},
|
||||
{"internal/runtime/exithook", "\xcc\x01y"},
|
||||
{"internal/runtime/maps", "\x95\x01\x01\x1f\v\t\x06\x01u"},
|
||||
{"internal/runtime/math", "\xb5\x01"},
|
||||
{"internal/runtime/sys", "\xae\x01\a\x04"},
|
||||
{"internal/saferio", "\xeb\x01Z"},
|
||||
{"internal/singleflight", "\xb2\x02"},
|
||||
{"internal/stringslite", "\x99\x01\xac\x01"},
|
||||
{"internal/sync", "\x95\x01 \x14j\x12"},
|
||||
{"internal/synctest", "\xc5\x02"},
|
||||
{"internal/syscall/execenv", "\xb4\x02"},
|
||||
{"internal/syscall/unix", "\x95\x01\x8f\x01\x10\x11"},
|
||||
{"internal/sysinfo", "\xae\x01\x84\x01\x02"},
|
||||
{"internal/syslist", ""},
|
||||
{"internal/testenv", "\x03\na\x02\x01*\x1a\x10'+\x01\x05\a\v\x01\x02\x02\x01\n"},
|
||||
{"internal/testlog", "\xb2\x02\x01\x12"},
|
||||
{"internal/testpty", "n\x03f@\x1d"},
|
||||
{"internal/trace", "\x02\x01\x01\x06]\a\x03n\x03\x03\x06\x03\n5\x01\x02\x0f\x06"},
|
||||
{"internal/trace/internal/testgen", "\x03d\nl\x03\x02\x03\x011\v\x0e"},
|
||||
{"internal/trace/internal/tracev1", "\x03\x01c\a\x03t\x06\r5\x01"},
|
||||
{"internal/trace/raw", "\x02e\nq\x03\x06D\x01\x11"},
|
||||
{"internal/trace/testtrace", "\x02\x01k\x03l\x03\x06\x057\v\x02\x01"},
|
||||
{"internal/trace/tracev2", ""},
|
||||
{"internal/trace/traceviewer", "\x02^\v\x06\x1a<\x16\a\a\x04\t\n\x15\x01\x05\a\v\x01\x02\r"},
|
||||
{"internal/trace/traceviewer/format", ""},
|
||||
{"internal/trace/version", "qq\t"},
|
||||
{"internal/txtar", "\x03n\xa6\x01\x19"},
|
||||
{"internal/types/errors", "\xaf\x02"},
|
||||
{"internal/unsafeheader", "\xc5\x02"},
|
||||
{"internal/xcoff", "Z\r\a\x03`\x1a,\x18\x01"},
|
||||
{"internal/zstd", "g\a\x03z\x0f"},
|
||||
{"io", "n\xc4\x01"},
|
||||
{"io/fs", "n+*(1\x11\x12\x04"},
|
||||
{"io/ioutil", "\xeb\x01\x01+\x16\x03"},
|
||||
{"iter", "\xc9\x01[!"},
|
||||
{"log", "qz\x05'\r\x0e\x01\f"},
|
||||
{"log/internal", ""},
|
||||
{"log/slog", "\x03\nU\t\x03\x03z\x04\x01\x02\x02\x04'\x05\t\x02\x01\x02\x01\f\x02\x02\x02"},
|
||||
{"log/slog/internal", ""},
|
||||
{"log/slog/internal/benchmarks", "\ra\x03z\x06\x03;\x10"},
|
||||
{"log/slog/internal/buffer", "\xb2\x02"},
|
||||
{"log/slog/internal/slogtest", "\xf1\x01"},
|
||||
{"log/syslog", "n\x03~\x12\x16\x19\x02\r"},
|
||||
{"maps", "\xee\x01W"},
|
||||
{"math", "\xfa\x01K"},
|
||||
{"math/big", "\x03k\x03)Q\r\x02\x021\x02\x01\x02\x13"},
|
||||
{"math/bits", "\xc5\x02"},
|
||||
{"math/cmplx", "\xf8\x01\x02"},
|
||||
{"math/rand", "\xb6\x01B:\x01\x12"},
|
||||
{"math/rand/v2", "n,\x02\\\x02K"},
|
||||
{"mime", "\x02\x01c\b\x03z\f \x16\x03\x02\x0f\x02"},
|
||||
{"mime/multipart", "\x02\x01G$\x03E5\f\x01\x06\x02\x15\x02\x06\x10\x02\x01\x15"},
|
||||
{"mime/quotedprintable", "\x02\x01nz"},
|
||||
{"net", "\x04\ta+\x1d\a\x04\x05\x05\a\x01\x04\x14\x01%\x06\r\t\x05\x01\x01\v\x06\a"},
|
||||
{"net/http", "\x02\x01\x04\x04\x02=\b\x14\x01\a\x03E5\x01\x03\b\x01\x02\x02\x02\x01\x02\x06\x02\x01\x01\n\x01\x01\x05\x01\x02\x05\t\x01\x01\x01\x02\x01\f\x02\x02\x02\b\x01\x01\x01"},
|
||||
{"net/http/cgi", "\x02P\x1c\x03z\x04\b\n\x01\x13\x01\x01\x01\x04\x01\x05\x02\t\x02\x01\x0f\x0e"},
|
||||
{"net/http/cookiejar", "\x04j\x03\x90\x01\x01\b\f\x17\x03\x02\r\x04"},
|
||||
{"net/http/fcgi", "\x02\x01\nZ\a\x03z\x16\x01\x01\x14\x19\x02\r"},
|
||||
{"net/http/httptest", "\x02\x01\nE\x02\x1c\x01z\x04\x12\x01\n\t\x02\x18\x01\x02\r\x0e"},
|
||||
{"net/http/httptrace", "\rEo@\x14\n "},
|
||||
{"net/http/httputil", "\x02\x01\na\x03z\x04\x0f\x03\x01\x05\x02\x01\v\x01\x1a\x02\r\x0e"},
|
||||
{"net/http/internal", "\x02\x01k\x03z"},
|
||||
{"net/http/internal/ascii", "\xb0\x02\x11"},
|
||||
{"net/http/internal/httpcommon", "\ra\x03\x96\x01\x0e\x01\x18\x01\x01\x02\x1b\x02"},
|
||||
{"net/http/internal/testcert", "\xb0\x02"},
|
||||
{"net/http/pprof", "\x02\x01\nd\x19,\x11$\x04\x13\x14\x01\r\x06\x02\x01\x02\x01\x0f"},
|
||||
{"net/internal/cgotest", "\xd7\x01n"},
|
||||
{"net/internal/socktest", "q\xc1\x01\x02"},
|
||||
{"net/mail", "\x02l\x03z\x04\x0f\x03\x14\x1b\x02\r\x04"},
|
||||
{"net/netip", "\x04j+\x01#;\x025\x15"},
|
||||
{"net/rpc", "\x02g\x05\x03\x10\n`\x04\x12\x01\x1d\x0e\x03\x02"},
|
||||
{"net/rpc/jsonrpc", "k\x03\x03z\x16\x11 "},
|
||||
{"net/smtp", "\x19.\v\x14\b\x03z\x16\x14\x1b"},
|
||||
{"net/textproto", "\x02\x01k\x03z\r\t.\x01\x02\x13"},
|
||||
{"net/url", "n\x03\x86\x01%\x11\x02\x01\x15"},
|
||||
{"os", "n+\x19\v\t\r\x03\x01\x04\x10\x018\t\x05\x01\x01\v\x06"},
|
||||
{"os/exec", "\x03\naH \x01\x14\x01+\x06\a\v\x01\x04\v"},
|
||||
{"os/exec/internal/fdtest", "\xb4\x02"},
|
||||
{"os/signal", "\r\x8a\x02\x16\x05\x02"},
|
||||
{"os/user", "qfM\v\x01\x02\x02\x11"},
|
||||
{"path", "n+\xaa\x01"},
|
||||
{"path/filepath", "n+\x19:+\r\t\x03\x04\x0f"},
|
||||
{"plugin", "n\xc4\x01\x13"},
|
||||
{"reflect", "n'\x04\x1c\b\f\x05\x02\x18\x06\n,\v\x03\x0f\x02\x02"},
|
||||
{"reflect/internal/example1", ""},
|
||||
{"reflect/internal/example2", ""},
|
||||
{"regexp", "\x03\xe8\x018\n\x02\x01\x02\x0f\x02"},
|
||||
{"regexp/syntax", "\xad\x02\x01\x01\x01\x11\x02"},
|
||||
{"runtime", "\x95\x01\x04\x01\x02\f\x06\a\x02\x01\x01\x0f\x04\x01\x01\x01\x01\x03\x0fc"},
|
||||
{"runtime/cgo", "\xd0\x01b\x01\x12"},
|
||||
{"runtime/coverage", "\xa0\x01K"},
|
||||
{"runtime/debug", "qUQ\r\t\x02\x01\x0f\x06"},
|
||||
{"runtime/internal/wasitest", ""},
|
||||
{"runtime/metrics", "\xb7\x01A,!"},
|
||||
{"runtime/pprof", "\x02\x01\x01\x03\x06Z\a\x03$3#\r\x1f\r\t\x01\x01\x01\x02\x02\b\x03\x06"},
|
||||
{"runtime/race", ""},
|
||||
{"runtime/trace", "\rdz9\x0e\x01\x12"},
|
||||
{"slices", "\x04\xea\x01\fK"},
|
||||
{"sort", "\xca\x0103"},
|
||||
{"strconv", "n+:%\x02I"},
|
||||
{"strings", "n'\x04:\x18\x03\f8\x0f\x02\x02"},
|
||||
{"structs", ""},
|
||||
{"sync", "\xc9\x01\vP\x0f\x12"},
|
||||
{"sync/atomic", "\xc5\x02"},
|
||||
{"syscall", "n'\x01\x03\x01\x1b\b\x03\x03\x06[\x0e\x01\x12"},
|
||||
{"testing", "\x03\na\x02\x01X\x0f\x13\r\x04\x1b\x06\x02\x05\x03\x05\x01\x02\x01\x02\x01\f\x02\x02\x02"},
|
||||
{"testing/fstest", "n\x03z\x01\v%\x11\x03\b\a"},
|
||||
{"testing/internal/testdeps", "\x02\v\xa7\x01'\x10,\x03\x05\x03\b\x06\x02\r"},
|
||||
{"testing/iotest", "\x03k\x03z\x04"},
|
||||
{"testing/quick", "p\x01\x87\x01\x04#\x11\x0f"},
|
||||
{"testing/slogtest", "\ra\x03\x80\x01.\x05\x11\n"},
|
||||
{"text/scanner", "\x03nz,*\x02"},
|
||||
{"text/tabwriter", "qzX"},
|
||||
{"text/template", "n\x03B8\x01\v\x1f\x01\x05\x01\x02\x05\f\x02\f\x03\x02"},
|
||||
{"text/template/parse", "\x03n\xb3\x01\v\x01\x11\x02"},
|
||||
{"time", "n+\x1d\x1d'*\x0e\x02\x11"},
|
||||
{"time/tzdata", "n\xc6\x01\x11"},
|
||||
{"unicode", ""},
|
||||
{"unicode/utf16", ""},
|
||||
{"unicode/utf8", ""},
|
||||
{"unique", "\x95\x01>\x01P\x0e\x13\x12"},
|
||||
{"unsafe", ""},
|
||||
{"vendor/golang.org/x/crypto/chacha20", "\x10W\a\x8c\x01*&"},
|
||||
{"vendor/golang.org/x/crypto/chacha20poly1305", "\x10W\a\xd8\x01\x04\x01"},
|
||||
{"vendor/golang.org/x/crypto/cryptobyte", "d\n\x03\x88\x01& \n"},
|
||||
{"vendor/golang.org/x/crypto/cryptobyte/asn1", ""},
|
||||
{"vendor/golang.org/x/crypto/internal/alias", "\xc5\x02"},
|
||||
{"vendor/golang.org/x/crypto/internal/poly1305", "Q\x16\x93\x01"},
|
||||
{"vendor/golang.org/x/net/dns/dnsmessage", "n"},
|
||||
{"vendor/golang.org/x/net/http/httpguts", "\x81\x02\x14\x1b\x13\r"},
|
||||
{"vendor/golang.org/x/net/http/httpproxy", "n\x03\x90\x01\x15\x01\x19\x13\r"},
|
||||
{"vendor/golang.org/x/net/http2/hpack", "\x03k\x03zG"},
|
||||
{"vendor/golang.org/x/net/idna", "q\x87\x018\x13\x10\x02\x01"},
|
||||
{"vendor/golang.org/x/net/nettest", "\x03d\a\x03z\x11\x05\x16\x01\f\v\x01\x02\x02\x01\n"},
|
||||
{"vendor/golang.org/x/sys/cpu", "\x97\x02\r\v\x01\x15"},
|
||||
{"vendor/golang.org/x/text/secure/bidirule", "n\xd5\x01\x11\x01"},
|
||||
{"vendor/golang.org/x/text/transform", "\x03k}X"},
|
||||
{"vendor/golang.org/x/text/unicode/bidi", "\x03\bf~?\x15"},
|
||||
{"vendor/golang.org/x/text/unicode/norm", "g\nzG\x11\x11"},
|
||||
{"weak", "\x95\x01\x8f\x01!"},
|
||||
}
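For context: each string literal in the table above is a sequence of uvarint-encoded deltas between indices into this same table, and the Imports and Dependencies functions in import.go below decode it with binary.Uvarint. A minimal standalone sketch of that decoding, using an invented three-byte input rather than a real table entry:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Hypothetical input: three single-byte uvarint deltas (3, 29, 28).
	// Summing the deltas yields table indices 3, 32, and 60.
	data := []byte("\x03\x1d\x1c")
	var index uint64
	for len(data) > 0 {
		delta, n := binary.Uvarint(data) // decode one delta and its byte width
		index += delta
		fmt.Println(index) // prints 3, then 32, then 60
		data = data[n:]
	}
}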
89 vendor/golang.org/x/tools/internal/stdlib/import.go generated vendored Normal file
@@ -0,0 +1,89 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package stdlib

// This file provides the API for the import graph of the standard library.
//
// Be aware that the compiler-generated code for every package
// implicitly depends on package "runtime" and a handful of others
// (see runtimePkgs in GOROOT/src/cmd/internal/objabi/pkgspecial.go).

import (
	"encoding/binary"
	"iter"
	"slices"
	"strings"
)

// Imports returns the sequence of packages directly imported by the
// named standard packages, in name order.
// The imports of an unknown package are the empty set.
//
// The graph is built into the application and may differ from the
// graph in the Go source tree being analyzed by the application.
func Imports(pkgs ...string) iter.Seq[string] {
	return func(yield func(string) bool) {
		for _, pkg := range pkgs {
			if i, ok := find(pkg); ok {
				var depIndex uint64
				for data := []byte(deps[i].deps); len(data) > 0; {
					delta, n := binary.Uvarint(data)
					depIndex += delta
					if !yield(deps[depIndex].name) {
						return
					}
					data = data[n:]
				}
			}
		}
	}
}

// Dependencies returns the set of all dependencies of the named
// standard packages, including the initial package,
// in a deterministic topological order.
// The dependencies of an unknown package are the empty set.
//
// The graph is built into the application and may differ from the
// graph in the Go source tree being analyzed by the application.
func Dependencies(pkgs ...string) iter.Seq[string] {
	return func(yield func(string) bool) {
		for _, pkg := range pkgs {
			if i, ok := find(pkg); ok {
				var seen [1 + len(deps)/8]byte // bit set of seen packages
				var visit func(i int) bool
				visit = func(i int) bool {
					bit := byte(1) << (i % 8)
					if seen[i/8]&bit == 0 {
						seen[i/8] |= bit
						var depIndex uint64
						for data := []byte(deps[i].deps); len(data) > 0; {
							delta, n := binary.Uvarint(data)
							depIndex += delta
							if !visit(int(depIndex)) {
								return false
							}
							data = data[n:]
						}
						if !yield(deps[i].name) {
							return false
						}
					}
					return true
				}
				if !visit(i) {
					return
				}
			}
		}
	}
}

// find returns the index of pkg in the deps table.
func find(pkg string) (int, bool) {
	return slices.BinarySearchFunc(deps[:], pkg, func(p pkginfo, n string) int {
		return strings.Compare(p.name, n)
	})
}
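As a usage note: both functions return Go 1.23 iter.Seq values, so they compose directly with range-over-func. The package is internal to x/tools, so the import below is illustrative only; this is a sketch of how the iterators would be consumed, not code runnable outside that module:

package main

import (
	"fmt"

	"golang.org/x/tools/internal/stdlib" // internal; importable only within x/tools
)

func main() {
	// Direct imports of net/http, in name order.
	for pkg := range stdlib.Imports("net/http") {
		fmt.Println(pkg)
	}
	// Transitive closure including net/http itself, in deterministic
	// topological order (dependencies come before dependents).
	for pkg := range stdlib.Dependencies("net/http") {
		fmt.Println(pkg)
	}
}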
20 vendor/golang.org/x/tools/internal/stdlib/manifest.go generated vendored
@@ -1,4 +1,4 @@
-// Copyright 2024 The Go Authors. All rights reserved.
+// Copyright 2025 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
@@ -2151,6 +2151,8 @@ var PackageSymbols = map[string][]Symbol{
 {"(Type).String", Method, 0},
 {"(Version).GoString", Method, 0},
 {"(Version).String", Method, 0},
+{"(VersionIndex).Index", Method, 24},
+{"(VersionIndex).IsHidden", Method, 24},
 {"ARM_MAGIC_TRAMP_NUMBER", Const, 0},
 {"COMPRESS_HIOS", Const, 6},
 {"COMPRESS_HIPROC", Const, 6},
@@ -3834,6 +3836,7 @@ var PackageSymbols = map[string][]Symbol{
 {"SymType", Type, 0},
 {"SymVis", Type, 0},
 {"Symbol", Type, 0},
+{"Symbol.HasVersion", Field, 24},
 {"Symbol.Info", Field, 0},
 {"Symbol.Library", Field, 13},
 {"Symbol.Name", Field, 0},
@@ -3843,18 +3846,12 @@ var PackageSymbols = map[string][]Symbol{
 {"Symbol.Value", Field, 0},
 {"Symbol.Version", Field, 13},
 {"Symbol.VersionIndex", Field, 24},
-{"Symbol.VersionScope", Field, 24},
-{"SymbolVersionScope", Type, 24},
 {"Type", Type, 0},
 {"VER_FLG_BASE", Const, 24},
 {"VER_FLG_INFO", Const, 24},
 {"VER_FLG_WEAK", Const, 24},
 {"Version", Type, 0},
-{"VersionScopeGlobal", Const, 24},
-{"VersionScopeHidden", Const, 24},
-{"VersionScopeLocal", Const, 24},
-{"VersionScopeNone", Const, 24},
-{"VersionScopeSpecific", Const, 24},
+{"VersionIndex", Type, 24},
 },
 "debug/gosym": {
 {"(*DecodingError).Error", Method, 0},
@@ -7122,6 +7119,7 @@ var PackageSymbols = map[string][]Symbol{
 {"FormatFileInfo", Func, 21},
 {"Glob", Func, 16},
 {"GlobFS", Type, 16},
+{"Lstat", Func, 25},
 {"ModeAppend", Const, 16},
 {"ModeCharDevice", Const, 16},
 {"ModeDevice", Const, 16},
@@ -7146,6 +7144,8 @@ var PackageSymbols = map[string][]Symbol{
 {"ReadDirFile", Type, 16},
 {"ReadFile", Func, 16},
 {"ReadFileFS", Type, 16},
+{"ReadLink", Func, 25},
+{"ReadLinkFS", Type, 25},
 {"SkipAll", Var, 20},
 {"SkipDir", Var, 16},
 {"Stat", Func, 16},
@@ -9149,6 +9149,8 @@ var PackageSymbols = map[string][]Symbol{
 {"(*ProcessState).SysUsage", Method, 0},
 {"(*ProcessState).SystemTime", Method, 0},
 {"(*ProcessState).UserTime", Method, 0},
+{"(*Root).Chmod", Method, 25},
+{"(*Root).Chown", Method, 25},
 {"(*Root).Close", Method, 24},
 {"(*Root).Create", Method, 24},
 {"(*Root).FS", Method, 24},
@@ -16757,9 +16759,11 @@ var PackageSymbols = map[string][]Symbol{
 },
 "testing/fstest": {
 {"(MapFS).Glob", Method, 16},
+{"(MapFS).Lstat", Method, 25},
 {"(MapFS).Open", Method, 16},
 {"(MapFS).ReadDir", Method, 16},
 {"(MapFS).ReadFile", Method, 16},
+{"(MapFS).ReadLink", Method, 25},
 {"(MapFS).Stat", Method, 16},
 {"(MapFS).Sub", Method, 16},
 {"MapFS", Type, 16},
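The hunks above track recent std API churn: debug/elf's draft VersionScope API gives way to VersionIndex and the VER_FLG constants, and new Go 1.25 symbols land in io/fs, os.Root, and testing/fstest. PackageSymbols maps each std package to its exported symbols and the minor Go version that introduced them; judging from the literal entries, each record carries a name, a kind, and a version, so a lookup would sketch out roughly as follows (the field names Name, Kind, and Version are assumed from the entries, and the package is internal, so this fragment is illustrative only):

// Sketch: report when io/fs gained ReadLinkFS.
for _, sym := range stdlib.PackageSymbols["io/fs"] {
	if sym.Name == "ReadLinkFS" {
		fmt.Printf("io/fs.%s (%v) was added in go1.%d\n", sym.Name, sym.Kind, sym.Version)
	}
}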
2 vendor/golang.org/x/tools/internal/stdlib/stdlib.go generated vendored
@@ -6,7 +6,7 @@
 
 // Package stdlib provides a table of all exported symbols in the
 // standard library, along with the version at which they first
-// appeared.
+// appeared. It also provides the import graph of std packages.
 package stdlib
 
 import (
2 vendor/golang.org/x/tools/internal/typeparams/normalize.go generated vendored
@@ -120,7 +120,7 @@ type termSet struct {
 terms termlist
 }
 
-func indentf(depth int, format string, args ...interface{}) {
+func indentf(depth int, format string, args ...any) {
 fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
 }
6 vendor/golang.org/x/tools/internal/typesinternal/types.go generated vendored
@@ -32,12 +32,14 @@ func SetUsesCgo(conf *types.Config) bool {
 return true
 }
 
-// ReadGo116ErrorData extracts additional information from types.Error values
+// ErrorCodeStartEnd extracts additional information from types.Error values
 // generated by Go version 1.16 and later: the error code, start position, and
 // end position. If all positions are valid, start <= err.Pos <= end.
 //
 // If the data could not be read, the final result parameter will be false.
-func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) {
+//
+// TODO(adonovan): eliminate start/end when proposal #71803 is accepted.
+func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) {
 var data [3]int
 // By coincidence all of these fields are ints, which simplifies things.
 v := reflect.ValueOf(err)
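The renamed ErrorCodeStartEnd (formerly ReadGo116ErrorData) pulls three int-typed fields out of a types.Error by reflection. A minimal sketch of that pattern on a hypothetical struct — fakeError and its field names are invented here, standing in for the int fields the vendored code reads from types.Error:

package main

import (
	"fmt"
	"reflect"
)

// fakeError stands in for types.Error: three int fields read by name.
type fakeError struct {
	Code, Start, End int
}

func main() {
	v := reflect.ValueOf(fakeError{Code: 7, Start: 10, End: 20})
	var data [3]int
	for i, name := range []string{"Code", "Start", "End"} {
		f := v.FieldByName(name)
		if !f.IsValid() || f.Kind() != reflect.Int {
			fmt.Println("missing field:", name)
			return
		}
		data[i] = int(f.Int()) // all three fields are ints, as in the vendored code
	}
	fmt.Println(data) // [7 10 20]
}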
Some files were not shown because too many files have changed in this diff.