Mirror of https://github.com/opencloud-eu/opencloud.git (synced 2025-12-24 14:50:39 -05:00)

Compare commits: 5 commits, runTestsIn...runTestInC
| Author | SHA1 | Date |
|---|---|---|
| | e362f04093 | |
| | 9f01bf1af3 | |
| | 217a84b360 | |
| | e40d7f4246 | |
| | 827a053ac0 | |
@@ -1,3 +1,3 @@
# The test runner source for UI tests
WEB_COMMITID=4a2f3a1d14009676a3a9dfef536ed4fd3e7f4c21
WEB_COMMITID=74c8df4f64d9bf957a0652fb92e01529efa3c0b3
WEB_BRANCH=main
.woodpecker.star (705 changed lines)
File diff suppressed because it is too large.
go.mod (23 changed lines)
@@ -13,7 +13,7 @@ require (
	github.com/beevik/etree v1.5.0
	github.com/blevesearch/bleve/v2 v2.4.4
	github.com/cenkalti/backoff v2.2.1+incompatible
	github.com/coreos/go-oidc/v3 v3.13.0
	github.com/coreos/go-oidc/v3 v3.12.0
	github.com/cs3org/go-cs3apis v0.0.0-20241105092511-3ad35d174fc1
	github.com/davidbyttow/govips/v2 v2.16.0
	github.com/dhowden/tag v0.0.0-20240417053706-3d75831295e8
@@ -62,22 +62,22 @@ require (
	github.com/onsi/ginkgo v1.16.5
	github.com/onsi/ginkgo/v2 v2.23.0
	github.com/onsi/gomega v1.36.2
	github.com/open-policy-agent/opa v1.2.0
	github.com/opencloud-eu/reva/v2 v2.28.1-0.20250318145617-dd5b9b6fb606
	github.com/open-policy-agent/opa v1.1.0
	github.com/opencloud-eu/reva/v2 v2.27.3-0.20250314084055-d2fcfe6b3445
	github.com/orcaman/concurrent-map v1.0.0
	github.com/owncloud/libre-graph-api-go v1.0.5-0.20240829135935-80dc00d6f5ea
	github.com/pkg/errors v0.9.1
	github.com/pkg/xattr v0.4.10
	github.com/prometheus/client_golang v1.21.1
	github.com/r3labs/sse/v2 v2.10.0
	github.com/riandyrn/otelchi v0.12.1
	github.com/riandyrn/otelchi v0.12.0
	github.com/rogpeppe/go-internal v1.14.1
	github.com/rs/cors v1.11.1
	github.com/rs/zerolog v1.33.0
	github.com/shamaton/msgpack/v2 v2.2.3
	github.com/shamaton/msgpack/v2 v2.2.2
	github.com/sirupsen/logrus v1.9.3
	github.com/spf13/afero v1.12.0
	github.com/spf13/cobra v1.9.1
	github.com/spf13/cobra v1.8.1
	github.com/stretchr/testify v1.10.0
	github.com/test-go/testify v1.1.4
	github.com/thejerf/suture/v4 v4.0.6
@@ -88,9 +88,9 @@ require (
	github.com/xhit/go-simple-mail/v2 v2.16.0
	go-micro.dev/v4 v4.11.0
	go.etcd.io/bbolt v1.4.0
	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0
	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0
	go.opentelemetry.io/contrib/zpages v0.60.0
	go.opentelemetry.io/contrib/zpages v0.57.0
	go.opentelemetry.io/otel v1.35.0
	go.opentelemetry.io/otel/exporters/jaeger v1.17.0
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0
@@ -98,7 +98,7 @@ require (
	go.opentelemetry.io/otel/trace v1.35.0
	golang.org/x/crypto v0.36.0
	golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac
	golang.org/x/image v0.25.0
	golang.org/x/image v0.24.0
	golang.org/x/net v0.37.0
	golang.org/x/oauth2 v0.28.0
	golang.org/x/sync v0.12.0
@@ -120,9 +120,10 @@ require (
	github.com/Masterminds/goutils v1.1.1 // indirect
	github.com/Masterminds/sprig v2.22.0+incompatible // indirect
	github.com/Microsoft/go-winio v0.6.2 // indirect
	github.com/OneOfOne/xxhash v1.2.8 // indirect
	github.com/ProtonMail/go-crypto v1.1.5 // indirect
	github.com/RoaringBitmap/roaring v1.9.3 // indirect
	github.com/agnivade/levenshtein v1.2.1 // indirect
	github.com/agnivade/levenshtein v1.2.0 // indirect
	github.com/ajg/form v1.5.1 // indirect
	github.com/alexedwards/argon2id v1.0.0 // indirect
	github.com/amoghe/go-crypt v0.0.0-20220222110647-20eada5f5964 // indirect
@@ -159,7 +160,7 @@ require (
	github.com/coreos/go-semver v0.3.0 // indirect
	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
	github.com/cornelk/hashmap v1.0.8 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
	github.com/crewjam/httperr v0.2.0 // indirect
	github.com/crewjam/saml v0.4.14 // indirect
	github.com/cyphar/filepath-securejoin v0.3.6 // indirect
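The two opencloud-eu/reva pins in this diff are Go pseudo-versions (vX.Y.Z-0.&lt;UTC commit time&gt;-&lt;commit hash&gt;), so they still order correctly under semver. A minimal sketch comparing the two pins, assuming the golang.org/x/mod/semver helper package (tooling assumption, not part of this repo):

```go
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	// The two opencloud-eu/reva pseudo-versions from the go.mod hunks above.
	a := "v2.28.1-0.20250318145617-dd5b9b6fb606"
	b := "v2.27.3-0.20250314084055-d2fcfe6b3445"
	// semver.Compare returns +1 when a sorts after b, i.e. a is the newer pin.
	fmt.Println(semver.Compare(a, b)) // 1
}
```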
go.sum (47 changed lines)
@@ -84,6 +84,8 @@ github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA=
github.com/Nerzal/gocloak/v13 v13.9.0 h1:YWsJsdM5b0yhM2Ba3MLydiOlujkBry4TtdzfIzSVZhw=
github.com/Nerzal/gocloak/v13 v13.9.0/go.mod h1:YYuDcXZ7K2zKECyVP7pPqjKxx2AzYSpKDj8d6GuyM10=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/OpenDNS/vegadns2client v0.0.0-20180418235048-a3fa4a771d87/go.mod h1:iGLljf5n9GjT6kc0HBvyI1nOKnGQbNB66VzSNbK5iks=
github.com/ProtonMail/go-crypto v1.1.5 h1:eoAQfK2dwL+tFSFpr7TbOaPNUbPiJj4fLYwwGE1FQO4=
github.com/ProtonMail/go-crypto v1.1.5/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
@@ -91,8 +93,8 @@ github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4
github.com/RoaringBitmap/roaring v1.9.3/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM=
github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY=
github.com/agnivade/levenshtein v1.2.0/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU=
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
github.com/akamai/AkamaiOPEN-edgegrid-golang v1.1.0/go.mod h1:kX6YddBkXqqywAe8c9LyvgTCyFuZCTMF4cRPQhc3Fy8=
@@ -220,8 +222,8 @@ github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3h
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-oidc/v3 v3.13.0 h1:M66zd0pcc5VxvBNM4pB331Wrsanby+QomQYjN8HamW8=
github.com/coreos/go-oidc/v3 v3.13.0/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmrfah6hnSYEU=
github.com/coreos/go-oidc/v3 v3.12.0 h1:sJk+8G2qq94rDI6ehZ71Bol3oUHy63qNYmkiSjrc/Jo=
github.com/coreos/go-oidc/v3 v3.12.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
@@ -233,8 +235,9 @@ github.com/cornelk/hashmap v1.0.8/go.mod h1:RfZb7JO3RviW/rT6emczVuC/oxpdz4UsSB2L
github.com/cpu/goacmedns v0.1.1/go.mod h1:MuaouqEhPAHxsbqjgnck5zeghuwBP1dLnPoobeGqugQ=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/crewjam/httperr v0.2.0 h1:b2BfXR8U3AlIHwNeFFvZ+BV1LFvKLlzMjzaTnZMybNo=
github.com/crewjam/httperr v0.2.0/go.mod h1:Jlz+Sg/XqBQhyMjdDiC+GNNRzZTD7x39Gu3pglZ5oH4=
@@ -859,10 +862,10 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/open-policy-agent/opa v1.2.0 h1:88NDVCM0of1eO6Z4AFeL3utTEtMuwloFmWWU7dRV1z0=
github.com/open-policy-agent/opa v1.2.0/go.mod h1:30euUmOvuBoebRCcJ7DMF42bRBOPznvt0ACUMYDUGVY=
github.com/opencloud-eu/reva/v2 v2.28.1-0.20250318145617-dd5b9b6fb606 h1:ASUV6F7hHgar1RrnPfTQhtd+/KMeTCn7LhLzda0+HKY=
github.com/opencloud-eu/reva/v2 v2.28.1-0.20250318145617-dd5b9b6fb606/go.mod h1:XWp81Uok1opSID0HeITjvxJqdorltHVx+iJv4IlWzPo=
github.com/open-policy-agent/opa v1.1.0 h1:HMz2evdEMTyNqtdLjmu3Vyx06BmhNYAx67Yz3Ll9q2s=
github.com/open-policy-agent/opa v1.1.0/go.mod h1:T1pASQ1/vwfTa+e2fYcfpLCvWgYtqtiUv+IuA/dLPQs=
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250314084055-d2fcfe6b3445 h1:At2GtwEeNls1P60RpBa9QQridCtFQNW/pnQ5tybT8X0=
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250314084055-d2fcfe6b3445/go.mod h1:yCscyJJ7FX/HA2fexM2i1OyKSZnJgdq1vnoXgXKmnn8=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
@@ -965,8 +968,8 @@ github.com/rainycape/memcache v0.0.0-20150622160815-1031fa0ce2f2/go.mod h1:7tZKc
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/riandyrn/otelchi v0.12.1 h1:FdRKK3/RgZ/T+d+qTH5Uw3MFx0KwRF38SkdfTMMq/m8=
github.com/riandyrn/otelchi v0.12.1/go.mod h1:weZZeUJURvtCcbWsdb7Y6F8KFZGedJlSrgUjq9VirV8=
github.com/riandyrn/otelchi v0.12.0 h1:7aXphKyzut8849DDb/0LWyCPq4mfnikpggEmmW3b38U=
github.com/riandyrn/otelchi v0.12.0/go.mod h1:weZZeUJURvtCcbWsdb7Y6F8KFZGedJlSrgUjq9VirV8=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -1002,8 +1005,8 @@ github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/sethvargo/go-password v0.3.1 h1:WqrLTjo7X6AcVYfC6R7GtSyuUQR9hGyAj/f1PYQZCJU=
github.com/sethvargo/go-password v0.3.1/go.mod h1:rXofC1zT54N7R8K/h1WDUdkf9BOx5OptoxrMBcrXzvs=
github.com/shamaton/msgpack/v2 v2.2.3 h1:uDOHmxQySlvlUYfQwdjxyybAOzjlQsD1Vjy+4jmO9NM=
github.com/shamaton/msgpack/v2 v2.2.3/go.mod h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI=
github.com/shamaton/msgpack/v2 v2.2.2 h1:GOIg0c9LV04VwzOOqZSrmsv/JzjNOOMxnS/HvOHGdgs=
github.com/shamaton/msgpack/v2 v2.2.2/go.mod h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs=
@@ -1037,8 +1040,8 @@ github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -1158,12 +1161,12 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
go.opentelemetry.io/contrib/zpages v0.60.0 h1:wOM9ie1Hz4H88L9KE6GrGbKJhfm+8F1NfW/Y3q9Xt+8=
go.opentelemetry.io/contrib/zpages v0.60.0/go.mod h1:xqfToSRGh2MYUsfyErNz8jnNDPlnpZqWM/y6Z2Cx7xw=
go.opentelemetry.io/contrib/zpages v0.57.0 h1:mHFZlTkyrUJcuBhpytPSaVPiVkqri96RKUDk01d83eQ=
go.opentelemetry.io/contrib/zpages v0.57.0/go.mod h1:u/SScNsxj6TacMBA6KCJZjXVC1uwkdVgLFyHrOe0x9M=
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4=
@@ -1239,8 +1242,8 @@ golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac/go.mod h1:hH+7mtFmImwwcMvScy
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E=
golang.org/x/image v0.25.0 h1:Y6uW6rH1y5y/LK1J8BPWZtr6yZ7hrsy6hFrXjgsc2fQ=
golang.org/x/image v0.25.0/go.mod h1:tCAmOEGthTtkalusGp1g3xa2gke8J6c2N565dTyl9Rs=
golang.org/x/image v0.24.0 h1:AN7zRgVsbvmTfNyqIbbOraYL8mSwcKncEj8ofjgzcMQ=
golang.org/x/image v0.24.0/go.mod h1:4b/ITuLfqYq1hqZcjofwctIhi7sZh2WaCjvsBNjjya8=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -16,7 +16,7 @@ var (
	// LatestTag is the latest released version plus the dev meta version.
	// Will be overwritten by the release pipeline
	// Needs a manual change for every tagged release
	LatestTag = "1.1.0+dev"
	LatestTag = "1.1.0-alpha.1+dev"

	// Date indicates the build date.
	// This has been removed, it looks like you can only replace static strings with recent go versions
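The comments in this hunk describe the release mechanics: LatestTag has to stay a static package-level string so the Go linker's -X flag can overwrite it at build time, and -X cannot inject computed values, which is why the Date variable mentioned above was dropped. A minimal sketch of the pattern (the import path in the comment is hypothetical):

```go
// Package version — a sketch of the static-string pattern used above.
package version

// LatestTag must be a plain string literal: the release pipeline
// overwrites it at link time with something like
//
//	go build -ldflags "-X example.com/opencloud/version.LatestTag=v1.1.0"
//
// -X can only replace package-level string variables that are
// uninitialized or set to a constant, so a computed build date
// cannot be injected this way.
var LatestTag = "1.1.0-alpha.1+dev"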
@@ -34,6 +34,7 @@ type Config struct {
	EnableFederatedSharingIncoming bool `yaml:"enable_federated_sharing_incoming" env:"OC_ENABLE_OCM;FRONTEND_ENABLE_FEDERATED_SHARING_INCOMING" desc:"Changing this value is NOT supported. Enables support for incoming federated sharing for clients. The backend behaviour is not changed." introductionVersion:"1.0.0"`
	EnableFederatedSharingOutgoing bool `yaml:"enable_federated_sharing_outgoing" env:"OC_ENABLE_OCM;FRONTEND_ENABLE_FEDERATED_SHARING_OUTGOING" desc:"Changing this value is NOT supported. Enables support for outgoing federated sharing for clients. The backend behaviour is not changed." introductionVersion:"1.0.0"`
	SearchMinLength int `yaml:"search_min_length" env:"FRONTEND_SEARCH_MIN_LENGTH" desc:"Minimum number of characters to enter before a client should start a search for Share receivers. This setting can be used to customize the user experience if e.g. too many results are displayed." introductionVersion:"1.0.0"`
	Edition string `yaml:"edition" env:"OC_EDITION;FRONTEND_EDITION" desc:"Edition of OpenCloud. Used for branding purposes." introductionVersion:"1.0.0"`
	DisableSSE bool `yaml:"disable_sse" env:"OC_DISABLE_SSE;FRONTEND_DISABLE_SSE" desc:"When set to true, clients are informed that the Server-Sent Events endpoint is not accessible." introductionVersion:"1.0.0"`
	DefaultLinkPermissions int `yaml:"default_link_permissions" env:"FRONTEND_DEFAULT_LINK_PERMISSIONS" desc:"Defines the default permissions a link is being created with. Possible values are 0 (= internal link, for instance members only) and 1 (= public link with viewer permissions). Defaults to 1." introductionVersion:"1.0.0"`
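Each field in this struct carries an env tag that may list several variable names separated by ';' (a generic OC_* name plus a service-specific FRONTEND_* one). A minimal, self-contained sketch of how such tags can be resolved — illustrative only, not the project's actual config loader, and the precedence order shown (later names override earlier ones) is an assumption:

```go
package main

import (
	"fmt"
	"os"
	"reflect"
	"strings"
)

// Config mirrors one field of the struct above; the env tag lists
// several candidate variable names separated by ';'.
type Config struct {
	Edition string `yaml:"edition" env:"OC_EDITION;FRONTEND_EDITION"`
}

// applyEnv tries the listed names left to right, so a later
// (more specific) variable overrides an earlier one.
func applyEnv(cfg *Config) {
	v := reflect.ValueOf(cfg).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag.Get("env")
		if tag == "" {
			continue
		}
		for _, name := range strings.Split(tag, ";") {
			if val, ok := os.LookupEnv(name); ok {
				v.Field(i).SetString(val) // sketch: string fields only
			}
		}
	}
}

func main() {
	os.Setenv("OC_EDITION", "Enterprise")
	os.Setenv("FRONTEND_EDITION", "Community")
	cfg := &Config{}
	applyEnv(cfg)
	fmt.Println(cfg.Edition) // Community: the service-specific name wins
}
```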
@@ -87,6 +87,7 @@ func DefaultConfig() *config.Config {
		DefaultUploadProtocol: "tus",
		DefaultLinkPermissions: 1,
		SearchMinLength: 3,
		Edition: "Community",
		Checksums: config.Checksums{
			SupportedTypes: []string{"sha1", "md5", "adler32"},
			PreferredUploadType: "sha1",
@@ -208,6 +208,7 @@ func FrontendConfigFromStruct(cfg *config.Config, logger log.Logger) (map[string
		"needsDbUpgrade": false,
		"version": version.Legacy,
		"versionstring": version.LegacyString,
		"edition": cfg.Edition,
		"productname": "OpenCloud",
		"product": "OpenCloud",
		"productversion": version.GetString(),
@@ -338,7 +339,7 @@ func FrontendConfigFromStruct(cfg *config.Config, logger log.Logger) (map[string
		},
		"version": map[string]interface{}{
			"product": "OpenCloud",
			"edition": "",
			"edition": "Community",
			"major": version.ParsedLegacy().Major(),
			"minor": version.ParsedLegacy().Minor(),
			"micro": version.ParsedLegacy().Patch(),
@@ -105,11 +105,11 @@
		"web-vitals": "^3.5.2"
	},
	"devDependencies": {
		"@babel/core": "7.26.10",
		"@babel/core": "7.26.9",
		"@typescript-eslint/eslint-plugin": "^4.33.0",
		"@typescript-eslint/parser": "^4.33.0",
		"babel-eslint": "^10.1.0",
		"babel-loader": "10.0.0",
		"babel-loader": "9.2.1",
		"babel-plugin-named-asset-import": "^0.3.8",
		"babel-preset-react-app": "^10.1.0",
		"case-sensitive-paths-webpack-plugin": "2.4.0",
@@ -60,14 +60,13 @@ type Asset struct {
}

type Client struct {
	ID string `yaml:"id"`
	Name string `yaml:"name"`
	Trusted bool `yaml:"trusted"`
	Secret string `yaml:"secret"`
	RedirectURIs []string `yaml:"redirect_uris"`
	PostLogoutRedirectURIs []string `yaml:"post_logout_redirect_uris"`
	Origins []string `yaml:"origins"`
	ApplicationType string `yaml:"application_type"`
	ID string `yaml:"id"`
	Name string `yaml:"name"`
	Trusted bool `yaml:"trusted"`
	Secret string `yaml:"secret"`
	RedirectURIs []string `yaml:"redirect_uris"`
	Origins []string `yaml:"origins"`
	ApplicationType string `yaml:"application_type"`
}

type Settings struct {
@@ -101,9 +101,6 @@ func DefaultConfig() *config.Config {
				RedirectURIs: []string{
					"oc://android.opencloud.eu",
				},
				PostLogoutRedirectURIs: []string{
					"oc://android.opencloud.eu",
				},
			},
			{
				ID: "OpenCloudIOS",
@@ -112,9 +109,6 @@ func DefaultConfig() *config.Config {
				RedirectURIs: []string{
					"oc://ios.opencloud.eu",
				},
				PostLogoutRedirectURIs: []string{
					"oc://ios.opencloud.eu",
				},
			},
		},
		Ldap: config.Ldap{
services/idp/pnpm-lock.yaml (generated, 998 changed lines)
File diff suppressed because it is too large.
@@ -77,6 +77,7 @@ func Server(cfg *config.Config) *cli.Command {
	ocdav.Product(cfg.Status.Product),
	ocdav.Version(cfg.Status.Version),
	ocdav.VersionString(cfg.Status.VersionString),
	ocdav.Edition(cfg.Status.Edition),
	ocdav.MachineAuthAPIKey(cfg.MachineAuthAPIKey),
	ocdav.Broker(broker.NoOp{}),
	// ocdav.FavoriteManager() // FIXME needs a proper persistence implementation https://github.com/owncloud/ocis/issues/1228
@@ -81,4 +81,5 @@ type Status struct {
	Product string
	ProductName string
	ProductVersion string
	Edition string `yaml:"edition" env:"OC_EDITION;OCDAV_EDITION" desc:"Edition of OpenCloud. Used for branding purposes." introductionVersion:"1.0.0"`
}
@@ -92,6 +92,7 @@ func DefaultConfig() *config.Config {
			ProductVersion: version.GetString(),
			Product: "OpenCloud",
			ProductName: "OpenCloud",
			Edition: "Community",
		},
	}
}
@@ -194,21 +194,14 @@ type OwnCloudSQLDriver struct {
// PosixDriver is the storage driver configuration when using 'posix' storage driver
type PosixDriver struct {
	// Root is the absolute path to the location of the data
	Root string `yaml:"root" env:"STORAGE_USERS_POSIX_ROOT" desc:"The directory where the filesystem storage will store its data. If not defined, the root directory derives from $OC_BASE_DATA_PATH/storage/users." introductionVersion:"1.0.0"`
	Propagator string `yaml:"propagator" env:"OC_DECOMPOSEDFS_PROPAGATOR;STORAGE_USERS_POSIX_PROPAGATOR" desc:"The propagator used for the posix driver. At the moment, only 'sync' is fully supported, 'async' is available as an experimental option." introductionVersion:"2.0.0"`
	AsyncPropagatorOptions AsyncPropagatorOptions `yaml:"async_propagator_options"`
	PersonalSpaceAliasTemplate string `yaml:"personalspacealias_template" env:"STORAGE_USERS_POSIX_PERSONAL_SPACE_ALIAS_TEMPLATE" desc:"Template string to construct personal space aliases." introductionVersion:"1.0.0"`
	PersonalSpacePathTemplate string `yaml:"personalspacepath_template" env:"STORAGE_USERS_POSIX_PERSONAL_SPACE_PATH_TEMPLATE" desc:"Template string to construct the paths of the personal space roots." introductionVersion:"1.0.0"`
	GeneralSpaceAliasTemplate string `yaml:"generalspacealias_template" env:"STORAGE_USERS_POSIX_GENERAL_SPACE_ALIAS_TEMPLATE" desc:"Template string to construct general space aliases." introductionVersion:"1.0.0"`
	GeneralSpacePathTemplate string `yaml:"generalspacepath_template" env:"STORAGE_USERS_POSIX_GENERAL_SPACE_PATH_TEMPLATE" desc:"Template string to construct the paths of the projects space roots." introductionVersion:"1.0.0"`
	PermissionsEndpoint string `yaml:"permissions_endpoint" env:"STORAGE_USERS_PERMISSION_ENDPOINT;STORAGE_USERS_POSIX_PERMISSIONS_ENDPOINT" desc:"Endpoint of the permissions service. The endpoints can differ for 'decomposed', 'posix' and 'decomposeds3'." introductionVersion:"1.0.0"`
	AsyncUploads bool `yaml:"async_uploads" env:"OC_ASYNC_UPLOADS" desc:"Enable asynchronous file uploads." introductionVersion:"1.0.0"`
	ScanDebounceDelay time.Duration `yaml:"scan_debounce_delay" env:"STORAGE_USERS_POSIX_SCAN_DEBOUNCE_DELAY" desc:"The time in milliseconds to wait before scanning the filesystem for changes after a change has been detected." introductionVersion:"1.0.0"`
	MaxQuota uint64 `yaml:"max_quota" env:"OC_SPACES_MAX_QUOTA;STORAGE_USERS_POSIX_MAX_QUOTA" desc:"Set a global max quota for spaces in bytes. A value of 0 equals unlimited. If not using the global OC_SPACES_MAX_QUOTA, you must define the FRONTEND_MAX_QUOTA in the frontend service." introductionVersion:"2.0.0"`
	MaxAcquireLockCycles int `yaml:"max_acquire_lock_cycles" env:"STORAGE_USERS_POSIX_MAX_ACQUIRE_LOCK_CYCLES" desc:"When trying to lock files, OpenCloud will try this amount of times to acquire the lock before failing. After each try it will wait for an increasing amount of time. Values of 0 or below will be ignored and the default value will be used." introductionVersion:"2.0.0"`
	LockCycleDurationFactor int `yaml:"lock_cycle_duration_factor" env:"STORAGE_USERS_POSIX_LOCK_CYCLE_DURATION_FACTOR" desc:"When trying to lock files, OpenCloud will multiply the cycle with this factor and use it as a millisecond timeout. Values of 0 or below will be ignored and the default value will be used." introductionVersion:"2.0.0"`
	MaxConcurrency int `yaml:"max_concurrency" env:"OC_MAX_CONCURRENCY;STORAGE_USERS_POSIX_MAX_CONCURRENCY" desc:"Maximum number of concurrent go-routines. Higher values can potentially get work done faster but will also cause more load on the system. Values of 0 or below will be ignored and the default value will be used." introductionVersion:"2.0.0"`
	DisableVersioning bool `yaml:"disable_versioning" env:"OC_DISABLE_VERSIONING" desc:"Disables versioning of files. When set to true, new uploads with the same filename will overwrite existing files instead of creating a new version." introductionVersion:"2.0.0"`
	Root string `yaml:"root" env:"STORAGE_USERS_POSIX_ROOT" desc:"The directory where the filesystem storage will store its data. If not defined, the root directory derives from $OC_BASE_DATA_PATH/storage/users." introductionVersion:"1.0.0"`
	PersonalSpaceAliasTemplate string `yaml:"personalspacealias_template" env:"STORAGE_USERS_POSIX_PERSONAL_SPACE_ALIAS_TEMPLATE" desc:"Template string to construct personal space aliases." introductionVersion:"1.0.0"`
	PersonalSpacePathTemplate string `yaml:"personalspacepath_template" env:"STORAGE_USERS_POSIX_PERSONAL_SPACE_PATH_TEMPLATE" desc:"Template string to construct the paths of the personal space roots." introductionVersion:"1.0.0"`
	GeneralSpaceAliasTemplate string `yaml:"generalspacealias_template" env:"STORAGE_USERS_POSIX_GENERAL_SPACE_ALIAS_TEMPLATE" desc:"Template string to construct general space aliases." introductionVersion:"1.0.0"`
	GeneralSpacePathTemplate string `yaml:"generalspacepath_template" env:"STORAGE_USERS_POSIX_GENERAL_SPACE_PATH_TEMPLATE" desc:"Template string to construct the paths of the projects space roots." introductionVersion:"1.0.0"`
	PermissionsEndpoint string `yaml:"permissions_endpoint" env:"STORAGE_USERS_PERMISSION_ENDPOINT;STORAGE_USERS_POSIX_PERMISSIONS_ENDPOINT" desc:"Endpoint of the permissions service. The endpoints can differ for 'decomposed', 'posix' and 'decomposeds3'." introductionVersion:"1.0.0"`
	AsyncUploads bool `yaml:"async_uploads" env:"OC_ASYNC_UPLOADS" desc:"Enable asynchronous file uploads." introductionVersion:"1.0.0"`
	ScanDebounceDelay time.Duration `yaml:"scan_debounce_delay" env:"STORAGE_USERS_POSIX_SCAN_DEBOUNCE_DELAY" desc:"The time in milliseconds to wait before scanning the filesystem for changes after a change has been detected." introductionVersion:"1.0.0"`

	UseSpaceGroups bool `yaml:"use_space_groups" env:"STORAGE_USERS_POSIX_USE_SPACE_GROUPS" desc:"Use space groups to manage permissions on spaces." introductionVersion:"1.0.0"`
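The desc for Root above documents a fallback: when STORAGE_USERS_POSIX_ROOT is unset, the root derives from $OC_BASE_DATA_PATH/storage/users. A small sketch of that resolution order (the hard-coded default path is a placeholder, not the real one):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// posixRoot resolves the storage root as the Root desc above describes:
// STORAGE_USERS_POSIX_ROOT wins, otherwise derive from OC_BASE_DATA_PATH.
func posixRoot() string {
	if root := os.Getenv("STORAGE_USERS_POSIX_ROOT"); root != "" {
		return root
	}
	base := os.Getenv("OC_BASE_DATA_PATH")
	if base == "" {
		base = "/var/lib/opencloud" // placeholder default for the sketch
	}
	return filepath.Join(base, "storage", "users")
}

func main() {
	os.Setenv("OC_BASE_DATA_PATH", "/srv/opencloud")
	fmt.Println(posixRoot()) // /srv/opencloud/storage/users
}
```

The MaxAcquireLockCycles and LockCycleDurationFactor descs describe a retry loop with a growing backoff: up to N attempts, waiting cycle × factor milliseconds after each failed try. A sketch of that behaviour as described — not the actual locking code:

```go
package main

import (
	"fmt"
	"time"
)

// acquireWithRetry attempts try() up to `cycles` times, sleeping
// cycle*factor milliseconds after each failure, as the descs above state.
func acquireWithRetry(try func() bool, cycles, factor int) bool {
	for cycle := 1; cycle <= cycles; cycle++ {
		if try() {
			return true
		}
		time.Sleep(time.Duration(cycle*factor) * time.Millisecond)
	}
	return false
}

func main() {
	attempts := 0
	ok := acquireWithRetry(func() bool {
		attempts++
		return attempts == 3 // succeeds on the third try
	}, 5, 10)
	fmt.Println(ok, attempts) // true 3
}
```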
@@ -91,7 +91,7 @@ func DefaultConfig() *config.Config {
		TransferExpires: 86400,
		UploadExpiration: 24 * 60 * 60,
		GracefulShutdownTimeout: 30,
		Driver: "posix",
		Driver: "decomposed",
		Drivers: config.Drivers{
			OwnCloudSQL: config.OwnCloudSQLDriver{
				Root: filepath.Join(defaults.BaseDataPath(), "storage", "owncloud"),
@@ -143,7 +143,7 @@ func DefaultConfig() *config.Config {
				UseSpaceGroups: false,
				Root: filepath.Join(defaults.BaseDataPath(), "storage", "users"),
				PersonalSpaceAliasTemplate: "{{.SpaceType}}/{{.User.Username | lower}}",
				PersonalSpacePathTemplate: "users/{{.User.Id.OpaqueId}}",
				PersonalSpacePathTemplate: "users/{{.User.Username}}",
				GeneralSpaceAliasTemplate: "{{.SpaceType}}/{{.SpaceName | replace \" \" \"-\" | lower}}",
				GeneralSpacePathTemplate: "projects/{{.SpaceId}}",
				PermissionsEndpoint: "eu.opencloud.api.settings",
@@ -165,7 +165,7 @@ func DefaultConfig() *config.Config {
			TTL: 24 * 60 * time.Second,
		},
		IDCache: config.IDCache{
			Store: "nats-js-kv",
			Store: "memory",
			Nodes: []string{"127.0.0.1:9233"},
			Database: "ids-storage-users",
			TTL: 24 * 60 * time.Second,
@@ -87,26 +87,15 @@ func Local(cfg *config.Config) map[string]interface{} {
// Posix is the config mapping for the Posix storage driver
func Posix(cfg *config.Config, enableFSScan bool) map[string]interface{} {
	return map[string]interface{}{
		"root": cfg.Drivers.Posix.Root,
		"personalspacepath_template": cfg.Drivers.Posix.PersonalSpacePathTemplate,
		"personalspacealias_template": cfg.Drivers.Posix.PersonalSpaceAliasTemplate,
		"generalspacepath_template": cfg.Drivers.Posix.GeneralSpacePathTemplate,
		"generalspacealias_template": cfg.Drivers.Posix.GeneralSpaceAliasTemplate,
		"permissionssvc": cfg.Drivers.Posix.PermissionsEndpoint,
		"permissionssvc_tls_mode": cfg.Commons.GRPCClientTLS.Mode,
		"treetime_accounting": true,
		"treesize_accounting": true,
		"asyncfileuploads": cfg.Drivers.Posix.AsyncUploads,
		"scan_debounce_delay": cfg.Drivers.Posix.ScanDebounceDelay,
		"max_quota": cfg.Drivers.Posix.MaxQuota,
		"disable_versioning": cfg.Drivers.Posix.DisableVersioning,
		"propagator": cfg.Drivers.Posix.Propagator,
		"async_propagator_options": map[string]interface{}{
			"propagation_delay": cfg.Drivers.Posix.AsyncPropagatorOptions.PropagationDelay,
		},
		"max_acquire_lock_cycles": cfg.Drivers.Posix.MaxAcquireLockCycles,
		"lock_cycle_duration_factor": cfg.Drivers.Posix.LockCycleDurationFactor,
		"max_concurrency": cfg.Drivers.Posix.MaxConcurrency,
		"root": cfg.Drivers.Posix.Root,
		"personalspacepath_template": cfg.Drivers.Posix.PersonalSpacePathTemplate,
		"generalspacepath_template": cfg.Drivers.Posix.GeneralSpacePathTemplate,
		"permissionssvc": cfg.Drivers.Posix.PermissionsEndpoint,
		"permissionssvc_tls_mode": cfg.Commons.GRPCClientTLS.Mode,
		"treetime_accounting": true,
		"treesize_accounting": true,
		"asyncfileuploads": cfg.Drivers.Posix.AsyncUploads,
		"scan_debounce_delay": cfg.Drivers.Posix.ScanDebounceDelay,
		"idcache": map[string]interface{}{
			"cache_store": cfg.IDCache.Store,
			"cache_nodes": cfg.IDCache.Nodes,
@@ -125,15 +114,6 @@ func Posix(cfg *config.Config, enableFSScan bool) map[string]interface{} {
			"cache_auth_username": cfg.FilemetadataCache.AuthUsername,
			"cache_auth_password": cfg.FilemetadataCache.AuthPassword,
		},
		"events": map[string]interface{}{
			"numconsumers": cfg.Events.NumConsumers,
		},
		"tokens": map[string]interface{}{
			"transfer_shared_secret": cfg.Commons.TransferSecret,
			"transfer_expires": cfg.TransferExpires,
			"download_endpoint": cfg.DataServerURL,
			"datagateway_endpoint": cfg.DataGatewayURL,
		},
		"use_space_groups": cfg.Drivers.Posix.UseSpaceGroups,
		"enable_fs_revisions": cfg.Drivers.Posix.EnableFSRevisions,
		"scan_fs": enableFSScan,
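Posix() hands the storage layer an untyped map[string]interface{}; on the receiving side such a map is typically decoded back into a typed options struct. A generic sketch of that round-trip via encoding/json — the field set is illustrative, not reva's real driver options, and this is not reva's actual decoding code:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Options mirrors a few of the keys built in Posix() above.
type Options struct {
	Root               string `json:"root"`
	TreeTimeAccounting bool   `json:"treetime_accounting"`
	ScanFS             bool   `json:"scan_fs"`
}

func main() {
	m := map[string]interface{}{
		"root":                "/srv/opencloud/storage/users",
		"treetime_accounting": true,
		"scan_fs":             false,
	}
	raw, _ := json.Marshal(m) // map -> JSON
	var o Options
	_ = json.Unmarshal(raw, &o) // JSON -> typed struct
	fmt.Printf("%+v\n", o)
}
```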
@@ -1,6 +1,6 @@
SHELL := bash
NAME := web
WEB_ASSETS_VERSION = v2.0.0
WEB_ASSETS_VERSION = v1.0.0
WEB_ASSETS_BRANCH = main

ifneq (, $(shell command -v go 2> /dev/null)) # suppress `command not found warnings` for non go targets in CI
@@ -214,6 +214,17 @@ class CapabilitiesContext implements Context {
		$this->featureContext->theHTTPStatusCodeShouldBe(200, '', $response);

		$responseXmlObject = HttpRequestHelper::getResponseXml($response, __METHOD__)->data->capabilities;
		$edition = $this->getParameterValueFromXml(
			$responseXmlObject,
			'core',
			'status@@@edition'
		);

		if (!\strlen($edition)) {
			Assert::fail(
				"Cannot get edition from core capabilities"
			);
		}

		$product = $this->getParameterValueFromXml(
			$responseXmlObject,
@@ -238,6 +249,7 @@ class CapabilitiesContext implements Context {
			);
		}

		$jsonExpectedDecoded['edition'] = $edition;
		$jsonExpectedDecoded['product'] = $product;
		$jsonExpectedDecoded['productname'] = $productName;
@@ -2042,6 +2042,17 @@ class FeatureContext extends BehatVariablesContext {
		);
	}

	/**
	 * @return string
	 */
	public function getEditionFromStatus(): string {
		$decodedResponse = $this->getJsonDecodedStatusPhp();
		if (isset($decodedResponse['edition'])) {
			return $decodedResponse['edition'];
		}
		return '';
	}

	/**
	 * @return string|null
	 */
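getEditionFromStatus() falls back to an empty string when the status.php payload omits the edition field. A sketch of the decoding it relies on, written in Go for brevity; the payload is hypothetical and the field names other than "edition" are assumptions:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical status.php response; only "edition" matters here.
	payload := []byte(`{"installed":true,"productname":"OpenCloud","edition":"Community"}`)
	var status map[string]interface{}
	if err := json.Unmarshal(payload, &status); err != nil {
		panic(err)
	}
	// Mirrors the PHP helper above: a missing key yields "".
	edition, _ := status["edition"].(string)
	fmt.Println(edition) // Community
}
```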
@@ -2271,6 +2282,14 @@ class FeatureContext extends BehatVariablesContext {
				],
				"parameter" => []
			],
			[
				"code" => "%edition%",
				"function" => [
					$this,
					"getEditionFromStatus"
				],
				"parameter" => []
			],
			[
				"code" => "%version%",
				"function" => [
@@ -153,21 +153,14 @@ _ocdav: api compatibility, return correct status code_

- [coreApiTrashbin/trashbinSharingToShares.feature:277](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L277)

#### [Uploading file with mtime gives 500 error](https://github.com/opencloud-eu/opencloud/issues/391)
#### [Uploading with the same mtime and filename causes "internal server errors"](https://github.com/owncloud/ocis/issues/10496)

- [coreApiWebdavUpload/uploadFile.feature:400](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUpload/uploadFile.feature#L400)
- [coreApiWebdavUpload/uploadFile.feature:401](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUpload/uploadFile.feature#L401)
- [coreApiWebdavUpload/uploadFile.feature:402](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUpload/uploadFile.feature#L402)
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:65](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L65)
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:66](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L66)
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:67](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L67)
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:79](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L79)
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:80](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L80)
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:81](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L81)
- [coreApiVersions/fileVersions.feature:296](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiVersions/fileVersions.feature#L296)
- [coreApiVersions/fileVersions.feature:297](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiVersions/fileVersions.feature#L297)
- [coreApiVersions/fileVersions.feature:298](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiVersions/fileVersions.feature#L298)
- [coreApiVersions/fileVersions.feature:301](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiVersions/fileVersions.feature#L301)

### Won't fix
@@ -1,179 +0,0 @@
## Scenarios from core API tests that are expected to fail with decomposed storage while running with the Graph API

### File

Basic file management like up and download, move, copy, properties, trash, versions and chunking.

#### [Custom dav properties with namespaces are rendered incorrectly](https://github.com/owncloud/ocis/issues/2140)

_ocdav: double-check the webdav property parsing when custom namespaces are used_

- [coreApiWebdavProperties/setFileProperties.feature:128](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/setFileProperties.feature#L128)
- [coreApiWebdavProperties/setFileProperties.feature:129](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/setFileProperties.feature#L129)
- [coreApiWebdavProperties/setFileProperties.feature:130](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/setFileProperties.feature#L130)

### Sync

Synchronization features like etag propagation, setting mtime and locking files

#### [Uploading an old method chunked file with checksum should fail using new DAV path](https://github.com/owncloud/ocis/issues/2323)

- [coreApiMain/checksums.feature:233](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiMain/checksums.feature#L233)
- [coreApiMain/checksums.feature:234](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiMain/checksums.feature#L234)
- [coreApiMain/checksums.feature:235](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiMain/checksums.feature#L235)

### Share

#### [d:quota-available-bytes in dprop of PROPFIND give wrong response value](https://github.com/owncloud/ocis/issues/8197)

- [coreApiWebdavProperties/getQuota.feature:57](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/getQuota.feature#L57)
- [coreApiWebdavProperties/getQuota.feature:58](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/getQuota.feature#L58)
- [coreApiWebdavProperties/getQuota.feature:59](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/getQuota.feature#L59)
- [coreApiWebdavProperties/getQuota.feature:73](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/getQuota.feature#L73)
- [coreApiWebdavProperties/getQuota.feature:74](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/getQuota.feature#L74)
- [coreApiWebdavProperties/getQuota.feature:75](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/getQuota.feature#L75)

#### [deleting a file inside a received shared folder is moved to the trash-bin of the sharer not the receiver](https://github.com/owncloud/ocis/issues/1124)

- [coreApiTrashbin/trashbinSharingToShares.feature:54](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L54)
- [coreApiTrashbin/trashbinSharingToShares.feature:55](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L55)
- [coreApiTrashbin/trashbinSharingToShares.feature:56](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L56)
- [coreApiTrashbin/trashbinSharingToShares.feature:83](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L83)
- [coreApiTrashbin/trashbinSharingToShares.feature:84](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L84)
- [coreApiTrashbin/trashbinSharingToShares.feature:85](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L85)
- [coreApiTrashbin/trashbinSharingToShares.feature:142](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L142)
- [coreApiTrashbin/trashbinSharingToShares.feature:143](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L143)
- [coreApiTrashbin/trashbinSharingToShares.feature:144](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L144)
- [coreApiTrashbin/trashbinSharingToShares.feature:202](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L202)
- [coreApiTrashbin/trashbinSharingToShares.feature:203](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L203)

### Other

API, search, favorites, config, capabilities, not existing endpoints, CORS and others

#### [sending MKCOL requests to another or non-existing user's webDav endpoints as normal user should return 404](https://github.com/owncloud/ocis/issues/5049)

_ocdav: api compatibility, return correct status code_

- [coreApiAuth/webDavMKCOLAuth.feature:42](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiAuth/webDavMKCOLAuth.feature#L42)
- [coreApiAuth/webDavMKCOLAuth.feature:53](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiAuth/webDavMKCOLAuth.feature#L53)

#### [trying to lock file of another user gives http 500](https://github.com/owncloud/ocis/issues/2176)

- [coreApiAuth/webDavLOCKAuth.feature:46](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiAuth/webDavLOCKAuth.feature#L46)
- [coreApiAuth/webDavLOCKAuth.feature:58](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiAuth/webDavLOCKAuth.feature#L58)

#### [Support for favorites](https://github.com/owncloud/ocis/issues/1228)

- [coreApiFavorites/favorites.feature:101](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L101)
- [coreApiFavorites/favorites.feature:102](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L102)
- [coreApiFavorites/favorites.feature:103](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L103)
- [coreApiFavorites/favorites.feature:124](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L124)
- [coreApiFavorites/favorites.feature:125](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L125)
- [coreApiFavorites/favorites.feature:126](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L126)
- [coreApiFavorites/favorites.feature:189](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L189)
- [coreApiFavorites/favorites.feature:190](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L190)
- [coreApiFavorites/favorites.feature:191](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L191)
- [coreApiFavorites/favorites.feature:145](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L145)
- [coreApiFavorites/favorites.feature:146](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L146)
- [coreApiFavorites/favorites.feature:147](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L147)
- [coreApiFavorites/favorites.feature:174](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L174)
- [coreApiFavorites/favorites.feature:175](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L175)
- [coreApiFavorites/favorites.feature:176](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L176)
- [coreApiFavorites/favoritesSharingToShares.feature:91](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favoritesSharingToShares.feature#L91)
- [coreApiFavorites/favoritesSharingToShares.feature:92](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favoritesSharingToShares.feature#L92)
- [coreApiFavorites/favoritesSharingToShares.feature:93](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favoritesSharingToShares.feature#L93)

#### [WWW-Authenticate header for unauthenticated requests is not clear](https://github.com/owncloud/ocis/issues/2285)

- [coreApiWebdavOperations/refuseAccess.feature:21](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavOperations/refuseAccess.feature#L21)
- [coreApiWebdavOperations/refuseAccess.feature:22](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavOperations/refuseAccess.feature#L22)

#### [PATCH request for TUS upload with wrong checksum gives incorrect response](https://github.com/owncloud/ocis/issues/1755)

- [coreApiWebdavUploadTUS/checksums.feature:74](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L74)
- [coreApiWebdavUploadTUS/checksums.feature:75](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L75)
- [coreApiWebdavUploadTUS/checksums.feature:76](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L76)
- [coreApiWebdavUploadTUS/checksums.feature:77](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L77)
- [coreApiWebdavUploadTUS/checksums.feature:79](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L79)
- [coreApiWebdavUploadTUS/checksums.feature:78](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L78)
- [coreApiWebdavUploadTUS/checksums.feature:147](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L147)
- [coreApiWebdavUploadTUS/checksums.feature:148](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L148)
- [coreApiWebdavUploadTUS/checksums.feature:149](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L149)
- [coreApiWebdavUploadTUS/checksums.feature:192](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L192)
- [coreApiWebdavUploadTUS/checksums.feature:193](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L193)
- [coreApiWebdavUploadTUS/checksums.feature:194](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L194)
- [coreApiWebdavUploadTUS/checksums.feature:195](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L195)
- [coreApiWebdavUploadTUS/checksums.feature:196](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L196)
- [coreApiWebdavUploadTUS/checksums.feature:197](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L197)
- [coreApiWebdavUploadTUS/checksums.feature:240](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L240)
- [coreApiWebdavUploadTUS/checksums.feature:241](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L241)
- [coreApiWebdavUploadTUS/checksums.feature:242](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L242)
- [coreApiWebdavUploadTUS/checksums.feature:243](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L243)
- [coreApiWebdavUploadTUS/checksums.feature:244](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L244)
- [coreApiWebdavUploadTUS/checksums.feature:245](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L245)
- [coreApiWebdavUploadTUS/uploadToShare.feature:255](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadToShare.feature#L255)
- [coreApiWebdavUploadTUS/uploadToShare.feature:256](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadToShare.feature#L256)
- [coreApiWebdavUploadTUS/uploadToShare.feature:279](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadToShare.feature#L279)
- [coreApiWebdavUploadTUS/uploadToShare.feature:280](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadToShare.feature#L280)
- [coreApiWebdavUploadTUS/uploadToShare.feature:375](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadToShare.feature#L375)
- [coreApiWebdavUploadTUS/uploadToShare.feature:376](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadToShare.feature#L376)

#### [Renaming resource to banned name is allowed in spaces webdav](https://github.com/owncloud/ocis/issues/3099)

- [coreApiWebdavMove2/moveFile.feature:143](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L143)
- [coreApiWebdavMove1/moveFolder.feature:36](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove1/moveFolder.feature#L36)
- [coreApiWebdavMove1/moveFolder.feature:50](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove1/moveFolder.feature#L50)
- [coreApiWebdavMove1/moveFolder.feature:64](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove1/moveFolder.feature#L64)

#### [Trying to delete other user's trashbin item returns 409 for spaces path instead of 404](https://github.com/owncloud/ocis/issues/9791)

- [coreApiTrashbin/trashbinDelete.feature:92](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L92)

#### [MOVE a file into same folder with same name returns 404 instead of 403](https://github.com/owncloud/ocis/issues/1976)

- [coreApiWebdavMove2/moveFile.feature:100](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L100)
- [coreApiWebdavMove2/moveFile.feature:101](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L101)
- [coreApiWebdavMove2/moveFile.feature:102](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L102)
- [coreApiWebdavMove1/moveFolder.feature:217](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove1/moveFolder.feature#L217)
- [coreApiWebdavMove1/moveFolder.feature:218](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove1/moveFolder.feature#L218)
- [coreApiWebdavMove1/moveFolder.feature:219](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove1/moveFolder.feature#L219)
- [coreApiWebdavMove2/moveShareOnOpencloud.feature:334](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove2/moveShareOnOpencloud.feature#L334)
- [coreApiWebdavMove2/moveShareOnOpencloud.feature:337](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove2/moveShareOnOpencloud.feature#L337)
- [coreApiWebdavMove2/moveShareOnOpencloud.feature:340](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove2/moveShareOnOpencloud.feature#L340)

#### [COPY file/folder to same name is possible (but 500 code error for folder with spaces path)](https://github.com/owncloud/ocis/issues/8711)

- [coreApiSharePublicLink2/copyFromPublicLink.feature:198](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiSharePublicLink2/copyFromPublicLink.feature#L198)
- [coreApiWebdavProperties/copyFile.feature:1094](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/copyFile.feature#L1094)
- [coreApiWebdavProperties/copyFile.feature:1095](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/copyFile.feature#L1095)
- [coreApiWebdavProperties/copyFile.feature:1096](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/copyFile.feature#L1096)

#### [Trying to restore personal file to file of share received folder returns 403 but the share file is deleted (new dav path)](https://github.com/owncloud/ocis/issues/10356)

- [coreApiTrashbin/trashbinSharingToShares.feature:277](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L277)

#### [Uploading file with mtime gives 500 error](https://github.com/opencloud-eu/opencloud/issues/391)

- [coreApiWebdavUpload/uploadFile.feature:400](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUpload/uploadFile.feature#L400)
- [coreApiWebdavUpload/uploadFile.feature:401](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUpload/uploadFile.feature#L401)
- [coreApiWebdavUpload/uploadFile.feature:402](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUpload/uploadFile.feature#L402)
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:65](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L65)
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:66](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L66)
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:67](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L67)
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:79](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L79)
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:80](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L80)
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:81](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L81)
- [coreApiVersions/fileVersions.feature:296](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiVersions/fileVersions.feature#L296)
- [coreApiVersions/fileVersions.feature:297](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiVersions/fileVersions.feature#L297)
- [coreApiVersions/fileVersions.feature:298](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiVersions/fileVersions.feature#L298)
- [coreApiVersions/fileVersions.feature:301](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiVersions/fileVersions.feature#L301)
|
||||
|
||||
### Won't fix
|
||||
|
||||
Not everything needs to be implemented for opencloud.
|
||||
|
||||
- _Blacklisted ignored files are no longer required because opencloud can handle `.htaccess` files without security implications introduced by serving user provided files with apache._
|
||||
|
||||
Note: always have an empty line at the end of this file.
|
||||
The bash script that processes this file requires that the last line has a newline on the end.
|
||||
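A minimal sketch of why that trailing newline matters, assuming the processing script iterates over this file with a plain `while read` loop (an assumption for illustration; the actual script is not shown here):

```bash
#!/bin/bash
# Hypothetical reader loop; the file name is illustrative.
# `read` returns a non-zero status at EOF even when it has already filled
# the variable, so a final line without a terminating newline would be
# silently skipped by this loop.
while IFS= read -r line; do
  echo "processing: $line"
done < expected-failures.md
```

Ending the file with a newline (and keeping the trailing empty line mentioned above) guarantees the last entry is processed.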
@@ -1,250 +0,0 @@
## Scenarios from OpenCloud API tests that are expected to fail with decomposed storage

#### [Downloading the archive of a resource (file | folder) using the resource path is not possible](https://github.com/owncloud/ocis/issues/4637)

- [apiArchiver/downloadByPath.feature:25](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiArchiver/downloadByPath.feature#L25)
- [apiArchiver/downloadByPath.feature:26](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiArchiver/downloadByPath.feature#L26)
- [apiArchiver/downloadByPath.feature:43](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiArchiver/downloadByPath.feature#L43)
- [apiArchiver/downloadByPath.feature:44](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiArchiver/downloadByPath.feature#L44)
- [apiArchiver/downloadByPath.feature:47](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiArchiver/downloadByPath.feature#L47)
- [apiArchiver/downloadByPath.feature:73](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiArchiver/downloadByPath.feature#L73)
- [apiArchiver/downloadByPath.feature:171](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiArchiver/downloadByPath.feature#L171)
- [apiArchiver/downloadByPath.feature:172](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiArchiver/downloadByPath.feature#L172)

#### [PATCH request for TUS upload with wrong checksum gives incorrect response](https://github.com/owncloud/ocis/issues/1755)

- [apiSpacesShares/shareUploadTUS.feature:283](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpacesShares/shareUploadTUS.feature#L283)
- [apiSpacesShares/shareUploadTUS.feature:303](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpacesShares/shareUploadTUS.feature#L303)
- [apiSpacesShares/shareUploadTUS.feature:384](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpacesShares/shareUploadTUS.feature#L384)

#### [Settings service user can list other people's assignments](https://github.com/owncloud/ocis/issues/5032)

- [apiAccountsHashDifficulty/assignRole.feature:27](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAccountsHashDifficulty/assignRole.feature#L27)
- [apiAccountsHashDifficulty/assignRole.feature:28](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAccountsHashDifficulty/assignRole.feature#L28)
- [apiGraph/getAssignedRole.feature:31](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraph/getAssignedRole.feature#L31)
- [apiGraph/getAssignedRole.feature:32](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraph/getAssignedRole.feature#L32)
- [apiGraph/getAssignedRole.feature:33](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraph/getAssignedRole.feature#L33)

#### [A user can get information about another user with the Graph API](https://github.com/owncloud/ocis/issues/5125)

- [apiGraphUserGroup/getUser.feature:84](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/getUser.feature#L84)
- [apiGraphUserGroup/getUser.feature:85](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/getUser.feature#L85)
- [apiGraphUserGroup/getUser.feature:86](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/getUser.feature#L86)
- [apiGraphUserGroup/getUser.feature:628](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/getUser.feature#L628)
- [apiGraphUserGroup/getUser.feature:629](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/getUser.feature#L629)
- [apiGraphUserGroup/getUser.feature:630](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/getUser.feature#L630)
- [apiGraphUserGroup/getUser.feature:645](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/getUser.feature#L645)
- [apiGraphUserGroup/getUser.feature:646](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/getUser.feature#L646)
- [apiGraphUserGroup/getUser.feature:647](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/getUser.feature#L647)

#### [Normal user can get expanded member information of a group](https://github.com/owncloud/ocis/issues/5604)

- [apiGraphUserGroup/getGroup.feature:399](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/getGroup.feature#L399)
- [apiGraphUserGroup/getGroup.feature:400](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/getGroup.feature#L400)
- [apiGraphUserGroup/getGroup.feature:401](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/getGroup.feature#L401)
- [apiGraphUserGroup/getGroup.feature:460](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/getGroup.feature#L460)
- [apiGraphUserGroup/getGroup.feature:461](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/getGroup.feature#L461)
- [apiGraphUserGroup/getGroup.feature:462](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/getGroup.feature#L462)
- [apiGraphUserGroup/getGroup.feature:508](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/getGroup.feature#L508)
- [apiGraphUserGroup/getGroup.feature:509](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/getGroup.feature#L509)
- [apiGraphUserGroup/getGroup.feature:510](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/getGroup.feature#L510)

#### [The same user can be added to a group multiple times](https://github.com/owncloud/ocis/issues/5702)

- [apiGraphUserGroup/addUserToGroup.feature:295](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/addUserToGroup.feature#L295)

#### [Users are added to a group with a wrong host in the host-part of the user](https://github.com/owncloud/ocis/issues/5871)

- [apiGraphUserGroup/addUserToGroup.feature:379](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/addUserToGroup.feature#L379)
- [apiGraphUserGroup/addUserToGroup.feature:393](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/addUserToGroup.feature#L393)

#### [Adding the same user as multiple members in a single request results in listing the same user twice in the group](https://github.com/owncloud/ocis/issues/5855)

- [apiGraphUserGroup/addUserToGroup.feature:430](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiGraphUserGroup/addUserToGroup.feature#L430)

#### [Shared file locking is not possible using a different path](https://github.com/owncloud/ocis/issues/7599)

- [apiLocks/lockFiles.feature:185](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L185)
- [apiLocks/lockFiles.feature:186](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L186)
- [apiLocks/lockFiles.feature:187](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L187)
- [apiLocks/lockFiles.feature:309](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L309)
- [apiLocks/lockFiles.feature:310](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L310)
- [apiLocks/lockFiles.feature:311](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L311)
- [apiLocks/lockFiles.feature:364](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L364)
- [apiLocks/lockFiles.feature:365](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L365)
- [apiLocks/lockFiles.feature:366](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L366)
- [apiLocks/lockFiles.feature:367](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L367)
- [apiLocks/lockFiles.feature:368](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L368)
- [apiLocks/lockFiles.feature:369](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L369)
- [apiLocks/lockFiles.feature:399](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L399)
- [apiLocks/lockFiles.feature:400](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L400)
- [apiLocks/lockFiles.feature:401](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L401)
- [apiLocks/lockFiles.feature:402](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L402)
- [apiLocks/lockFiles.feature:403](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L403)
- [apiLocks/lockFiles.feature:404](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L404)
- [apiLocks/unlockFiles.feature:62](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L62)
- [apiLocks/unlockFiles.feature:63](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L63)
- [apiLocks/unlockFiles.feature:64](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L64)
- [apiLocks/unlockFiles.feature:171](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L171)
- [apiLocks/unlockFiles.feature:172](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L172)
- [apiLocks/unlockFiles.feature:173](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L173)
- [apiLocks/unlockFiles.feature:174](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L174)
- [apiLocks/unlockFiles.feature:175](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L175)
- [apiLocks/unlockFiles.feature:176](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L176)
- [apiLocks/unlockFiles.feature:199](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L199)
- [apiLocks/unlockFiles.feature:200](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L200)
- [apiLocks/unlockFiles.feature:201](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L201)
- [apiLocks/unlockFiles.feature:202](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L202)
- [apiLocks/unlockFiles.feature:203](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L203)
- [apiLocks/unlockFiles.feature:204](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L204)
- [apiLocks/unlockFiles.feature:227](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L227)
- [apiLocks/unlockFiles.feature:228](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L228)
- [apiLocks/unlockFiles.feature:229](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L229)
- [apiLocks/unlockFiles.feature:230](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L230)
- [apiLocks/unlockFiles.feature:231](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L231)
- [apiLocks/unlockFiles.feature:232](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L232)
#### [Folders can be locked and locking works partially](https://github.com/owncloud/ocis/issues/7641)

- [apiLocks/lockFiles.feature:443](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L443)
- [apiLocks/lockFiles.feature:444](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L444)
- [apiLocks/lockFiles.feature:445](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L445)
- [apiLocks/lockFiles.feature:446](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L446)
- [apiLocks/lockFiles.feature:447](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L447)
- [apiLocks/lockFiles.feature:448](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L448)
- [apiLocks/lockFiles.feature:417](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L417)
- [apiLocks/lockFiles.feature:418](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L418)
- [apiLocks/lockFiles.feature:419](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L419)
- [apiLocks/lockFiles.feature:420](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L420)
- [apiLocks/lockFiles.feature:421](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L421)
- [apiLocks/lockFiles.feature:422](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L422)

#### [Anonymous users can unlock a file shared to them through a public link if they get the lock token](https://github.com/owncloud/ocis/issues/7761)

- [apiLocks/unlockFiles.feature:42](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L42)
- [apiLocks/unlockFiles.feature:43](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L43)
- [apiLocks/unlockFiles.feature:44](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L44)
- [apiLocks/unlockFiles.feature:45](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L45)
- [apiLocks/unlockFiles.feature:46](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L46)
- [apiLocks/unlockFiles.feature:47](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L47)

#### [Trying to unlock a shared file with the sharer's lock token gives 500](https://github.com/owncloud/ocis/issues/7767)

- [apiLocks/unlockFiles.feature:115](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L115)
- [apiLocks/unlockFiles.feature:116](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L116)
- [apiLocks/unlockFiles.feature:117](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L117)
- [apiLocks/unlockFiles.feature:118](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L118)
- [apiLocks/unlockFiles.feature:119](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L119)
- [apiLocks/unlockFiles.feature:120](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L120)
- [apiLocks/unlockFiles.feature:143](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L143)
- [apiLocks/unlockFiles.feature:144](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L144)
- [apiLocks/unlockFiles.feature:145](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L145)
- [apiLocks/unlockFiles.feature:146](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L146)
- [apiLocks/unlockFiles.feature:147](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L147)
- [apiLocks/unlockFiles.feature:148](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L148)

#### [Anonymous user trying to lock a file shared to them through a public link gives 405](https://github.com/owncloud/ocis/issues/7790)

- [apiLocks/lockFiles.feature:532](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L532)
- [apiLocks/lockFiles.feature:533](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L533)
- [apiLocks/lockFiles.feature:534](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L534)
- [apiLocks/lockFiles.feature:535](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L535)
- [apiLocks/lockFiles.feature:554](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L554)
- [apiLocks/lockFiles.feature:555](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L555)
- [apiLocks/lockFiles.feature:556](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L556)
- [apiLocks/lockFiles.feature:557](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L557)

#### [sharee (editor role) MOVE a file by file-id into a shared sub-folder returns 502](https://github.com/owncloud/ocis/issues/7617)

- [apiSpacesDavOperation/moveByFileId.feature:368](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpacesDavOperation/moveByFileId.feature#L368)
- [apiSpacesDavOperation/moveByFileId.feature:591](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpacesDavOperation/moveByFileId.feature#L591)

#### [MOVE a file into the same folder with the same name returns 404 instead of 403](https://github.com/owncloud/ocis/issues/1976)

- [apiSpacesShares/moveSpaces.feature:69](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpacesShares/moveSpaces.feature#L69)
- [apiSpacesShares/moveSpaces.feature:70](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpacesShares/moveSpaces.feature#L70)
- [apiSpacesShares/moveSpaces.feature:416](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpacesShares/moveSpaces.feature#L416)
- [apiSpacesDavOperation/moveByFileId.feature:61](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpacesDavOperation/moveByFileId.feature#L61)
- [apiSpacesDavOperation/moveByFileId.feature:174](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpacesDavOperation/moveByFileId.feature#L174)
- [apiSpacesDavOperation/moveByFileId.feature:175](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpacesDavOperation/moveByFileId.feature#L175)
- [apiSpacesDavOperation/moveByFileId.feature:176](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpacesDavOperation/moveByFileId.feature#L176)
- [apiSpacesDavOperation/moveByFileId.feature:393](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpacesDavOperation/moveByFileId.feature#L393)

#### [OCM. admin cannot get federated users if he has no connection with them](https://github.com/owncloud/ocis/issues/9829)

- [apiOcm/searchFederationUsers.feature:429](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiOcm/searchFederationUsers.feature#L429)
- [apiOcm/searchFederationUsers.feature:601](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiOcm/searchFederationUsers.feature#L601)

#### [OCM. federated connection is not dropped when one of the users deletes the connection](https://github.com/owncloud/ocis/issues/10216)

- [apiOcm/deleteFederatedConnections.feature:21](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiOcm/deleteFederatedConnections.feature#L21)
- [apiOcm/deleteFederatedConnections.feature:67](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiOcm/deleteFederatedConnections.feature#L67)

#### [OCM. server crash after deleting a share for an ocm user](https://github.com/owncloud/ocis/issues/10213)

- [apiOcm/deleteFederatedConnections.feature:102](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiOcm/deleteFederatedConnections.feature#L102)

#### [Shares Jail PROPFIND returns different File IDs for the same item](https://github.com/owncloud/ocis/issues/9933)

- [apiSharingNg1/propfindShares.feature:149](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSharingNg1/propfindShares.feature#L149)

#### [Readiness check for some services returns a 500 status code](https://github.com/owncloud/ocis/issues/10661)

- [apiServiceAvailability/serviceAvailabilityCheck.feature:116](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiServiceAvailability/serviceAvailabilityCheck.feature#L116)
- [apiServiceAvailability/serviceAvailabilityCheck.feature:125](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiServiceAvailability/serviceAvailabilityCheck.feature#L125)

#### [Skip tests for different languages](https://github.com/opencloud-eu/opencloud/issues/183)

- [apiAntivirus/antivirus.feature:309](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L309)
- [apiAntivirus/antivirus.feature:310](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L310)
- [apiAntivirus/antivirus.feature:311](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L311)
- [apiAntivirus/antivirus.feature:312](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L312)
- [apiAntivirus/antivirus.feature:313](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L313)
- [apiAntivirus/antivirus.feature:314](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L314)
- [apiNotification/deprovisioningNotification.feature:126](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/deprovisioningNotification.feature#L126)
- [apiNotification/deprovisioningNotification.feature:127](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/deprovisioningNotification.feature#L127)
- [apiNotification/notification.feature:282](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/notification.feature#L282)
- [apiNotification/notification.feature:283](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/notification.feature#L283)
- [apiNotification/notification.feature:284](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/notification.feature#L284)
- [apiNotification/notification.feature:285](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/notification.feature#L285)
- [apiNotification/notification.feature:288](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/notification.feature#L288)
- [apiNotification/spaceNotification.feature:434](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/spaceNotification.feature#L434)
- [apiNotification/spaceNotification.feature:435](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/spaceNotification.feature#L435)
- [apiNotification/emailNotification.feature:84](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/emailNotification.feature#L84)
- [apiNotification/emailNotification.feature:117](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/emailNotification.feature#L117)
- [apiNotification/emailNotification.feature:150](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/emailNotification.feature#L150)
- [apiNotification/emailNotification.feature:205](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/emailNotification.feature#L205)
- [apiActivities/activities.feature:2598](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiActivities/activities.feature#L2598)
#### [Missing properties in REPORT response](https://github.com/owncloud/ocis/issues/9780), [d:getetag property has empty value in REPORT response](https://github.com/owncloud/ocis/issues/9783)

- [apiSearch1/search.feature:437](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L437)
- [apiSearch1/search.feature:438](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L438)
- [apiSearch1/search.feature:439](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L439)
- [apiSearch1/search.feature:465](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L465)
- [apiSearch1/search.feature:466](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L466)
- [apiSearch1/search.feature:467](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L467)

#### [No notification triggered for .zip virus file](https://github.com/opencloud-eu/opencloud/issues/382)

- [apiAntivirus/antivirus.feature:41](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L41)
- [apiAntivirus/antivirus.feature:43](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L43)
- [apiAntivirus/antivirus.feature:45](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L45)
- [apiAntivirus/antivirus.feature:69](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L69)
- [apiAntivirus/antivirus.feature:71](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L71)
- [apiAntivirus/antivirus.feature:73](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L73)
- [apiAntivirus/antivirus.feature:115](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L115)
- [apiAntivirus/antivirus.feature:117](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L117)
- [apiAntivirus/antivirus.feature:119](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L119)
- [apiAntivirus/antivirus.feature:141](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L141)
- [apiAntivirus/antivirus.feature:143](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L143)
- [apiAntivirus/antivirus.feature:145](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L145)
- [apiAntivirus/antivirus.feature:169](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L169)
- [apiAntivirus/antivirus.feature:171](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L171)
- [apiAntivirus/antivirus.feature:173](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L173)
- [apiAntivirus/antivirus.feature:199](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L199)
- [apiAntivirus/antivirus.feature:201](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L201)
- [apiAntivirus/antivirus.feature:203](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L203)
- [apiAntivirus/antivirus.feature:228](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L228)
- [apiAntivirus/antivirus.feature:253](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L253)

Note: always have an empty line at the end of this file.
The bash script that processes this file requires that the last line has a newline on the end.
@@ -36,9 +36,14 @@
- [apiSearch1/search.feature:321](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L321)
- [apiSearch1/search.feature:324](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L324)
- [apiSearch1/search.feature:356](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L356)
- [apiSearch1/search.feature:369](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L369)
- [apiSearch1/search.feature:396](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L396)
- [apiSearch1/search.feature:410](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L410)
- [apiSearch1/search.feature:437](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L437)
- [apiSearch1/search.feature:438](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L438)
- [apiSearch1/search.feature:439](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L439)
- [apiSearch1/search.feature:465](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L465)
- [apiSearch1/search.feature:466](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L466)
- [apiSearch1/search.feature:467](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch1/search.feature#L467)
- [apiSearch2/mediaTypeSearch.feature:31](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch2/mediaTypeSearch.feature#L31)
- [apiSearch2/mediaTypeSearch.feature:32](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch2/mediaTypeSearch.feature#L32)
- [apiSearch2/mediaTypeSearch.feature:33](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSearch2/mediaTypeSearch.feature#L33)
@@ -149,7 +154,7 @@
- [apiSpacesShares/shareSubItemOfSpaceViaPublicLink.feature:147](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpacesShares/shareSubItemOfSpaceViaPublicLink.feature#L147)
- [apiSharingNgLinkSharePermission/createLinkShare.feature:473](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSharingNgLinkSharePermission/createLinkShare.feature#L473)
- [apiSharingNgLinkSharePermission/createLinkShare.feature:1208](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSharingNgLinkSharePermission/createLinkShare.feature#L1208)
- [apiSharingNgLinkSharePermission/updateLinkShare.feature:204](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSharingNgLinkSharePermission/updateLinkShare.feature#L204)
- [apiSharingNgLinkSharePermission/updateLinkShare.feature:203](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSharingNgLinkSharePermission/updateLinkShare.feature#L203)
- [apiSharingNgLinkSharePermission/updateLinkShare.feature:282](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSharingNgLinkSharePermission/updateLinkShare.feature#L282)
- [apiSharingNgLinkShareRoot/updateLinkShare.feature:10](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSharingNgLinkShareRoot/updateLinkShare.feature#L10)
- [apiSharingNgLinkShareRoot/updateLinkShare.feature:42](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSharingNgLinkShareRoot/updateLinkShare.feature#L42)
@@ -188,7 +193,7 @@
- [apiSpaces/uploadSpaces.feature:95](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpaces/uploadSpaces.feature#L95)
- [apiSpaces/uploadSpaces.feature:112](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpaces/uploadSpaces.feature#L112)
- [apiSpaces/uploadSpaces.feature:129](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpaces/uploadSpaces.feature#L129)
- [apiSpacesShares/shareSpacesViaLink.feature:58](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpacesShares/shareSpacesViaLink.feature#L58)
- [apiSpacesShares/shareSpacesViaLink.feature:61](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiSpacesShares/shareSpacesViaLink.feature#L61)
- [apiDepthInfinity/propfind.feature:74](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiDepthInfinity/propfind.feature#L74)
- [apiDepthInfinity/propfind.feature:124](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiDepthInfinity/propfind.feature#L124)
- [apiLocks/lockFiles.feature:490](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/lockFiles.feature#L490)
@@ -204,11 +209,17 @@
- [apiLocks/unlockFiles.feature:322](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L322)
- [apiLocks/unlockFiles.feature:323](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiLocks/unlockFiles.feature#L323)
- [apiAntivirus/antivirus.feature:114](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L114)
- [apiAntivirus/antivirus.feature:115](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L115)
- [apiAntivirus/antivirus.feature:116](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L116)
- [apiAntivirus/antivirus.feature:117](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L117)
- [apiAntivirus/antivirus.feature:118](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L118)
- [apiAntivirus/antivirus.feature:119](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L119)
- [apiAntivirus/antivirus.feature:140](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L140)
- [apiAntivirus/antivirus.feature:141](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L141)
- [apiAntivirus/antivirus.feature:142](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L142)
- [apiAntivirus/antivirus.feature:143](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L143)
- [apiAntivirus/antivirus.feature:144](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L144)
- [apiAntivirus/antivirus.feature:145](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L145)
- [apiAntivirus/antivirus.feature:356](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L356)
- [apiAntivirus/antivirus.feature:357](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L357)
- [apiAntivirus/antivirus.feature:358](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L358)
@@ -299,8 +310,6 @@
- [coreApiWebdavOperations/propfind.feature:55](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavOperations/propfind.feature#L55)
- [coreApiWebdavOperations/propfind.feature:69](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavOperations/propfind.feature#L69)
- [coreApiWebdavUpload/uploadFile.feature:376](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUpload/uploadFile.feature#L376)
- [apiActivities/shareActivities.feature:1956](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiActivities/shareActivities.feature#L1956)
- [apiActivities/shareActivities.feature:2095](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiActivities/shareActivities.feature#L2095)

#### [Cannot create new TUS upload resource using /webdav without remote.php - returns HTML instead](https://github.com/owncloud/ocis/issues/10346)
@@ -27,7 +27,7 @@ Feature: backup consistency
    Then the command should be successful
    And the command output should contain "💚 No inconsistency found. The backup in '%storage_path%' seems to be valid."

  @issue-9498 @issue-391 @skipOnOpencloud-decomposed-Storage
  @issue-9498
  Scenario: check backup consistency after uploading file multiple times via TUS
    Given user "Alice" uploads a file "filesForUpload/textfile.txt" to "/today.txt" with mtime "today" via TUS inside of the space "Personal" using the WebDAV API
    And user "Alice" uploads a file "filesForUpload/textfile.txt" to "/today.txt" with mtime "today" via TUS inside of the space "Personal" using the WebDAV API
@@ -41,7 +41,7 @@ Feature: backup consistency
    Then the HTTP status code should be "207"
    And the number of versions should be "1"

  @issue-9498 @issue-428 @skipOnOpencloud-decomposed-Storage
  @issue-9498
  Scenario: check backup consistency after uploading a file multiple times
    Given user "Alice" has uploaded file with content "hello world" to "/textfile0.txt"
    And user "Alice" has uploaded file with content "hello world" to "/textfile0.txt"
@@ -53,4 +53,4 @@ Feature: backup consistency
    And the administrator has started the server
    When user "Alice" gets the number of versions of file "/textfile0.txt"
    Then the HTTP status code should be "207"
    And the number of versions should be "2"
    And the number of versions should be "2"
@@ -193,12 +193,17 @@ Feature: capabilities
          "status": {
            "type": "object",
            "required": [
              "edition",
              "product",
              "productname",
              "version",
              "versionstring"
            ],
            "properties": {
              "edition": {
                "type": "string",
                "enum": ["%edition%"]
              },
              "product": {
                "type": "string",
                "enum": ["%productname%"]
@@ -225,6 +230,7 @@ Feature: capabilities
            "type": "object",
            "required": [
              "string",
              "edition",
              "product"
            ],
            "properties": {
@@ -232,6 +238,10 @@ Feature: capabilities
              "type": "string",
              "enum": ["%versionstring%"]
            },
            "edition": {
              "type": "string",
              "enum": ["%edition%"]
            },
            "product": {
              "type": "string",
              "enum": ["%productname%"]

@@ -47,6 +47,7 @@ Feature: default capabilities for normal user
          "required": [
            "version",
            "versionstring",
            "edition",
            "productname"
          ],
          "properties": {
@@ -56,6 +57,9 @@ Feature: default capabilities for normal user
            "versionstring": {
              "const": "%versionstring%"
            },
            "edition": {
              "const": "%edition%"
            },
            "productname": {
              "const": "%productname%"
            }

@@ -8,5 +8,5 @@ Feature: Status
    When the administrator requests status.php
    Then the status.php response should include
      """
      {"installed":true,"maintenance":false,"needsDbUpgrade":false,"version":"$CURRENT_VERSION","versionstring":"$CURRENT_VERSION_STRING","productname":"$PRODUCTNAME","product":"$PRODUCT"}
      {"installed":true,"maintenance":false,"needsDbUpgrade":false,"version":"$CURRENT_VERSION","versionstring":"$CURRENT_VERSION_STRING","edition":"$EDITION","productname":"$PRODUCTNAME","product":"$PRODUCT"}
      """
@@ -15,17 +15,13 @@ BINGO_DIR="$ROOT_PATH/.bingo"
BINGO_HASH=$(cat "$BINGO_DIR"/* | sha256sum | cut -d ' ' -f 1)

URL="$CACHE_ENDPOINT/$CACHE_BUCKET/opencloud/go-bin/$BINGO_HASH/$2"

mc alias set s3 "$MC_HOST" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"

if mc ls --json s3/"$CACHE_BUCKET"/opencloud/go-bin/"$BINGO_HASH"/$2 | grep "\"status\":\"success\""; then
if curl --output /dev/null --silent --head --fail "$URL"; then
  echo "[INFO] Go bin cache with hash '$BINGO_HASH' exists."
  ENV="BIN_CACHE_FOUND=true\n"
  # https://discourse.drone.io/t/how-to-exit-a-pipeline-early-without-failing/3951
  # exit a pipeline early without failing
  exit 78
else
  # store the hash of the .bingo folder in the '.bingo_hash' file
  echo "$BINGO_HASH" >"$ROOT_PATH/.bingo_hash"
  echo "[INFO] Go bin cache with hash '$BINGO_HASH' does not exist."
  ENV="BIN_CACHE_FOUND=false\n"
fi

echo -e $ENV >> .env
@@ -10,15 +10,16 @@ fi

echo "Checking web version - $WEB_COMMITID in cache"

mc alias set s3 "$MC_HOST" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
URL="$CACHE_ENDPOINT/$CACHE_BUCKET/opencloud/web-test-runner/$WEB_COMMITID/$1.tar.gz"

if mc ls --json s3/"$CACHE_BUCKET"/opencloud/web-test-runner/"$WEB_COMMITID"/"$1".tar.gz | grep "\"status\":\"success\"";
echo "Checking for the web cache at '$URL'."

if curl --output /dev/null --silent --head --fail "$URL"
then
  echo "$1 cache with commit id $WEB_COMMITID is already available."
  ENV="WEB_CACHE_FOUND=true\n"
  # https://discourse.drone.io/t/how-to-exit-a-pipeline-early-without-failing/3951
  # exit a pipeline early without failing
  exit 78
else
  echo "$1 cache with commit id $WEB_COMMITID was not available."
  ENV="WEB_CACHE_FOUND=false\n"
fi

echo -e $ENV >> .woodpecker.env
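Both cache scripts above follow the same probe pattern: build the object URL, send an HTTP HEAD request with `curl`, record the result in an env file, and exit with code 78, which the linked Drone discourse thread describes as ending the remaining pipeline steps early without failing. A stripped-down sketch of that pattern, with purely illustrative names:

```sh
#!/bin/sh
# Generic cache probe; probe_cache, CACHE_FOUND and the example URL are
# illustrative names, not taken from the repository.
probe_cache() {
  # --head sends a HEAD request; --fail makes curl exit non-zero on 4xx/5xx
  if curl --output /dev/null --silent --head --fail "$1"; then
    echo "CACHE_FOUND=true" >> .env
    exit 78  # skip the remaining pipeline steps without marking them failed
  fi
  echo "CACHE_FOUND=false" >> .env
}

probe_cache "https://cache.example.org/bucket/opencloud/go-bin/abc123/tool.tar.gz"
```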
@@ -1,5 +1,5 @@
#!/bin/sh

while true; do
  echo -e "HTTP/1.1 200 OK\n\n$(cat /woodpecker/src/github.com/opencloud-eu/opencloud/tests/config/woodpecker/hosting-discovery.xml)" | nc -l -k -p 8080
  echo -e "HTTP/1.1 200 OK\n\n$(cat /drone/src/tests/config/drone/hosting-discovery.xml)" | nc -l -k -p 8080
done
4
vendor/github.com/OneOfOne/xxhash/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,4 @@
*.txt
*.pprof
cmap2/
cache/
13
vendor/github.com/OneOfOne/xxhash/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,13 @@
language: go
sudo: false

go:
- "1.10"
- "1.11"
- "1.12"
- master

script:
- go test -tags safe ./...
- go test ./...
-
187
vendor/github.com/OneOfOne/xxhash/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,187 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
74
vendor/github.com/OneOfOne/xxhash/README.md
generated
vendored
Normal file
@@ -0,0 +1,74 @@
# xxhash [GoDoc](https://godoc.org/github.com/OneOfOne/xxhash) [Build Status](https://travis-ci.org/OneOfOne/xxhash) [Coverage](https://gocover.io/github.com/OneOfOne/xxhash)

This is a native Go implementation of the excellent [xxhash](https://github.com/Cyan4973/xxHash)* algorithm, an extremely fast non-cryptographic hash algorithm working at speeds close to RAM limits.

* The C implementation is ([Copyright](https://github.com/Cyan4973/xxHash/blob/master/LICENSE) (c) 2012-2014, Yann Collet)

## Install

	go get github.com/OneOfOne/xxhash

## Features

* On Go 1.7+ the pure Go version is faster than CGO for all inputs.
* Supports ChecksumString{32,64} and xxhash{32,64}.WriteString, which avoid copies when they can and fall back to copying on appengine (see the sketch after this list).
* The native version falls back to a less optimized version on appengine due to the lack of unsafe.
* Almost as fast as the mostly pure assembly version written by the brilliant [cespare](https://github.com/cespare/xxhash), while also supporting seeds.
* To manually toggle the appengine version, build with `-tags safe`.

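A minimal sketch of the zero-copy string helpers named above; the import path matches this repository, and the input string and printed values are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/OneOfOne/xxhash"
)

func main() {
	s := "hello, xxhash"

	// Hashes the string directly; on the unsafe backend this avoids
	// copying s into a fresh []byte first.
	fmt.Println(xxhash.ChecksumString64(s))

	// Equivalent result through the byte-slice API (this conversion
	// allocates a copy).
	fmt.Println(xxhash.Checksum64([]byte(s)))
}
```
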
## Benchmark

### Core i7-4790 @ 3.60GHz, Linux 4.12.6-1-ARCH (64bit), Go tip (+ff90f4af66 2017-08-19)

```bash
➤ go test -bench '64' -count 5 -tags cespare | benchstat /dev/stdin
name                          time/op

# https://github.com/cespare/xxhash
XXSum64Cespare/Func-8         160ns ± 2%
XXSum64Cespare/Struct-8       173ns ± 1%
XXSum64ShortCespare/Func-8    6.78ns ± 1%
XXSum64ShortCespare/Struct-8  19.6ns ± 2%

# this package (default mode, using unsafe)
XXSum64/Func-8                170ns ± 1%
XXSum64/Struct-8              182ns ± 1%
XXSum64Short/Func-8           13.5ns ± 3%
XXSum64Short/Struct-8         20.4ns ± 0%

# this package (appengine, *not* using unsafe)
XXSum64/Func-8                241ns ± 5%
XXSum64/Struct-8              243ns ± 6%
XXSum64Short/Func-8           15.2ns ± 2%
XXSum64Short/Struct-8         23.7ns ± 5%

CRC64ISO-8                    1.23µs ± 1%
CRC64ISOString-8              2.71µs ± 4%
CRC64ISOShort-8               22.2ns ± 3%

Fnv64-8                       2.34µs ± 1%
Fnv64Short-8                  74.7ns ± 8%
```

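The table above comes from the upstream benchmark suite. As a rough local sanity check, a throwaway benchmark along these lines (the file name, benchmark name, and payload size are illustrative assumptions, not part of this repository) exercises the one-shot 64-bit path:

```go
// xxhash_local_bench_test.go — an illustrative local benchmark sketch,
// not part of the upstream suite.
package xxhash_test

import (
	"testing"

	"github.com/OneOfOne/xxhash"
)

// payload size chosen arbitrarily for this sketch.
var payload = make([]byte, 4096)

func BenchmarkChecksum64(b *testing.B) {
	b.SetBytes(int64(len(payload)))
	for i := 0; i < b.N; i++ {
		_ = xxhash.Checksum64(payload)
	}
}
```
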
## Usage

```go
h := xxhash.New64()
// to hash a file instead, swap in an *os.File:
// r, err := os.Open("......")
// defer r.Close()
r := strings.NewReader("some data to hash")
io.Copy(h, r)
fmt.Println("xxhash.Backend:", xxhash.Backend)
fmt.Println("checksum:", h.Sum64())
```

[<kbd>playground</kbd>](https://play.golang.org/p/wHKBwfu6CPV)

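Seeds work with both the one-shot and streaming APIs shown above; a small sketch (the seed value and input are arbitrary):

```go
seed := uint64(42)

// One-shot, seeded.
sum := xxhash.Checksum64S([]byte("payload"), seed)

// Streaming, same seed; the two results should agree.
h := xxhash.NewS64(seed)
h.Write([]byte("payload"))
fmt.Println(sum == h.Sum64()) // true
```
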
## TODO

* Rewrite the 32bit version to be more optimized.
* General cleanup as the Go inliner gets smarter.

## License

This project is released under the Apache v2.0 license. See [LICENSE](LICENSE) for more details.
294
vendor/github.com/OneOfOne/xxhash/xxhash.go
generated
vendored
Normal file
@@ -0,0 +1,294 @@
package xxhash

import (
	"encoding/binary"
	"errors"
	"hash"
)

const (
	prime32x1 uint32 = 2654435761
	prime32x2 uint32 = 2246822519
	prime32x3 uint32 = 3266489917
	prime32x4 uint32 = 668265263
	prime32x5 uint32 = 374761393

	prime64x1 uint64 = 11400714785074694791
	prime64x2 uint64 = 14029467366897019727
	prime64x3 uint64 = 1609587929392839161
	prime64x4 uint64 = 9650029242287828579
	prime64x5 uint64 = 2870177450012600261

	maxInt32 int32 = (1<<31 - 1)

	// precomputed zero Vs for seed 0
	zero64x1 = 0x60ea27eeadc0b5d6
	zero64x2 = 0xc2b2ae3d27d4eb4f
	zero64x3 = 0x0
	zero64x4 = 0x61c8864e7a143579
)

const (
	magic32         = "xxh\x07"
	magic64         = "xxh\x08"
	marshaled32Size = len(magic32) + 4*7 + 16
	marshaled64Size = len(magic64) + 8*6 + 32 + 1
)

func NewHash32() hash.Hash { return New32() }
func NewHash64() hash.Hash { return New64() }

// Checksum32 returns the checksum of the input data with the seed set to 0.
func Checksum32(in []byte) uint32 {
	return Checksum32S(in, 0)
}

// ChecksumString32 returns the checksum of the input data, without creating a copy, with the seed set to 0.
func ChecksumString32(s string) uint32 {
	return ChecksumString32S(s, 0)
}

type XXHash32 struct {
	mem            [16]byte
	ln, memIdx     int32
	v1, v2, v3, v4 uint32
	seed           uint32
}

// Size returns the number of bytes Sum will return.
func (xx *XXHash32) Size() int {
	return 4
}

// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (xx *XXHash32) BlockSize() int {
	return 16
}

// NewS32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the specific seed.
func NewS32(seed uint32) (xx *XXHash32) {
	xx = &XXHash32{
		seed: seed,
	}
	xx.Reset()
	return
}

// New32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the seed set to 0.
func New32() *XXHash32 {
	return NewS32(0)
}

func (xx *XXHash32) Reset() {
	xx.v1 = xx.seed + prime32x1 + prime32x2
	xx.v2 = xx.seed + prime32x2
	xx.v3 = xx.seed
	xx.v4 = xx.seed - prime32x1
	xx.ln, xx.memIdx = 0, 0
}

// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (xx *XXHash32) Sum(in []byte) []byte {
	s := xx.Sum32()
	return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}

// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (xx *XXHash32) MarshalBinary() ([]byte, error) {
	b := make([]byte, 0, marshaled32Size)
	b = append(b, magic32...)
	b = appendUint32(b, xx.v1)
	b = appendUint32(b, xx.v2)
	b = appendUint32(b, xx.v3)
	b = appendUint32(b, xx.v4)
	b = appendUint32(b, xx.seed)
	b = appendInt32(b, xx.ln)
	b = appendInt32(b, xx.memIdx)
	b = append(b, xx.mem[:]...)
	return b, nil
}

// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (xx *XXHash32) UnmarshalBinary(b []byte) error {
	if len(b) < len(magic32) || string(b[:len(magic32)]) != magic32 {
		return errors.New("xxhash: invalid hash state identifier")
	}
	if len(b) != marshaled32Size {
		return errors.New("xxhash: invalid hash state size")
	}
	b = b[len(magic32):]
	b, xx.v1 = consumeUint32(b)
	b, xx.v2 = consumeUint32(b)
	b, xx.v3 = consumeUint32(b)
	b, xx.v4 = consumeUint32(b)
	b, xx.seed = consumeUint32(b)
	b, xx.ln = consumeInt32(b)
	b, xx.memIdx = consumeInt32(b)
	copy(xx.mem[:], b)
	return nil
}

// Checksum64 an alias for Checksum64S(in, 0)
func Checksum64(in []byte) uint64 {
	return Checksum64S(in, 0)
}

// ChecksumString64 returns the checksum of the input data, without creating a copy, with the seed set to 0.
func ChecksumString64(s string) uint64 {
	return ChecksumString64S(s, 0)
}

type XXHash64 struct {
	v1, v2, v3, v4 uint64
	seed           uint64
	ln             uint64
	mem            [32]byte
	memIdx         int8
}

// Size returns the number of bytes Sum will return.
func (xx *XXHash64) Size() int {
	return 8
}

// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (xx *XXHash64) BlockSize() int {
	return 32
}

// NewS64 creates a new hash.Hash64 computing the 64bit xxHash checksum starting with the specific seed.
func NewS64(seed uint64) (xx *XXHash64) {
	xx = &XXHash64{
		seed: seed,
	}
	xx.Reset()
	return
}

// New64 creates a new hash.Hash64 computing the 64bit xxHash checksum starting with the seed set to 0x0.
func New64() *XXHash64 {
	return NewS64(0)
}

func (xx *XXHash64) Reset() {
	xx.ln, xx.memIdx = 0, 0
	xx.v1, xx.v2, xx.v3, xx.v4 = resetVs64(xx.seed)
}

// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (xx *XXHash64) Sum(in []byte) []byte {
	s := xx.Sum64()
	return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}

// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (xx *XXHash64) MarshalBinary() ([]byte, error) {
	b := make([]byte, 0, marshaled64Size)
	b = append(b, magic64...)
	b = appendUint64(b, xx.v1)
	b = appendUint64(b, xx.v2)
	b = appendUint64(b, xx.v3)
	b = appendUint64(b, xx.v4)
	b = appendUint64(b, xx.seed)
	b = appendUint64(b, xx.ln)
	b = append(b, byte(xx.memIdx))
	b = append(b, xx.mem[:]...)
	return b, nil
}

// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (xx *XXHash64) UnmarshalBinary(b []byte) error {
	if len(b) < len(magic64) || string(b[:len(magic64)]) != magic64 {
		return errors.New("xxhash: invalid hash state identifier")
	}
	if len(b) != marshaled64Size {
		return errors.New("xxhash: invalid hash state size")
	}
	b = b[len(magic64):]
	b, xx.v1 = consumeUint64(b)
	b, xx.v2 = consumeUint64(b)
	b, xx.v3 = consumeUint64(b)
	b, xx.v4 = consumeUint64(b)
	b, xx.seed = consumeUint64(b)
	b, xx.ln = consumeUint64(b)
	xx.memIdx = int8(b[0])
	b = b[1:]
	copy(xx.mem[:], b)
	return nil
}

func appendInt32(b []byte, x int32) []byte { return appendUint32(b, uint32(x)) }

func appendUint32(b []byte, x uint32) []byte {
	var a [4]byte
	binary.LittleEndian.PutUint32(a[:], x)
	return append(b, a[:]...)
}

func appendUint64(b []byte, x uint64) []byte {
	var a [8]byte
	binary.LittleEndian.PutUint64(a[:], x)
	return append(b, a[:]...)
}

func consumeInt32(b []byte) ([]byte, int32)   { bn, x := consumeUint32(b); return bn, int32(x) }
func consumeUint32(b []byte) ([]byte, uint32) { x := u32(b); return b[4:], x }
func consumeUint64(b []byte) ([]byte, uint64) { x := u64(b); return b[8:], x }

// force the compiler to use ROTL instructions

func rotl32_1(x uint32) uint32  { return (x << 1) | (x >> (32 - 1)) }
func rotl32_7(x uint32) uint32  { return (x << 7) | (x >> (32 - 7)) }
func rotl32_11(x uint32) uint32 { return (x << 11) | (x >> (32 - 11)) }
func rotl32_12(x uint32) uint32 { return (x << 12) | (x >> (32 - 12)) }
func rotl32_13(x uint32) uint32 { return (x << 13) | (x >> (32 - 13)) }
func rotl32_17(x uint32) uint32 { return (x << 17) | (x >> (32 - 17)) }
func rotl32_18(x uint32) uint32 { return (x << 18) | (x >> (32 - 18)) }

func rotl64_1(x uint64) uint64  { return (x << 1) | (x >> (64 - 1)) }
func rotl64_7(x uint64) uint64  { return (x << 7) | (x >> (64 - 7)) }
func rotl64_11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) }
func rotl64_12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) }
func rotl64_18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) }
func rotl64_23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) }
func rotl64_27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) }
func rotl64_31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) }

func mix64(h uint64) uint64 {
	h ^= h >> 33
	h *= prime64x2
	h ^= h >> 29
	h *= prime64x3
	h ^= h >> 32
	return h
}

func resetVs64(seed uint64) (v1, v2, v3, v4 uint64) {
	if seed == 0 {
		return zero64x1, zero64x2, zero64x3, zero64x4
	}
	return (seed + prime64x1 + prime64x2), (seed + prime64x2), (seed), (seed - prime64x1)
}

// borrowed from cespare
func round64(h, v uint64) uint64 {
	h += v * prime64x2
	h = rotl64_31(h)
	h *= prime64x1
	return h
}

func mergeRound64(h, v uint64) uint64 {
	v = round64(0, v)
	h ^= v
	h = h*prime64x1 + prime64x4
	return h
}
161
vendor/github.com/OneOfOne/xxhash/xxhash_go17.go
generated
vendored
Normal file
@@ -0,0 +1,161 @@
package xxhash

func u32(in []byte) uint32 {
	return uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
}

func u64(in []byte) uint64 {
	return uint64(in[0]) | uint64(in[1])<<8 | uint64(in[2])<<16 | uint64(in[3])<<24 | uint64(in[4])<<32 | uint64(in[5])<<40 | uint64(in[6])<<48 | uint64(in[7])<<56
}

// Checksum32S returns the checksum of the input bytes with the specific seed.
func Checksum32S(in []byte, seed uint32) (h uint32) {
	var i int

	if len(in) > 15 {
		var (
			v1 = seed + prime32x1 + prime32x2
			v2 = seed + prime32x2
			v3 = seed + 0
			v4 = seed - prime32x1
		)
		for ; i < len(in)-15; i += 16 {
			in := in[i : i+16 : len(in)]
			v1 += u32(in[0:4:len(in)]) * prime32x2
			v1 = rotl32_13(v1) * prime32x1

			v2 += u32(in[4:8:len(in)]) * prime32x2
			v2 = rotl32_13(v2) * prime32x1

			v3 += u32(in[8:12:len(in)]) * prime32x2
			v3 = rotl32_13(v3) * prime32x1

			v4 += u32(in[12:16:len(in)]) * prime32x2
			v4 = rotl32_13(v4) * prime32x1
		}

		h = rotl32_1(v1) + rotl32_7(v2) + rotl32_12(v3) + rotl32_18(v4)

	} else {
		h = seed + prime32x5
	}

	h += uint32(len(in))
	for ; i <= len(in)-4; i += 4 {
		in := in[i : i+4 : len(in)]
		h += u32(in[0:4:len(in)]) * prime32x3
		h = rotl32_17(h) * prime32x4
	}

	for ; i < len(in); i++ {
		h += uint32(in[i]) * prime32x5
		h = rotl32_11(h) * prime32x1
	}

	h ^= h >> 15
	h *= prime32x2
	h ^= h >> 13
	h *= prime32x3
	h ^= h >> 16

	return
}

func (xx *XXHash32) Write(in []byte) (n int, err error) {
	i, ml := 0, int(xx.memIdx)
	n = len(in)
	xx.ln += int32(n)

	if d := 16 - ml; ml > 0 && ml+len(in) > 16 {
		xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in[:d]))
		ml, in = 16, in[d:len(in):len(in)]
	} else if ml+len(in) < 16 {
		xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in))
		return
	}

	if ml > 0 {
		i += 16 - ml
		xx.memIdx += int32(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in))
		in := xx.mem[:16:len(xx.mem)]

		xx.v1 += u32(in[0:4:len(in)]) * prime32x2
		xx.v1 = rotl32_13(xx.v1) * prime32x1

		xx.v2 += u32(in[4:8:len(in)]) * prime32x2
		xx.v2 = rotl32_13(xx.v2) * prime32x1

		xx.v3 += u32(in[8:12:len(in)]) * prime32x2
		xx.v3 = rotl32_13(xx.v3) * prime32x1

		xx.v4 += u32(in[12:16:len(in)]) * prime32x2
		xx.v4 = rotl32_13(xx.v4) * prime32x1

		xx.memIdx = 0
	}

	for ; i <= len(in)-16; i += 16 {
		in := in[i : i+16 : len(in)]
		xx.v1 += u32(in[0:4:len(in)]) * prime32x2
		xx.v1 = rotl32_13(xx.v1) * prime32x1

		xx.v2 += u32(in[4:8:len(in)]) * prime32x2
		xx.v2 = rotl32_13(xx.v2) * prime32x1

		xx.v3 += u32(in[8:12:len(in)]) * prime32x2
		xx.v3 = rotl32_13(xx.v3) * prime32x1

		xx.v4 += u32(in[12:16:len(in)]) * prime32x2
		xx.v4 = rotl32_13(xx.v4) * prime32x1
	}

	if len(in)-i != 0 {
		xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in[i:len(in):len(in)]))
	}

	return
}

func (xx *XXHash32) Sum32() (h uint32) {
	var i int32
	if xx.ln > 15 {
		h = rotl32_1(xx.v1) + rotl32_7(xx.v2) + rotl32_12(xx.v3) + rotl32_18(xx.v4)
	} else {
		h = xx.seed + prime32x5
	}

	h += uint32(xx.ln)

	if xx.memIdx > 0 {
		for ; i < xx.memIdx-3; i += 4 {
			in := xx.mem[i : i+4 : len(xx.mem)]
			h += u32(in[0:4:len(in)]) * prime32x3
			h = rotl32_17(h) * prime32x4
		}

		for ; i < xx.memIdx; i++ {
			h += uint32(xx.mem[i]) * prime32x5
			h = rotl32_11(h) * prime32x1
		}
	}
	h ^= h >> 15
	h *= prime32x2
	h ^= h >> 13
	h *= prime32x3
	h ^= h >> 16

	return
}

// Checksum64S returns the 64bit xxhash checksum for a single input
func Checksum64S(in []byte, seed uint64) uint64 {
	if len(in) == 0 && seed == 0 {
		return 0xef46db3751d8e999
	}

	if len(in) > 31 {
		return checksum64(in, seed)
	}

	return checksum64Short(in, seed)
}
183
vendor/github.com/OneOfOne/xxhash/xxhash_safe.go
generated
vendored
Normal file
@@ -0,0 +1,183 @@
// +build appengine safe ppc64le ppc64be mipsle mips s390x

package xxhash

// Backend returns the current version of xxhash being used.
const Backend = "GoSafe"

func ChecksumString32S(s string, seed uint32) uint32 {
	return Checksum32S([]byte(s), seed)
}

func (xx *XXHash32) WriteString(s string) (int, error) {
	if len(s) == 0 {
		return 0, nil
	}
	return xx.Write([]byte(s))
}

func ChecksumString64S(s string, seed uint64) uint64 {
	return Checksum64S([]byte(s), seed)
}

func (xx *XXHash64) WriteString(s string) (int, error) {
	if len(s) == 0 {
		return 0, nil
	}
	return xx.Write([]byte(s))
}

func checksum64(in []byte, seed uint64) (h uint64) {
	var (
		v1, v2, v3, v4 = resetVs64(seed)

		i int
	)

	for ; i < len(in)-31; i += 32 {
		in := in[i : i+32 : len(in)]
		v1 = round64(v1, u64(in[0:8:len(in)]))
		v2 = round64(v2, u64(in[8:16:len(in)]))
		v3 = round64(v3, u64(in[16:24:len(in)]))
		v4 = round64(v4, u64(in[24:32:len(in)]))
	}

	h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)

	h = mergeRound64(h, v1)
	h = mergeRound64(h, v2)
	h = mergeRound64(h, v3)
	h = mergeRound64(h, v4)

	h += uint64(len(in))

	for ; i < len(in)-7; i += 8 {
		h ^= round64(0, u64(in[i:len(in):len(in)]))
		h = rotl64_27(h)*prime64x1 + prime64x4
	}

	for ; i < len(in)-3; i += 4 {
		h ^= uint64(u32(in[i:len(in):len(in)])) * prime64x1
		h = rotl64_23(h)*prime64x2 + prime64x3
	}

	for ; i < len(in); i++ {
		h ^= uint64(in[i]) * prime64x5
		h = rotl64_11(h) * prime64x1
	}

	return mix64(h)
}

func checksum64Short(in []byte, seed uint64) uint64 {
	var (
		h = seed + prime64x5 + uint64(len(in))
		i int
	)

	for ; i < len(in)-7; i += 8 {
		k := u64(in[i : i+8 : len(in)])
		h ^= round64(0, k)
		h = rotl64_27(h)*prime64x1 + prime64x4
	}

	for ; i < len(in)-3; i += 4 {
		h ^= uint64(u32(in[i:i+4:len(in)])) * prime64x1
		h = rotl64_23(h)*prime64x2 + prime64x3
	}

	for ; i < len(in); i++ {
		h ^= uint64(in[i]) * prime64x5
		h = rotl64_11(h) * prime64x1
	}

	return mix64(h)
}

func (xx *XXHash64) Write(in []byte) (n int, err error) {
	var (
		ml = int(xx.memIdx)
		d  = 32 - ml
	)

	n = len(in)
	xx.ln += uint64(n)

	if ml+len(in) < 32 {
		xx.memIdx += int8(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in))
		return
	}

	i, v1, v2, v3, v4 := 0, xx.v1, xx.v2, xx.v3, xx.v4
	if ml > 0 && ml+len(in) > 32 {
		xx.memIdx += int8(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in[:d:len(in)]))
		in = in[d:len(in):len(in)]

		in := xx.mem[0:32:len(xx.mem)]

		v1 = round64(v1, u64(in[0:8:len(in)]))
		v2 = round64(v2, u64(in[8:16:len(in)]))
		v3 = round64(v3, u64(in[16:24:len(in)]))
		v4 = round64(v4, u64(in[24:32:len(in)]))

		xx.memIdx = 0
	}

	for ; i < len(in)-31; i += 32 {
		in := in[i : i+32 : len(in)]
		v1 = round64(v1, u64(in[0:8:len(in)]))
		v2 = round64(v2, u64(in[8:16:len(in)]))
		v3 = round64(v3, u64(in[16:24:len(in)]))
		v4 = round64(v4, u64(in[24:32:len(in)]))
	}

	if len(in)-i != 0 {
		xx.memIdx += int8(copy(xx.mem[xx.memIdx:], in[i:len(in):len(in)]))
	}

	xx.v1, xx.v2, xx.v3, xx.v4 = v1, v2, v3, v4

	return
}

func (xx *XXHash64) Sum64() (h uint64) {
	var i int
	if xx.ln > 31 {
		v1, v2, v3, v4 := xx.v1, xx.v2, xx.v3, xx.v4
		h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)

		h = mergeRound64(h, v1)
		h = mergeRound64(h, v2)
		h = mergeRound64(h, v3)
		h = mergeRound64(h, v4)
	} else {
		h = xx.seed + prime64x5
	}

	h += uint64(xx.ln)
	if xx.memIdx > 0 {
		in := xx.mem[:xx.memIdx]
		for ; i < int(xx.memIdx)-7; i += 8 {
			in := in[i : i+8 : len(in)]
			k := u64(in[0:8:len(in)])
			k *= prime64x2
			k = rotl64_31(k)
			k *= prime64x1
			h ^= k
			h = rotl64_27(h)*prime64x1 + prime64x4
		}

		for ; i < int(xx.memIdx)-3; i += 4 {
			in := in[i : i+4 : len(in)]
			h ^= uint64(u32(in[0:4:len(in)])) * prime64x1
			h = rotl64_23(h)*prime64x2 + prime64x3
		}

		for ; i < int(xx.memIdx); i++ {
			h ^= uint64(in[i]) * prime64x5
			h = rotl64_11(h) * prime64x1
		}
	}

	return mix64(h)
}
240
vendor/github.com/OneOfOne/xxhash/xxhash_unsafe.go
generated
vendored
Normal file
@@ -0,0 +1,240 @@
// +build !safe
// +build !appengine
// +build !ppc64le
// +build !mipsle
// +build !ppc64be
// +build !mips
// +build !s390x

package xxhash

import (
	"reflect"
	"unsafe"
)

// Backend returns the current version of xxhash being used.
const Backend = "GoUnsafe"

// ChecksumString32S returns the checksum of the input data, without creating a copy, with the specific seed.
func ChecksumString32S(s string, seed uint32) uint32 {
	if len(s) == 0 {
		return Checksum32S(nil, seed)
	}
	ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
	return Checksum32S((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)], seed)
}

func (xx *XXHash32) WriteString(s string) (int, error) {
	if len(s) == 0 {
		return 0, nil
	}

	ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
	return xx.Write((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)])
}

// ChecksumString64S returns the checksum of the input data, without creating a copy, with the specific seed.
func ChecksumString64S(s string, seed uint64) uint64 {
	if len(s) == 0 {
		return Checksum64S(nil, seed)
	}

	ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
	return Checksum64S((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)], seed)
}

func (xx *XXHash64) WriteString(s string) (int, error) {
	if len(s) == 0 {
		return 0, nil
	}
	ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
	return xx.Write((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)])
}

//go:nocheckptr
func checksum64(in []byte, seed uint64) uint64 {
	var (
		wordsLen = len(in) >> 3
		words    = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen]

		v1, v2, v3, v4 = resetVs64(seed)

		h uint64
		i int
	)

	for ; i < len(words)-3; i += 4 {
		words := (*[4]uint64)(unsafe.Pointer(&words[i]))

		v1 = round64(v1, words[0])
		v2 = round64(v2, words[1])
		v3 = round64(v3, words[2])
		v4 = round64(v4, words[3])
	}

	h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)

	h = mergeRound64(h, v1)
	h = mergeRound64(h, v2)
	h = mergeRound64(h, v3)
	h = mergeRound64(h, v4)

	h += uint64(len(in))

	for _, k := range words[i:] {
		h ^= round64(0, k)
		h = rotl64_27(h)*prime64x1 + prime64x4
	}

	if in = in[wordsLen<<3 : len(in) : len(in)]; len(in) > 3 {
		words := (*[1]uint32)(unsafe.Pointer(&in[0]))
		h ^= uint64(words[0]) * prime64x1
		h = rotl64_23(h)*prime64x2 + prime64x3

		in = in[4:len(in):len(in)]
	}

	for _, b := range in {
		h ^= uint64(b) * prime64x5
		h = rotl64_11(h) * prime64x1
	}

	return mix64(h)
}

//go:nocheckptr
func checksum64Short(in []byte, seed uint64) uint64 {
	var (
		h = seed + prime64x5 + uint64(len(in))
		i int
	)

	if len(in) > 7 {
		var (
			wordsLen = len(in) >> 3
			words    = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen]
		)

		for i := range words {
			h ^= round64(0, words[i])
			h = rotl64_27(h)*prime64x1 + prime64x4
		}

		i = wordsLen << 3
	}

	if in = in[i:len(in):len(in)]; len(in) > 3 {
		words := (*[1]uint32)(unsafe.Pointer(&in[0]))
		h ^= uint64(words[0]) * prime64x1
		h = rotl64_23(h)*prime64x2 + prime64x3

		in = in[4:len(in):len(in)]
	}

	for _, b := range in {
		h ^= uint64(b) * prime64x5
		h = rotl64_11(h) * prime64x1
	}

	return mix64(h)
}

func (xx *XXHash64) Write(in []byte) (n int, err error) {
	mem, idx := xx.mem[:], int(xx.memIdx)

	xx.ln, n = xx.ln+uint64(len(in)), len(in)

	if idx+len(in) < 32 {
		xx.memIdx += int8(copy(mem[idx:len(mem):len(mem)], in))
		return
	}

	var (
		v1, v2, v3, v4 = xx.v1, xx.v2, xx.v3, xx.v4

		i int
	)

	if d := 32 - int(idx); d > 0 && int(idx)+len(in) > 31 {
		copy(mem[idx:len(mem):len(mem)], in[:len(in):len(in)])

		words := (*[4]uint64)(unsafe.Pointer(&mem[0]))

		v1 = round64(v1, words[0])
		v2 = round64(v2, words[1])
		v3 = round64(v3, words[2])
		v4 = round64(v4, words[3])

		if in, xx.memIdx = in[d:len(in):len(in)], 0; len(in) == 0 {
			goto RET
		}
	}

	for ; i < len(in)-31; i += 32 {
		words := (*[4]uint64)(unsafe.Pointer(&in[i]))

		v1 = round64(v1, words[0])
		v2 = round64(v2, words[1])
		v3 = round64(v3, words[2])
		v4 = round64(v4, words[3])
	}

	if len(in)-i != 0 {
		xx.memIdx += int8(copy(mem[xx.memIdx:len(mem):len(mem)], in[i:len(in):len(in)]))
	}

RET:
	xx.v1, xx.v2, xx.v3, xx.v4 = v1, v2, v3, v4

	return
}

func (xx *XXHash64) Sum64() (h uint64) {
	if seed := xx.seed; xx.ln > 31 {
		v1, v2, v3, v4 := xx.v1, xx.v2, xx.v3, xx.v4
		h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)

		h = mergeRound64(h, v1)
		h = mergeRound64(h, v2)
		h = mergeRound64(h, v3)
		h = mergeRound64(h, v4)
	} else if seed == 0 {
		h = prime64x5
	} else {
		h = seed + prime64x5
	}

	h += uint64(xx.ln)

	if xx.memIdx == 0 {
		return mix64(h)
	}

	var (
		in       = xx.mem[:xx.memIdx:xx.memIdx]
		wordsLen = len(in) >> 3
		words    = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen]
	)

	for _, k := range words {
		h ^= round64(0, k)
		h = rotl64_27(h)*prime64x1 + prime64x4
	}

	if in = in[wordsLen<<3 : len(in) : len(in)]; len(in) > 3 {
		words := (*[1]uint32)(unsafe.Pointer(&in[0]))

		h ^= uint64(words[0]) * prime64x1
		h = rotl64_23(h)*prime64x2 + prime64x3

		in = in[4:len(in):len(in)]
	}

	for _, b := range in {
		h ^= uint64(b) * prime64x5
		h = rotl64_11(h) * prime64x1
	}

	return mix64(h)
}
11
vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
generated
vendored
@@ -104,7 +104,7 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
		node.Parent.Prev.Type == blackfriday.Heading &&
		node.Parent.Prev.FirstChild != nil &&
		bytes.EqualFold(node.Parent.Prev.FirstChild.Literal, []byte("NAME")) {
		before, after, found := bytesCut(node.Literal, []byte(" - "))
		before, after, found := bytes.Cut(node.Literal, []byte(" - "))
		escapeSpecialChars(w, before)
		if found {
			out(w, ` \- `)
@@ -406,12 +406,3 @@ func escapeSpecialCharsLine(w io.Writer, text []byte) {
		w.Write([]byte{'\\', text[i]}) // nolint: errcheck
	}
}

// bytesCut is a copy of [bytes.Cut] to provide compatibility with go1.17
// and older. We can remove this once we drop support for go1.17 and older.
func bytesCut(s, sep []byte) (before, after []byte, found bool) {
	if i := bytes.Index(s, sep); i >= 0 {
		return s[:i], s[i+len(sep):], true
	}
	return s, nil, false
}

4
vendor/github.com/open-policy-agent/opa/ast/annotations.go
generated
vendored
@@ -31,7 +31,3 @@ type (
func NewAnnotationsRef(a *Annotations) *AnnotationsRef {
	return v1.NewAnnotationsRef(a)
}

func BuildAnnotationSet(modules []*Module) (*AnnotationSet, Errors) {
	return v1.BuildAnnotationSet(modules)
}

3
vendor/github.com/open-policy-agent/opa/ast/parser_ext.go
generated
vendored
@@ -5,7 +5,6 @@
package ast

import (
	"errors"
	"fmt"

	v1 "github.com/open-policy-agent/opa/v1/ast"
@@ -280,7 +279,7 @@ func ParseStatement(input string) (Statement, error) {
		return nil, err
	}
	if len(stmts) != 1 {
		return nil, errors.New("expected exactly one statement")
		return nil, fmt.Errorf("expected exactly one statement")
	}
	return stmts[0], nil
}

2
vendor/github.com/open-policy-agent/opa/ast/policy.go
generated
vendored
@@ -184,7 +184,7 @@ func RefHead(ref Ref, args ...*Term) *Head {
}

// DocKind represents the collection of document types that can be produced by rules.
type DocKind = v1.DocKind
type DocKind int

const (
	// CompleteDoc represents a document that is completely defined by the rule.

4849
vendor/github.com/open-policy-agent/opa/capabilities/v1.2.0.json
generated
vendored
File diff suppressed because it is too large
5
vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go
generated
vendored
@@ -6,7 +6,6 @@ package bundle

import (
	"context"
	"errors"
	"fmt"
	"io"
	"os"
@@ -98,7 +97,7 @@ func LoadBundleFromDiskForRegoVersion(regoVersion ast.RegoVersion, path, name st

	_, err := os.Stat(bundlePath)
	if err == nil {
		f, err := os.Open(bundlePath)
		f, err := os.Open(filepath.Join(bundlePath))
		if err != nil {
			return nil, err
		}
@@ -133,7 +132,7 @@ func SaveBundleToDisk(path string, raw io.Reader) (string, error) {
	}

	if raw == nil {
		return "", errors.New("no raw bundle bytes to persist to disk")
		return "", fmt.Errorf("no raw bundle bytes to persist to disk")
	}

	dest, err := os.CreateTemp(path, ".bundle.tar.gz.*.tmp")

2
vendor/github.com/open-policy-agent/opa/internal/cidr/merge/merge.go
generated
vendored
@@ -114,7 +114,7 @@ func GetAddressRange(ipNet net.IPNet) (net.IP, net.IP) {
	copy(lastIPMask, ipNet.Mask)
	for i := range lastIPMask {
		lastIPMask[len(lastIPMask)-i-1] = ^lastIPMask[len(lastIPMask)-i-1]
		lastIP[net.IPv6len-i-1] |= lastIPMask[len(lastIPMask)-i-1]
		lastIP[net.IPv6len-i-1] = lastIP[net.IPv6len-i-1] | lastIPMask[len(lastIPMask)-i-1]
	}

	return firstIP, lastIP

51
vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go
generated
vendored
@@ -5,9 +5,6 @@
package compiler

import (
	"errors"
	"sync"

	"github.com/open-policy-agent/opa/v1/ast"
	"github.com/open-policy-agent/opa/v1/schemas"
	"github.com/open-policy-agent/opa/v1/util"
@@ -19,35 +16,12 @@ const (
	AuthorizationPolicySchema SchemaFile = "authorizationPolicy.json"
)

var schemaDefinitions = map[SchemaFile]any{}

var loadOnce = sync.OnceValue(func() error {
	cont, err := schemas.FS.ReadFile(string(AuthorizationPolicySchema))
	if err != nil {
		return err
	}

	if len(cont) == 0 {
		return errors.New("expected authorization policy schema file to be present")
	}

	var schema any
	if err := util.Unmarshal(cont, &schema); err != nil {
		return err
	}

	schemaDefinitions[AuthorizationPolicySchema] = schema

	return nil
})
var schemaDefinitions = map[SchemaFile]interface{}{}

// VerifyAuthorizationPolicySchema performs type checking on rules against the schema for the Authorization Policy
// Input document.
// NOTE: The provided compiler should have already run the compilation process on the input modules
func VerifyAuthorizationPolicySchema(compiler *ast.Compiler, ref ast.Ref) error {
	if err := loadOnce(); err != nil {
		panic(err)
	}

	rules := getRulesWithDependencies(compiler, ref)

@@ -93,3 +67,26 @@ func transitiveDependencies(compiler *ast.Compiler, rule *ast.Rule, deps map[*as
		transitiveDependencies(compiler, other, deps)
	}
}

func loadAuthorizationPolicySchema() {

	cont, err := schemas.FS.ReadFile(string(AuthorizationPolicySchema))
	if err != nil {
		panic(err)
	}

	if len(cont) == 0 {
		panic("expected authorization policy schema file to be present")
	}

	var schema interface{}
	if err := util.Unmarshal(cont, &schema); err != nil {
		panic(err)
	}

	schemaDefinitions[AuthorizationPolicySchema] = schema
}

func init() {
	loadAuthorizationPolicySchema()
}

49
vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go
generated
vendored
@@ -340,7 +340,7 @@ func (c *Compiler) initModule() error {
	// two times. But let's deal with that when it happens.
	if _, ok := c.funcs[name]; ok { // already seen
		c.debug.Printf("function name duplicate: %s (%d)", name, fn.Index)
		name += ".1"
		name = name + ".1"
	}
	c.funcs[name] = fn.Index
}
@@ -348,7 +348,7 @@ func (c *Compiler) initModule() error {
	for _, fn := range c.policy.Funcs.Funcs {

		params := make([]types.ValueType, len(fn.Params))
		for i := range params {
		for i := 0; i < len(params); i++ {
			params[i] = types.I32
		}

@@ -827,7 +827,7 @@ func (c *Compiler) compileFunc(fn *ir.Func) error {
	memoize := len(fn.Params) == 2

	if len(fn.Params) == 0 {
		return errors.New("illegal function: zero args")
		return fmt.Errorf("illegal function: zero args")
	}

	c.nextLocal = 0
@@ -996,16 +996,12 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err
	for _, stmt := range block.Stmts {
		switch stmt := stmt.(type) {
		case *ir.ResultSetAddStmt:
			instrs = append(instrs,
				instruction.GetLocal{Index: c.lrs},
				instruction.GetLocal{Index: c.local(stmt.Value)},
				instruction.Call{Index: c.function(opaSetAdd)},
			)
			instrs = append(instrs, instruction.GetLocal{Index: c.lrs})
			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Value)})
			instrs = append(instrs, instruction.Call{Index: c.function(opaSetAdd)})
		case *ir.ReturnLocalStmt:
			instrs = append(instrs,
				instruction.GetLocal{Index: c.local(stmt.Source)},
				instruction.Return{},
			)
			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Source)})
			instrs = append(instrs, instruction.Return{})
		case *ir.BlockStmt:
			for i := range stmt.Blocks {
				block, err := c.compileBlock(stmt.Blocks[i])
@@ -1033,10 +1029,8 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err
				return instrs, err
			}
		case *ir.AssignVarStmt:
			instrs = append(instrs,
				c.instrRead(stmt.Source),
				instruction.SetLocal{Index: c.local(stmt.Target)},
			)
			instrs = append(instrs, c.instrRead(stmt.Source))
			instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)})
		case *ir.AssignVarOnceStmt:
			instrs = append(instrs, instruction.Block{
				Instrs: []instruction.Instruction{
@@ -1366,7 +1360,7 @@ func (c *Compiler) compileUpsert(local ir.Local, path []int, value ir.Operand, _
	// Initialize the locals that specify the path of the upsert operation.
	lpath := make(map[int]uint32, len(path))

	for i := range path {
	for i := 0; i < len(path); i++ {
		lpath[i] = c.genLocal()
		instrs = append(instrs, instruction.I32Const{Value: c.opaStringAddr(path[i])})
		instrs = append(instrs, instruction.SetLocal{Index: lpath[i]})
@@ -1375,10 +1369,10 @@ func (c *Compiler) compileUpsert(local ir.Local, path []int, value ir.Operand, _
	// Generate a block that traverses the path of the upsert operation,
	// shallowing copying values at each step as needed. Stop before the final
	// segment that will only be inserted.
	inner := make([]instruction.Instruction, 0, len(path)*21+1)
	var inner []instruction.Instruction
	ltemp := c.genLocal()

	for i := range len(path) - 1 {
	for i := 0; i < len(path)-1; i++ {

		// Lookup the next part of the path.
		inner = append(inner, instruction.GetLocal{Index: lcopy})
@@ -1414,10 +1408,10 @@ func (c *Compiler) compileUpsert(local ir.Local, path []int, value ir.Operand, _
	inner = append(inner, instruction.Br{Index: uint32(len(path) - 1)})

	// Generate blocks that handle missing nodes during traversal.
	block := make([]instruction.Instruction, 0, len(path)*10)
	var block []instruction.Instruction
	lval := c.genLocal()

	for i := range len(path) - 1 {
	for i := 0; i < len(path)-1; i++ {
		block = append(block, instruction.Block{Instrs: inner})
		block = append(block, instruction.Call{Index: c.function(opaObject)})
		block = append(block, instruction.SetLocal{Index: lval})
@@ -1541,7 +1535,8 @@ func (c *Compiler) compileExternalCall(stmt *ir.CallStmt, ef externalFunc, resul
	}

	instrs := *result
	instrs = append(instrs, instruction.I32Const{Value: ef.ID}, instruction.I32Const{Value: 0}) // unused context parameter
	instrs = append(instrs, instruction.I32Const{Value: ef.ID})
	instrs = append(instrs, instruction.I32Const{Value: 0}) // unused context parameter

	for _, arg := range stmt.Args {
		instrs = append(instrs, c.instrRead(arg))
@@ -1550,11 +1545,9 @@ func (c *Compiler) compileExternalCall(stmt *ir.CallStmt, ef externalFunc, resul
	instrs = append(instrs, instruction.Call{Index: c.function(builtinDispatchers[len(stmt.Args)])})

	if ef.Decl.Result() != nil {
		instrs = append(instrs,
			instruction.TeeLocal{Index: c.local(stmt.Result)},
			instruction.I32Eqz{},
			instruction.BrIf{Index: 0},
		)
		instrs = append(instrs, instruction.TeeLocal{Index: c.local(stmt.Result)})
		instrs = append(instrs, instruction.I32Eqz{})
		instrs = append(instrs, instruction.BrIf{Index: 0})
	} else {
		instrs = append(instrs, instruction.Drop{})
	}
@@ -1685,7 +1678,7 @@ func (c *Compiler) genLocal() uint32 {
func (c *Compiler) function(name string) uint32 {
	fidx, ok := c.funcs[name]
	if !ok {
		panic("function not found: " + name)
		panic(fmt.Sprintf("function not found: %s", name))
	}
	return fidx
}

2
vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/bitvector.go
generated
vendored
@@ -36,7 +36,7 @@ func (vector *BitVector) Length() int {
// position of the last byte in the slice.
// This returns the bit that was shifted off of the last byte.
func shiftLower(bit byte, b []byte) byte {
	bit <<= 7
	bit = bit << 7
	for i := len(b) - 1; i >= 0; i-- {
		newByte := b[i] >> 1
		newByte |= bit

41
vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go
generated
vendored
@@ -146,7 +146,6 @@
package edittree

import (
	"errors"
	"fmt"
	"math/big"
	"sort"
@@ -336,13 +335,13 @@ func (e *EditTree) deleteChildValue(hash int) {
// Insert creates a new child of e, and returns the new child EditTree node.
func (e *EditTree) Insert(key, value *ast.Term) (*EditTree, error) {
	if e.value == nil {
		return nil, errors.New("deleted node encountered during insert operation")
		return nil, fmt.Errorf("deleted node encountered during insert operation")
	}
	if key == nil {
		return nil, errors.New("nil key provided for insert operation")
		return nil, fmt.Errorf("nil key provided for insert operation")
	}
	if value == nil {
		return nil, errors.New("nil value provided for insert operation")
		return nil, fmt.Errorf("nil value provided for insert operation")
	}

	switch x := e.value.Value.(type) {
@@ -368,7 +367,7 @@ func (e *EditTree) Insert(key, value *ast.Term) (*EditTree, error) {
			return nil, err
		}
		if idx < 0 || idx > e.insertions.Length() {
			return nil, errors.New("index for array insertion out of bounds")
			return nil, fmt.Errorf("index for array insertion out of bounds")
		}
		return e.unsafeInsertArray(idx, value), nil
	default:
@@ -458,10 +457,10 @@ func (e *EditTree) unsafeInsertArray(idx int, value *ast.Term) *EditTree {
// already present in e. It then returns the deleted child EditTree node.
func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) {
	if e.value == nil {
		return nil, errors.New("deleted node encountered during delete operation")
		return nil, fmt.Errorf("deleted node encountered during delete operation")
	}
	if key == nil {
		return nil, errors.New("nil key provided for delete operation")
		return nil, fmt.Errorf("nil key provided for delete operation")
	}

	switch e.value.Value.(type) {
@@ -532,7 +531,7 @@ func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) {
			return nil, err
		}
		if idx < 0 || idx > e.insertions.Length()-1 {
			return nil, errors.New("index for array delete out of bounds")
			return nil, fmt.Errorf("index for array delete out of bounds")
		}

		// Collect insertion indexes above the delete site for rewriting.
@@ -553,14 +552,14 @@ func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) {
		}
		// Do rewrites to clear out the newly-removed element.
		e.deleteChildValue(idx)
		for i := range rewritesScalars {
		for i := 0; i < len(rewritesScalars); i++ {
			originalIdx := rewritesScalars[i]
			rewriteIdx := rewritesScalars[i] - 1
			v := e.childScalarValues[originalIdx]
			e.deleteChildValue(originalIdx)
			e.setChildScalarValue(rewriteIdx, v)
		}
		for i := range rewritesComposites {
		for i := 0; i < len(rewritesComposites); i++ {
			originalIdx := rewritesComposites[i]
			rewriteIdx := rewritesComposites[i] - 1
			v := e.childCompositeValues[originalIdx]
@@ -592,7 +591,7 @@ func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) {
//gcassert:inline
func sumZeroesBelowIndex(index int, bv *bitvector.BitVector) int {
	zeroesSeen := 0
	for i := range index {
	for i := 0; i < index; i++ {
		if bv.Element(i) == 0 {
			zeroesSeen++
		}
@@ -602,7 +601,7 @@ func sumZeroesBelowIndex(index int, bv *bitvector.BitVector) int {

func findIndexOfNthZero(n int, bv *bitvector.BitVector) (int, bool) {
	zeroesSeen := 0
	for i := range bv.Length() {
	for i := 0; i < bv.Length(); i++ {
		if bv.Element(i) == 0 {
			zeroesSeen++
		}
@@ -638,7 +637,7 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) {
	}
	// 1+ path segment case.
	if e.value == nil {
		return nil, errors.New("nil value encountered where composite value was expected")
		return nil, fmt.Errorf("nil value encountered where composite value was expected")
	}

	// Switch behavior based on types.
@@ -832,7 +831,7 @@ func (e *EditTree) Render() *ast.Term {
		// original array. We build a new Array with modified/deleted keys.
		out := make([]*ast.Term, 0, e.insertions.Length())
		eIdx := 0
		for i := range e.insertions.Length() {
		for i := 0; i < e.insertions.Length(); i++ {
			// If the index == 0, that indicates we should look up the next
			// surviving original element.
			// If the index == 1, that indicates we should look up that
@@ -880,7 +879,7 @@ func (e *EditTree) Render() *ast.Term {
// Returns the inserted EditTree node.
func (e *EditTree) InsertAtPath(path ast.Ref, value *ast.Term) (*EditTree, error) {
	if value == nil {
		return nil, errors.New("cannot insert nil value into EditTree")
		return nil, fmt.Errorf("cannot insert nil value into EditTree")
	}

	if len(path) == 0 {
@@ -911,7 +910,7 @@ func (e *EditTree) DeleteAtPath(path ast.Ref) (*EditTree, error) {
	// Root document case:
	if len(path) == 0 {
		if e.value == nil {
			return nil, errors.New("deleted node encountered during delete operation")
			return nil, fmt.Errorf("deleted node encountered during delete operation")
		}
		e.value = nil
		e.childKeys = nil
@@ -1047,7 +1046,7 @@ func toIndex(arrayLength int, term *ast.Term) (int, error) {
	switch v := term.Value.(type) {
	case ast.Number:
		if i, ok = v.Int(); !ok {
			return 0, errors.New("invalid number type for indexing")
			return 0, fmt.Errorf("invalid number type for indexing")
		}
	case ast.String:
		if v == "-" {
@@ -1055,13 +1054,13 @@ func toIndex(arrayLength int, term *ast.Term) (int, error) {
		}
		num := ast.Number(v)
		if i, ok = num.Int(); !ok {
			return 0, errors.New("invalid string for indexing")
			return 0, fmt.Errorf("invalid string for indexing")
		}
		if v != "0" && strings.HasPrefix(string(v), "0") {
			return 0, errors.New("leading zeros are not allowed in JSON paths")
			return 0, fmt.Errorf("leading zeros are not allowed in JSON paths")
		}
	default:
		return 0, errors.New("invalid type for indexing")
		return 0, fmt.Errorf("invalid type for indexing")
	}

	return i, nil
@@ -1180,5 +1179,5 @@ func (e *EditTree) Filter(paths []ast.Ref) *ast.Term {
type termSlice []*ast.Term

func (s termSlice) Less(i, j int) bool { return ast.Compare(s[i].Value, s[j].Value) < 0 }
func (s termSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s termSlice) Swap(i, j int)      { x := s[i]; s[i] = s[j]; s[j] = x }
func (s termSlice) Len() int           { return len(s) }

8 vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go (generated, vendored)

@@ -19,14 +19,12 @@ func FilterFutureImports(imps []*ast.Import) []*ast.Import {
return ret
}

var keywordsTerm = ast.StringTerm("keywords")

// IsAllFutureKeywords returns true if the passed *ast.Import is `future.keywords`
func IsAllFutureKeywords(imp *ast.Import) bool {
path := imp.Path.Value.(ast.Ref)
return len(path) == 2 &&
ast.FutureRootDocument.Equal(path[0]) &&
path[1].Equal(keywordsTerm)
path[1].Equal(ast.StringTerm("keywords"))
}

// IsFutureKeyword returns true if the passed *ast.Import is `future.keywords.{kw}`
@@ -34,7 +32,7 @@ func IsFutureKeyword(imp *ast.Import, kw string) bool {
path := imp.Path.Value.(ast.Ref)
return len(path) == 3 &&
ast.FutureRootDocument.Equal(path[0]) &&
path[1].Equal(keywordsTerm) &&
path[1].Equal(ast.StringTerm("keywords")) &&
path[2].Equal(ast.StringTerm(kw))
}

@@ -42,7 +40,7 @@ func WhichFutureKeyword(imp *ast.Import) (string, bool) {
path := imp.Path.Value.(ast.Ref)
if len(path) == 3 &&
ast.FutureRootDocument.Equal(path[0]) &&
path[1].Equal(keywordsTerm) {
path[1].Equal(ast.StringTerm("keywords")) {
if str, ok := path[2].Value.(ast.String); ok {
return string(str), true
}

3 vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go (generated, vendored)

@@ -5,7 +5,6 @@
package future

import (
"errors"
"fmt"

"github.com/open-policy-agent/opa/v1/ast"
@@ -34,7 +33,7 @@ func ParserOptionsFromFutureImports(imports []*ast.Import) (ast.ParserOptions, e
}
if len(path) == 3 {
if imp.Alias != "" {
return popts, errors.New("alias not supported")
return popts, fmt.Errorf("alias not supported")
}
popts.FutureKeywords = append(popts.FutureKeywords, string(path[2].Value.(ast.String)))
}

12 vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/dumper.go (generated, vendored)

@@ -40,10 +40,10 @@ func (d *dumper) dump(v reflect.Value) {
d.WriteString("false")
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
d.WriteString(strconv.FormatInt(v.Int(), 10))
d.WriteString(fmt.Sprintf("%d", v.Int()))

case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
d.WriteString(strconv.FormatUint(v.Uint(), 10))
d.WriteString(fmt.Sprintf("%d", v.Uint()))

case reflect.Float32, reflect.Float64:
d.WriteString(fmt.Sprintf("%.2f", v.Float()))
@@ -88,7 +88,7 @@ func typeName(t reflect.Type) string {
func (d *dumper) dumpArray(v reflect.Value) {
d.WriteString("[" + typeName(v.Type().Elem()) + "]")

for i := range v.Len() {
for i := 0; i < v.Len(); i++ {
d.nl()
d.WriteString("- ")
d.indent++
@@ -102,7 +102,7 @@ func (d *dumper) dumpStruct(v reflect.Value) {
d.indent++

typ := v.Type()
for i := range v.NumField() {
for i := 0; i < v.NumField(); i++ {
f := v.Field(i)
if typ.Field(i).Tag.Get("dump") == "-" {
continue
@@ -132,13 +132,13 @@ func isZero(v reflect.Value) bool {
return true
}
z := true
for i := range v.Len() {
for i := 0; i < v.Len(); i++ {
z = z && isZero(v.Index(i))
}
return z
case reflect.Struct:
z := true
for i := range v.NumField() {
for i := 0; i < v.NumField(); i++ {
z = z && isZero(v.Field(i))
}
return z

@@ -51,7 +51,7 @@ func init() {
}
var via string
if len(fragmentNames) != 0 {
via = " via " + strings.Join(fragmentNames, ", ")
via = fmt.Sprintf(" via %s", strings.Join(fragmentNames, ", "))
}
addError(
Message(`Cannot spread fragment "%s" within itself%s.`, spreadName, via),

@@ -159,6 +159,8 @@ func unexpectedTypeMessageOnly(v *ast.Value) ErrorOption {
return Message(`Float cannot represent non numeric value: %s`, v.String())
case "ID", "ID!":
return Message(`ID cannot represent a non-string and non-integer value: %s`, v.String())
//case "Enum":
//	return Message(`Enum "%s" cannot represent non-enum value: %s`, v.ExpectedType.String(), v.String())
default:
if v.Definition.Kind == ast.Enum {
return Message(`Enum "%s" cannot represent non-enum value: %s.`, v.ExpectedType.String(), v.String())

7 vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/vars.go (generated, vendored)

@@ -2,7 +2,6 @@ package validator

import (
"encoding/json"
"errors"
"fmt"
"reflect"
"strconv"
@@ -12,7 +11,7 @@ import (
"github.com/open-policy-agent/opa/internal/gqlparser/gqlerror"
)

var ErrUnexpectedType = errors.New("Unexpected Type")
var ErrUnexpectedType = fmt.Errorf("Unexpected Type")

// VariableValues coerces and validates variable values
func VariableValues(schema *ast.Schema, op *ast.OperationDefinition, variables map[string]interface{}) (map[string]interface{}, error) {
@@ -107,7 +106,7 @@ func (v *varValidator) validateVarType(typ *ast.Type, val reflect.Value) (reflec
slc = reflect.Append(slc, val)
val = slc
}
for i := range val.Len() {
for i := 0; i < val.Len(); i++ {
resetPath()
v.path = append(v.path, ast.PathIndex(i))
field := val.Index(i)
@@ -223,7 +222,7 @@ func (v *varValidator) validateVarType(typ *ast.Type, val reflect.Value) (reflec
if fieldDef.Type.NonNull && field.IsNil() {
return val, gqlerror.ErrorPathf(v.path, "cannot be null")
}
// allow null object field and skip it
//allow null object field and skip it
if !fieldDef.Type.NonNull && field.IsNil() {
continue
}

4 vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go (generated, vendored)

@@ -37,8 +37,8 @@ func ParsePatchPathEscaped(str string) (path storage.Path, ok bool) {
// the substitutions in this order, an implementation avoids the error of
// turning '~01' first into '~1' and then into '/', which would be
// incorrect (the string '~01' correctly becomes '~1' after transformation)."
path[i] = strings.ReplaceAll(path[i], "~1", "/")
path[i] = strings.ReplaceAll(path[i], "~0", "~")
path[i] = strings.Replace(path[i], "~1", "/", -1)
path[i] = strings.Replace(path[i], "~0", "~", -1)
}

return

2 vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/jwk.go (generated, vendored)

@@ -114,7 +114,7 @@ func parse(jwkSrc string) (*Set, error) {

// ParseBytes parses JWK from the incoming byte buffer.
func ParseBytes(buf []byte) (*Set, error) {
return parse(string(buf))
return parse(string(buf[:]))
}

// ParseString parses JWK from the incoming string.

5 vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/key_ops.go (generated, vendored)

@@ -2,7 +2,6 @@ package jwk

import (
"encoding/json"
"errors"
"fmt"
)

@@ -54,12 +53,12 @@ func (keyOperationList *KeyOperationList) UnmarshalJSON(data []byte) error {
var tempKeyOperationList []string
err := json.Unmarshal(data, &tempKeyOperationList)
if err != nil {
return errors.New("invalid key operation")
return fmt.Errorf("invalid key operation")
}
for _, value := range tempKeyOperationList {
_, ok := keyOps[value]
if !ok {
return errors.New("unknown key operation")
return fmt.Errorf("unknown key operation")
}
*keyOperationList = append(*keyOperationList, KeyOperation(value))
}

4 vendor/github.com/open-policy-agent/opa/internal/jwx/jws/jws.go (generated, vendored)

@@ -111,7 +111,7 @@ func Verify(buf []byte, alg jwa.SignatureAlgorithm, key interface{}) (ret []byte
return nil, errors.New(`attempt to verify empty buffer`)
}

parts, err := SplitCompact(string(buf))
parts, err := SplitCompact(string(buf[:]))
if err != nil {
return nil, fmt.Errorf("failed extract from compact serialization format: %w", err)
}
@@ -164,7 +164,7 @@ func VerifyWithJWKSet(buf []byte, keyset *jwk.Set) (payload []byte, err error) {

// ParseByte parses a JWS value serialized via compact serialization and provided as []byte.
func ParseByte(jwsCompact []byte) (m *Message, err error) {
return parseCompact(string(jwsCompact))
return parseCompact(string(jwsCompact[:]))
}

// ParseString parses a JWS value serialized via compact serialization and provided as string.

5 vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/sign.go (generated, vendored)

@@ -3,7 +3,6 @@ package sign
import (
"crypto/x509"
"encoding/pem"
"errors"
"fmt"

"github.com/open-policy-agent/opa/internal/jwx/jwa"
@@ -31,7 +30,7 @@ func GetSigningKey(key string, alg jwa.SignatureAlgorithm) (interface{}, error)
case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512:
block, _ := pem.Decode([]byte(key))
if block == nil {
return nil, errors.New("failed to parse PEM block containing the key")
return nil, fmt.Errorf("failed to parse PEM block containing the key")
}

priv, err := x509.ParsePKCS1PrivateKey(block.Bytes)
@@ -46,7 +45,7 @@ func GetSigningKey(key string, alg jwa.SignatureAlgorithm) (interface{}, error)
case jwa.ES256, jwa.ES384, jwa.ES512:
block, _ := pem.Decode([]byte(key))
if block == nil {
return nil, errors.New("failed to parse PEM block containing the key")
return nil, fmt.Errorf("failed to parse PEM block containing the key")
}

priv, err := x509.ParseECPrivateKey(block.Bytes)

3 vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/verify.go (generated, vendored)

@@ -5,7 +5,6 @@ import (
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"

"github.com/open-policy-agent/opa/internal/jwx/jwa"
@@ -34,7 +33,7 @@ func GetSigningKey(key string, alg jwa.SignatureAlgorithm) (interface{}, error)
case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512, jwa.ES256, jwa.ES384, jwa.ES512:
block, _ := pem.Decode([]byte(key))
if block == nil {
return nil, errors.New("failed to parse PEM block containing the key")
return nil, fmt.Errorf("failed to parse PEM block containing the key")
}

pub, err := x509.ParsePKIXPublicKey(block.Bytes)

16 vendor/github.com/open-policy-agent/opa/internal/planner/planner.go (generated, vendored)

@@ -223,7 +223,7 @@ func (p *Planner) planRules(rules []*ast.Rule) (string, error) {
}

// Initialize parameters for functions.
for range len(rules[0].Head.Args) {
for i := 0; i < len(rules[0].Head.Args); i++ {
fn.Params = append(fn.Params, p.newLocal())
}

@@ -385,7 +385,7 @@ func (p *Planner) planRules(rules []*ast.Rule) (string, error) {
return nil
})
default:
return errors.New("illegal rule kind")
return fmt.Errorf("illegal rule kind")
}
})
})
@@ -497,6 +497,7 @@ func (p *Planner) planDotOr(obj ir.Local, key ir.Operand, or stmtFactory, iter p

func (p *Planner) planNestedObjects(obj ir.Local, ref ast.Ref, iter planLocalIter) error {
if len(ref) == 0 {
//return fmt.Errorf("nested object construction didn't create object")
return iter(obj)
}

@@ -990,7 +991,8 @@ func (p *Planner) planExprCall(e *ast.Expr, iter planiter) error {
op := e.Operator()

if replacement := p.mocks.Lookup(operator); replacement != nil {
if r, ok := replacement.Value.(ast.Ref); ok {
switch r := replacement.Value.(type) {
case ast.Ref:
if !r.HasPrefix(ast.DefaultRootRef) && !r.HasPrefix(ast.InputRootRef) {
// replacement is builtin
operator = r.String()
@@ -1145,7 +1147,7 @@ func (p *Planner) planExprCallFunc(name string, arity int, void bool, operands [
})

default:
return errors.New("impossible replacement, arity mismatch")
return fmt.Errorf("impossible replacement, arity mismatch")
}
}

@@ -1171,7 +1173,7 @@ func (p *Planner) planExprCallValue(value *ast.Term, arity int, operands []*ast.
})
})
default:
return errors.New("impossible replacement, arity mismatch")
return fmt.Errorf("impossible replacement, arity mismatch")
}
}

@@ -1748,7 +1750,7 @@ func (p *Planner) planRef(ref ast.Ref, iter planiter) error {

head, ok := ref[0].Value.(ast.Var)
if !ok {
return errors.New("illegal ref: non-var head")
return fmt.Errorf("illegal ref: non-var head")
}

if head.Compare(ast.DefaultRootDocument.Value) == 0 {
@@ -1765,7 +1767,7 @@ func (p *Planner) planRef(ref ast.Ref, iter planiter) error {

p.ltarget, ok = p.vars.GetOp(head)
if !ok {
return errors.New("illegal ref: unsafe head")
return fmt.Errorf("illegal ref: unsafe head")
}

return p.planRefRec(ref, 1, iter)

4 vendor/github.com/open-policy-agent/opa/internal/planner/rules.go (generated, vendored)

@@ -111,7 +111,7 @@ func (t *ruletrie) Rules() []*ast.Rule {

func (t *ruletrie) Push(key ast.Ref) {
node := t
for i := range len(key) - 1 {
for i := 0; i < len(key)-1; i++ {
node = node.Get(key[i].Value)
if node == nil {
return
@@ -123,7 +123,7 @@ func (t *ruletrie) Push(key ast.Ref) {

func (t *ruletrie) Pop(key ast.Ref) {
node := t
for i := range len(key) - 1 {
for i := 0; i < len(key)-1; i++ {
node = node.Get(key[i].Value)
if node == nil {
return

4 vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/compare.go (generated, vendored)

@@ -1,6 +1,6 @@
package crypto

import "errors"
import "fmt"

// ConstantTimeByteCompare is a constant-time byte comparison of x and y. This function performs an absolute comparison
// if the two byte slices assuming they represent a big-endian number.
@@ -11,7 +11,7 @@ import "errors"
// +1 if x > y
func ConstantTimeByteCompare(x, y []byte) (int, error) {
if len(x) != len(y) {
return 0, errors.New("slice lengths do not match")
return 0, fmt.Errorf("slice lengths do not match")
}

xLarger, yLarger := 0, 0

3 vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/ecc.go (generated, vendored)

@@ -7,7 +7,6 @@ import (
"crypto/hmac"
"encoding/asn1"
"encoding/binary"
"errors"
"fmt"
"hash"
"math"
@@ -83,7 +82,7 @@ func HMACKeyDerivation(hash func() hash.Hash, bitLen int, key []byte, label, con

// verify the requested bit length is not larger then the length encoding size
if int64(bitLen) > 0x7FFFFFFF {
return nil, errors.New("bitLen is greater than 32-bits")
return nil, fmt.Errorf("bitLen is greater than 32-bits")
}

fixedInput := bytes.NewBuffer(nil)

3 vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go (generated, vendored)

@@ -8,7 +8,6 @@ import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
@@ -190,7 +189,7 @@ func SignV4(headers map[string][]string, method string, theURL *url.URL, body []
authHeader := "AWS4-HMAC-SHA256 Credential=" + awsCreds.AccessKey + "/" + dateNow
authHeader += "/" + awsCreds.RegionName + "/" + service + "/aws4_request,"
authHeader += "SignedHeaders=" + headerList + ","
authHeader += "Signature=" + hex.EncodeToString(signature)
authHeader += "Signature=" + fmt.Sprintf("%x", signature)

return authHeader, awsHeaders
}

10 vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go (generated, vendored)

@@ -9,7 +9,7 @@ import (
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"math/big"
@@ -107,7 +107,7 @@ func deriveKeyFromAccessKeyPair(accessKey, secretKey string) (*ecdsa.PrivateKey,

counter++
if counter > 0xFF {
return nil, errors.New("exhausted single byte external counter")
return nil, fmt.Errorf("exhausted single byte external counter")
}
}
d = d.Add(d, one)
@@ -146,7 +146,7 @@ func retrievePrivateKey(symmetric Credentials) (v4aCredentials, error) {

privateKey, err := deriveKeyFromAccessKeyPair(symmetric.AccessKey, symmetric.SecretKey)
if err != nil {
return v4aCredentials{}, errors.New("failed to derive asymmetric key from credentials")
return v4aCredentials{}, fmt.Errorf("failed to derive asymmetric key from credentials")
}

creds := v4aCredentials{
@@ -216,7 +216,7 @@ func (s *httpSigner) Build() (signedRequest, error) {

signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength)

rawQuery := strings.ReplaceAll(query.Encode(), "+", "%20")
rawQuery := strings.Replace(query.Encode(), "+", "%20", -1)

canonicalURI := v4Internal.GetURIPath(req.URL)

@@ -314,7 +314,7 @@ func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, he
var canonicalHeaders strings.Builder
n := len(headers)
const colon = ':'
for i := range n {
for i := 0; i < n; i++ {
if headers[i] == hostHeader {
canonicalHeaders.WriteString(hostHeader)
canonicalHeaders.WriteRune(colon)

2 vendor/github.com/open-policy-agent/opa/internal/strings/strings.go (generated, vendored)

@@ -57,7 +57,7 @@ func TruncateFilePaths(maxIdealWidth, maxWidth int, path ...string) (map[string]
}

// Drop the overall length down to match our substitution
longestLocation -= (len(lcs) - 3)
longestLocation = longestLocation - (len(lcs) - 3)
}

return result, longestLocation

4 vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go (generated, vendored)

@@ -148,6 +148,8 @@ func (t *parser) key(data map[string]interface{}) error {
return err
}
return fmt.Errorf("key %q has no value", string(k))
//set(data, string(k), "")
//return err
case last == '[':
// We are in a list index context, so we need to set an index.
i, err := t.keyIndex()
@@ -166,7 +168,7 @@ func (t *parser) key(data map[string]interface{}) error {
set(data, kk, list)
return err
case last == '=':
// End of key. Consume =, Get value.
//End of key. Consume =, Get value.
// FIXME: Get value list first
vl, e := t.valList()
switch e {

51 vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go (generated, vendored)

@@ -7,7 +7,6 @@ package encoding
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"

@@ -106,7 +105,7 @@ func readMagic(r io.Reader) error {
if err := binary.Read(r, binary.LittleEndian, &v); err != nil {
return err
} else if v != constant.Magic {
return errors.New("illegal magic value")
return fmt.Errorf("illegal magic value")
}
return nil
}
@@ -116,7 +115,7 @@ func readVersion(r io.Reader) error {
if err := binary.Read(r, binary.LittleEndian, &v); err != nil {
return err
} else if v != constant.Version {
return errors.New("illegal wasm version")
return fmt.Errorf("illegal wasm version")
}
return nil
}
@@ -200,7 +199,7 @@ func readSections(r io.Reader, m *module.Module) error {
return fmt.Errorf("code section: %w", err)
}
default:
return errors.New("illegal section id")
return fmt.Errorf("illegal section id")
}
}
}
@@ -270,7 +269,7 @@ func readNameMap(r io.Reader) ([]module.NameMap, error) {
return nil, err
}
nm := make([]module.NameMap, n)
for i := range n {
for i := uint32(0); i < n; i++ {
var name string
id, err := leb128.ReadVarUint32(r)
if err != nil {
@@ -290,7 +289,7 @@ func readNameSectionLocals(r io.Reader, s *module.NameSection) error {
if err != nil {
return err
}
for range n {
for i := uint32(0); i < n; i++ {
id, err := leb128.ReadVarUint32(r) // func index
if err != nil {
return err
@@ -327,7 +326,7 @@ func readTypeSection(r io.Reader, s *module.TypeSection) error {
return err
}

for range n {
for i := uint32(0); i < n; i++ {

var ftype module.FunctionType
if err := readFunctionType(r, &ftype); err != nil {
@@ -347,7 +346,7 @@ func readImportSection(r io.Reader, s *module.ImportSection) error {
return err
}

for range n {
for i := uint32(0); i < n; i++ {

var imp module.Import

@@ -368,14 +367,14 @@ func readTableSection(r io.Reader, s *module.TableSection) error {
return err
}

for range n {
for i := uint32(0); i < n; i++ {

var table module.Table

if elem, err := readByte(r); err != nil {
return err
} else if elem != constant.ElementTypeAnyFunc {
return errors.New("illegal element type")
return fmt.Errorf("illegal element type")
}

table.Type = types.Anyfunc
@@ -397,7 +396,7 @@ func readMemorySection(r io.Reader, s *module.MemorySection) error {
return err
}

for range n {
for i := uint32(0); i < n; i++ {

var mem module.Memory

@@ -418,7 +417,7 @@ func readGlobalSection(r io.Reader, s *module.GlobalSection) error {
return err
}

for range n {
for i := uint32(0); i < n; i++ {

var global module.Global

@@ -443,7 +442,7 @@ func readExportSection(r io.Reader, s *module.ExportSection) error {
return err
}

for range n {
for i := uint32(0); i < n; i++ {

var exp module.Export

@@ -464,7 +463,7 @@ func readElementSection(r io.Reader, s *module.ElementSection) error {
return err
}

for range n {
for i := uint32(0); i < n; i++ {

var seg module.ElementSegment

@@ -485,7 +484,7 @@ func readDataSection(r io.Reader, s *module.DataSection) error {
return err
}

for range n {
for i := uint32(0); i < n; i++ {

var seg module.DataSegment

@@ -506,7 +505,7 @@ func readRawCodeSection(r io.Reader, s *module.RawCodeSection) error {
return err
}

for range n {
for i := uint32(0); i < n; i++ {
var seg module.RawCodeSegment

if err := readRawCodeSegment(r, &seg); err != nil {
@@ -548,7 +547,7 @@ func readGlobal(r io.Reader, global *module.Global) error {
if b == 1 {
global.Mutable = true
} else if b != 0 {
return errors.New("illegal mutability flag")
return fmt.Errorf("illegal mutability flag")
}

return readConstantExpr(r, &global.Init)
@@ -585,7 +584,7 @@ func readImport(r io.Reader, imp *module.Import) error {
if elem, err := readByte(r); err != nil {
return err
} else if elem != constant.ElementTypeAnyFunc {
return errors.New("illegal element type")
return fmt.Errorf("illegal element type")
}
desc := module.TableImport{
Type: types.Anyfunc,
@@ -618,12 +617,12 @@ func readImport(r io.Reader, imp *module.Import) error {
if b == 1 {
desc.Mutable = true
} else if b != 0 {
return errors.New("illegal mutability flag")
return fmt.Errorf("illegal mutability flag")
}
return nil
}

return errors.New("illegal import descriptor type")
return fmt.Errorf("illegal import descriptor type")
}

func readExport(r io.Reader, exp *module.Export) error {
@@ -647,7 +646,7 @@ func readExport(r io.Reader, exp *module.Export) error {
case constant.ExportDescGlobal:
exp.Descriptor.Type = module.GlobalExportType
default:
return errors.New("illegal export descriptor type")
return fmt.Errorf("illegal export descriptor type")
}

exp.Descriptor.Index, err = leb128.ReadVarUint32(r)
@@ -728,7 +727,7 @@ func readExpr(r io.Reader, expr *module.Expr) (err error) {
case error:
err = r
default:
err = errors.New("unknown panic")
err = fmt.Errorf("unknown panic")
}
}
}()
@@ -824,7 +823,7 @@ func readLimits(r io.Reader, l *module.Limit) error {
}
l.Max = &maxLim
} else if b != 0 {
return errors.New("illegal limit flag")
return fmt.Errorf("illegal limit flag")
}

return nil
@@ -839,7 +838,7 @@ func readLocals(r io.Reader, locals *[]module.LocalDeclaration) error {

ret := make([]module.LocalDeclaration, n)

for i := range n {
for i := uint32(0); i < n; i++ {
if err := readVarUint32(r, &ret[i].Count); err != nil {
return err
}
@@ -889,7 +888,7 @@ func readVarUint32Vector(r io.Reader, v *[]uint32) error {

ret := make([]uint32, n)

for i := range n {
for i := uint32(0); i < n; i++ {
if err := readVarUint32(r, &ret[i]); err != nil {
return err
}
@@ -908,7 +907,7 @@ func readValueTypeVector(r io.Reader, v *[]types.ValueType) error {

ret := make([]types.ValueType, n)

for i := range n {
for i := uint32(0); i < n; i++ {
if err := readValueType(r, &ret[i]); err != nil {
return err
}

5 vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/writer.go (generated, vendored)

@@ -7,7 +7,6 @@ package encoding
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
@@ -261,7 +260,7 @@ func writeTableSection(w io.Writer, s module.TableSection) error {
return err
}
default:
return errors.New("illegal table element type")
return fmt.Errorf("illegal table element type")
}
if err := writeLimits(&buf, table.Lim); err != nil {
return err
@@ -589,7 +588,7 @@ func writeImport(w io.Writer, imp module.Import) error {
}
return writeByte(w, constant.Const)
default:
return errors.New("illegal import descriptor type")
return fmt.Errorf("illegal import descriptor type")
}
}

100 vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go (generated, vendored)

@@ -8,7 +8,7 @@ import (
"encoding/json"
"fmt"
"net/url"
"slices"
"sort"
"strings"

"github.com/open-policy-agent/opa/internal/deepcopy"
@@ -18,32 +18,12 @@ import (

const (
annotationScopePackage = "package"
annotationScopeImport = "import"
annotationScopeRule = "rule"
annotationScopeDocument = "document"
annotationScopeSubpackages = "subpackages"
)

var (
scopeTerm = StringTerm("scope")
titleTerm = StringTerm("title")
entrypointTerm = StringTerm("entrypoint")
descriptionTerm = StringTerm("description")
organizationsTerm = StringTerm("organizations")
authorsTerm = StringTerm("authors")
relatedResourcesTerm = StringTerm("related_resources")
schemasTerm = StringTerm("schemas")
customTerm = StringTerm("custom")
refTerm = StringTerm("ref")
nameTerm = StringTerm("name")
emailTerm = StringTerm("email")
schemaTerm = StringTerm("schema")
definitionTerm = StringTerm("definition")
documentTerm = StringTerm(annotationScopeDocument)
packageTerm = StringTerm(annotationScopePackage)
ruleTerm = StringTerm(annotationScopeRule)
subpackagesTerm = StringTerm(annotationScopeSubpackages)
)

type (
// Annotations represents metadata attached to other AST nodes such as rules.
Annotations struct {
@@ -311,6 +291,7 @@ func (ar *AnnotationsRef) MarshalJSON() ([]byte, error) {
}

func scopeCompare(s1, s2 string) int {

o1 := scopeOrder(s1)
o2 := scopeOrder(s2)

@@ -330,7 +311,8 @@ func scopeCompare(s1, s2 string) int {
}

func scopeOrder(s string) int {
if s == annotationScopeRule {
switch s {
case annotationScopeRule:
return 1
}
return 0
@@ -343,7 +325,7 @@ func compareAuthors(a, b []*AuthorAnnotation) int {
return -1
}

for i := range a {
for i := 0; i < len(a); i++ {
if cmp := a[i].Compare(b[i]); cmp != 0 {
return cmp
}
@@ -359,8 +341,8 @@ func compareRelatedResources(a, b []*RelatedResourceAnnotation) int {
return -1
}

for i := range a {
if cmp := a[i].Compare(b[i]); cmp != 0 {
for i := 0; i < len(a); i++ {
if cmp := strings.Compare(a[i].String(), b[i].String()); cmp != 0 {
return cmp
}
}
@@ -374,7 +356,7 @@ func compareSchemas(a, b []*SchemaAnnotation) int {
maxLen = len(b)
}

for i := range maxLen {
for i := 0; i < maxLen; i++ {
if cmp := a[i].Compare(b[i]); cmp != 0 {
return cmp
}
@@ -396,7 +378,7 @@ func compareStringLists(a, b []string) int {
return -1
}

for i := range a {
for i := 0; i < len(a); i++ {
if cmp := strings.Compare(a[i], b[i]); cmp != 0 {
return cmp
}
@@ -427,9 +409,7 @@ func (a *Annotations) Copy(node Node) *Annotations {
cpy.Schemas[i] = a.Schemas[i].Copy()
}

if a.Custom != nil {
cpy.Custom = deepcopy.Map(a.Custom)
}
cpy.Custom = deepcopy.Map(a.Custom)

cpy.node = node

@@ -445,30 +425,19 @@ func (a *Annotations) toObject() (*Object, *Error) {
}

if len(a.Scope) > 0 {
switch a.Scope {
case annotationScopeDocument:
obj.Insert(scopeTerm, documentTerm)
case annotationScopePackage:
obj.Insert(scopeTerm, packageTerm)
case annotationScopeRule:
obj.Insert(scopeTerm, ruleTerm)
case annotationScopeSubpackages:
obj.Insert(scopeTerm, subpackagesTerm)
default:
obj.Insert(scopeTerm, StringTerm(a.Scope))
}
obj.Insert(StringTerm("scope"), StringTerm(a.Scope))
}

if len(a.Title) > 0 {
obj.Insert(titleTerm, StringTerm(a.Title))
obj.Insert(StringTerm("title"), StringTerm(a.Title))
}

if a.Entrypoint {
obj.Insert(entrypointTerm, InternedBooleanTerm(true))
obj.Insert(StringTerm("entrypoint"), BooleanTerm(true))
}

if len(a.Description) > 0 {
obj.Insert(descriptionTerm, StringTerm(a.Description))
obj.Insert(StringTerm("description"), StringTerm(a.Description))
}

if len(a.Organizations) > 0 {
@@ -476,19 +445,19 @@ func (a *Annotations) toObject() (*Object, *Error) {
for _, org := range a.Organizations {
orgs = append(orgs, StringTerm(org))
}
obj.Insert(organizationsTerm, ArrayTerm(orgs...))
obj.Insert(StringTerm("organizations"), ArrayTerm(orgs...))
}

if len(a.RelatedResources) > 0 {
rrs := make([]*Term, 0, len(a.RelatedResources))
for _, rr := range a.RelatedResources {
rrObj := NewObject(Item(refTerm, StringTerm(rr.Ref.String())))
rrObj := NewObject(Item(StringTerm("ref"), StringTerm(rr.Ref.String())))
if len(rr.Description) > 0 {
rrObj.Insert(descriptionTerm, StringTerm(rr.Description))
rrObj.Insert(StringTerm("description"), StringTerm(rr.Description))
}
rrs = append(rrs, NewTerm(rrObj))
}
obj.Insert(relatedResourcesTerm, ArrayTerm(rrs...))
obj.Insert(StringTerm("related_resources"), ArrayTerm(rrs...))
}

if len(a.Authors) > 0 {
@@ -496,14 +465,14 @@ func (a *Annotations) toObject() (*Object, *Error) {
for _, author := range a.Authors {
aObj := NewObject()
if len(author.Name) > 0 {
aObj.Insert(nameTerm, StringTerm(author.Name))
aObj.Insert(StringTerm("name"), StringTerm(author.Name))
}
if len(author.Email) > 0 {
aObj.Insert(emailTerm, StringTerm(author.Email))
aObj.Insert(StringTerm("email"), StringTerm(author.Email))
}
as = append(as, NewTerm(aObj))
}
obj.Insert(authorsTerm, ArrayTerm(as...))
obj.Insert(StringTerm("authors"), ArrayTerm(as...))
}

if len(a.Schemas) > 0 {
@@ -511,21 +480,21 @@ func (a *Annotations) toObject() (*Object, *Error) {
for _, s := range a.Schemas {
sObj := NewObject()
if len(s.Path) > 0 {
sObj.Insert(pathTerm, NewTerm(s.Path.toArray()))
sObj.Insert(StringTerm("path"), NewTerm(s.Path.toArray()))
}
if len(s.Schema) > 0 {
sObj.Insert(schemaTerm, NewTerm(s.Schema.toArray()))
sObj.Insert(StringTerm("schema"), NewTerm(s.Schema.toArray()))
}
if s.Definition != nil {
def, err := InterfaceToValue(s.Definition)
if err != nil {
return nil, NewError(CompileErr, a.Location, "invalid definition in schema annotation: %s", err.Error())
}
sObj.Insert(definitionTerm, NewTerm(def))
sObj.Insert(StringTerm("definition"), NewTerm(def))
}
ss = append(ss, NewTerm(sObj))
}
obj.Insert(schemasTerm, ArrayTerm(ss...))
obj.Insert(StringTerm("schemas"), ArrayTerm(ss...))
}

if len(a.Custom) > 0 {
@@ -533,7 +502,7 @@ func (a *Annotations) toObject() (*Object, *Error) {
if err != nil {
return nil, NewError(CompileErr, a.Location, "invalid custom annotation %s", err.Error())
}
obj.Insert(customTerm, NewTerm(c))
obj.Insert(StringTerm("custom"), NewTerm(c))
}

return &obj, nil
@@ -594,11 +563,7 @@ func attachAnnotationsNodes(mod *Module) Errors {
case *Package:
a.Scope = annotationScopePackage
case *Import:
// Note that this isn't a valid scope, but set here so that the
// validate function called below can print an error message with
// a context that makes sense ("invalid scope: 'import'" instead of
// "invalid scope: '')
a.Scope = "import"
a.Scope = annotationScopeImport
}
}

@@ -716,6 +681,7 @@ func (s *SchemaAnnotation) Copy() *SchemaAnnotation {
// Compare returns an integer indicating if s is less than, equal to, or greater
// than other.
func (s *SchemaAnnotation) Compare(other *SchemaAnnotation) int {

if cmp := s.Path.Compare(other.Path); cmp != 0 {
return cmp
}
@@ -853,7 +819,9 @@ func (as *AnnotationSet) Flatten() FlatAnnotationsRefSet {
}

// Sort by path, then annotation location, for stable output
slices.SortStableFunc(refs, (*AnnotationsRef).Compare)
sort.SliceStable(refs, func(i, j int) bool {
return refs[i].Compare(refs[j]) < 0
})

return refs
}
@@ -885,8 +853,8 @@ func (as *AnnotationSet) Chain(rule *Rule) AnnotationsRefSet {

if len(refs) > 1 {
// Sort by annotation location; chain must start with annotations declared closest to rule, then going outward
slices.SortStableFunc(refs, func(a, b *AnnotationsRef) int {
return -a.Annotations.Location.Compare(b.Annotations.Location)
sort.SliceStable(refs, func(i, j int) bool {
return refs[i].Annotations.Location.Compare(refs[j].Annotations.Location) > 0
})
}

107 vendor/github.com/open-policy-agent/opa/v1/ast/builtins.go (generated, vendored)

@@ -299,9 +299,6 @@ var DefaultBuiltins = [...]*Builtin{
// Printing
Print,
InternalPrint,

// Testing
InternalTestCase,
}

// BuiltinMap provides a convenient mapping of built-in names to
@@ -489,10 +486,10 @@ var Minus = &Builtin{
Description: "Minus subtracts the second number from the first number or computes the difference between two sets.",
Decl: types.NewFunction(
types.Args(
types.Named("x", types.NewAny(types.N, types.SetOfAny)),
types.Named("y", types.NewAny(types.N, types.SetOfAny)),
types.Named("x", types.NewAny(types.N, types.NewSet(types.A))),
types.Named("y", types.NewAny(types.N, types.NewSet(types.A))),
),
types.Named("z", types.NewAny(types.N, types.SetOfAny)).Description("the difference of `x` and `y`"),
types.Named("z", types.NewAny(types.N, types.NewSet(types.A))).Description("the difference of `x` and `y`"),
),
Categories: category("sets", "numbers"),
}
@@ -674,10 +671,10 @@ var And = &Builtin{
Description: "Returns the intersection of two sets.",
Decl: types.NewFunction(
types.Args(
types.Named("x", types.SetOfAny).Description("the first set"),
types.Named("y", types.SetOfAny).Description("the second set"),
types.Named("x", types.NewSet(types.A)).Description("the first set"),
types.Named("y", types.NewSet(types.A)).Description("the second set"),
),
types.Named("z", types.SetOfAny).Description("the intersection of `x` and `y`"),
types.Named("z", types.NewSet(types.A)).Description("the intersection of `x` and `y`"),
),
Categories: sets,
}
@@ -689,10 +686,10 @@ var Or = &Builtin{
Description: "Returns the union of two sets.",
Decl: types.NewFunction(
types.Args(
types.Named("x", types.SetOfAny),
types.Named("y", types.SetOfAny),
types.Named("x", types.NewSet(types.A)),
types.Named("y", types.NewSet(types.A)),
),
types.Named("z", types.SetOfAny).Description("the union of `x` and `y`"),
types.Named("z", types.NewSet(types.A)).Description("the union of `x` and `y`"),
),
Categories: sets,
}
@@ -702,9 +699,9 @@ var Intersection = &Builtin{
Description: "Returns the intersection of the given input sets.",
Decl: types.NewFunction(
types.Args(
types.Named("xs", types.NewSet(types.SetOfAny)).Description("set of sets to intersect"),
types.Named("xs", types.NewSet(types.NewSet(types.A))).Description("set of sets to intersect"),
),
types.Named("y", types.SetOfAny).Description("the intersection of all `xs` sets"),
types.Named("y", types.NewSet(types.A)).Description("the intersection of all `xs` sets"),
),
Categories: sets,
}
@@ -714,9 +711,9 @@ var Union = &Builtin{
Description: "Returns the union of the given input sets.",
Decl: types.NewFunction(
types.Args(
types.Named("xs", types.NewSet(types.SetOfAny)).Description("set of sets to merge"),
types.Named("xs", types.NewSet(types.NewSet(types.A))).Description("set of sets to merge"),
),
types.Named("y", types.SetOfAny).Description("the union of all `xs` sets"),
types.Named("y", types.NewSet(types.A)).Description("the union of all `xs` sets"),
),
Categories: sets,
}
@@ -733,7 +730,7 @@ var Count = &Builtin{
Decl: types.NewFunction(
types.Args(
types.Named("collection", types.NewAny(
types.SetOfAny,
types.NewSet(types.A),
types.NewArray(nil, types.A),
types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
types.S,
@@ -750,7 +747,7 @@ var Sum = &Builtin{
Decl: types.NewFunction(
types.Args(
types.Named("collection", types.NewAny(
types.SetOfNum,
types.NewSet(types.N),
types.NewArray(nil, types.N),
)).Description("the set or array of numbers to sum"),
),
@@ -765,7 +762,7 @@ var Product = &Builtin{
Decl: types.NewFunction(
types.Args(
types.Named("collection", types.NewAny(
types.SetOfNum,
types.NewSet(types.N),
types.NewArray(nil, types.N),
)).Description("the set or array of numbers to multiply"),
),
@@ -780,7 +777,7 @@ var Max = &Builtin{
Decl: types.NewFunction(
types.Args(
types.Named("collection", types.NewAny(
types.SetOfAny,
types.NewSet(types.A),
types.NewArray(nil, types.A),
)).Description("the set or array to be searched"),
),
@@ -795,7 +792,7 @@ var Min = &Builtin{
Decl: types.NewFunction(
types.Args(
types.Named("collection", types.NewAny(
types.SetOfAny,
types.NewSet(types.A),
types.NewArray(nil, types.A),
)).Description("the set or array to be searched"),
),
@@ -815,7 +812,7 @@ var Sort = &Builtin{
types.Args(
types.Named("collection", types.NewAny(
types.NewArray(nil, types.A),
types.SetOfAny,
types.NewSet(types.A),
)).Description("the array or set to be sorted"),
),
types.Named("n", types.NewArray(nil, types.A)).Description("the sorted array"),
@@ -845,8 +842,8 @@ var ArraySlice = &Builtin{
Decl: types.NewFunction(
types.Args(
types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be sliced"),
types.Named("start", types.N).Description("the start index of the returned slice; if less than zero, it's clamped to 0"),
types.Named("stop", types.N).Description("the stop index of the returned slice; if larger than `count(arr)`, it's clamped to `count(arr)`"),
types.Named("start", types.NewNumber()).Description("the start index of the returned slice; if less than zero, it's clamped to 0"),
types.Named("stop", types.NewNumber()).Description("the stop index of the returned slice; if larger than `count(arr)`, it's clamped to `count(arr)`"),
),
types.Named("slice", types.NewArray(nil, types.A)).Description("the subslice of `array`, from `start` to `end`, including `arr[start]`, but excluding `arr[end]`"),
),
@@ -996,12 +993,12 @@ var AnyPrefixMatch = &Builtin{
types.Args(
types.Named("search", types.NewAny(
types.S,
types.SetOfStr,
types.NewSet(types.S),
types.NewArray(nil, types.S),
)).Description("search string(s)"),
types.Named("base", types.NewAny(
types.S,
types.SetOfStr,
types.NewSet(types.S),
types.NewArray(nil, types.S),
)).Description("base string(s)"),
),
@@ -1017,12 +1014,12 @@ var AnySuffixMatch = &Builtin{
types.Args(
types.Named("search", types.NewAny(
types.S,
types.SetOfStr,
types.NewSet(types.S),
types.NewArray(nil, types.S),
)).Description("search string(s)"),
types.Named("base", types.NewAny(
types.S,
types.SetOfStr,
types.NewSet(types.S),
types.NewArray(nil, types.S),
)).Description("base string(s)"),
),
@@ -1038,7 +1035,7 @@ var Concat = &Builtin{
types.Args(
types.Named("delimiter", types.S).Description("string to use as a delimiter"),
types.Named("collection", types.NewAny(
types.SetOfStr,
types.NewSet(types.S),
types.NewArray(nil, types.S),
)).Description("strings to join"),
),
@@ -1600,13 +1597,13 @@ var ObjectSubset = &Builtin{
types.Named("super", types.NewAny(types.NewObject(
nil,
types.NewDynamicProperty(types.A, types.A),
), types.SetOfAny,
), types.NewSet(types.A),
types.NewArray(nil, types.A),
)).Description("object to test if sub is a subset of"),
types.Named("sub", types.NewAny(types.NewObject(
nil,
types.NewDynamicProperty(types.A, types.A),
), types.SetOfAny,
), types.NewSet(types.A),
types.NewArray(nil, types.A),
)).Description("object to test if super is a superset of"),
),
@@ -1659,7 +1656,7 @@ var ObjectRemove = &Builtin{
)).Description("object to remove keys from"),
types.Named("keys", types.NewAny(
types.NewArray(nil, types.A),
types.SetOfAny,
types.NewSet(types.A),
types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
)).Description("keys to remove from x"),
),
@@ -1679,7 +1676,7 @@ var ObjectFilter = &Builtin{
)).Description("object to filter keys"),
types.Named("keys", types.NewAny(
types.NewArray(nil, types.A),
types.SetOfAny,
types.NewSet(types.A),
types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
)).Description("keys to keep in `object`"),
),
@@ -1710,7 +1707,7 @@ var ObjectKeys = &Builtin{
types.Args(
types.Named("object", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("object to get keys from"),
),
types.Named("value", types.SetOfAny).Description("set of `object`'s keys"),
types.Named("value", types.NewSet(types.A)).Description("set of `object`'s keys"),
),
}

@@ -1884,8 +1881,7 @@ var URLQueryEncodeObject = &Builtin{
types.NewAny(
types.S,
types.NewArray(nil, types.S),
types.SetOfStr,
),
types.NewSet(types.S)),
),
),
).Description("the object to encode"),
@@ -2576,13 +2572,13 @@ var ReachableBuiltin = &Builtin{
types.NewDynamicProperty(
types.A,
types.NewAny(
types.SetOfAny,
types.NewSet(types.A),
types.NewArray(nil, types.A)),
)),
).Description("object containing a set or array of neighboring vertices"),
types.Named("initial", types.NewAny(types.SetOfAny, types.NewArray(nil, types.A))).Description("set or array of root vertices"),
types.Named("initial", types.NewAny(types.NewSet(types.A), types.NewArray(nil, types.A))).Description("set or array of root vertices"),
),
types.Named("output", types.SetOfAny).Description("set of vertices reachable from the `initial` vertices in the directed `graph`"),
types.Named("output", types.NewSet(types.A)).Description("set of vertices reachable from the `initial` vertices in the directed `graph`"),
),
}

@@ -2596,11 +2592,11 @@ var ReachablePathsBuiltin = &Builtin{
types.NewDynamicProperty(
types.A,
types.NewAny(
types.SetOfAny,
types.NewSet(types.A),
types.NewArray(nil, types.A)),
)),
).Description("object containing a set or array of root vertices"),
types.Named("initial", types.NewAny(types.SetOfAny, types.NewArray(nil, types.A))).Description("initial paths"), // TODO(sr): copied. is that correct?
types.Named("initial", types.NewAny(types.NewSet(types.A), types.NewArray(nil, types.A))).Description("initial paths"), // TODO(sr): copied. is that correct?
),
types.Named("output", types.NewSet(types.NewArray(nil, types.A))).Description("paths reachable from the `initial` vertices in the directed `graph`"),
),
@@ -3031,7 +3027,7 @@ var NetCIDRExpand = &Builtin{
types.Args(
types.Named("cidr", types.S).Description("CIDR to expand"),
),
types.Named("hosts", types.SetOfStr).Description("set of IP addresses the CIDR `cidr` expands to"),
types.Named("hosts", types.NewSet(types.S)).Description("set of IP addresses the CIDR `cidr` expands to"),
),
}

@@ -3069,10 +3065,10 @@ Supports both IPv4 and IPv6 notations. IPv6 inputs need a prefix length (e.g. "/
types.Args(
types.Named("addrs", types.NewAny(
types.NewArray(nil, types.NewAny(types.S)),
types.SetOfStr,
types.NewSet(types.S),
)).Description("CIDRs or IP addresses"),
),
types.Named("output", types.SetOfStr).Description("smallest possible set of CIDRs obtained after merging the provided list of IP addresses and subnets in `addrs`"),
types.Named("output", types.NewSet(types.S)).Description("smallest possible set of CIDRs obtained after merging the provided list of IP addresses and subnets in `addrs`"),
),
}

@@ -3114,7 +3110,7 @@ var NetLookupIPAddr = &Builtin{
types.Args(
types.Named("name", types.S).Description("domain name to resolve"),
),
types.Named("addrs", types.SetOfStr).Description("IP addresses (v4 and v6) that `name` resolves to"),
types.Named("addrs", types.NewSet(types.S)).Description("IP addresses (v4 and v6) that `name` resolves to"),
),
Nondeterministic: true,
}
@@ -3164,12 +3160,7 @@ var Print = &Builtin{
// The compiler rewrites print() calls to refer to the internal implementation.
var InternalPrint = &Builtin{
Name: "internal.print",
Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.SetOfAny)}, nil),
}

var InternalTestCase = &Builtin{
Name: "internal.test_case",
Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.A)}, nil),
Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.NewSet(types.A))}, nil),
}

/**
@@ -3181,10 +3172,10 @@ var SetDiff = &Builtin{
Name: "set_diff",
Decl: types.NewFunction(
types.Args(
types.SetOfAny,
types.SetOfAny,
types.NewSet(types.A),
types.NewSet(types.A),
),
types.SetOfAny,
types.NewSet(types.A),
),
deprecated: true,
}
@@ -3221,7 +3212,7 @@ var CastSet = &Builtin{
Name: "cast_set",
Decl: types.NewFunction(
types.Args(types.A),
types.SetOfAny,
types.NewSet(types.A),
),
deprecated: true,
}
@@ -3287,7 +3278,7 @@ var All = &Builtin{
Decl: types.NewFunction(
types.Args(
types.NewAny(
types.SetOfAny,
types.NewSet(types.A),
types.NewArray(nil, types.A),
),
),
@@ -3303,7 +3294,7 @@ var Any = &Builtin{
Decl: types.NewFunction(
types.Args(
types.NewAny(
types.SetOfAny,
types.NewSet(types.A),
types.NewArray(nil, types.A),
),
),
@@ -3401,7 +3392,7 @@ func (b *Builtin) IsTargetPos(i int) bool {

func init() {
BuiltinMap = map[string]*Builtin{}
for _, b := range &DefaultBuiltins {
for _, b := range DefaultBuiltins {
RegisterBuiltin(b)
}
}

13 vendor/github.com/open-policy-agent/opa/v1/ast/capabilities.go (generated, vendored)

@@ -11,7 +11,6 @@ import (
"fmt"
"io"
"os"
"slices"
"sort"
"strings"

@@ -117,9 +116,8 @@ func CapabilitiesForThisVersion(opts ...CapabilitiesOption) *Capabilities {

f.Builtins = make([]*Builtin, len(Builtins))
copy(f.Builtins, Builtins)

slices.SortFunc(f.Builtins, func(a, b *Builtin) int {
return strings.Compare(a.Name, b.Name)
sort.Slice(f.Builtins, func(i, j int) bool {
return f.Builtins[i].Name < f.Builtins[j].Name
})

if co.regoVersion == RegoV0 || co.regoVersion == RegoV0CompatV1 {
@@ -245,7 +243,12 @@ func (c *Capabilities) MinimumCompatibleVersion() (string, bool) {
}

func (c *Capabilities) ContainsFeature(feature string) bool {
return slices.Contains(c.Features, feature)
for _, f := range c.Features {
if f == feature {
return true
}
}
return false
}

// addBuiltinSorted inserts a built-in into c in sorted order. An existing built-in with the same name

78 vendor/github.com/open-policy-agent/opa/v1/ast/check.go generated vendored
@@ -276,7 +276,7 @@ func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) {
if len(rule.Head.Args) > 0 {
// If args are not referred to in body, infer as any.
WalkVars(rule.Head.Args, func(v Var) bool {
if cpy.GetByValue(v) == nil {
if cpy.Get(v) == nil {
cpy.tree.PutOne(v, types.A)
}
return false
@@ -284,8 +284,8 @@ func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) {

// Construct function type.
args := make([]types.Type, len(rule.Head.Args))
for i := range len(rule.Head.Args) {
args[i] = cpy.GetByValue(rule.Head.Args[i].Value)
for i := 0; i < len(rule.Head.Args); i++ {
args[i] = cpy.Get(rule.Head.Args[i])
}

f := types.NewFunction(args, cpy.Get(rule.Head.Value))
@@ -294,7 +294,7 @@ func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) {
} else {
switch rule.Head.RuleKind() {
case SingleValue:
typeV := cpy.GetByValue(rule.Head.Value.Value)
typeV := cpy.Get(rule.Head.Value)
if !path.IsGround() {
// e.g. store object[string: whatever] at data.p.q.r, not data.p.q.r[x] or data.p.q.r[x].y[z]
objPath := path.DynamicSuffix()
@@ -306,11 +306,13 @@ func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) {
tc.err([]*Error{NewError(TypeErr, rule.Head.Location, err.Error())}) //nolint:govet
tpe = nil
}
} else if typeV != nil {
tpe = typeV
} else {
if typeV != nil {
tpe = typeV
}
}
case MultiValue:
typeK := cpy.GetByValue(rule.Head.Key.Value)
typeK := cpy.Get(rule.Head.Key)
if typeK != nil {
tpe = types.NewSet(typeK)
}
@@ -339,7 +341,7 @@ func nestedObject(env *TypeEnv, path Ref, tpe types.Type) (types.Type, error) {
}

var dynamicProperty *types.DynamicProperty
typeK := env.GetByValue(k.Value)
typeK := env.Get(k)
if typeK == nil {
return nil, nil
}
@@ -389,7 +391,7 @@ func (tc *typeChecker) checkExprBuiltin(env *TypeEnv, expr *Expr) *Error {
// type checker relies on reordering (in particular for references to local
// vars).
name := expr.Operator()
tpe := env.GetByRef(name)
tpe := env.Get(name)

if tpe == nil {
if tc.allowUndefinedFuncs {
@@ -429,7 +431,7 @@ func (tc *typeChecker) checkExprBuiltin(env *TypeEnv, expr *Expr) *Error {
if !unify1(env, args[i], fargs.Arg(i), false) {
post := make([]types.Type, len(args))
for i := range args {
post[i] = env.GetByValue(args[i].Value)
post[i] = env.Get(args[i])
}
return newArgError(expr.Location, name, "invalid argument(s)", post, namedFargs)
}
@@ -451,7 +453,7 @@ func checkExprEq(env *TypeEnv, expr *Expr) *Error {
}

a, b := expr.Operand(0), expr.Operand(1)
typeA, typeB := env.GetByValue(a.Value), env.GetByValue(b.Value)
typeA, typeB := env.Get(a), env.Get(b)

if !unify2(env, a, typeA, b, typeB) {
err := NewError(TypeErr, expr.Location, "match error")
@@ -471,7 +473,7 @@ func (tc *typeChecker) checkExprWith(env *TypeEnv, expr *Expr, i int) *Error {
}

target, value := expr.With[i].Target, expr.With[i].Value
targetType, valueType := env.GetByValue(target.Value), env.GetByValue(value.Value)
targetType, valueType := env.Get(target), env.Get(value)

if t, ok := targetType.(*types.Function); ok { // built-in function replacement
switch v := valueType.(type) {
@@ -507,7 +509,7 @@ func unify2(env *TypeEnv, a *Term, typeA types.Type, b *Term, typeB types.Type)
case Var:
switch b.Value.(type) {
case Var:
return unify1(env, a, types.A, false) && unify1(env, b, env.GetByValue(a.Value), false)
return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false)
case *Array:
return unify2Array(env, b, a)
case *object:
@@ -523,15 +525,15 @@ func unify2Array(env *TypeEnv, a *Term, b *Term) bool {
switch bv := b.Value.(type) {
case *Array:
if arr.Len() == bv.Len() {
for i := range arr.Len() {
if !unify2(env, arr.Elem(i), env.GetByValue(arr.Elem(i).Value), bv.Elem(i), env.GetByValue(bv.Elem(i).Value)) {
for i := 0; i < arr.Len(); i++ {
if !unify2(env, arr.Elem(i), env.Get(arr.Elem(i)), bv.Elem(i), env.Get(bv.Elem(i))) {
return false
}
}
return true
}
case Var:
return unify1(env, a, types.A, false) && unify1(env, b, env.GetByValue(a.Value), false)
return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false)
}
return false
}
@@ -543,14 +545,14 @@ func unify2Object(env *TypeEnv, a *Term, b *Term) bool {
cv := obj.Intersect(bv)
if obj.Len() == bv.Len() && bv.Len() == len(cv) {
for i := range cv {
if !unify2(env, cv[i][1], env.GetByValue(cv[i][1].Value), cv[i][2], env.GetByValue(cv[i][2].Value)) {
if !unify2(env, cv[i][1], env.Get(cv[i][1]), cv[i][2], env.Get(cv[i][2])) {
return false
}
}
return true
}
case Var:
return unify1(env, a, types.A, false) && unify1(env, b, env.GetByValue(a.Value), false)
return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false)
}
return false
}
@@ -563,7 +565,7 @@ func unify1(env *TypeEnv, term *Term, tpe types.Type, union bool) bool {
return unify1Array(env, v, tpe, union)
case types.Any:
if types.Compare(tpe, types.A) == 0 {
for i := range v.Len() {
for i := 0; i < v.Len(); i++ {
unify1(env, v.Elem(i), types.A, true)
}
return true
@@ -613,22 +615,22 @@ func unify1(env *TypeEnv, term *Term, tpe types.Type, union bool) bool {
}
return false
case Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension:
return unifies(env.GetByValue(v), tpe)
return unifies(env.Get(v), tpe)
case Var:
if !union {
if exist := env.GetByValue(v); exist != nil {
if exist := env.Get(v); exist != nil {
return unifies(exist, tpe)
}
env.tree.PutOne(term.Value, tpe)
} else {
env.tree.PutOne(term.Value, types.Or(env.GetByValue(v), tpe))
env.tree.PutOne(term.Value, types.Or(env.Get(v), tpe))
}
return true
default:
if !IsConstant(v) {
panic("unreachable")
}
return unifies(env.GetByValue(term.Value), tpe)
return unifies(env.Get(term), tpe)
}
}

@@ -636,7 +638,7 @@ func unify1Array(env *TypeEnv, val *Array, tpe *types.Array, union bool) bool {
if val.Len() != tpe.Len() && tpe.Dynamic() == nil {
return false
}
for i := range val.Len() {
for i := 0; i < val.Len(); i++ {
if !unify1(env, val.Elem(i), tpe.Select(i), union) {
return false
}
@@ -730,8 +732,8 @@ func (rc *refChecker) Visit(x interface{}) bool {
}

func (rc *refChecker) checkApply(curr *TypeEnv, ref Ref) *Error {
if tpe, ok := curr.GetByRef(ref).(*types.Function); ok {
// NOTE(sr): We don't support first-class functions, except for `with`.
switch tpe := curr.Get(ref).(type) {
case *types.Function: // NOTE(sr): We don't support first-class functions, except for `with`.
return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), len(ref)-1, tpe)
}

@@ -753,19 +755,19 @@ func (rc *refChecker) checkRef(curr *TypeEnv, node *typeTreeNode, ref Ref, idx i
switch head.Value.(type) {
case Var, String: // OK
default:
have := rc.env.GetByValue(head.Value)
have := rc.env.Get(head.Value)
return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, have, types.S, getOneOfForNode(node))
}
}

if _, ok := head.Value.(Var); ok && idx != 0 {
if v, ok := head.Value.(Var); ok && idx != 0 {
tpe := types.Keys(rc.env.getRefRecExtent(node))
if exist := rc.env.GetByValue(head.Value); exist != nil {
if exist := rc.env.Get(v); exist != nil {
if !unifies(tpe, exist) {
return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, tpe, getOneOfForNode(node))
}
} else {
rc.env.tree.PutOne(head.Value, tpe)
rc.env.tree.PutOne(v, tpe)
}
}

@@ -779,8 +781,8 @@ func (rc *refChecker) checkRef(curr *TypeEnv, node *typeTreeNode, ref Ref, idx i

case RootDocumentNames.Contains(ref[0]):
if idx != 0 {
node.Children().Iter(func(_ Value, child *typeTreeNode) bool {
_ = rc.checkRef(curr, child, ref, idx+1) // ignore error
node.Children().Iter(func(_, child util.T) bool {
_ = rc.checkRef(curr, child.(*typeTreeNode), ref, idx+1) // ignore error
return false
})
return nil
@@ -815,7 +817,7 @@ func (rc *refChecker) checkRefLeaf(tpe types.Type, ref Ref, idx int) *Error {
switch value := head.Value.(type) {

case Var:
if exist := rc.env.GetByValue(value); exist != nil {
if exist := rc.env.Get(value); exist != nil {
if !unifies(exist, keys) {
return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe))
}
@@ -946,7 +948,7 @@ func unifiesArrays(a, b *types.Array) bool {

func unifiesArraysStatic(a, b *types.Array) bool {
if a.Len() != 0 {
for i := range a.Len() {
for i := 0; i < a.Len(); i++ {
if !unifies(a.Select(i), b.Select(i)) {
return false
}
@@ -1001,7 +1003,7 @@ type ArgErrDetail struct {
func (d *ArgErrDetail) Lines() []string {
lines := make([]string, 2)
lines[0] = "have: " + formatArgs(d.Have)
lines[1] = "want: " + d.Want.String()
lines[1] = "want: " + fmt.Sprint(d.Want)
return lines
}

@@ -1067,7 +1069,7 @@ func (r *RefErrInvalidDetail) Lines() []string {
lines := []string{r.Ref.String()}
offset := len(r.Ref[:r.Pos].String()) + 1
pad := strings.Repeat(" ", offset)
lines = append(lines, pad+"^")
lines = append(lines, fmt.Sprintf("%s^", pad))
if r.Have != nil {
lines = append(lines, fmt.Sprintf("%shave (type): %v", pad, r.Have))
} else {
@@ -1125,8 +1127,8 @@ func newArgError(loc *Location, builtinName Ref, msg string, have []types.Type,
}

func getOneOfForNode(node *typeTreeNode) (result []Value) {
node.Children().Iter(func(k Value, _ *typeTreeNode) bool {
result = append(result, k)
node.Children().Iter(func(k, _ util.T) bool {
result = append(result, k.(Value))
return false
})

16 vendor/github.com/open-policy-agent/opa/v1/ast/compare.go generated vendored
@@ -236,7 +236,7 @@ func Compare(a, b interface{}) int {
type termSlice []*Term

func (s termSlice) Less(i, j int) bool { return Compare(s[i].Value, s[j].Value) < 0 }
func (s termSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s termSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x }
func (s termSlice) Len() int { return len(s) }

func sortOrder(x interface{}) int {
@@ -300,7 +300,7 @@ func importsCompare(a, b []*Import) int {
if len(b) < minLen {
minLen = len(b)
}
for i := range minLen {
for i := 0; i < minLen; i++ {
if cmp := a[i].Compare(b[i]); cmp != 0 {
return cmp
}
@@ -319,7 +319,7 @@ func annotationsCompare(a, b []*Annotations) int {
if len(b) < minLen {
minLen = len(b)
}
for i := range minLen {
for i := 0; i < minLen; i++ {
if cmp := a[i].Compare(b[i]); cmp != 0 {
return cmp
}
@@ -338,7 +338,7 @@ func rulesCompare(a, b []*Rule) int {
if len(b) < minLen {
minLen = len(b)
}
for i := range minLen {
for i := 0; i < minLen; i++ {
if cmp := a[i].Compare(b[i]); cmp != 0 {
return cmp
}
@@ -357,7 +357,7 @@ func termSliceCompare(a, b []*Term) int {
if len(b) < minLen {
minLen = len(b)
}
for i := range minLen {
for i := 0; i < minLen; i++ {
if cmp := Compare(a[i], b[i]); cmp != 0 {
return cmp
}
@@ -375,7 +375,7 @@ func withSliceCompare(a, b []*With) int {
if len(b) < minLen {
minLen = len(b)
}
for i := range minLen {
for i := 0; i < minLen; i++ {
if cmp := Compare(a[i], b[i]); cmp != 0 {
return cmp
}
@@ -402,10 +402,6 @@ func TermValueCompare(a, b *Term) int {
return a.Value.Compare(b.Value)
}

func TermValueEqual(a, b *Term) bool {
return ValueEqual(a.Value, b.Value)
}

func ValueEqual(a, b Value) bool {
// TODO(ae): why doesn't this work the same?
//

88 vendor/github.com/open-policy-agent/opa/v1/ast/compile.go generated vendored
@@ -124,7 +124,7 @@ type Compiler struct {

localvargen *localVarGenerator
moduleLoader ModuleLoader
ruleIndices *util.HasherMap[Ref, RuleIndex]
ruleIndices *util.HashMap
stages []stage
maxErrs int
sorted []string // list of sorted module names
@@ -303,10 +303,15 @@ type stage struct {
func NewCompiler() *Compiler {

c := &Compiler{
Modules: map[string]*Module{},
RewrittenVars: map[Var]Var{},
Required: &Capabilities{},
ruleIndices: util.NewHasherMap[Ref, RuleIndex](RefEqual),
Modules: map[string]*Module{},
RewrittenVars: map[Var]Var{},
Required: &Capabilities{},
ruleIndices: util.NewHashMap(func(a, b util.T) bool {
r1, r2 := a.(Ref), b.(Ref)
return r1.Equal(r2)
}, func(x util.T) int {
return x.(Ref).Hash()
}),
maxErrs: CompileErrorLimitDefault,
after: map[string][]CompilerStageDefinition{},
unsafeBuiltinsMap: map[string]struct{}{},
@@ -820,7 +825,7 @@ func (c *Compiler) RuleIndex(path Ref) RuleIndex {
if !ok {
return nil
}
return r
return r.(RuleIndex)
}

// PassesTypeCheck determines whether the given body passes type checking
@@ -1109,7 +1114,7 @@ func (c *Compiler) checkRuleConflicts() {
for _, rule := range node.Values {
r := rule.(*Rule)
ref := r.Ref()
name = rw(ref.CopyNonGround()).String() // varRewriter operates in-place
name = rw(ref.Copy()).String() // varRewriter operates in-place
kinds[r.Head.RuleKind()] = struct{}{}
arities[len(r.Head.Args)] = struct{}{}
if r.Default {
@@ -1151,7 +1156,7 @@ func (c *Compiler) checkRuleConflicts() {
// data.p.q[r][s] { r := input.r; s := input.s }
// data.p[q].r.s { q := input.q }

if ref.IsGround() && len(node.Children) > 0 {
if r.Ref().IsGround() && len(node.Children) > 0 {
conflicts = node.flattenChildren()
}

@@ -1346,7 +1351,7 @@ func compileSchema(goSchema interface{}, allowNet []string) (*gojsonschema.Schem
if goSchema != nil {
refLoader = gojsonschema.NewGoLoader(goSchema)
} else {
return nil, errors.New("no schema as input to compile")
return nil, fmt.Errorf("no schema as input to compile")
}
schemasCompiled, err := sl.Compile(refLoader)
if err != nil {
@@ -1365,13 +1370,13 @@ func mergeSchemas(schemas ...*gojsonschema.SubSchema) (*gojsonschema.SubSchema,
if len(schemas[i].PropertiesChildren) > 0 {
if !schemas[i].Types.Contains("object") {
if err := schemas[i].Types.Add("object"); err != nil {
return nil, errors.New("unable to set the type in schemas")
return nil, fmt.Errorf("unable to set the type in schemas")
}
}
} else if len(schemas[i].ItemsChildren) > 0 {
if !schemas[i].Types.Contains("array") {
if err := schemas[i].Types.Add("array"); err != nil {
return nil, errors.New("unable to set the type in schemas")
return nil, fmt.Errorf("unable to set the type in schemas")
}
}
}
@@ -1383,12 +1388,12 @@ func mergeSchemas(schemas ...*gojsonschema.SubSchema) (*gojsonschema.SubSchema,
} else if result.Types.Contains("object") && len(result.PropertiesChildren) > 0 && schemas[i].Types.Contains("object") && len(schemas[i].PropertiesChildren) > 0 {
result.PropertiesChildren = append(result.PropertiesChildren, schemas[i].PropertiesChildren...)
} else if result.Types.Contains("array") && len(result.ItemsChildren) > 0 && schemas[i].Types.Contains("array") && len(schemas[i].ItemsChildren) > 0 {
for j := range len(schemas[i].ItemsChildren) {
for j := 0; j < len(schemas[i].ItemsChildren); j++ {
if len(result.ItemsChildren)-1 < j && !(len(schemas[i].ItemsChildren)-1 < j) {
result.ItemsChildren = append(result.ItemsChildren, schemas[i].ItemsChildren[j])
}
if result.ItemsChildren[j].Types.String() != schemas[i].ItemsChildren[j].Types.String() {
return nil, errors.New("unable to merge these schemas")
return nil, fmt.Errorf("unable to merge these schemas")
}
}
}
@@ -1477,7 +1482,7 @@ func (parser *schemaParser) parseSchemaWithPropertyKey(schema interface{}, prope
}
return parser.parseSchema(objectOrArrayResult)
} else if subSchema.Types.String() != allOfResult.Types.String() {
return nil, errors.New("unable to merge these schemas")
return nil, fmt.Errorf("unable to merge these schemas")
}
}
return parser.parseSchema(allOfResult)
@@ -1733,9 +1738,13 @@ func (c *Compiler) err(err *Error) {
c.Errors = append(c.Errors, err)
}

func (c *Compiler) getExports() *util.HasherMap[Ref, []Ref] {
func (c *Compiler) getExports() *util.HashMap {

rules := util.NewHasherMap[Ref, []Ref](RefEqual)
rules := util.NewHashMap(func(a, b util.T) bool {
return a.(Ref).Equal(b.(Ref))
}, func(v util.T) int {
return v.(Ref).Hash()
})

for _, name := range c.sorted {
mod := c.Modules[name]
@@ -1748,30 +1757,18 @@ func (c *Compiler) getExports() *util.HasherMap[Ref, []Ref] {
return rules
}

func refSliceEqual(a, b []Ref) bool {
if len(a) != len(b) {
return false
}
for i := range a {
if !a[i].Equal(b[i]) {
return false
}
}
return true
}

func hashMapAdd(rules *util.HasherMap[Ref, []Ref], pkg, rule Ref) {
func hashMapAdd(rules *util.HashMap, pkg, rule Ref) {
prev, ok := rules.Get(pkg)
if !ok {
rules.Put(pkg, []Ref{rule})
return
}
for _, p := range prev {
for _, p := range prev.([]Ref) {
if p.Equal(rule) {
return
}
}
rules.Put(pkg, append(prev, rule))
rules.Put(pkg, append(prev.([]Ref), rule))
}

func (c *Compiler) GetAnnotationSet() *AnnotationSet {
@@ -1870,7 +1867,7 @@ func (c *Compiler) resolveAllRefs() {

var ruleExports []Ref
if x, ok := rules.Get(mod.Package.Path); ok {
ruleExports = x
ruleExports = x.([]Ref)
}

globals := getGlobals(mod.Package, ruleExports, mod.Imports)
@@ -3017,7 +3014,7 @@ func (qc *queryCompiler) resolveRefs(qctx *QueryContext, body Body) (Body, error
var ruleExports []Ref
rules := qc.compiler.getExports()
if exist, ok := rules.Get(pkg.Path); ok {
ruleExports = exist
ruleExports = exist.([]Ref)
}

globals = getGlobals(qctx.Package, ruleExports, qctx.Imports)
@@ -3545,8 +3542,10 @@ func (n *TreeNode) add(path Ref, rule *Rule) {
}
node.Children[sub.Key] = sub
node.Sorted = append(node.Sorted, sub.Key)
} else if rule != nil {
node.Values = append(node.Values, rule)
} else {
if rule != nil {
node.Values = append(node.Values, rule)
}
}
}

@@ -4232,9 +4231,6 @@ func (f *equalityFactory) Generate(other *Term) *Expr {
return expr
}

// TODO: Move to internal package?
const LocalVarPrefix = "__local"

type localVarGenerator struct {
exclude VarSet
suffix string
@@ -4259,7 +4255,7 @@ func newLocalVarGenerator(suffix string, node interface{}) *localVarGenerator {

func (l *localVarGenerator) Generate() Var {
for {
result := Var(LocalVarPrefix + l.suffix + strconv.Itoa(l.next) + "__")
result := Var("__local" + l.suffix + strconv.Itoa(l.next) + "__")
l.next++
if !l.exclude.Contains(result) {
return result
@@ -4415,7 +4411,7 @@ func resolveRefsInExpr(globals map[Var]*usedRef, ignore *declaredVarStack, expr
cpy.Terms = resolveRefsInTerm(globals, ignore, ts)
case []*Term:
buf := make([]*Term, len(ts))
for i := range ts {
for i := 0; i < len(ts); i++ {
buf[i] = resolveRefsInTerm(globals, ignore, ts[i])
}
cpy.Terms = buf
@@ -4520,7 +4516,7 @@ func resolveRefsInTerm(globals map[Var]*usedRef, ignore *declaredVarStack, term

func resolveRefsInTermArray(globals map[Var]*usedRef, ignore *declaredVarStack, terms *Array) []*Term {
cpy := make([]*Term, terms.Len())
for i := range terms.Len() {
for i := 0; i < terms.Len(); i++ {
cpy[i] = resolveRefsInTerm(globals, ignore, terms.Elem(i))
}
return cpy
@@ -4528,7 +4524,7 @@ func resolveRefsInTermArray(globals map[Var]*usedRef, ignore *declaredVarStack,

func resolveRefsInTermSlice(globals map[Var]*usedRef, ignore *declaredVarStack, terms []*Term) []*Term {
cpy := make([]*Term, len(terms))
for i := range terms {
for i := 0; i < len(terms); i++ {
cpy[i] = resolveRefsInTerm(globals, ignore, terms[i])
}
return cpy
@@ -4802,7 +4798,7 @@ func rewriteDynamicsOne(original *Expr, f *equalityFactory, term *Term, result B
connectGeneratedExprs(original, generated)
return result, result[len(result)-1].Operand(0)
case *Array:
for i := range v.Len() {
for i := 0; i < v.Len(); i++ {
var t *Term
result, t = rewriteDynamicsOne(original, f, v.Elem(i), result)
v.set(i, t)
@@ -4879,7 +4875,7 @@ func rewriteExprTermsInHead(gen *localVarGenerator, rule *Rule) {

func rewriteExprTermsInBody(gen *localVarGenerator, body Body) Body {
cpy := make(Body, 0, len(body))
for i := range body {
for i := 0; i < len(body); i++ {
for _, expr := range expandExpr(gen, body[i]) {
cpy.Append(expr)
}
@@ -5032,7 +5028,7 @@ func expandExprRef(gen *localVarGenerator, v []*Term) (support []*Expr) {
}

func expandExprTermArray(gen *localVarGenerator, arr *Array) (support []*Expr) {
for i := range arr.Len() {
for i := 0; i < arr.Len(); i++ {
extras, v := expandExprTerm(gen, arr.Elem(i))
arr.set(i, v)
support = append(support, extras...)
@@ -5714,7 +5710,7 @@ func validateWith(c *Compiler, unsafeBuiltinsMap map[string]struct{}, expr *Expr
case isDataRef(target):
ref := target.Value.(Ref)
targetNode := c.RuleTree
for i := range len(ref) - 1 {
for i := 0; i < len(ref)-1; i++ {
child := targetNode.Child(ref[i].Value)
if child == nil {
break

102 vendor/github.com/open-policy-agent/opa/v1/ast/env.go generated vendored
@@ -29,38 +29,29 @@ func newTypeEnv(f func() *typeChecker) *TypeEnv {
}

// Get returns the type of x.
// Deprecated: Use GetByValue or GetByRef instead, as they are more efficient.
func (env *TypeEnv) Get(x interface{}) types.Type {

if term, ok := x.(*Term); ok {
x = term.Value
}

if v, ok := x.(Value); ok {
return env.GetByValue(v)
}

panic("unreachable")
}

// GetByValue returns the type of v.
func (env *TypeEnv) GetByValue(v Value) types.Type {
switch x := v.(type) {
switch x := x.(type) {

// Scalars.
case Null:
return types.Nl
return types.NewNull()
case Boolean:
return types.B
return types.NewBoolean()
case Number:
return types.N
return types.NewNumber()
case String:
return types.S
return types.NewString()

// Composites.
case *Array:
static := make([]types.Type, x.Len())
for i := range static {
tpe := env.GetByValue(x.Elem(i).Value)
tpe := env.Get(x.Elem(i).Value)
static[i] = tpe
}

@@ -72,7 +63,7 @@ func (env *TypeEnv) GetByValue(v Value) types.Type {
return types.NewArray(static, dynamic)

case *lazyObj:
return env.GetByValue(x.force())
return env.Get(x.force())
case *object:
static := []*types.StaticProperty{}
var dynamic *types.DynamicProperty
@@ -81,14 +72,14 @@ func (env *TypeEnv) GetByValue(v Value) types.Type {
if IsConstant(k.Value) {
kjson, err := JSON(k.Value)
if err == nil {
tpe := env.GetByValue(v.Value)
tpe := env.Get(v)
static = append(static, types.NewStaticProperty(kjson, tpe))
return
}
}
// Can't handle it as a static property, fallback to dynamic
typeK := env.GetByValue(k.Value)
typeV := env.GetByValue(v.Value)
typeK := env.Get(k.Value)
typeV := env.Get(v.Value)
dynamic = types.NewDynamicProperty(typeK, typeV)
})

@@ -101,7 +92,8 @@ func (env *TypeEnv) GetByValue(v Value) types.Type {
case Set:
var tpe types.Type
x.Foreach(func(elem *Term) {
tpe = types.Or(tpe, env.GetByValue(elem.Value))
other := env.Get(elem.Value)
tpe = types.Or(tpe, other)
})
if tpe == nil {
tpe = types.A
@@ -112,46 +104,47 @@ func (env *TypeEnv) GetByValue(v Value) types.Type {
case *ArrayComprehension:
cpy, errs := env.newChecker().CheckBody(env, x.Body)
if len(errs) == 0 {
return types.NewArray(nil, cpy.GetByValue(x.Term.Value))
return types.NewArray(nil, cpy.Get(x.Term))
}
return nil
case *ObjectComprehension:
cpy, errs := env.newChecker().CheckBody(env, x.Body)
if len(errs) == 0 {
return types.NewObject(nil, types.NewDynamicProperty(cpy.GetByValue(x.Key.Value), cpy.GetByValue(x.Value.Value)))
return types.NewObject(nil, types.NewDynamicProperty(cpy.Get(x.Key), cpy.Get(x.Value)))
}
return nil
case *SetComprehension:
cpy, errs := env.newChecker().CheckBody(env, x.Body)
if len(errs) == 0 {
return types.NewSet(cpy.GetByValue(x.Term.Value))
return types.NewSet(cpy.Get(x.Term))
}
return nil

// Refs.
case Ref:
return env.GetByRef(x)
return env.getRef(x)

// Vars.
case Var:
if node := env.tree.Child(v); node != nil {
if node := env.tree.Child(x); node != nil {
return node.Value()
}
if env.next != nil {
return env.next.GetByValue(v)
return env.next.Get(x)
}
return nil

// Calls.
case Call:
return nil
}

return env.Get(v)
default:
panic("unreachable")
}
}

// GetByRef returns the type of the value referred to by ref.
func (env *TypeEnv) GetByRef(ref Ref) types.Type {
func (env *TypeEnv) getRef(ref Ref) types.Type {

node := env.tree.Child(ref[0].Value)
if node == nil {
return env.getRefFallback(ref)
@@ -163,7 +156,7 @@ func (env *TypeEnv) GetByRef(ref Ref) types.Type {
func (env *TypeEnv) getRefFallback(ref Ref) types.Type {

if env.next != nil {
return env.next.GetByRef(ref)
return env.next.Get(ref)
}

if RootDocumentNames.Contains(ref[0]) {
@@ -207,7 +200,10 @@ func (env *TypeEnv) getRefRecExtent(node *typeTreeNode) types.Type {

children := []*types.StaticProperty{}

node.Children().Iter(func(key Value, child *typeTreeNode) bool {
node.Children().Iter(func(k, v util.T) bool {
key := k.(Value)
child := v.(*typeTreeNode)

tpe := env.getRefRecExtent(child)

// NOTE(sr): Converting to Golang-native types here is an extension of what we did
@@ -241,14 +237,14 @@ func (env *TypeEnv) wrap() *TypeEnv {
type typeTreeNode struct {
key Value
value types.Type
children *util.HasherMap[Value, *typeTreeNode]
children *util.HashMap
}

func newTypeTree() *typeTreeNode {
return &typeTreeNode{
key: nil,
value: nil,
children: util.NewHasherMap[Value, *typeTreeNode](ValueEqual),
children: util.NewHashMap(valueEq, valueHash),
}
}

@@ -257,10 +253,10 @@ func (n *typeTreeNode) Child(key Value) *typeTreeNode {
if !ok {
return nil
}
return value
return value.(*typeTreeNode)
}

func (n *typeTreeNode) Children() *util.HasherMap[Value, *typeTreeNode] {
func (n *typeTreeNode) Children() *util.HashMap {
return n.children
}

@@ -271,7 +267,7 @@ func (n *typeTreeNode) Get(path Ref) types.Type {
if !ok {
return nil
}
curr = child
curr = child.(*typeTreeNode)
}
return curr.Value()
}
@@ -289,7 +285,7 @@ func (n *typeTreeNode) PutOne(key Value, tpe types.Type) {
child.key = key
n.children.Put(key, child)
} else {
child = c
child = c.(*typeTreeNode)
}

child.value = tpe
@@ -306,7 +302,7 @@ func (n *typeTreeNode) Put(path Ref, tpe types.Type) {
child.key = term.Value
curr.children.Put(child.key, child)
} else {
child = c
child = c.(*typeTreeNode)
}

curr = child
@@ -328,7 +324,8 @@ func (n *typeTreeNode) Insert(path Ref, tpe types.Type, env *TypeEnv) {
child.key = term.Value
curr.children.Put(child.key, child)
} else {
child = c
child = c.(*typeTreeNode)

if child.value != nil && i+1 < len(path) {
// If child has an object value, merge the new value into it.
if o, ok := child.value.(*types.Object); ok {
@@ -429,12 +426,13 @@ func (n *typeTreeNode) String() string {
b.WriteString(v.String())
}

n.children.Iter(func(_ Value, child *typeTreeNode) bool {
b.WriteString("\n\t+ ")
s := child.String()
s = strings.ReplaceAll(s, "\n", "\n\t")
b.WriteString(s)

n.children.Iter(func(_, v util.T) bool {
if child, ok := v.(*typeTreeNode); ok {
b.WriteString("\n\t+ ")
s := child.String()
s = strings.ReplaceAll(s, "\n", "\n\t")
b.WriteString(s)
}
return false
})

@@ -446,7 +444,7 @@ func insertIntoObject(o *types.Object, path Ref, tpe types.Type, env *TypeEnv) (
return o, nil
}

key := env.GetByValue(path[0].Value)
key := env.Get(path[0].Value)

if len(path) == 1 {
var dynamicProps *types.DynamicProperty
@@ -474,8 +472,8 @@ func insertIntoObject(o *types.Object, path Ref, tpe types.Type, env *TypeEnv) (

func (n *typeTreeNode) Leafs() map[*Ref]types.Type {
leafs := map[*Ref]types.Type{}
n.children.Iter(func(_ Value, v *typeTreeNode) bool {
collectLeafs(v, nil, leafs)
n.children.Iter(func(_, v util.T) bool {
collectLeafs(v.(*typeTreeNode), nil, leafs)
return false
})
return leafs
@@ -487,8 +485,8 @@ func collectLeafs(n *typeTreeNode, path Ref, leafs map[*Ref]types.Type) {
leafs[&nPath] = n.Value()
return
}
n.children.Iter(func(_ Value, v *typeTreeNode) bool {
collectLeafs(v, nPath, leafs)
n.children.Iter(func(_, v util.T) bool {
collectLeafs(v.(*typeTreeNode), nPath, leafs)
return false
})
}

16 vendor/github.com/open-policy-agent/opa/v1/ast/errors.go generated vendored
@@ -6,8 +6,7 @@ package ast

import (
"fmt"
"slices"
"strconv"
"sort"
"strings"
)

@@ -36,12 +35,15 @@ func (e Errors) Error() string {
// Sort sorts the error slice by location. If the locations are equal then the
// error message is compared.
func (e Errors) Sort() {
slices.SortFunc(e, func(a, b *Error) int {
sort.Slice(e, func(i, j int) bool {
a := e[i]
b := e[j]

if cmp := a.Location.Compare(b.Location); cmp != 0 {
return cmp
return cmp < 0
}

return strings.Compare(a.Error(), b.Error())
return a.Error() < b.Error()
})
}

@@ -90,9 +92,9 @@ func (e *Error) Error() string {
if e.Location != nil {

if len(e.Location.File) > 0 {
prefix += e.Location.File + ":" + strconv.Itoa(e.Location.Row)
prefix += e.Location.File + ":" + fmt.Sprint(e.Location.Row)
} else {
prefix += strconv.Itoa(e.Location.Row) + ":" + strconv.Itoa(e.Location.Col)
prefix += fmt.Sprint(e.Location.Row) + ":" + fmt.Sprint(e.Location.Col)
}
}

180 vendor/github.com/open-policy-agent/opa/v1/ast/index.go generated vendored
@@ -6,7 +6,6 @@ package ast

import (
"fmt"
"slices"
"sort"
"strings"
"sync"
@@ -34,10 +33,10 @@ type RuleIndex interface {

// IndexResult contains the result of an index lookup.
type IndexResult struct {
Kind RuleKind
Rules []*Rule
Else map[*Rule][]*Rule
Default *Rule
Kind RuleKind
EarlyExit bool
OnlyGroundRefs bool
}
@@ -46,6 +45,7 @@ type IndexResult struct {
func NewIndexResult(kind RuleKind) *IndexResult {
return &IndexResult{
Kind: kind,
Else: map[*Rule][]*Rule{},
}
}

@@ -55,6 +55,7 @@ func (ir *IndexResult) Empty() bool {
}

type baseDocEqIndex struct {
skipIndexing Set
isVirtual func(Ref) bool
root *trieNode
defaultRule *Rule
@@ -63,17 +64,15 @@ type baseDocEqIndex struct {
}

var (
equalityRef = Equality.Ref()
equalRef = Equal.Ref()
globMatchRef = GlobMatch.Ref()
internalPrintRef = InternalPrint.Ref()
internalTestCaseRef = InternalTestCase.Ref()

skipIndexing = NewSet(NewTerm(internalPrintRef), NewTerm(internalTestCaseRef))
equalityRef = Equality.Ref()
equalRef = Equal.Ref()
globMatchRef = GlobMatch.Ref()
internalPrintRef = InternalPrint.Ref()
)

func newBaseDocEqIndex(isVirtual func(Ref) bool) *baseDocEqIndex {
return &baseDocEqIndex{
skipIndexing: NewSet(NewTerm(internalPrintRef)),
isVirtual: isVirtual,
root: newTrieNodeImpl(),
onlyGroundRefs: true,
@@ -99,15 +98,15 @@ func (i *baseDocEqIndex) Build(rules []*Rule) bool {
i.onlyGroundRefs = rule.Head.Reference.IsGround()
}
var skip bool
for i := range rule.Body {
if op := rule.Body[i].OperatorTerm(); op != nil && skipIndexing.Contains(op) {
for _, expr := range rule.Body {
if op := expr.OperatorTerm(); op != nil && i.skipIndexing.Contains(op) {
skip = true
break
}
}
if !skip {
for i := range rule.Body {
indices.Update(rule, rule.Body[i])
for _, expr := range rule.Body {
indices.Update(rule, expr)
}
}
return false
@@ -144,8 +143,7 @@ func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) {
defer func() {
clear(tr.unordered)
tr.ordering = tr.ordering[:0]
tr.multiple = false
tr.exist = nil
tr.values.clear()

ttrPool.Put(tr)
}()
@@ -155,33 +153,20 @@ func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) {
return nil, err
}

result := IndexResultPool.Get()

result.Kind = i.kind
result := NewIndexResult(i.kind)
result.Default = i.defaultRule
result.OnlyGroundRefs = i.onlyGroundRefs

if result.Rules == nil {
result.Rules = make([]*Rule, 0, len(tr.ordering))
} else {
result.Rules = result.Rules[:0]
}

clear(result.Else)
result.Rules = make([]*Rule, 0, len(tr.ordering))

for _, pos := range tr.ordering {
slices.SortFunc(tr.unordered[pos], func(a, b *ruleNode) int {
return a.prio[1] - b.prio[1]
sort.Slice(tr.unordered[pos], func(i, j int) bool {
return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1]
})
nodes := tr.unordered[pos]
root := nodes[0].rule

result.Rules = append(result.Rules, root)
if len(nodes) > 1 {
if result.Else == nil {
result.Else = map[*Rule][]*Rule{}
}

result.Else[root] = make([]*Rule, len(nodes)-1)
for i := 1; i < len(nodes); i++ {
result.Else[root][i-1] = nodes[i].rule
@@ -189,26 +174,7 @@ func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) {
}
}

if !tr.multiple {
// even when the indexer hasn't seen multiple values, the rule itself could be one
// where early exit shouldn't be applied.
var lastValue Value
for i := range result.Rules {
if result.Rules[i].Head.DocKind() != CompleteDoc {
tr.multiple = true
break
}
if result.Rules[i].Head.Value != nil {
if lastValue != nil && !ValueEqual(lastValue, result.Rules[i].Head.Value.Value) {
tr.multiple = true
break
}
lastValue = result.Rules[i].Head.Value.Value
}
}
}

result.EarlyExit = !tr.multiple
result.EarlyExit = tr.values.Len() == 1 && tr.values.Slice()[0].IsGround()

return result, nil
}
@@ -226,17 +192,13 @@ func (i *baseDocEqIndex) AllRules(_ ValueResolver) (*IndexResult, error) {
result.Rules = make([]*Rule, 0, len(tr.ordering))

for _, pos := range tr.ordering {
slices.SortFunc(tr.unordered[pos], func(a, b *ruleNode) int {
return a.prio[1] - b.prio[1]
sort.Slice(tr.unordered[pos], func(i, j int) bool {
return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1]
})
nodes := tr.unordered[pos]
root := nodes[0].rule
result.Rules = append(result.Rules, root)
if len(nodes) > 1 {
if result.Else == nil {
result.Else = map[*Rule][]*Rule{}
}

result.Else[root] = make([]*Rule, len(nodes)-1)
for i := 1; i < len(nodes); i++ {
result.Else[root][i-1] = nodes[i].rule
@@ -244,7 +206,7 @@ func (i *baseDocEqIndex) AllRules(_ ValueResolver) (*IndexResult, error) {
}
}

result.EarlyExit = !tr.multiple
result.EarlyExit = tr.values.Len() == 1 && tr.values.Slice()[0].IsGround()

return result, nil
}
@@ -273,7 +235,7 @@ type refindex struct {
type refindices struct {
isVirtual func(Ref) bool
rules map[*Rule][]*refindex
frequency *util.HasherMap[Ref, int]
frequency *util.HashMap
sorted []Ref
}

@@ -281,7 +243,12 @@ func newrefindices(isVirtual func(Ref) bool) *refindices {
return &refindices{
isVirtual: isVirtual,
rules: map[*Rule][]*refindex{},
frequency: util.NewHasherMap[Ref, int](RefEqual),
frequency: util.NewHashMap(func(a, b util.T) bool {
r1, r2 := a.(Ref), b.(Ref)
return r1.Equal(r2)
}, func(x util.T) int {
return x.(Ref).Hash()
}),
}
}

@@ -329,9 +296,9 @@ func (i *refindices) Sorted() []Ref {
counts := make([]int, 0, i.frequency.Len())
i.sorted = make([]Ref, 0, i.frequency.Len())

i.frequency.Iter(func(k Ref, v int) bool {
counts = append(counts, v)
i.sorted = append(i.sorted, k)
i.frequency.Iter(func(k, v util.T) bool {
counts = append(counts, v.(int))
i.sorted = append(i.sorted, k.(Ref))
return false
})

@@ -432,7 +399,7 @@ func (i *refindices) insert(rule *Rule, index *refindex) {
count = 0
}

i.frequency.Put(index.Ref, count+1)
i.frequency.Put(index.Ref, count.(int)+1)

for pos, other := range i.rules[rule] {
if other.Ref.Equal(index.Ref) {
@@ -460,8 +427,7 @@ type trieWalker interface {
type trieTraversalResult struct {
unordered map[int][]*ruleNode
ordering []int
exist *Term
multiple bool
values *set
}

var ttrPool = sync.Pool{
@@ -473,6 +439,10 @@ var ttrPool = sync.Pool{
func newTrieTraversalResult() *trieTraversalResult {
return &trieTraversalResult{
unordered: map[int][]*ruleNode{},
// Number 3 is arbitrary, but seemed to be the most common number of values
// stored when benchmarking the trie traversal against a large policy library
// (Regal).
values: newset(3),
}
}

@@ -485,30 +455,21 @@ func (tr *trieTraversalResult) Add(t *trieNode) {
}
tr.unordered[root] = append(nodes, node)
}
if t.multiple {
tr.multiple = true
if t.values != nil {
t.values.Foreach(tr.values.insertNoGuard)
}
if tr.multiple || t.value == nil {
return
}
if t.value.IsGround() && tr.exist == nil || tr.exist.Equal(t.value) {
tr.exist = t.value
return
}
tr.multiple = true
}

type trieNode struct {
ref Ref
values Set
mappers []*valueMapper
next *trieNode
any *trieNode
undefined *trieNode
scalars *util.HasherMap[Value, *trieNode]
scalars *util.HashMap
array *trieNode
rules []*ruleNode
value *Term
multiple bool
}

func (node *trieNode) String() string {
@@ -531,7 +492,9 @@ func (node *trieNode) String() string {
}
if node.scalars.Len() > 0 {
buf := make([]string, 0, node.scalars.Len())
node.scalars.Iter(func(key Value, val *trieNode) bool {
node.scalars.Iter(func(k, v util.T) bool {
key := k.(Value)
val := v.(*trieNode)
buf = append(buf, fmt.Sprintf("scalar(%v):%p", key, val))
return false
})
@@ -544,8 +507,10 @@ func (node *trieNode) String() string {
if len(node.mappers) > 0 {
flags = append(flags, fmt.Sprintf("%d mapper(s)", len(node.mappers)))
}
if node.value != nil {
flags = append(flags, "value exists")
if node.values != nil {
if l := node.values.Len(); l > 0 {
flags = append(flags, fmt.Sprintf("%d value(s)", l))
}
}
return strings.Join(flags, " ")
}
@@ -553,12 +518,13 @@ func (node *trieNode) String() string {
func (node *trieNode) append(prio [2]int, rule *Rule) {
node.rules = append(node.rules, &ruleNode{prio, rule})

if node.value != nil && rule.Head.Value != nil && !node.value.Equal(rule.Head.Value) {
node.multiple = true
if node.values != nil && rule.Head.Value != nil {
node.values.Add(rule.Head.Value)
return
}

if node.value == nil && rule.Head.DocKind() == CompleteDoc {
node.value = rule.Head.Value
if node.values == nil && rule.Head.DocKind() == CompleteDoc {
node.values = NewSet(rule.Head.Value)
}
}

@@ -569,7 +535,7 @@ type ruleNode struct {

func newTrieNodeImpl() *trieNode {
return &trieNode{
scalars: util.NewHasherMap[Value, *trieNode](ValueEqual),
scalars: util.NewHashMap(valueEq, valueHash),
}
}

@@ -585,7 +551,8 @@ func (node *trieNode) Do(walker trieWalker) {
node.undefined.Do(next)
}

node.scalars.Iter(func(_ Value, child *trieNode) bool {
node.scalars.Iter(func(_, v util.T) bool {
child := v.(*trieNode)
child.Do(next)
return false
})
@@ -651,7 +618,7 @@ func (node *trieNode) insertValue(value Value) *trieNode {
child = newTrieNodeImpl()
node.scalars.Put(value, child)
}
return child
return child.(*trieNode)
case *Array:
if node.array == nil {
node.array = newTrieNodeImpl()
@@ -680,7 +647,7 @@ func (node *trieNode) insertArray(arr *Array) *trieNode {
child = newTrieNodeImpl()
node.scalars.Put(head, child)
}
return child.insertArray(arr.Slice(1, -1))
return child.(*trieNode).insertArray(arr.Slice(1, -1))
}

panic("illegal value")
@@ -745,7 +712,7 @@ func (node *trieNode) traverseValue(resolver ValueResolver, tr *trieTraversalRes
if !ok {
return nil
}
return child.Traverse(resolver, tr)
return child.(*trieNode).Traverse(resolver, tr)
}

return nil
@@ -770,16 +737,11 @@ func (node *trieNode) traverseArray(resolver ValueResolver, tr *trieTraversalRes
return nil
}

switch head := head.(type) {
case Null, Boolean, Number, String:
child, ok := node.scalars.Get(head)
if !ok {
return nil
}
return child.traverseArray(resolver, tr, arr.Slice(1, -1))
child, ok := node.scalars.Get(head)
if !ok {
return nil
}

panic("illegal value")
return child.(*trieNode).traverseArray(resolver, tr, arr.Slice(1, -1))
}

func (node *trieNode) traverseUnknown(resolver ValueResolver, tr *trieTraversalResult) error {
@@ -805,8 +767,12 @@ func (node *trieNode) traverseUnknown(resolver ValueResolver, tr *trieTraversalR
}

var iterErr error
node.scalars.Iter(func(_ Value, child *trieNode) bool {
return child.traverseUnknown(resolver, tr) != nil
node.scalars.Iter(func(_, v util.T) bool {
child := v.(*trieNode)
if iterErr = child.traverseUnknown(resolver, tr); iterErr != nil {
return true
}
return false
})

return iterErr
@@ -820,7 +786,7 @@ func eqOperandsToRefAndValue(isVirtual func(Ref) bool, args []*Term, a, b *Term)
switch v := a.Value.(type) {
case Var:
for i, arg := range args {
if arg.Value.Compare(a.Value) == 0 {
if arg.Value.Compare(v) == 0 {
if bval, ok := indexValue(b); ok {
return &refindex{Ref: Ref{FunctionArgRootDocument, InternedIntNumberTerm(i)}, Value: bval}, true
}
@@ -883,7 +849,7 @@ func globDelimiterToString(delim *Term) (string, bool) {
if arr.Len() == 0 {
result = "."
} else {
for i := range arr.Len() {
for i := 0; i < arr.Len(); i++ {
term := arr.Elem(i)
s, ok := term.Value.(String)
if !ok {
@@ -896,8 +862,6 @@ func globDelimiterToString(delim *Term) (string, bool) {
return result, true
}

var globwildcard = VarTerm("$globwildcard")

func globPatternToArray(pattern *Term, delim string) *Term {

s, ok := pattern.Value.(String)
@@ -910,7 +874,7 @@ func globPatternToArray(pattern *Term, delim string) *Term {

for i := range parts {
if parts[i] == "*" {
arr[i] = globwildcard
arr[i] = VarTerm("$globwildcard")
} else {
var escaped bool
for _, c := range parts[i] {

38 vendor/github.com/open-policy-agent/opa/v1/ast/internal/scanner/scanner.go generated vendored
@@ -9,9 +9,9 @@ import (
"io"
"unicode"
"unicode/utf8"
"unsafe"

"github.com/open-policy-agent/opa/v1/ast/internal/tokens"
"github.com/open-policy-agent/opa/v1/util"
)

const bom = 0xFEFF
@@ -101,8 +101,8 @@ func (s *Scanner) Keyword(lit string) tokens.Token {
func (s *Scanner) AddKeyword(kw string, tok tokens.Token) {
s.keywords[kw] = tok

if tok == tokens.Every {
// importing 'every' means also importing 'in'
switch tok {
case tokens.Every: // importing 'every' means also importing 'in'
s.keywords["in"] = tokens.In
}
}
@@ -165,21 +165,7 @@ func (s *Scanner) Scan() (tokens.Token, Position, string, []Error) {
var lit string

if s.isWhitespace() {
// string(rune) is an unnecessary heap allocation in this case as we know all
// the possible whitespace values, and can simply translate to string ourselves
switch s.curr {
case ' ':
lit = " "
case '\t':
lit = "\t"
case '\n':
lit = "\n"
case '\r':
lit = "\r"
default:
// unreachable unless isWhitespace changes
lit = string(s.curr)
}
lit = string(s.curr)
s.next()
tok = tokens.Whitespace
} else if isLetter(s.curr) {
@@ -286,7 +272,7 @@ func (s *Scanner) scanIdentifier() string {
s.next()
}

return util.ByteSliceToString(s.bs[start : s.offset-1])
return byteSliceToString(s.bs[start : s.offset-1])
}

func (s *Scanner) scanNumber() string {
@@ -337,7 +323,7 @@ func (s *Scanner) scanNumber() string {
}
}

return util.ByteSliceToString(s.bs[start : s.offset-1])
return byteSliceToString(s.bs[start : s.offset-1])
}

func (s *Scanner) scanString() string {
@@ -371,7 +357,7 @@ func (s *Scanner) scanString() string {
}
}

return util.ByteSliceToString(s.bs[start : s.offset-1])
return byteSliceToString(s.bs[start : s.offset-1])
}

func (s *Scanner) scanRawString() string {
@@ -387,7 +373,7 @@ func (s *Scanner) scanRawString() string {
}
}

return util.ByteSliceToString(s.bs[start : s.offset-1])
return byteSliceToString(s.bs[start : s.offset-1])
}

func (s *Scanner) scanComment() string {
@@ -398,10 +384,10 @@ func (s *Scanner) scanComment() string {
end := s.offset - 1
// Trim carriage returns that precede the newline
if s.offset > 1 && s.bs[s.offset-2] == '\r' {
end -= 1
end = end - 1
}

return util.ByteSliceToString(s.bs[start:end])
return byteSliceToString(s.bs[start:end])
}

func (s *Scanner) next() {
@@ -471,3 +457,7 @@ func (s *Scanner) error(reason string) {
Col: s.col,
}, Message: reason})
}

func byteSliceToString(bs []byte) string {
return unsafe.String(unsafe.SliceData(bs), len(bs))
}

12 vendor/github.com/open-policy-agent/opa/v1/ast/internal/tokens/tokens.go generated vendored
@@ -4,14 +4,12 @@

package tokens

import "maps"

// Token represents a single Rego source code token
// for use by the Parser.
type Token uint8
type Token int

func (t Token) String() string {
if int(t) >= len(strings) {
if t < 0 || int(t) >= len(strings) {
return "unknown"
}
return strings[t]
@@ -139,7 +137,11 @@ var keywords = map[string]Token{

// Keywords returns a copy of the default string -> Token keyword map.
func Keywords() map[string]Token {
return maps.Clone(keywords)
cpy := make(map[string]Token, len(keywords))
for k, v := range keywords {
cpy[k] = v
}
return cpy
}

// IsKeyword returns if a token is a keyword

30 vendor/github.com/open-policy-agent/opa/v1/ast/interning.go generated vendored
@@ -17,9 +17,6 @@ var (
minusOneTerm = &Term{Value: Number("-1")}

InternedNullTerm = &Term{Value: Null{}}

InternedEmptyString = StringTerm("")
InternedEmptyObject = ObjectTerm()
)

// InternedBooleanTerm returns an interned term with the given boolean value.
@@ -63,29 +60,6 @@ func HasInternedIntNumberTerm(i int) bool {
return i >= -1 && i < len(intNumberTerms)
}

func InternedStringTerm(s string) *Term {
if term, ok := internedStringTerms[s]; ok {
return term
}

return StringTerm(s)
}

var internedStringTerms = map[string]*Term{
"": InternedEmptyString,
"0": StringTerm("0"),
"1": StringTerm("1"),
"2": StringTerm("2"),
"3": StringTerm("3"),
"4": StringTerm("4"),
"5": StringTerm("5"),
"6": StringTerm("6"),
"7": StringTerm("7"),
"8": StringTerm("8"),
"9": StringTerm("9"),
"10": StringTerm("10"),
}

var stringToIntNumberTermMap = map[string]*Term{
"-1": minusOneTerm,
"0": intNumberTerms[0],
@@ -1118,3 +1092,7 @@ var intNumberTerms = [...]*Term{
{Value: Number("511")},
{Value: Number("512")},
}

var InternedEmptyString = StringTerm("")

var InternedEmptyObject = ObjectTerm()

35 vendor/github.com/open-policy-agent/opa/v1/ast/map.go generated vendored
@@ -13,14 +13,15 @@ import (
// ValueMap represents a key/value map between AST term values. Any type of term
// can be used as a key in the map.
type ValueMap struct {
    hashMap *util.TypedHashMap[Value, Value]
    hashMap *util.HashMap
}

// NewValueMap returns a new ValueMap.
func NewValueMap() *ValueMap {
    return &ValueMap{
        hashMap: util.NewTypedHashMap(ValueEqual, ValueEqual, Value.Hash, Value.Hash, nil),
    vs := &ValueMap{
        hashMap: util.NewHashMap(valueEq, valueHash),
    }
    return vs
}

// MarshalJSON provides a custom marshaller for the ValueMap which
@@ -38,6 +39,16 @@ func (vs *ValueMap) MarshalJSON() ([]byte, error) {
    return json.Marshal(tmp)
}

// Copy returns a shallow copy of the ValueMap.
func (vs *ValueMap) Copy() *ValueMap {
    if vs == nil {
        return nil
    }
    cpy := NewValueMap()
    cpy.hashMap = vs.hashMap.Copy()
    return cpy
}

// Equal returns true if this ValueMap equals the other.
func (vs *ValueMap) Equal(other *ValueMap) bool {
    if vs == nil {
@@ -61,7 +72,7 @@ func (vs *ValueMap) Len() int {
func (vs *ValueMap) Get(k Value) Value {
    if vs != nil {
        if v, ok := vs.hashMap.Get(k); ok {
            return v
            return v.(Value)
        }
    }
    return nil
@@ -81,7 +92,11 @@ func (vs *ValueMap) Iter(iter func(Value, Value) bool) bool {
    if vs == nil {
        return false
    }
    return vs.hashMap.Iter(iter)
    return vs.hashMap.Iter(func(kt, vt util.T) bool {
        k := kt.(Value)
        v := vt.(Value)
        return iter(k, v)
    })
}

// Put inserts a key k into the map with value v.
@@ -106,3 +121,13 @@ func (vs *ValueMap) String() string {
    }
    return vs.hashMap.String()
}

func valueHash(v util.T) int {
    return v.(Value).Hash()
}

func valueEq(a, b util.T) bool {
    av := a.(Value)
    bv := b.(Value)
    return av.Compare(bv) == 0
}
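The ValueMap hunks show the central theme of this downgrade: the newer code wraps a generic TypedHashMap[Value, Value] and returns typed values directly, while v1.1.0 wraps an interface{}-based HashMap and pays a type assertion on every Get and Iter. A self-contained sketch of the two styles (names invented for illustration):

package main

import "fmt"

// typedStore is the generics style: lookups come back already typed.
type typedStore[K comparable, V any] struct{ m map[K]V }

func (s typedStore[K, V]) Get(k K) (V, bool) { v, ok := s.m[k]; return v, ok }

// anyStore is the pre-generics style: callers must type-assert results.
type anyStore struct{ m map[any]any }

func (s anyStore) Get(k any) (any, bool) { v, ok := s.m[k]; return v, ok }

func main() {
    ts := typedStore[string, int]{m: map[string]int{"a": 1}}
    if v, ok := ts.Get("a"); ok {
        fmt.Println(v + 1) // v is already an int
    }

    as := anyStore{m: map[any]any{"a": 1}}
    if v, ok := as.Get("a"); ok {
        fmt.Println(v.(int) + 1) // assertion needed; panics on a wrong type
    }
}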
39
vendor/github.com/open-policy-agent/opa/v1/ast/parser.go
generated
vendored
@@ -7,7 +7,6 @@ package ast
import (
    "bytes"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "math/big"
@@ -134,7 +133,7 @@ func (c parsedTermCache) String() string {
    s.WriteRune('{')
    var e *parsedTermCacheItem
    for e = c.m; e != nil; e = e.next {
        s.WriteString(e.String())
        s.WriteString(fmt.Sprintf("%v", e))
    }
    s.WriteRune('}')
    return s.String()
@@ -518,7 +517,7 @@ func parseAnnotations(comments []*Comment) ([]*Annotations, Errors) {
    var curr *metadataParser
    var blocks []*metadataParser

    for i := range comments {
    for i := 0; i < len(comments); i++ {
        if curr != nil {
            if comments[i].Location.Row == comments[i-1].Location.Row+1 && comments[i].Location.Col == 1 {
                curr.Append(comments[i])
@@ -726,9 +725,7 @@ func (p *Parser) parseRules() []*Rule {

    // p[x] if ... becomes a single-value rule p[x]
    if hasIf && !usesContains && len(rule.Head.Ref()) == 2 {
        v := rule.Head.Ref()[1]
        _, isRef := v.Value.(Ref)
        if (!v.IsGround() || isRef) && len(rule.Head.Args) == 0 {
        if !rule.Head.Ref()[1].IsGround() && len(rule.Head.Args) == 0 {
            rule.Head.Key = rule.Head.Ref()[1]
        }
@@ -1641,10 +1638,6 @@ func (p *Parser) parseNumber() *Term {

func (p *Parser) parseString() *Term {
    if p.s.lit[0] == '"' {
        if p.s.lit == "\"\"" {
            return NewTerm(InternedEmptyString.Value).SetLocation(p.s.Loc())
        }

        var s string
        err := json.Unmarshal([]byte(p.s.lit), &s)
        if err != nil {
@@ -2067,7 +2060,7 @@ func (p *Parser) parseTermPairList(end tokens.Token, r [][2]*Term) [][2]*Term {
func (p *Parser) parseTermOp(values ...tokens.Token) *Term {
    for i := range values {
        if p.s.tok == values[i] {
            r := RefTerm(VarTerm(p.s.tok.String()).SetLocation(p.s.Loc())).SetLocation(p.s.Loc())
            r := RefTerm(VarTerm(fmt.Sprint(p.s.tok)).SetLocation(p.s.Loc())).SetLocation(p.s.Loc())
            p.scan()
            return r
        }
@@ -2361,7 +2354,7 @@ func (b *metadataParser) Parse() (*Annotations, error) {
    var raw rawAnnotation

    if len(bytes.TrimSpace(b.buf.Bytes())) == 0 {
        return nil, errors.New("expected METADATA block, found whitespace")
        return nil, fmt.Errorf("expected METADATA block, found whitespace")
    }

    if err := yaml.Unmarshal(b.buf.Bytes(), &raw); err != nil {
@@ -2410,7 +2403,7 @@ func (b *metadataParser) Parse() (*Annotations, error) {

    a.Path, err = ParseRef(k)
    if err != nil {
        return nil, errors.New("invalid document reference")
        return nil, fmt.Errorf("invalid document reference")
    }

    switch v := v.(type) {
@@ -2510,7 +2503,7 @@ func unwrapPair(pair map[string]interface{}) (string, interface{}) {
    return "", nil
}

var errInvalidSchemaRef = errors.New("invalid schema reference")
var errInvalidSchemaRef = fmt.Errorf("invalid schema reference")

// NOTE(tsandall): 'schema' is not registered as a root because it's not
// supported by the compiler or evaluator today. Once we fix that, we can remove
@@ -2549,7 +2542,7 @@ func parseRelatedResource(rr interface{}) (*RelatedResourceAnnotation, error) {
        }
        return &RelatedResourceAnnotation{Ref: *u}, nil
    }
    return nil, errors.New("ref URL may not be empty string")
    return nil, fmt.Errorf("ref URL may not be empty string")
case map[string]interface{}:
    description := strings.TrimSpace(getSafeString(rr, "description"))
    ref := strings.TrimSpace(getSafeString(rr, "ref"))
@@ -2560,10 +2553,10 @@ func parseRelatedResource(rr interface{}) (*RelatedResourceAnnotation, error) {
        }
        return &RelatedResourceAnnotation{Description: description, Ref: *u}, nil
    }
    return nil, errors.New("'ref' value required in object")
    return nil, fmt.Errorf("'ref' value required in object")
}

return nil, errors.New("invalid value type, must be string or map")
return nil, fmt.Errorf("invalid value type, must be string or map")
}

func parseAuthor(a interface{}) (*AuthorAnnotation, error) {
@@ -2581,10 +2574,10 @@ func parseAuthor(a interface{}) (*AuthorAnnotation, error) {
    if len(name) > 0 || len(email) > 0 {
        return &AuthorAnnotation{name, email}, nil
    }
    return nil, errors.New("'name' and/or 'email' values required in object")
    return nil, fmt.Errorf("'name' and/or 'email' values required in object")
}

return nil, errors.New("invalid value type, must be string or map")
return nil, fmt.Errorf("invalid value type, must be string or map")
}

func getSafeString(m map[string]interface{}, k string) string {
@@ -2606,7 +2599,7 @@ func parseAuthorString(s string) (*AuthorAnnotation, error) {
    parts := strings.Fields(s)

    if len(parts) == 0 {
        return nil, errors.New("author is an empty string")
        return nil, fmt.Errorf("author is an empty string")
    }

    namePartCount := len(parts)
@@ -2616,7 +2609,7 @@ func parseAuthorString(s string) (*AuthorAnnotation, error) {
        strings.HasSuffix(trailing, emailSuffix) {
        email = trailing[len(emailPrefix):]
        email = email[0 : len(email)-len(emailSuffix)]
        namePartCount -= 1
        namePartCount = namePartCount - 1
    }

    name := strings.Join(parts[0:namePartCount], " ")
@@ -2642,7 +2635,7 @@ func convertYAMLMapKeyTypes(x any, path []string) (any, error) {
    return result, nil
case []any:
    for i := range x {
        x[i], err = convertYAMLMapKeyTypes(x[i], append(path, strconv.Itoa(i)))
        x[i], err = convertYAMLMapKeyTypes(x[i], append(path, fmt.Sprintf("%d", i)))
        if err != nil {
            return nil, err
        }
@@ -2688,7 +2681,7 @@ func IsFutureKeywordForRegoVersion(s string, v RegoVersion) bool {
func (p *Parser) futureImport(imp *Import, allowedFutureKeywords map[string]tokens.Token) {
    path := imp.Path.Value.(Ref)

    if len(path) == 1 || !path[1].Equal(keywordsTerm) {
    if len(path) == 1 || !path[1].Equal(StringTerm("keywords")) {
        p.errorf(imp.Path.Location, "invalid import, must be `future.keywords`")
        return
    }
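One parser.go hunk replaces the package-level keywordsTerm with a StringTerm("keywords") built on every call: hoisting an immutable term into a package var costs one allocation at init instead of one per call. A tiny illustration of the trade-off, with invented names:

package main

import "fmt"

type Term struct{ Value string }

func StringTerm(s string) *Term { return &Term{Value: s} }

// Hoisted once at package init; every caller shares the same pointer.
var keywordsTerm = StringTerm("keywords")

func equalHoisted(t *Term) bool { return t.Value == keywordsTerm.Value }

// Builds and discards a fresh term on every call.
func equalInline(t *Term) bool { return t.Value == StringTerm("keywords").Value }

func main() {
    t := StringTerm("keywords")
    fmt.Println(equalHoisted(t), equalInline(t)) // true true, different costs
}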
18
vendor/github.com/open-policy-agent/opa/v1/ast/parser_ext.go
generated
vendored
@@ -155,7 +155,7 @@ func MustParseTerm(input string) *Term {
func ParseRuleFromBody(module *Module, body Body) (*Rule, error) {

    if len(body) != 1 {
        return nil, errors.New("multiple expressions cannot be used for rule head")
        return nil, fmt.Errorf("multiple expressions cannot be used for rule head")
    }

    return ParseRuleFromExpr(module, body[0])
@@ -166,11 +166,11 @@ func ParseRuleFromBody(module *Module, body Body) (*Rule, error) {
func ParseRuleFromExpr(module *Module, expr *Expr) (*Rule, error) {

    if len(expr.With) > 0 {
        return nil, errors.New("expressions using with keyword cannot be used for rule head")
        return nil, fmt.Errorf("expressions using with keyword cannot be used for rule head")
    }

    if expr.Negated {
        return nil, errors.New("negated expressions cannot be used for rule head")
        return nil, fmt.Errorf("negated expressions cannot be used for rule head")
    }

    if _, ok := expr.Terms.(*SomeDecl); ok {
@@ -207,7 +207,7 @@ func ParseRuleFromExpr(module *Module, expr *Expr) (*Rule, error) {
    }

    if _, ok := BuiltinMap[expr.Operator().String()]; ok {
        return nil, errors.New("rule name conflicts with built-in function")
        return nil, fmt.Errorf("rule name conflicts with built-in function")
    }

    return ParseRuleFromCallExpr(module, expr.Terms.([]*Term))
@@ -272,7 +272,7 @@ func ParseCompleteDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, erro
    }
    head = RefHead(r)
    if len(r) > 1 && !r[len(r)-1].IsGround() {
        return nil, errors.New("ref not ground")
        return nil, fmt.Errorf("ref not ground")
    }
} else {
    return nil, fmt.Errorf("%v cannot be used for rule name", ValueName(lhs.Value))
@@ -387,7 +387,7 @@ func ParseRuleFromCallEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) {

    call, ok := lhs.Value.(Call)
    if !ok {
        return nil, errors.New("must be call")
        return nil, fmt.Errorf("must be call")
    }

    ref, ok := call[0].Value.(Ref)
@@ -419,7 +419,7 @@ func ParseRuleFromCallEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
func ParseRuleFromCallExpr(module *Module, terms []*Term) (*Rule, error) {

    if len(terms) <= 1 {
        return nil, errors.New("rule argument list must take at least one argument")
        return nil, fmt.Errorf("rule argument list must take at least one argument")
    }

    loc := terms[0].Location
@@ -600,7 +600,7 @@ func ParseStatement(input string) (Statement, error) {
        return nil, err
    }
    if len(stmts) != 1 {
        return nil, errors.New("expected exactly one statement")
        return nil, fmt.Errorf("expected exactly one statement")
    }
    return stmts[0], nil
}
@@ -611,7 +611,7 @@ func ParseStatementWithOpts(input string, popts ParserOptions) (Statement, error
        return nil, err
    }
    if len(stmts) != 1 {
        return nil, errors.New("expected exactly one statement")
        return nil, fmt.Errorf("expected exactly one statement")
    }
    return stmts[0], nil
}
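The many errors.New to fmt.Errorf swaps in parser.go and parser_ext.go are behavior-neutral for constant messages: errors.New is the cheaper, linter-preferred spelling when there is no format verb, while fmt.Errorf only earns its keep with verbs or %w wrapping. For example:

package main

import (
    "errors"
    "fmt"
)

// Constant message: errors.New suffices (and is what the newer code uses).
var errOneStatement = errors.New("expected exactly one statement")

// fmt.Errorf is needed once formatting or wrapping enters the picture.
func check(n int) error {
    if n != 1 {
        return fmt.Errorf("got %d statements: %w", n, errOneStatement)
    }
    return nil
}

func main() {
    err := check(3)
    fmt.Println(errors.Is(err, errOneStatement)) // true: %w preserved the chain
}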
68
vendor/github.com/open-policy-agent/opa/v1/ast/policy.go
generated
vendored
@@ -8,14 +8,21 @@ import (
    "bytes"
    "encoding/json"
    "fmt"
    "slices"
    "math/rand"
    "strings"
    "time"

    "github.com/open-policy-agent/opa/v1/ast/internal/tokens"
    astJSON "github.com/open-policy-agent/opa/v1/ast/json"
    "github.com/open-policy-agent/opa/v1/util"
)

// Initialize seed for term hashing. This is intentionally placed before the
// root document sets are constructed to ensure they use the same hash seed as
// subsequent lookups. If the hash seeds are out of sync, lookups will fail.
var hashSeed = rand.New(rand.NewSource(time.Now().UnixNano()))
var hashSeed0 = (uint64(hashSeed.Uint32()) << 32) | uint64(hashSeed.Uint32())

// DefaultRootDocument is the default root document.
//
// All package directives inside source files are implicitly prefixed with the
@@ -495,7 +502,7 @@ func (c *Comment) Equal(other *Comment) bool {
// Compare returns an integer indicating whether pkg is less than, equal to,
// or greater than other.
func (pkg *Package) Compare(other *Package) int {
    return termSliceCompare(pkg.Path, other.Path)
    return Compare(pkg.Path, other.Path)
}

// Copy returns a deep copy of pkg.
@@ -587,8 +594,7 @@ func (imp *Import) Compare(other *Import) int {
    if cmp := Compare(imp.Path, other.Path); cmp != 0 {
        return cmp
    }

    return VarCompare(imp.Alias, other.Alias)
    return Compare(imp.Alias, other.Alias)
}

// Copy returns a deep copy of imp.
@@ -638,7 +644,7 @@ func (imp *Import) Name() Var {
func (imp *Import) String() string {
    buf := []string{"import", imp.Path.String()}
    if len(imp.Alias) > 0 {
        buf = append(buf, "as", imp.Alias.String())
        buf = append(buf, "as "+imp.Alias.String())
    }
    return strings.Join(buf, " ")
}
@@ -675,11 +681,8 @@ func (rule *Rule) Compare(other *Rule) int {
    if cmp := rule.Head.Compare(other.Head); cmp != 0 {
        return cmp
    }
    if rule.Default != other.Default {
        if !rule.Default {
            return -1
        }
        return 1
    if cmp := util.Compare(rule.Default, other.Default); cmp != 0 {
        return cmp
    }
    if cmp := rule.Body.Compare(other.Body); cmp != 0 {
        return cmp
@@ -698,11 +701,9 @@ func (rule *Rule) Copy() *Rule {
    cpy.Head = rule.Head.Copy()
    cpy.Body = rule.Body.Copy()

    if len(cpy.Annotations) > 0 {
        cpy.Annotations = make([]*Annotations, len(rule.Annotations))
        for i, a := range rule.Annotations {
            cpy.Annotations[i] = a.Copy(&cpy)
        }
    cpy.Annotations = make([]*Annotations, len(rule.Annotations))
    for i, a := range rule.Annotations {
        cpy.Annotations[i] = a.Copy(&cpy)
    }

    if cpy.Else != nil {
@@ -779,7 +780,9 @@ func (rule *Rule) stringWithOpts(opts toStringOpts) string {
    case RegoV1, RegoV0CompatV1:
        buf = append(buf, "if")
    }
    buf = append(buf, "{", rule.Body.String(), "}")
    buf = append(buf, "{")
    buf = append(buf, rule.Body.String())
    buf = append(buf, "}")
}
if rule.Else != nil {
    buf = append(buf, rule.Else.elseString(opts))
@@ -825,7 +828,8 @@ func (rule *Rule) elseString(opts toStringOpts) string {

    value := rule.Head.Value
    if value != nil {
        buf = append(buf, "=", value.String())
        buf = append(buf, "=")
        buf = append(buf, value.String())
    }

    switch opts.RegoVersion() {
@@ -833,7 +837,9 @@ func (rule *Rule) elseString(opts toStringOpts) string {
        buf = append(buf, "if")
    }

    buf = append(buf, "{", rule.Body.String(), "}")
    buf = append(buf, "{")
    buf = append(buf, rule.Body.String())
    buf = append(buf, "}")

    if rule.Else != nil {
        buf = append(buf, rule.Else.elseString(opts))
@@ -886,7 +892,7 @@ func RefHead(ref Ref, args ...*Term) *Head {
}

// DocKind represents the collection of document types that can be produced by rules.
type DocKind byte
type DocKind int

const (
    // CompleteDoc represents a document that is completely defined by the rule.
@@ -906,13 +912,11 @@ func (head *Head) DocKind() DocKind {
            return PartialObjectDoc
        }
        return PartialSetDoc
    } else if head.HasDynamicRef() {
        return PartialObjectDoc
    }
    return CompleteDoc
}

type RuleKind byte
type RuleKind int

const (
    SingleValue = iota
@@ -969,7 +973,7 @@ func (head *Head) Compare(other *Head) int {
    if cmp := Compare(head.Reference, other.Reference); cmp != 0 {
        return cmp
    }
    if cmp := VarCompare(head.Name, other.Name); cmp != 0 {
    if cmp := Compare(head.Name, other.Name); cmp != 0 {
        return cmp
    }
    if cmp := Compare(head.Key, other.Key); cmp != 0 {
@@ -1087,7 +1091,8 @@ func (head *Head) SetLoc(loc *Location) {

func (head *Head) HasDynamicRef() bool {
    pos := head.Reference.Dynamic()
    return pos > 0 && (pos < len(head.Reference))
    // Ref is dynamic if it has one non-constant term that isn't the first or last term or if it's a partial set rule.
    return pos > 0 && (pos < len(head.Reference)-1 || head.RuleKind() == MultiValue)
}

// Copy returns a deep copy of a.
@@ -1172,7 +1177,7 @@ func (body Body) Compare(other Body) int {
    if len(other) < minLen {
        minLen = len(other)
    }
    for i := range minLen {
    for i := 0; i < minLen; i++ {
        if cmp := body[i].Compare(other[i]); cmp != 0 {
            return cmp
        }
@@ -1197,7 +1202,12 @@ func (body Body) Copy() Body {

// Contains returns true if this body contains the given expression.
func (body Body) Contains(x *Expr) bool {
    return slices.ContainsFunc(body, x.Equal)
    for _, e := range body {
        if e.Equal(x) {
            return true
        }
    }
    return false
}

// Equal returns true if this Body is equal to the other Body.
@@ -1396,7 +1406,11 @@ func (expr *Expr) Copy() *Expr {
    case *SomeDecl:
        cpy.Terms = ts.Copy()
    case []*Term:
        cpy.Terms = termSliceCopy(ts)
        cpyTs := make([]*Term, len(ts))
        for i := range ts {
            cpyTs[i] = ts[i].Copy()
        }
        cpy.Terms = cpyTs
    case *Term:
        cpy.Terms = ts.Copy()
    case *Every:
29
vendor/github.com/open-policy-agent/opa/v1/ast/schema.go
generated
vendored
@@ -13,32 +13,41 @@ import (

// SchemaSet holds a map from a path to a schema.
type SchemaSet struct {
    m *util.HasherMap[Ref, any]
    m *util.HashMap
}

// NewSchemaSet returns an empty SchemaSet.
func NewSchemaSet() *SchemaSet {

    eqFunc := func(a, b util.T) bool {
        return a.(Ref).Equal(b.(Ref))
    }

    hashFunc := func(x util.T) int { return x.(Ref).Hash() }

    return &SchemaSet{
        m: util.NewHasherMap[Ref, any](RefEqual),
        m: util.NewHashMap(eqFunc, hashFunc),
    }
}

// Put inserts a raw schema into the set.
func (ss *SchemaSet) Put(path Ref, raw any) {
func (ss *SchemaSet) Put(path Ref, raw interface{}) {
    ss.m.Put(path, raw)
}

// Get returns the raw schema identified by the path.
func (ss *SchemaSet) Get(path Ref) any {
    if ss != nil {
        if x, ok := ss.m.Get(path); ok {
            return x
        }
func (ss *SchemaSet) Get(path Ref) interface{} {
    if ss == nil {
        return nil
    }
    return nil
    x, ok := ss.m.Get(path)
    if !ok {
        return nil
    }
    return x
}

func loadSchema(raw any, allowNet []string) (types.Type, error) {
func loadSchema(raw interface{}, allowNet []string) (types.Type, error) {

    jsonSchema, err := compileSchema(raw, allowNet)
    if err != nil {
69
vendor/github.com/open-policy-agent/opa/v1/ast/syncpools.go
generated
vendored
@@ -1,69 +0,0 @@
package ast

import (
    "strings"
    "sync"
)

type termPtrPool struct {
    pool sync.Pool
}

type stringBuilderPool struct {
    pool sync.Pool
}

type indexResultPool struct {
    pool sync.Pool
}

func (p *termPtrPool) Get() *Term {
    return p.pool.Get().(*Term)
}

func (p *termPtrPool) Put(t *Term) {
    p.pool.Put(t)
}

func (p *stringBuilderPool) Get() *strings.Builder {
    return p.pool.Get().(*strings.Builder)
}

func (p *stringBuilderPool) Put(sb *strings.Builder) {
    sb.Reset()
    p.pool.Put(sb)
}

func (p *indexResultPool) Get() *IndexResult {
    return p.pool.Get().(*IndexResult)
}

func (p *indexResultPool) Put(x *IndexResult) {
    if x != nil {
        p.pool.Put(x)
    }
}

var TermPtrPool = &termPtrPool{
    pool: sync.Pool{
        New: func() any {
            return &Term{}
        },
    },
}

var sbPool = &stringBuilderPool{
    pool: sync.Pool{
        New: func() any {
            return &strings.Builder{}
        },
    },
}

var IndexResultPool = &indexResultPool{
    pool: sync.Pool{
        New: func() any {
            return &IndexResult{}
        },
    },
}
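syncpools.go is dropped wholesale by the downgrade; it wrapped sync.Pool in small typed helpers so hot paths could recycle Terms and strings.Builders instead of allocating fresh ones. A minimal standalone sketch of that wrapper pattern:

package main

import (
    "fmt"
    "strings"
    "sync"
)

// builderPool recycles strings.Builder values across hot-path calls.
type builderPool struct{ pool sync.Pool }

func (p *builderPool) Get() *strings.Builder {
    return p.pool.Get().(*strings.Builder)
}

func (p *builderPool) Put(sb *strings.Builder) {
    sb.Reset() // always hand back a clean builder
    p.pool.Put(sb)
}

var sbPool = &builderPool{pool: sync.Pool{
    New: func() any { return &strings.Builder{} },
}}

func join(parts []string) string {
    sb := sbPool.Get()
    defer sbPool.Put(sb)
    for i, p := range parts {
        if i > 0 {
            sb.WriteString(", ")
        }
        sb.WriteString(p)
    }
    return sb.String()
}

func main() {
    fmt.Println(join([]string{"a", "b", "c"})) // a, b, c
}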
154
vendor/github.com/open-policy-agent/opa/v1/ast/term.go
generated
vendored
@@ -8,7 +8,6 @@ package ast
import (
    "bytes"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "math"
@@ -20,14 +19,14 @@ import (
    "strings"
    "sync"

    "github.com/cespare/xxhash/v2"
    "github.com/OneOfOne/xxhash"

    astJSON "github.com/open-policy-agent/opa/v1/ast/json"
    "github.com/open-policy-agent/opa/v1/ast/location"
    "github.com/open-policy-agent/opa/v1/util"
)

var errFindNotFound = errors.New("find: not found")
var errFindNotFound = fmt.Errorf("find: not found")

// Location records a position in source code.
type Location = location.Location
@@ -56,12 +55,13 @@ type Value interface {
// InterfaceToValue converts a native Go value x to a Value.
func InterfaceToValue(x interface{}) (Value, error) {
    switch x := x.(type) {
    case Value:
        return x, nil
    case nil:
        return NullValue, nil
    case bool:
        return InternedBooleanTerm(x).Value, nil
        if x {
            return InternedBooleanTerm(true).Value, nil
        }
        return InternedBooleanTerm(false).Value, nil
    case json.Number:
        if interned := InternedIntNumberTermFromString(string(x)); interned != nil {
            return interned.Value, nil
@@ -87,12 +87,6 @@ func InterfaceToValue(x interface{}) (Value, error) {
            r[i].Value = e
        }
        return NewArray(r...), nil
    case []string:
        r := util.NewPtrSlice[Term](len(x))
        for i, e := range x {
            r[i].Value = String(e)
        }
        return NewArray(r...), nil
    case map[string]any:
        kvs := util.NewPtrSlice[Term](len(x) * 2)
        idx := 0
@@ -188,7 +182,7 @@ func valueToInterface(v Value, resolver Resolver, opt JSONOpt) (interface{}, err
    return string(v), nil
case *Array:
    buf := []interface{}{}
    for i := range v.Len() {
    for i := 0; i < v.Len(); i++ {
        x1, err := valueToInterface(v.Elem(i).Value, resolver, opt)
        if err != nil {
            return nil, err
@@ -624,7 +618,10 @@ func (bol Boolean) Compare(other Value) int {
// Find returns the current value or a not found error.
func (bol Boolean) Find(path Ref) (Value, error) {
    if len(path) == 0 {
        return InternedBooleanTerm(bool(bol)).Value, nil
        if bol {
            return InternedBooleanTerm(true).Value, nil
        }
        return InternedBooleanTerm(false).Value, nil
    }
    return nil, errFindNotFound
}
@@ -721,7 +718,7 @@ func (num Number) Hash() int {
    f, err := json.Number(num).Float64()
    if err != nil {
        bs := []byte(num)
        h := xxhash.Sum64(bs)
        h := xxhash.Checksum64(bs)
        return int(h)
    }
    return int(f)
@@ -837,7 +834,8 @@ func (str String) String() string {

// Hash returns the hash code for the Value.
func (str String) Hash() int {
    return int(xxhash.Sum64String(string(str)))
    h := xxhash.ChecksumString64S(string(str), hashSeed0)
    return int(h)
}

// Var represents a variable as defined by the language.
@@ -878,7 +876,8 @@ func (v Var) Find(path Ref) (Value, error) {

// Hash returns the hash code for the Value.
func (v Var) Hash() int {
    return int(xxhash.Sum64String(string(v)))
    h := xxhash.ChecksumString64S(string(v), hashSeed0)
    return int(h)
}

// IsGround always returns false.
@@ -1015,25 +1014,6 @@ func (ref Ref) Copy() Ref {
    return termSliceCopy(ref)
}

// CopyNonGround returns a new ref with deep copies of the non-ground parts and shallow
// copies of the ground parts. This is a *much* cheaper operation than Copy for operations
// that only intend to modify (e.g. plug) the non-ground parts. The head element of the ref
// is always shallow copied.
func (ref Ref) CopyNonGround() Ref {
    cpy := make(Ref, len(ref))
    cpy[0] = ref[0]

    for i := 1; i < len(ref); i++ {
        if ref[i].Value.IsGround() {
            cpy[i] = ref[i]
        } else {
            cpy[i] = ref[i].Copy()
        }
    }

    return cpy
}

// Equal returns true if ref is equal to other.
func (ref Ref) Equal(other Value) bool {
    switch o := other.(type) {
@@ -1163,7 +1143,7 @@ func (ref Ref) Ptr() (string, error) {
    if str, ok := term.Value.(String); ok {
        parts = append(parts, url.PathEscape(string(str)))
    } else {
        return "", errors.New("invalid path value type")
        return "", fmt.Errorf("invalid path value type")
    }
}
return strings.Join(parts, "/"), nil
@@ -1175,12 +1155,20 @@ func IsVarCompatibleString(s string) bool {
    return varRegexp.MatchString(s)
}

var sbPool = sync.Pool{
    New: func() any {
        return &strings.Builder{}
    },
}

func (ref Ref) String() string {
    if len(ref) == 0 {
        return ""
    }

    sb := sbPool.Get()
    sb := sbPool.Get().(*strings.Builder)
    sb.Reset()

    defer sbPool.Put(sb)

    sb.Grow(10 * len(ref))
@@ -1323,15 +1311,7 @@ func (arr *Array) Find(path Ref) (Value, error) {
    if i < 0 || i >= arr.Len() {
        return nil, errFindNotFound
    }

    term := arr.Elem(i)
    // Using Find on scalar values costs an allocation (type -> Value conversion)
    // and since we already have the Value here, we can avoid that.
    if len(path) == 1 && IsScalar(term.Value) {
        return term.Value, nil
    }

    return term.Value.Find(path[1:])
    return arr.Elem(i).Value.Find(path[1:])
}

// Get returns the element at pos or nil if not possible.
@@ -1386,19 +1366,20 @@ func (arr *Array) MarshalJSON() ([]byte, error) {
}

func (arr *Array) String() string {
    sb := sbPool.Get()
    sb := sbPool.Get().(*strings.Builder)
    sb.Reset()
    sb.Grow(len(arr.elems) * 16)

    defer sbPool.Put(sb)

    sb.WriteByte('[')
    sb.WriteRune('[')
    for i, e := range arr.elems {
        if i > 0 {
            sb.WriteString(", ")
        }
        sb.WriteString(e.String())
    }
    sb.WriteByte(']')
    sb.WriteRune(']')

    return sb.String()
}
@@ -1584,19 +1565,20 @@ func (s *set) String() string {
    return "set()"
}

sb := sbPool.Get()
sb := sbPool.Get().(*strings.Builder)
sb.Reset()
sb.Grow(s.Len() * 16)

defer sbPool.Put(sb)

sb.WriteByte('{')
sb.WriteRune('{')
for i := range s.sortedKeys() {
    if i > 0 {
        sb.WriteString(", ")
    }
    sb.WriteString(s.keys[i].Value.String())
}
sb.WriteByte('}')
sb.WriteRune('}')

return sb.String()
}
@@ -1766,6 +1748,20 @@ func (s *set) Slice() []*Term {
    return s.sortedKeys()
}

// Internal method to use for cases where a set may be reused in favor
// of creating a new one (with the associated allocations).
func (s *set) clear() {
    clear(s.elems)
    s.keys = s.keys[:0]
    s.hash = 0
    s.ground = true
    s.sortGuard = sync.Once{}
}

func (s *set) insertNoGuard(x *Term) {
    s.insert(x, false)
}

// NOTE(philipc): We assume a many-readers, single-writer model here.
// This method should NOT be used concurrently, or else we risk data races.
func (s *set) insert(x *Term, resetSortGuard bool) {
@@ -2217,7 +2213,7 @@ type objectElem struct {
type objectElemSlice []*objectElem

func (s objectElemSlice) Less(i, j int) bool { return Compare(s[i].key.Value, s[j].key.Value) < 0 }
func (s objectElemSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s objectElemSlice) Swap(i, j int)      { x := s[i]; s[i] = s[j]; s[j] = x }
func (s objectElemSlice) Len() int           { return len(s) }

// Item is a helper for constructing an tuple containing two Terms
@@ -2257,7 +2253,7 @@ func (obj *object) Compare(other Value) int {
    if len(b.keys) < len(akeys) {
        minLen = len(bkeys)
    }
    for i := range minLen {
    for i := 0; i < minLen; i++ {
        keysCmp := Compare(akeys[i].key, bkeys[i].key)
        if keysCmp < 0 {
            return -1
@@ -2286,17 +2282,11 @@ func (obj *object) Find(path Ref) (Value, error) {
    if len(path) == 0 {
        return obj, nil
    }
    term := obj.Get(path[0])
    if term == nil {
    value := obj.Get(path[0])
    if value == nil {
        return nil, errFindNotFound
    }
    // Using Find on scalar values costs an allocation (type -> Value conversion)
    // and since we already have the Value here, we can avoid that.
    if len(path) == 1 && IsScalar(term.Value) {
        return term.Value, nil
    }

    return term.Value.Find(path[1:])
    return value.Value.Find(path[1:])
}

func (obj *object) Insert(k, v *Term) {
@@ -2385,8 +2375,7 @@ func (obj *object) Foreach(f func(*Term, *Term)) {
}

// Map returns a new Object constructed by mapping each element in the object
// using the function f. If f returns an error, the error is returned by Map.
// If f return a nil key, the element is skipped.
// using the function f.
func (obj *object) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, error) {
    cpy := newobject(obj.Len())
    for _, node := range obj.sortedKeys() {
@@ -2394,9 +2383,7 @@ func (obj *object) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, erro
        if err != nil {
            return nil, err
        }
        if k != nil {
            cpy.insert(k, v, false)
        }
        cpy.insert(k, v, false)
    }
    return cpy, nil
}
@@ -2497,12 +2484,13 @@ func (obj *object) Len() int {
}

func (obj *object) String() string {
    sb := sbPool.Get()
    sb := sbPool.Get().(*strings.Builder)
    sb.Reset()
    sb.Grow(obj.Len() * 32)

    defer sbPool.Put(sb)

    sb.WriteByte('{')
    sb.WriteRune('{')

    for i, elem := range obj.sortedKeys() {
        if i > 0 {
@@ -2512,7 +2500,7 @@ func (obj *object) String() string {
        sb.WriteString(": ")
        sb.WriteString(elem.value.String())
    }
    sb.WriteByte('}')
    sb.WriteRune('}')

    return sb.String()
}
@@ -2762,7 +2750,7 @@ func filterObject(o Value, filter Value) (Value, error) {
    return o, nil
case *Array:
    values := NewArray()
    for i := range v.Len() {
    for i := 0; i < v.Len(); i++ {
        subFilter := filteredObj.Get(StringTerm(strconv.Itoa(i)))
        if subFilter != nil {
            filteredValue, err := filterObject(v.Elem(i).Value, subFilter.Value)
@@ -3066,12 +3054,16 @@ func (c Call) String() string {

func termSliceCopy(a []*Term) []*Term {
    cpy := make([]*Term, len(a))
    for i := range a {
        cpy[i] = a[i].Copy()
    }
    termSliceCopyTo(a, cpy)
    return cpy
}

func termSliceCopyTo(src, dst []*Term) {
    for i := range src {
        dst[i] = src[i].Copy()
    }
}

func termSliceEqual(a, b []*Term) bool {
    if len(a) == len(b) {
        for i := range a {
@@ -3123,7 +3115,7 @@ func unmarshalBody(b []interface{}) (Body, error) {
    }
    return buf, nil
unmarshal_error:
    return nil, errors.New("ast: unable to unmarshal body")
    return nil, fmt.Errorf("ast: unable to unmarshal body")
}

func unmarshalExpr(expr *Expr, v map[string]interface{}) error {
@@ -3260,7 +3252,7 @@ func unmarshalTermSlice(s []interface{}) ([]*Term, error) {
        }
        return nil, err
    }
    return nil, errors.New("ast: unable to unmarshal term")
    return nil, fmt.Errorf("ast: unable to unmarshal term")
}
return buf, nil
}
@@ -3269,7 +3261,7 @@ func unmarshalTermSliceValue(d map[string]interface{}) ([]*Term, error) {
    if s, ok := d["value"].([]interface{}); ok {
        return unmarshalTermSlice(s)
    }
    return nil, errors.New(`ast: unable to unmarshal term (expected {"value": [...], "type": ...} where type is one of: ref, array, or set)`)
    return nil, fmt.Errorf(`ast: unable to unmarshal term (expected {"value": [...], "type": ...} where type is one of: ref, array, or set)`)
}

func unmarshalWith(i interface{}) (*With, error) {
@@ -3289,7 +3281,7 @@ func unmarshalWith(i interface{}) (*With, error) {
    }
    return nil, err
}
return nil, errors.New(`ast: unable to unmarshal with modifier (expected {"target": {...}, "value": {...}})`)
return nil, fmt.Errorf(`ast: unable to unmarshal with modifier (expected {"target": {...}, "value": {...}})`)
}

func unmarshalValue(d map[string]interface{}) (Value, error) {
@@ -3407,5 +3399,5 @@ func unmarshalValue(d map[string]interface{}) (Value, error) {
    }
}
unmarshal_error:
    return nil, errors.New("ast: unable to unmarshal term")
    return nil, fmt.Errorf("ast: unable to unmarshal term")
}
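The Hash hunks swap cespare/xxhash's unseeded Sum64String for OneOfOne/xxhash's seeded ChecksumString64S, with hashSeed0 generated at init (see the policy.go hunk above): hashes stay consistent within one process but vary across runs. The standard library's hash/maphash expresses the same idea; a minimal sketch:

package main

import (
    "fmt"
    "hash/maphash"
)

// seed plays the role of hashSeed0: fixed for the process lifetime, so all
// lookups agree with each other, but values differ between runs.
var seed = maphash.MakeSeed()

func hashString(s string) int {
    return int(maphash.String(seed, s))
}

func main() {
    a, b := hashString("input"), hashString("input")
    fmt.Println(a == b)               // true within one process
    fmt.Println(hashString("x") == a) // almost certainly false
}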
2
vendor/github.com/open-policy-agent/opa/v1/ast/transform.go
generated
vendored
@@ -234,7 +234,7 @@ func Transform(t Transformer, x interface{}) (interface{}, error) {
        return k, v, nil
    })
case *Array:
    for i := range y.Len() {
    for i := 0; i < y.Len(); i++ {
        v, err := transformTerm(t, y.Elem(i))
        if err != nil {
            return nil, err
2
vendor/github.com/open-policy-agent/opa/v1/ast/unify.go
generated
vendored
@@ -135,7 +135,7 @@ func (u *unifier) unify(a *Term, b *Term) {
    }
case *Array:
    if a.Len() == b.Len() {
        for i := range a.Len() {
        for i := 0; i < a.Len(); i++ {
            u.unify(a.Elem(i), b.Elem(i))
        }
    }
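The recurring `for i := range n` to `for i := 0; i < n; i++` rewrites in transform.go, unify.go, visit.go, and bundle.go are all the same downgrade: ranging over an integer requires Go 1.22, and both spellings visit i = 0..n-1 identically. For instance:

package main

import "fmt"

func main() {
    n := 3

    for i := range n { // Go 1.22+: range over an int
        fmt.Print(i, " ") // 0 1 2
    }
    fmt.Println()

    for i := 0; i < n; i++ { // equivalent on every Go version
        fmt.Print(i, " ") // 0 1 2
    }
    fmt.Println()
}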
36
vendor/github.com/open-policy-agent/opa/v1/ast/varset.go
generated
vendored
@@ -16,18 +16,13 @@ type VarSet map[Var]struct{}

// NewVarSet returns a new VarSet containing the specified variables.
func NewVarSet(vs ...Var) VarSet {
    s := make(VarSet, len(vs))
    s := VarSet{}
    for _, v := range vs {
        s.Add(v)
    }
    return s
}

// NewVarSet returns a new VarSet containing the specified variables.
func NewVarSetOfSize(size int) VarSet {
    return make(VarSet, size)
}

// Add updates the set to include the variable "v".
func (s VarSet) Add(v Var) {
    s[v] = struct{}{}
@@ -41,7 +36,7 @@ func (s VarSet) Contains(v Var) bool {

// Copy returns a shallow copy of the VarSet.
func (s VarSet) Copy() VarSet {
    cpy := NewVarSetOfSize(len(s))
    cpy := VarSet{}
    for v := range s {
        cpy.Add(v)
    }
@@ -50,13 +45,7 @@ func (s VarSet) Copy() VarSet {

// Diff returns a VarSet containing variables in s that are not in vs.
func (s VarSet) Diff(vs VarSet) VarSet {
    i := 0
    for v := range s {
        if !vs.Contains(v) {
            i++
        }
    }
    r := NewVarSetOfSize(i)
    r := VarSet{}
    for v := range s {
        if !vs.Contains(v) {
            r.Add(v)
@@ -67,26 +56,15 @@ func (s VarSet) Diff(vs VarSet) VarSet {

// Equal returns true if s contains exactly the same elements as vs.
func (s VarSet) Equal(vs VarSet) bool {
    if len(s) != len(vs) {
    if len(s.Diff(vs)) > 0 {
        return false
    }
    for v := range s {
        if !vs.Contains(v) {
            return false
        }
    }
    return true
    return len(vs.Diff(s)) == 0
}

// Intersect returns a VarSet containing variables in s that are in vs.
func (s VarSet) Intersect(vs VarSet) VarSet {
    i := 0
    for v := range s {
        if vs.Contains(v) {
            i++
        }
    }
    r := NewVarSetOfSize(i)
    r := VarSet{}
    for v := range s {
        if vs.Contains(v) {
            r.Add(v)
@@ -95,7 +73,7 @@ func (s VarSet) Intersect(vs VarSet) VarSet {
    return r
}

// Sorted returns a new sorted slice of vars from s.
// Sorted returns a sorted slice of vars from s.
func (s VarSet) Sorted() []Var {
    sorted := make([]Var, 0, len(s))
    for v := range s {
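The newer varset.go counts matches first so it can size the result map exactly before filling it: two passes over the set instead of one, in exchange for zero rehashing during growth. v1.1.0 simply lets the map grow. A standalone sketch of the two-pass pattern:

package main

import "fmt"

type set map[string]struct{}

// diff returns the elements of s not in vs, pre-sizing the result map so it
// never rehashes while being filled.
func diff(s, vs set) set {
    n := 0
    for v := range s { // first pass: count survivors
        if _, ok := vs[v]; !ok {
            n++
        }
    }
    r := make(set, n) // allocate exactly once
    for v := range s { // second pass: fill
        if _, ok := vs[v]; !ok {
            r[v] = struct{}{}
        }
    }
    return r
}

func main() {
    s := set{"x": {}, "y": {}, "z": {}}
    fmt.Println(len(diff(s, set{"y": {}}))) // 2
}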
7
vendor/github.com/open-policy-agent/opa/v1/ast/version_index.json
generated
vendored
@@ -497,13 +497,6 @@
      "PreRelease": "",
      "Metadata": ""
    },
    "internal.test_case": {
      "Major": 1,
      "Minor": 2,
      "Patch": 0,
      "PreRelease": "",
      "Metadata": ""
    },
    "intersection": {
      "Major": 0,
      "Minor": 17,
2
vendor/github.com/open-policy-agent/opa/v1/ast/visit.go
generated
vendored
@@ -362,7 +362,7 @@ func (vis *GenericVisitor) Walk(x interface{}) {
        vis.Walk(x.Get(k))
    }
case *Array:
    for i := range x.Len() {
    for i := 0; i < x.Len(); i++ {
        vis.Walk(x.Elem(i))
    }
case Set:
35
vendor/github.com/open-policy-agent/opa/v1/bundle/bundle.go
generated
vendored
@@ -267,7 +267,7 @@ func (m Manifest) equalWasmResolversAndRoots(other Manifest) bool {
        return false
    }

    for i := range len(m.WasmResolvers) {
    for i := 0; i < len(m.WasmResolvers); i++ {
        if !m.WasmResolvers[i].Equal(&other.WasmResolvers[i]) {
            return false
        }
@@ -298,7 +298,7 @@ func (wr *WasmResolver) Equal(other *WasmResolver) bool {
        return false
    }

    for i := range annotLen {
    for i := 0; i < annotLen; i++ {
        if wr.Annotations[i].Compare(other.Annotations[i]) != 0 {
            return false
        }
@@ -333,7 +333,7 @@ func (m *Manifest) validateAndInjectDefaults(b Bundle) error {
        roots[i] = strings.Trim(roots[i], "/")
    }

    for i := range len(roots) - 1 {
    for i := 0; i < len(roots)-1; i++ {
        for j := i + 1; j < len(roots); j++ {
            if RootPathsOverlap(roots[i], roots[j]) {
                return fmt.Errorf("manifest has overlapped roots: '%v' and '%v'", roots[i], roots[j])
@@ -715,11 +715,8 @@ func (r *Reader) Read() (Bundle, error) {
    popts.RegoVersion = bundle.RegoVersion(popts.EffectiveRegoVersion())
    for _, mf := range modules {
        modulePopts := popts
        if regoVersion, err := bundle.RegoVersionForFile(mf.RelativePath, popts.EffectiveRegoVersion()); err != nil {
        if modulePopts.RegoVersion, err = bundle.RegoVersionForFile(mf.RelativePath, popts.EffectiveRegoVersion()); err != nil {
            return bundle, err
        } else if regoVersion != ast.RegoUndefined {
            // We don't expect ast.RegoUndefined here, but don't override configured rego-version if we do just to be extra protective
            modulePopts.RegoVersion = regoVersion
        }
        r.metrics.Timer(metrics.RegoModuleParse).Start()
        mf.Parsed, err = ast.ParseModuleWithOpts(mf.Path, string(mf.Raw), modulePopts)
@@ -732,19 +729,19 @@ func (r *Reader) Read() (Bundle, error) {

    if bundle.Type() == DeltaBundleType {
        if len(bundle.Data) != 0 {
            return bundle, errors.New("delta bundle expected to contain only patch file but data files found")
            return bundle, fmt.Errorf("delta bundle expected to contain only patch file but data files found")
        }

        if len(bundle.Modules) != 0 {
            return bundle, errors.New("delta bundle expected to contain only patch file but policy files found")
            return bundle, fmt.Errorf("delta bundle expected to contain only patch file but policy files found")
        }

        if len(bundle.WasmModules) != 0 {
            return bundle, errors.New("delta bundle expected to contain only patch file but wasm files found")
            return bundle, fmt.Errorf("delta bundle expected to contain only patch file but wasm files found")
        }

        if r.persist {
            return bundle, errors.New("'persist' property is true in config. persisting delta bundle to disk is not supported")
            return bundle, fmt.Errorf("'persist' property is true in config. persisting delta bundle to disk is not supported")
        }
    }

@@ -766,7 +763,7 @@ func (r *Reader) Read() (Bundle, error) {
    for _, r := range bundle.Manifest.WasmResolvers {
        epMap[r.Module] = append(epMap[r.Module], r.Entrypoint)
    }
    for i := range len(bundle.WasmModules) {
    for i := 0; i < len(bundle.WasmModules); i++ {
        entrypoints := epMap[bundle.WasmModules[i].Path]
        for _, entrypoint := range entrypoints {
            ref, err := ast.PtrRef(ast.DefaultRootDocument, entrypoint)
@@ -819,12 +816,12 @@ func (r *Reader) checkSignaturesAndDescriptors(signatures SignaturesConfig) erro
    }

    if signatures.isEmpty() && r.verificationConfig != nil && r.verificationConfig.KeyID != "" {
        return errors.New("bundle missing .signatures.json file")
        return fmt.Errorf("bundle missing .signatures.json file")
    }

    if !signatures.isEmpty() {
        if r.verificationConfig == nil {
            return errors.New("verification key not provided")
            return fmt.Errorf("verification key not provided")
        }

        // verify the JWT signatures included in the `.signatures.json` file
@@ -1207,6 +1204,10 @@ func (b *Bundle) SetRegoVersion(v ast.RegoVersion) {
// If there is no defined version for the given path, the default version def is returned.
// If the version does not correspond to ast.RegoV0 or ast.RegoV1, an error is returned.
func (b *Bundle) RegoVersionForFile(path string, def ast.RegoVersion) (ast.RegoVersion, error) {
    if def == ast.RegoUndefined {
        def = ast.DefaultRegoVersion
    }

    version, err := b.Manifest.numericRegoVersionForFile(path)
    if err != nil {
        return def, err
@@ -1353,7 +1354,7 @@ func (b *Bundle) readData(key []string) *interface{} {

    node := b.Data

    for i := range len(key) - 1 {
    for i := 0; i < len(key)-1; i++ {

        child, ok := node[key[i]]
        if !ok {
@@ -1389,7 +1390,7 @@ func mktree(path []string, value interface{}) (map[string]interface{}, error) {
    // For 0 length path the value is the full tree.
    obj, ok := value.(map[string]interface{})
    if !ok {
        return nil, errors.New("root value must be object")
        return nil, fmt.Errorf("root value must be object")
    }
    return obj, nil
}
@@ -1512,7 +1513,7 @@ func bundleRegoVersions(bundle *Bundle, regoVersion ast.RegoVersion, usePath boo
    return nil, err
}
// only record the rego version if it's different from one applied globally to the result bundle
if regoVersion != ast.RegoUndefined && v != regoVersion {
if v != regoVersion {
    // We store the rego version by the absolute path to the bundle root, as this will be the - possibly new - path
    // to the module inside the merged bundle.
    fileRegoVersions[bundleAbsolutePath(m, usePath)] = v.Int()
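The Reader.Read hunk copies the shared parser options by value (modulePopts := popts) before overriding RegoVersion per module, so each file parses under its own version without mutating the shared struct. A minimal sketch of that per-item override pattern, with invented field names:

package main

import "fmt"

type ParserOptions struct {
    RegoVersion int
    Strict      bool
}

func main() {
    shared := ParserOptions{RegoVersion: 1, Strict: true}

    files := map[string]int{"a.rego": 0, "b.rego": 1}
    for name, v := range files {
        opts := shared       // struct copy: shared stays untouched
        opts.RegoVersion = v // per-file override
        fmt.Println(name, opts.RegoVersion, shared.RegoVersion)
    }
}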
8
vendor/github.com/open-policy-agent/opa/v1/bundle/sign.go
generated
vendored
@@ -101,9 +101,11 @@ func generatePayload(files []FileInfo, sc *SigningConfig, keyID string) ([]byte,
        for claim, value := range claims {
            payload[claim] = value
        }
    } else if keyID != "" {
        // keyid claim is deprecated but include it for backwards compatibility.
        payload["keyid"] = keyID
    } else {
        if keyID != "" {
            // keyid claim is deprecated but include it for backwards compatibility.
            payload["keyid"] = keyID
        }
    }
    return json.Marshal(payload)
}
33
vendor/github.com/open-policy-agent/opa/v1/bundle/store.go
generated
vendored
@@ -8,7 +8,6 @@ import (
    "context"
    "encoding/base64"
    "encoding/json"
    "errors"
    "fmt"
    "path/filepath"
    "strings"
@@ -95,7 +94,7 @@ func ReadBundleNamesFromStore(ctx context.Context, store storage.Store, txn stor

    bundleMap, ok := value.(map[string]interface{})
    if !ok {
        return nil, errors.New("corrupt manifest roots")
        return nil, fmt.Errorf("corrupt manifest roots")
    }

    bundles := make([]string, len(bundleMap))
@@ -197,14 +196,14 @@ func ReadWasmMetadataFromStore(ctx context.Context, store storage.Store, txn sto

    bs, err := json.Marshal(value)
    if err != nil {
        return nil, errors.New("corrupt wasm manifest data")
        return nil, fmt.Errorf("corrupt wasm manifest data")
    }

    var wasmMetadata []WasmResolver

    err = util.UnmarshalJSON(bs, &wasmMetadata)
    if err != nil {
        return nil, errors.New("corrupt wasm manifest data")
        return nil, fmt.Errorf("corrupt wasm manifest data")
    }

    return wasmMetadata, nil
@@ -220,14 +219,14 @@ func ReadWasmModulesFromStore(ctx context.Context, store storage.Store, txn stor

    encodedModules, ok := value.(map[string]interface{})
    if !ok {
        return nil, errors.New("corrupt wasm modules")
        return nil, fmt.Errorf("corrupt wasm modules")
    }

    rawModules := map[string][]byte{}
    for path, enc := range encodedModules {
        encStr, ok := enc.(string)
        if !ok {
            return nil, errors.New("corrupt wasm modules")
            return nil, fmt.Errorf("corrupt wasm modules")
        }
        bs, err := base64.StdEncoding.DecodeString(encStr)
        if err != nil {
@@ -249,7 +248,7 @@ func ReadBundleRootsFromStore(ctx context.Context, store storage.Store, txn stor

    sl, ok := value.([]interface{})
    if !ok {
        return nil, errors.New("corrupt manifest roots")
        return nil, fmt.Errorf("corrupt manifest roots")
    }

    roots := make([]string, len(sl))
@@ -257,7 +256,7 @@ func ReadBundleRootsFromStore(ctx context.Context, store storage.Store, txn stor
    for i := range sl {
        roots[i], ok = sl[i].(string)
        if !ok {
            return nil, errors.New("corrupt manifest root")
            return nil, fmt.Errorf("corrupt manifest root")
        }
    }

@@ -279,7 +278,7 @@ func readRevisionFromStore(ctx context.Context, store storage.Store, txn storage

    str, ok := value.(string)
    if !ok {
        return "", errors.New("corrupt manifest revision")
        return "", fmt.Errorf("corrupt manifest revision")
    }

    return str, nil
@@ -300,7 +299,7 @@ func readMetadataFromStore(ctx context.Context, store storage.Store, txn storage

    data, ok := value.(map[string]interface{})
    if !ok {
        return nil, errors.New("corrupt manifest metadata")
        return nil, fmt.Errorf("corrupt manifest metadata")
    }

    return data, nil
@@ -321,7 +320,7 @@ func readEtagFromStore(ctx context.Context, store storage.Store, txn storage.Tra

    str, ok := value.(string)
    if !ok {
        return "", errors.New("corrupt bundle etag")
        return "", fmt.Errorf("corrupt bundle etag")
    }

    return str, nil
@@ -447,7 +446,7 @@ func activateBundles(opts *ActivateOpts) error {
    p := getNormalizedPath(path)

    if len(p) == 0 {
        return errors.New("root value must be object")
        return fmt.Errorf("root value must be object")
    }

    // verify valid YAML or JSON value
@@ -717,7 +716,7 @@ func readModuleInfoFromStore(ctx context.Context, store storage.Store, txn stora
    if vs, ok := ver.(json.Number); ok {
        i, err := vs.Int64()
        if err != nil {
            return nil, errors.New("corrupt rego version")
            return nil, fmt.Errorf("corrupt rego version")
        }
        versions[k] = moduleInfo{RegoVersion: ast.RegoVersionFromInt(int(i))}
    }
@@ -727,7 +726,7 @@ func readModuleInfoFromStore(ctx context.Context, store storage.Store, txn stora
        return versions, nil
    }

    return nil, errors.New("corrupt rego version")
    return nil, fmt.Errorf("corrupt rego version")
}

func erasePolicies(ctx context.Context, store storage.Store, txn storage.Transaction, parserOpts ast.ParserOptions, roots map[string]struct{}) (map[string]*ast.Module, []string, error) {
@@ -827,7 +826,7 @@ func writeModuleRegoVersionToStore(ctx context.Context, store storage.Store, txn

    if regoVersion == ast.RegoUndefined {
        var err error
        regoVersion, err = b.RegoVersionForFile(mf.Path, runtimeRegoVersion)
        regoVersion, err = b.RegoVersionForFile(mf.Path, ast.RegoUndefined)
        if err != nil {
            return fmt.Errorf("failed to get rego version for module '%s' in bundle: %w", mf.Path, err)
        }
@@ -1020,7 +1019,7 @@ func lookup(path storage.Path, data map[string]interface{}) (interface{}, bool)
    if len(path) == 0 {
        return data, true
    }
    for i := range len(path) - 1 {
    for i := 0; i < len(path)-1; i++ {
        value, ok := data[path[i]]
        if !ok {
            return nil, false
@@ -1094,7 +1093,7 @@ func applyPatches(ctx context.Context, store storage.Store, txn storage.Transact
    // construct patch path
    path, ok := patch.ParsePatchPathEscaped("/" + strings.Trim(pat.Path, "/"))
    if !ok {
        return errors.New("error parsing patch path")
        return fmt.Errorf("error parsing patch path")
    }

    var op storage.PatchOp
Some files were not shown because too many files have changed in this diff.