Compare commits

1 Commit

Author: dependabot[bot]
Commit: f371ba8091
build(deps): bump github.com/open-policy-agent/opa from 1.12.3 to 1.13.1
Bumps [github.com/open-policy-agent/opa](https://github.com/open-policy-agent/opa) from 1.12.3 to 1.13.1.
- [Release notes](https://github.com/open-policy-agent/opa/releases)
- [Changelog](https://github.com/open-policy-agent/opa/blob/main/CHANGELOG.md)
- [Commits](https://github.com/open-policy-agent/opa/compare/v1.12.3...v1.13.1)

---
updated-dependencies:
- dependency-name: github.com/open-policy-agent/opa
  dependency-version: 1.13.1
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Date: 2026-02-16 15:53:36 +00:00
145 changed files with 18517 additions and 2058 deletions


@@ -1,89 +0,0 @@
---
title: "Discover OIDC Client configuration via WebFinger"
---
* Status: accepted
* Deciders: [@TheOneRing @kulmann @rhafer @dragotin]
* Date: 2026-02-02
* Reference: https://github.com/opencloud-eu/opencloud/pull/2072, https://github.com/opencloud-eu/desktop/issues/217
## Context and Problem Statement
Until now, our client applications have used hard-coded OIDC client configurations.
It is therefore not possible to change the client id that a client should use or the
list of scopes that a client needs to request. This makes it hard to integrate
OpenCloud with various existing identity providers. For example:
- Authentik essentially creates a different issuer URL for each client. As OpenCloud
can only work with a single issuer URL, all OpenCloud clients need to use the
same client id to work with Authentik.
- Some IDPs (e.g. kanidm) cannot work with user-supplied client ids. They generate
client ids automatically and do not allow specifying them manually.
- To make features like automatic role assignment work, clients need to request
specific scopes, depending on which exact IDP is used.
## Decision Drivers
* Support a broader set of IDPs
* Avoid manual configuration adjustments on the client side
## Decision
Enhance the WebFinger service in OpenCloud to provide platform-specific OIDC
discovery, enabling clients to query for the correct OIDC `client_id` and
`scopes` based on their application type (e.g., web, desktop, android, ios).
This is achieved by allowing an additional `platform` query parameter to be used
when querying the WebFinger endpoint. The response will include the appropriate
`client_id` and `scopes` in the `properties` section of the response.
This is implemented in a backward-compatible way, so existing clients that do not
specify the `platform` parameter will continue to receive just the issuer information.
## Example
### Client Request
```
GET /.well-known/webfinger?resource=https://cloud.opencloud.test&rel=http://openid.net/specs/connect/1.0/issuer&platform=desktop
```
### Server Response
```json
{
  "subject": "https://cloud.opencloud.test",
  "links": [{
    "rel": "http://openid.net/specs/connect/1.0/issuer",
    "href": "https://idp.example.com"
  }],
  "properties": {
    "http://opencloud.eu/ns/oidc/client_id": "desktop-client-id",
    "http://opencloud.eu/ns/oidc/scopes": ["openid", "profile", "email", "offline_access"]
  }
}
```
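To make the flow concrete, here is a minimal client-side sketch in Go. It relies only on the endpoint, query parameters, and property URIs shown in the example above; the `discoverOIDC` helper and the `webfingerResponse` struct are illustrative names, not part of any OpenCloud client.
```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// webfingerResponse models only the fields this sketch needs; the struct
// shape is illustrative, not taken from an actual OpenCloud client.
type webfingerResponse struct {
	Subject string `json:"subject"`
	Links   []struct {
		Rel  string `json:"rel"`
		Href string `json:"href"`
	} `json:"links"`
	Properties map[string]any `json:"properties"`
}

// discoverOIDC queries the WebFinger endpoint with the proposed `platform`
// parameter and returns the parsed response.
func discoverOIDC(serverURL, platform string) (*webfingerResponse, error) {
	q := url.Values{}
	q.Set("resource", serverURL)
	q.Set("rel", "http://openid.net/specs/connect/1.0/issuer")
	q.Set("platform", platform)

	resp, err := http.Get(serverURL + "/.well-known/webfinger?" + q.Encode())
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("webfinger query failed: %s", resp.Status)
	}

	var wf webfingerResponse
	if err := json.NewDecoder(resp.Body).Decode(&wf); err != nil {
		return nil, err
	}
	return &wf, nil
}

func main() {
	wf, err := discoverOIDC("https://cloud.opencloud.test", "desktop")
	if err != nil {
		panic(err)
	}
	fmt.Println("client_id:", wf.Properties["http://opencloud.eu/ns/oidc/client_id"])
	fmt.Println("scopes:", wf.Properties["http://opencloud.eu/ns/oidc/scopes"])
}
```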
### Server configuration (suggestion)
To configure the OpenCloud server, a couple of new config settings need to be introduced: two new
settings per platform, e.g.:
```
WEBFINGER_ANDROID_OIDC_CLIENT_ID
WEBFINGER_ANDROID_OIDC_CLIENT_SCOPE
WEBFINGER_DESKTOP_OIDC_CLIENT_ID
WEBFINGER_DESKTOP_OIDC_CLIENT_SCOPE
WEBFINGER_IOS_OIDC_CLIENT_ID
WEBFINGER_IOS_OIDC_CLIENT_SCOPE
WEBFINGER_WEB_OIDC_CLIENT_ID
WEBFINGER_WEB_OIDC_CLIENT_SCOPE
```
Additionally, for backwards compatibility, the existing `WEB_OIDC_CLIENT_ID` and
`WEB_OIDC_CLIENT_SCOPE` settings should be used as fallbacks for the `web`
platform. We should also make it easy to configure the same settings for all
platforms at once by using `OC_OIDC_CLIENT_ID` and `OC_OIDC_CLIENT_SCOPE` as
fallbacks for any platform whose platform-specific settings are not set.
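Purely to illustrate the suggested lookup order, a hedged sketch of how the server could resolve the client id for a platform might look as follows; the `resolveClientID` helper is hypothetical and not taken from the OpenCloud code base.
```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// resolveClientID is a hypothetical illustration of the proposed lookup order:
// platform-specific setting, then the legacy web setting, then the global fallback.
func resolveClientID(platform string) string {
	// 1. Platform-specific setting, e.g. WEBFINGER_DESKTOP_OIDC_CLIENT_ID.
	if v := os.Getenv("WEBFINGER_" + strings.ToUpper(platform) + "_OIDC_CLIENT_ID"); v != "" {
		return v
	}
	// 2. Existing setting, kept for backwards compatibility with the web platform.
	if platform == "web" {
		if v := os.Getenv("WEB_OIDC_CLIENT_ID"); v != "" {
			return v
		}
	}
	// 3. Shared fallback for all platforms.
	return os.Getenv("OC_OIDC_CLIENT_ID")
}

func main() {
	fmt.Println(resolveClientID("desktop"))
}
```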

go.mod (12 lines changed)

@@ -62,7 +62,7 @@ require (
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.28.0
github.com/onsi/gomega v1.39.1
github.com/open-policy-agent/opa v1.12.3
github.com/open-policy-agent/opa v1.13.1
github.com/opencloud-eu/icap-client v0.0.0-20250930132611-28a2afe62d89
github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20260204102724-10bcda1b3068
github.com/opencloud-eu/reva/v2 v2.42.4
@@ -181,7 +181,7 @@ require (
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
github.com/crewjam/httperr v0.2.0 // indirect
github.com/crewjam/saml v0.4.14 // indirect
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
github.com/cyphar/filepath-securejoin v0.5.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
@@ -271,9 +271,8 @@ require (
github.com/lestrrat-go/dsig v1.0.0 // indirect
github.com/lestrrat-go/dsig-secp256k1 v1.0.0 // indirect
github.com/lestrrat-go/httpcc v1.0.1 // indirect
github.com/lestrrat-go/httprc/v3 v3.0.1 // indirect
github.com/lestrrat-go/jwx/v3 v3.0.12 // indirect
github.com/lestrrat-go/option v1.0.1 // indirect
github.com/lestrrat-go/httprc/v3 v3.0.2 // indirect
github.com/lestrrat-go/jwx/v3 v3.0.13 // indirect
github.com/lestrrat-go/option/v2 v2.0.0 // indirect
github.com/libregraph/oidc-go v1.1.0 // indirect
github.com/longsleep/go-metrics v1.0.0 // indirect
@@ -369,7 +368,7 @@ require (
github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 // indirect
github.com/trustelem/zxcvbn v1.0.1 // indirect
github.com/urfave/cli/v2 v2.27.7 // indirect
github.com/valyala/fastjson v1.6.4 // indirect
github.com/valyala/fastjson v1.6.7 // indirect
github.com/vektah/gqlparser/v2 v2.5.31 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/wk8/go-ordered-map v1.0.0 // indirect
@@ -398,6 +397,7 @@ require (
google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect
gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect
gopkg.in/ini.v1 v1.67.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect

go.sum (33 lines changed)

@@ -272,8 +272,8 @@ github.com/crewjam/saml v0.4.14/go.mod h1:UVSZCf18jJkk6GpWNVqcyQJMD5HsRugBPf4I1n
github.com/cs3org/go-cs3apis v0.0.0-20250908152307-4ca807afe54e h1:fC/BWMVWNFlSbzvSp2xTaH0qpJiq7ScRrOsCzpgi1xI=
github.com/cs3org/go-cs3apis v0.0.0-20250908152307-4ca807afe54e/go.mod h1:DedpcqXl193qF/08Y04IO0PpxyyMu8+GrkD6kWK2MEQ=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
github.com/cyphar/filepath-securejoin v0.5.1 h1:eYgfMq5yryL4fbWfkLpFFy2ukSELzaJOTaUTuh+oF48=
github.com/cyphar/filepath-securejoin v0.5.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -287,8 +287,8 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjY
github.com/deepmap/oapi-codegen v1.3.11/go.mod h1:suMvK7+rKlx3+tpa8ByptmvoXbAV70wERKTOGH3hLp0=
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I=
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE=
github.com/dgraph-io/badger/v4 v4.8.0 h1:JYph1ChBijCw8SLeybvPINizbDKWZ5n/GYbz2yhN/bs=
github.com/dgraph-io/badger/v4 v4.8.0/go.mod h1:U6on6e8k/RTbUWxqKR0MvugJuVmkxSNc79ap4917h4w=
github.com/dgraph-io/badger/v4 v4.9.0 h1:tpqWb0NewSrCYqTvywbcXOhQdWcqephkVkbBmaaqHzc=
github.com/dgraph-io/badger/v4 v4.9.0/go.mod h1:5/MEx97uzdPUHR4KtkNt8asfI2T4JiEiQlV7kWUo8c0=
github.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE=
github.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU=
github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM=
@@ -350,8 +350,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI=
github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
github.com/foxcpp/go-mockdns v1.2.0 h1:omK3OrHRD1IWJz1FuFBCFquhXslXoF17OvBS6JPzZF0=
github.com/foxcpp/go-mockdns v1.2.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -777,12 +777,10 @@ github.com/lestrrat-go/dsig-secp256k1 v1.0.0 h1:JpDe4Aybfl0soBvoVwjqDbp+9S1Y2OM7
github.com/lestrrat-go/dsig-secp256k1 v1.0.0/go.mod h1:CxUgAhssb8FToqbL8NjSPoGQlnO4w3LG1P0qPWQm/NU=
github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE=
github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
github.com/lestrrat-go/httprc/v3 v3.0.1 h1:3n7Es68YYGZb2Jf+k//llA4FTZMl3yCwIjFIk4ubevI=
github.com/lestrrat-go/httprc/v3 v3.0.1/go.mod h1:2uAvmbXE4Xq8kAUjVrZOq1tZVYYYs5iP62Cmtru00xk=
github.com/lestrrat-go/jwx/v3 v3.0.12 h1:p25r68Y4KrbBdYjIsQweYxq794CtGCzcrc5dGzJIRjg=
github.com/lestrrat-go/jwx/v3 v3.0.12/go.mod h1:HiUSaNmMLXgZ08OmGBaPVvoZQgJVOQphSrGr5zMamS8=
github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU=
github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
github.com/lestrrat-go/httprc/v3 v3.0.2 h1:7u4HUaD0NQbf2/n5+fyp+T10hNCsAnwKfqn4A4Baif0=
github.com/lestrrat-go/httprc/v3 v3.0.2/go.mod h1:mSMtkZW92Z98M5YoNNztbRGxbXHql7tSitCvaxvo9l0=
github.com/lestrrat-go/jwx/v3 v3.0.13 h1:AdHKiPIYeCSnOJtvdpipPg/0SuFh9rdkN+HF3O0VdSk=
github.com/lestrrat-go/jwx/v3 v3.0.13/go.mod h1:2m0PV1A9tM4b/jVLMx8rh6rBl7F6WGb3EG2hufN9OQU=
github.com/lestrrat-go/option/v2 v2.0.0 h1:XxrcaJESE1fokHy3FpaQ/cXW8ZsIdWcdFzzLOcID3Ss=
github.com/lestrrat-go/option/v2 v2.0.0/go.mod h1:oSySsmzMoR0iRzCDCaUfsCzxQHUEuhOViQObyy7S6Vg=
github.com/libregraph/idm v0.5.0 h1:tDMwKbAOZzdeDYMxVlY5PbSqRKO7dbAW9KT42A51WSk=
@@ -959,8 +957,8 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28=
github.com/onsi/gomega v1.39.1/go.mod h1:hL6yVALoTOxeWudERyfppUcZXjMwIMLnuSfruD2lcfg=
github.com/open-policy-agent/opa v1.12.3 h1:qe3m/w52baKC/HJtippw+hYBUKCzuBCPjB+D5P9knfc=
github.com/open-policy-agent/opa v1.12.3/go.mod h1:RnDgm04GA1RjEXJvrsG9uNT/+FyBNmozcPvA2qz60M4=
github.com/open-policy-agent/opa v1.13.1 h1:2odxAcL3L0GNTlsuDcoguxViGxQxlpGL6zR8jdJjID8=
github.com/open-policy-agent/opa v1.13.1/go.mod h1:M3Asy9yp1YTusUU5VQuENDe92GLmamIuceqjw+C8PHY=
github.com/opencloud-eu/go-micro-plugins/v4/store/nats-js-kv v0.0.0-20250512152754-23325793059a h1:Sakl76blJAaM6NxylVkgSzktjo2dS504iDotEFJsh3M=
github.com/opencloud-eu/go-micro-plugins/v4/store/nats-js-kv v0.0.0-20250512152754-23325793059a/go.mod h1:pjcozWijkNPbEtX5SIQaxEW/h8VAVZYTLx+70bmB3LY=
github.com/opencloud-eu/icap-client v0.0.0-20250930132611-28a2afe62d89 h1:W1ms+lP5lUUIzjRGDg93WrQfZJZCaV1ZP3KeyXi8bzY=
@@ -1196,6 +1194,7 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc=
@@ -1246,8 +1245,8 @@ github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/X
github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU=
github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ=
github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
github.com/valyala/fastjson v1.6.7 h1:ZE4tRy0CIkh+qDc5McjatheGX2czdn8slQjomexVpBM=
github.com/valyala/fastjson v1.6.7/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/vektah/gqlparser/v2 v2.5.31 h1:YhWGA1mfTjID7qJhd1+Vxhpk5HTgydrGU9IgkWBTJ7k=
@@ -1801,6 +1800,8 @@ gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.67.1 h1:tVBILHy0R6e4wkYOn3XmiITt/hEVH4TFMYvAX2Ytz6k=
gopkg.in/ini.v1 v1.67.1/go.mod h1:x/cyOwCgZqOkJoDIJ3c1KNHMo10+nLGAhh+kn3Zizss=
gopkg.in/ns1/ns1-go.v2 v2.4.4/go.mod h1:GMnKY+ZuoJ+lVLL+78uSTjwTz2jMazq6AfGKQOYhsPk=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=


@@ -0,0 +1,56 @@
# SPDX-License-Identifier: MPL-2.0
# Copyright (C) 2025 Aleksa Sarai <cyphar@cyphar.com>
# Copyright (C) 2025 SUSE LLC
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
version: "2"
linters:
  enable:
    - asasalint
    - asciicheck
    - containedctx
    - contextcheck
    - errcheck
    - errorlint
    - exhaustive
    - forcetypeassert
    - godot
    - goprintffuncname
    - govet
    - importas
    - ineffassign
    - makezero
    - misspell
    - musttag
    - nilerr
    - nilnesserr
    - nilnil
    - noctx
    - prealloc
    - revive
    - staticcheck
    - testifylint
    - unconvert
    - unparam
    - unused
    - usetesting
  settings:
    govet:
      enable:
        - nilness
    testifylint:
      enable-all: true
formatters:
  enable:
    - gofumpt
    - goimports
  settings:
    goimports:
      local-prefixes:
        - github.com/cyphar/filepath-securejoin


@@ -4,7 +4,152 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
## [Unreleased] ##
## [Unreleased 0.5.z] ##
## [0.5.1] - 2025-10-31 ##
> Spooky scary skeletons send shivers down your spine!
### Changed ###
- `openat2` can return `-EAGAIN` if it detects a possible attack in certain
scenarios (namely if there was a rename or mount while walking a path with a
`..` component). While this is necessary to avoid a denial-of-service in the
kernel, it does require retry loops in userspace.
In previous versions, `pathrs-lite` would retry `openat2` 32 times before
returning an error, but we've received user reports that this limit can be
hit on systems with very heavy load. In some synthetic benchmarks (testing
the worst-case of an attacker doing renames in a tight loop on every core of
a 16-core machine) we managed to get a ~3% failure rate in runc. We have
improved this situation in two ways:
* We have now increased this limit to 128, which should be good enough for
most use-cases without becoming a denial-of-service vector (the number of
syscalls called by the `O_PATH` resolver in a typical case is within the
same ballpark). The same benchmarks show a failure rate of ~0.12% which
(while not zero) is probably sufficient for most users.
* In addition, we now return a `unix.EAGAIN` error that is bubbled up and can
be detected by callers. This means that callers with stricter requirements
to avoid spurious errors can choose to do their own infinite `EAGAIN` retry
loop (though we would strongly recommend users use time-based deadlines in
such retry loops to avoid potentially unbounded denials-of-service); a sketch of such a deadline-based loop follows below.
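For callers that opt into their own retry loop, a deadline-based variant might look roughly like the sketch below. It assumes the library's top-level `OpenInRoot` wrapper and that the spurious-retry error can be matched with `errors.Is(err, unix.EAGAIN)`, as described above; treat it as a sketch rather than canonical usage.
```go
package retrysketch

import (
	"errors"
	"fmt"
	"os"
	"time"

	securejoin "github.com/cyphar/filepath-securejoin"
	"golang.org/x/sys/unix"
)

// openWithDeadline retries spurious EAGAIN errors from the openat2-based
// resolver until the deadline expires, instead of relying only on the
// library's internal retry limit. See the lead-in above for assumptions.
func openWithDeadline(root, unsafePath string, deadline time.Duration) (*os.File, error) {
	start := time.Now()
	for {
		f, err := securejoin.OpenInRoot(root, unsafePath)
		if err == nil {
			return f, nil
		}
		// Only the "possible attack detected, please retry" error is retried.
		if !errors.Is(err, unix.EAGAIN) {
			return nil, err
		}
		if time.Since(start) > deadline {
			return nil, fmt.Errorf("path still racing after %v: %w", deadline, err)
		}
	}
}
```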
## [0.5.0] - 2025-09-26 ##
> Let the past die. Kill it if you have to.
> **NOTE**: With this release, some parts of
> `github.com/cyphar/filepath-securejoin` are now licensed under the Mozilla
> Public License (version 2). Please see [COPYING.md][] as well as the
> license header in each file for more details.
[COPYING.md]: ./COPYING.md
### Breaking ###
- The new API introduced in the [0.3.0][] release has been moved to a new
subpackage called `pathrs-lite`. This was primarily done to better indicate
the split between the new and old APIs, as well as indicate to users the
purpose of this subpackage (it is a less complete version of [libpathrs][]).
We have added some wrappers to the top-level package to ease the transition,
but those are deprecated and will be removed in the next minor release of
filepath-securejoin. Users should update their import paths.
This new subpackage has also been relicensed under the Mozilla Public License
(version 2), please see [COPYING.md][] for more details.
### Added ###
- Most of the key bits of the safe `procfs` API have now been exported and are
available in `github.com/cyphar/filepath-securejoin/pathrs-lite/procfs`. At
the moment this primarily consists of a new `procfs.Handle` API:
* `OpenProcRoot` returns a new handle to `/proc`, endeavouring to make it
safe if possible (`subset=pid` to protect against mistaken write attacks
and leaks, as well as using `fsopen(2)` to avoid racing mount attacks).
`OpenUnsafeProcRoot` returns a handle without attempting to create one
with `subset=pid`, which makes it more dangerous to leak. Most users
should use `OpenProcRoot` (even if you need to use `ProcRoot` as the base
of an operation, as filepath-securejoin will internally open a handle when
necessary).
* The `(*procfs.Handle).Open*` family of methods lets you get a safe
`O_PATH` handle to certain subpaths within `/proc`.
For `OpenThreadSelf`, the returned `ProcThreadSelfCloser` needs to be
called after you completely finish using the handle (this is necessary
because Go is multi-threaded and `ProcThreadSelf` references
`/proc/thread-self` which may disappear if we do not
`runtime.LockOSThread` -- `ProcThreadSelfCloser` is currently equivalent
to `runtime.UnlockOSThread`).
Note that you cannot open any `procfs` symlinks (most notably magic-links)
using this API. At the moment, filepath-securejoin does not support this
feature (but [libpathrs][] does).
* `ProcSelfFdReadlink` lets you get the in-kernel path representation of a
file descriptor (think `readlink("/proc/self/fd/...")`), except that we
verify that there aren't any tricky overmounts that could fool the
process.
Please be aware that the returned string is simply a snapshot at that
particular moment, and an attacker could move the file being pointed to.
In addition, complex namespace configurations could result in nonsensical
or confusing paths being returned. The value received from this function
should only be used as secondary verification of some security property,
not as proof that a particular handle has a particular path.
The procfs handle used internally by the API is the same as the rest of
`filepath-securejoin` (for privileged programs this is usually a private
in-process `procfs` instance created with `fsopen(2)`).
As before, this is intended as a stop-gap before users migrate to
[libpathrs][], which provides a far more extensive safe `procfs` API and is
generally more robust. A brief usage sketch follows after this list.
- Previously, the hardened procfs implementation (used internally within
`Reopen` and `Open(at)InRoot`) only protected against overmount attacks on
systems with `openat2(2)` (Linux 5.6) or systems with `fsopen(2)` or
`open_tree(2)` (Linux 5.2) and programs with privileges to use them (with
some caveats about locked mounts that probably affect very few users). For
other users, an attacker with the ability to create malicious mounts (on most
systems, a sysadmin) could trick you into operating on files you didn't
expect. This attack only really makes sense in the context of container
runtime implementations.
This was considered a reasonable trade-off, as the long-term intention was to
get all users to just switch to [libpathrs][] if they wanted to use the safe
`procfs` API (which had more extensive protections, and is what these new
protections in `filepath-securejoin` are based on). However, as the API
is now being exported it seems unwise to advertise the API as "safe" if we do
not protect against known attacks.
The procfs API is now more protected against attackers on systems lacking the
aforementioned protections. However, the most comprehensive of these
protections effectively rely on [`statx(STATX_MNT_ID)`][statx.2] (Linux 5.8).
On older kernel versions, there is no effective protection (there is some
minimal protection against non-`procfs` filesystem components but a
sufficiently clever attacker can work around those). In addition,
`STATX_MNT_ID` is vulnerable to mount ID reuse attacks by sufficiently
motivated and privileged attackers -- this problem is mitigated with
`STATX_MNT_ID_UNIQUE` (Linux 6.8) but that raises the minimum kernel version
for more protection.
The fact that these protections are quite limited despite needing a fair bit
of extra code to handle was one of the primary reasons we did not initially
implement this in `filepath-securejoin` ([libpathrs][] supports all of this,
of course). A `statx`-based sketch of the mount-ID check also follows after this list.
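To give a feel for the exported API described in the first `### Added ###` bullet above, here is a brief usage sketch. The function names (`OpenProcRoot`, `OpenThreadSelf`, `ProcSelfFdReadlink`) come from the changelog entry itself, but the exact signatures, the `Close` method, and the error handling are assumptions; check the package documentation before relying on them.
```go
package procfssketch

import (
	"fmt"

	"github.com/cyphar/filepath-securejoin/pathrs-lite/procfs"
)

// threadSelfHandle shows the intended shape of the new procfs API. Signatures
// are assumed from the changelog description, not verified against the package.
func threadSelfHandle() error {
	proc, err := procfs.OpenProcRoot() // safe /proc handle (subset=pid / fsopen where possible)
	if err != nil {
		return err
	}
	defer proc.Close()

	// The returned closer must be called once we are completely done with the
	// handle (it currently unlocks the OS thread pinned for /proc/thread-self).
	fd, closer, err := proc.OpenThreadSelf("status")
	if err != nil {
		return err
	}
	defer closer()
	defer fd.Close()

	// Secondary verification only: an attacker could still move the target
	// after this snapshot is taken.
	path, err := procfs.ProcSelfFdReadlink(fd)
	if err != nil {
		return err
	}
	fmt.Println("handle points at", path)
	return nil
}
```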
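The mount-ID protection mentioned above boils down to comparing `statx(STATX_MNT_ID)` results for a file and the expected mount. The following standalone sketch uses `golang.org/x/sys/unix` directly (it is not filepath-securejoin's internal code) to show the general shape of that check.
```go
package mntidsketch

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// sameMount reports whether the open file descriptor fd still lives on the
// same mount as the expected procfs root fd, using STATX_MNT_ID (Linux 5.8+).
// This mirrors the idea described above; it is not the library's actual code.
func sameMount(procRootFd, fd int) (bool, error) {
	var rootStx, fileStx unix.Statx_t

	if err := unix.Statx(procRootFd, "", unix.AT_EMPTY_PATH, unix.STATX_MNT_ID, &rootStx); err != nil {
		return false, fmt.Errorf("statx proc root: %w", err)
	}
	if err := unix.Statx(fd, "", unix.AT_EMPTY_PATH, unix.STATX_MNT_ID, &fileStx); err != nil {
		return false, fmt.Errorf("statx file: %w", err)
	}
	// If the kernel did not fill in STATX_MNT_ID, no decision can be made.
	if rootStx.Mask&unix.STATX_MNT_ID == 0 || fileStx.Mask&unix.STATX_MNT_ID == 0 {
		return false, fmt.Errorf("kernel does not report STATX_MNT_ID")
	}
	return rootStx.Mnt_id == fileStx.Mnt_id, nil
}
```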
### Fixed ###
- RHEL 8 kernels have backports of `fsopen(2)` but in some testing we've found
that it has very bad (and very difficult to debug) performance issues, and so
we will explicitly refuse to use `fsopen(2)` if the running kernel version is
pre-5.2 and will instead fall back to `open("/proc")`. A kernel-version-check sketch follows below.
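The kernel-version gate described in this entry can be approximated with a `uname(2)` check; the sketch below is only an illustration of that idea, not the library's actual implementation.
```go
package kversketch

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// kernelAtLeast is an illustrative check in the spirit of the fix described
// above (refusing fsopen(2) on pre-5.2 kernels); it is not the library's code.
func kernelAtLeast(major, minor int) (bool, error) {
	var uts unix.Utsname
	if err := unix.Uname(&uts); err != nil {
		return false, err
	}
	release := unix.ByteSliceToString(uts.Release[:]) // e.g. "4.18.0-553.el8.x86_64"

	var haveMajor, haveMinor int
	if _, err := fmt.Sscanf(release, "%d.%d", &haveMajor, &haveMinor); err != nil {
		return false, fmt.Errorf("cannot parse kernel release %q: %w", release, err)
	}
	return haveMajor > major || (haveMajor == major && haveMinor >= minor), nil
}
```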
[CVE-2024-21626]: https://github.com/opencontainers/runc/security/advisories/GHSA-xr7r-f8xq-vfvv
[libpathrs]: https://github.com/cyphar/libpathrs
[statx.2]: https://www.man7.org/linux/man-pages/man2/statx.2.html
## [0.4.1] - 2025-01-28 ##
@@ -173,7 +318,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
safe to start migrating to as we have extensive tests ensuring they behave
correctly and are safe against various races and other attacks.
[libpathrs]: https://github.com/openSUSE/libpathrs
[libpathrs]: https://github.com/cyphar/libpathrs
[open.2]: https://www.man7.org/linux/man-pages/man2/open.2.html
## [0.2.5] - 2024-05-03 ##
@@ -238,7 +383,9 @@ This is our first release of `github.com/cyphar/filepath-securejoin`,
containing a full implementation with a coverage of 93.5% (the only missing
cases are the error cases, which are hard to mocktest at the moment).
[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.1...HEAD
[Unreleased 0.5.z]: https://github.com/cyphar/filepath-securejoin/compare/v0.5.1...release-0.5
[0.5.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.5.0...v0.5.1
[0.5.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.1...v0.5.0
[0.4.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.0...v0.4.1
[0.4.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.6...v0.4.0
[0.3.6]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.5...v0.3.6

vendor/github.com/cyphar/filepath-securejoin/COPYING.md (generated, vendored, new file, 447 lines)

@@ -0,0 +1,447 @@
## COPYING ##
`SPDX-License-Identifier: BSD-3-Clause AND MPL-2.0`
This project is made up of code licensed under different licenses. Which code
you use will have an impact on whether only one or both licenses apply to your
usage of this library.
Note that **each file** in this project individually has a code comment at the
start describing the license of that particular file -- this is the most
accurate license information of this project; in case there is any conflict
between this document and the comment at the start of a file, the comment shall
take precedence. The only purpose of this document is to work around [a known
technical limitation of pkg.go.dev's license checking tool when dealing with
non-trivial project licenses][go75067].
[go75067]: https://go.dev/issue/75067
### `BSD-3-Clause` ###
At time of writing, the following files and directories are licensed under the
BSD-3-Clause license:
* `doc.go`
* `join*.go`
* `vfs.go`
* `internal/consts/*.go`
* `pathrs-lite/internal/gocompat/*.go`
* `pathrs-lite/internal/kernelversion/*.go`
The text of the BSD-3-Clause license used by this project is the following (the
text is also available from the [`LICENSE.BSD`](./LICENSE.BSD) file):
```
Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
Copyright (C) 2017-2024 SUSE LLC. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
### `MPL-2.0` ###
All other files (unless otherwise marked) are licensed under the Mozilla Public
License (version 2.0).
The text of the Mozilla Public License (version 2.0) is the following (the text
is also available from the [`LICENSE.MPL-2.0`](./LICENSE.MPL-2.0) file):
```
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at https://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.
```


@@ -0,0 +1,373 @@
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at https://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.

View File

@@ -67,7 +67,8 @@ func SecureJoin(root, unsafePath string) (string, error) {
[libpathrs]: https://github.com/openSUSE/libpathrs
[go#20126]: https://github.com/golang/go/issues/20126
### New API ###
### <a name="new-api" /> New API ###
[#new-api]: #new-api
While we recommend users switch to [libpathrs][libpathrs] as soon as it has a
stable release, some methods implemented by libpathrs have been ported to this
@@ -165,5 +166,19 @@ after `MkdirAll`).
### License ###
The license of this project is the same as Go, which is a BSD 3-clause license
available in the `LICENSE` file.
`SPDX-License-Identifier: BSD-3-Clause AND MPL-2.0`
Some of the code in this project is derived from Go, and is licensed under a
BSD 3-clause license (available in `LICENSE.BSD`). Other files (many of which
are derived from [libpathrs][libpathrs]) are licensed under the Mozilla Public
License version 2.0 (available in `LICENSE.MPL-2.0`). If you are using the
["New API" described above][#new-api], you are probably using code from files
released under this license.
Every source file in this project has a copyright header describing its
license. Please check the license headers of each file to see what license
applies to it.
See [COPYING.md](./COPYING.md) for some more details.
[umoci]: https://github.com/opencontainers/umoci
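
The hunk header above references the legacy `SecureJoin(root, unsafePath string) (string, error)` entry point. As a rough editorial illustration (not part of the upstream README), here is a minimal hypothetical caller; only the signature shown in the hunk header is taken from the diff, and the paths are made up.

```go
// Hypothetical sketch of the legacy API referenced in the hunk header above.
// Only SecureJoin's signature is taken from the diff; paths are examples.
package main

import (
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// Lexically resolve an untrusted path so it cannot escape /srv/data.
	// "../../etc/passwd" is clamped to the root, yielding /srv/data/etc/passwd.
	safePath, err := securejoin.SecureJoin("/srv/data", "../../etc/passwd")
	if err != nil {
		panic(err)
	}
	fmt.Println(safePath)

	// Note: the result is a plain path string, so using it with ordinary os
	// calls is still racy if an attacker can modify /srv/data concurrently.
}
```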

View File

@@ -1 +1 @@
0.4.1
0.5.1

View File

@@ -0,0 +1,29 @@
# SPDX-License-Identifier: MPL-2.0
# Copyright (C) 2025 Aleksa Sarai <cyphar@cyphar.com>
# Copyright (C) 2025 SUSE LLC
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
comment:
layout: "condensed_header, reach, diff, components, condensed_files, condensed_footer"
require_changes: true
branches:
- main
coverage:
range: 60..100
status:
project:
default:
target: 85%
threshold: 0%
patch:
default:
target: auto
informational: true
github_checks:
annotations: false

View File

@@ -0,0 +1,48 @@
// SPDX-License-Identifier: MPL-2.0
//go:build linux
// Copyright (C) 2024-2025 Aleksa Sarai <cyphar@cyphar.com>
// Copyright (C) 2024-2025 SUSE LLC
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
package securejoin
import (
"github.com/cyphar/filepath-securejoin/pathrs-lite"
)
var (
// MkdirAll is a wrapper around [pathrs.MkdirAll].
//
// Deprecated: You should use [pathrs.MkdirAll] directly instead. This
// wrapper will be removed in filepath-securejoin v0.6.
MkdirAll = pathrs.MkdirAll
// MkdirAllHandle is a wrapper around [pathrs.MkdirAllHandle].
//
// Deprecated: You should use [pathrs.MkdirAllHandle] directly instead.
// This wrapper will be removed in filepath-securejoin v0.6.
MkdirAllHandle = pathrs.MkdirAllHandle
// OpenInRoot is a wrapper around [pathrs.OpenInRoot].
//
// Deprecated: You should use [pathrs.OpenInRoot] directly instead. This
// wrapper will be removed in filepath-securejoin v0.6.
OpenInRoot = pathrs.OpenInRoot
// OpenatInRoot is a wrapper around [pathrs.OpenatInRoot].
//
// Deprecated: You should use [pathrs.OpenatInRoot] directly instead. This
// wrapper will be removed in filepath-securejoin v0.6.
OpenatInRoot = pathrs.OpenatInRoot
// Reopen is a wrapper around [pathrs.Reopen].
//
// Deprecated: You should use [pathrs.Reopen] directly instead. This
// wrapper will be removed in filepath-securejoin v0.6.
Reopen = pathrs.Reopen
)

View File

@@ -1,3 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
// Copyright (C) 2017-2024 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
@@ -14,14 +16,13 @@
// **not** safe against race conditions where an attacker changes the
// filesystem after (or during) the [SecureJoin] operation.
//
// The new API is made up of [OpenInRoot] and [MkdirAll] (and derived
// functions). These are safe against racing attackers and have several other
// protections that are not provided by the legacy API. There are many more
// operations that most programs expect to be able to do safely, but we do not
// provide explicit support for them because we want to encourage users to
// switch to [libpathrs](https://github.com/openSUSE/libpathrs) which is a
// cross-language next-generation library that is entirely designed around
// operating on paths safely.
// The new API is available in the [pathrs-lite] subpackage, and provides
// protections against racing attackers as well as several other key
// protections against attacks often seen by container runtimes. As the name
// suggests, [pathrs-lite] is a stripped down (pure Go) reimplementation of
// [libpathrs]. The main APIs provided are [OpenInRoot], [MkdirAll], and
// [procfs.Handle] -- other APIs are not planned to be ported. The long-term
// goal is for users to migrate to [libpathrs] which is more fully-featured.
//
// securejoin has been used by several container runtimes (Docker, runc,
// Kubernetes, etc) for quite a few years as a de-facto standard for operating
@@ -31,9 +32,16 @@
// API as soon as possible (or even better, switch to libpathrs).
//
// This project was initially intended to be included in the Go standard
// library, but [it was rejected](https://go.dev/issue/20126). There is now a
// [new Go proposal](https://go.dev/issue/67002) for a safe path resolution API
// that shares some of the goals of filepath-securejoin. However, that design
// is intended to work like `openat2(RESOLVE_BENEATH)` which does not fit the
// usecase of container runtimes and most system tools.
// library, but it was rejected (see https://go.dev/issue/20126). Much later,
// [os.Root] was added to the Go stdlib that shares some of the goals of
// filepath-securejoin. However, its design is intended to work like
// openat2(RESOLVE_BENEATH) which does not fit the usecase of container
// runtimes and most system tools.
//
// [pathrs-lite]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin/pathrs-lite
// [libpathrs]: https://github.com/openSUSE/libpathrs
// [OpenInRoot]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin/pathrs-lite#OpenInRoot
// [MkdirAll]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin/pathrs-lite#MkdirAll
// [procfs.Handle]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin/pathrs-lite/procfs#Handle
// [os.Root]: https://pkg.go.dev/os#Root
package securejoin

View File

@@ -1,32 +0,0 @@
//go:build linux && go1.21
// Copyright (C) 2024 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package securejoin
import (
"slices"
"sync"
)
func slices_DeleteFunc[S ~[]E, E any](slice S, delFn func(E) bool) S {
return slices.DeleteFunc(slice, delFn)
}
func slices_Contains[S ~[]E, E comparable](slice S, val E) bool {
return slices.Contains(slice, val)
}
func slices_Clone[S ~[]E, E any](slice S) S {
return slices.Clone(slice)
}
func sync_OnceValue[T any](f func() T) func() T {
return sync.OnceValue(f)
}
func sync_OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) {
return sync.OnceValues(f)
}

View File

@@ -1,124 +0,0 @@
//go:build linux && !go1.21
// Copyright (C) 2024 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package securejoin
import (
"sync"
)
// These are very minimal implementations of functions that appear in Go 1.21's
// stdlib, included so that we can build on older Go versions. Most are
// borrowed directly from the stdlib, and a few are modified to be "obviously
// correct" without needing to copy too many other helpers.
// clearSlice is equivalent to the builtin clear from Go 1.21.
// Copied from the Go 1.24 stdlib implementation.
func clearSlice[S ~[]E, E any](slice S) {
var zero E
for i := range slice {
slice[i] = zero
}
}
// Copied from the Go 1.24 stdlib implementation.
func slices_IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
for i := range s {
if f(s[i]) {
return i
}
}
return -1
}
// Copied from the Go 1.24 stdlib implementation.
func slices_DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
i := slices_IndexFunc(s, del)
if i == -1 {
return s
}
// Don't start copying elements until we find one to delete.
for j := i + 1; j < len(s); j++ {
if v := s[j]; !del(v) {
s[i] = v
i++
}
}
clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
return s[:i]
}
// Similar to the stdlib slices.Contains, except that we don't have
// slices.Index so we need to use slices.IndexFunc for this non-Func helper.
func slices_Contains[S ~[]E, E comparable](s S, v E) bool {
return slices_IndexFunc(s, func(e E) bool { return e == v }) >= 0
}
// Copied from the Go 1.24 stdlib implementation.
func slices_Clone[S ~[]E, E any](s S) S {
// Preserve nil in case it matters.
if s == nil {
return nil
}
return append(S([]E{}), s...)
}
// Copied from the Go 1.24 stdlib implementation.
func sync_OnceValue[T any](f func() T) func() T {
var (
once sync.Once
valid bool
p any
result T
)
g := func() {
defer func() {
p = recover()
if !valid {
panic(p)
}
}()
result = f()
f = nil
valid = true
}
return func() T {
once.Do(g)
if !valid {
panic(p)
}
return result
}
}
// Copied from the Go 1.24 stdlib implementation.
func sync_OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) {
var (
once sync.Once
valid bool
p any
r1 T1
r2 T2
)
g := func() {
defer func() {
p = recover()
if !valid {
panic(p)
}
}()
r1, r2 = f()
f = nil
valid = true
}
return func() (T1, T2) {
once.Do(g)
if !valid {
panic(p)
}
return r1, r2
}
}

View File

@@ -0,0 +1,15 @@
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
// Copyright (C) 2017-2025 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package consts contains the definitions of internal constants used
// throughout filepath-securejoin.
package consts
// MaxSymlinkLimit is the maximum number of symlinks that can be encountered
// during a single lookup before returning -ELOOP. At time of writing, Linux
// has an internal limit of 40.
const MaxSymlinkLimit = 255

View File

@@ -1,3 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
// Copyright (C) 2017-2025 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
@@ -11,9 +13,9 @@ import (
"path/filepath"
"strings"
"syscall"
)
const maxSymlinkLimit = 255
"github.com/cyphar/filepath-securejoin/internal/consts"
)
// IsNotExist tells you if err is an error that implies that either the path
// accessed does not exist (or path components don't exist). This is
@@ -49,12 +51,13 @@ func hasDotDot(path string) bool {
return strings.Contains("/"+path+"/", "/../")
}
// SecureJoinVFS joins the two given path components (similar to [filepath.Join]) except
// that the returned path is guaranteed to be scoped inside the provided root
// path (when evaluated). Any symbolic links in the path are evaluated with the
// given root treated as the root of the filesystem, similar to a chroot. The
// filesystem state is evaluated through the given [VFS] interface (if nil, the
// standard [os].* family of functions are used).
// SecureJoinVFS joins the two given path components (similar to
// [filepath.Join]) except that the returned path is guaranteed to be scoped
// inside the provided root path (when evaluated). Any symbolic links in the
// path are evaluated with the given root treated as the root of the
// filesystem, similar to a chroot. The filesystem state is evaluated through
// the given [VFS] interface (if nil, the standard [os].* family of functions
// are used).
//
// Note that the guarantees provided by this function only apply if the path
// components in the returned string are not modified (in other words are not
@@ -78,7 +81,7 @@ func hasDotDot(path string) bool {
// fully resolved using [filepath.EvalSymlinks] or otherwise constructed to
// avoid containing symlink components. Of course, the root also *must not* be
// attacker-controlled.
func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { //nolint:revive // name is part of public API
// The root path must not contain ".." components, otherwise when we join
// the subpath we will end up with a weird path. We could work around this
// in other ways but users shouldn't be giving us non-lexical root paths in
@@ -138,7 +141,7 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
// It's a symlink, so get its contents and expand it by prepending it
// to the yet-unparsed path.
linksWalked++
if linksWalked > maxSymlinkLimit {
if linksWalked > consts.MaxSymlinkLimit {
return "", &os.PathError{Op: "SecureJoin", Path: root + string(filepath.Separator) + unsafePath, Err: syscall.ELOOP}
}

View File

@@ -1,127 +0,0 @@
//go:build linux
// Copyright (C) 2024 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package securejoin
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"golang.org/x/sys/unix"
)
var hasOpenat2 = sync_OnceValue(func() bool {
fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{
Flags: unix.O_PATH | unix.O_CLOEXEC,
Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT,
})
if err != nil {
return false
}
_ = unix.Close(fd)
return true
})
func scopedLookupShouldRetry(how *unix.OpenHow, err error) bool {
// RESOLVE_IN_ROOT (and RESOLVE_BENEATH) can return -EAGAIN if we resolve
// ".." while a mount or rename occurs anywhere on the system. This could
// happen spuriously, or as the result of an attacker trying to mess with
// us during lookup.
//
// In addition, scoped lookups have a "safety check" at the end of
// complete_walk which will return -EXDEV if the final path is not in the
// root.
return how.Resolve&(unix.RESOLVE_IN_ROOT|unix.RESOLVE_BENEATH) != 0 &&
(errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EXDEV))
}
const scopedLookupMaxRetries = 10
func openat2File(dir *os.File, path string, how *unix.OpenHow) (*os.File, error) {
fullPath := dir.Name() + "/" + path
// Make sure we always set O_CLOEXEC.
how.Flags |= unix.O_CLOEXEC
var tries int
for tries < scopedLookupMaxRetries {
fd, err := unix.Openat2(int(dir.Fd()), path, how)
if err != nil {
if scopedLookupShouldRetry(how, err) {
// We retry a couple of times to avoid the spurious errors, and
// if we are being attacked then returning -EAGAIN is the best
// we can do.
tries++
continue
}
return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: err}
}
// If we are using RESOLVE_IN_ROOT, the name we generated may be wrong.
// NOTE: The procRoot code MUST NOT use RESOLVE_IN_ROOT, otherwise
// you'll get infinite recursion here.
if how.Resolve&unix.RESOLVE_IN_ROOT == unix.RESOLVE_IN_ROOT {
if actualPath, err := rawProcSelfFdReadlink(fd); err == nil {
fullPath = actualPath
}
}
return os.NewFile(uintptr(fd), fullPath), nil
}
return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: errPossibleAttack}
}
func lookupOpenat2(root *os.File, unsafePath string, partial bool) (*os.File, string, error) {
if !partial {
file, err := openat2File(root, unsafePath, &unix.OpenHow{
Flags: unix.O_PATH | unix.O_CLOEXEC,
Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS,
})
return file, "", err
}
return partialLookupOpenat2(root, unsafePath)
}
// partialLookupOpenat2 is an alternative implementation of
// partialLookupInRoot, using openat2(RESOLVE_IN_ROOT) to more safely get a
// handle to the deepest existing child of the requested path within the root.
func partialLookupOpenat2(root *os.File, unsafePath string) (*os.File, string, error) {
// TODO: Implement this as a git-bisect-like binary search.
unsafePath = filepath.ToSlash(unsafePath) // noop
endIdx := len(unsafePath)
var lastError error
for endIdx > 0 {
subpath := unsafePath[:endIdx]
handle, err := openat2File(root, subpath, &unix.OpenHow{
Flags: unix.O_PATH | unix.O_CLOEXEC,
Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS,
})
if err == nil {
// Jump over the slash if we have a non-"" remainingPath.
if endIdx < len(unsafePath) {
endIdx += 1
}
// We found a subpath!
return handle, unsafePath[endIdx:], lastError
}
if errors.Is(err, unix.ENOENT) || errors.Is(err, unix.ENOTDIR) {
// That path doesn't exist, let's try the next directory up.
endIdx = strings.LastIndexByte(subpath, '/')
lastError = err
continue
}
return nil, "", fmt.Errorf("open subpath: %w", err)
}
// If we couldn't open anything, the whole subpath is missing. Return a
// copy of the root fd so that the caller doesn't close this one by
// accident.
rootClone, err := dupFile(root)
if err != nil {
return nil, "", err
}
return rootClone, unsafePath, lastError
}

View File

@@ -1,59 +0,0 @@
//go:build linux
// Copyright (C) 2024 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package securejoin
import (
"os"
"path/filepath"
"golang.org/x/sys/unix"
)
func dupFile(f *os.File) (*os.File, error) {
fd, err := unix.FcntlInt(f.Fd(), unix.F_DUPFD_CLOEXEC, 0)
if err != nil {
return nil, os.NewSyscallError("fcntl(F_DUPFD_CLOEXEC)", err)
}
return os.NewFile(uintptr(fd), f.Name()), nil
}
func openatFile(dir *os.File, path string, flags int, mode int) (*os.File, error) {
// Make sure we always set O_CLOEXEC.
flags |= unix.O_CLOEXEC
fd, err := unix.Openat(int(dir.Fd()), path, flags, uint32(mode))
if err != nil {
return nil, &os.PathError{Op: "openat", Path: dir.Name() + "/" + path, Err: err}
}
// All of the paths we use with openatFile are guaranteed to be
// lexically safe, so we can use filepath.Join here.
fullPath := filepath.Join(dir.Name(), path)
return os.NewFile(uintptr(fd), fullPath), nil
}
func fstatatFile(dir *os.File, path string, flags int) (unix.Stat_t, error) {
var stat unix.Stat_t
if err := unix.Fstatat(int(dir.Fd()), path, &stat, flags); err != nil {
return stat, &os.PathError{Op: "fstatat", Path: dir.Name() + "/" + path, Err: err}
}
return stat, nil
}
func readlinkatFile(dir *os.File, path string) (string, error) {
size := 4096
for {
linkBuf := make([]byte, size)
n, err := unix.Readlinkat(int(dir.Fd()), path, linkBuf)
if err != nil {
return "", &os.PathError{Op: "readlinkat", Path: dir.Name() + "/" + path, Err: err}
}
if n != size {
return string(linkBuf[:n]), nil
}
// Possible truncation, resize the buffer.
size *= 2
}
}

View File

@@ -0,0 +1,33 @@
## `pathrs-lite` ##
`github.com/cyphar/filepath-securejoin/pathrs-lite` provides a minimal **pure
Go** implementation of the core bits of [libpathrs][]. It is not intended to
be a complete replacement for libpathrs; instead, it is mainly intended to be
useful as a transition tool for existing Go projects.
The long-term plan for `pathrs-lite` is to provide a build tag that will cause
all `pathrs-lite` operations to call into libpathrs directly, thus removing
code duplication for projects that wish to make use of libpathrs (and providing
the ability for software packagers to opt-in to libpathrs support without
needing to patch upstream).
[libpathrs]: https://github.com/cyphar/libpathrs
### License ###
Most of this subpackage is licensed under the Mozilla Public License (version
2.0). For more information, see the top-level [COPYING.md][] and
[LICENSE.MPL-2.0][] files, as well as the individual license headers for each
file.
```
Copyright (C) 2024-2025 Aleksa Sarai <cyphar@cyphar.com>
Copyright (C) 2024-2025 SUSE LLC
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at https://mozilla.org/MPL/2.0/.
```
[COPYING.md]: ../COPYING.md
[LICENSE.MPL-2.0]: ../LICENSE.MPL-2.0
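
As a companion to the README above, a hypothetical usage sketch of the race-safe lookup API. The diff only confirms that `OpenInRoot` and `Reopen` exist in the `pathrs-lite` package; their exact signatures (`OpenInRoot(root, unsafePath string) (*os.File, error)` and `Reopen(handle *os.File, flags int) (*os.File, error)`) are assumed here.

```go
// Hypothetical sketch; function names are confirmed by this diff, the exact
// signatures and paths are assumptions.
package main

import (
	"fmt"

	pathrs "github.com/cyphar/filepath-securejoin/pathrs-lite"
	"golang.org/x/sys/unix"
)

func main() {
	// Resolve an untrusted path inside /srv/data without the racy
	// "lexically join, then open" pattern of the legacy API.
	handle, err := pathrs.OpenInRoot("/srv/data", "uploads/report.txt")
	if err != nil {
		panic(err)
	}
	defer handle.Close()

	// OpenInRoot returns an O_PATH-style handle (assumption based on the
	// upstream docs); Reopen turns it into a descriptor usable for I/O.
	f, err := pathrs.Reopen(handle, unix.O_RDONLY)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	fmt.Println("safely opened", f.Name())
}
```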

View File

@@ -0,0 +1,14 @@
// SPDX-License-Identifier: MPL-2.0
//go:build linux
// Copyright (C) 2024-2025 Aleksa Sarai <cyphar@cyphar.com>
// Copyright (C) 2024-2025 SUSE LLC
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
// Package pathrs (pathrs-lite) is a less complete pure Go implementation of
// some of the APIs provided by [libpathrs].
package pathrs

View File

@@ -0,0 +1,30 @@
// SPDX-License-Identifier: MPL-2.0
// Copyright (C) 2025 Aleksa Sarai <cyphar@cyphar.com>
// Copyright (C) 2025 SUSE LLC
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
// Package assert provides some basic assertion helpers for Go.
package assert
import (
"fmt"
)
// Assert panics if the predicate is false with the provided argument.
func Assert(predicate bool, msg any) {
if !predicate {
panic(msg)
}
}
// Assertf panics if the predicate is false and formats the message using the
// same formatting as [fmt.Printf].
//
// [fmt.Printf]: https://pkg.go.dev/fmt#Printf
func Assertf(predicate bool, fmtMsg string, args ...any) {
Assert(predicate, fmt.Sprintf(fmtMsg, args...))
}
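
For illustration, a hypothetical caller of the two helpers above (being an `internal` package, `assert` is only importable from within the filepath-securejoin module):

```go
// Hypothetical caller inside the module; signatures match the file above.
package example

import "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/assert"

// mustPositive demonstrates both helpers: Assert takes a pre-built message,
// Assertf builds one in fmt.Printf style.
func mustPositive(n int) int {
	assert.Assert(n != 0, "n must not be zero")
	assert.Assertf(n > 0, "expected a positive value, got %d", n)
	return n
}
```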

View File

@@ -0,0 +1,41 @@
// SPDX-License-Identifier: MPL-2.0
//go:build linux
// Copyright (C) 2024-2025 Aleksa Sarai <cyphar@cyphar.com>
// Copyright (C) 2024-2025 SUSE LLC
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
// Package internal contains unexported common code for filepath-securejoin.
package internal
import (
"errors"
"golang.org/x/sys/unix"
)
type xdevErrorish struct {
description string
}
func (err xdevErrorish) Error() string { return err.description }
func (err xdevErrorish) Is(target error) bool { return target == unix.EXDEV }
var (
// ErrPossibleAttack indicates that some attack was detected.
ErrPossibleAttack error = xdevErrorish{"possible attack detected"}
// ErrPossibleBreakout indicates that during an operation we ended up in a
// state that could be a breakout but we detected it.
ErrPossibleBreakout error = xdevErrorish{"possible breakout detected"}
// ErrInvalidDirectory indicates an unlinked directory.
ErrInvalidDirectory = errors.New("wandered into deleted directory")
// ErrDeletedInode indicates an unlinked file (non-directory).
ErrDeletedInode = errors.New("cannot verify path of deleted inode")
)

View File

@@ -0,0 +1,148 @@
// SPDX-License-Identifier: MPL-2.0
//go:build linux
// Copyright (C) 2024-2025 Aleksa Sarai <cyphar@cyphar.com>
// Copyright (C) 2024-2025 SUSE LLC
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
package fd
import (
"fmt"
"os"
"path/filepath"
"runtime"
"golang.org/x/sys/unix"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat"
)
// prepareAt returns -EBADF (an invalid fd) if dir is nil, otherwise it uses
// dir.Fd(). We use -EBADF because in filepath-securejoin we generally
// don't want to allow relative-to-cwd paths. The returned path is an
// *informational* string that describes a reasonable pathname for the given
// *at(2) arguments. You must not use the full path for any actual filesystem
// operations.
func prepareAt(dir Fd, path string) (dirFd int, unsafeUnmaskedPath string) {
dirFd, dirPath := -int(unix.EBADF), "."
if dir != nil {
dirFd, dirPath = int(dir.Fd()), dir.Name()
}
if !filepath.IsAbs(path) {
// only prepend the dirfd path for relative paths
path = dirPath + "/" + path
}
// NOTE: If path is "." or "", the returned path won't be filepath.Clean,
// but that's okay since this path is either used for errors (in which case
// a trailing "/" or "/." is important information) or will be
// filepath.Clean'd later (in the case of fd.Openat).
return dirFd, path
}
// Openat is an [Fd]-based wrapper around unix.Openat.
func Openat(dir Fd, path string, flags int, mode int) (*os.File, error) { //nolint:unparam // wrapper func
dirFd, fullPath := prepareAt(dir, path)
// Make sure we always set O_CLOEXEC.
flags |= unix.O_CLOEXEC
fd, err := unix.Openat(dirFd, path, flags, uint32(mode))
if err != nil {
return nil, &os.PathError{Op: "openat", Path: fullPath, Err: err}
}
runtime.KeepAlive(dir)
// openat is only used with lexically-safe paths so we can use
// filepath.Clean here, and also the path itself is not going to be used
// for actual path operations.
fullPath = filepath.Clean(fullPath)
return os.NewFile(uintptr(fd), fullPath), nil
}
// Fstatat is an [Fd]-based wrapper around unix.Fstatat.
func Fstatat(dir Fd, path string, flags int) (unix.Stat_t, error) {
dirFd, fullPath := prepareAt(dir, path)
var stat unix.Stat_t
if err := unix.Fstatat(dirFd, path, &stat, flags); err != nil {
return stat, &os.PathError{Op: "fstatat", Path: fullPath, Err: err}
}
runtime.KeepAlive(dir)
return stat, nil
}
// Faccessat is an [Fd]-based wrapper around unix.Faccessat.
func Faccessat(dir Fd, path string, mode uint32, flags int) error {
dirFd, fullPath := prepareAt(dir, path)
err := unix.Faccessat(dirFd, path, mode, flags)
if err != nil {
err = &os.PathError{Op: "faccessat", Path: fullPath, Err: err}
}
runtime.KeepAlive(dir)
return err
}
// Readlinkat is an [Fd]-based wrapper around unix.Readlinkat.
func Readlinkat(dir Fd, path string) (string, error) {
dirFd, fullPath := prepareAt(dir, path)
size := 4096
for {
linkBuf := make([]byte, size)
n, err := unix.Readlinkat(dirFd, path, linkBuf)
if err != nil {
return "", &os.PathError{Op: "readlinkat", Path: fullPath, Err: err}
}
runtime.KeepAlive(dir)
if n != size {
return string(linkBuf[:n]), nil
}
// Possible truncation, resize the buffer.
size *= 2
}
}
const (
// STATX_MNT_ID_UNIQUE is provided in golang.org/x/sys@v0.20.0, but in order to
// avoid bumping the requirement for a single constant we can just define it
// ourselves.
_STATX_MNT_ID_UNIQUE = 0x4000 //nolint:revive // unix.* name
// We don't care which mount ID we get. The kernel will give us the unique
// one if it is supported. If the kernel doesn't support
// STATX_MNT_ID_UNIQUE, the bit is ignored and the returned request mask
// will only contain STATX_MNT_ID (if supported).
wantStatxMntMask = _STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
)
var hasStatxMountID = gocompat.SyncOnceValue(func() bool {
var stx unix.Statx_t
err := unix.Statx(-int(unix.EBADF), "/", 0, wantStatxMntMask, &stx)
return err == nil && stx.Mask&wantStatxMntMask != 0
})
// GetMountID gets the mount identifier associated with the fd and path
// combination. It is effectively a wrapper around fetching
// STATX_MNT_ID{,_UNIQUE} with unix.Statx, but with a fallback to 0 if the
// kernel doesn't support the feature.
func GetMountID(dir Fd, path string) (uint64, error) {
// If we don't have statx(STATX_MNT_ID*) support, we can't do anything.
if !hasStatxMountID() {
return 0, nil
}
dirFd, fullPath := prepareAt(dir, path)
var stx unix.Statx_t
err := unix.Statx(dirFd, path, unix.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW, wantStatxMntMask, &stx)
if stx.Mask&wantStatxMntMask == 0 {
// It's not a kernel limitation, for some reason we couldn't get a
// mount ID. Assume it's some kind of attack.
err = fmt.Errorf("could not get mount id: %w", err)
}
if err != nil {
return 0, &os.PathError{Op: "statx(STATX_MNT_ID_...)", Path: fullPath, Err: err}
}
runtime.KeepAlive(dir)
return stx.Mnt_id, nil
}
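
The -EBADF convention described in the `prepareAt` comment means every wrapper above takes an explicit directory handle rather than defaulting to the current working directory. A small hypothetical example combining `Openat` and `GetMountID` (paths are examples only; the fd package is internal to the module):

```go
// Hypothetical, Linux-only illustration of the *at wrappers defined above.
package main

import (
	"fmt"
	"os"

	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd"
	"golang.org/x/sys/unix"
)

func main() {
	dir, err := os.Open("/etc")
	if err != nil {
		panic(err)
	}
	defer dir.Close()

	// Open "hostname" relative to the /etc handle. Passing a nil dir would
	// use -EBADF, which rejects relative-to-cwd lookups as described above.
	f, err := fd.Openat(dir, "hostname", unix.O_RDONLY|unix.O_NOFOLLOW, 0)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Fetch the mount ID for the same dirfd/path pair (0 if the running
	// kernel has no STATX_MNT_ID support).
	mntID, err := fd.GetMountID(dir, "hostname")
	if err != nil {
		panic(err)
	}
	fmt.Println("opened", f.Name(), "on mount", mntID)
}
```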

View File

@@ -0,0 +1,55 @@
// SPDX-License-Identifier: MPL-2.0
// Copyright (C) 2025 Aleksa Sarai <cyphar@cyphar.com>
// Copyright (C) 2025 SUSE LLC
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
// Package fd provides a drop-in interface-based replacement of [*os.File] that
// allows for things like noop-Close wrappers to be used.
//
// [*os.File]: https://pkg.go.dev/os#File
package fd
import (
"io"
"os"
)
// Fd is an interface that mirrors most of the API of [*os.File], allowing you
// to create wrappers that can be used in place of [*os.File].
//
// [*os.File]: https://pkg.go.dev/os#File
type Fd interface {
io.Closer
Name() string
Fd() uintptr
}
// Compile-time interface checks.
var (
_ Fd = (*os.File)(nil)
_ Fd = noClose{}
)
type noClose struct{ inner Fd }
func (f noClose) Name() string { return f.inner.Name() }
func (f noClose) Fd() uintptr { return f.inner.Fd() }
func (f noClose) Close() error { return nil }
// NopCloser returns an [*os.File]-like object where the [Close] method is now
// a no-op.
//
// Note that for [*os.File] and similar objects, the Go garbage collector will
// still call [Close] on the underlying file unless you use
// [runtime.SetFinalizer] to disable this behaviour. This is up to the caller
// to do (if necessary).
//
// [*os.File]: https://pkg.go.dev/os#File
// [Close]: https://pkg.go.dev/io#Closer
// [runtime.SetFinalizer]: https://pkg.go.dev/runtime#SetFinalizer
func NopCloser(f Fd) Fd { return noClose{inner: f} }
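
The `Fd` interface and `NopCloser` above are small, so a brief hypothetical example may help show the intent: any `*os.File` satisfies `Fd`, and `NopCloser` protects handles that are shared elsewhere (the same pattern the cached procfs handle later in this diff uses).

```go
// Hypothetical illustration; the fd package is internal to the module.
package main

import (
	"fmt"
	"os"

	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd"
)

// describe accepts anything satisfying fd.Fd, including a plain *os.File.
func describe(f fd.Fd) {
	fmt.Printf("%s -> fd %d\n", f.Name(), f.Fd())
}

func main() {
	f, err := os.Open("/etc/hostname")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	describe(f)

	// Wrap a handle that is shared elsewhere so an accidental Close becomes
	// a no-op; the underlying *os.File stays open.
	shared := fd.NopCloser(f)
	_ = shared.Close()
	describe(shared)
}
```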

View File

@@ -0,0 +1,78 @@
// SPDX-License-Identifier: MPL-2.0
//go:build linux
// Copyright (C) 2024-2025 Aleksa Sarai <cyphar@cyphar.com>
// Copyright (C) 2024-2025 SUSE LLC
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
package fd
import (
"fmt"
"os"
"runtime"
"golang.org/x/sys/unix"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal"
)
// DupWithName creates a new file descriptor referencing the same underlying
// file, but with the provided name instead of fd.Name().
func DupWithName(fd Fd, name string) (*os.File, error) {
fd2, err := unix.FcntlInt(fd.Fd(), unix.F_DUPFD_CLOEXEC, 0)
if err != nil {
return nil, os.NewSyscallError("fcntl(F_DUPFD_CLOEXEC)", err)
}
runtime.KeepAlive(fd)
return os.NewFile(uintptr(fd2), name), nil
}
// Dup creates a new file descriptor referencing the same underlying file.
func Dup(fd Fd) (*os.File, error) {
return DupWithName(fd, fd.Name())
}
// Fstat is an [Fd]-based wrapper around unix.Fstat.
func Fstat(fd Fd) (unix.Stat_t, error) {
var stat unix.Stat_t
if err := unix.Fstat(int(fd.Fd()), &stat); err != nil {
return stat, &os.PathError{Op: "fstat", Path: fd.Name(), Err: err}
}
runtime.KeepAlive(fd)
return stat, nil
}
// Fstatfs is an [Fd]-based wrapper around unix.Fstatfs.
func Fstatfs(fd Fd) (unix.Statfs_t, error) {
var statfs unix.Statfs_t
if err := unix.Fstatfs(int(fd.Fd()), &statfs); err != nil {
return statfs, &os.PathError{Op: "fstatfs", Path: fd.Name(), Err: err}
}
runtime.KeepAlive(fd)
return statfs, nil
}
// IsDeadInode detects whether the file has been unlinked from a filesystem and
// is thus a "dead inode" from the kernel's perspective.
func IsDeadInode(file Fd) error {
// If the nlink of a file drops to 0, an attacker may be deleting
// directories during our walk, which could result in weird /proc values.
// It's better to error out in this case.
stat, err := Fstat(file)
if err != nil {
return fmt.Errorf("check for dead inode: %w", err)
}
if stat.Nlink == 0 {
err := internal.ErrDeletedInode
if stat.Mode&unix.S_IFMT == unix.S_IFDIR {
err = internal.ErrInvalidDirectory
}
return fmt.Errorf("%w %q", err, file.Name())
}
return nil
}

View File

@@ -0,0 +1,54 @@
// SPDX-License-Identifier: MPL-2.0
//go:build linux
// Copyright (C) 2024-2025 Aleksa Sarai <cyphar@cyphar.com>
// Copyright (C) 2024-2025 SUSE LLC
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
package fd
import (
"os"
"runtime"
"golang.org/x/sys/unix"
)
// Fsopen is an [Fd]-based wrapper around unix.Fsopen.
func Fsopen(fsName string, flags int) (*os.File, error) {
// Make sure we always set O_CLOEXEC.
flags |= unix.FSOPEN_CLOEXEC
fd, err := unix.Fsopen(fsName, flags)
if err != nil {
return nil, os.NewSyscallError("fsopen "+fsName, err)
}
return os.NewFile(uintptr(fd), "fscontext:"+fsName), nil
}
// Fsmount is an [Fd]-based wrapper around unix.Fsmount.
func Fsmount(ctx Fd, flags, mountAttrs int) (*os.File, error) {
// Make sure we always set O_CLOEXEC.
flags |= unix.FSMOUNT_CLOEXEC
fd, err := unix.Fsmount(int(ctx.Fd()), flags, mountAttrs)
if err != nil {
return nil, os.NewSyscallError("fsmount "+ctx.Name(), err)
}
return os.NewFile(uintptr(fd), "fsmount:"+ctx.Name()), nil
}
// OpenTree is an [Fd]-based wrapper around unix.OpenTree.
func OpenTree(dir Fd, path string, flags uint) (*os.File, error) {
dirFd, fullPath := prepareAt(dir, path)
// Make sure we always set O_CLOEXEC.
flags |= unix.OPEN_TREE_CLOEXEC
fd, err := unix.OpenTree(dirFd, path, flags)
if err != nil {
return nil, &os.PathError{Op: "open_tree", Path: fullPath, Err: err}
}
runtime.KeepAlive(dir)
return os.NewFile(uintptr(fd), fullPath), nil
}

View File

@@ -0,0 +1,62 @@
// SPDX-License-Identifier: MPL-2.0
//go:build linux
// Copyright (C) 2024-2025 Aleksa Sarai <cyphar@cyphar.com>
// Copyright (C) 2024-2025 SUSE LLC
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
package fd
import (
"errors"
"os"
"runtime"
"golang.org/x/sys/unix"
)
func scopedLookupShouldRetry(how *unix.OpenHow, err error) bool {
// RESOLVE_IN_ROOT (and RESOLVE_BENEATH) can return -EAGAIN if we resolve
// ".." while a mount or rename occurs anywhere on the system. This could
// happen spuriously, or as the result of an attacker trying to mess with
// us during lookup.
//
// In addition, scoped lookups have a "safety check" at the end of
// complete_walk which will return -EXDEV if the final path is not in the
// root.
return how.Resolve&(unix.RESOLVE_IN_ROOT|unix.RESOLVE_BENEATH) != 0 &&
(errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EXDEV))
}
// This is a fairly arbitrary limit we have just to avoid an attacker being
// able to make us spin in an infinite retry loop -- callers can choose to
// retry on EAGAIN if they prefer.
const scopedLookupMaxRetries = 128
// Openat2 is an [Fd]-based wrapper around unix.Openat2, but with some retry
// logic in case of EAGAIN errors.
func Openat2(dir Fd, path string, how *unix.OpenHow) (*os.File, error) {
dirFd, fullPath := prepareAt(dir, path)
// Make sure we always set O_CLOEXEC.
how.Flags |= unix.O_CLOEXEC
var tries int
for {
fd, err := unix.Openat2(dirFd, path, how)
if err != nil {
if scopedLookupShouldRetry(how, err) && tries < scopedLookupMaxRetries {
// We retry a couple of times to avoid the spurious errors, and
// if we are being attacked then returning -EAGAIN is the best
// we can do.
tries++
continue
}
return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: err}
}
runtime.KeepAlive(dir)
return os.NewFile(uintptr(fd), fullPath), nil
}
}

View File

@@ -0,0 +1,10 @@
## gocompat ##
This directory contains backports of stdlib functions from later Go versions so
that filepath-securejoin can continue to be used by projects that are stuck with
Go 1.18 support. Note that often filepath-securejoin is added in security
patches for old releases, so avoiding the need to bump Go compiler requirements
is a huge plus to downstreams.
The source code is licensed under the same license as the Go stdlib. See the
source files for the precise license information.

View File

@@ -0,0 +1,13 @@
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux && go1.20
// Copyright (C) 2025 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gocompat includes compatibility shims (backported from future Go
// stdlib versions) to permit filepath-securejoin to be used with older Go
// versions (often filepath-securejoin is added in security patches for old
// releases, so avoiding the need to bump Go compiler requirements is a huge
// plus to downstreams).
package gocompat

View File

@@ -1,18 +1,19 @@
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux && go1.20
// Copyright (C) 2024 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package securejoin
package gocompat
import (
"fmt"
)
// wrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except
// WrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except
// that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap)
// is only guaranteed to give you baseErr.
func wrapBaseError(baseErr, extraErr error) error {
func WrapBaseError(baseErr, extraErr error) error {
return fmt.Errorf("%w: %w", extraErr, baseErr)
}

View File

@@ -1,10 +1,12 @@
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux && !go1.20
// Copyright (C) 2024 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package securejoin
package gocompat
import (
"fmt"
@@ -27,10 +29,10 @@ func (err wrappedError) Error() string {
return fmt.Sprintf("%v: %v", err.isError, err.inner)
}
// wrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except
// WrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except
// that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap)
// is only guaranteed to give you baseErr.
func wrapBaseError(baseErr, extraErr error) error {
func WrapBaseError(baseErr, extraErr error) error {
return wrappedError{
inner: baseErr,
isError: extraErr,

View File

@@ -0,0 +1,53 @@
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux && go1.21
// Copyright (C) 2024-2025 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocompat
import (
"cmp"
"slices"
"sync"
)
// SlicesDeleteFunc is equivalent to Go 1.21's slices.DeleteFunc.
func SlicesDeleteFunc[S ~[]E, E any](slice S, delFn func(E) bool) S {
return slices.DeleteFunc(slice, delFn)
}
// SlicesContains is equivalent to Go 1.21's slices.Contains.
func SlicesContains[S ~[]E, E comparable](slice S, val E) bool {
return slices.Contains(slice, val)
}
// SlicesClone is equivalent to Go 1.21's slices.Clone.
func SlicesClone[S ~[]E, E any](slice S) S {
return slices.Clone(slice)
}
// SyncOnceValue is equivalent to Go 1.21's sync.OnceValue.
func SyncOnceValue[T any](f func() T) func() T {
return sync.OnceValue(f)
}
// SyncOnceValues is equivalent to Go 1.21's sync.OnceValues.
func SyncOnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) {
return sync.OnceValues(f)
}
// CmpOrdered is equivalent to Go 1.21's cmp.Ordered generic type definition.
type CmpOrdered = cmp.Ordered
// CmpCompare is equivalent to Go 1.21's cmp.Compare.
func CmpCompare[T CmpOrdered](x, y T) int {
return cmp.Compare(x, y)
}
// Max2 is equivalent to Go 1.21's max builtin (but only for two parameters).
func Max2[T CmpOrdered](x, y T) T {
return max(x, y)
}
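
A short hypothetical example of the memoised-probe pattern these backports enable, which is the same pattern `linux.HasOpenat2` and the procfs feature checks later in this diff rely on:

```go
// Hypothetical caller; SyncOnceValue's signature is as defined above.
package example

import (
	"os"

	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat"
)

// hasProcSelf runs the probe at most once; later calls return the cached result.
var hasProcSelf = gocompat.SyncOnceValue(func() bool {
	_, err := os.Stat("/proc/self")
	return err == nil
})
```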

View File

@@ -0,0 +1,187 @@
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux && !go1.21
// Copyright (C) 2021, 2022 The Go Authors. All rights reserved.
// Copyright (C) 2024-2025 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.BSD file.
package gocompat
import (
"sync"
)
// These are very minimal implementations of functions that appear in Go 1.21's
// stdlib, included so that we can build on older Go versions. Most are
// borrowed directly from the stdlib, and a few are modified to be "obviously
// correct" without needing to copy too many other helpers.
// clearSlice is equivalent to Go 1.21's builtin clear.
// Copied from the Go 1.24 stdlib implementation.
func clearSlice[S ~[]E, E any](slice S) {
var zero E
for i := range slice {
slice[i] = zero
}
}
// slicesIndexFunc is equivalent to Go 1.21's slices.IndexFunc.
// Copied from the Go 1.24 stdlib implementation.
func slicesIndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
for i := range s {
if f(s[i]) {
return i
}
}
return -1
}
// SlicesDeleteFunc is equivalent to Go 1.21's slices.DeleteFunc.
// Copied from the Go 1.24 stdlib implementation.
func SlicesDeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
i := slicesIndexFunc(s, del)
if i == -1 {
return s
}
// Don't start copying elements until we find one to delete.
for j := i + 1; j < len(s); j++ {
if v := s[j]; !del(v) {
s[i] = v
i++
}
}
clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
return s[:i]
}
// SlicesContains is equivalent to Go 1.21's slices.Contains.
// Similar to the stdlib slices.Contains, except that we don't have
// slices.Index so we need to use slices.IndexFunc for this non-Func helper.
func SlicesContains[S ~[]E, E comparable](s S, v E) bool {
return slicesIndexFunc(s, func(e E) bool { return e == v }) >= 0
}
// SlicesClone is equivalent to Go 1.21's slices.Clone.
// Copied from the Go 1.24 stdlib implementation.
func SlicesClone[S ~[]E, E any](s S) S {
// Preserve nil in case it matters.
if s == nil {
return nil
}
return append(S([]E{}), s...)
}
// SyncOnceValue is equivalent to Go 1.21's sync.OnceValue.
// Copied from the Go 1.25 stdlib implementation.
func SyncOnceValue[T any](f func() T) func() T {
// Use a struct so that there's a single heap allocation.
d := struct {
f func() T
once sync.Once
valid bool
p any
result T
}{
f: f,
}
return func() T {
d.once.Do(func() {
defer func() {
d.f = nil
d.p = recover()
if !d.valid {
panic(d.p)
}
}()
d.result = d.f()
d.valid = true
})
if !d.valid {
panic(d.p)
}
return d.result
}
}
// SyncOnceValues is equivalent to Go 1.21's sync.OnceValues.
// Copied from the Go 1.25 stdlib implementation.
func SyncOnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) {
// Use a struct so that there's a single heap allocation.
d := struct {
f func() (T1, T2)
once sync.Once
valid bool
p any
r1 T1
r2 T2
}{
f: f,
}
return func() (T1, T2) {
d.once.Do(func() {
defer func() {
d.f = nil
d.p = recover()
if !d.valid {
panic(d.p)
}
}()
d.r1, d.r2 = d.f()
d.valid = true
})
if !d.valid {
panic(d.p)
}
return d.r1, d.r2
}
}
// CmpOrdered is equivalent to Go 1.21's cmp.Ordered generic type definition.
// Copied from the Go 1.25 stdlib implementation.
type CmpOrdered interface {
~int | ~int8 | ~int16 | ~int32 | ~int64 |
~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
~float32 | ~float64 |
~string
}
// isNaN reports whether x is a NaN without requiring the math package.
// This will always return false if T is not floating-point.
// Copied from the Go 1.25 stdlib implementation.
func isNaN[T CmpOrdered](x T) bool {
return x != x
}
// CmpCompare is equivalent to Go 1.21's cmp.Compare.
// Copied from the Go 1.25 stdlib implementation.
func CmpCompare[T CmpOrdered](x, y T) int {
xNaN := isNaN(x)
yNaN := isNaN(y)
if xNaN {
if yNaN {
return 0
}
return -1
}
if yNaN {
return +1
}
if x < y {
return -1
}
if x > y {
return +1
}
return 0
}
// Max2 is equivalent to Go 1.21's max builtin for two parameters.
func Max2[T CmpOrdered](x, y T) T {
m := x
if y > m {
m = y
}
return m
}

View File

@@ -0,0 +1,123 @@
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (C) 2022 The Go Authors. All rights reserved.
// Copyright (C) 2025 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.BSD file.
// The parsing logic is very loosely based on the Go stdlib's
// src/internal/syscall/unix/kernel_version_linux.go but with an API that looks
// a bit like runc's libcontainer/system/kernelversion.
//
// TODO(cyphar): This API has been copied around to a lot of different projects
// (Docker, containerd, runc, and now filepath-securejoin) -- maybe we should
// put it in a separate project?
// Package kernelversion provides a simple mechanism for checking whether the
// running kernel is at least as new as some baseline kernel version. This is
// often useful when checking for features that would be too complicated to
// test support for (or in cases where we know that some kernel features in
// backport-heavy kernels are broken and need to be avoided).
package kernelversion
import (
"bytes"
"errors"
"fmt"
"strconv"
"strings"
"golang.org/x/sys/unix"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat"
)
// KernelVersion is a numeric representation of the key numerical elements of a
// kernel version (for instance, "4.1.2-default-1" would be represented as
// KernelVersion{4, 1, 2}).
type KernelVersion []uint64
func (kver KernelVersion) String() string {
var str strings.Builder
for idx, elem := range kver {
if idx != 0 {
_, _ = str.WriteRune('.')
}
_, _ = str.WriteString(strconv.FormatUint(elem, 10))
}
return str.String()
}
var errInvalidKernelVersion = errors.New("invalid kernel version")
// parseKernelVersion parses a string and creates a KernelVersion based on it.
func parseKernelVersion(kverStr string) (KernelVersion, error) {
kver := make(KernelVersion, 1, 3)
for idx, ch := range kverStr {
if '0' <= ch && ch <= '9' {
v := &kver[len(kver)-1]
*v = (*v * 10) + uint64(ch-'0')
} else {
if idx == 0 || kverStr[idx-1] < '0' || '9' < kverStr[idx-1] {
// "." must be preceded by a digit while in version section
return nil, fmt.Errorf("%w %q: kernel version has dot(s) followed by non-digit in version section", errInvalidKernelVersion, kverStr)
}
if ch != '.' {
break
}
kver = append(kver, 0)
}
}
if len(kver) < 2 {
return nil, fmt.Errorf("%w %q: kernel versions must contain at least two components", errInvalidKernelVersion, kverStr)
}
return kver, nil
}
// getKernelVersion gets the current kernel version.
var getKernelVersion = gocompat.SyncOnceValues(func() (KernelVersion, error) {
var uts unix.Utsname
if err := unix.Uname(&uts); err != nil {
return nil, err
}
// Remove the \x00 from the release.
release := uts.Release[:]
return parseKernelVersion(string(release[:bytes.IndexByte(release, 0)]))
})
// GreaterEqualThan returns true if the host kernel version is greater than
// or equal to the provided [KernelVersion]. When doing this comparison, any
// non-numerical suffixes of the host kernel version are ignored.
//
// If the number of components provided is not equal to the number of numerical
// components of the host kernel version, any missing components are treated as
// 0. This means that GreaterEqualThan(KernelVersion{4}) will be treated the
// same as GreaterEqualThan(KernelVersion{4, 0, 0, ..., 0, 0}), and that if the
// host kernel version is "4" then GreaterEqualThan(KernelVersion{4, 1}) will
// return false (because the host version will be treated as "4.0").
func GreaterEqualThan(wantKver KernelVersion) (bool, error) {
hostKver, err := getKernelVersion()
if err != nil {
return false, err
}
// Pad out the kernel version lengths to match one another.
cmpLen := gocompat.Max2(len(hostKver), len(wantKver))
hostKver = append(hostKver, make(KernelVersion, cmpLen-len(hostKver))...)
wantKver = append(wantKver, make(KernelVersion, cmpLen-len(wantKver))...)
for i := 0; i < cmpLen; i++ {
switch gocompat.CmpCompare(hostKver[i], wantKver[i]) {
case -1:
// host < want
return false, nil
case +1:
// host > want
return true, nil
case 0:
continue
}
}
// equal version values
return true, nil
}
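
To make the padding rule above concrete, here is a small hypothetical check against Linux 5.2 (the version gate used by `HasNewMountAPI` later in this diff):

```go
// Hypothetical caller; GreaterEqualThan and KernelVersion are as defined above.
package main

import (
	"fmt"

	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/kernelversion"
)

func main() {
	// {5, 2} is padded to match the host version's length, so a host kernel
	// reported as "5.2" or "5.2.0-default" compares as greater-or-equal.
	ok, err := kernelversion.GreaterEqualThan(kernelversion.KernelVersion{5, 2})
	if err != nil {
		panic(err)
	}
	fmt.Println("kernel >= 5.2:", ok)
}
```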

View File

@@ -0,0 +1,12 @@
// SPDX-License-Identifier: MPL-2.0
// Copyright (C) 2024-2025 Aleksa Sarai <cyphar@cyphar.com>
// Copyright (C) 2024-2025 SUSE LLC
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
// Package linux returns information about what features are supported on the
// running kernel.
package linux

View File

@@ -0,0 +1,47 @@
// SPDX-License-Identifier: MPL-2.0
//go:build linux
// Copyright (C) 2024-2025 Aleksa Sarai <cyphar@cyphar.com>
// Copyright (C) 2024-2025 SUSE LLC
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
package linux
import (
"golang.org/x/sys/unix"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/kernelversion"
)
// HasNewMountAPI returns whether the new fsopen(2) mount API is supported on
// the running kernel.
var HasNewMountAPI = gocompat.SyncOnceValue(func() bool {
// All of the pieces of the new mount API we use (fsopen, fsconfig,
// fsmount, open_tree) were added together in Linux 5.2[1,2], so we can
// just check for one of the syscalls and the others should also be
// available.
//
// Just try to use open_tree(2) to open a file without OPEN_TREE_CLONE.
// This is equivalent to openat(2), but tells us if open_tree is
// available (and thus all of the other basic new mount API syscalls).
// open_tree(2) is the most light-weight syscall to test here.
//
// [1]: merge commit 400913252d09
// [2]: <https://lore.kernel.org/lkml/153754740781.17872.7869536526927736855.stgit@warthog.procyon.org.uk/>
fd, err := unix.OpenTree(-int(unix.EBADF), "/", unix.OPEN_TREE_CLOEXEC)
if err != nil {
return false
}
_ = unix.Close(fd)
// RHEL 8 has a backport of fsopen(2) that appears to have some very
// difficult to debug performance pathology. As such, it seems prudent to
// simply reject pre-5.2 kernels.
isNotBackport, _ := kernelversion.GreaterEqualThan(kernelversion.KernelVersion{5, 2})
return isNotBackport
})

View File

@@ -0,0 +1,31 @@
// SPDX-License-Identifier: MPL-2.0
//go:build linux
// Copyright (C) 2024-2025 Aleksa Sarai <cyphar@cyphar.com>
// Copyright (C) 2024-2025 SUSE LLC
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
package linux
import (
"golang.org/x/sys/unix"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat"
)
// HasOpenat2 returns whether openat2(2) is supported on the running kernel.
var HasOpenat2 = gocompat.SyncOnceValue(func() bool {
fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{
Flags: unix.O_PATH | unix.O_CLOEXEC,
Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT,
})
if err != nil {
return false
}
_ = unix.Close(fd)
return true
})
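
Both probes above are memoised `func() bool` values, so callers can gate behaviour cheaply; a minimal hypothetical example:

```go
// Hypothetical caller; both probes are package-level vars defined above.
package main

import (
	"fmt"

	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux"
)

func main() {
	// Each probe runs its syscall test once and caches the answer.
	fmt.Println("openat2 available:", linux.HasOpenat2())
	fmt.Println("new mount API available:", linux.HasNewMountAPI())
}
```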

View File

@@ -0,0 +1,544 @@
// SPDX-License-Identifier: MPL-2.0
//go:build linux
// Copyright (C) 2024-2025 Aleksa Sarai <cyphar@cyphar.com>
// Copyright (C) 2024-2025 SUSE LLC
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
// Package procfs provides a safe API for operating on /proc on Linux. Note
// that this is the *internal* procfs API, mainly needed due to Go's
// restrictions on cyclic dependencies and its incredibly minimal visibility
// system without making a separate internal/ package.
package procfs
import (
"errors"
"fmt"
"io"
"os"
"runtime"
"strconv"
"golang.org/x/sys/unix"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/assert"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux"
)
// The kernel guarantees that the root inode of a procfs mount has an
// f_type of PROC_SUPER_MAGIC and st_ino of PROC_ROOT_INO.
const (
procSuperMagic = 0x9fa0 // PROC_SUPER_MAGIC
procRootIno = 1 // PROC_ROOT_INO
)
// verifyProcHandle checks that the handle is from a procfs filesystem.
// Contrast this to [verifyProcRoot], which also verifies that the handle is
// the root of a procfs mount.
func verifyProcHandle(procHandle fd.Fd) error {
if statfs, err := fd.Fstatfs(procHandle); err != nil {
return err
} else if statfs.Type != procSuperMagic {
return fmt.Errorf("%w: incorrect procfs root filesystem type 0x%x", errUnsafeProcfs, statfs.Type)
}
return nil
}
// verifyProcRoot verifies that the handle is the root of a procfs filesystem.
// Contrast this to [verifyProcHandle], which only verifies if the handle is
// some file on procfs (regardless of what file it is).
func verifyProcRoot(procRoot fd.Fd) error {
if err := verifyProcHandle(procRoot); err != nil {
return err
}
if stat, err := fd.Fstat(procRoot); err != nil {
return err
} else if stat.Ino != procRootIno {
return fmt.Errorf("%w: incorrect procfs root inode number %d", errUnsafeProcfs, stat.Ino)
}
return nil
}
type procfsFeatures struct {
// hasSubsetPid was added in Linux 5.8, along with hidepid=ptraceable (and
// string-based hidepid= values). Before this patchset, it was not really
// safe to try to modify procfs superblock flags because the superblock was
// shared -- so if this feature is not available, **you should not set any
// superblock flags**.
//
// 6814ef2d992a ("proc: add option to mount only a pids subset")
// fa10fed30f25 ("proc: allow to mount many instances of proc in one pid namespace")
// 24a71ce5c47f ("proc: instantiate only pids that we can ptrace on 'hidepid=4' mount option")
// 1c6c4d112e81 ("proc: use human-readable values for hidepid")
// 9ff7258575d5 ("Merge branch 'proc-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace")
hasSubsetPid bool
}
var getProcfsFeatures = gocompat.SyncOnceValue(func() procfsFeatures {
if !linux.HasNewMountAPI() {
return procfsFeatures{}
}
procfsCtx, err := fd.Fsopen("proc", unix.FSOPEN_CLOEXEC)
if err != nil {
return procfsFeatures{}
}
defer procfsCtx.Close() //nolint:errcheck // close failures aren't critical here
return procfsFeatures{
hasSubsetPid: unix.FsconfigSetString(int(procfsCtx.Fd()), "subset", "pid") == nil,
}
})
func newPrivateProcMount(subset bool) (_ *Handle, Err error) {
procfsCtx, err := fd.Fsopen("proc", unix.FSOPEN_CLOEXEC)
if err != nil {
return nil, err
}
defer procfsCtx.Close() //nolint:errcheck // close failures aren't critical here
if subset && getProcfsFeatures().hasSubsetPid {
// Try to configure hidepid=ptraceable,subset=pid if possible, but
// ignore errors.
_ = unix.FsconfigSetString(int(procfsCtx.Fd()), "hidepid", "ptraceable")
_ = unix.FsconfigSetString(int(procfsCtx.Fd()), "subset", "pid")
}
// Get an actual handle.
if err := unix.FsconfigCreate(int(procfsCtx.Fd())); err != nil {
return nil, os.NewSyscallError("fsconfig create procfs", err)
}
// TODO: Output any information from the fscontext log to debug logs.
procRoot, err := fd.Fsmount(procfsCtx, unix.FSMOUNT_CLOEXEC, unix.MS_NODEV|unix.MS_NOEXEC|unix.MS_NOSUID)
if err != nil {
return nil, err
}
defer func() {
if Err != nil {
_ = procRoot.Close()
}
}()
return newHandle(procRoot)
}
func clonePrivateProcMount() (_ *Handle, Err error) {
// Try to make a clone without using AT_RECURSIVE if we can. If this works,
// we can be sure there are no over-mounts and so if the root is valid then
// we're golden. Otherwise, we have to deal with over-mounts.
procRoot, err := fd.OpenTree(nil, "/proc", unix.OPEN_TREE_CLONE)
if err != nil || hookForcePrivateProcRootOpenTreeAtRecursive(procRoot) {
procRoot, err = fd.OpenTree(nil, "/proc", unix.OPEN_TREE_CLONE|unix.AT_RECURSIVE)
}
if err != nil {
return nil, fmt.Errorf("creating a detached procfs clone: %w", err)
}
defer func() {
if Err != nil {
_ = procRoot.Close()
}
}()
return newHandle(procRoot)
}
func privateProcRoot(subset bool) (*Handle, error) {
if !linux.HasNewMountAPI() || hookForceGetProcRootUnsafe() {
return nil, fmt.Errorf("new mount api: %w", unix.ENOTSUP)
}
// Try to create a new procfs mount from scratch if we can. This ensures we
// can get a procfs mount even if /proc is fake (for whatever reason).
procRoot, err := newPrivateProcMount(subset)
if err != nil || hookForcePrivateProcRootOpenTree(procRoot) {
// Try to clone /proc then...
procRoot, err = clonePrivateProcMount()
}
return procRoot, err
}
func unsafeHostProcRoot() (_ *Handle, Err error) {
procRoot, err := os.OpenFile("/proc", unix.O_PATH|unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
if err != nil {
return nil, err
}
defer func() {
if Err != nil {
_ = procRoot.Close()
}
}()
return newHandle(procRoot)
}
// Handle is a wrapper around an *os.File handle to "/proc", which can be used
// to do further procfs-related operations in a safe way.
type Handle struct {
Inner fd.Fd
// Does this handle have subset=pid set?
isSubset bool
}
func newHandle(procRoot fd.Fd) (*Handle, error) {
if err := verifyProcRoot(procRoot); err != nil {
// We cannot return a usable Handle, so close the underlying handle.
_ = procRoot.Close()
return nil, err
}
proc := &Handle{Inner: procRoot}
// With subset=pid we can be sure that /proc/uptime will not exist.
if err := fd.Faccessat(proc.Inner, "uptime", unix.F_OK, unix.AT_SYMLINK_NOFOLLOW); err != nil {
proc.isSubset = errors.Is(err, os.ErrNotExist)
}
return proc, nil
}
// Close closes the underlying file for the Handle.
func (proc *Handle) Close() error { return proc.Inner.Close() }
var getCachedProcRoot = gocompat.SyncOnceValue(func() *Handle {
procRoot, err := getProcRoot(true)
if err != nil {
return nil // just don't cache if we see an error
}
if !procRoot.isSubset {
return nil // we only cache verified subset=pid handles
}
// Disarm (*Handle).Close() to stop someone from accidentally closing
// the global handle.
procRoot.Inner = fd.NopCloser(procRoot.Inner)
return procRoot
})
// OpenProcRoot tries to open a "safer" handle to "/proc".
func OpenProcRoot() (*Handle, error) {
if proc := getCachedProcRoot(); proc != nil {
return proc, nil
}
return getProcRoot(true)
}
// OpenUnsafeProcRoot opens a handle to "/proc" without any overmounts or
// masked paths (but also without "subset=pid").
func OpenUnsafeProcRoot() (*Handle, error) { return getProcRoot(false) }
func getProcRoot(subset bool) (*Handle, error) {
proc, err := privateProcRoot(subset)
if err != nil {
// Fall back to using a /proc handle if making a private mount failed.
// If we have openat2, at least we can avoid some kinds of over-mount
// attacks, but without openat2 there's not much we can do.
proc, err = unsafeHostProcRoot()
}
return proc, err
}
var hasProcThreadSelf = gocompat.SyncOnceValue(func() bool {
return unix.Access("/proc/thread-self/", unix.F_OK) == nil
})
var errUnsafeProcfs = errors.New("unsafe procfs detected")
// lookup is a very minimal wrapper around [procfsLookupInRoot] which is
// intended to be called from the external API.
func (proc *Handle) lookup(subpath string) (*os.File, error) {
handle, err := procfsLookupInRoot(proc.Inner, subpath)
if err != nil {
return nil, err
}
return handle, nil
}
// procfsBase is an enum indicating the prefix of a subpath in operations
// involving [Handle]s.
type procfsBase string
const (
// ProcRoot refers to the root of the procfs (i.e., "/proc/<subpath>").
ProcRoot procfsBase = "/proc"
// ProcSelf refers to the current process' subdirectory (i.e.,
// "/proc/self/<subpath>").
ProcSelf procfsBase = "/proc/self"
// ProcThreadSelf refers to the current thread's subdirectory (i.e.,
// "/proc/thread-self/<subpath>"). In multi-threaded programs (i.e., all Go
// programs) where one thread has a different CLONE_FS, it is possible for
// "/proc/self" to point the wrong thread and so "/proc/thread-self" may be
// necessary. Note that on pre-3.17 kernels, "/proc/thread-self" doesn't
// exist and so a fallback will be used in that case.
ProcThreadSelf procfsBase = "/proc/thread-self"
// TODO: Switch to an interface setup so we can have a more type-safe
// version of ProcPid and remove the need to worry about invalid string
// values.
)
// prefix returns a prefix that can be used with the given [Handle].
func (base procfsBase) prefix(proc *Handle) (string, error) {
switch base {
case ProcRoot:
return ".", nil
case ProcSelf:
return "self", nil
case ProcThreadSelf:
threadSelf := "thread-self"
if !hasProcThreadSelf() || hookForceProcSelfTask() {
// Pre-3.17 kernels don't have /proc/thread-self, so do it
// manually.
threadSelf = "self/task/" + strconv.Itoa(unix.Gettid())
if err := fd.Faccessat(proc.Inner, threadSelf, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW); err != nil || hookForceProcSelf() {
// In this case, we are running in a pid namespace that doesn't
// match the /proc mount we have. This can happen inside runc.
//
// Unfortunately, there is no nice way to get the correct TID
// to use here because of the age of the kernel, so we have to
// just use /proc/self and hope that it works.
threadSelf = "self"
}
}
return threadSelf, nil
}
return "", fmt.Errorf("invalid procfs base %q", base)
}
// ProcThreadSelfCloser is a callback that needs to be called when you are done
// operating on an [os.File] fetched using [ProcThreadSelf].
//
// [os.File]: https://pkg.go.dev/os#File
type ProcThreadSelfCloser func()
// open is the core lookup operation for [Handle]. It returns a handle to
// "/proc/<base>/<subpath>". If the returned [ProcThreadSelfCloser] is non-nil,
// you should call it after you are done interacting with the returned handle.
//
// In general you should prefer to use the other helpers, as they remove
// the need to interact with [procfsBase] and do not return a nil
// [ProcThreadSelfCloser] for [procfsBase] values other than [ProcThreadSelf]
// where it is necessary.
func (proc *Handle) open(base procfsBase, subpath string) (_ *os.File, closer ProcThreadSelfCloser, Err error) {
prefix, err := base.prefix(proc)
if err != nil {
return nil, nil, err
}
subpath = prefix + "/" + subpath
switch base {
case ProcRoot:
file, err := proc.lookup(subpath)
if errors.Is(err, os.ErrNotExist) {
// The procfs handle in use might be a subset=pid one, which will
// result in spurious errors. In this case, just open a temporary
// unmasked procfs handle for this operation.
proc, err2 := OpenUnsafeProcRoot() // !subset=pid
if err2 != nil {
return nil, nil, err
}
defer proc.Close() //nolint:errcheck // close failures aren't critical here
file, err = proc.lookup(subpath)
}
return file, nil, err
case ProcSelf:
file, err := proc.lookup(subpath)
return file, nil, err
case ProcThreadSelf:
// We need to lock our thread until the caller is done with the handle
// because between getting the handle and using it we could get
// interrupted by the Go runtime and hit the case where the underlying
// thread is swapped out and the original thread is killed, resulting
// in pull-your-hair-out-hard-to-debug issues in the caller.
runtime.LockOSThread()
defer func() {
if Err != nil {
runtime.UnlockOSThread()
closer = nil
}
}()
file, err := proc.lookup(subpath)
return file, runtime.UnlockOSThread, err
}
// should never be reached
return nil, nil, fmt.Errorf("[internal error] invalid procfs base %q", base)
}
// OpenThreadSelf returns a handle to "/proc/thread-self/<subpath>" (or an
// equivalent handle on older kernels where "/proc/thread-self" doesn't exist).
// Once finished with the handle, you must call the returned closer function
// (runtime.UnlockOSThread). You must not pass the returned *os.File to other
// Go threads or use the handle after calling the closer.
func (proc *Handle) OpenThreadSelf(subpath string) (_ *os.File, _ ProcThreadSelfCloser, Err error) {
return proc.open(ProcThreadSelf, subpath)
}
// OpenSelf returns a handle to /proc/self/<subpath>.
func (proc *Handle) OpenSelf(subpath string) (*os.File, error) {
file, closer, err := proc.open(ProcSelf, subpath)
assert.Assert(closer == nil, "closer for ProcSelf must be nil")
return file, err
}
// OpenRoot returns a handle to /proc/<subpath>.
func (proc *Handle) OpenRoot(subpath string) (*os.File, error) {
file, closer, err := proc.open(ProcRoot, subpath)
assert.Assert(closer == nil, "closer for ProcRoot must be nil")
return file, err
}
// OpenPid returns a handle to /proc/$pid/<subpath> (pid can be a pid or tid).
// This is mainly intended for usage when operating on other processes.
func (proc *Handle) OpenPid(pid int, subpath string) (*os.File, error) {
return proc.OpenRoot(strconv.Itoa(pid) + "/" + subpath)
}
// checkSubpathOvermount checks if the dirfd and path combination is on the
// same mount as the given root.
func checkSubpathOvermount(root, dir fd.Fd, path string) error {
// Get the mntID of our procfs handle.
expectedMountID, err := fd.GetMountID(root, "")
if err != nil {
return fmt.Errorf("get root mount id: %w", err)
}
// Get the mntID of the target magic-link.
gotMountID, err := fd.GetMountID(dir, path)
if err != nil {
return fmt.Errorf("get subpath mount id: %w", err)
}
// As long as the directory mount is alive, even with wrapping mount IDs,
// we would expect to see a different mount ID here. (Of course, if we're
// using unsafeHostProcRoot() then an attacker could change this after we
// did this check.)
if expectedMountID != gotMountID {
return fmt.Errorf("%w: subpath %s/%s has an overmount obscuring the real path (mount ids do not match %d != %d)",
errUnsafeProcfs, dir.Name(), path, expectedMountID, gotMountID)
}
return nil
}
// Readlink performs a readlink operation on "/proc/<base>/<subpath>" in a way
// that should be free from race attacks. This is most commonly used to get the
// real path of a file by looking at "/proc/self/fd/$n", with the same safety
// protections as [Open] (as well as some additional checks against
// overmounts).
func (proc *Handle) Readlink(base procfsBase, subpath string) (string, error) {
link, closer, err := proc.open(base, subpath)
if closer != nil {
defer closer()
}
if err != nil {
return "", fmt.Errorf("get safe %s/%s handle: %w", base, subpath, err)
}
defer link.Close() //nolint:errcheck // close failures aren't critical here
// Try to detect if there is a mount on top of the magic-link. This should
// be safe in general (a mount on top of the path afterwards would not
// affect the handle itself) and will definitely be safe if we are using
// privateProcRoot() (at least since Linux 5.12[1], when anonymous mount
// namespaces were completely isolated from external mounts including mount
// propagation events).
//
// [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts
// onto targets that reside on shared mounts").
if err := checkSubpathOvermount(proc.Inner, link, ""); err != nil {
return "", fmt.Errorf("check safety of %s/%s magiclink: %w", base, subpath, err)
}
// readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See Linux commit
// 65cfc6722361 ("readlinkat(), fchownat() and fstatat() with empty
// relative pathnames").
return fd.Readlinkat(link, "")
}
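// A minimal, illustrative sketch of [Handle.Readlink] (proc is assumed to be
// a *Handle from [OpenProcRoot], and fd number 3 is hypothetical): resolve
// where a file descriptor currently points according to the kernel.
//
//	realPath, err := proc.Readlink(ProcThreadSelf, "fd/3")
//	if err != nil {
//		return err
//	}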
// ProcSelfFdReadlink gets the real path of the given file by looking at
// readlink(/proc/thread-self/fd/$n).
//
// This is just a wrapper around [Handle.Readlink].
func ProcSelfFdReadlink(fd fd.Fd) (string, error) {
procRoot, err := OpenProcRoot() // subset=pid
if err != nil {
return "", err
}
defer procRoot.Close() //nolint:errcheck // close failures aren't critical here
fdPath := "fd/" + strconv.Itoa(int(fd.Fd()))
return procRoot.Readlink(ProcThreadSelf, fdPath)
}
// CheckProcSelfFdPath returns whether the given file handle matches the
// expected path. (This is inherently racy.)
func CheckProcSelfFdPath(path string, file fd.Fd) error {
if err := fd.IsDeadInode(file); err != nil {
return err
}
actualPath, err := ProcSelfFdReadlink(file)
if err != nil {
return fmt.Errorf("get path of handle: %w", err)
}
if actualPath != path {
return fmt.Errorf("%w: handle path %q doesn't match expected path %q", internal.ErrPossibleBreakout, actualPath, path)
}
return nil
}
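// An illustrative sketch of how CheckProcSelfFdPath is typically used (dirFd
// and the expected path are hypothetical): after opening a component, verify
// that the handle still refers to the path we expect, to detect racy renames.
//
//	f, err := fd.Openat(dirFd, "etc", unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
//	if err != nil {
//		return err
//	}
//	if err := CheckProcSelfFdPath("/expected/root/etc", f); err != nil {
//		return err // possible breakout detected
//	}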
// ReopenFd takes an existing file descriptor and "re-opens" it through
// /proc/thread-self/fd/<fd>. This allows for O_PATH file descriptors to be
// upgraded to regular file descriptors, as well as changing the open mode of a
// regular file descriptor. Some filesystems have unique handling of open(2)
// which makes this incredibly useful (such as /dev/ptmx).
func ReopenFd(handle fd.Fd, flags int) (*os.File, error) {
procRoot, err := OpenProcRoot() // subset=pid
if err != nil {
return nil, err
}
defer procRoot.Close() //nolint:errcheck // close failures aren't critical here
// We can't operate on /proc/thread-self/fd/$n directly when doing a
// re-open, so we need to open /proc/thread-self/fd and then open a single
// final component.
procFdDir, closer, err := procRoot.OpenThreadSelf("fd/")
if err != nil {
return nil, fmt.Errorf("get safe /proc/thread-self/fd handle: %w", err)
}
defer procFdDir.Close() //nolint:errcheck // close failures aren't critical here
defer closer()
// Try to detect if there is a mount on top of the magic-link we are about
// to open. If we are using unsafeHostProcRoot(), this could change after
// we check it (and there's nothing we can do about that) but for
// privateProcRoot() this should be guaranteed to be safe (at least since
// Linux 5.12[1], when anonymous mount namespaces were completely isolated
// from external mounts including mount propagation events).
//
// [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts
// onto targets that reside on shared mounts").
fdStr := strconv.Itoa(int(handle.Fd()))
if err := checkSubpathOvermount(procRoot.Inner, procFdDir, fdStr); err != nil {
return nil, fmt.Errorf("check safety of /proc/thread-self/fd/%s magiclink: %w", fdStr, err)
}
flags |= unix.O_CLOEXEC
// Rather than just wrapping fd.Openat, open-code it so we can copy
// handle.Name().
reopenFd, err := unix.Openat(int(procFdDir.Fd()), fdStr, flags, 0)
if err != nil {
return nil, fmt.Errorf("reopen fd %d: %w", handle.Fd(), err)
}
return os.NewFile(uintptr(reopenFd), handle.Name()), nil
}
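// A minimal usage sketch of ReopenFd (illustrative only; the path below is
// hypothetical): upgrade an O_PATH handle to a file descriptor that can
// actually be read from.
//
//	pathHandle, err := os.OpenFile("/etc/hostname", unix.O_PATH|unix.O_CLOEXEC, 0)
//	if err != nil {
//		return err
//	}
//	defer pathHandle.Close()
//	f, err := ReopenFd(pathHandle, unix.O_RDONLY)
//	if err != nil {
//		return err
//	}
//	defer f.Close()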
// Test hooks used in the procfs tests to verify that the fallback logic works.
// See testing_mocks_linux_test.go and procfs_linux_test.go for more details.
var (
hookForcePrivateProcRootOpenTree = hookDummyFile
hookForcePrivateProcRootOpenTreeAtRecursive = hookDummyFile
hookForceGetProcRootUnsafe = hookDummy
hookForceProcSelfTask = hookDummy
hookForceProcSelf = hookDummy
)
func hookDummy() bool { return false }
func hookDummyFile(_ io.Closer) bool { return false }

View File

@@ -0,0 +1,222 @@
// SPDX-License-Identifier: MPL-2.0
//go:build linux
// Copyright (C) 2024-2025 Aleksa Sarai <cyphar@cyphar.com>
// Copyright (C) 2024-2025 SUSE LLC
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
// This code is adapted to be a minimal version of the libpathrs proc resolver
// <https://github.com/opensuse/libpathrs/blob/v0.1.3/src/resolvers/procfs.rs>.
// As we only need O_PATH|O_NOFOLLOW support, this is not too much to port.
package procfs
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"golang.org/x/sys/unix"
"github.com/cyphar/filepath-securejoin/internal/consts"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux"
)
// procfsLookupInRoot is a stripped down version of completeLookupInRoot,
// entirely designed to support the very small set of features necessary to
// make procfs handling work. Unlike completeLookupInRoot, we always have
// O_PATH|O_NOFOLLOW behaviour for trailing symlinks.
//
// The main restrictions are:
//
// - ".." is not supported (as it requires either os.Root-style replays,
// which is more bug-prone; or procfs verification, which is not possible
// due to re-entrancy issues).
// - Absolute symlinks for the same reason (and all absolute symlinks in
// procfs are magic-links, which we want to skip anyway).
// - Mount-point crossings are rejected if statx is supported (via
//   checkSubpathOvermount); overmounts are the main attack of concern
//   against /proc.
// - Partial lookups are not supported, so the symlink stack is not needed.
// - Trailing slash special handling is not necessary in most cases (if we
//   are operating on procfs, it's usually with programmer-controlled strings
// that will then be re-opened), so we skip it since whatever re-opens it
// can deal with it. It's a creature comfort anyway.
//
// If the system supports openat2(), this is implemented using equivalent flags
// (RESOLVE_BENEATH | RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS).
func procfsLookupInRoot(procRoot fd.Fd, unsafePath string) (Handle *os.File, _ error) {
unsafePath = filepath.ToSlash(unsafePath) // noop
// Make sure that an empty unsafe path still returns something sane, even
// with openat2 (which doesn't have AT_EMPTY_PATH semantics yet).
if unsafePath == "" {
unsafePath = "."
}
// This is already checked by getProcRoot, but make sure here since the
// core security of this lookup is based on this assumption.
if err := verifyProcRoot(procRoot); err != nil {
return nil, err
}
if linux.HasOpenat2() {
// We prefer being able to use RESOLVE_NO_XDEV if we can, to be
// absolutely sure we are operating on a clean /proc handle that
// doesn't have any cheeky overmounts that could trick us (including
// symlink mounts on top of /proc/thread-self). RESOLVE_BENEATH isn't
// strictly needed, but just use it since we have it.
//
// NOTE: /proc/self is technically a magic-link (the contents of the
// symlink are generated dynamically), but it doesn't use
// nd_jump_link() so RESOLVE_NO_MAGICLINKS allows it.
//
// TODO: It would be nice to have RESOLVE_NO_DOTDOT, purely for
// self-consistency with the backup O_PATH resolver.
handle, err := fd.Openat2(procRoot, unsafePath, &unix.OpenHow{
Flags: unix.O_PATH | unix.O_NOFOLLOW | unix.O_CLOEXEC,
Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_MAGICLINKS,
})
if err != nil {
// TODO: Once we bump the minimum Go version to 1.20, we can use
// multiple %w verbs for this wrapping. For now we need to use a
// compatibility shim for older Go versions.
// err = fmt.Errorf("%w: %w", errUnsafeProcfs, err)
return nil, gocompat.WrapBaseError(err, errUnsafeProcfs)
}
return handle, nil
}
// To mirror openat2(RESOLVE_BENEATH), we need to return an error if the
// path is absolute.
if path.IsAbs(unsafePath) {
return nil, fmt.Errorf("%w: cannot resolve absolute paths in procfs resolver", internal.ErrPossibleBreakout)
}
currentDir, err := fd.Dup(procRoot)
if err != nil {
return nil, fmt.Errorf("clone root fd: %w", err)
}
defer func() {
// If a handle is not returned, close the internal handle.
if Handle == nil {
_ = currentDir.Close()
}
}()
var (
linksWalked int
currentPath string
remainingPath = unsafePath
)
for remainingPath != "" {
// Get the next path component.
var part string
if i := strings.IndexByte(remainingPath, '/'); i == -1 {
part, remainingPath = remainingPath, ""
} else {
part, remainingPath = remainingPath[:i], remainingPath[i+1:]
}
if part == "" {
// no-op component, but treat it the same as "."
part = "."
}
if part == ".." {
// not permitted
return nil, fmt.Errorf("%w: cannot walk into '..' in procfs resolver", internal.ErrPossibleBreakout)
}
// Apply the component lexically to the path we are building.
// currentPath does not contain any symlinks, and we are lexically
// dealing with a single component, so it's okay to do a filepath.Clean
// here. (Not to mention that ".." isn't allowed.)
nextPath := path.Join("/", currentPath, part)
// If we logically hit the root, just clone the root rather than
// opening the part and doing all of the other checks.
if nextPath == "/" {
// Jump to root.
rootClone, err := fd.Dup(procRoot)
if err != nil {
return nil, fmt.Errorf("clone root fd: %w", err)
}
_ = currentDir.Close()
currentDir = rootClone
currentPath = nextPath
continue
}
// Try to open the next component.
nextDir, err := fd.Openat(currentDir, part, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
if err != nil {
return nil, err
}
// Make sure we are still on procfs and haven't crossed mounts.
if err := verifyProcHandle(nextDir); err != nil {
_ = nextDir.Close()
return nil, fmt.Errorf("check %q component is on procfs: %w", part, err)
}
if err := checkSubpathOvermount(procRoot, nextDir, ""); err != nil {
_ = nextDir.Close()
return nil, fmt.Errorf("check %q component is not overmounted: %w", part, err)
}
// We are emulating O_PATH|O_NOFOLLOW, so we only need to traverse into
// trailing symlinks if we are not the final component. Otherwise we
// can just return the currentDir.
if remainingPath != "" {
st, err := nextDir.Stat()
if err != nil {
_ = nextDir.Close()
return nil, fmt.Errorf("stat component %q: %w", part, err)
}
if st.Mode()&os.ModeType == os.ModeSymlink {
// readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See
// Linux commit 65cfc6722361 ("readlinkat(), fchownat() and
// fstatat() with empty relative pathnames").
linkDest, err := fd.Readlinkat(nextDir, "")
// We don't need the handle anymore.
_ = nextDir.Close()
if err != nil {
return nil, err
}
linksWalked++
if linksWalked > consts.MaxSymlinkLimit {
return nil, &os.PathError{Op: "securejoin.procfsLookupInRoot", Path: "/proc/" + unsafePath, Err: unix.ELOOP}
}
// Update our logical remaining path.
remainingPath = linkDest + "/" + remainingPath
// Absolute symlinks are probably magic-links, so we reject them.
if path.IsAbs(linkDest) {
return nil, fmt.Errorf("%w: cannot jump to / in procfs resolver -- possible magiclink", internal.ErrPossibleBreakout)
}
continue
}
}
// Walk into the next component.
_ = currentDir.Close()
currentDir = nextDir
currentPath = nextPath
}
// One final sanity-check.
if err := verifyProcHandle(currentDir); err != nil {
return nil, fmt.Errorf("check final handle is on procfs: %w", err)
}
if err := checkSubpathOvermount(procRoot, currentDir, ""); err != nil {
return nil, fmt.Errorf("check final handle is not overmounted: %w", err)
}
return currentDir, nil
}
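// An illustrative sketch of the resolver (the subpath is just an example):
// intermediate relative symlinks such as "self" are followed, the final
// component is opened O_PATH|O_NOFOLLOW, and ".." components or mount-point
// crossings are rejected.
//
//	handle, err := procfsLookupInRoot(procRoot, "self/status")
//	if err != nil {
//		return err
//	}
//	defer handle.Close()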

View File

@@ -1,10 +1,15 @@
// SPDX-License-Identifier: MPL-2.0
//go:build linux
// Copyright (C) 2024 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Copyright (C) 2024-2025 Aleksa Sarai <cyphar@cyphar.com>
// Copyright (C) 2024-2025 SUSE LLC
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
package securejoin
package pathrs
import (
"errors"
@@ -15,6 +20,12 @@ import (
"strings"
"golang.org/x/sys/unix"
"github.com/cyphar/filepath-securejoin/internal/consts"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs"
)
type symlinkStackEntry struct {
@@ -112,12 +123,12 @@ func (s *symlinkStack) push(dir *os.File, remainingPath, linkTarget string) erro
return nil
}
// Split the link target and clean up any "" parts.
linkTargetParts := slices_DeleteFunc(
linkTargetParts := gocompat.SlicesDeleteFunc(
strings.Split(linkTarget, "/"),
func(part string) bool { return part == "" || part == "." })
// Copy the directory so the caller doesn't close our copy.
dirCopy, err := dupFile(dir)
dirCopy, err := fd.Dup(dir)
if err != nil {
return err
}
@@ -159,11 +170,11 @@ func (s *symlinkStack) PopTopSymlink() (*os.File, string, bool) {
// within the provided root (a-la RESOLVE_IN_ROOT) and opens the final existing
// component of the requested path, returning a file handle to the final
// existing component and a string containing the remaining path components.
func partialLookupInRoot(root *os.File, unsafePath string) (*os.File, string, error) {
func partialLookupInRoot(root fd.Fd, unsafePath string) (*os.File, string, error) {
return lookupInRoot(root, unsafePath, true)
}
func completeLookupInRoot(root *os.File, unsafePath string) (*os.File, error) {
func completeLookupInRoot(root fd.Fd, unsafePath string) (*os.File, error) {
handle, remainingPath, err := lookupInRoot(root, unsafePath, false)
if remainingPath != "" && err == nil {
// should never happen
@@ -174,7 +185,7 @@ func completeLookupInRoot(root *os.File, unsafePath string) (*os.File, error) {
return handle, err
}
func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.File, _ string, _ error) {
func lookupInRoot(root fd.Fd, unsafePath string, partial bool) (Handle *os.File, _ string, _ error) {
unsafePath = filepath.ToSlash(unsafePath) // noop
// This is very similar to SecureJoin, except that we operate on the
@@ -182,20 +193,20 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi
// managed open, along with the remaining path components not opened.
// Try to use openat2 if possible.
if hasOpenat2() {
if linux.HasOpenat2() {
return lookupOpenat2(root, unsafePath, partial)
}
// Get the "actual" root path from /proc/self/fd. This is necessary if the
// root is some magic-link like /proc/$pid/root, in which case we want to
// make sure when we do checkProcSelfFdPath that we are using the correct
// root path.
logicalRootPath, err := procSelfFdReadlink(root)
// make sure when we do procfs.CheckProcSelfFdPath that we are using the
// correct root path.
logicalRootPath, err := procfs.ProcSelfFdReadlink(root)
if err != nil {
return nil, "", fmt.Errorf("get real root path: %w", err)
}
currentDir, err := dupFile(root)
currentDir, err := fd.Dup(root)
if err != nil {
return nil, "", fmt.Errorf("clone root fd: %w", err)
}
@@ -260,7 +271,7 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi
return nil, "", fmt.Errorf("walking into root with part %q failed: %w", part, err)
}
// Jump to root.
rootClone, err := dupFile(root)
rootClone, err := fd.Dup(root)
if err != nil {
return nil, "", fmt.Errorf("clone root fd: %w", err)
}
@@ -271,21 +282,21 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi
}
// Try to open the next component.
nextDir, err := openatFile(currentDir, part, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
switch {
case err == nil:
nextDir, err := fd.Openat(currentDir, part, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
switch err {
case nil:
st, err := nextDir.Stat()
if err != nil {
_ = nextDir.Close()
return nil, "", fmt.Errorf("stat component %q: %w", part, err)
}
switch st.Mode() & os.ModeType {
switch st.Mode() & os.ModeType { //nolint:exhaustive // just a glorified if statement
case os.ModeSymlink:
// readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See
// Linux commit 65cfc6722361 ("readlinkat(), fchownat() and
// fstatat() with empty relative pathnames").
linkDest, err := readlinkatFile(nextDir, "")
linkDest, err := fd.Readlinkat(nextDir, "")
// We don't need the handle anymore.
_ = nextDir.Close()
if err != nil {
@@ -293,7 +304,7 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi
}
linksWalked++
if linksWalked > maxSymlinkLimit {
if linksWalked > consts.MaxSymlinkLimit {
return nil, "", &os.PathError{Op: "securejoin.lookupInRoot", Path: logicalRootPath + "/" + unsafePath, Err: unix.ELOOP}
}
@@ -307,7 +318,7 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi
// Absolute symlinks reset any work we've already done.
if path.IsAbs(linkDest) {
// Jump to root.
rootClone, err := dupFile(root)
rootClone, err := fd.Dup(root)
if err != nil {
return nil, "", fmt.Errorf("clone root fd: %w", err)
}
@@ -335,12 +346,12 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi
// rename or mount on the system.
if part == ".." {
// Make sure the root hasn't moved.
if err := checkProcSelfFdPath(logicalRootPath, root); err != nil {
if err := procfs.CheckProcSelfFdPath(logicalRootPath, root); err != nil {
return nil, "", fmt.Errorf("root path moved during lookup: %w", err)
}
// Make sure the path is what we expect.
fullPath := logicalRootPath + nextPath
if err := checkProcSelfFdPath(fullPath, currentDir); err != nil {
if err := procfs.CheckProcSelfFdPath(fullPath, currentDir); err != nil {
return nil, "", fmt.Errorf("walking into %q had unexpected result: %w", part, err)
}
}
@@ -371,7 +382,7 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi
// context of openat2, a trailing slash and a trailing "/." are completely
// equivalent.
if strings.HasSuffix(unsafePath, "/") {
nextDir, err := openatFile(currentDir, ".", unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
nextDir, err := fd.Openat(currentDir, ".", unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
if err != nil {
if !partial {
_ = currentDir.Close()

View File

@@ -1,10 +1,15 @@
// SPDX-License-Identifier: MPL-2.0
//go:build linux
// Copyright (C) 2024 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Copyright (C) 2024-2025 Aleksa Sarai <cyphar@cyphar.com>
// Copyright (C) 2024-2025 SUSE LLC
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
package securejoin
package pathrs
import (
"errors"
@@ -14,12 +19,13 @@ import (
"strings"
"golang.org/x/sys/unix"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux"
)
var (
errInvalidMode = errors.New("invalid permission mode")
errPossibleAttack = errors.New("possible attack detected")
)
var errInvalidMode = errors.New("invalid permission mode")
// modePermExt is like os.ModePerm except that it also includes the set[ug]id
// and sticky bits.
@@ -66,6 +72,8 @@ func toUnixMode(mode os.FileMode) (uint32, error) {
// a brand new lookup of unsafePath (such as with [SecureJoin] or openat2) after
// doing [MkdirAll]. If you intend to open the directory after creating it, you
// should use MkdirAllHandle.
//
// [SecureJoin]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin#SecureJoin
func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.File, Err error) {
unixMode, err := toUnixMode(mode)
if err != nil {
@@ -102,7 +110,7 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.F
//
// This is mostly a quality-of-life check, because mkdir will simply fail
// later if the attacker deletes the tree after this check.
if err := isDeadInode(currentDir); err != nil {
if err := fd.IsDeadInode(currentDir); err != nil {
return nil, fmt.Errorf("finding existing subpath of %q: %w", unsafePath, err)
}
@@ -113,13 +121,13 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.F
return nil, fmt.Errorf("cannot create subdirectories in %q: %w", currentDir.Name(), unix.ENOTDIR)
} else if err != nil {
return nil, fmt.Errorf("re-opening handle to %q: %w", currentDir.Name(), err)
} else {
} else { //nolint:revive // indent-error-flow lint doesn't make sense here
_ = currentDir.Close()
currentDir = reopenDir
}
remainingParts := strings.Split(remainingPath, string(filepath.Separator))
if slices_Contains(remainingParts, "..") {
if gocompat.SlicesContains(remainingParts, "..") {
// The path contained ".." components after the end of the "real"
// components. We could try to safely resolve ".." here but that would
// add a bunch of extra logic for something that it's not clear even
@@ -150,12 +158,12 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.F
if err := unix.Mkdirat(int(currentDir.Fd()), part, unixMode); err != nil && !errors.Is(err, unix.EEXIST) {
err = &os.PathError{Op: "mkdirat", Path: currentDir.Name() + "/" + part, Err: err}
// Make the error a bit nicer if the directory is dead.
if deadErr := isDeadInode(currentDir); deadErr != nil {
if deadErr := fd.IsDeadInode(currentDir); deadErr != nil {
// TODO: Once we bump the minimum Go version to 1.20, we can use
// multiple %w verbs for this wrapping. For now we need to use a
// compatibility shim for older Go versions.
//err = fmt.Errorf("%w (%w)", err, deadErr)
err = wrapBaseError(err, deadErr)
// err = fmt.Errorf("%w (%w)", err, deadErr)
err = gocompat.WrapBaseError(err, deadErr)
}
return nil, err
}
@@ -163,13 +171,13 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.F
// Get a handle to the next component. O_DIRECTORY means we don't need
// to use O_PATH.
var nextDir *os.File
if hasOpenat2() {
nextDir, err = openat2File(currentDir, part, &unix.OpenHow{
if linux.HasOpenat2() {
nextDir, err = openat2(currentDir, part, &unix.OpenHow{
Flags: unix.O_NOFOLLOW | unix.O_DIRECTORY | unix.O_CLOEXEC,
Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_NO_XDEV,
})
} else {
nextDir, err = openatFile(currentDir, part, unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
nextDir, err = fd.Openat(currentDir, part, unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
}
if err != nil {
return nil, err
@@ -220,12 +228,14 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.F
// If you plan to open the directory after you have created it or want to use
// an open directory handle as the root, you should use [MkdirAllHandle] instead.
// This function is a wrapper around [MkdirAllHandle].
//
// [SecureJoin]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin#SecureJoin
func MkdirAll(root, unsafePath string, mode os.FileMode) error {
rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
if err != nil {
return err
}
defer rootDir.Close()
defer rootDir.Close() //nolint:errcheck // close failures aren't critical here
f, err := MkdirAllHandle(rootDir, unsafePath, mode)
if err != nil {

View File

@@ -1,17 +1,22 @@
// SPDX-License-Identifier: MPL-2.0
//go:build linux
// Copyright (C) 2024 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Copyright (C) 2024-2025 Aleksa Sarai <cyphar@cyphar.com>
// Copyright (C) 2024-2025 SUSE LLC
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
package securejoin
package pathrs
import (
"fmt"
"os"
"strconv"
"golang.org/x/sys/unix"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs"
)
// OpenatInRoot is equivalent to [OpenInRoot], except that the root is provided
@@ -40,12 +45,14 @@ func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) {
// disconnected TTY that could cause a DoS, or some other issue). In order to
// use the returned handle, you can "upgrade" it to a proper handle using
// [Reopen].
//
// [SecureJoin]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin#SecureJoin
func OpenInRoot(root, unsafePath string) (*os.File, error) {
rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
if err != nil {
return nil, err
}
defer rootDir.Close()
defer rootDir.Close() //nolint:errcheck // close failures aren't critical here
return OpenatInRoot(rootDir, unsafePath)
}
@@ -63,41 +70,5 @@ func OpenInRoot(root, unsafePath string) (*os.File, error) {
//
// [CVE-2019-19921]: https://github.com/advisories/GHSA-fh74-hm69-rqjw
func Reopen(handle *os.File, flags int) (*os.File, error) {
procRoot, err := getProcRoot()
if err != nil {
return nil, err
}
// We can't operate on /proc/thread-self/fd/$n directly when doing a
// re-open, so we need to open /proc/thread-self/fd and then open a single
// final component.
procFdDir, closer, err := procThreadSelf(procRoot, "fd/")
if err != nil {
return nil, fmt.Errorf("get safe /proc/thread-self/fd handle: %w", err)
}
defer procFdDir.Close()
defer closer()
// Try to detect if there is a mount on top of the magic-link we are about
// to open. If we are using unsafeHostProcRoot(), this could change after
// we check it (and there's nothing we can do about that) but for
// privateProcRoot() this should be guaranteed to be safe (at least since
// Linux 5.12[1], when anonymous mount namespaces were completely isolated
// from external mounts including mount propagation events).
//
// [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts
// onto targets that reside on shared mounts").
fdStr := strconv.Itoa(int(handle.Fd()))
if err := checkSymlinkOvermount(procRoot, procFdDir, fdStr); err != nil {
return nil, fmt.Errorf("check safety of /proc/thread-self/fd/%s magiclink: %w", fdStr, err)
}
flags |= unix.O_CLOEXEC
// Rather than just wrapping openatFile, open-code it so we can copy
// handle.Name().
reopenFd, err := unix.Openat(int(procFdDir.Fd()), fdStr, flags, 0)
if err != nil {
return nil, fmt.Errorf("reopen fd %d: %w", handle.Fd(), err)
}
return os.NewFile(uintptr(reopenFd), handle.Name()), nil
return procfs.ReopenFd(handle, flags)
}
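// A minimal usage sketch combining OpenInRoot and Reopen (illustrative only;
// the root and path are hypothetical): resolve an untrusted path inside a
// container root, then upgrade the returned O_PATH handle for reading.
//
//	handle, err := OpenInRoot("/srv/rootfs", "etc/passwd")
//	if err != nil {
//		return err
//	}
//	defer handle.Close()
//	f, err := Reopen(handle, unix.O_RDONLY)
//	if err != nil {
//		return err
//	}
//	defer f.Close()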

View File

@@ -0,0 +1,101 @@
// SPDX-License-Identifier: MPL-2.0
//go:build linux
// Copyright (C) 2024-2025 Aleksa Sarai <cyphar@cyphar.com>
// Copyright (C) 2024-2025 SUSE LLC
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
package pathrs
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"golang.org/x/sys/unix"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd"
"github.com/cyphar/filepath-securejoin/pathrs-lite/procfs"
)
func openat2(dir fd.Fd, path string, how *unix.OpenHow) (*os.File, error) {
file, err := fd.Openat2(dir, path, how)
if err != nil {
return nil, err
}
// If we are using RESOLVE_IN_ROOT, the name we generated may be wrong.
if how.Resolve&unix.RESOLVE_IN_ROOT == unix.RESOLVE_IN_ROOT {
if actualPath, err := procfs.ProcSelfFdReadlink(file); err == nil {
// TODO: Ideally we would not need to dup the fd, but you cannot
// easily just swap an *os.File with one from the same fd
// (the GC will close the old one, and you cannot clear the
// finaliser easily because it is associated with an internal
// field of *os.File not *os.File itself).
newFile, err := fd.DupWithName(file, actualPath)
if err != nil {
return nil, err
}
file = newFile
}
}
return file, nil
}
func lookupOpenat2(root fd.Fd, unsafePath string, partial bool) (*os.File, string, error) {
if !partial {
file, err := openat2(root, unsafePath, &unix.OpenHow{
Flags: unix.O_PATH | unix.O_CLOEXEC,
Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS,
})
return file, "", err
}
return partialLookupOpenat2(root, unsafePath)
}
// partialLookupOpenat2 is an alternative implementation of
// partialLookupInRoot, using openat2(RESOLVE_IN_ROOT) to more safely get a
// handle to the deepest existing child of the requested path within the root.
func partialLookupOpenat2(root fd.Fd, unsafePath string) (*os.File, string, error) {
// TODO: Implement this as a git-bisect-like binary search.
unsafePath = filepath.ToSlash(unsafePath) // noop
endIdx := len(unsafePath)
var lastError error
for endIdx > 0 {
subpath := unsafePath[:endIdx]
handle, err := openat2(root, subpath, &unix.OpenHow{
Flags: unix.O_PATH | unix.O_CLOEXEC,
Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS,
})
if err == nil {
// Jump over the slash if we have a non-"" remainingPath.
if endIdx < len(unsafePath) {
endIdx++
}
// We found a subpath!
return handle, unsafePath[endIdx:], lastError
}
if errors.Is(err, unix.ENOENT) || errors.Is(err, unix.ENOTDIR) {
// That path doesn't exist, let's try the next directory up.
endIdx = strings.LastIndexByte(subpath, '/')
lastError = err
continue
}
return nil, "", fmt.Errorf("open subpath: %w", err)
}
// If we couldn't open anything, the whole subpath is missing. Return a
// copy of the root fd so that the caller doesn't close this one by
// accident.
rootClone, err := fd.Dup(root)
if err != nil {
return nil, "", err
}
return rootClone, unsafePath, lastError
}
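// An illustrative example of the partial-lookup contract (hypothetical
// directory layout): if only "a" exists under the root, then
//
//	handle, remaining, err := partialLookupOpenat2(root, "a/b/c")
//
// returns a handle to "a", remaining == "b/c", and err holding the
// ENOENT/ENOTDIR error from the last failed probe ("a/b").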

View File

@@ -0,0 +1,157 @@
// SPDX-License-Identifier: MPL-2.0
//go:build linux
// Copyright (C) 2024-2025 Aleksa Sarai <cyphar@cyphar.com>
// Copyright (C) 2024-2025 SUSE LLC
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
// Package procfs provides a safe API for operating on /proc on Linux.
package procfs
import (
"os"
"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs"
)
// This package mostly just wraps internal/procfs APIs. This is necessary
// because we are forced to export some things from internal/procfs in order to
// avoid some dependency cycle issues, but we don't want users to see or use
// them.
// ProcThreadSelfCloser is a callback that needs to be called when you are done
// operating on an [os.File] fetched using [Handle.OpenThreadSelf].
//
// [os.File]: https://pkg.go.dev/os#File
type ProcThreadSelfCloser = procfs.ProcThreadSelfCloser
// Handle is a wrapper around an *os.File handle to "/proc", which can be used
// to do further procfs-related operations in a safe way.
type Handle struct {
inner *procfs.Handle
}
// Close closes the resources associated with this [Handle]. Note that if this
// [Handle] was created with [OpenProcRoot], on some kernels the underlying
// procfs handle is cached and so this Close operation may be a no-op. However,
// you should always call Close on [Handle]s once you are done with them.
func (proc *Handle) Close() error { return proc.inner.Close() }
// OpenProcRoot tries to open a "safer" handle to "/proc" (i.e., one with the
// "subset=pid" mount option applied, available from Linux 5.8). Unless you
// plan to do many [Handle.OpenRoot] operations, you should prefer to use
// this over [OpenUnsafeProcRoot], which is far more dangerous to keep open.
//
// If a safe handle cannot be opened, OpenProcRoot will fall back to opening a
// regular "/proc" handle.
//
// Note that using [Handle.OpenRoot] will still work with handles returned by
// this function. If a subpath cannot be operated on with a safe "/proc"
// handle, then [OpenUnsafeProcRoot] will be called internally and a temporary
// unsafe handle will be used.
func OpenProcRoot() (*Handle, error) {
proc, err := procfs.OpenProcRoot()
if err != nil {
return nil, err
}
return &Handle{inner: proc}, nil
}
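// A minimal usage sketch from a caller's perspective (illustrative only;
// "status" is just an example subpath under /proc/self):
//
//	proc, err := procfs.OpenProcRoot()
//	if err != nil {
//		return err
//	}
//	defer proc.Close()
//	f, err := proc.OpenSelf("status")
//	if err != nil {
//		return err
//	}
//	defer f.Close()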
// OpenUnsafeProcRoot opens a handle to "/proc" without any overmounts or
// masked paths. You must be extremely careful to make sure this handle is
// never leaked to a container and that your program cannot be tricked into
// writing to arbitrary paths within it.
//
// This is not necessary if you just wish to use [Handle.OpenRoot], as handles
// returned by [OpenProcRoot] will fall back to using a *temporary* unsafe
// handle in that case. You should only really use this if you need to do many
// operations with [Handle.OpenRoot] and the performance overhead of making
// many procfs handles is an issue. If you do use OpenUnsafeProcRoot, you
// should make sure to close the handle as soon as possible to avoid
// known-fd-number attacks.
func OpenUnsafeProcRoot() (*Handle, error) {
proc, err := procfs.OpenUnsafeProcRoot()
if err != nil {
return nil, err
}
return &Handle{inner: proc}, nil
}
// OpenThreadSelf returns a handle to "/proc/thread-self/<subpath>" (or an
// equivalent handle on older kernels where "/proc/thread-self" doesn't exist).
// Once finished with the handle, you must call the returned closer function
// ([runtime.UnlockOSThread]). You must not pass the returned *os.File to other
// Go threads or use the handle after calling the closer.
//
// [runtime.UnlockOSThread]: https://pkg.go.dev/runtime#UnlockOSThread
func (proc *Handle) OpenThreadSelf(subpath string) (*os.File, ProcThreadSelfCloser, error) {
return proc.inner.OpenThreadSelf(subpath)
}
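// A minimal usage sketch (illustrative only; proc is assumed to be a *Handle
// from [OpenProcRoot], and "attr/current" is just an example per-thread
// file):
//
//	f, closer, err := proc.OpenThreadSelf("attr/current")
//	if err != nil {
//		return err
//	}
//	defer closer()  // unlocks the OS thread (runs last)
//	defer f.Close() // runs before closer()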
// OpenSelf returns a handle to /proc/self/<subpath>.
//
// Note that in Go programs with non-homogenous threads, this may result in
// spurious errors. If you are monkeying around with APIs that are
// thread-specific, you probably want to use [Handle.OpenThreadSelf] instead
// which will guarantee that the handle refers to the same thread as the caller
// is executing on.
func (proc *Handle) OpenSelf(subpath string) (*os.File, error) {
return proc.inner.OpenSelf(subpath)
}
// OpenRoot returns a handle to /proc/<subpath>.
//
// You should only use this when you need to operate on global procfs files
// (such as sysctls in /proc/sys). Unlike [Handle.OpenThreadSelf],
// [Handle.OpenSelf], and [Handle.OpenPid], the procfs handle used internally
// for this operation will never use "subset=pid", which makes it a more juicy
// target for [CVE-2024-21626]-style attacks (and doing something like opening
// a directory with OpenRoot effectively leaks [OpenUnsafeProcRoot] as long as
// the file descriptor is open).
//
// [CVE-2024-21626]: https://github.com/opencontainers/runc/security/advisories/GHSA-xr7r-f8xq-vfvv
func (proc *Handle) OpenRoot(subpath string) (*os.File, error) {
return proc.inner.OpenRoot(subpath)
}
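// A minimal usage sketch (illustrative only; proc is assumed to be a *Handle
// from [OpenProcRoot], and the sysctl path is just an example). Close the
// returned handle as soon as possible to limit the exposure described above.
//
//	f, err := proc.OpenRoot("sys/kernel/ostype")
//	if err != nil {
//		return err
//	}
//	defer f.Close()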
// OpenPid returns a handle to /proc/$pid/<subpath> (pid can be a pid or tid).
// This is mainly intended for usage when operating on other processes.
//
// You should not use this for the current thread, as special handling is
// needed for /proc/thread-self (or /proc/self/task/<tid>) when dealing with
// goroutine scheduling -- use [Handle.OpenThreadSelf] instead.
//
// To refer to the current thread-group, you should prefer
// [Handle.OpenSelf] to passing os.Getpid as the pid argument.
func (proc *Handle) OpenPid(pid int, subpath string) (*os.File, error) {
return proc.inner.OpenPid(pid, subpath)
}
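// A minimal usage sketch (illustrative only; proc is assumed to be a *Handle
// from [OpenProcRoot] and pid is a hypothetical process ID):
//
//	f, err := proc.OpenPid(pid, "cmdline")
//	if err != nil {
//		return err
//	}
//	defer f.Close()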
// ProcSelfFdReadlink gets the real path of the given file by looking at
// /proc/self/fd/<fd> with [readlink]. It is effectively just shorthand for
// something along the lines of:
//
// proc, err := procfs.OpenProcRoot()
// if err != nil {
// return err
// }
// link, closer, err := proc.OpenThreadSelf(fmt.Sprintf("fd/%d", f.Fd()))
// if err != nil {
// return err
// }
// defer closer()
// defer link.Close()
// var buf [4096]byte
// n, err := unix.Readlinkat(int(link.Fd()), "", buf[:])
// if err != nil {
// return err
// }
// pathname := buf[:n]
//
// [readlink]: https://pkg.go.dev/golang.org/x/sys/unix#Readlinkat
func ProcSelfFdReadlink(f *os.File) (string, error) {
return procfs.ProcSelfFdReadlink(f)
}

View File

@@ -1,452 +0,0 @@
//go:build linux
// Copyright (C) 2024 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package securejoin
import (
"errors"
"fmt"
"os"
"runtime"
"strconv"
"golang.org/x/sys/unix"
)
func fstat(f *os.File) (unix.Stat_t, error) {
var stat unix.Stat_t
if err := unix.Fstat(int(f.Fd()), &stat); err != nil {
return stat, &os.PathError{Op: "fstat", Path: f.Name(), Err: err}
}
return stat, nil
}
func fstatfs(f *os.File) (unix.Statfs_t, error) {
var statfs unix.Statfs_t
if err := unix.Fstatfs(int(f.Fd()), &statfs); err != nil {
return statfs, &os.PathError{Op: "fstatfs", Path: f.Name(), Err: err}
}
return statfs, nil
}
// The kernel guarantees that the root inode of a procfs mount has an
// f_type of PROC_SUPER_MAGIC and st_ino of PROC_ROOT_INO.
const (
procSuperMagic = 0x9fa0 // PROC_SUPER_MAGIC
procRootIno = 1 // PROC_ROOT_INO
)
func verifyProcRoot(procRoot *os.File) error {
if statfs, err := fstatfs(procRoot); err != nil {
return err
} else if statfs.Type != procSuperMagic {
return fmt.Errorf("%w: incorrect procfs root filesystem type 0x%x", errUnsafeProcfs, statfs.Type)
}
if stat, err := fstat(procRoot); err != nil {
return err
} else if stat.Ino != procRootIno {
return fmt.Errorf("%w: incorrect procfs root inode number %d", errUnsafeProcfs, stat.Ino)
}
return nil
}
var hasNewMountApi = sync_OnceValue(func() bool {
// All of the pieces of the new mount API we use (fsopen, fsconfig,
// fsmount, open_tree) were added together in Linux 5.1[1,2], so we can
// just check for one of the syscalls and the others should also be
// available.
//
// Just try to use open_tree(2) to open a file without OPEN_TREE_CLONE.
// This is equivalent to openat(2), but tells us if open_tree is
// available (and thus all of the other basic new mount API syscalls).
// open_tree(2) is most light-weight syscall to test here.
//
// [1]: merge commit 400913252d09
// [2]: <https://lore.kernel.org/lkml/153754740781.17872.7869536526927736855.stgit@warthog.procyon.org.uk/>
fd, err := unix.OpenTree(-int(unix.EBADF), "/", unix.OPEN_TREE_CLOEXEC)
if err != nil {
return false
}
_ = unix.Close(fd)
return true
})
func fsopen(fsName string, flags int) (*os.File, error) {
// Make sure we always set O_CLOEXEC.
flags |= unix.FSOPEN_CLOEXEC
fd, err := unix.Fsopen(fsName, flags)
if err != nil {
return nil, os.NewSyscallError("fsopen "+fsName, err)
}
return os.NewFile(uintptr(fd), "fscontext:"+fsName), nil
}
func fsmount(ctx *os.File, flags, mountAttrs int) (*os.File, error) {
// Make sure we always set O_CLOEXEC.
flags |= unix.FSMOUNT_CLOEXEC
fd, err := unix.Fsmount(int(ctx.Fd()), flags, mountAttrs)
if err != nil {
return nil, os.NewSyscallError("fsmount "+ctx.Name(), err)
}
return os.NewFile(uintptr(fd), "fsmount:"+ctx.Name()), nil
}
func newPrivateProcMount() (*os.File, error) {
procfsCtx, err := fsopen("proc", unix.FSOPEN_CLOEXEC)
if err != nil {
return nil, err
}
defer procfsCtx.Close()
// Try to configure hidepid=ptraceable,subset=pid if possible, but ignore errors.
_ = unix.FsconfigSetString(int(procfsCtx.Fd()), "hidepid", "ptraceable")
_ = unix.FsconfigSetString(int(procfsCtx.Fd()), "subset", "pid")
// Get an actual handle.
if err := unix.FsconfigCreate(int(procfsCtx.Fd())); err != nil {
return nil, os.NewSyscallError("fsconfig create procfs", err)
}
return fsmount(procfsCtx, unix.FSMOUNT_CLOEXEC, unix.MS_RDONLY|unix.MS_NODEV|unix.MS_NOEXEC|unix.MS_NOSUID)
}
func openTree(dir *os.File, path string, flags uint) (*os.File, error) {
dirFd := -int(unix.EBADF)
dirName := "."
if dir != nil {
dirFd = int(dir.Fd())
dirName = dir.Name()
}
// Make sure we always set O_CLOEXEC.
flags |= unix.OPEN_TREE_CLOEXEC
fd, err := unix.OpenTree(dirFd, path, flags)
if err != nil {
return nil, &os.PathError{Op: "open_tree", Path: path, Err: err}
}
return os.NewFile(uintptr(fd), dirName+"/"+path), nil
}
func clonePrivateProcMount() (_ *os.File, Err error) {
// Try to make a clone without using AT_RECURSIVE if we can. If this works,
// we can be sure there are no over-mounts and so if the root is valid then
// we're golden. Otherwise, we have to deal with over-mounts.
procfsHandle, err := openTree(nil, "/proc", unix.OPEN_TREE_CLONE)
if err != nil || hookForcePrivateProcRootOpenTreeAtRecursive(procfsHandle) {
procfsHandle, err = openTree(nil, "/proc", unix.OPEN_TREE_CLONE|unix.AT_RECURSIVE)
}
if err != nil {
return nil, fmt.Errorf("creating a detached procfs clone: %w", err)
}
defer func() {
if Err != nil {
_ = procfsHandle.Close()
}
}()
if err := verifyProcRoot(procfsHandle); err != nil {
return nil, err
}
return procfsHandle, nil
}
func privateProcRoot() (*os.File, error) {
if !hasNewMountApi() || hookForceGetProcRootUnsafe() {
return nil, fmt.Errorf("new mount api: %w", unix.ENOTSUP)
}
// Try to create a new procfs mount from scratch if we can. This ensures we
// can get a procfs mount even if /proc is fake (for whatever reason).
procRoot, err := newPrivateProcMount()
if err != nil || hookForcePrivateProcRootOpenTree(procRoot) {
// Try to clone /proc then...
procRoot, err = clonePrivateProcMount()
}
return procRoot, err
}
func unsafeHostProcRoot() (_ *os.File, Err error) {
procRoot, err := os.OpenFile("/proc", unix.O_PATH|unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
if err != nil {
return nil, err
}
defer func() {
if Err != nil {
_ = procRoot.Close()
}
}()
if err := verifyProcRoot(procRoot); err != nil {
return nil, err
}
return procRoot, nil
}
func doGetProcRoot() (*os.File, error) {
procRoot, err := privateProcRoot()
if err != nil {
// Fall back to using a /proc handle if making a private mount failed.
// If we have openat2, at least we can avoid some kinds of over-mount
// attacks, but without openat2 there's not much we can do.
procRoot, err = unsafeHostProcRoot()
}
return procRoot, err
}
var getProcRoot = sync_OnceValues(func() (*os.File, error) {
return doGetProcRoot()
})
var hasProcThreadSelf = sync_OnceValue(func() bool {
return unix.Access("/proc/thread-self/", unix.F_OK) == nil
})
var errUnsafeProcfs = errors.New("unsafe procfs detected")
type procThreadSelfCloser func()
// procThreadSelf returns a handle to /proc/thread-self/<subpath> (or an
// equivalent handle on older kernels where /proc/thread-self doesn't exist).
// Once finished with the handle, you must call the returned closer function
// (runtime.UnlockOSThread). You must not pass the returned *os.File to other
// Go threads or use the handle after calling the closer.
//
// This is similar to ProcThreadSelf from runc, but with extra hardening
// applied and using *os.File.
func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThreadSelfCloser, Err error) {
// We need to lock our thread until the caller is done with the handle
// because between getting the handle and using it we could get interrupted
// by the Go runtime and hit the case where the underlying thread is
// swapped out and the original thread is killed, resulting in
// pull-your-hair-out-hard-to-debug issues in the caller.
runtime.LockOSThread()
defer func() {
if Err != nil {
runtime.UnlockOSThread()
}
}()
// Figure out what prefix we want to use.
threadSelf := "thread-self/"
if !hasProcThreadSelf() || hookForceProcSelfTask() {
// Pre-3.17 kernels don't have /proc/thread-self, so do it manually.
threadSelf = "self/task/" + strconv.Itoa(unix.Gettid()) + "/"
if _, err := fstatatFile(procRoot, threadSelf, unix.AT_SYMLINK_NOFOLLOW); err != nil || hookForceProcSelf() {
// In this case, we are running in a pid namespace that doesn't match
// the /proc mount we have. This can happen inside runc.
//
// Unfortunately, there is no nice way to get the correct TID to
// use here because of the age of the kernel, so we have to just
// use /proc/self and hope that it works.
threadSelf = "self/"
}
}
// Grab the handle.
var (
handle *os.File
err error
)
if hasOpenat2() {
// We prefer being able to use RESOLVE_NO_XDEV if we can, to be
// absolutely sure we are operating on a clean /proc handle that
// doesn't have any cheeky overmounts that could trick us (including
// symlink mounts on top of /proc/thread-self). RESOLVE_BENEATH isn't
// strictly needed, but just use it since we have it.
//
// NOTE: /proc/self is technically a magic-link (the contents of the
// symlink are generated dynamically), but it doesn't use
// nd_jump_link() so RESOLVE_NO_MAGICLINKS allows it.
//
// NOTE: We MUST NOT use RESOLVE_IN_ROOT here, as openat2File uses
// procSelfFdReadlink to clean up the returned f.Name() if we use
// RESOLVE_IN_ROOT (which would lead to an infinite recursion).
handle, err = openat2File(procRoot, threadSelf+subpath, &unix.OpenHow{
Flags: unix.O_PATH | unix.O_NOFOLLOW | unix.O_CLOEXEC,
Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_MAGICLINKS,
})
if err != nil {
// TODO: Once we bump the minimum Go version to 1.20, we can use
// multiple %w verbs for this wrapping. For now we need to use a
// compatibility shim for older Go versions.
//err = fmt.Errorf("%w: %w", errUnsafeProcfs, err)
return nil, nil, wrapBaseError(err, errUnsafeProcfs)
}
} else {
handle, err = openatFile(procRoot, threadSelf+subpath, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
if err != nil {
// TODO: Once we bump the minimum Go version to 1.20, we can use
// multiple %w verbs for this wrapping. For now we need to use a
// compatibility shim for older Go versions.
//err = fmt.Errorf("%w: %w", errUnsafeProcfs, err)
return nil, nil, wrapBaseError(err, errUnsafeProcfs)
}
defer func() {
if Err != nil {
_ = handle.Close()
}
}()
// We can't detect bind-mounts of different parts of procfs on top of
// /proc (a-la RESOLVE_NO_XDEV), but we can at least be sure that we
// aren't on the wrong filesystem here.
if statfs, err := fstatfs(handle); err != nil {
return nil, nil, err
} else if statfs.Type != procSuperMagic {
return nil, nil, fmt.Errorf("%w: incorrect /proc/self/fd filesystem type 0x%x", errUnsafeProcfs, statfs.Type)
}
}
return handle, runtime.UnlockOSThread, nil
}
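
A minimal caller sketch (not part of the upstream diff) of the contract described in the procThreadSelf doc comment: the O_PATH handle is only valid on the current OS thread, and the returned closer (runtime.UnlockOSThread) must run after the handle is closed. It assumes the getProcRoot and procThreadSelf helpers above and the golang.org/x/sys/unix import already used in this file.

```go
// Illustrative usage sketch; assumes the same file/imports as above.
func statThreadSelf(subpath string) (unix.Stat_t, error) {
	var st unix.Stat_t
	procRoot, err := getProcRoot()
	if err != nil {
		return st, err
	}
	handle, closer, err := procThreadSelf(procRoot, subpath)
	if err != nil {
		return st, err
	}
	defer closer()       // runs last: unlock the OS thread
	defer handle.Close() // runs first: close the O_PATH handle
	// fstat(2) is one of the operations permitted on O_PATH descriptors.
	err = unix.Fstat(int(handle.Fd()), &st)
	return st, err
}
```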
// STATX_MNT_ID_UNIQUE is provided in golang.org/x/sys@v0.20.0, but in order to
// avoid bumping the requirement for a single constant we can just define it
// ourselves.
const STATX_MNT_ID_UNIQUE = 0x4000
var hasStatxMountId = sync_OnceValue(func() bool {
var (
stx unix.Statx_t
// We don't care which mount ID we get. The kernel will give us the
// unique one if it is supported.
wantStxMask uint32 = STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
)
err := unix.Statx(-int(unix.EBADF), "/", 0, int(wantStxMask), &stx)
return err == nil && stx.Mask&wantStxMask != 0
})
func getMountId(dir *os.File, path string) (uint64, error) {
// If we don't have statx(STATX_MNT_ID*) support, we can't do anything.
if !hasStatxMountId() {
return 0, nil
}
var (
stx unix.Statx_t
// We don't care which mount ID we get. The kernel will give us the
// unique one if it is supported.
wantStxMask uint32 = STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
)
err := unix.Statx(int(dir.Fd()), path, unix.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW, int(wantStxMask), &stx)
if stx.Mask&wantStxMask == 0 {
// It's not a kernel limitation, for some reason we couldn't get a
// mount ID. Assume it's some kind of attack.
err = fmt.Errorf("%w: could not get mount id", errUnsafeProcfs)
}
if err != nil {
return 0, &os.PathError{Op: "statx(STATX_MNT_ID_...)", Path: dir.Name() + "/" + path, Err: err}
}
return stx.Mnt_id, nil
}
func checkSymlinkOvermount(procRoot *os.File, dir *os.File, path string) error {
// Get the mntId of our procfs handle.
expectedMountId, err := getMountId(procRoot, "")
if err != nil {
return err
}
// Get the mntId of the target magic-link.
gotMountId, err := getMountId(dir, path)
if err != nil {
return err
}
// As long as the directory mount is alive, even with wrapping mount IDs,
// we would expect to see a different mount ID here. (Of course, if we're
// using unsafeHostProcRoot() then an attacker could change this after we
// did this check.)
if expectedMountId != gotMountId {
return fmt.Errorf("%w: symlink %s/%s has an overmount obscuring the real link (mount ids do not match %d != %d)", errUnsafeProcfs, dir.Name(), path, expectedMountId, gotMountId)
}
return nil
}
func doRawProcSelfFdReadlink(procRoot *os.File, fd int) (string, error) {
fdPath := fmt.Sprintf("fd/%d", fd)
procFdLink, closer, err := procThreadSelf(procRoot, fdPath)
if err != nil {
return "", fmt.Errorf("get safe /proc/thread-self/%s handle: %w", fdPath, err)
}
defer procFdLink.Close()
defer closer()
// Try to detect if there is a mount on top of the magic-link. Since we
// use the handle directly rather than re-resolving the path, this
// should be safe in general (a mount on top of the path afterwards would
// not affect the handle itself) and will definitely be safe if we are
// using privateProcRoot() (at least since Linux 5.12[1], when anonymous
// mount namespaces were completely isolated from external mounts including
// mount propagation events).
//
// [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts
// onto targets that reside on shared mounts").
if err := checkSymlinkOvermount(procRoot, procFdLink, ""); err != nil {
return "", fmt.Errorf("check safety of /proc/thread-self/fd/%d magiclink: %w", fd, err)
}
// readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See Linux commit
// 65cfc6722361 ("readlinkat(), fchownat() and fstatat() with empty
// relative pathnames").
return readlinkatFile(procFdLink, "")
}
func rawProcSelfFdReadlink(fd int) (string, error) {
procRoot, err := getProcRoot()
if err != nil {
return "", err
}
return doRawProcSelfFdReadlink(procRoot, fd)
}
func procSelfFdReadlink(f *os.File) (string, error) {
return rawProcSelfFdReadlink(int(f.Fd()))
}
var (
errPossibleBreakout = errors.New("possible breakout detected")
errInvalidDirectory = errors.New("wandered into deleted directory")
errDeletedInode = errors.New("cannot verify path of deleted inode")
)
func isDeadInode(file *os.File) error {
// If the nlink of a file drops to 0, there is an attacker deleting
// directories during our walk, which could result in weird /proc values.
// It's better to error out in this case.
stat, err := fstat(file)
if err != nil {
return fmt.Errorf("check for dead inode: %w", err)
}
if stat.Nlink == 0 {
err := errDeletedInode
if stat.Mode&unix.S_IFMT == unix.S_IFDIR {
err = errInvalidDirectory
}
return fmt.Errorf("%w %q", err, file.Name())
}
return nil
}
func checkProcSelfFdPath(path string, file *os.File) error {
if err := isDeadInode(file); err != nil {
return err
}
actualPath, err := procSelfFdReadlink(file)
if err != nil {
return fmt.Errorf("get path of handle: %w", err)
}
if actualPath != path {
return fmt.Errorf("%w: handle path %q doesn't match expected path %q", errPossibleBreakout, actualPath, path)
}
return nil
}
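
To make the intent of checkProcSelfFdPath concrete, a hypothetical walk step might look like the sketch below. The helper name is invented for illustration; it only reuses the openatFile and checkProcSelfFdPath helpers shown in this file.

```go
// Hypothetical helper: open one path component without following symlinks,
// then confirm via procfs that the handle is where we expect it to be.
// A mismatch means a rename/mount race moved us, so we bail out.
func openComponentChecked(dir *os.File, name, expectedPath string) (*os.File, error) {
	next, err := openatFile(dir, name, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
	if err != nil {
		return nil, err
	}
	if err := checkProcSelfFdPath(expectedPath, next); err != nil {
		_ = next.Close()
		return nil, err
	}
	return next, nil
}
```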
// Test hooks used in the procfs tests to verify that the fallback logic works.
// See testing_mocks_linux_test.go and procfs_linux_test.go for more details.
var (
hookForcePrivateProcRootOpenTree = hookDummyFile
hookForcePrivateProcRootOpenTreeAtRecursive = hookDummyFile
hookForceGetProcRootUnsafe = hookDummy
hookForceProcSelfTask = hookDummy
hookForceProcSelf = hookDummy
)
func hookDummy() bool { return false }
func hookDummyFile(_ *os.File) bool { return false }

View File

@@ -1,3 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (C) 2017-2024 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

View File

@@ -1,6 +1,10 @@
Changes
=======
v3.0.2 05 Dec 2025
* Code changes mainly due to upgraded linter.
* github.com/lestrrat-go/option upgraded to v2
v3.0.1 18 Aug 2025
* Refresh() no longer requires the resource to be ready.

View File

@@ -51,6 +51,9 @@ type Client struct {
// By default ALL urls are allowed. This may not be suitable for you if
// are using this in a production environment. You are encouraged to specify
// a whitelist using the `WithWhitelist` option.
//
// NOTE: In future versions, this function signature should be changed to
// return an error to properly handle option parsing failures.
func NewClient(options ...NewClientOption) *Client {
//nolint:staticcheck
var errSink ErrorSink = errsink.NewNop()
@@ -63,19 +66,18 @@ func NewClient(options ...NewClientOption) *Client {
defaultMaxInterval := DefaultMaxInterval
numWorkers := DefaultWorkers
//nolint:forcetypeassert
for _, option := range options {
switch option.Ident() {
case identHTTPClient{}:
httpcl = option.Value().(HTTPClient)
_ = option.Value(&httpcl)
case identWorkers{}:
numWorkers = option.Value().(int)
_ = option.Value(&numWorkers)
case identErrorSink{}:
errSink = option.Value().(ErrorSink)
_ = option.Value(&errSink)
case identTraceSink{}:
traceSink = option.Value().(TraceSink)
_ = option.Value(&traceSink)
case identWhitelist{}:
wl = option.Value().(Whitelist)
_ = option.Value(&wl)
}
}
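
For readers unfamiliar with the v2 option API, the migration above boils down to replacing v1's panicking type assertions with a pointer-based Value(&dst) that reports type mismatches as an error. A condensed sketch of that consumption pattern, taking only Ident() and Value(&dst) from these diffs:

```go
// Sketch only: NewClientOption, HTTPClient, DefaultWorkers and the ident
// types come from the surrounding code; this helper itself is hypothetical.
func parseClientOptions(options ...NewClientOption) (HTTPClient, int, error) {
	var httpcl HTTPClient
	numWorkers := DefaultWorkers
	for _, option := range options {
		switch option.Ident() {
		case identHTTPClient{}:
			if err := option.Value(&httpcl); err != nil {
				return nil, 0, fmt.Errorf(`failed to parse HTTPClient option: %w`, err)
			}
		case identWorkers{}:
			if err := option.Value(&numWorkers); err != nil {
				return nil, 0, fmt.Errorf(`failed to parse Workers option: %w`, err)
			}
		}
	}
	return httpcl, numWorkers, nil
}
```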

View File

@@ -123,11 +123,12 @@ func (c *controller) Add(ctx context.Context, r Resource, options ...AddOption)
c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: START Add(%q)", r.URL()))
defer c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: END Add(%q)", r.URL()))
waitReady := true
//nolint:forcetypeassert
for _, option := range options {
switch option.Ident() {
case identWaitReady{}:
waitReady = option.(addOption).Value().(bool)
if err := option.Value(&waitReady); err != nil {
return fmt.Errorf(`httprc.Controller.Add: failed to parse WaitReady option: %w`, err)
}
}
}

View File

@@ -3,7 +3,7 @@ package httprc
import (
"time"
"github.com/lestrrat-go/option"
"github.com/lestrrat-go/option/v2"
)
type NewClientOption interface {

View File

@@ -41,17 +41,24 @@ func NewResource[T any](s string, transformer Transformer[T], options ...NewReso
var interval time.Duration
minInterval := DefaultMinInterval
maxInterval := DefaultMaxInterval
//nolint:forcetypeassert
for _, option := range options {
switch option.Ident() {
case identHTTPClient{}:
httpcl = option.Value().(HTTPClient)
if err := option.Value(&httpcl); err != nil {
return nil, fmt.Errorf(`httprc.NewResource: failed to parse HTTPClient option: %w`, err)
}
case identMinimumInterval{}:
minInterval = option.Value().(time.Duration)
if err := option.Value(&minInterval); err != nil {
return nil, fmt.Errorf(`httprc.NewResource: failed to parse MinimumInterval option: %w`, err)
}
case identMaximumInterval{}:
maxInterval = option.Value().(time.Duration)
if err := option.Value(&maxInterval); err != nil {
return nil, fmt.Errorf(`httprc.NewResource: failed to parse MaximumInterval option: %w`, err)
}
case identConstantInterval{}:
interval = option.Value().(time.Duration)
if err := option.Value(&interval); err != nil {
return nil, fmt.Errorf(`httprc.NewResource: failed to parse ConstantInterval option: %w`, err)
}
}
}
if transformer == nil {
@@ -109,7 +116,7 @@ func (r *ResourceBase[T]) Ready(ctx context.Context) error {
// returns `A` or `B` depending on the type of the resource. When accessing the
// resource through the `httprc.Resource` interface, use this method to obtain the
// stored value.
func (r *ResourceBase[T]) Get(dst interface{}) error {
func (r *ResourceBase[T]) Get(dst any) error {
return blackmagic.AssignIfCompatible(dst, r.Resource())
}

View File

@@ -4,6 +4,13 @@ Changes
v3 has many incompatibilities with v2. To see the full list of differences between
v2 and v3, please read the Changes-v3.md file (https://github.com/lestrrat-go/jwx/blob/develop/v3/Changes-v3.md)
v3.0.13 12 Jan 2026
* [jwt] The `jwt.WithContext()` option is now properly being passed to `jws.Verify()` from
`jwt.Parse()`.
* [jwx] github.com/lestrrat-go/httprc/v3 has been upgraded to remove dependency on
github.com/lestrrat-go/option (v1)
* [jwk] `jwk.Clone()` has been fixed to properly work with private fields.
v3.0.12 20 Oct 2025
* [jwe] As part of the next change, now per-recipient headers that are empty
are no longer serialized in flattened JSON serialization.
@@ -18,7 +25,7 @@ v3.0.12 20 Oct 2025
were left to be included in the final serialization as-is. This caused duplicate
headers to be present in both the protected headers and the per-recipient headers.
Since there maybe users who rely on this behavior already, instead of changing the
Since there may be users who rely on this behavior already, instead of changing the
default behavior to fix this duplication, a new option to `jwe.Encrypt()` was added
to allow clearing the per-recipient headers after merging to leave the `"headers"`
field empty. This in effect makes the flattened JSON serialization more similar to
@@ -29,7 +36,7 @@ v3.0.12 20 Oct 2025
headers need to be merged regardless. In full JSON serialization, we never
merge the headers, so it is left up to the user to keep the headers disjoint.
* [jws] Calling the deprecated `jws.NewSigner()` function for the time will cause
* [jws] Calling the deprecated `jws.NewSigner()` function for the first time will cause
legacy signers to be loaded automatically. Previously, you had to explicitly
call `jws.Settings(jws.WithLegacySigners(true))` to enable legacy signers.

View File

@@ -1,5 +1,4 @@
//go:build jwx_goccy
// +build jwx_goccy
package json

View File

@@ -1,6 +1,6 @@
//go:build !jwx_goccy
// +build !jwx_goccy
//nolint:revive
package json
import (

View File

@@ -1,5 +1,4 @@
//go:build jwx_es256k
// +build jwx_es256k
package jwa

View File

@@ -72,9 +72,7 @@ func extractPadding(payload []byte) (toRemove int, good byte) {
// The maximum possible padding length plus the actual length field
toCheck := 256
// The length of the padded data is public, so we can use an if here
if toCheck > len(payload) {
toCheck = len(payload)
}
toCheck = min(toCheck, len(payload))
for i := 1; i <= toCheck; i++ {
t := uint(paddingLen) - uint(i)

View File

@@ -141,8 +141,8 @@ func buildECDHPrivateKey(alg jwa.EllipticCurveAlgorithm, dbuf []byte) (*ecdh.Pri
}
var ecdsaConvertibleTypes = []reflect.Type{
reflect.TypeOf((*ECDSAPrivateKey)(nil)).Elem(),
reflect.TypeOf((*ECDSAPublicKey)(nil)).Elem(),
reflect.TypeFor[ECDSAPrivateKey](),
reflect.TypeFor[ECDSAPublicKey](),
}
func ecdsaJWKToRaw(keyif Key, hint any) (any, error) {

View File

@@ -1,5 +1,4 @@
//go:build jwx_es256k
// +build jwx_es256k
package jwk

View File

@@ -92,9 +92,14 @@ type Set interface {
Len() int
// LookupKeyID returns the first key matching the given key id.
//
// The second return value is false if there are no keys matching the key id.
// The set *may* contain multiple keys with the same key id. If you
// need all of them, use `Iterate()`
// need all of them, use `Len()` and `Key(int)`
//
// This method is meant to be used to look up a key with a unique ID.
// Because of this, you cannot use this method to look up keys with an empty key ID
// (i.e. `kid` is not specified, or is an empty string).
LookupKeyID(string) (Key, bool)
// RemoveKey removes the key from the set.

View File

@@ -13,6 +13,7 @@ import (
"io"
"math/big"
"reflect"
"slices"
"github.com/lestrrat-go/jwx/v3/internal/base64"
"github.com/lestrrat-go/jwx/v3/internal/json"
@@ -30,14 +31,14 @@ func bigIntToBytes(n *big.Int) ([]byte, error) {
func init() {
if err := RegisterProbeField(reflect.StructField{
Name: "Kty",
Type: reflect.TypeOf(""),
Type: reflect.TypeFor[string](),
Tag: `json:"kty"`,
}); err != nil {
panic(fmt.Errorf("failed to register mandatory probe for 'kty' field: %w", err))
}
if err := RegisterProbeField(reflect.StructField{
Name: "D",
Type: reflect.TypeOf(json.RawMessage(nil)),
Type: reflect.TypeFor[json.RawMessage](),
Tag: `json:"d,omitempty"`,
}); err != nil {
panic(fmt.Errorf("failed to register mandatory probe for 'kty' field: %w", err))
@@ -665,10 +666,10 @@ func extractEmbeddedKey(keyif Key, concretTypes []reflect.Type) (Key, error) {
rv := reflect.ValueOf(keyif)
// If the value can be converted to one of the concrete types, then we're done
for _, t := range concretTypes {
if rv.Type().ConvertibleTo(t) {
return keyif, nil
}
if slices.ContainsFunc(concretTypes, func(t reflect.Type) bool {
return rv.Type().ConvertibleTo(t)
}) {
return keyif, nil
}
// When a struct implements the Key interface via embedding, you unfortunately

View File

@@ -141,8 +141,8 @@ func buildOKPPrivateKey(alg jwa.EllipticCurveAlgorithm, xbuf []byte, dbuf []byte
}
var okpConvertibleKeys = []reflect.Type{
reflect.TypeOf((*OKPPrivateKey)(nil)).Elem(),
reflect.TypeOf((*OKPPublicKey)(nil)).Elem(),
reflect.TypeFor[OKPPrivateKey](),
reflect.TypeFor[OKPPublicKey](),
}
// This is half baked. I think it will blow up if we used ecdh.* keys and/or x25519 keys

View File

@@ -115,8 +115,8 @@ func buildRSAPublicKey(key *rsa.PublicKey, n, e []byte) {
}
var rsaConvertibleKeys = []reflect.Type{
reflect.TypeOf((*RSAPrivateKey)(nil)).Elem(),
reflect.TypeOf((*RSAPublicKey)(nil)).Elem(),
reflect.TypeFor[RSAPrivateKey](),
reflect.TypeFor[RSAPublicKey](),
}
func rsaJWKToRaw(key Key, hint any) (any, error) {

View File

@@ -3,6 +3,7 @@ package jwk
import (
"bytes"
"fmt"
"maps"
"reflect"
"sort"
@@ -14,13 +15,17 @@ import (
const keysKey = `keys` // appease linter
// NewSet creates an empty `jwk.Set` object
func NewSet() Set {
func newSet() *set {
return &set{
privateParams: make(map[string]any),
}
}
// NewSet creates an empty `jwk.Set` object
func NewSet() Set {
return newSet()
}
func (s *set) Set(n string, v any) error {
s.mu.RLock()
defer s.mu.RUnlock()
@@ -300,12 +305,15 @@ func (s *set) SetDecodeCtx(dc DecodeCtx) {
}
func (s *set) Clone() (Set, error) {
s2 := &set{}
s2 := newSet()
s.mu.RLock()
defer s.mu.RUnlock()
s2.keys = make([]Key, len(s.keys))
copy(s2.keys, s.keys)
maps.Copy(s2.privateParams, s.privateParams)
return s2, nil
}

View File

@@ -27,7 +27,7 @@ func (k *symmetricKey) Import(rawKey []byte) error {
}
var symmetricConvertibleKeys = []reflect.Type{
reflect.TypeOf((*SymmetricKey)(nil)).Elem(),
reflect.TypeFor[SymmetricKey](),
}
func octetSeqToRaw(key Key, hint any) (any, error) {

View File

@@ -1,5 +1,4 @@
//go:build jwx_es256k
// +build jwx_es256k
package jws

View File

@@ -535,12 +535,12 @@ var rawKeyToKeyType = make(map[reflect.Type]jwa.KeyType)
var keyTypeToAlgorithms = make(map[jwa.KeyType][]jwa.SignatureAlgorithm)
func init() {
rawKeyToKeyType[reflect.TypeOf([]byte(nil))] = jwa.OctetSeq()
rawKeyToKeyType[reflect.TypeOf(ed25519.PublicKey(nil))] = jwa.OKP()
rawKeyToKeyType[reflect.TypeOf(rsa.PublicKey{})] = jwa.RSA()
rawKeyToKeyType[reflect.TypeOf((*rsa.PublicKey)(nil))] = jwa.RSA()
rawKeyToKeyType[reflect.TypeOf(ecdsa.PublicKey{})] = jwa.EC()
rawKeyToKeyType[reflect.TypeOf((*ecdsa.PublicKey)(nil))] = jwa.EC()
rawKeyToKeyType[reflect.TypeFor[[]byte]()] = jwa.OctetSeq()
rawKeyToKeyType[reflect.TypeFor[ed25519.PublicKey]()] = jwa.OKP()
rawKeyToKeyType[reflect.TypeFor[rsa.PublicKey]()] = jwa.RSA()
rawKeyToKeyType[reflect.TypeFor[*rsa.PublicKey]()] = jwa.RSA()
rawKeyToKeyType[reflect.TypeFor[ecdsa.PublicKey]()] = jwa.EC()
rawKeyToKeyType[reflect.TypeFor[*ecdsa.PublicKey]()] = jwa.EC()
addAlgorithmForKeyType(jwa.OKP(), jwa.EdDSA())
for _, alg := range []jwa.SignatureAlgorithm{jwa.HS256(), jwa.HS384(), jwa.HS512()} {

View File

@@ -2,6 +2,8 @@
//
// It's internal because we don't want to expose _anything_ about these errors
// so users absolutely cannot do anything other than use them as opaque errors.
//
//nolint:revive
package errors
import (

View File

@@ -211,7 +211,12 @@ func parseBytes(data []byte, options ...ParseOption) (Token, error) {
for _, o := range options {
if v, ok := o.(ValidateOption); ok {
ctx.validateOpts = append(ctx.validateOpts, v)
continue
// context is used for both verification and validation, so we can't just continue
switch o.Ident() {
case identContext{}:
default:
continue
}
}
switch o.Ident() {
@@ -228,7 +233,7 @@ func parseBytes(data []byte, options ...ParseOption) (Token, error) {
}
}
verifyOpts = append(verifyOpts, o)
case identKeySet{}, identVerifyAuto{}, identKeyProvider{}, identBase64Encoder{}:
case identKeySet{}, identVerifyAuto{}, identKeyProvider{}, identBase64Encoder{}, identContext{}:
verifyOpts = append(verifyOpts, o)
case identToken{}:
var token Token

View File

@@ -1,7 +1,9 @@
package jwt
import (
"context"
"fmt"
"strings"
"time"
"github.com/lestrrat-go/jwx/v3/jwa"
@@ -137,6 +139,14 @@ func toVerifyOptions(options ...Option) ([]jws.VerifyOption, error) {
return nil, fmt.Errorf(`failed to decode Base64Encoder: %w`, err)
}
voptions = append(voptions, jws.WithBase64Encoder(enc))
case identContext{}:
var ctx context.Context
if err := option.Value(&ctx); err != nil {
return nil, fmt.Errorf(`failed to decode Context: %w`, err)
}
voptions = append(voptions, jws.WithContext(ctx))
default:
return nil, fmt.Errorf(`invalid jws.VerifyOption %q passed`, `With`+strings.TrimPrefix(fmt.Sprintf(`%T`, option.Ident()), `jws.ident`))
}
}
return voptions, nil

View File

@@ -3,6 +3,7 @@ package jwt
import (
"context"
"fmt"
"slices"
"strconv"
"time"
@@ -344,12 +345,10 @@ func (ccs claimContainsString) Validate(_ context.Context, t Token) error {
return ccs.makeErr(`claim %q does not exist or is not a []string: %w`, ccs.name, err)
}
for _, v := range list {
if v == ccs.value {
return nil
}
if !slices.Contains(list, ccs.value) {
return ccs.makeErr(`%q not satisfied`, ccs.name)
}
return ccs.makeErr(`%q not satisfied`, ccs.name)
return nil
}
// audienceClaimContainsString can be used to check if the audience claim, which is

View File

@@ -1,15 +0,0 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/

View File

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2021 lestrrat-go
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,245 +0,0 @@
# option
Base object for the "Optional Parameters Pattern".
# DESCRIPTION
The beauty of this pattern is that you can achieve a method that can
take the following simple calling style
```go
obj.Method(mandatory1, mandatory2)
```
or the following, if you want to modify its behavior with optional parameters
```go
obj.Method(mandatory1, mandatory2, optional1, optional2, optional3)
```
Instead of the more clunky zero value for optionals style
```go
obj.Method(mandatory1, mandatory2, nil, "", 0)
```
or the equally clunky config object style, which requires you to create a
struct with `NamesThatLookReallyLongBecauseItNeedsToIncludeMethodNamesConfig`
```go
cfg := &ConfigForMethod{
Optional1: ...,
Optional2: ...,
Optional3: ...,
}
obj.Method(mandatory1, mandatory2, &cfg)
```
# SYNOPSIS
Create an "identifier" for the option. We recommend using an unexported empty struct,
because
1. It is uniquely identifiable globally
1. Takes minimal space
1. Since it's unexported, you do not have to worry about it leaking elsewhere or having it changed by consumers
```go
// an unexported empty struct
type identFeatureX struct{}
```
Then define a method to create an option using this identifier. Here we assume
that the option will be a boolean option.
```go
// this is optional, but for readability we usually use a wrapper
// around option.Interface, or a type alias.
type Option = option.Interface
func WithFeatureX(v bool) Option {
// use the constructor to create a new option
return option.New(identFeatureX{}, v)
}
```
Now you can create an option, which is essentially a two-element tuple consisting
of an identifier and its associated value.
To consume this, you will need to create a function with variadic parameters,
and iterate over the list looking for a particular identifier:
```go
func MyAwesomeFunc( /* mandatory parameters omitted */, options ...Option) {
var enableFeatureX bool
// The nolint directive is recommended if you are using linters such
// as golangci-lint
//nolint:forcetypeassert
for _, option := range options {
switch option.Ident() {
case identFeatureX{}:
enableFeatureX = option.Value().(bool)
// other cases omitted
}
}
if enableFeatureX {
....
}
}
```
# Option objects
Option objects take two arguments, its identifier and the value it contains.
The identifier can be anything, but it's usually better to use an unexported
empty struct so that only you have the ability to generate said option:
```go
type identOptionalParamOne struct{}
type identOptionalParamTwo struct{}
type identOptionalParamThree struct{}
func WithOptionOne(v ...) Option {
return option.New(identOptionalParamOne{}, v)
}
```
Then you can call the method we described above as
```go
obj.Method(m1, m2, WithOptionOne(...), WithOptionTwo(...), WithOptionThree(...))
```
Options should be parsed in code that looks somewhat like this
```go
func (obj *Object) Method(m1 Type1, m2 Type2, options ...Option) {
paramOne := defaultValueParamOne
for _, option := range options {
switch option.Ident() {
case identOptionalParamOne{}:
paramOne = option.Value().(...)
}
}
...
}
```
The loop requires a bit of boilerplate, and admittedly, this is the main downside
of this module. However, if you think you want to use the Option as a Function pattern,
please check the FAQ below for rationale.
# Simple usage
Most of the times all you need to do is to declare the Option type as an alias
in your code:
```go
package myawesomepkg
import "github.com/lestrrat-go/option"
type Option = option.Interface
```
Then you can start defining options like they are described in the SYNOPSIS section.
# Differentiating Options
When you have multiple methods and options, and those options can only be passed to
each one of the methods, it's hard to see which options should be passed to which method.
```go
func WithX() Option { ... }
func WithY() Option { ... }
// Now, which of WithX/WithY go to which method?
func (*Obj) Method1(options ...Option) {}
func (*Obj) Method2(options ...Option) {}
```
In this case the easiest way to make it obvious is to put an extra layer around
the options so that they have different types
```go
type Method1Option interface {
Option
method1Option()
}
type method1Option struct { Option }
func (*method1Option) method1Option() {}
func WithX() Method1Option {
return &method1Option{option.New(...)}
}
func (*Obj) Method1(options ...Method1Option) {}
```
This way the compiler knows if an option can be passed to a given method.
# FAQ
## Why aren't these function-based?
Using a base option type like `type Option func(ctx interface{})` is certainly one way to achieve the same goal. In this case, you are giving the option itself the ability to "configure" the main object. For example:
```go
type Foo struct {
optionalValue bool
}
type Option func(*Foo) error
func WithOptionalValue(v bool) Option {
return Option(func(f *Foo) error {
f.optionalValue = v
return nil
})
}
func NewFoo(options ...Option) (*Foo, error) {
var f Foo
for _, o := range options {
if err := o(&f); err != nil {
return nil, err
}
}
return &f, nil
}
```
This in itself is fine, but we think there are a few problems:
### 1. It's hard to create a reusable "Option" type
We create many libraries using this optional pattern. We would like to provide a default base object. However, this function-based approach is not reusable because each "Option" type requires that it has a context-specific input type. For example, if the "Option" type in the previous example was `func(interface{}) error`, then its usability will significantly decrease because of the type conversion.
This is not to say that this library's approach is better as it also requires type conversion to convert the _value_ of the option. However, part of the beauty of the original function based approach was the ease of its use, and we claim that this significantly decreases the merits of the function based approach.
### 2. The receiver requires exported fields
Part of the appeal of a function-based option pattern is that by giving the option itself the ability to do what it wants, you open up the possibility of allowing third parties to create options that do things that the library authors did not think about.
```go
package thirdparty
func WithMyAwesomeOption( ... ) mypkg.Option {
return mypkg.Option(func(f *mypkg) error {
f.X = ...
f.Y = ...
f.Z = ...
return nil
})
}
```
However, for any third party code to access and set field values, these fields (`X`, `Y`, `Z`) must be exported. Basically you will need an "open" struct.
Exported fields are absolutely no problem when you have a struct that represents data alone (i.e., API calls that refer to or change state information), but we think that casually exposing fields for a library struct is a sure way to maintenance hell in the future. What happens when you want to change the API? What happens when you realize that you want to use the field as state (i.e. use it for more than configuration)? What if they kept referring to that field, and then you have concurrent code accessing it?
Giving third parties complete access to exported fields is like handing out a loaded weapon to the users, and you are at their mercy.
Of course, providing public APIs for everything so you can validate and control concurrency is an option, but then ... it's a lot of work, and you may have to provide APIs _only_ so that users can refer to them in the option-configuration phase. That sounds like a lot of extra work.

View File

@@ -1,38 +0,0 @@
package option
import "fmt"
// Interface defines the minimum interface that an option must fulfill
type Interface interface {
// Ident returns the "identity" of this option, a unique identifier that
// can be used to differentiate between options
Ident() interface{}
// Value returns the corresponding value.
Value() interface{}
}
type pair struct {
ident interface{}
value interface{}
}
// New creates a new Option
func New(ident, value interface{}) Interface {
return &pair{
ident: ident,
value: value,
}
}
func (p *pair) Ident() interface{} {
return p.ident
}
func (p *pair) Value() interface{} {
return p.value
}
func (p *pair) String() string {
return fmt.Sprintf(`%v(%v)`, p.ident, p.value)
}

View File

File diff suppressed because it is too large

View File

File diff suppressed because it is too large

View File

@@ -111,6 +111,9 @@ opa_arith_rem,opa_bf_to_number
opa_array_concat,opa_value_type
opa_array_concat,opa_array_with_cap
opa_array_concat,opa_array_append
opa_array_flatten,opa_value_type
opa_array_flatten,opa_array_with_cap
opa_array_flatten,opa_array_append
opa_array_slice,opa_value_type
opa_array_slice,opa_number_try_int
opa_array_slice,opa_array_with_cap

View File

Binary file not shown.

View File

@@ -90,6 +90,7 @@ var builtinsFunctions = map[string]string{
ast.Floor.Name: "opa_arith_floor",
ast.Rem.Name: "opa_arith_rem",
ast.ArrayConcat.Name: "opa_array_concat",
ast.ArrayFlatten.Name: "opa_array_flatten",
ast.ArrayReverse.Name: "opa_array_reverse",
ast.ArraySlice.Name: "opa_array_slice",
ast.SetDiff.Name: "opa_set_diff",

View File

@@ -602,7 +602,7 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) {
}
return child.Unfold(path[1:])
}
return nil, fmt.Errorf("path %v does not exist in object term %v", ast.Ref{path[0]}, e.value.Value)
return nil, fmt.Errorf("path %v does not exist in object term %v", path[0], e.value.Value)
case ast.Set:
// Sets' keys *are* their values, so in order to allow accurate
// traversal, we have to collapse the tree beneath this node,
@@ -662,10 +662,10 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) {
}
return child.Unfold(path[1:])
}
return nil, fmt.Errorf("path %v does not exist in array term %v", ast.Ref{ast.IntNumberTerm(idx)}, e.value.Value)
return nil, fmt.Errorf("path %v does not exist in array term %v", ast.IntNumberTerm(idx), e.value.Value)
default:
// Catch all primitive types.
return nil, fmt.Errorf("expected composite type for path %v, found value: %v (type: %T)", ast.Ref{path[0]}, x, x)
return nil, fmt.Errorf("expected composite type for path %v, found value: %v (type: %T)", path[0], x, x)
}
}

View File

@@ -2,11 +2,10 @@
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
// Package report provides functions to report OPA's version information to an external service and process the response.
package report
// Package versioncheck provides functions to check for the latest OPA release version from GitHub.
package versioncheck
import (
"cmp"
"context"
"encoding/json"
"errors"
@@ -14,7 +13,6 @@ import (
"net/http"
"os"
"runtime"
"strconv"
"strings"
"time"
@@ -34,24 +32,24 @@ import (
//
// Override at build time via:
//
// -ldflags "-X github.com/open-policy-agent/opa/internal/report.ExternalServiceURL=<url>"
// -ldflags "-X github.com/open-policy-agent/opa/internal/report.GHRepo=<url>"
// -ldflags "-X github.com/open-policy-agent/opa/internal/versioncheck.ExternalServiceURL=<url>"
// -ldflags "-X github.com/open-policy-agent/opa/internal/versioncheck.GHRepo=<url>"
//
// ExternalServiceURL will be overridden if the OPA_TELEMETRY_SERVICE_URL environment variable
// ExternalServiceURL will be overridden if the OPA_VERSION_CHECK_SERVICE_URL environment variable
// is provided.
var ExternalServiceURL = "https://api.github.com"
var GHRepo = "open-policy-agent/opa"
// Reporter reports information such as the version, heap usage about the running OPA instance to an external service
type Reporter interface {
SendReport(ctx context.Context) (*DataResponse, error)
// Checker checks for the latest OPA release version
type Checker interface {
LatestVersion(ctx context.Context) (*DataResponse, error)
RegisterGatherer(key string, f Gatherer)
}
// Gatherer represents a mechanism to inject additional data in the telemetry report
// Gatherer represents a mechanism to inject additional data (currently unused for version checking)
type Gatherer func(ctx context.Context) (any, error)
// DataResponse represents the data returned by the external service
// DataResponse represents the data returned by the version check
type DataResponse struct {
Latest ReleaseDetails `json:"latest"`
}
@@ -64,44 +62,48 @@ type ReleaseDetails struct {
OPAUpToDate bool `json:"opa_up_to_date,omitempty"` // is running OPA version greater than or equal to the latest released
}
// Options supplies parameters to the reporter.
// Options supplies parameters to the version checker.
type Options struct {
Logger logging.Logger
}
type GHVersionCollector struct {
type GitHubVersionChecker struct {
client rest.Client
}
type GHResponse struct {
type GitHubRelease struct {
TagName string `json:"tag_name,omitempty"` // latest OPA release tag
ReleaseNotes string `json:"html_url,omitempty"` // link to the OPA release notes
Download string `json:"assets_url,omitempty"` // link to download the OPA release
}
// New returns an instance of the Reporter
func New(opts Options) (Reporter, error) {
url := cmp.Or(os.Getenv("OPA_TELEMETRY_SERVICE_URL"), ExternalServiceURL)
// New returns an instance of the Checker
func New(opts Options) (Checker, error) {
url := os.Getenv("OPA_VERSION_CHECK_SERVICE_URL")
if url == "" {
url = ExternalServiceURL
}
// Set a generic User-Agent to avoid sending version/platform information about the user's OPA instance.
// This ensures we only retrieve version information without transmitting any identifying data.
restConfig := fmt.Appendf(nil, `{
"url": %q,
"headers": {
"User-Agent": "OPA-Version-Checker"
}
}`, url)
client, err := rest.New(restConfig, map[string]*keys.Config{}, rest.Logger(opts.Logger))
if err != nil {
return nil, err
}
r := GHVersionCollector{client: client}
// heap_usage_bytes is always present, so register it unconditionally
r.RegisterGatherer("heap_usage_bytes", readRuntimeMemStats)
r := GitHubVersionChecker{client: client}
return &r, nil
}
// SendReport sends the telemetry report which includes information such as the OPA version, current memory usage to
// the external service
func (r *GHVersionCollector) SendReport(ctx context.Context) (*DataResponse, error) {
// LatestVersion queries the GitHub API to check for the latest OPA release version
func (r *GitHubVersionChecker) LatestVersion(ctx context.Context) (*DataResponse, error) {
rCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
@@ -115,12 +117,12 @@ func (r *GHVersionCollector) SendReport(ctx context.Context) (*DataResponse, err
switch resp.StatusCode {
case http.StatusOK:
if resp.Body != nil {
var result GHResponse
var result GitHubRelease
err := json.NewDecoder(resp.Body).Decode(&result)
if err != nil {
return nil, err
}
return createDataResponse(result)
return createReleaseInfo(result)
}
return nil, nil
default:
@@ -128,7 +130,7 @@ func (r *GHVersionCollector) SendReport(ctx context.Context) (*DataResponse, err
}
}
func createDataResponse(ghResp GHResponse) (*DataResponse, error) {
func createReleaseInfo(ghResp GitHubRelease) (*DataResponse, error) {
if ghResp.TagName == "" {
return nil, errors.New("server response does not contain tag_name")
}
@@ -168,7 +170,7 @@ func createDataResponse(ghResp GHResponse) (*DataResponse, error) {
}, nil
}
func (*GHVersionCollector) RegisterGatherer(_ string, _ Gatherer) {
func (*GitHubVersionChecker) RegisterGatherer(_ string, _ Gatherer) {
// no-op for this implementation
}
@@ -206,9 +208,3 @@ func (dr *DataResponse) Pretty() string {
return strings.Join(lines, "\n")
}
func readRuntimeMemStats(_ context.Context) (any, error) {
var m runtime.MemStats
runtime.ReadMemStats(&m)
return strconv.FormatUint(m.Alloc, 10), nil
}
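
For context, a fragment sketching how an in-tree caller might use the renamed API (versioncheck is an internal package, so this cannot be imported from outside OPA; logging.NewNoOpLogger is assumed from OPA's logging package, and ctx is an existing context.Context):

```go
// Sketch: query GitHub for the latest release and print the summary.
checker, err := versioncheck.New(versioncheck.Options{Logger: logging.NewNoOpLogger()})
if err != nil {
	return err
}
resp, err := checker.LatestVersion(ctx)
if err != nil || resp == nil {
	return err
}
fmt.Println(resp.Pretty())
```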

View File

@@ -408,6 +408,14 @@ func Store(s storage.Store) func(r *Rego) {
return v1.Store(s)
}
// Data returns an argument that sets the Rego data document. Data should be
// a map representing the data document. This is a simpler alternative to
// using Store with inmem.NewFromObject for cases where an in-memory store
// with static data is sufficient.
func Data(x map[string]any) func(r *Rego) {
return v1.Data(x)
}
// StoreReadAST returns an argument that sets whether the store should eagerly convert data to AST values.
//
// Only applicable when no store has been set on the Rego object through the Store option.
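
A minimal sketch of how the new Data option could be used. The query and data values are made up for illustration; rego.New, rego.Query, and Eval are the package's existing surface.

```go
package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/rego"
)

func main() {
	r := rego.New(
		rego.Query("data.roles.admin"),
		// Static data document, no explicit inmem store required.
		rego.Data(map[string]any{
			"roles": map[string]any{"admin": []any{"alice"}},
		}),
	)
	rs, err := r.Eval(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(rs[0].Expressions[0].Value) // [alice]
}
```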

View File

@@ -95,6 +95,7 @@ var DefaultBuiltins = [...]*Builtin{
// Arrays
ArrayConcat,
ArrayFlatten,
ArraySlice,
ArrayReverse,
@@ -893,6 +894,18 @@ var ArrayConcat = &Builtin{
CanSkipBctx: true,
}
var ArrayFlatten = &Builtin{
Name: "array.flatten",
Description: "Non-recursively unpacks array items in arr into the flattened array. Other types are appended as-is.",
Decl: types.NewFunction(
types.Args(
types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be flattened"),
),
types.Named("flattened", types.NewArray(nil, types.A)).Description("array flattened one level"),
),
CanSkipBctx: true,
}
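
A hedged example of what the new builtin evaluates to, expressed through the Go rego API like the sketch earlier on this page (imports omitted, and it assumes the accompanying topdown implementation ships in this bump); the expected value follows the one-level-flattening description above.

```go
// array.flatten unpacks exactly one level of nesting; non-arrays pass through.
r := rego.New(rego.Query(`array.flatten([[1, 2], [3, [4]], 5])`))
rs, _ := r.Eval(context.Background())
fmt.Println(rs[0].Expressions[0].Value) // flattened one level: [1 2 3 [4] 5]
```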
var ArraySlice = &Builtin{
Name: "array.slice",
Description: "Returns a slice of a given array. If `start` is greater or equal than `stop`, `slice` is `[]`.",
@@ -1823,7 +1836,8 @@ var ObjectKeys = &Builtin{
/*
* Encoding
*/
var encoding = category("encoding")
// Not using 'encoding' to avoid having to alias stdlib "encoding" imports
var catEncoding = category("encoding")
var JSONMarshal = &Builtin{
Name: "json.marshal",
@@ -1834,7 +1848,7 @@ var JSONMarshal = &Builtin{
),
types.Named("y", types.S).Description("the JSON string representation of `x`"),
),
Categories: encoding,
Categories: catEncoding,
CanSkipBctx: true,
}
@@ -1856,7 +1870,7 @@ var JSONMarshalWithOptions = &Builtin{
),
types.Named("y", types.S).Description("the JSON string representation of `x`, with configured prefix/indent string(s) as appropriate"),
),
Categories: encoding,
Categories: catEncoding,
CanSkipBctx: true,
}
@@ -1869,7 +1883,7 @@ var JSONUnmarshal = &Builtin{
),
types.Named("y", types.A).Description("the term deserialized from `x`"),
),
Categories: encoding,
Categories: catEncoding,
CanSkipBctx: true,
}
@@ -1882,7 +1896,7 @@ var JSONIsValid = &Builtin{
),
types.Named("result", types.B).Description("`true` if `x` is valid JSON, `false` otherwise"),
),
Categories: encoding,
Categories: catEncoding,
CanSkipBctx: true,
}
@@ -1895,7 +1909,7 @@ var Base64Encode = &Builtin{
),
types.Named("y", types.S).Description("base64 serialization of `x`"),
),
Categories: encoding,
Categories: catEncoding,
CanSkipBctx: true,
}
@@ -1908,7 +1922,7 @@ var Base64Decode = &Builtin{
),
types.Named("y", types.S).Description("base64 deserialization of `x`"),
),
Categories: encoding,
Categories: catEncoding,
CanSkipBctx: true,
}
@@ -1921,7 +1935,7 @@ var Base64IsValid = &Builtin{
),
types.Named("result", types.B).Description("`true` if `x` is valid base64 encoded value, `false` otherwise"),
),
Categories: encoding,
Categories: catEncoding,
CanSkipBctx: true,
}
@@ -1934,7 +1948,7 @@ var Base64UrlEncode = &Builtin{
),
types.Named("y", types.S).Description("base64url serialization of `x`"),
),
Categories: encoding,
Categories: catEncoding,
CanSkipBctx: true,
}
@@ -1947,7 +1961,7 @@ var Base64UrlEncodeNoPad = &Builtin{
),
types.Named("y", types.S).Description("base64url serialization of `x`"),
),
Categories: encoding,
Categories: catEncoding,
CanSkipBctx: true,
}
@@ -1960,7 +1974,7 @@ var Base64UrlDecode = &Builtin{
),
types.Named("y", types.S).Description("base64url deserialization of `x`"),
),
Categories: encoding,
Categories: catEncoding,
CanSkipBctx: true,
}
@@ -1973,7 +1987,7 @@ var URLQueryDecode = &Builtin{
),
types.Named("y", types.S).Description("URL-encoding deserialization of `x`"),
),
Categories: encoding,
Categories: catEncoding,
CanSkipBctx: true,
}
@@ -1986,7 +2000,7 @@ var URLQueryEncode = &Builtin{
),
types.Named("y", types.S).Description("URL-encoding serialization of `x`"),
),
Categories: encoding,
Categories: catEncoding,
CanSkipBctx: true,
}
@@ -2010,7 +2024,7 @@ var URLQueryEncodeObject = &Builtin{
),
types.Named("y", types.S).Description("the URL-encoded serialization of `object`"),
),
Categories: encoding,
Categories: catEncoding,
CanSkipBctx: true,
}
@@ -2025,7 +2039,7 @@ var URLQueryDecodeObject = &Builtin{
types.S,
types.NewArray(nil, types.S)))).Description("the resulting object"),
),
Categories: encoding,
Categories: catEncoding,
CanSkipBctx: true,
}
@@ -2038,7 +2052,7 @@ var YAMLMarshal = &Builtin{
),
types.Named("y", types.S).Description("the YAML string representation of `x`"),
),
Categories: encoding,
Categories: catEncoding,
CanSkipBctx: true,
}
@@ -2051,7 +2065,7 @@ var YAMLUnmarshal = &Builtin{
),
types.Named("y", types.A).Description("the term deserialized from `x`"),
),
Categories: encoding,
Categories: catEncoding,
CanSkipBctx: true,
}
@@ -2065,7 +2079,7 @@ var YAMLIsValid = &Builtin{
),
types.Named("result", types.B).Description("`true` if `x` is valid YAML, `false` otherwise"),
),
Categories: encoding,
Categories: catEncoding,
CanSkipBctx: true,
}
@@ -2078,7 +2092,7 @@ var HexEncode = &Builtin{
),
types.Named("y", types.S).Description("serialization of `x` using hex-encoding"),
),
Categories: encoding,
Categories: catEncoding,
CanSkipBctx: true,
}
@@ -2091,7 +2105,7 @@ var HexDecode = &Builtin{
),
types.Named("y", types.S).Description("deserialized from `x`"),
),
Categories: encoding,
Categories: catEncoding,
CanSkipBctx: true,
}

View File

@@ -6,6 +6,7 @@ package ast
import (
"fmt"
"regexp"
"slices"
"strings"
@@ -387,10 +388,12 @@ func (tc *typeChecker) checkExprBuiltin(env *TypeEnv, expr *Expr) *Error {
return NewError(TypeErr, expr.Location, "undefined function %v", name)
}
// check if the expression refers to a function that contains an error
_, ok := tpe.(types.Any)
if ok {
return nil
if t, ok := tpe.(types.Any); ok {
// A types.Any with len(0) is created by using types.A; this represents a potential non-local reference.
// This is the exception when checking if the type represents a function
if len(t) == 0 {
return nil
}
}
ftpe, ok := tpe.(*types.Function)
@@ -1087,7 +1090,21 @@ func newRefErrInvalid(loc *Location, ref Ref, idx int, have, want types.Type, on
}
func newRefErrUnsupported(loc *Location, ref Ref, idx int, have types.Type) *Error {
err := newRefError(loc, ref)
var err *Error
switch have.(type) {
case *types.Function:
var function string
// drop any trailing references to unidentified parameters (e.g. __local1__)
if match, err := regexp.MatchString(`__local[0-9]+__`, ref[len(ref)-1].Value.String()); err == nil && match {
function = ref[:len(ref)-1].String()
} else {
function = ref.String()
}
err = NewError(TypeErr, loc, "function %s used as reference, not called", function)
default:
err = newRefError(loc, ref)
}
err.Details = &RefErrUnsupportedDetail{
Ref: ref,
Pos: idx,

View File

@@ -162,6 +162,8 @@ func (env *TypeEnv) getRefFallback(ref Ref) types.Type {
}
if RootDocumentNames.Contains(ref[0]) {
// types.A is an empty types.Any
// this is used to represent a potential non-local reference
return types.A
}

View File

@@ -8,6 +8,7 @@ import (
"fmt"
astJSON "github.com/open-policy-agent/opa/v1/ast/json"
"github.com/open-policy-agent/opa/v1/util"
)
// Location records a position in source code
@@ -28,10 +29,10 @@ func NewLocation(text []byte, file string, row int, col int) *Location {
// Equal checks if two locations are equal to each other.
func (loc *Location) Equal(other *Location) bool {
return bytes.Equal(loc.Text, other.Text) &&
loc.File == other.File &&
return loc.File == other.File &&
loc.Row == other.Row &&
loc.Col == other.Col
loc.Col == other.Col &&
bytes.Equal(loc.Text, other.Text)
}
// Errorf returns a new error value with a message formatted to include the location
@@ -57,13 +58,35 @@ func (loc *Location) Format(f string, a ...any) string {
}
func (loc *Location) String() string {
if len(loc.File) > 0 {
return fmt.Sprintf("%v:%v", loc.File, loc.Row)
buf, _ := loc.AppendText(make([]byte, 0, loc.StringLength()))
return util.ByteSliceToString(buf)
}
func (loc *Location) AppendText(buf []byte) ([]byte, error) {
if loc != nil {
switch {
case len(loc.File) > 0:
buf = util.AppendInt(append(append(buf, loc.File...), ':'), loc.Row)
case len(loc.Text) > 0:
buf = append(buf, loc.Text...)
default:
buf = util.AppendInt(append(util.AppendInt(buf, loc.Row), ':'), loc.Col)
}
}
if len(loc.Text) > 0 {
return string(loc.Text)
return buf, nil
}
func (loc *Location) StringLength() (n int) {
if loc != nil {
if l := len(loc.File); l > 0 {
n = l + 1 + util.NumDigitsInt(loc.Row)
} else if l := len(loc.Text); l > 0 {
n = l
} else {
n = util.NumDigitsInt(loc.Row) + 1 + util.NumDigitsInt(loc.Col)
}
}
return fmt.Sprintf("%v:%v", loc.Row, loc.Col)
return n
}
// Compare returns -1, 0, or 1 to indicate if this loc is less than, equal to,
@@ -71,7 +94,7 @@ func (loc *Location) String() string {
// column of the Location (but not on the text.) Nil locations are greater than
// non-nil locations.
func (loc *Location) Compare(other *Location) int {
if loc == nil && other == nil {
if loc == other {
return 0
} else if loc == nil {
return 1

View File

@@ -20,7 +20,7 @@ import (
"strings"
"unicode/utf8"
"gopkg.in/yaml.v3"
"go.yaml.in/yaml/v3"
"github.com/open-policy-agent/opa/v1/ast/internal/scanner"
"github.com/open-policy-agent/opa/v1/ast/internal/tokens"
@@ -71,6 +71,10 @@ var (
// copy them to the call term only when needed
memberWithKeyRef = MemberWithKey.Ref()
memberRef = Member.Ref()
newlineBytes = []byte{'\n'}
metadataBytes = []byte("METADATA")
metadataParserPool = util.NewSyncPool[metadataParser]()
)
func (v RegoVersion) Int() int {
@@ -540,44 +544,46 @@ func (p *Parser) parseAnnotations(stmts []Statement) []Statement {
return stmts
}
func parseAnnotations(comments []*Comment) ([]*Annotations, Errors) {
func parseAnnotations(comments []*Comment) (stmts []*Annotations, errs Errors) {
numBlocks := CountFunc(comments, isMetadataComment)
if numBlocks == 0 {
return nil, nil
}
var hint = []byte("METADATA")
var curr *metadataParser
var blocks []*metadataParser
stmts = make([]*Annotations, 0, numBlocks)
mdp := metadataParserPool.Get()
if mdp.buf == nil {
mdp.buf = &bytes.Buffer{}
}
for i := range comments {
if curr != nil {
if comments[i].Location.Row == comments[i-1].Location.Row+1 && comments[i].Location.Col == 1 {
curr.Append(comments[i])
continue
if isMetadataComment(comments[i]) { // scan until end of block
mdp.Reset(comments[i].Location)
for i++; i < len(comments) && !blockBuster(comments[i], comments[i-1]); i++ {
mdp.Append(comments[i])
}
if a, err := mdp.Parse(); err != nil {
errs = append(errs, &Error{Code: ParseErr, Message: err.Error(), Location: mdp.loc})
} else {
stmts = append(stmts, a)
}
curr = nil
}
if bytes.HasPrefix(bytes.TrimSpace(comments[i].Text), hint) {
curr = newMetadataParser(comments[i].Location)
blocks = append(blocks, curr)
}
}
stmts := make([]*Annotations, 0, len(blocks))
var errs Errors
for _, b := range blocks {
if a, err := b.Parse(); err != nil {
errs = append(errs, &Error{
Code: ParseErr,
Message: err.Error(),
Location: b.loc,
})
} else {
stmts = append(stmts, a)
}
}
metadataParserPool.Put(mdp)
return stmts, errs
}
func isMetadataComment(c *Comment) bool {
return c.Location.Col == 1 && bytes.HasPrefix(bytes.TrimSpace(c.Text), metadataBytes)
}
func blockBuster(curr, prev *Comment) bool { // or endOfBlock, but the name was too good to pass up
return curr.Location.Col != 1 || curr.Location.Row-1 != prev.Location.Row
}
func (p *Parser) parsePackage() *Package {
if p.s.tok != tokens.Package {
return nil
@@ -2455,7 +2461,8 @@ func (p *Parser) parseTermPairList(end tokens.Token, r [][2]*Term) [][2]*Term {
func (p *Parser) parseTermOp(values ...tokens.Token) *Term {
if slices.Contains(values, p.s.tok) {
r := RefTerm(VarTerm(p.s.tok.String()).SetLocation(p.s.Loc())).SetLocation(p.s.Loc())
loc := p.s.Loc()
r := RefTerm(VarTerm(p.s.tok.String()).SetLocation(loc)).SetLocation(loc)
p.scan()
return r
}
@@ -2465,11 +2472,12 @@ func (p *Parser) parseTermOp(values ...tokens.Token) *Term {
func (p *Parser) parseTermOpName(ref Ref, values ...tokens.Token) *Term {
if slices.Contains(values, p.s.tok) {
cp := ref.Copy()
loc := p.s.Loc()
for _, r := range cp {
r.SetLocation(p.s.Loc())
r.SetLocation(loc)
}
t := RefTerm(cp...)
t.SetLocation(p.s.Loc())
t.SetLocation(loc)
p.scan()
return t
}
@@ -2743,13 +2751,17 @@ type rawAnnotation struct {
}
type metadataParser struct {
buf *bytes.Buffer
comments []*Comment
buf *bytes.Buffer
loc *location.Location
}
func newMetadataParser(loc *Location) *metadataParser {
return &metadataParser{loc: loc, buf: bytes.NewBuffer(nil)}
func (b *metadataParser) Reset(loc *location.Location) {
b.comments = b.comments[:0]
b.loc = loc
if b.buf != nil {
b.buf.Reset()
}
}
func (b *metadataParser) Append(c *Comment) {
@@ -2760,14 +2772,12 @@ func (b *metadataParser) Append(c *Comment) {
var yamlLineErrRegex = regexp.MustCompile(`^yaml:(?: unmarshal errors:[\n\s]*)? line ([[:digit:]]+):`)
func (b *metadataParser) Parse() (*Annotations, error) {
var raw rawAnnotation
func (b *metadataParser) Parse() (result *Annotations, err error) {
if len(bytes.TrimSpace(b.buf.Bytes())) == 0 {
return nil, errors.New("expected METADATA block, found whitespace")
}
var raw rawAnnotation
if err := yaml.Unmarshal(b.buf.Bytes(), &raw); err != nil {
var comment *Comment
match := yamlLineErrRegex.FindStringSubmatch(err.Error())
@@ -2790,13 +2800,14 @@ func (b *metadataParser) Parse() (*Annotations, error) {
return nil, augmentYamlError(err, b.comments)
}
var result Annotations
result.comments = b.comments
result.Scope = raw.Scope
result.Entrypoint = raw.Entrypoint
result.Title = raw.Title
result.Description = raw.Description
result.Organizations = raw.Organizations
result = &Annotations{
comments: b.comments,
Scope: raw.Scope,
Entrypoint: raw.Entrypoint,
Title: raw.Title,
Description: raw.Description,
Organizations: raw.Organizations,
}
for _, v := range raw.RelatedResources {
rr, err := parseRelatedResource(v)
@@ -2878,32 +2889,30 @@ func (b *metadataParser) Parse() (*Annotations, error) {
result.Authors = append(result.Authors, author)
}
result.Custom = make(map[string]any)
for k, v := range raw.Custom {
val, err := convertYAMLMapKeyTypes(v, nil)
if err != nil {
return nil, err
if raw.Custom != nil {
result.Custom = make(map[string]any, len(raw.Custom))
for k, v := range raw.Custom {
if result.Custom[k], err = convertYAMLMapKeyTypes(v, nil); err != nil {
return nil, err
}
}
result.Custom[k] = val
}
result.Location = b.loc
// recreate original text of entire metadata block for location text attribute
sb := strings.Builder{}
sb.WriteString("# METADATA\n")
original := bytes.TrimSuffix(b.buf.Bytes(), newlineBytes)
numLines := bytes.Count(original, newlineBytes) + 1
preAlloc := len("# METADATA\n") + len(original) + numLines*2 // '# ' prefix added per line
lines := bytes.Split(b.buf.Bytes(), []byte{'\n'})
result.Location.Text = append(make([]byte, 0, preAlloc), "# METADATA\n"...)
for _, line := range lines[:len(lines)-1] {
sb.WriteString("# ")
sb.Write(line)
sb.WriteByte('\n')
for line := range bytes.SplitAfterSeq(original, newlineBytes) {
result.Location.Text = append(result.Location.Text, "# "...)
result.Location.Text = append(result.Location.Text, line...)
}
result.Location.Text = []byte(strings.TrimSuffix(sb.String(), "\n"))
return &result, nil
return result, err
}
// augmentYamlError augments a YAML error with hints intended to help the user figure out the cause of an otherwise
@@ -2912,30 +2921,29 @@ func (b *metadataParser) Parse() (*Annotations, error) {
func augmentYamlError(err error, comments []*Comment) error {
// Adding hints for when key/value ':' separator isn't suffixed with a legal YAML space symbol
for _, comment := range comments {
txt := string(comment.Text)
parts := strings.Split(txt, ":")
if len(parts) > 1 {
parts = parts[1:]
var invalidSpaces []string
for partIndex, part := range parts {
if len(part) == 0 && partIndex == len(parts)-1 {
invalidSpaces = []string{}
break
}
if bytes.IndexByte(comment.Text, ':') == -1 {
continue
}
parts := bytes.Split(comment.Text, []byte{':'})[1:]
r, _ := utf8.DecodeRuneInString(part)
if r == ' ' || r == '\t' {
invalidSpaces = []string{}
break
}
var invalidSpaces []string
for partIndex, part := range parts {
if len(part) == 0 && partIndex == len(parts)-1 {
break
}
invalidSpaces = append(invalidSpaces, fmt.Sprintf("%+q", r))
}
if len(invalidSpaces) > 0 {
err = fmt.Errorf(
"%s\n Hint: on line %d, symbol(s) %v immediately following a key/value separator ':' is not a legal yaml space character",
err.Error(), comment.Location.Row, invalidSpaces)
r, _ := utf8.DecodeRune(part)
if r == ' ' || r == '\t' {
break
}
invalidSpaces = append(invalidSpaces, fmt.Sprintf("%+q", r))
}
if len(invalidSpaces) > 0 {
err = fmt.Errorf(
"%s\n Hint: on line %d, symbol(s) %v immediately following a"+
" key/value separator ':' is not a legal yaml space character",
err.Error(), comment.Location.Row, invalidSpaces)
}
}
return err
@@ -3053,7 +3061,7 @@ func parseAuthorString(s string) (*AuthorAnnotation, error) {
if len(trailing) >= len(emailPrefix)+len(emailSuffix) && strings.HasPrefix(trailing, emailPrefix) &&
strings.HasSuffix(trailing, emailSuffix) {
email = trailing[len(emailPrefix):]
email = email[0 : len(email)-len(emailSuffix)]
email = email[:len(email)-len(emailSuffix)]
namePartCount -= 1
}

View File

@@ -4,6 +4,7 @@
package ast
import (
"encoding"
"strings"
"sync"
)
@@ -83,3 +84,16 @@ func BuiltinNameFromRef(ref Ref) (string, bool) {
return "", false
}
func AppendDelimeted[T encoding.TextAppender](buf []byte, appenders []T, delim string) ([]byte, error) {
for i, item := range appenders {
if i > 0 {
buf = append(buf, delim...)
}
var err error
if buf, err = item.AppendText(buf); err != nil {
return nil, err
}
}
return buf, nil
}
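
The String/AppendText/StringLength changes in this file and in location.go all follow the same pattern that AppendDelimeted builds on: implement Go 1.24's encoding.TextAppender, estimate the final length, preallocate once, and append. A standalone sketch of that pattern; the pos type is invented for illustration.

```go
package main

import (
	"fmt"
	"strconv"
)

// pos is a stand-in for types like Location that now render via AppendText.
type pos struct{ row, col int }

// AppendText satisfies encoding.TextAppender (Go 1.24+).
func (p pos) AppendText(buf []byte) ([]byte, error) {
	buf = strconv.AppendInt(buf, int64(p.row), 10)
	buf = append(buf, ':')
	return strconv.AppendInt(buf, int64(p.col), 10), nil
}

// stringLength is a cheap size estimate used to allocate the buffer once.
func (p pos) stringLength() int {
	return len(strconv.Itoa(p.row)) + 1 + len(strconv.Itoa(p.col))
}

func (p pos) String() string {
	buf, _ := p.AppendText(make([]byte, 0, p.stringLength()))
	return string(buf)
}

func main() {
	fmt.Println(pos{row: 12, col: 3}) // 12:3
}
```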

View File

@@ -371,42 +371,8 @@ func (mod *Module) Equal(other *Module) bool {
}
func (mod *Module) String() string {
byNode := map[Node][]*Annotations{}
for _, a := range mod.Annotations {
byNode[a.node] = append(byNode[a.node], a)
}
appendAnnotationStrings := func(buf []string, node Node) []string {
if as, ok := byNode[node]; ok {
for i := range as {
buf = append(buf,
"# METADATA",
"# "+as[i].String(),
)
}
}
return buf
}
buf := []string{}
buf = appendAnnotationStrings(buf, mod.Package)
buf = append(buf, mod.Package.String())
if len(mod.Imports) > 0 {
buf = append(buf, "")
for _, imp := range mod.Imports {
buf = appendAnnotationStrings(buf, imp)
buf = append(buf, imp.String())
}
}
if len(mod.Rules) > 0 {
buf = append(buf, "")
for _, rule := range mod.Rules {
buf = appendAnnotationStrings(buf, rule)
buf = append(buf, rule.stringWithOpts(toStringOpts{regoVersion: mod.regoVersion}))
}
}
return strings.Join(buf, "\n")
buf, _ := mod.AppendText(make([]byte, 0, mod.StringLength()))
return util.ByteSliceToString(buf)
}
// RuleSet returns a RuleSet containing named rules in the mod.
@@ -475,7 +441,8 @@ func (c *Comment) SetLoc(loc *Location) {
}
func (c *Comment) String() string {
return "#" + string(c.Text)
buf, _ := c.AppendText(make([]byte, 0, c.StringLength()))
return util.ByteSliceToString(buf)
}
// Copy returns a deep copy of c.
@@ -525,16 +492,8 @@ func (pkg *Package) SetLoc(loc *Location) {
}
func (pkg *Package) String() string {
if pkg == nil {
return "<illegal nil package>"
} else if len(pkg.Path) <= 1 {
return fmt.Sprintf("package <illegal path %q>", pkg.Path)
}
// Omit head as all packages have the DefaultRootDocument prepended at parse time.
path := make(Ref, len(pkg.Path)-1)
path[0] = VarTerm(string(pkg.Path[1].Value.(String)))
copy(path[1:], pkg.Path[2:])
return fmt.Sprintf("package %v", path)
buf, _ := pkg.AppendText(make([]byte, 0, pkg.StringLength()))
return util.ByteSliceToString(buf)
}
func (pkg *Package) MarshalJSON() ([]byte, error) {
@@ -637,11 +596,8 @@ func (imp *Import) Name() Var {
}
func (imp *Import) String() string {
buf := []string{"import", imp.Path.String()}
if len(imp.Alias) > 0 {
buf = append(buf, "as", imp.Alias.String())
}
return strings.Join(buf, " ")
buf, _ := imp.AppendText(make([]byte, 0, imp.StringLength()))
return util.ByteSliceToString(buf)
}
func (imp *Import) MarshalJSON() ([]byte, error) {
@@ -752,11 +708,12 @@ func (rule *Rule) Ref() Ref {
}
func (rule *Rule) String() string {
regoVersion := DefaultRegoVersion
opts := toStringOpts{}
if rule.Module != nil {
regoVersion = rule.Module.RegoVersion()
opts.regoVersion = rule.Module.RegoVersion()
}
return rule.stringWithOpts(toStringOpts{regoVersion: regoVersion})
buf, _ := rule.appendWithOpts(opts, make([]byte, 0, rule.stringLengthWithOpts(opts)))
return util.ByteSliceToString(buf)
}
type toStringOpts struct {
@@ -770,80 +727,46 @@ func (o toStringOpts) RegoVersion() RegoVersion {
return o.regoVersion
}
func (rule *Rule) stringWithOpts(opts toStringOpts) string {
buf := []string{}
if rule.Default {
buf = append(buf, "default")
}
buf = append(buf, rule.Head.stringWithOpts(opts))
if !rule.Default {
switch opts.RegoVersion() {
case RegoV1, RegoV0CompatV1:
buf = append(buf, "if")
}
buf = append(buf, "{", rule.Body.String(), "}")
}
if rule.Else != nil {
buf = append(buf, rule.Else.elseString(opts))
}
return strings.Join(buf, " ")
}
func (rule *Rule) isFunction() bool {
return len(rule.Head.Args) > 0
}
// ruleJSON is used for JSON serialization of Rule to avoid map allocation overhead.
// Field order is alphabetical to match previous map-based output.
type ruleJSON struct {
Annotations []*Annotations `json:"annotations,omitempty"`
Body Body `json:"body"`
Default bool `json:"default,omitempty"`
Else *Rule `json:"else,omitempty"`
Head *Head `json:"head"`
Location *Location `json:"location,omitempty"`
}
func (rule *Rule) MarshalJSON() ([]byte, error) {
data := map[string]any{
"head": rule.Head,
"body": rule.Body,
data := ruleJSON{
Head: rule.Head,
Body: rule.Body,
}
if rule.Default {
data["default"] = true
data.Default = true
}
if rule.Else != nil {
data["else"] = rule.Else
data.Else = rule.Else
}
if astJSON.GetOptions().MarshalOptions.IncludeLocation.Rule {
if rule.Location != nil {
data["location"] = rule.Location
}
data.Location = rule.Location
}
if len(rule.Annotations) != 0 {
data["annotations"] = rule.Annotations
data.Annotations = rule.Annotations
}
return json.Marshal(data)
}
func (rule *Rule) elseString(opts toStringOpts) string {
var buf []string
buf = append(buf, "else")
value := rule.Head.Value
if value != nil {
buf = append(buf, "=", value.String())
}
switch opts.RegoVersion() {
case RegoV1, RegoV0CompatV1:
buf = append(buf, "if")
}
buf = append(buf, "{", rule.Body.String(), "}")
if rule.Else != nil {
buf = append(buf, rule.Else.elseString(opts))
}
return strings.Join(buf, " ")
}
// NewHead returns a new Head object. If args are provided, the first will be
// used for the key and the second will be used for the value.
func NewHead(name Var, args ...*Term) *Head {
@@ -1002,37 +925,8 @@ func (head *Head) String() string {
}
func (head *Head) stringWithOpts(opts toStringOpts) string {
buf := strings.Builder{}
buf.WriteString(head.Ref().String())
containsAdded := false
switch {
case len(head.Args) != 0:
buf.WriteString(head.Args.String())
case len(head.Reference) == 1 && head.Key != nil:
switch opts.RegoVersion() {
case RegoV0:
buf.WriteRune('[')
buf.WriteString(head.Key.String())
buf.WriteRune(']')
default:
containsAdded = true
buf.WriteString(" contains ")
buf.WriteString(head.Key.String())
}
}
if head.Value != nil {
if head.Assign {
buf.WriteString(" := ")
} else {
buf.WriteString(" = ")
}
buf.WriteString(head.Value.String())
} else if !containsAdded && head.Name == "" && head.Key != nil {
buf.WriteString(" contains ")
buf.WriteString(head.Key.String())
}
return buf.String()
buf, _ := head.appendWithOpts(opts, make([]byte, 0, head.stringLengthWithOpts(opts)))
return util.ByteSliceToString(buf)
}
func (head *Head) MarshalJSON() ([]byte, error) {
@@ -1103,11 +997,8 @@ func (a Args) Copy() Args {
}
func (a Args) String() string {
buf := make([]string, 0, len(a))
for _, t := range a {
buf = append(buf, t.String())
}
return "(" + strings.Join(buf, ", ") + ")"
buf, _ := a.AppendText(make([]byte, 0, a.StringLength()))
return util.ByteSliceToString(buf)
}
// Loc returns the Location of a.
@@ -1240,11 +1131,12 @@ func (body Body) SetLoc(loc *Location) {
}
func (body Body) String() string {
buf := make([]string, 0, len(body))
for _, v := range body {
buf = append(buf, v.String())
}
return strings.Join(buf, "; ")
buf, _ := body.AppendText(make([]byte, 0, body.StringLength()))
return util.ByteSliceToString(buf)
}
func (body Body) AppendText(buf []byte) ([]byte, error) {
return AppendDelimeted(buf, body, "; ")
}
// Vars returns a VarSet containing variables in body. The params can be set to
@@ -1555,50 +1447,41 @@ func (expr *Expr) SetLoc(loc *Location) {
}
func (expr *Expr) String() string {
buf := make([]string, 0, 2+len(expr.With))
if expr.Negated {
buf = append(buf, "not")
}
switch t := expr.Terms.(type) {
case []*Term:
if expr.IsEquality() && validEqAssignArgCount(expr) {
buf = append(buf, fmt.Sprintf("%v %v %v", t[1], Equality.Infix, t[2]))
} else {
buf = append(buf, Call(t).String())
}
case fmt.Stringer:
buf = append(buf, t.String())
}
buf, _ := expr.AppendText(make([]byte, 0, expr.StringLength()))
return util.ByteSliceToString(buf)
}
for i := range expr.With {
buf = append(buf, expr.With[i].String())
}
return strings.Join(buf, " ")
// exprJSON is used for JSON serialization of Expr to avoid map allocation overhead.
// Field order is alphabetical to match previous map-based output.
type exprJSON struct {
Generated bool `json:"generated,omitempty"`
Index int `json:"index"`
Location *Location `json:"location,omitempty"`
Negated bool `json:"negated,omitempty"`
Terms any `json:"terms"`
With []*With `json:"with,omitempty"`
}
func (expr *Expr) MarshalJSON() ([]byte, error) {
data := map[string]any{
"terms": expr.Terms,
"index": expr.Index,
data := exprJSON{
Index: expr.Index,
Terms: expr.Terms,
}
if len(expr.With) > 0 {
data["with"] = expr.With
data.With = expr.With
}
if expr.Generated {
data["generated"] = true
data.Generated = true
}
if expr.Negated {
data["negated"] = true
data.Negated = true
}
if astJSON.GetOptions().MarshalOptions.IncludeLocation.Expr {
if expr.Location != nil {
data["location"] = expr.Location
}
data.Location = expr.Location
}
return json.Marshal(data)
@@ -1668,17 +1551,8 @@ func visitCogeneratedExprs(expr *Expr, f func(*Expr) bool) {
}
func (d *SomeDecl) String() string {
if call, ok := d.Symbols[0].Value.(Call); ok {
if len(call) == 4 {
return "some " + call[1].String() + ", " + call[2].String() + " in " + call[3].String()
}
return "some " + call[1].String() + " in " + call[2].String()
}
buf := make([]string, len(d.Symbols))
for i := range buf {
buf[i] = d.Symbols[i].String()
}
return "some " + strings.Join(buf, ", ")
buf, _ := d.AppendText(make([]byte, 0, d.StringLength()))
return util.ByteSliceToString(buf)
}
// SetLoc sets the Location on d.
@@ -1797,7 +1671,8 @@ func (q *Every) MarshalJSON() ([]byte, error) {
}
func (w *With) String() string {
return "with " + w.Target.String() + " as " + w.Value.String()
buf, _ := w.AppendText(make([]byte, 0, w.StringLength()))
return util.ByteSliceToString(buf)
}
// Equal returns true if this With is equals the other With.
@@ -1854,16 +1729,22 @@ func (w *With) SetLoc(loc *Location) {
w.Location = loc
}
// withJSON is used for JSON serialization of With to avoid map allocation overhead.
// Field order is alphabetical to match previous map-based output.
type withJSON struct {
Location *Location `json:"location,omitempty"`
Target *Term `json:"target"`
Value *Term `json:"value"`
}
func (w *With) MarshalJSON() ([]byte, error) {
data := map[string]any{
"target": w.Target,
"value": w.Value,
data := withJSON{
Target: w.Target,
Value: w.Value,
}
if astJSON.GetOptions().MarshalOptions.IncludeLocation.With {
if w.Location != nil {
data["location"] = w.Location
}
data.Location = w.Location
}
return json.Marshal(data)

View File

@@ -0,0 +1,333 @@
package ast
import (
"encoding"
"fmt"
)
func (m *Module) AppendText(buf []byte) ([]byte, error) {
if m == nil {
return append(buf, "<nil module>"...), nil
}
var err error
// NOTE(anderseknert): this DOES allocate still, and while that's unfortunate,
// we'll be better off dealing with that when we have v2 JSON in the stdlib than
// doing manual JSON marshalling (and string length calculations) here.
for _, annotations := range m.Annotations {
// rule annotations are attached to rules, so only check for package scoped ones here
if annotations.Scope == "package" || annotations.Scope == "subpackages" {
buf = append(buf, "# METADATA\n# "...)
buf = append(buf, annotations.String()...)
buf = append(buf, '\n')
}
}
if buf, err = m.Package.AppendText(buf); err != nil {
return nil, err
}
buf = append(buf, '\n')
if len(m.Imports) > 0 {
for _, imp := range m.Imports {
buf = append(buf, '\n')
if buf, err = imp.AppendText(buf); err != nil {
return nil, err
}
}
buf = append(buf, '\n')
}
if len(m.Rules) > 0 {
for _, rule := range m.Rules {
buf = append(buf, '\n')
if buf, err = rule.appendWithOpts(toStringOpts{regoVersion: m.regoVersion}, buf); err != nil {
return nil, err
}
}
}
return buf, nil
}
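// Hypothetical caller of Module.AppendText (module source and function name are
// made up for illustration): pre-sizing the buffer from StringLength keeps the
// conversion to text to a single buffer allocation in the common case.
// assumes: import "github.com/open-policy-agent/opa/v1/ast"
func moduleText() (string, error) {
	m := ast.MustParseModule(`package example

allow if input.admin
`)
	buf, err := m.AppendText(make([]byte, 0, m.StringLength()))
	if err != nil {
		return "", err
	}
	return string(buf), nil
}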
func (pkg *Package) AppendText(buf []byte) ([]byte, error) {
var err error
if pkg == nil {
return append(buf, "<illegal nil package>"...), nil
}
if len(pkg.Path) <= 1 {
buf = append(buf, "package <illegal path \""...)
if buf, err = pkg.Path.AppendText(buf); err != nil {
return nil, err
}
return append(buf, "\">"...), nil
}
buf = append(buf, "package "...)
path := pkg.Path[1:] // omit "data"
if s, ok := path[0].Value.(String); ok {
buf = append(buf, s...) // first term should never be quoted
if len(path) == 1 {
return buf, nil
}
buf = append(buf, '.')
path = path[1:]
}
return path.AppendText(buf)
}
func (imp *Import) AppendText(buf []byte) ([]byte, error) {
buf = append(buf, "import "...)
var err error
if buf, err = imp.Path.AppendText(buf); err != nil {
return nil, err
}
if imp.Alias != "" {
buf = append(buf, ' ', 'a', 's', ' ')
buf = append(buf, imp.Alias...)
}
return buf, nil
}
func (r *Rule) AppendText(buf []byte) ([]byte, error) {
regoVersion := DefaultRegoVersion
if r.Module != nil {
regoVersion = r.Module.RegoVersion()
}
return r.appendWithOpts(toStringOpts{regoVersion: regoVersion}, buf)
}
func (r *Rule) appendWithOpts(opts toStringOpts, buf []byte) ([]byte, error) {
// See note in [Module.AppendText] regarding annotations.
for _, annotations := range r.Annotations {
buf = append(buf, "# METADATA\n# "...)
buf = append(buf, annotations.String()...)
buf = append(buf, '\n')
}
if r.Default {
buf = append(buf, "default "...)
}
var err error
if buf, err = r.Head.appendWithOpts(opts, buf); err != nil {
return nil, err
}
if !r.Default {
switch opts.RegoVersion() {
case RegoV1, RegoV0CompatV1:
buf = append(buf, " if { "...)
default:
buf = append(buf, " { "...)
}
if buf, err = r.Body.AppendText(buf); err != nil {
return nil, err
}
buf = append(buf, " }"...)
}
if r.Else != nil {
if buf, err = r.Else.appendElse(opts, buf); err != nil {
return nil, err
}
}
return buf, nil
}
func (r *Rule) appendElse(opts toStringOpts, buf []byte) ([]byte, error) {
buf = append(buf, " else "...)
var err error
if r.Head.Value != nil {
buf = append(buf, "= "...)
if buf, err = r.Head.Value.AppendText(buf); err != nil {
return nil, err
}
}
if v := opts.RegoVersion(); v == RegoV1 || v == RegoV0CompatV1 {
buf = append(buf, " if { "...)
} else {
buf = append(buf, " { "...)
}
if buf, err = r.Body.AppendText(buf); err != nil {
return nil, err
}
buf = append(buf, " }"...)
if r.Else != nil {
if buf, err = r.Else.appendElse(opts, buf); err != nil {
return nil, err
}
}
return buf, nil
}
func (h *Head) AppendText(buf []byte) ([]byte, error) {
return h.appendWithOpts(toStringOpts{}, buf)
}
func (h *Head) appendWithOpts(opts toStringOpts, buf []byte) ([]byte, error) {
var err error
if h.Reference == nil {
buf = append(buf, h.Name...)
} else {
if buf, err = h.Reference.AppendText(buf); err != nil {
return nil, err
}
}
containsAdded := false
switch {
case len(h.Args) != 0:
if buf, err = h.Args.AppendText(buf); err != nil {
return nil, err
}
case len(h.Reference) == 1 && h.Key != nil:
switch opts.RegoVersion() {
case RegoV0:
buf = append(buf, '[')
if buf, err = h.Key.AppendText(buf); err != nil {
return nil, err
}
buf = append(buf, ']')
default:
if buf, err = h.Key.AppendText(append(buf, " contains "...)); err != nil {
return nil, err
}
containsAdded = true
}
}
if h.Value != nil {
if h.Assign {
buf = append(buf, " := "...)
} else {
buf = append(buf, " = "...)
}
if buf, err = h.Value.AppendText(buf); err != nil {
return nil, err
}
} else if !containsAdded && h.Name == "" && h.Key != nil {
if buf, err = h.Key.AppendText(append(buf, " contains "...)); err != nil {
return nil, err
}
}
return buf, nil
}
func (a Args) AppendText(buf []byte) ([]byte, error) {
var err error
buf = append(buf, '(')
if buf, err = AppendDelimeted(buf, a, ", "); err != nil {
return nil, err
}
return append(buf, ')'), nil
}
func (expr *Expr) AppendText(buf []byte) ([]byte, error) {
if expr.Negated {
buf = append(buf, "not "...)
}
var err error
switch t := expr.Terms.(type) {
case []*Term:
if expr.IsEquality() && validEqAssignArgCount(expr) {
if buf, err = t[1].AppendText(buf); err != nil {
return nil, err
}
buf = append(append(append(buf, ' '), Equality.Infix...), ' ')
if buf, err = t[2].AppendText(buf); err != nil {
return nil, err
}
} else if buf, err = Call(t).AppendText(buf); err != nil {
return nil, err
}
case encoding.TextAppender:
if buf, err = t.AppendText(buf); err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("unsupported expr terms type: %T", expr.Terms)
}
if len(expr.With) > 0 {
buf = append(buf, ' ')
}
return AppendDelimeted(buf, expr.With, " ")
}
func (w *With) AppendText(buf []byte) ([]byte, error) {
buf = append(buf, "with "...)
var err error
if buf, err = w.Target.AppendText(buf); err != nil {
return nil, err
}
buf = append(buf, " as "...)
if buf, err = w.Value.AppendText(buf); err != nil {
return nil, err
}
return buf, nil
}
func (w *Every) AppendText(buf []byte) ([]byte, error) {
buf = append(buf, "every "...)
var err error
if w.Key != nil {
if buf, err = w.Key.AppendText(buf); err != nil {
return nil, err
}
buf = append(buf, ", "...)
}
if buf, err = w.Value.AppendText(buf); err == nil {
buf = append(buf, " in "...)
if buf, err = w.Domain.AppendText(buf); err == nil {
buf = append(buf, " { "...)
if buf, err = w.Body.AppendText(buf); err == nil {
buf = append(buf, " }"...)
}
}
}
return buf, err
}
func (d *SomeDecl) AppendText(buf []byte) ([]byte, error) {
var err error
buf = append(buf, "some "...)
if call, ok := d.Symbols[0].Value.(Call); ok {
if buf, err = call[1].AppendText(buf); err != nil {
return nil, err
}
if len(call) == 3 {
buf = append(buf, " in "...)
} else {
buf = append(buf, ", "...)
}
if buf, err = call[2].AppendText(buf); err != nil {
return nil, err
}
if len(call) == 4 {
buf = append(buf, " in "...)
if buf, err = call[3].AppendText(buf); err != nil {
return nil, err
}
}
return buf, nil
}
buf, err = AppendDelimeted(buf, d.Symbols, ", ")
return buf, err
}
func (c *Comment) AppendText(buf []byte) ([]byte, error) {
return append(append(buf, '#'), c.Text...), nil
}

View File

@@ -0,0 +1,15 @@
// Copyright 2026 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
package ast
// CountFunc counts the number of items in a slice S that satisfy predicate function f.
func CountFunc[T any, S ~[]T](items S, f func(T) bool) (n int) {
for i := range items {
if f(items[i]) {
n++
}
}
return n
}
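// Small usage sketch for CountFunc (the values are arbitrary): it counts the
// elements of a slice that satisfy a predicate.
// assumes: import "github.com/open-policy-agent/opa/v1/ast"
func countEvens() int {
	return ast.CountFunc([]int{1, 2, 3, 4, 5}, func(n int) bool { return n%2 == 0 }) // 2
}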

View File

@@ -0,0 +1,347 @@
package ast
import (
"fmt"
"unicode/utf8"
"github.com/open-policy-agent/opa/v1/util"
)
// StringLengther is an interface for types that can report their string length without
// actually constructing the string. This is useful for pre-allocating buffers, like those
// used in AppendText, strings.Builder, bytes.Buffer, etc.
type StringLengther interface {
StringLength() int
}
// TermSliceStringLength returns the total string length of the given terms, as reported
// by the [StringLengther.StringLength] method implementation of each term's [Value]. The
// delimLen value will be added between each term's length to account for a delimiter, or
// no delimiter if delimLen is 0.
// Implementation note: this function is optimized for inlining, and just meets the threshold
// for that. Don't change without making sure that's still the case.
func TermSliceStringLength(terms []*Term, delimLen int) (n int) {
for i := range terms {
n += terms[i].StringLength() + delimLen
}
return max(n-delimLen, 0)
}
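// Sketch of the pre-allocation pattern this file enables (the term literal is
// chosen for illustration): size a buffer from StringLength, then fill it with
// AppendText, so stringifying a value should not need to grow the buffer.
// assumes: import "github.com/open-policy-agent/opa/v1/ast"
func termText() string {
	t := ast.MustParseTerm(`{"users": ["alice", "bob"]}`)
	buf := make([]byte, 0, t.StringLength()) // exact capacity up front
	buf, _ = t.AppendText(buf)
	return string(buf) // {"users": ["alice", "bob"]}
}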
func (t *Term) StringLength() int {
if sl, ok := t.Value.(StringLengther); ok {
return sl.StringLength()
}
panic("expected all ast.Value types to implement StringLenghter interface, got: " + ValueName(t.Value))
}
func (s String) StringLength() int {
n := 2 // surrounding quotes
bs := util.StringToByteSlice(s)
for i := 0; i < len(bs); {
r, size := utf8.DecodeRune(bs[i:])
switch r {
case '\\', '"':
n += 2 // escaped backslash or quote
case '\b', '\f', '\n', '\r', '\t':
n += 2 // escaped control characters
default:
if r < 0x20 {
n += 6 // unicode escape for other control characters
} else {
n += size // normal rune
}
}
i += size
}
return n
}
func (n Number) StringLength() int {
return len(n)
}
func (b Boolean) StringLength() int {
if b {
return 4
}
return 5
}
func (Null) StringLength() int {
return 4
}
func (s *set) StringLength() int {
if s.Len() == 0 {
return 5 // set()
}
// surrounding {} + ", " for every element - 1
return TermSliceStringLength(s.Slice(), 2) + 2
}
func (a *Array) StringLength() int {
if a.Len() == 0 {
return 2 // []
}
// surrounding brackets + ", " for every element - 1
return TermSliceStringLength(a.elems, 2) + 2
}
func (o *object) StringLength() (n int) {
if o.Len() == 0 {
return 2 // {}
}
// ": " for every item + ", " for every item - 1
o.Foreach(func(key, value *Term) {
n += key.StringLength() + 4 + value.StringLength() // ": " and ", "
})
return n // surrounding {} but also minus last ", "
}
func (ts *TemplateString) StringLength() (n int) {
for _, p := range ts.Parts {
switch x := p.(type) {
case *Expr:
n += 2 + x.StringLength() // for {}
case *Term:
if s, ok := x.Value.(String); ok {
n += len(s) + countUnescapedLeftCurly(string(s))
} else {
n += x.StringLength()
}
default:
n += 9 // <invalid>
}
}
return n + 3 // $"" or $``
}
func (c Call) StringLength() int {
return c[0].StringLength() + 2 + TermSliceStringLength(c[1:], 2)
}
func (r Ref) StringLength() (n int) {
rlen := len(r)
if rlen == 0 {
return 0
}
if s, ok := r[0].Value.(String); ok {
n = len(s) // first term should never be quoted
} else {
n = r[0].StringLength()
}
if rlen == 1 {
return n
}
for _, p := range r[1:] {
switch v := p.Value.(type) {
case String:
str := string(v)
if IsVarCompatibleString(str) && !IsKeyword(str) {
n += 1 + len(str) // dot + name
} else {
n += 2 + p.StringLength() // brackets
}
default:
n += 2 + p.StringLength() // brackets
}
}
return n
}
func (v Var) StringLength() int {
if v.IsWildcard() {
return 1
}
return len(v)
}
func (s *SetComprehension) StringLength() int {
return s.Term.StringLength() + s.Body.StringLength() + 5 // {} and " | "
}
func (a *ArrayComprehension) StringLength() int {
return a.Term.StringLength() + a.Body.StringLength() + 5 // [] and " | "
}
func (o *ObjectComprehension) StringLength() (n int) {
n += o.Key.StringLength()
n += o.Value.StringLength()
n += o.Body.StringLength()
return n + 7 // "{}"", " | ", and ": "
}
func (m *Module) StringLength() (n int) {
if m.Package != nil {
n += m.Package.StringLength() + 2 // newlines
}
if len(m.Imports) > 0 {
for _, imp := range m.Imports {
n += imp.StringLength() + 1 // newline
}
}
if len(m.Rules) > 0 {
for _, rule := range m.Rules {
n += rule.stringLengthWithOpts(toStringOpts{regoVersion: m.regoVersion}) + 1 // newline
}
}
return n
}
func (p *Package) StringLength() int {
if p == nil {
return 21 // <illegal nil package>
}
if len(p.Path) <= 1 {
return 25 + p.Path.StringLength() // // package <illegal path " ... ">
}
return 8 + p.Path[1:].StringLength() // "package ..."
}
func (i *Import) StringLength() (n int) {
n = 7 + i.Path.StringLength() // "import " and path
if i.Alias != "" {
n += 4 + i.Alias.StringLength() // " as " and alias
}
return n
}
func (r *Rule) StringLength() int {
return r.stringLengthWithOpts(toStringOpts{})
}
func (r *Rule) stringLengthWithOpts(opts toStringOpts) int {
n := 0
if r.Default {
n += 8 // "default "
}
n += r.Head.stringLengthWithOpts(opts)
if !r.Default {
switch opts.RegoVersion() {
case RegoV1, RegoV0CompatV1:
n += 6 // " if { "
default:
n += 3 // " { "
}
n += r.Body.StringLength() + 2 // body and closing " }"
}
if r.Else != nil {
n += r.Else.stringLengthWithOpts(opts)
}
return n
}
func (h *Head) StringLength() int {
return h.stringLengthWithOpts(toStringOpts{})
}
func (h *Head) stringLengthWithOpts(opts toStringOpts) int {
n := h.Reference.StringLength()
containsAdded := false
switch {
case len(h.Args) != 0:
n += h.Args.StringLength()
case len(h.Reference) == 1 && h.Key != nil:
switch opts.RegoVersion() {
case RegoV0:
n += 2 + h.Key.StringLength() // for []
default:
n += 10 + h.Key.StringLength() // " contains "
containsAdded = true
}
}
if h.Value != nil {
if h.Assign {
n += 4 // " := "
} else {
n += 3 // " = "
}
n += h.Value.StringLength()
} else if !containsAdded && h.Name == "" && h.Key != nil {
n += 10 + h.Key.StringLength() // " contains "
}
return n
}
func (a Args) StringLength() (n int) {
n = 2 // ()
for _, t := range a {
n += t.StringLength() + 2 // ", "
}
return n - 2 // minus last ", "
}
func (b Body) StringLength() (n int) {
for _, expr := range b {
n += expr.StringLength() + 2 // "; "
}
return max(n-2, 0) // minus last "; " (if `n` isn't 0)
}
func (e *Expr) StringLength() (n int) {
if e.Negated {
n += 4 // "not "
}
switch terms := e.Terms.(type) {
case []*Term:
if e.IsEquality() && validEqAssignArgCount(e) {
n += terms[1].StringLength() + len(Equality.Infix) + terms[2].StringLength() + 2 // spaces around =
} else {
n += Call(terms).StringLength()
}
case StringLengther:
n += terms.StringLength()
default:
panic(fmt.Sprintf("string length estimation not implemented for type: %T", e.Terms))
}
for _, w := range e.With {
n += w.StringLength() + 1 // space before with
}
return n
}
func (w *With) StringLength() int {
return w.Target.StringLength() + w.Value.StringLength() + 9 // "with " and " as "
}
func (e *Every) StringLength() int {
n := 6 // "every "
if e.Key != nil {
n += e.Key.StringLength() + 2 // ", "
}
n += e.Value.StringLength() + 4 // " in "
n += e.Domain.StringLength() + 3 // " { "
n += e.Body.StringLength() + 2 // " }"
return n
}
func (s *SomeDecl) StringLength() int {
n := 5 // "some "
if call, ok := s.Symbols[0].Value.(Call); ok {
n += 4 // " in "
n += call[1].StringLength()
if len(call) == 4 {
n += 2 // ", "
}
n += call[2].StringLength()
if len(call) == 4 {
n += call[3].StringLength()
}
return n
}
return n + TermSliceStringLength(s.Symbols, 2)
}
func (c *Comment) StringLength() int {
return 1 + len(c.Text) // '#' + text
}

View File

@@ -2,7 +2,6 @@ package ast
import (
"bytes"
"strings"
"sync"
"github.com/open-policy-agent/opa/v1/util"
@@ -12,15 +11,7 @@ var (
TermPtrPool = util.NewSyncPool[Term]()
BytesReaderPool = util.NewSyncPool[bytes.Reader]()
IndexResultPool = util.NewSyncPool[IndexResult]()
bbPool = util.NewSyncPool[bytes.Buffer]()
// Needs custom pool because of custom Put logic.
sbPool = &stringBuilderPool{
pool: sync.Pool{
New: func() any {
return &strings.Builder{}
},
},
}
// Needs custom pool because of custom Put logic.
varVisitorPool = &vvPool{
pool: sync.Pool{
@@ -31,18 +22,8 @@ var (
}
)
type (
stringBuilderPool struct{ pool sync.Pool }
vvPool struct{ pool sync.Pool }
)
func (p *stringBuilderPool) Get() *strings.Builder {
return p.pool.Get().(*strings.Builder)
}
func (p *stringBuilderPool) Put(sb *strings.Builder) {
sb.Reset()
p.pool.Put(sb)
type vvPool struct {
pool sync.Pool
}
func (p *vvPool) Get() *VarVisitor {

View File

@@ -12,7 +12,6 @@ import (
"io"
"math"
"net/url"
"regexp"
"slices"
"strconv"
"strings"
@@ -29,8 +28,6 @@ var (
NullValue Value = Null{}
errFindNotFound = errors.New("find: not found")
varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$")
)
// Location records a position in source code.
@@ -413,19 +410,24 @@ func (term *Term) IsGround() bool {
return term.Value.IsGround()
}
// termJSON is used to serialize Term to JSON without map allocation.
type termJSON struct {
Location *Location `json:"location,omitempty"`
Type string `json:"type"`
Value Value `json:"value"`
}
// MarshalJSON returns the JSON encoding of the term.
//
// Specialized marshalling logic is required to include a type hint for Value.
func (term *Term) MarshalJSON() ([]byte, error) {
d := map[string]any{
"type": ValueName(term.Value),
"value": term.Value,
d := termJSON{
Type: ValueName(term.Value),
Value: term.Value,
}
jsonOptions := astJSON.GetOptions().MarshalOptions
if jsonOptions.IncludeLocation.Term {
if term.Location != nil {
d["location"] = term.Location
}
d.Location = term.Location
}
return json.Marshal(d)
}
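// Hedged sketch of the resulting JSON shape (the term value is illustrative):
// with default marshalling options the location is omitted, so the output carries
// just the type hint and the value.
// assumes imports: "encoding/json", "github.com/open-policy-agent/opa/v1/ast"
func termJSONExample() ([]byte, error) {
	return json.Marshal(ast.StringTerm("hello")) // {"type":"string","value":"hello"}
}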
@@ -925,30 +927,8 @@ func (*TemplateString) IsGround() bool {
}
func (ts *TemplateString) String() string {
str := strings.Builder{}
str.WriteString("$\"")
for _, p := range ts.Parts {
switch x := p.(type) {
case *Expr:
str.WriteByte('{')
str.WriteString(p.String())
str.WriteByte('}')
case *Term:
s := p.String()
if _, ok := x.Value.(String); ok {
s = strings.TrimPrefix(s, "\"")
s = strings.TrimSuffix(s, "\"")
s = EscapeTemplateStringStringPart(s)
}
str.WriteString(s)
default:
str.WriteString("<invalid>")
}
}
str.WriteByte('"')
return str.String()
buf, _ := ts.AppendText(make([]byte, 0, ts.StringLength()))
return util.ByteSliceToString(buf)
}
func TemplateStringTerm(multiLine bool, parts ...Node) *Term {
@@ -973,23 +953,25 @@ func EscapeTemplateStringStringPart(s string) string {
return s
}
l := len(s)
escaped := make([]byte, 0, l+numUnescaped)
return util.ByteSliceToString(AppendEscapedTemplateStringStringPart(make([]byte, 0, len(s)+numUnescaped), s))
}
func AppendEscapedTemplateStringStringPart(buf []byte, s string) []byte {
if s[0] == '{' {
escaped = append(escaped, '\\', s[0])
buf = append(buf, '\\', s[0])
} else {
escaped = append(escaped, s[0])
buf = append(buf, s[0])
}
for i := 1; i < l; i++ {
for i := 1; i < len(s); i++ {
if s[i] == '{' && s[i-1] != '\\' {
escaped = append(escaped, '\\', s[i])
buf = append(buf, '\\', s[i])
} else {
escaped = append(escaped, s[i])
buf = append(buf, s[i])
}
}
return util.ByteSliceToString(escaped)
return buf
}
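// Illustrative inputs for the escaping helpers above (the strings are arbitrary):
// only '{' characters not already preceded by a backslash get escaped.
// assumes: import "github.com/open-policy-agent/opa/v1/ast"
func escapeExamples() {
	_ = ast.EscapeTemplateStringStringPart(`a {b} c`)  // `a \{b} c`
	_ = ast.EscapeTemplateStringStringPart(`a \{b} c`) // unchanged: '{' already escaped
}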
func countUnescapedLeftCurly(s string) (n int) {
@@ -1340,66 +1322,60 @@ func (ref Ref) Ptr() (string, error) {
return buf.String(), nil
}
// IsVarCompatibleString returns true if s is a valid variable name. String s is a valid variable
// name if it starts with a letter (a-z or A-Z) or underscore (_) and is followed by
// letters (a-z or A-Z), digits (0-9), and underscores.
func IsVarCompatibleString(s string) bool {
return varRegexp.MatchString(s)
l := len(s)
if l == 0 {
return false
}
// not exactly easy on the eyes, but often orders of magnitude faster
// than using a compiled regex (see benchmarks in term_bench_test.go)
is_letter := func(c byte) bool {
return (c > 96 && c < 123) || (c > 64 && c < 91)
}
is_digit := func(c byte) bool {
return c > 47 && c < 58
}
// first character must be a letter or underscore
c := s[0]
if !(is_letter(c) || c == 95) {
return false
}
// remaining characters must be letters, digits, or underscores
for i := 1; i < l; i++ {
if c = s[i]; !(is_letter(c) || is_digit(c) || c == 95) {
return false
}
}
return true
}
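// A few illustrative inputs for IsVarCompatibleString (values are arbitrary):
// assumes: import "github.com/open-policy-agent/opa/v1/ast"
func varCompatibleExamples() {
	_ = ast.IsVarCompatibleString("user_name") // true
	_ = ast.IsVarCompatibleString("_private")  // true
	_ = ast.IsVarCompatibleString("1st")       // false: must not start with a digit
	_ = ast.IsVarCompatibleString("foo-bar")   // false: '-' is not a letter, digit or underscore
}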
func (ref Ref) String() string {
// Note(anderseknert):
// Options tried in the order of cheapness, where after some effort,
// only the last option now requires a (single) allocation:
// 1. empty ref
// 2. single var ref
// 3. built-in function ref
// 4. concatenated parts
reflen := len(ref)
if reflen == 0 {
l := len(ref)
// First check for zero-alloc options, as making the buffer for AppendText
// always costs an allocation.
if l == 0 {
return ""
}
if reflen == 1 {
if l == 1 {
if s, ok := ref[0].Value.(String); ok {
// Ref head should normally be a Var, but if for some reason
// it's a string, don't quote it.
return string(s)
}
return ref[0].Value.String()
}
if name, ok := BuiltinNameFromRef(ref); ok {
return name
}
_var := ref[0].Value.String()
bb := bbPool.Get()
bb.Reset()
defer bbPool.Put(bb)
bb.Grow(len(_var) + len(ref[1:])*7) // rough estimate
bb.WriteString(_var)
for _, p := range ref[1:] {
switch p := p.Value.(type) {
case String:
str := string(p)
if IsVarCompatibleString(str) && !IsKeyword(str) {
bb.WriteByte('.')
bb.WriteString(str)
} else {
bb.WriteByte('[')
// Determine whether we need the full JSON-escaped form
if strings.ContainsFunc(str, isControlOrBackslash) {
bb.Write(strconv.AppendQuote(bb.AvailableBuffer(), str))
} else {
bb.WriteByte('"')
bb.WriteString(str)
bb.WriteByte('"')
}
bb.WriteByte(']')
}
default:
bb.WriteByte('[')
bb.WriteString(p.String())
bb.WriteByte(']')
}
}
return bb.String()
buf, _ := ref.AppendText(make([]byte, 0, ref.StringLength()))
return util.ByteSliceToString(buf)
}
// OutputVars returns a VarSet containing variables that would be bound by evaluating
@@ -1442,6 +1418,15 @@ func NewArray(a ...*Term) *Array {
return arr
}
// NewArrayWithCapacity returns a new empty Array with the given capacity pre-allocated.
func NewArrayWithCapacity(capacity int) *Array {
return &Array{
elems: make([]*Term, 0, capacity),
hashs: make([]int, 0, capacity),
ground: true,
}
}
// Array represents an array as defined by the language. Arrays are similar to the
// same types as defined by JSON with the exception that they can contain Vars
// and References.
@@ -1570,21 +1555,8 @@ func (arr *Array) MarshalJSON() ([]byte, error) {
}
func (arr *Array) String() string {
sb := sbPool.Get()
sb.Grow(len(arr.elems) * 16)
defer sbPool.Put(sb)
sb.WriteByte('[')
for i, e := range arr.elems {
if i > 0 {
sb.WriteString(", ")
}
sb.WriteString(e.String())
}
sb.WriteByte(']')
return sb.String()
buf, _ := arr.AppendText(make([]byte, 0, arr.StringLength()))
return util.ByteSliceToString(buf)
}
// Len returns the number of elements in the array.
@@ -1702,6 +1674,11 @@ func NewSet(t ...*Term) Set {
return s
}
// NewSetWithCapacity returns a new empty Set with the given capacity pre-allocated.
func NewSetWithCapacity(capacity int) Set {
return newset(capacity)
}
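// Hypothetical caller of NewSetWithCapacity (the input slice is made up): when the
// final size is known up front, pre-allocating avoids growing the set's internal
// storage while elements are added.
// assumes: import "github.com/open-policy-agent/opa/v1/ast"
func setFromTerms(terms []*ast.Term) ast.Set {
	s := ast.NewSetWithCapacity(len(terms))
	for _, t := range terms {
		s.Add(t)
	}
	return s
}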
func newset(n int) *set {
var keys []*Term
if n > 0 {
@@ -1765,25 +1742,8 @@ func (s *set) Hash() int {
}
func (s *set) String() string {
if s.Len() == 0 {
return "set()"
}
sb := sbPool.Get()
sb.Grow(s.Len() * 16)
defer sbPool.Put(sb)
sb.WriteByte('{')
for i := range s.sortedKeys() {
if i > 0 {
sb.WriteString(", ")
}
sb.WriteString(s.keys[i].Value.String())
}
sb.WriteByte('}')
return sb.String()
buf, _ := s.AppendText(make([]byte, 0, s.StringLength()))
return util.ByteSliceToString(buf)
}
func (s *set) sortedKeys() []*Term {
@@ -1824,14 +1784,14 @@ func (s *set) Diff(other Set) Set {
return NewSet()
}
terms := make([]*Term, 0, len(s.keys))
for _, term := range s.sortedKeys() {
result := newset(len(s.keys))
for _, term := range s.keys {
if !other.Contains(term) {
terms = append(terms, term)
result.insert(term, false)
}
}
return NewSet(terms...)
return result
}
// Intersect returns the set containing elements in both s and other.
@@ -1846,21 +1806,28 @@ func (s *set) Intersect(other Set) Set {
n = m
}
terms := make([]*Term, 0, n)
for _, term := range ss.sortedKeys() {
result := newset(n)
for _, term := range ss.keys {
if so.Contains(term) {
terms = append(terms, term)
result.insert(term, false)
}
}
return NewSet(terms...)
return result
}
// Union returns the set containing all elements of s and other.
func (s *set) Union(other Set) Set {
r := NewSet()
s.Foreach(r.Add)
other.Foreach(r.Add)
o := other.(*set)
// Pre-allocate with max size - avoids over-allocation for overlapping sets
// while only requiring one potential grow for disjoint sets.
r := newset(max(len(s.keys), len(o.keys)))
for _, term := range s.keys {
r.insert(term, false)
}
for _, term := range o.keys {
r.insert(term, false)
}
return r
}
@@ -2034,6 +2001,11 @@ func NewObject(t ...[2]*Term) Object {
return obj
}
// NewObjectWithCapacity returns a new empty Object with the given capacity pre-allocated.
func NewObjectWithCapacity(capacity int) Object {
return newobject(capacity)
}
// ObjectTerm creates a new Term with an Object value.
func ObjectTerm(o ...[2]*Term) *Term {
return &Term{Value: NewObject(o...)}
@@ -2554,24 +2526,8 @@ func (obj *object) Len() int {
}
func (obj *object) String() string {
sb := sbPool.Get()
sb.Grow(obj.Len() * 32)
defer sbPool.Put(sb)
sb.WriteByte('{')
for i, elem := range obj.sortedKeys() {
if i > 0 {
sb.WriteString(", ")
}
sb.WriteString(elem.key.String())
sb.WriteString(": ")
sb.WriteString(elem.value.String())
}
sb.WriteByte('}')
return sb.String()
buf, _ := obj.AppendText(make([]byte, 0, obj.StringLength()))
return util.ByteSliceToString(buf)
}
func (*object) get(*Term) *objectElem {
@@ -2642,7 +2598,7 @@ func filterObject(o Value, filter Value) (Value, error) {
case String, Number, Boolean, Null:
return o, nil
case *Array:
values := NewArray()
values := make([]*Term, 0, v.Len())
for i := range v.Len() {
subFilter := filteredObj.Get(InternedIntegerString(i))
if subFilter != nil {
@@ -2650,10 +2606,10 @@ func filterObject(o Value, filter Value) (Value, error) {
if err != nil {
return nil, err
}
values = values.Append(NewTerm(filteredValue))
values = append(values, NewTerm(filteredValue))
}
}
return values, nil
return NewArray(values...), nil
case Set:
terms := make([]*Term, 0, v.Len())
for _, t := range v.Slice() {
@@ -2776,7 +2732,8 @@ func (ac *ArrayComprehension) IsGround() bool {
}
func (ac *ArrayComprehension) String() string {
return "[" + ac.Term.String() + " | " + ac.Body.String() + "]"
buf, _ := ac.AppendText(make([]byte, 0, ac.StringLength()))
return util.ByteSliceToString(buf)
}
// ObjectComprehension represents an object comprehension as defined in the language.
@@ -2836,7 +2793,8 @@ func (oc *ObjectComprehension) IsGround() bool {
}
func (oc *ObjectComprehension) String() string {
return "{" + oc.Key.String() + ": " + oc.Value.String() + " | " + oc.Body.String() + "}"
buf, _ := oc.AppendText(make([]byte, 0, oc.StringLength()))
return util.ByteSliceToString(buf)
}
// SetComprehension represents a set comprehension as defined in the language.
@@ -2893,7 +2851,8 @@ func (sc *SetComprehension) IsGround() bool {
}
func (sc *SetComprehension) String() string {
return "{" + sc.Term.String() + " | " + sc.Body.String() + "}"
buf, _ := sc.AppendText(make([]byte, 0, sc.StringLength()))
return util.ByteSliceToString(buf)
}
// Call represents as function call in the language.
@@ -2954,11 +2913,8 @@ func (c Call) Operands() []*Term {
}
func (c Call) String() string {
args := make([]string, len(c)-1)
for i := 1; i < len(c); i++ {
args[i-1] = c[i].String()
}
return fmt.Sprintf("%v(%v)", c[0], strings.Join(args, ", "))
buf, _ := c.AppendText(make([]byte, 0, c.StringLength()))
return util.ByteSliceToString(buf)
}
func termSliceCopy(a []*Term) []*Term {

View File

@@ -0,0 +1,266 @@
package ast
import (
"encoding"
"strconv"
"strings"
"github.com/open-policy-agent/opa/v1/util"
)
// AppendText appends the text representation of term (i.e. as printed in policy) to
// buf and returns the extended buffer.
func (term *Term) AppendText(buf []byte) ([]byte, error) {
if app, ok := term.Value.(encoding.TextAppender); ok {
return app.AppendText(buf)
}
return append(buf, term.Value.String()...), nil
}
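// Sketch of Term.AppendText from the caller's side (the term literal is
// illustrative): *ast.Term satisfies encoding.TextAppender, so it can be appended
// straight into an existing byte buffer.
// assumes imports: "encoding", "github.com/open-policy-agent/opa/v1/ast"
var _ encoding.TextAppender = &ast.Term{}

func appendTermExample() string {
	buf, _ := ast.MustParseTerm(`[1, "two", {"three": 3}]`).AppendText(nil)
	return string(buf) // [1, "two", {"three": 3}]
}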
func (v Var) AppendText(buf []byte) ([]byte, error) {
if v.IsWildcard() {
return append(buf, WildcardString...), nil
}
return append(buf, v...), nil
}
func (b Boolean) AppendText(buf []byte) ([]byte, error) {
if b {
return append(buf, "true"...), nil
}
return append(buf, "false"...), nil
}
func (Null) AppendText(buf []byte) ([]byte, error) {
return append(buf, "null"...), nil
}
func (str String) AppendText(buf []byte) ([]byte, error) {
return strconv.AppendQuote(buf, string(str)), nil
}
func (str String) appendNoQuote(buf []byte) []byte {
// Append using strconv.AppendQuote for proper escaping, but trim off
// the leading and trailing quotes afterwards.
oldLen := len(buf)
buf = strconv.AppendQuote(buf, string(str))
newLen := len(buf)
quoted := buf[oldLen:newLen]
return append(buf[:oldLen], quoted[1:len(quoted)-1]...)
}
func (num Number) AppendText(buf []byte) ([]byte, error) {
return append(buf, num...), nil
}
func (arr *Array) AppendText(buf []byte) ([]byte, error) {
buf, err := AppendDelimeted(append(buf, '['), arr.elems, ", ")
if err != nil {
return nil, err
}
return append(buf, ']'), nil
}
func (obj *object) AppendText(buf []byte) ([]byte, error) {
olen := obj.Len()
if olen == 0 {
return append(buf, "{}"...), nil
}
buf = append(buf, '{')
var err error
// first key-value pair
keys := obj.sortedKeys()
for i := range keys {
if buf, err = keys[i].key.AppendText(buf); err != nil {
return nil, err
}
buf = append(buf, ": "...)
if buf, err = keys[i].value.AppendText(buf); err != nil {
return nil, err
}
if i < olen-1 {
buf = append(buf, ", "...)
}
}
return append(buf, '}'), nil
}
func (obj *lazyObj) AppendText(buf []byte) ([]byte, error) {
return append(buf, obj.force().String()...), nil
}
func (s *set) AppendText(buf []byte) ([]byte, error) {
slen := s.Len()
if slen == 0 {
return append(buf, "set()"...), nil
}
var err error
buf = append(buf, '{')
if buf, err = AppendDelimeted(buf, s.sortedKeys(), ", "); err != nil {
return nil, err
}
return append(buf, '}'), nil
}
func (c Call) AppendText(buf []byte) ([]byte, error) {
if len(c) == 0 {
return buf, nil
}
var err error
if buf, err = c[0].AppendText(buf); err != nil {
return nil, err
}
if buf, err = AppendDelimeted(append(buf, '('), c[1:], ", "); err != nil {
return nil, err
}
return append(buf, ')'), nil
}
func (ts *TemplateString) AppendText(buf []byte) ([]byte, error) {
buf = append(buf, "$\""...)
for _, p := range ts.Parts {
switch x := p.(type) {
case *Expr:
buf = append(buf, '{')
var err error
if buf, err = x.AppendText(buf); err != nil {
return nil, err
}
buf = append(buf, '}')
case *Term:
if str, ok := x.Value.(String); ok {
// TODO(anders): this is a bit of a mess, but as explained by the comment on
// [EscapeTemplateStringStringPart], required as long as we rely on strconv for escaping, which adds
// quotes around the string that we don't want here, and trying to "unappend" them is not nice at all..
s := string(str)
ulc := countUnescapedLeftCurly(s)
sl := str.StringLength() + ulc - 2 // no surrounding quotes
if sl == len(s) { // no escaping needed
buf = append(buf, s...)
} else { // some escaping needed
if sl == len(s)+ulc { // only unescaped {
buf = AppendEscapedTemplateStringStringPart(buf, string(str))
} else { // full escaping needed. this is expensive but luckily rare
tmp := str.appendNoQuote(make([]byte, 0, sl))
ets := EscapeTemplateStringStringPart(util.ByteSliceToString(tmp))
buf = append(buf, ets...)
}
}
} else {
var err error
if buf, err = x.AppendText(buf); err != nil {
return nil, err
}
}
default:
buf = append(buf, "<invalid>"...)
}
}
return append(buf, '"'), nil
}
func (r Ref) AppendText(buf []byte) ([]byte, error) {
reflen := len(r)
if reflen == 0 {
return buf, nil
}
if reflen == 1 {
if s, ok := r[0].Value.(String); ok {
// While a ref head is typically a Var, a lone String term should not be quoted
return append(buf, s...), nil
}
return r[0].AppendText(buf)
}
if name, ok := BuiltinNameFromRef(r); ok {
return append(buf, name...), nil
}
var err error
if s, ok := r[0].Value.(String); ok {
buf = append(buf, s...)
} else if buf, err = r[0].AppendText(buf); err != nil {
return nil, err
}
for _, p := range r[1:] {
switch v := p.Value.(type) {
case String:
str := string(v)
if IsVarCompatibleString(str) && !IsKeyword(str) {
buf = append(append(buf, '.'), str...)
} else {
buf = append(buf, '[')
// Determine whether we need the full JSON-escaped form
if strings.ContainsFunc(str, isControlOrBackslash) {
if buf, err = v.AppendText(buf); err != nil {
return nil, err
}
} else {
buf = append(append(append(buf, '"'), str...), '"')
}
buf = append(buf, ']')
}
default:
buf = append(buf, '[')
if buf, err = p.AppendText(buf); err != nil {
return nil, err
}
buf = append(buf, ']')
}
}
return buf, nil
}
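// Illustrative Ref output (the ref itself is made up): string parts that are valid
// variable names are printed with dot notation, everything else falls back to
// bracketed, quoted form.
// assumes: import "github.com/open-policy-agent/opa/v1/ast"
func refTextExample() string {
	r := ast.MustParseRef(`data.users["first name"].roles[0]`)
	buf, _ := r.AppendText(nil)
	return string(buf) // data.users["first name"].roles[0]
}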
func (sc *SetComprehension) AppendText(buf []byte) ([]byte, error) {
buf = append(buf, '{')
var err error
if buf, err = sc.Term.AppendText(buf); err != nil {
return nil, err
}
if buf, err = sc.Body.AppendText(append(buf, " | "...)); err != nil {
return nil, err
}
return append(buf, '}'), nil
}
func (ac *ArrayComprehension) AppendText(buf []byte) ([]byte, error) {
buf = append(buf, '[')
var err error
if buf, err = ac.Term.AppendText(buf); err != nil {
return nil, err
}
if buf, err = ac.Body.AppendText(append(buf, " | "...)); err != nil {
return nil, err
}
return append(buf, ']'), nil
}
func (oc *ObjectComprehension) AppendText(buf []byte) ([]byte, error) {
buf = append(buf, '{')
var err error
if buf, err = oc.Key.AppendText(buf); err != nil {
return nil, err
}
buf = append(buf, ": "...)
if buf, err = oc.Value.AppendText(buf); err != nil {
return nil, err
}
if buf, err = oc.Body.AppendText(append(buf, " | "...)); err != nil {
return nil, err
}
return append(buf, '}'), nil
}

View File

@@ -25,6 +25,11 @@
"Minor": 17,
"Patch": 0
},
"array.flatten": {
"Major": 1,
"Minor": 13,
"Patch": 0
},
"array.reverse": {
"Major": 0,
"Minor": 36,

View File

@@ -14,7 +14,6 @@ import (
"fmt"
"hash"
"io"
"strings"
"github.com/open-policy-agent/opa/v1/util"
)
@@ -132,5 +131,5 @@ func encodePrimitive(v any) []byte {
encoder := json.NewEncoder(&buf)
encoder.SetEscapeHTML(false)
_ = encoder.Encode(v)
return []byte(strings.Trim(buf.String(), "\n"))
return bytes.Trim(buf.Bytes(), "\n")
}

View File

@@ -9,7 +9,6 @@ import (
"bytes"
"errors"
"fmt"
"regexp"
"slices"
"sort"
"strings"
@@ -29,7 +28,6 @@ const defaultLocationFile = "__format_default__"
var (
expandedConst = ast.NewBody(ast.NewExpr(ast.InternedTerm(true)))
commentsSlicePool = util.NewSlicePool[*ast.Comment](50)
varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$")
)
// Opts lets you control the code formatting via `AstWithOpts()`.
@@ -1441,7 +1439,7 @@ func (w *writer) writeRefStringPath(s ast.String, l *ast.Location) {
}
func (w *writer) shouldBracketRefTerm(s string, l *ast.Location) bool {
if !varRegexp.MatchString(s) {
if !ast.IsVarCompatibleString(s) {
return true
}

View File

@@ -15,7 +15,7 @@ import (
"sync"
"time"
"github.com/open-policy-agent/opa/internal/report"
"github.com/open-policy-agent/opa/internal/versioncheck"
"github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/otel/sdk/trace"
@@ -140,6 +140,9 @@ const (
type TriggerMode string
const (
// TriggerImmediate represents uploading chunks when ready, flushed by the periodic polling mechanism
TriggerImmediate TriggerMode = "immediate"
// TriggerPeriodic represents periodic polling mechanism
TriggerPeriodic TriggerMode = "periodic"
@@ -212,11 +215,10 @@ type Manager struct {
tracerProvider *trace.TracerProvider
distributedTacingOpts tracing.Options
registeredNDCacheTriggers []func(bool)
registeredTelemetryGatherers map[string]report.Gatherer
bootstrapConfigLabels map[string]string
hooks hooks.Hooks
enableTelemetry bool
reporter report.Reporter
enableVersionCheck bool
versionChecker versioncheck.Checker
opaReportNotifyCh chan struct{}
stop chan chan struct{}
parserOptions ast.ParserOptions
@@ -272,10 +274,10 @@ func getWasmResolversOnContext(context *storage.Context) []*wasm.Resolver {
func validateTriggerMode(mode TriggerMode) error {
switch mode {
case TriggerPeriodic, TriggerManual:
case TriggerPeriodic, TriggerManual, TriggerImmediate:
return nil
default:
return fmt.Errorf("invalid trigger mode %q (want %q or %q)", mode, TriggerPeriodic, TriggerManual)
return fmt.Errorf("invalid trigger mode %q (want %q, %q or %q)", mode, TriggerPeriodic, TriggerManual, TriggerImmediate)
}
}
@@ -418,18 +420,36 @@ func WithParserOptions(opts ast.ParserOptions) func(*Manager) {
}
}
// WithEnableTelemetry controls whether OPA will send telemetry reports to an external service.
func WithEnableTelemetry(enableTelemetry bool) func(*Manager) {
// WithEnableVersionCheck controls whether OPA will check for version updates.
func WithEnableVersionCheck(enable bool) func(*Manager) {
return func(m *Manager) {
m.enableTelemetry = enableTelemetry
m.enableVersionCheck = enable
}
}
// WithEnableTelemetry controls whether OPA will check for version updates.
//
// Deprecated: please use WithEnableVersionCheck instead.
func WithEnableTelemetry(enableTelemetry bool) func(*Manager) {
return WithEnableVersionCheck(enableTelemetry)
}
// WithTelemetryGatherers allows registration of telemetry gatherers which enable injection of additional data in the
// telemetry report
func WithTelemetryGatherers(gs map[string]report.Gatherer) func(*Manager) {
//
// Deprecated: This function is deprecated as telemetry gathering has been removed. Use WithVersionChecker to provide
// a custom version checker implementation if needed.
func WithTelemetryGatherers(gs map[string]versioncheck.Gatherer) func(*Manager) {
return func(m *Manager) {
m.registeredTelemetryGatherers = gs
// No-op: telemetry gatherers are no longer used
}
}
// WithVersionChecker sets a custom version checker implementation.
// If not provided, a default GitHub-based version checker will be used when telemetry is enabled.
func WithVersionChecker(checker versioncheck.Checker) func(*Manager) {
return func(m *Manager) {
m.versionChecker = checker
}
}
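// Hedged migration sketch (the config bytes and instance id are placeholders): the
// deprecated WithEnableTelemetry option can be swapped for WithEnableVersionCheck
// when constructing a plugin manager.
// assumes imports:
//   "github.com/open-policy-agent/opa/v1/plugins"
//   "github.com/open-policy-agent/opa/v1/storage/inmem"
func newManager(config []byte) (*plugins.Manager, error) {
	// previously: plugins.WithEnableTelemetry(true)
	return plugins.New(config, "example-instance-id", inmem.New(),
		plugins.WithEnableVersionCheck(true),
	)
}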
@@ -507,25 +527,12 @@ func New(raw []byte, id string, store storage.Store, opts ...func(*Manager)) (*M
return nil, err
}
if m.enableTelemetry {
reporter, err := report.New(report.Options{Logger: m.logger})
if m.enableVersionCheck {
versionChecker, err := versioncheck.New(versioncheck.Options{Logger: m.logger})
if err != nil {
return nil, err
}
m.reporter = reporter
m.reporter.RegisterGatherer("min_compatible_version", func(_ context.Context) (any, error) {
var minimumCompatibleVersion string
if c := m.GetCompiler(); c != nil && c.Required != nil {
minimumCompatibleVersion, _ = c.Required.MinimumCompatibleVersion()
}
return minimumCompatibleVersion, nil
})
// register any additional gatherers
for k, g := range m.registeredTelemetryGatherers {
m.reporter.RegisterGatherer(k, g)
}
m.versionChecker = versionChecker
}
return m, nil
@@ -543,7 +550,7 @@ func (m *Manager) Init(ctx context.Context) error {
Context: storage.NewContext(),
}
if m.enableTelemetry {
if m.enableVersionCheck {
m.opaReportNotifyCh = make(chan struct{})
m.stop = make(chan chan struct{})
go m.sendOPAUpdateLoop(ctx)
@@ -969,7 +976,7 @@ func (m *Manager) onCommit(ctx context.Context, txn storage.Transaction, event s
if compiler != nil {
m.setCompiler(compiler)
if m.enableTelemetry && event.PolicyChanged() {
if m.enableVersionCheck && event.PolicyChanged() {
m.opaReportNotifyCh <- struct{}{}
}
@@ -1173,9 +1180,9 @@ func (m *Manager) sendOPAUpdateLoop(ctx context.Context) {
if opaReportNotify {
opaReportNotify = false
_, err := m.reporter.SendReport(ctx)
_, err := m.versionChecker.LatestVersion(ctx)
if err != nil {
m.logger.WithFields(map[string]any{"err": err}).Debug("Unable to send OPA telemetry report.")
m.logger.WithFields(map[string]any{"err": err}).Debug("Unable to check OPA version.")
}
}

Some files were not shown because too many files have changed in this diff.