build(deps): bump github.com/open-policy-agent/opa from 1.9.0 to 1.10.1

Bumps [github.com/open-policy-agent/opa](https://github.com/open-policy-agent/opa) from 1.9.0 to 1.10.1.
- [Release notes](https://github.com/open-policy-agent/opa/releases)
- [Changelog](https://github.com/open-policy-agent/opa/blob/main/CHANGELOG.md)
- [Commits](https://github.com/open-policy-agent/opa/compare/v1.9.0...v1.10.1)

---
updated-dependencies:
- dependency-name: github.com/open-policy-agent/opa
  dependency-version: 1.10.1
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
dependabot[bot]
2025-11-10 07:44:01 +00:00
committed by Ralf Haferkamp
parent f02a23098a
commit 54684101f7
37 changed files with 11915 additions and 1588 deletions

2
go.mod
View File

@@ -62,7 +62,7 @@ require (
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.27.2
github.com/onsi/gomega v1.38.2
github.com/open-policy-agent/opa v1.9.0
github.com/open-policy-agent/opa v1.10.1
github.com/opencloud-eu/icap-client v0.0.0-20250930132611-28a2afe62d89
github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250724122329-41ba6b191e76
github.com/opencloud-eu/reva/v2 v2.39.2-0.20251106122902-c13e27f55362

8
go.sum
View File

@@ -198,8 +198,8 @@ github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/
github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c=
github.com/butonic/go-micro/v4 v4.11.1-0.20241115112658-b5d4de5ed9b3 h1:h8Z0hBv5tg/uZMKu8V47+DKWYVQg0lYP8lXDQq7uRpE=
github.com/butonic/go-micro/v4 v4.11.1-0.20241115112658-b5d4de5ed9b3/go.mod h1:eE/tD53n3KbVrzrWxKLxdkGw45Fg1qaNLWjpJMvIUF4=
github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA=
github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q=
github.com/bytecodealliance/wasmtime-go/v37 v37.0.0 h1:DPjdn2V3JhXHMoZ2ymRqGK+y1bDyr9wgpyYCvhjMky8=
github.com/bytecodealliance/wasmtime-go/v37 v37.0.0/go.mod h1:Pf1l2JCTUFMnOqDIwkjzx1qfVJ09xbaXETKgRVE4jZ0=
github.com/c-bata/go-prompt v0.2.5/go.mod h1:vFnjEGDIIA/Lib7giyE4E9c50Lvl8j0S+7FVlAwDAVw=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
@@ -942,8 +942,8 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/open-policy-agent/opa v1.9.0 h1:QWFNwbcc29IRy0xwD3hRrMc/RtSersLY1Z6TaID3vgI=
github.com/open-policy-agent/opa v1.9.0/go.mod h1:72+lKmTda0O48m1VKAxxYl7MjP/EWFZu9fxHQK2xihs=
github.com/open-policy-agent/opa v1.10.1 h1:haIvxZSPky8HLjRrvQwWAjCPLg8JDFSZMbbG4yyUHgY=
github.com/open-policy-agent/opa v1.10.1/go.mod h1:7uPI3iRpOalJ0BhK6s1JALWPU9HvaV1XeBSSMZnr/PM=
github.com/opencloud-eu/go-micro-plugins/v4/store/nats-js-kv v0.0.0-20250512152754-23325793059a h1:Sakl76blJAaM6NxylVkgSzktjo2dS504iDotEFJsh3M=
github.com/opencloud-eu/go-micro-plugins/v4/store/nats-js-kv v0.0.0-20250512152754-23325793059a/go.mod h1:pjcozWijkNPbEtX5SIQaxEW/h8VAVZYTLx+70bmB3LY=
github.com/opencloud-eu/icap-client v0.0.0-20250930132611-28a2afe62d89 h1:W1ms+lP5lUUIzjRGDg93WrQfZJZCaV1ZP3KeyXi8bzY=

View File

File diff suppressed because it is too large Load Diff

View File

File diff suppressed because it is too large Load Diff

View File

@@ -72,7 +72,7 @@ func LoadWasmResolversFromStore(ctx context.Context, store storage.Store, txn st
var resolvers []*wasm.Resolver
if len(resolversToLoad) > 0 {
// Get a full snapshot of the current data (including any from "outside" the bundles)
data, err := store.Read(ctx, txn, storage.Path{})
data, err := store.Read(ctx, txn, storage.RootPath)
if err != nil {
return nil, fmt.Errorf("failed to initialize wasm runtime: %s", err)
}

View File

@@ -148,7 +148,6 @@ package edittree
import (
"errors"
"fmt"
"math/big"
"sort"
"strings"
@@ -203,89 +202,13 @@ func NewEditTree(term *ast.Term) *EditTree {
// it was found in the table already.
func (e *EditTree) getKeyHash(key *ast.Term) (int, bool) {
hash := key.Hash()
// This `equal` utility is duplicated and manually inlined a number of
// time in this file. Inlining it avoids heap allocations, so it makes
// a big performance difference: some operations like lookup become twice
// as slow without it.
var equal func(v ast.Value) bool
switch x := key.Value.(type) {
case ast.Null, ast.Boolean, ast.String, ast.Var:
equal = func(y ast.Value) bool { return x == y }
case ast.Number:
if xi, ok := x.Int64(); ok {
equal = func(y ast.Value) bool {
if y, ok := y.(ast.Number); ok {
if yi, ok := y.Int64(); ok {
return xi == yi
}
}
return false
}
break
}
// We use big.Rat for comparing big numbers.
// It replaces big.Float due to following reason:
// big.Float comes with a default precision of 64, and setting a
// larger precision results in more memory being allocated
// (regardless of the actual number we are parsing with SetString).
//
// Note: If we're so close to zero that big.Float says we are zero, do
// *not* big.Rat).SetString on the original string it'll potentially
// take very long.
var a *big.Rat
fa, ok := new(big.Float).SetString(string(x))
if !ok {
panic("illegal value")
}
if fa.IsInt() {
if i, _ := fa.Int64(); i == 0 {
a = new(big.Rat).SetInt64(0)
}
}
if a == nil {
a, ok = new(big.Rat).SetString(string(x))
if !ok {
panic("illegal value")
}
}
equal = func(b ast.Value) bool {
if bNum, ok := b.(ast.Number); ok {
var b *big.Rat
fb, ok := new(big.Float).SetString(string(bNum))
if !ok {
panic("illegal value")
}
if fb.IsInt() {
if i, _ := fb.Int64(); i == 0 {
b = new(big.Rat).SetInt64(0)
}
}
if b == nil {
b, ok = new(big.Rat).SetString(string(bNum))
if !ok {
panic("illegal value")
}
}
return a.Cmp(b) == 0
}
return false
}
default:
equal = func(y ast.Value) bool { return ast.Compare(x, y) == 0 }
}
// Look through childKeys, looking up the original hash
// value first, and then use linear-probing to iter
// through the keys until we either find the Term we're
// after, or run out of candidates.
for curr, ok := e.childKeys[hash]; ok; {
if equal(curr.Value) {
if ast.KeyHashEqual(curr.Value, key.Value) {
return hash, true
}

View File

@@ -42,7 +42,7 @@ type InsertAndCompileResult struct {
// store contents.
func InsertAndCompile(ctx context.Context, opts InsertAndCompileOptions) (*InsertAndCompileResult, error) {
if len(opts.Files.Documents) > 0 {
if err := opts.Store.Write(ctx, opts.Txn, storage.AddOp, storage.Path{}, opts.Files.Documents); err != nil {
if err := opts.Store.Write(ctx, opts.Txn, storage.AddOp, storage.RootPath, opts.Files.Documents); err != nil {
return nil, fmt.Errorf("storage error: %w", err)
}
}

View File

@@ -233,3 +233,18 @@ func validateIdentifier(id string) error {
// reIdentifier is a regular expression used to check that pre-release and metadata
// identifiers satisfy the spec requirements
var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`)
// Compare compares two semver strings a and b (with or without a leading
// "v" prefix), returning -1, 0 or 1 if a is less than, equal to, or
// greater than b. Unparsable strings sort before any valid version, and
// two unparsable strings compare equal — keeping the comparator
// antisymmetric, as required by sorting functions such as
// slices.SortStableFunc.
func Compare(a, b string) int {
	av, aErr := NewVersion(strings.TrimPrefix(a, "v"))
	bv, bErr := NewVersion(strings.TrimPrefix(b, "v"))
	switch {
	case aErr != nil && bErr != nil:
		// Neither parses: treat as equal rather than reporting both as
		// "less than" the other, which would violate antisymmetry.
		return 0
	case aErr != nil:
		return -1
	case bErr != nil:
		return 1
	}
	return av.Compare(*bv)
}

View File

File diff suppressed because it is too large Load Diff

View File

@@ -95,7 +95,6 @@ type Capabilities struct {
// As of now, this only controls fetching remote refs for using JSON Schemas in
// the type checker.
// TODO(sr): support ports to further restrict connection peers
// TODO(sr): support restricting `http.send` using the same mechanism (see https://github.com/open-policy-agent/opa/issues/3665)
AllowNet []string `json:"allow_net,omitempty"`
}
@@ -220,6 +219,9 @@ func LoadCapabilitiesVersions() ([]string, error) {
for _, ent := range ents {
capabilitiesVersions = append(capabilitiesVersions, strings.Replace(ent.Name(), ".json", "", 1))
}
slices.SortStableFunc(capabilitiesVersions, semver.Compare)
return capabilitiesVersions, nil
}

View File

@@ -5,9 +5,10 @@
package ast
import (
"encoding/json"
"cmp"
"fmt"
"math/big"
"strings"
)
// Compare returns an integer indicating whether two AST values are less than,
@@ -77,8 +78,7 @@ func Compare(a, b any) int {
case Null:
return 0
case Boolean:
b := b.(Boolean)
if a.Equal(b) {
if a == b.(Boolean) {
return 0
}
if !a {
@@ -86,64 +86,10 @@ func Compare(a, b any) int {
}
return 1
case Number:
if ai, err := json.Number(a).Int64(); err == nil {
if bi, err := json.Number(b.(Number)).Int64(); err == nil {
if ai == bi {
return 0
}
if ai < bi {
return -1
}
return 1
}
}
// We use big.Rat for comparing big numbers.
// It replaces big.Float due to following reason:
// big.Float comes with a default precision of 64, and setting a
// larger precision results in more memory being allocated
// (regardless of the actual number we are parsing with SetString).
//
// Note: If we're so close to zero that big.Float says we are zero, do
// *not* big.Rat).SetString on the original string it'll potentially
// take very long.
var bigA, bigB *big.Rat
fa, ok := new(big.Float).SetString(string(a))
if !ok {
panic("illegal value")
}
if fa.IsInt() {
if i, _ := fa.Int64(); i == 0 {
bigA = new(big.Rat).SetInt64(0)
}
}
if bigA == nil {
bigA, ok = new(big.Rat).SetString(string(a))
if !ok {
panic("illegal value")
}
}
fb, ok := new(big.Float).SetString(string(b.(Number)))
if !ok {
panic("illegal value")
}
if fb.IsInt() {
if i, _ := fb.Int64(); i == 0 {
bigB = new(big.Rat).SetInt64(0)
}
}
if bigB == nil {
bigB, ok = new(big.Rat).SetString(string(b.(Number)))
if !ok {
panic("illegal value")
}
}
return bigA.Cmp(bigB)
return NumberCompare(a, b.(Number))
case String:
b := b.(String)
if a.Equal(b) {
if a == b {
return 0
}
if a < b {
@@ -153,8 +99,7 @@ func Compare(a, b any) int {
case Var:
return VarCompare(a, b.(Var))
case Ref:
b := b.(Ref)
return termSliceCompare(a, b)
return termSliceCompare(a, b.(Ref))
case *Array:
b := b.(*Array)
return termSliceCompare(a.elems, b.elems)
@@ -164,11 +109,9 @@ func Compare(a, b any) int {
if x, ok := b.(*lazyObj); ok {
b = x.force()
}
b := b.(*object)
return a.Compare(b)
return a.Compare(b.(*object))
case Set:
b := b.(Set)
return a.Compare(b)
return a.Compare(b.(Set))
case *ArrayComprehension:
b := b.(*ArrayComprehension)
if cmp := Compare(a.Term, b.Term); cmp != 0 {
@@ -191,44 +134,31 @@ func Compare(a, b any) int {
}
return a.Body.Compare(b.Body)
case Call:
b := b.(Call)
return termSliceCompare(a, b)
return termSliceCompare(a, b.(Call))
case *Expr:
b := b.(*Expr)
return a.Compare(b)
return a.Compare(b.(*Expr))
case *SomeDecl:
b := b.(*SomeDecl)
return a.Compare(b)
return a.Compare(b.(*SomeDecl))
case *Every:
b := b.(*Every)
return a.Compare(b)
return a.Compare(b.(*Every))
case *With:
b := b.(*With)
return a.Compare(b)
return a.Compare(b.(*With))
case Body:
b := b.(Body)
return a.Compare(b)
return a.Compare(b.(Body))
case *Head:
b := b.(*Head)
return a.Compare(b)
return a.Compare(b.(*Head))
case *Rule:
b := b.(*Rule)
return a.Compare(b)
return a.Compare(b.(*Rule))
case Args:
b := b.(Args)
return termSliceCompare(a, b)
return termSliceCompare(a, b.(Args))
case *Import:
b := b.(*Import)
return a.Compare(b)
return a.Compare(b.(*Import))
case *Package:
b := b.(*Package)
return a.Compare(b)
return a.Compare(b.(*Package))
case *Annotations:
b := b.(*Annotations)
return a.Compare(b)
return a.Compare(b.(*Annotations))
case *Module:
b := b.(*Module)
return a.Compare(b)
return a.Compare(b.(*Module))
}
panic(fmt.Sprintf("illegal value: %T", a))
}
@@ -427,3 +357,84 @@ func RefCompare(a, b Ref) int {
func RefEqual(a, b Ref) bool {
return termSliceEqual(a, b)
}
// NumberCompare compares two Numbers by exact numeric value, returning
// -1, 0 or 1 if x is less than, equal to, or greater than y.
//
// Cheap paths are tried first: comparison of normalized literals, then
// an int64 comparison. Only when these cannot decide is the exact (but
// more expensive) big.Rat comparison used.
func NumberCompare(x, y Number) int {
	xs, ys := normalizeNumberString(string(x)), normalizeNumberString(string(y))

	// Identical normalized literals denote the same number.
	if xs == ys {
		return 0
	}
	// Fast path: both values fit in an int64.
	if xi, ok := x.Int64(); ok {
		if yi, ok := y.Int64(); ok {
			return cmp.Compare(xi, yi)
		}
	}
	// Exact comparison via big.Rat. A float64 comparison here could
	// falsely report equality for values that differ only beyond float64
	// precision, so we deliberately don't use one.
	return ratFromNumberString(xs).Cmp(ratFromNumberString(ys))
}

// normalizeNumberString strips redundant trailing zeros from the
// fractional part of a JSON number literal, so that "1", "1.0" and
// "1.00" all normalize to "1", and "1.50" to "1.5". Only the fraction is
// trimmed: "10.0" normalizes to "10" — never "1", which a naive
// strings.TrimRight(s, ".0") would produce, since TrimRight removes a
// cutset of characters, not a suffix. Literals carrying an exponent are
// returned untouched ("1.5e10" must not become "1.5e1").
func normalizeNumberString(s string) string {
	i := strings.IndexByte(s, '.')
	if i < 0 {
		return s
	}
	if strings.IndexAny(s, "eE") >= 0 {
		return s
	}
	frac := strings.TrimRight(s[i+1:], "0")
	if frac == "" {
		// All-zero fraction: "10.0" -> "10".
		return s[:i]
	}
	return s[:i+1+len(frac)]
}

// ratFromNumberString converts a JSON number literal into a *big.Rat,
// panicking on input that does not parse (callers guarantee validity).
//
// We consult big.Float first: it cheaply detects values that are exactly
// zero, for which big.Rat.SetString on the original (possibly very long)
// literal could otherwise be needlessly expensive. big.Float is used
// with its default precision, as raising it would only allocate more
// memory without helping the zero check.
func ratFromNumberString(s string) *big.Rat {
	f, ok := new(big.Float).SetString(s)
	if !ok {
		panic("illegal value")
	}
	if f.IsInt() {
		if i, _ := f.Int64(); i == 0 {
			return new(big.Rat).SetInt64(0)
		}
	}
	r, ok := new(big.Rat).SetString(s)
	if !ok {
		panic("illegal value")
	}
	return r
}

View File

@@ -2163,6 +2163,15 @@ func rewritePrintCalls(gen *localVarGenerator, getArity func(Ref) int, globals V
var errs Errors
safe := outputVarsForBody(body[:i], getArity, globals)
safe.Update(globals)
// Fixes Issue #7647 by adding generated variables to the safe set
WalkVars(body[:i], func(v Var) bool {
if v.IsGenerated() {
safe.Add(v)
}
return false
})
args := body[i].Operands()
var vis *VarVisitor

View File

File diff suppressed because it is too large Load Diff

View File

@@ -1056,7 +1056,7 @@ func (p *Parser) parseHead(defaultRule bool) (*Head, bool) {
return nil, false
}
ref := p.parseTermFinish(term, true)
ref := p.parseHeadFinish(term, true)
if ref == nil {
p.illegal("expected rule head name")
return nil, false
@@ -1778,6 +1778,35 @@ func (p *Parser) parseTermFinish(head *Term, skipws bool) *Term {
}
}
// parseHeadFinish completes scanning after a rule-head term has just been
// parsed. It decides whether the head continues as a ref or function call
// ("." / "[" / "("), rejects infix operator tokens that may not directly
// follow a rule head name, and normalizes a bare var naming a root
// document into a single-element ref. Returns nil when head is nil.
func (p *Parser) parseHeadFinish(head *Term, skipws bool) *Term {
	if head == nil {
		return nil
	}
	// Remember the scanner offset before advancing, so a follow-on
	// ref/call can be parsed relative to where the head term ended.
	offset := p.s.loc.Offset
	p.doScan(false)
	switch p.s.tok {
	case tokens.Add, tokens.Sub, tokens.Mul, tokens.Quo, tokens.Rem,
		tokens.And, tokens.Or,
		tokens.Equal, tokens.Neq, tokens.Gt, tokens.Gte, tokens.Lt, tokens.Lte:
		// An arithmetic/comparison operator directly after a rule head
		// name is illegal; record the error.
		p.illegalToken()
	case tokens.Whitespace:
		// Consume the whitespace, honoring the caller's skipws preference
		// for what follows it.
		p.doScan(skipws)
	}
	switch p.s.tok {
	case tokens.LParen, tokens.Dot, tokens.LBrack:
		// The head continues, e.g. `a.b`, `a[x]` or `f(x)`: parse the rest
		// as a ref rooted at the head term.
		return p.parseRef(head, offset)
	case tokens.Whitespace:
		p.scan()
	}
	// NOTE(review): a bare var that is a root document name (presumably
	// `data`/`input` — confirm against RootDocumentNames) is wrapped into
	// a Ref term so later stages treat it uniformly with dotted refs.
	if _, ok := head.Value.(Var); ok && RootDocumentNames.Contains(head) {
		return RefTerm(head).SetLocation(head.Location)
	}
	return head
}
func (p *Parser) parseNumber() *Term {
var prefix string
loc := p.s.Loc()
@@ -1853,13 +1882,11 @@ func (p *Parser) parseString() *Term {
}
var s string
err := json.Unmarshal([]byte(p.s.lit), &s)
if err != nil {
if err := json.Unmarshal([]byte(p.s.lit), &s); err != nil {
p.errorf(p.s.Loc(), "illegal string literal: %s", p.s.lit)
return nil
}
term := StringTerm(s).SetLocation(p.s.Loc())
return term
return StringTerm(s).SetLocation(p.s.Loc())
}
return p.parseRawString()
}
@@ -1868,8 +1895,7 @@ func (p *Parser) parseRawString() *Term {
if len(p.s.lit) < 2 {
return nil
}
term := StringTerm(p.s.lit[1 : len(p.s.lit)-1]).SetLocation(p.s.Loc())
return term
return StringTerm(p.s.lit[1 : len(p.s.lit)-1]).SetLocation(p.s.Loc())
}
// this is the name to use for instantiating an empty set, e.g., `set()`.

View File

@@ -0,0 +1,85 @@
// Copyright 2025 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
package ast
import (
"strings"
"sync"
)
// builtinNamesByNumParts lazily builds an index of all dotted built-in
// function names, keyed by their number of dot-separated parts (e.g.
// "io.jwt.decode" is filed under 3). Single-part names are excluded, as
// those lookups go straight to BuiltinMap.
var builtinNamesByNumParts = sync.OnceValue(func() map[int][]string {
	byParts := make(map[int][]string)
	for name := range BuiltinMap {
		if numParts := strings.Count(name, ".") + 1; numParts > 1 {
			byParts[numParts] = append(byParts[numParts], name)
		}
	}
	return byParts
})
// BuiltinNameFromRef attempts to extract a known built-in function name from a ref,
// in the most efficient way possible. I.e. without allocating memory for a new string.
// If no built-in function name can be extracted, the second return value is false.
func BuiltinNameFromRef(ref Ref) (string, bool) {
	reflen := len(ref)
	if reflen == 0 {
		return "", false
	}
	// A built-in ref always starts with a Var head (e.g. `io` in `io.jwt.decode`).
	_var, ok := ref[0].Value.(Var)
	if !ok {
		return "", false
	}
	varName := string(_var)
	// Single-part refs: the var itself may name a built-in (e.g. `count`).
	if reflen == 1 {
		if _, ok := BuiltinMap[varName]; ok {
			return varName, true
		}
		return "", false
	}
	// Compute the length the dotted name would have, while also verifying
	// every remaining term is a String (anything else cannot be part of a
	// built-in name).
	totalLen := len(varName)
	for _, term := range ref[1:] {
		if _, ok = term.Value.(String); !ok {
			return "", false
		}
		totalLen += 1 + len(term.Value.(String)) // account for dot
	}
	// Only candidate names with exactly as many dot-separated parts as the
	// ref has terms can possibly match.
	matched, ok := builtinNamesByNumParts()[reflen]
	if !ok {
		return "", false
	}
	for _, name := range matched {
		// This check saves us a huge amount of work, as only very few built-in
		// names will have the exact same length as the ref we are checking.
		if len(name) != totalLen {
			continue
		}
		// Example: `name` is "io.jwt.decode" (and so is ref)
		// The first part is varName, which have already been established to be 'io':
		// io, jwt.decode io == io
		if curr, remaining, _ := strings.Cut(name, "."); curr == varName {
			// Loop over the remaining (now known to be string) terms in the ref, e.g. "jwt" and "decode"
			for _, term := range ref[1:] {
				ts := string(term.Value.(String))
				// First iteration: jwt.decode != jwt, so we continue cutting
				// Second iteration: remaining is "decode", and so is term
				if remaining == ts {
					return name, true
				}
				// Cutting remaining (e.g. jwt.decode), and we now get:
				// jwt, decode, false || jwt != jwt
				// An empty remainder or a mismatched part means this
				// candidate cannot match; try the next name.
				if curr, remaining, _ = strings.Cut(remaining, "."); remaining == "" || curr != ts {
					break
				}
			}
		}
	}
	return "", false
}

View File

@@ -12,7 +12,6 @@ import (
"fmt"
"io"
"math"
"math/big"
"net/url"
"regexp"
"slices"
@@ -61,20 +60,20 @@ func InterfaceToValue(x any) (Value, error) {
case nil:
return NullValue, nil
case bool:
return InternedTerm(x).Value, nil
return InternedValue(x), nil
case json.Number:
if interned := InternedIntNumberTermFromString(string(x)); interned != nil {
return interned.Value, nil
}
return Number(x), nil
case int:
return InternedValueOr(x, newIntNumberValue), nil
case int64:
return int64Number(x), nil
return InternedValueOr(x, newInt64NumberValue), nil
case uint64:
return uint64Number(x), nil
return InternedValueOr(x, newUint64NumberValue), nil
case float64:
return floatNumber(x), nil
case int:
return intNumber(x), nil
case string:
return String(x), nil
case []any:
@@ -586,10 +585,7 @@ type Boolean bool
// BooleanTerm creates a new Term with a Boolean value.
func BooleanTerm(b bool) *Term {
if b {
return &Term{Value: InternedTerm(true).Value}
}
return &Term{Value: InternedTerm(false).Value}
return &Term{Value: internedBooleanValue(b)}
}
// Equal returns true if the other Value is a Boolean and is equal.
@@ -656,12 +652,12 @@ func NumberTerm(n json.Number) *Term {
// IntNumberTerm creates a new Term with an integer Number value.
func IntNumberTerm(i int) *Term {
return &Term{Value: Number(strconv.Itoa(i))}
return &Term{Value: newIntNumberValue(i)}
}
// UIntNumberTerm creates a new Term with an unsigned integer Number value.
func UIntNumberTerm(u uint64) *Term {
return &Term{Value: uint64Number(u)}
return &Term{Value: newUint64NumberValue(u)}
}
// FloatNumberTerm creates a new Term with a floating point Number value.
@@ -672,22 +668,10 @@ func FloatNumberTerm(f float64) *Term {
// Equal returns true if the other Value is a Number and is equal.
func (num Number) Equal(other Value) bool {
switch other := other.(type) {
case Number:
if num == other {
return true
}
if n1, ok1 := num.Int64(); ok1 {
n2, ok2 := other.Int64()
if ok1 && ok2 {
return n1 == n2
}
}
return num.Compare(other) == 0
default:
return false
if other, ok := other.(Number); ok {
return NumberCompare(num, other) == 0
}
return false
}
// Compare compares num to other, return <0, 0, or >0 if it is less than, equal to,
@@ -695,17 +679,7 @@ func (num Number) Equal(other Value) bool {
func (num Number) Compare(other Value) int {
// Optimize for the common case, as calling Compare allocates on heap.
if otherNum, yes := other.(Number); yes {
if ai, ok := num.Int64(); ok {
if bi, ok := otherNum.Int64(); ok {
if ai == bi {
return 0
}
if ai < bi {
return -1
}
return 1
}
}
return NumberCompare(num, otherNum)
}
return Compare(num, other)
@@ -726,13 +700,10 @@ func (num Number) Hash() int {
return i
}
}
f, err := json.Number(num).Float64()
if err != nil {
bs := []byte(num)
h := xxhash.Sum64(bs)
return int(h)
if f, ok := num.Float64(); ok {
return int(f)
}
return int(f)
return int(xxhash.Sum64String(string(num)))
}
// Int returns the int representation of num if possible.
@@ -773,15 +744,15 @@ func (num Number) String() string {
return string(num)
}
func intNumber(i int) Number {
func newIntNumberValue(i int) Value {
return Number(strconv.Itoa(i))
}
func int64Number(i int64) Number {
func newInt64NumberValue(i int64) Value {
return Number(strconv.FormatInt(i, 10))
}
func uint64Number(u uint64) Number {
func newUint64NumberValue(u uint64) Value {
return Number(strconv.FormatUint(u, 10))
}
@@ -1183,52 +1154,68 @@ func IsVarCompatibleString(s string) bool {
return varRegexp.MatchString(s)
}
var bbPool = &sync.Pool{
New: func() any {
return new(bytes.Buffer)
},
}
func (ref Ref) String() string {
if len(ref) == 0 {
// Note(anderseknert):
// Options tried in the order of cheapness, where after some effort,
// only the last option now requires a (single) allocation:
// 1. empty ref
// 2. single var ref
// 3. built-in function ref
// 4. concatenated parts
reflen := len(ref)
if reflen == 0 {
return ""
}
if len(ref) == 1 {
switch p := ref[0].Value.(type) {
case Var:
return p.String()
}
if reflen == 1 {
return ref[0].Value.String()
}
if name, ok := BuiltinNameFromRef(ref); ok {
return name
}
sb := sbPool.Get()
defer sbPool.Put(sb)
_var := ref[0].Value.String()
sb.Grow(10 * len(ref))
sb.WriteString(ref[0].Value.String())
bb := bbPool.Get().(*bytes.Buffer)
bb.Reset()
defer bbPool.Put(bb)
bb.Grow(len(_var) + len(ref[1:])*7) // rough estimate
bb.WriteString(_var)
for _, p := range ref[1:] {
switch p := p.Value.(type) {
case String:
str := string(p)
if varRegexp.MatchString(str) && !IsKeyword(str) {
sb.WriteByte('.')
sb.WriteString(str)
if IsVarCompatibleString(str) && !IsKeyword(str) {
bb.WriteByte('.')
bb.WriteString(str)
} else {
sb.WriteByte('[')
bb.WriteByte('[')
// Determine whether we need the full JSON-escaped form
if strings.ContainsFunc(str, isControlOrBackslash) {
// only now pay the cost of expensive JSON-escaped form
sb.WriteString(p.String())
bb.Write(strconv.AppendQuote(bb.AvailableBuffer(), str))
} else {
sb.WriteByte('"')
sb.WriteString(str)
sb.WriteByte('"')
bb.WriteByte('"')
bb.WriteString(str)
bb.WriteByte('"')
}
sb.WriteByte(']')
bb.WriteByte(']')
}
default:
sb.WriteByte('[')
sb.WriteString(p.String())
sb.WriteByte(']')
bb.WriteByte('[')
bb.WriteString(p.String())
bb.WriteByte(']')
}
}
return sb.String()
return bb.String()
}
// OutputVars returns a VarSet containing variables that would be bound by evaluating
@@ -1779,85 +1766,9 @@ func (s *set) Slice() []*Term {
func (s *set) insert(x *Term, resetSortGuard bool) {
hash := x.Hash()
insertHash := hash
// This `equal` utility is duplicated and manually inlined a number of
// time in this file. Inlining it avoids heap allocations, so it makes
// a big performance difference: some operations like lookup become twice
// as slow without it.
var equal func(v Value) bool
switch x := x.Value.(type) {
case Null, Boolean, String, Var:
equal = func(y Value) bool { return x == y }
case Number:
if xi, err := json.Number(x).Int64(); err == nil {
equal = func(y Value) bool {
if y, ok := y.(Number); ok {
if yi, err := json.Number(y).Int64(); err == nil {
return xi == yi
}
}
return false
}
break
}
// We use big.Rat for comparing big numbers.
// It replaces big.Float due to following reason:
// big.Float comes with a default precision of 64, and setting a
// larger precision results in more memory being allocated
// (regardless of the actual number we are parsing with SetString).
//
// Note: If we're so close to zero that big.Float says we are zero, do
// *not* big.Rat).SetString on the original string it'll potentially
// take very long.
var a *big.Rat
fa, ok := new(big.Float).SetString(string(x))
if !ok {
panic("illegal value")
}
if fa.IsInt() {
if i, _ := fa.Int64(); i == 0 {
a = new(big.Rat).SetInt64(0)
}
}
if a == nil {
a, ok = new(big.Rat).SetString(string(x))
if !ok {
panic("illegal value")
}
}
equal = func(b Value) bool {
if bNum, ok := b.(Number); ok {
var b *big.Rat
fb, ok := new(big.Float).SetString(string(bNum))
if !ok {
panic("illegal value")
}
if fb.IsInt() {
if i, _ := fb.Int64(); i == 0 {
b = new(big.Rat).SetInt64(0)
}
}
if b == nil {
b, ok = new(big.Rat).SetString(string(bNum))
if !ok {
panic("illegal value")
}
}
return a.Cmp(b) == 0
}
return false
}
default:
equal = func(y Value) bool { return Compare(x, y) == 0 }
}
for curr, ok := s.elems[insertHash]; ok; {
if equal(curr.Value) {
if KeyHashEqual(curr.Value, x.Value) {
return
}
@@ -1883,87 +1794,18 @@ func (s *set) insert(x *Term, resetSortGuard bool) {
}
func (s *set) get(x *Term) *Term {
hash := x.Hash()
// This `equal` utility is duplicated and manually inlined a number of
// time in this file. Inlining it avoids heap allocations, so it makes
// a big performance difference: some operations like lookup become twice
// as slow without it.
var equal func(v Value) bool
switch x := x.Value.(type) {
case Null, Boolean, String, Var:
equal = func(y Value) bool { return x == y }
case Number:
if xi, err := json.Number(x).Int64(); err == nil {
equal = func(y Value) bool {
if y, ok := y.(Number); ok {
if yi, err := json.Number(y).Int64(); err == nil {
return xi == yi
}
}
return false
}
break
}
// We use big.Rat for comparing big numbers.
// It replaces big.Float due to following reason:
// big.Float comes with a default precision of 64, and setting a
// larger precision results in more memory being allocated
// (regardless of the actual number we are parsing with SetString).
//
// Note: If we're so close to zero that big.Float says we are zero, do
// *not* big.Rat).SetString on the original string it'll potentially
// take very long.
var a *big.Rat
fa, ok := new(big.Float).SetString(string(x))
if !ok {
panic("illegal value")
}
if fa.IsInt() {
if i, _ := fa.Int64(); i == 0 {
a = new(big.Rat).SetInt64(0)
}
}
if a == nil {
a, ok = new(big.Rat).SetString(string(x))
if !ok {
panic("illegal value")
}
}
equal = func(b Value) bool {
if bNum, ok := b.(Number); ok {
var b *big.Rat
fb, ok := new(big.Float).SetString(string(bNum))
if !ok {
panic("illegal value")
}
if fb.IsInt() {
if i, _ := fb.Int64(); i == 0 {
b = new(big.Rat).SetInt64(0)
}
}
if b == nil {
b, ok = new(big.Rat).SetString(string(bNum))
if !ok {
panic("illegal value")
}
}
return a.Cmp(b) == 0
}
return false
}
default:
equal = func(y Value) bool { return Compare(x, y) == 0 }
if len(s.elems) == 0 {
return nil
}
hash := x.Hash()
for curr, ok := s.elems[hash]; ok; {
if equal(curr.Value) {
// Pointer equality check first
if curr == x {
return curr
}
if KeyHashEqual(curr.Value, x.Value) {
return curr
}
@@ -2304,12 +2146,37 @@ func (obj *object) Insert(k, v *Term) {
// Get returns the value of k in obj if k exists, otherwise nil.
func (obj *object) Get(k *Term) *Term {
if elem := obj.get(k); elem != nil {
return elem.value
if len(obj.elems) == 0 {
return nil
}
hash := k.Hash()
for curr := obj.elems[hash]; curr != nil; curr = curr.next {
// Pointer equality check always fastest, and not too unlikely with interning.
if curr.key == k {
return curr.value
}
if KeyHashEqual(curr.key.Value, k.Value) {
return curr.value
}
}
return nil
}
// KeyHashEqual reports whether x and y are equal for the purpose of
// hash-bucket key lookups. Directly comparable scalar kinds (Null,
// Boolean, String, Var) use ==, Numbers are compared numerically, and
// every other combination falls back to the full Compare.
func KeyHashEqual(x, y Value) bool {
	switch key := x.(type) {
	case Null, Boolean, String, Var:
		return key == y
	case Number:
		if other, ok := y.(Number); ok {
			return key.Equal(other)
		}
	}
	return Compare(x, y) == 0
}
// Hash returns the hash code for the Value.
func (obj *object) Hash() int {
return obj.hash
@@ -2516,94 +2383,7 @@ func (obj *object) String() string {
return sb.String()
}
func (obj *object) get(k *Term) *objectElem {
hash := k.Hash()
// This `equal` utility is duplicated and manually inlined a number of
// time in this file. Inlining it avoids heap allocations, so it makes
// a big performance difference: some operations like lookup become twice
// as slow without it.
var equal func(v Value) bool
switch x := k.Value.(type) {
case Null, Boolean, String, Var:
equal = func(y Value) bool { return x == y }
case Number:
if xi, ok := x.Int64(); ok {
equal = func(y Value) bool {
if x == y {
return true
}
if y, ok := y.(Number); ok {
if yi, ok := y.Int64(); ok {
return xi == yi
}
}
return false
}
break
}
// We use big.Rat for comparing big numbers.
// It replaces big.Float due to following reason:
// big.Float comes with a default precision of 64, and setting a
// larger precision results in more memory being allocated
// (regardless of the actual number we are parsing with SetString).
//
// Note: If we're so close to zero that big.Float says we are zero, do
// *not* big.Rat).SetString on the original string it'll potentially
// take very long.
var a *big.Rat
fa, ok := new(big.Float).SetString(string(x))
if !ok {
panic("illegal value")
}
if fa.IsInt() {
if i, _ := fa.Int64(); i == 0 {
a = new(big.Rat).SetInt64(0)
}
}
if a == nil {
a, ok = new(big.Rat).SetString(string(x))
if !ok {
panic("illegal value")
}
}
equal = func(b Value) bool {
if bNum, ok := b.(Number); ok {
var b *big.Rat
fb, ok := new(big.Float).SetString(string(bNum))
if !ok {
panic("illegal value")
}
if fb.IsInt() {
if i, _ := fb.Int64(); i == 0 {
b = new(big.Rat).SetInt64(0)
}
}
if b == nil {
b, ok = new(big.Rat).SetString(string(bNum))
if !ok {
panic("illegal value")
}
}
return a.Cmp(b) == 0
}
return false
}
default:
equal = func(y Value) bool { return Compare(x, y) == 0 }
}
for curr := obj.elems[hash]; curr != nil; curr = curr.next {
if equal(curr.key.Value) {
return curr
}
}
func (*object) get(*Term) *objectElem {
return nil
}
@@ -2612,88 +2392,9 @@ func (obj *object) get(k *Term) *objectElem {
func (obj *object) insert(k, v *Term, resetSortGuard bool) {
hash := k.Hash()
head := obj.elems[hash]
// This `equal` utility is duplicated and manually inlined a number of
// time in this file. Inlining it avoids heap allocations, so it makes
// a big performance difference: some operations like lookup become twice
// as slow without it.
var equal func(v Value) bool
switch x := k.Value.(type) {
case Null, Boolean, String, Var:
equal = func(y Value) bool { return x == y }
case Number:
if xi, err := json.Number(x).Int64(); err == nil {
equal = func(y Value) bool {
if x == y {
return true
}
if y, ok := y.(Number); ok {
if yi, err := json.Number(y).Int64(); err == nil {
return xi == yi
}
}
return false
}
break
}
// We use big.Rat for comparing big numbers.
// It replaces big.Float due to following reason:
// big.Float comes with a default precision of 64, and setting a
// larger precision results in more memory being allocated
// (regardless of the actual number we are parsing with SetString).
//
// Note: If we're so close to zero that big.Float says we are zero, do
// *not* big.Rat).SetString on the original string it'll potentially
// take very long.
var a *big.Rat
fa, ok := new(big.Float).SetString(string(x))
if !ok {
panic("illegal value")
}
if fa.IsInt() {
if i, _ := fa.Int64(); i == 0 {
a = new(big.Rat).SetInt64(0)
}
}
if a == nil {
a, ok = new(big.Rat).SetString(string(x))
if !ok {
panic("illegal value")
}
}
equal = func(b Value) bool {
if bNum, ok := b.(Number); ok {
var b *big.Rat
fb, ok := new(big.Float).SetString(string(bNum))
if !ok {
panic("illegal value")
}
if fb.IsInt() {
if i, _ := fb.Int64(); i == 0 {
b = new(big.Rat).SetInt64(0)
}
}
if b == nil {
b, ok = new(big.Rat).SetString(string(bNum))
if !ok {
panic("illegal value")
}
}
return a.Cmp(b) == 0
}
return false
}
default:
equal = func(y Value) bool { return Compare(x, y) == 0 }
}
for curr := head; curr != nil; curr = curr.next {
if equal(curr.key.Value) {
if KeyHashEqual(curr.key.Value, k.Value) {
if curr.value.IsGround() {
obj.ground--
}

View File

@@ -86,6 +86,12 @@ func moduleInfoPath(id string) storage.Path {
func read(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (any, error) {
value, err := store.Read(ctx, txn, path)
if err != nil {
if storage.IsNotFound(err) {
return nil, &storage.Error{
Code: storage.NotFoundErr,
Message: strings.TrimPrefix(path.String(), "/system") + ": document does not exist",
}
}
return nil, err
}

View File

@@ -18,6 +18,20 @@ import (
"github.com/open-policy-agent/opa/internal/future"
"github.com/open-policy-agent/opa/v1/ast"
"github.com/open-policy-agent/opa/v1/types"
"github.com/open-policy-agent/opa/v1/util"
)
// defaultLocationFile is the file name used in `Ast()` for terms
// without a location, as could happen when pretty-printing the
// results of partial eval.
const defaultLocationFile = "__format_default__"
var (
elseVar ast.Value = ast.Var("else")
expandedConst = ast.NewBody(ast.NewExpr(ast.InternedTerm(true)))
commentsSlicePool = util.NewSlicePool[*ast.Comment](50)
varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$")
)
// Opts lets you control the code formatting via `AstWithOpts()`.
@@ -38,6 +52,11 @@ type Opts struct {
// Imports are only removed if [Opts.RegoVersion] makes them redundant.
DropV0Imports bool
// SkipDefensiveCopying, if true, will avoid deep-copying the AST before formatting it.
// This is true by default for all Source* functions, but false by default for Ast* functions,
// as some formatting operations may otherwise mutate the AST.
SkipDefensiveCopying bool
Capabilities *ast.Capabilities
}
@@ -48,16 +67,11 @@ func (o Opts) effectiveRegoVersion() ast.RegoVersion {
return o.RegoVersion
}
// defaultLocationFile is the file name used in `Ast()` for terms
// without a location, as could happen when pretty-printing the
// results of partial eval.
const defaultLocationFile = "__format_default__"
// Source formats a Rego source file. The bytes provided must describe a complete
// Rego module. If they don't, Source will return an error resulting from the attempt
// to parse the bytes.
func Source(filename string, src []byte) ([]byte, error) {
return SourceWithOpts(filename, src, Opts{})
return SourceWithOpts(filename, src, Opts{SkipDefensiveCopying: true})
}
func SourceWithOpts(filename string, src []byte, opts Opts) ([]byte, error) {
@@ -72,6 +86,9 @@ func SourceWithOpts(filename string, src []byte, opts Opts) ([]byte, error) {
parserOpts.RegoVersion = ast.RegoV1
}
// Copying the node does not make sense when both input and output are bytes.
opts.SkipDefensiveCopying = true
if parserOpts.RegoVersion == ast.RegoUndefined {
parserOpts.RegoVersion = ast.DefaultRegoVersion
}
@@ -166,7 +183,9 @@ func AstWithOpts(x any, opts Opts) ([]byte, error) {
// The node has to be deep copied because it may be mutated below. Alternatively,
// we could avoid the copy by checking if mutation will occur first. For now,
// since format is not latency sensitive, just deep copy in all cases.
x = ast.Copy(x)
if !opts.SkipDefensiveCopying {
x = ast.Copy(x)
}
wildcards := map[ast.Var]*ast.Term{}
@@ -233,10 +252,11 @@ func AstWithOpts(x any, opts Opts) ([]byte, error) {
}
case *ast.Rule:
if len(n.Head.Ref()) > 2 {
headLen := len(n.Head.Ref())
if headLen > 2 {
o.refHeads = true
}
if len(n.Head.Ref()) == 2 && n.Head.Key != nil && n.Head.Value == nil { // p.q contains "x"
if headLen == 2 && n.Head.Key != nil && n.Head.Value == nil { // p.q contains "x"
o.refHeads = true
}
}
@@ -339,6 +359,7 @@ func AstWithOpts(x any, opts Opts) ([]byte, error) {
if len(w.errs) > 0 {
return nil, w.errs
}
return squashTrailingNewlines(w.buf.Bytes()), nil
}
@@ -545,8 +566,6 @@ func (w *writer) writeRules(rules []*ast.Rule, comments []*ast.Comment) ([]*ast.
return comments, nil
}
var expandedConst = ast.NewBody(ast.NewExpr(ast.InternedTerm(true)))
func (w *writer) groupableOneLiner(rule *ast.Rule) bool {
// Location required to determine if two rules are adjacent in the policy.
// If not, we respect line breaks between rules.
@@ -667,8 +686,6 @@ func (w *writer) writeRule(rule *ast.Rule, isElse bool, comments []*ast.Comment)
return comments, nil
}
var elseVar ast.Value = ast.Var("else")
func (w *writer) writeElse(rule *ast.Rule, comments []*ast.Comment) ([]*ast.Comment, error) {
// If there was nothing else on the line before the "else" starts
// then preserve this style of else block, otherwise it will be
@@ -1127,18 +1144,33 @@ func (w *writer) writeWith(with *ast.With, comments []*ast.Comment, indented boo
return comments, nil
}
// saveComments saves a copy of the comments slice in a pooled slice to and returns it.
// This is to avoid having to create a new slice every time we need to save comments.
// The caller is responsible for putting the slice back in the pool when done.
func saveComments(comments []*ast.Comment) *[]*ast.Comment {
cmlen := len(comments)
saved := commentsSlicePool.Get(cmlen)
copy(*saved, comments)
return saved
}
func (w *writer) writeTerm(term *ast.Term, comments []*ast.Comment) ([]*ast.Comment, error) {
currentComments := make([]*ast.Comment, len(comments))
copy(currentComments, comments)
if len(comments) == 0 {
return w.writeTermParens(false, term, comments)
}
currentLen := w.buf.Len()
currentComments := saveComments(comments)
defer commentsSlicePool.Put(currentComments)
comments, err := w.writeTermParens(false, term, comments)
if err != nil {
if errors.As(err, &unexpectedCommentError{}) {
w.buf.Truncate(currentLen)
comments, uErr := w.writeUnformatted(term.Location, currentComments)
comments, uErr := w.writeUnformatted(term.Location, *currentComments)
if uErr != nil {
return nil, uErr
}
@@ -1156,16 +1188,16 @@ func (w *writer) writeUnformatted(location *ast.Location, currentComments []*ast
return nil, errors.New("original unformatted text is empty")
}
rawRule := string(location.Text)
rowNum := len(strings.Split(rawRule, "\n"))
rowNum := bytes.Count(location.Text, []byte{'\n'}) + 1
w.write(string(location.Text))
w.writeBytes(location.Text)
comments := make([]*ast.Comment, 0, len(currentComments))
for _, c := range currentComments {
// if there is a body then wait to write the last comment
if w.writeCommentOnFinalLine && c.Location.Row == location.Row+rowNum-1 {
w.write(" " + string(c.Location.Text))
w.write(" ")
w.writeBytes(c.Location.Text)
continue
}
@@ -1227,19 +1259,19 @@ func (w *writer) writeTermParens(parens bool, term *ast.Term, comments []*ast.Co
case ast.String:
if term.Location.Text[0] == '`' {
// To preserve raw strings, we need to output the original text,
w.write(string(term.Location.Text))
w.writeBytes(term.Location.Text)
} else {
// x.String() cannot be used by default because it can change the input string "\u0000" to "\x00"
var after, quote string
var after, quote []byte
var found bool
// term.Location.Text could contain the prefix `else :=`, remove it
switch term.Location.Text[len(term.Location.Text)-1] {
case '"':
quote = "\""
_, after, found = strings.Cut(string(term.Location.Text), quote)
quote = []byte{'"'}
_, after, found = bytes.Cut(term.Location.Text, quote)
case '`':
quote = "`"
_, after, found = strings.Cut(string(term.Location.Text), quote)
quote = []byte{'`'}
_, after, found = bytes.Cut(term.Location.Text, quote)
}
if !found {
@@ -1247,7 +1279,8 @@ func (w *writer) writeTermParens(parens bool, term *ast.Term, comments []*ast.Co
// e.g. partial_set.y to partial_set["y"]
w.write(x.String())
} else {
w.write(quote + after)
w.writeBytes(quote)
w.writeBytes(after)
}
}
@@ -1310,8 +1343,6 @@ func (w *writer) writeBracketed(str string) {
w.write("[" + str + "]")
}
var varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$")
func (w *writer) writeRefStringPath(s ast.String, l *ast.Location) {
str := string(s)
if w.shouldBracketRefTerm(str, l) {
@@ -2130,11 +2161,16 @@ func (w *writer) blankLine() {
w.write("\n")
}
// write the input string and writes it to the buffer.
// write writes string s to the buffer.
func (w *writer) write(s string) {
w.buf.WriteString(s)
}
// writeBytes writes []byte b to the buffer.
func (w *writer) writeBytes(b []byte) {
w.buf.Write(b)
}
// writeLine writes the string on a newly started line, then terminate the line.
func (w *writer) writeLine(s string) {
if !w.inline {

View File

@@ -397,7 +397,7 @@ func (ap *oauth2ClientCredentialsAuthPlugin) createAuthJWT(ctx context.Context,
}
// Parse headers
var headers map[string]interface{}
var headers map[string]any
if err := json.Unmarshal(header, &headers); err != nil {
return nil, err
}

View File

@@ -1798,7 +1798,7 @@ func (r *Rego) PrepareForEval(ctx context.Context, opts ...PrepareOption) (Prepa
}
// nolint: staticcheck // SA4006 false positive
data, err := r.store.Read(ctx, r.txn, storage.Path{})
data, err := r.store.Read(ctx, r.txn, storage.RootPath)
if err != nil {
_ = txnClose(ctx, err) // Ignore error
return PreparedEvalQuery{}, err
@@ -2020,7 +2020,7 @@ func (r *Rego) loadFiles(ctx context.Context, txn storage.Transaction, m metrics
}
if len(result.Documents) > 0 {
err = r.store.Write(ctx, txn, storage.AddOp, storage.Path{}, result.Documents)
err = r.store.Write(ctx, txn, storage.AddOp, storage.RootPath, result.Documents)
if err != nil {
return err
}

View File

@@ -73,10 +73,9 @@ func (u *updateAST) Apply(v any) any {
}
func newUpdateAST(data any, op storage.PatchOp, path storage.Path, idx int, value ast.Value) (*updateAST, error) {
switch data.(type) {
case ast.Null, ast.Boolean, ast.Number, ast.String:
return nil, errors.NewNotFoundError(path)
return nil, errors.NotFoundErr
}
switch data := data.(type) {
@@ -94,11 +93,10 @@ func newUpdateAST(data any, op storage.PatchOp, path storage.Path, idx int, valu
}
func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, idx int, value ast.Value) (*updateAST, error) {
if idx == len(path)-1 {
if path[idx] == "-" || path[idx] == strconv.Itoa(data.Len()) {
if op != storage.AddOp {
return nil, invalidPatchError("%v: invalid patch path", path)
return nil, errors.NewInvalidPatchError("%v: invalid patch path", path)
}
cpy := data.Append(ast.NewTerm(value))
@@ -161,7 +159,7 @@ func newUpdateObjectAST(data ast.Object, op storage.PatchOp, path storage.Path,
switch op {
case storage.ReplaceOp, storage.RemoveOp:
if val == nil {
return nil, errors.NewNotFoundError(path)
return nil, errors.NotFoundErr
}
}
return &updateAST{path, op == storage.RemoveOp, value}, nil
@@ -171,14 +169,7 @@ func newUpdateObjectAST(data ast.Object, op storage.PatchOp, path storage.Path,
return newUpdateAST(val.Value, op, path, idx+1, value)
}
return nil, errors.NewNotFoundError(path)
}
func interfaceToValue(v any) (ast.Value, error) {
if v, ok := v.(ast.Value); ok {
return v, nil
}
return ast.InterfaceToValue(v)
return nil, errors.NotFoundErr
}
// setInAst updates the value in the AST at the given path with the given value.

View File

@@ -27,6 +27,7 @@ import (
"github.com/open-policy-agent/opa/internal/merge"
"github.com/open-policy-agent/opa/v1/ast"
"github.com/open-policy-agent/opa/v1/storage"
"github.com/open-policy-agent/opa/v1/storage/internal/errors"
"github.com/open-policy-agent/opa/v1/util"
)
@@ -50,6 +51,7 @@ func NewWithOpts(opts ...Opt) storage.Store {
if s.returnASTValuesOnRead {
s.data = ast.NewObject()
s.roundTripOnWrite = false
} else {
s.data = map[string]any{}
}
@@ -71,7 +73,7 @@ func NewFromObjectWithOpts(data map[string]any, opts ...Opt) storage.Store {
if err != nil {
panic(err)
}
if err := db.Write(ctx, txn, storage.AddOp, storage.Path{}, data); err != nil {
if err := db.Write(ctx, txn, storage.AddOp, storage.RootPath, data); err != nil {
panic(err)
}
if err := db.Commit(ctx, txn); err != nil {
@@ -89,9 +91,8 @@ func NewFromReader(r io.Reader) storage.Store {
// NewFromReader returns a new in-memory store from a reader that produces a
// JSON serialized object, with extra options. This function is for test purposes.
func NewFromReaderWithOpts(r io.Reader, opts ...Opt) storage.Store {
d := util.NewJSONDecoder(r)
var data map[string]any
if err := d.Decode(&data); err != nil {
if err := util.NewJSONDecoder(r).Decode(&data); err != nil {
panic(err)
}
return NewFromObjectWithOpts(data, opts...)
@@ -120,35 +121,39 @@ type handle struct {
}
func (db *store) NewTransaction(_ context.Context, params ...storage.TransactionParams) (storage.Transaction, error) {
var write bool
var ctx *storage.Context
if len(params) > 0 {
write = params[0].Write
ctx = params[0].Context
txn := &transaction{
xid: atomic.AddUint64(&db.xid, uint64(1)),
db: db,
}
xid := atomic.AddUint64(&db.xid, uint64(1))
if write {
if len(params) > 0 {
txn.write = params[0].Write
txn.context = params[0].Context
}
if txn.write {
db.wmu.Lock()
} else {
db.rmu.RLock()
}
return newTransaction(xid, write, ctx, db), nil
return txn, nil
}
// Truncate implements the storage.Store interface. This method must be called within a transaction.
func (db *store) Truncate(ctx context.Context, txn storage.Transaction, params storage.TransactionParams, it storage.Iterator) error {
var update *storage.Update
var err error
mergedData := map[string]any{}
underlying, err := db.underlying(txn)
if err != nil {
return err
}
mergedData := map[string]any{}
for {
update, err = it.Next()
if err != nil {
if update, err = it.Next(); err != nil {
break
}
@@ -159,8 +164,7 @@ func (db *store) Truncate(ctx context.Context, txn storage.Transaction, params s
}
} else {
var value any
err = util.Unmarshal(update.Value, &value)
if err != nil {
if err = util.Unmarshal(update.Value, &value); err != nil {
return err
}
@@ -193,11 +197,7 @@ func (db *store) Truncate(ctx context.Context, txn storage.Transaction, params s
// For backwards compatibility, check if `RootOverwrite` was configured.
if params.RootOverwrite {
newPath, ok := storage.ParsePathEscaped("/")
if !ok {
return fmt.Errorf("storage path invalid: %v", newPath)
}
return underlying.Write(storage.AddOp, newPath, mergedData)
return underlying.Write(storage.AddOp, storage.RootPath, mergedData)
}
for _, root := range params.BasePaths {
@@ -310,12 +310,7 @@ func (db *store) Read(_ context.Context, txn storage.Transaction, path storage.P
return nil, err
}
v, err := underlying.Read(path)
if err != nil {
return nil, err
}
return v, nil
return underlying.Read(path)
}
func (db *store) Write(_ context.Context, txn storage.Transaction, op storage.PatchOp, path storage.Path, value any) error {
@@ -323,12 +318,19 @@ func (db *store) Write(_ context.Context, txn storage.Transaction, op storage.Pa
if err != nil {
return err
}
if db.returnASTValuesOnRead || !util.NeedsRoundTrip(value) {
// Fast path when value is nil, bool, string or json.Number.
return underlying.Write(op, path, value)
}
val := util.Reference(value)
if db.roundTripOnWrite {
if err := util.RoundTrip(val); err != nil {
return err
}
}
return underlying.Write(op, path, *val)
}
@@ -409,22 +411,12 @@ func (db *store) underlying(txn storage.Transaction) (*transaction, error) {
return underlying, nil
}
const rootMustBeObjectMsg = "root must be object"
const rootCannotBeRemovedMsg = "root cannot be removed"
func invalidPatchError(f string, a ...any) *storage.Error {
return &storage.Error{
Code: storage.InvalidPatchErr,
Message: fmt.Sprintf(f, a...),
}
}
func mktree(path []string, value any) (map[string]any, error) {
if len(path) == 0 {
// For 0 length path the value is the full tree.
obj, ok := value.(map[string]any)
if !ok {
return nil, invalidPatchError(rootMustBeObjectMsg)
return nil, errors.RootMustBeObjectErr
}
return obj, nil
}

View File

@@ -7,6 +7,7 @@ package inmem
import (
"container/list"
"encoding/json"
"slices"
"strconv"
"github.com/open-policy-agent/opa/internal/deepcopy"
@@ -34,13 +35,13 @@ import (
// Read transactions do not require any special handling and simply passthrough
// to the underlying store. Read transactions do not support upgrade.
type transaction struct {
db *store
updates *list.List
context *storage.Context
policies map[string]policyUpdate
xid uint64
write bool
stale bool
db *store
updates *list.List
policies map[string]policyUpdate
context *storage.Context
}
type policyUpdate struct {
@@ -48,28 +49,17 @@ type policyUpdate struct {
remove bool
}
func newTransaction(xid uint64, write bool, context *storage.Context, db *store) *transaction {
return &transaction{
xid: xid,
write: write,
db: db,
policies: map[string]policyUpdate{},
updates: list.New(),
context: context,
}
}
func (txn *transaction) ID() uint64 {
return txn.xid
}
func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value any) error {
if !txn.write {
return &storage.Error{
Code: storage.InvalidTransactionErr,
Message: "data write during read transaction",
}
return &storage.Error{Code: storage.InvalidTransactionErr, Message: "data write during read transaction"}
}
if txn.updates == nil {
txn.updates = list.New()
}
if len(path) == 0 {
@@ -85,9 +75,20 @@ func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value any)
if update.Path().Equal(path) {
if update.Remove() {
if op != storage.AddOp {
return errors.NewNotFoundError(path)
return errors.NotFoundErr
}
}
// If the last update has the same path and value, we have nothing to do.
if txn.db.returnASTValuesOnRead {
if astValue, ok := update.Value().(ast.Value); ok {
if equalsValue(value, astValue) {
return nil
}
}
} else if comparableEquals(update.Value(), value) {
return nil
}
txn.updates.Remove(curr)
break
}
@@ -106,7 +107,7 @@ func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value any)
// existing update is mutated.
if path.HasPrefix(update.Path()) {
if update.Remove() {
return errors.NewNotFoundError(path)
return errors.NotFoundErr
}
suffix := path[len(update.Path()):]
newUpdate, err := txn.db.newUpdate(update.Value(), op, suffix, 0, value)
@@ -129,33 +130,53 @@ func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value any)
return nil
}
func comparableEquals(a, b any) bool {
switch a := a.(type) {
case nil:
return b == nil
case bool:
if vb, ok := b.(bool); ok {
return vb == a
}
case string:
if vs, ok := b.(string); ok {
return vs == a
}
case json.Number:
if vn, ok := b.(json.Number); ok {
return vn == a
}
}
return false
}
func (txn *transaction) updateRoot(op storage.PatchOp, value any) error {
if op == storage.RemoveOp {
return invalidPatchError(rootCannotBeRemovedMsg)
return errors.RootCannotBeRemovedErr
}
var update any
if txn.db.returnASTValuesOnRead {
valueAST, err := interfaceToValue(value)
valueAST, err := ast.InterfaceToValue(value)
if err != nil {
return err
}
if _, ok := valueAST.(ast.Object); !ok {
return invalidPatchError(rootMustBeObjectMsg)
return errors.RootMustBeObjectErr
}
update = &updateAST{
path: storage.Path{},
path: storage.RootPath,
remove: false,
value: valueAST,
}
} else {
if _, ok := value.(map[string]any); !ok {
return invalidPatchError(rootMustBeObjectMsg)
return errors.RootMustBeObjectErr
}
update = &updateRaw{
path: storage.Path{},
path: storage.RootPath,
remove: false,
value: value,
}
@@ -163,21 +184,36 @@ func (txn *transaction) updateRoot(op storage.PatchOp, value any) error {
txn.updates.Init()
txn.updates.PushFront(update)
return nil
}
func (txn *transaction) Commit() (result storage.TriggerEvent) {
result.Context = txn.context
for curr := txn.updates.Front(); curr != nil; curr = curr.Next() {
action := curr.Value.(dataUpdate)
txn.db.data = action.Apply(txn.db.data)
result.Data = append(result.Data, storage.DataEvent{
Path: action.Path(),
Data: action.Value(),
Removed: action.Remove(),
})
if txn.updates != nil {
if len(txn.db.triggers) > 0 {
result.Data = slices.Grow(result.Data, txn.updates.Len())
}
for curr := txn.updates.Front(); curr != nil; curr = curr.Next() {
action := curr.Value.(dataUpdate)
txn.db.data = action.Apply(txn.db.data)
if len(txn.db.triggers) > 0 {
result.Data = append(result.Data, storage.DataEvent{
Path: action.Path(),
Data: action.Value(),
Removed: action.Remove(),
})
}
}
}
if len(txn.policies) > 0 && len(txn.db.triggers) > 0 {
result.Policy = slices.Grow(result.Policy, len(txn.policies))
}
for id, upd := range txn.policies {
if upd.remove {
delete(txn.db.policies, id)
@@ -185,11 +221,13 @@ func (txn *transaction) Commit() (result storage.TriggerEvent) {
txn.db.policies[id] = upd.value
}
result.Policy = append(result.Policy, storage.PolicyEvent{
ID: id,
Data: upd.value,
Removed: upd.remove,
})
if len(txn.db.triggers) > 0 {
result.Policy = append(result.Policy, storage.PolicyEvent{
ID: id,
Data: upd.value,
Removed: upd.remove,
})
}
}
return result
}
@@ -218,8 +256,7 @@ func deepcpy(v any) any {
}
func (txn *transaction) Read(path storage.Path) (any, error) {
if !txn.write {
if !txn.write || txn.updates == nil {
return pointer(txn.db.data, path)
}
@@ -231,7 +268,7 @@ func (txn *transaction) Read(path storage.Path) (any, error) {
if path.HasPrefix(upd.Path()) {
if upd.Remove() {
return nil, errors.NewNotFoundError(path)
return nil, errors.NotFoundErr
}
return pointer(upd.Value(), path[len(upd.Path()):])
}
@@ -260,8 +297,7 @@ func (txn *transaction) Read(path storage.Path) (any, error) {
return cpy, nil
}
func (txn *transaction) ListPolicies() []string {
var ids []string
func (txn *transaction) ListPolicies() (ids []string) {
for id := range txn.db.policies {
if _, ok := txn.policies[id]; !ok {
ids = append(ids, id)
@@ -276,11 +312,13 @@ func (txn *transaction) ListPolicies() []string {
}
func (txn *transaction) GetPolicy(id string) ([]byte, error) {
if update, ok := txn.policies[id]; ok {
if !update.remove {
return update.value, nil
if txn.policies != nil {
if update, ok := txn.policies[id]; ok {
if !update.remove {
return update.value, nil
}
return nil, errors.NewNotFoundErrorf("policy id %q", id)
}
return nil, errors.NewNotFoundErrorf("policy id %q", id)
}
if exist, ok := txn.db.policies[id]; ok {
return exist, nil
@@ -289,24 +327,24 @@ func (txn *transaction) GetPolicy(id string) ([]byte, error) {
}
func (txn *transaction) UpsertPolicy(id string, bs []byte) error {
if !txn.write {
return &storage.Error{
Code: storage.InvalidTransactionErr,
Message: "policy write during read transaction",
}
}
txn.policies[id] = policyUpdate{bs, false}
return nil
return txn.updatePolicy(id, policyUpdate{bs, false})
}
func (txn *transaction) DeletePolicy(id string) error {
return txn.updatePolicy(id, policyUpdate{nil, true})
}
func (txn *transaction) updatePolicy(id string, update policyUpdate) error {
if !txn.write {
return &storage.Error{
Code: storage.InvalidTransactionErr,
Message: "policy write during read transaction",
}
return &storage.Error{Code: storage.InvalidTransactionErr, Message: "policy write during read transaction"}
}
txn.policies[id] = policyUpdate{nil, true}
if txn.policies == nil {
txn.policies = map[string]policyUpdate{id: update}
} else {
txn.policies[id] = update
}
return nil
}
@@ -327,13 +365,33 @@ type updateRaw struct {
value any // value to add/replace at path (ignored if remove is true)
}
func equalsValue(a any, v ast.Value) bool {
if a, ok := a.(ast.Value); ok {
return a.Compare(v) == 0
}
switch a := a.(type) {
case nil:
return v == ast.NullValue
case bool:
if vb, ok := v.(ast.Boolean); ok {
return bool(vb) == a
}
case string:
if vs, ok := v.(ast.String); ok {
return string(vs) == a
}
}
return false
}
func (db *store) newUpdate(data any, op storage.PatchOp, path storage.Path, idx int, value any) (dataUpdate, error) {
if db.returnASTValuesOnRead {
astData, err := interfaceToValue(data)
astData, err := ast.InterfaceToValue(data)
if err != nil {
return nil, err
}
astValue, err := interfaceToValue(value)
astValue, err := ast.InterfaceToValue(value)
if err != nil {
return nil, err
}
@@ -343,10 +401,9 @@ func (db *store) newUpdate(data any, op storage.PatchOp, path storage.Path, idx
}
func newUpdateRaw(data any, op storage.PatchOp, path storage.Path, idx int, value any) (dataUpdate, error) {
switch data.(type) {
case nil, bool, json.Number, string:
return nil, errors.NewNotFoundError(path)
return nil, errors.NotFoundErr
}
switch data := data.(type) {
@@ -364,11 +421,10 @@ func newUpdateRaw(data any, op storage.PatchOp, path storage.Path, idx int, valu
}
func newUpdateArray(data []any, op storage.PatchOp, path storage.Path, idx int, value any) (dataUpdate, error) {
if idx == len(path)-1 {
if path[idx] == "-" || path[idx] == strconv.Itoa(len(data)) {
if op != storage.AddOp {
return nil, invalidPatchError("%v: invalid patch path", path)
return nil, errors.NewInvalidPatchError("%v: invalid patch path", path)
}
cpy := make([]any, len(data)+1)
copy(cpy, data)
@@ -417,7 +473,7 @@ func newUpdateObject(data map[string]any, op storage.PatchOp, path storage.Path,
switch op {
case storage.ReplaceOp, storage.RemoveOp:
if _, ok := data[path[idx]]; !ok {
return nil, errors.NewNotFoundError(path)
return nil, errors.NotFoundErr
}
}
return &updateRaw{path, op == storage.RemoveOp, value}, nil
@@ -427,7 +483,7 @@ func newUpdateObject(data map[string]any, op storage.PatchOp, path storage.Path,
return newUpdateRaw(data, op, path, idx+1, value)
}
return nil, errors.NewNotFoundError(path)
return nil, errors.NotFoundErr
}
func (u *updateRaw) Remove() bool {

View File

@@ -11,27 +11,31 @@ import (
"github.com/open-policy-agent/opa/v1/storage"
)
const ArrayIndexTypeMsg = "array index must be integer"
const DoesNotExistMsg = "document does not exist"
const OutOfRangeMsg = "array index out of range"
const (
ArrayIndexTypeMsg = "array index must be integer"
DoesNotExistMsg = "document does not exist"
OutOfRangeMsg = "array index out of range"
RootMustBeObjectMsg = "root must be object"
RootCannotBeRemovedMsg = "root cannot be removed"
)
func NewNotFoundError(path storage.Path) *storage.Error {
return NewNotFoundErrorWithHint(path, DoesNotExistMsg)
}
var (
NotFoundErr = &storage.Error{Code: storage.NotFoundErr, Message: DoesNotExistMsg}
RootMustBeObjectErr = &storage.Error{Code: storage.InvalidPatchErr, Message: RootMustBeObjectMsg}
RootCannotBeRemovedErr = &storage.Error{Code: storage.InvalidPatchErr, Message: RootCannotBeRemovedMsg}
)
func NewNotFoundErrorWithHint(path storage.Path, hint string) *storage.Error {
message := path.String() + ": " + hint
return &storage.Error{
Code: storage.NotFoundErr,
Message: message,
Message: path.String() + ": " + hint,
}
}
func NewNotFoundErrorf(f string, a ...any) *storage.Error {
msg := fmt.Sprintf(f, a...)
return &storage.Error{
Code: storage.NotFoundErr,
Message: msg,
Message: fmt.Sprintf(f, a...),
}
}
@@ -41,3 +45,10 @@ func NewWriteConflictError(p storage.Path) *storage.Error {
Message: p.String(),
}
}
func NewInvalidPatchError(f string, a ...any) *storage.Error {
return &storage.Error{
Code: storage.InvalidPatchErr,
Message: fmt.Sprintf(f, a...),
}
}

View File

@@ -21,7 +21,7 @@ func Ptr(data any, path storage.Path) (any, error) {
case map[string]any:
var ok bool
if node, ok = curr[key]; !ok {
return nil, errors.NewNotFoundError(path)
return nil, errors.NotFoundErr
}
case []any:
pos, err := ValidateArrayIndex(curr, key, path)
@@ -30,7 +30,7 @@ func Ptr(data any, path storage.Path) (any, error) {
}
node = curr[pos]
default:
return nil, errors.NewNotFoundError(path)
return nil, errors.NotFoundErr
}
}
@@ -38,24 +38,45 @@ func Ptr(data any, path storage.Path) (any, error) {
}
func ValuePtr(data ast.Value, path storage.Path) (ast.Value, error) {
var keyTerm *ast.Term
defer func() {
if keyTerm != nil {
ast.TermPtrPool.Put(keyTerm)
}
}()
node := data
for i := range path {
key := path[i]
switch curr := node.(type) {
case ast.Object:
// This term is only created for the lookup, which is not.. ideal.
// By using the pool, we can at least avoid allocating the term itself,
// while still having to pay 1 allocation for the value. A better solution
// would be dynamically interned string terms.
keyTerm := ast.TermPtrPool.Get()
keyTerm.Value = ast.String(key)
val := curr.Get(keyTerm)
ast.TermPtrPool.Put(keyTerm)
if val == nil {
return nil, errors.NewNotFoundError(path)
// Note(anders):
// This term is only created for the lookup, which is not great — especially
// considering the path likely was converted from a ref, where we had all
// the terms available already! Without chaging the storage API, our options
// for performant lookups are limitied to using interning or a pool. Prefer
// interning when possible, as that is zero alloc. Using the pool avoids at
// least allocating a new term for every lookup, but still requires an alloc
// for the string Value.
if ast.HasInternedValue(key) {
if val := curr.Get(ast.InternedTerm(key)); val != nil {
node = val.Value
} else {
return nil, errors.NotFoundErr
}
} else {
if keyTerm == nil {
keyTerm = ast.TermPtrPool.Get()
}
// 1 alloc
keyTerm.Value = ast.String(key)
if val := curr.Get(keyTerm); val != nil {
node = val.Value
} else {
return nil, errors.NotFoundErr
}
}
node = val.Value
case *ast.Array:
pos, err := ValidateASTArrayIndex(curr, key, path)
if err != nil {
@@ -63,7 +84,7 @@ func ValuePtr(data ast.Value, path storage.Path) (ast.Value, error) {
}
node = curr.Elem(pos).Value
default:
return nil, errors.NewNotFoundError(path)
return nil, errors.NotFoundErr
}
}

View File

@@ -8,40 +8,40 @@ import (
"errors"
"fmt"
"net/url"
"slices"
"strconv"
"strings"
"github.com/open-policy-agent/opa/v1/ast"
)
// RootPath refers to the root document in storage.
var RootPath = Path{}
// Path refers to a document in storage.
type Path []string
// ParsePath returns a new path for the given str.
func ParsePath(str string) (path Path, ok bool) {
if len(str) == 0 {
return nil, false
}
if str[0] != '/' {
if len(str) == 0 || str[0] != '/' {
return nil, false
}
if len(str) == 1 {
return Path{}, true
}
parts := strings.Split(str[1:], "/")
return parts, true
return strings.Split(str[1:], "/"), true
}
// ParsePathEscaped returns a new path for the given escaped str.
func ParsePathEscaped(str string) (path Path, ok bool) {
path, ok = ParsePath(str)
if !ok {
return
}
for i := range path {
segment, err := url.PathUnescape(path[i])
if err == nil {
path[i] = segment
if path, ok = ParsePath(str); ok {
for i := range path {
if segment, err := url.PathUnescape(path[i]); err == nil {
path[i] = segment
} else {
return nil, false
}
}
}
return
@@ -49,7 +49,6 @@ func ParsePathEscaped(str string) (path Path, ok bool) {
// NewPathForRef returns a new path for the given ref.
func NewPathForRef(ref ast.Ref) (path Path, err error) {
if len(ref) == 0 {
return nil, errors.New("empty reference (indicates error in caller)")
}
@@ -85,36 +84,17 @@ func NewPathForRef(ref ast.Ref) (path Path, err error) {
// is less than other, 0 if p is equal to other, or 1 if p is greater than
// other.
func (p Path) Compare(other Path) (cmp int) {
for i := range min(len(p), len(other)) {
if cmp := strings.Compare(p[i], other[i]); cmp != 0 {
return cmp
}
}
if len(p) < len(other) {
return -1
}
if len(p) == len(other) {
return 0
}
return 1
return slices.Compare(p, other)
}
// Equal returns true if p is the same as other.
func (p Path) Equal(other Path) bool {
return p.Compare(other) == 0
return slices.Equal(p, other)
}
// HasPrefix returns true if p starts with other.
func (p Path) HasPrefix(other Path) bool {
if len(other) > len(p) {
return false
}
for i := range other {
if p[i] != other[i] {
return false
}
}
return true
return len(other) <= len(p) && p[:len(other)].Equal(other)
}
// Ref returns a ref that represents p rooted at head.

View File

@@ -177,7 +177,7 @@ func builtinMin(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err
// The null term is considered to be less than any other term,
// so in order for min of a set to make sense, we need to check
// for it.
if min.Value.Compare(ast.InternedNullTerm.Value) == 0 {
if min.Value.Compare(ast.InternedNullValue) == 0 {
return elem, nil
}

View File

@@ -106,6 +106,7 @@ type eval struct {
tracers []QueryTracer
tracingOpts tracing.Options
queryID uint64
timeStart int64
index int
genvarid int
indexing bool
@@ -171,16 +172,17 @@ func (e *eval) string(s *strings.Builder) {
func (e *eval) builtinFunc(name string) (*ast.Builtin, BuiltinFunc, bool) {
decl, ok := ast.BuiltinMap[name]
if ok {
f, ok := builtinFunctions[name]
if ok {
if f, ok := builtinFunctions[name]; ok {
return decl, f, true
}
} else {
bi, ok := e.builtins[name]
if ok {
return bi.Decl, bi.Func, true
if bi, ok := e.builtins[name]; ok {
return decl, bi.Func, true
}
}
if bi, ok := e.builtins[name]; ok {
return bi.Decl, bi.Func, true
}
return nil, nil, false
}
@@ -951,7 +953,7 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error {
var bctx *BuiltinContext
// Creating a BuiltinContext is expensive, so only do it if the builtin depends on it.
if bi.NeedsBuiltInContext() {
if !bi.CanSkipBctx {
var parentID uint64
if e.parent != nil {
parentID = e.parent.queryID
@@ -962,6 +964,10 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error {
capabilities = e.compiler.Capabilities()
}
if e.time == nil {
e.time = ast.NumberTerm(int64ToJSONNumber(e.timeStart))
}
bctx = &BuiltinContext{
Context: e.ctx,
Metrics: e.metrics,

View File

@@ -1316,7 +1316,7 @@ func parseCacheControlHeader(headers http.Header) map[string]string {
ccDirectives := map[string]string{}
ccHeader := headers.Get("cache-control")
for _, part := range strings.Split(ccHeader, ",") {
for part := range strings.SplitSeq(ccHeader, ",") {
part = strings.Trim(part, " ")
if part == "" {
continue

View File

@@ -374,7 +374,7 @@ func (q *Query) PartialRun(ctx context.Context) (partials []ast.Body, support []
ctx: ctx,
metrics: q.metrics,
seed: q.seed,
time: ast.NumberTerm(int64ToJSONNumber(q.time.UnixNano())),
timeStart: q.time.UnixNano(),
cancel: q.cancel,
query: q.query,
queryCompiler: q.queryCompiler,
@@ -569,7 +569,7 @@ func (q *Query) Iter(ctx context.Context, iter func(QueryResult) error) error {
ctx: ctx,
metrics: q.metrics,
seed: q.seed,
time: ast.NumberTerm(int64ToJSONNumber(q.time.UnixNano())),
timeStart: q.time.UnixNano(),
cancel: q.cancel,
query: q.query,
queryCompiler: q.queryCompiler,

View File

@@ -18,6 +18,7 @@ import (
"github.com/open-policy-agent/opa/v1/ast"
"github.com/open-policy-agent/opa/v1/topdown/builtins"
"github.com/open-policy-agent/opa/v1/util"
)
func builtinAnyPrefixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -514,18 +515,12 @@ func builtinSplit(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e
return err
}
if !strings.Contains(string(s), string(d)) {
text, delim := string(s), string(d)
if !strings.Contains(text, delim) {
return iter(ast.ArrayTerm(operands[0]))
}
elems := strings.Split(string(s), string(d))
arr := make([]*ast.Term, len(elems))
for i := range elems {
arr[i] = ast.InternedTerm(elems[i])
}
return iter(ast.ArrayTerm(arr...))
return iter(ast.ArrayTerm(util.SplitMap(text, delim, ast.InternedTerm)...))
}
func builtinReplace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {

View File

@@ -342,7 +342,7 @@ func getKeysFromCertOrJWK(certificate string) ([]verificationKey, error) {
if !ok {
continue
}
var key interface{}
var key any
if err := jwk.Export(k, &key); err != nil {
return nil, err
}

View File

@@ -10,6 +10,7 @@ import (
"fmt"
"io"
"reflect"
"strconv"
"sigs.k8s.io/yaml"
@@ -19,15 +20,14 @@ import (
// UnmarshalJSON parses the JSON encoded data and stores the result in the value
// pointed to by x.
//
// This function is intended to be used in place of the standard json.Marshal
// function when json.Number is required.
// This function is intended to be used in place of the standard [json.Marshal]
// function when [json.Number] is required.
func UnmarshalJSON(bs []byte, x any) error {
return unmarshalJSON(bs, x, true)
}
func unmarshalJSON(bs []byte, x any, ext bool) error {
buf := bytes.NewBuffer(bs)
decoder := NewJSONDecoder(buf)
decoder := NewJSONDecoder(bytes.NewBuffer(bs))
if err := decoder.Decode(x); err != nil {
if handler := extension.FindExtension(".json"); handler != nil && ext {
return handler(bs, x)
@@ -49,8 +49,8 @@ func unmarshalJSON(bs []byte, x any, ext bool) error {
// NewJSONDecoder returns a new decoder that reads from r.
//
// This function is intended to be used in place of the standard json.NewDecoder
// when json.Number is required.
// This function is intended to be used in place of the standard [json.NewDecoder]
// when [json.Number] is required.
func NewJSONDecoder(r io.Reader) *json.Decoder {
decoder := json.NewDecoder(r)
decoder.UseNumber()
@@ -87,6 +87,55 @@ func MustMarshalJSON(x any) []byte {
// rego.Input and inmem's Write operations. Works with both references and
// values.
func RoundTrip(x *any) error {
// Avoid round-tripping types that won't change as a result of
// marshalling/unmarshalling, as even for those values, round-tripping
// comes with a significant cost.
if x == nil || !NeedsRoundTrip(*x) {
return nil
}
// For number types, we can write the json.Number representation
// directly into x without marshalling to bytes and back.
a := *x
switch v := a.(type) {
case int:
*x = json.Number(strconv.Itoa(v))
return nil
case int8:
*x = json.Number(strconv.FormatInt(int64(v), 10))
return nil
case int16:
*x = json.Number(strconv.FormatInt(int64(v), 10))
return nil
case int32:
*x = json.Number(strconv.FormatInt(int64(v), 10))
return nil
case int64:
*x = json.Number(strconv.FormatInt(v, 10))
return nil
case uint:
*x = json.Number(strconv.FormatUint(uint64(v), 10))
return nil
case uint8:
*x = json.Number(strconv.FormatUint(uint64(v), 10))
return nil
case uint16:
*x = json.Number(strconv.FormatUint(uint64(v), 10))
return nil
case uint32:
*x = json.Number(strconv.FormatUint(uint64(v), 10))
return nil
case uint64:
*x = json.Number(strconv.FormatUint(v, 10))
return nil
case float32:
*x = json.Number(strconv.FormatFloat(float64(v), 'f', -1, 32))
return nil
case float64:
*x = json.Number(strconv.FormatFloat(v, 'f', -1, 64))
return nil
}
bs, err := json.Marshal(x)
if err != nil {
return err
@@ -94,15 +143,28 @@ func RoundTrip(x *any) error {
return UnmarshalJSON(bs, x)
}
// NeedsRoundTrip reports whether x could change as a result of a
// marshalling/unmarshalling round-trip, i.e. whether [RoundTrip] has any
// work to do for it. It returns false for the types that are invariant
// under round-tripping (nil, bool, string and json.Number). Since
// [RoundTrip] itself calls this you normally don't need to call this
// function directly, unless you want to make decisions based on the
// round-trippability of a value without actually doing the round-trip.
func NeedsRoundTrip(x any) bool {
	switch x.(type) {
	case nil, bool, string, json.Number:
		return false
	}
	return true
}
// Reference returns a pointer to its argument unless the argument already is
// a pointer. If the argument is **t, or ***t, etc, it will return *t.
//
// Used for preparing Go types (including pointers to structs) into values to be
// put through util.RoundTrip().
// put through [RoundTrip].
func Reference(x any) *any {
var y any
rv := reflect.ValueOf(x)
if rv.Kind() == reflect.Ptr {
if rv.Kind() == reflect.Pointer {
return Reference(rv.Elem().Interface())
}
if rv.Kind() != reflect.Invalid {

View File

@@ -3,6 +3,8 @@ package util
import (
"math"
"slices"
"strings"
"sync"
"unsafe"
)
@@ -73,3 +75,59 @@ func KeysCount[K comparable, V any](m map[K]V, p func(K) bool) int {
}
return count
}
// SplitMap calls fn for each delim-separated part of text and returns a slice of the results.
// Cheaper than calling fn on strings.Split(text, delim), as it avoids allocating an intermediate slice of strings.
func SplitMap[T any](text string, delim string, fn func(string) T) []T {
sl := make([]T, 0, strings.Count(text, delim)+1)
for s := range strings.SplitSeq(text, delim) {
sl = append(sl, fn(s))
}
return sl
}
// SlicePool pools (pointers to) slices with element type T on top of
// sync.Pool, growing each pooled slice on demand.
type SlicePool[T any] struct {
	pool sync.Pool
}

// NewSlicePool creates a SlicePool whose freshly allocated slices start out
// with the given length. The value is only a hint — slices grow as needed —
// so for best reuse keep slices of similar lengths in the same pool.
func NewSlicePool[T any](length int) *SlicePool[T] {
	sp := &SlicePool[T]{}
	sp.pool.New = func() any {
		initial := make([]T, length)
		return &initial
	}
	return sp
}

// Get hands out a pointer to a slice of exactly the requested length, with
// every element reset to the zero value of T. The underlying capacity grows
// when necessary and is retained across uses. A pointer is returned so the
// Get/Put round-trip does not allocate.
func (sp *SlicePool[T]) Get(length int) *[]T {
	ptr := sp.pool.Get().(*[]T)
	buf := *ptr
	if cap(buf) < length {
		buf = slices.Grow(buf, length)
	}
	// Reslice to the requested length while keeping capacity, then zero the
	// visible elements so no data leaks between users of the pool.
	buf = buf[:length]
	clear(buf)
	*ptr = buf
	return ptr
}

// Put hands a slice pointer back to the pool for reuse.
func (sp *SlicePool[T]) Put(s *[]T) {
	sp.pool.Put(s)
}

View File

@@ -27,55 +27,46 @@ var gzipReaderPool = sync.Pool{
// payload size, but not an unbounded amount of memory, as was potentially
// possible before.
func ReadMaybeCompressedBody(r *http.Request) ([]byte, error) {
var content *bytes.Buffer
// Note(philipc): If the request body is of unknown length (such as what
// happens when 'Transfer-Encoding: chunked' is set), we have to do an
// incremental read of the body. In this case, we can't be too clever, we
// just do the best we can with whatever is streamed over to us.
// Fetch gzip payload size limit from request context.
if maxLength, ok := decoding.GetServerDecodingMaxLen(r.Context()); ok {
bs, err := io.ReadAll(io.LimitReader(r.Body, maxLength))
if err != nil {
return bs, err
}
content = bytes.NewBuffer(bs)
} else {
// Read content from the request body into a buffer of known size.
content = bytes.NewBuffer(make([]byte, 0, r.ContentLength))
if _, err := io.CopyN(content, r.Body, r.ContentLength); err != nil {
return content.Bytes(), err
}
length := r.ContentLength
if maxLenConf, ok := decoding.GetServerDecodingMaxLen(r.Context()); ok {
length = maxLenConf
}
content, err := io.ReadAll(io.LimitReader(r.Body, length))
if err != nil {
return nil, err
}
// Decompress gzip content by reading from the buffer.
if strings.Contains(r.Header.Get("Content-Encoding"), "gzip") {
// Fetch gzip payload size limit from request context.
gzipMaxLength, _ := decoding.GetServerDecodingGzipMaxLen(r.Context())
// Note(philipc): The last 4 bytes of a well-formed gzip blob will
// always be a little-endian uint32, representing the decompressed
// content size, modulo 2^32. We validate that the size is safe,
// earlier in DecodingLimitHandler.
sizeTrailerField := binary.LittleEndian.Uint32(content.Bytes()[content.Len()-4:])
if sizeTrailerField > uint32(gzipMaxLength) {
return content.Bytes(), errors.New("gzip payload too large")
sizeDecompressed := int64(binary.LittleEndian.Uint32(content[len(content)-4:]))
if sizeDecompressed > gzipMaxLength {
return nil, errors.New("gzip payload too large")
}
// Pull a gzip decompressor from the pool, and assign it to the current
// buffer, using Reset(). Later, return it back to the pool for another
// request to use.
gzReader := gzipReaderPool.Get().(*gzip.Reader)
if err := gzReader.Reset(content); err != nil {
defer func() {
gzReader.Close()
gzipReaderPool.Put(gzReader)
}()
if err := gzReader.Reset(bytes.NewReader(content)); err != nil {
return nil, err
}
defer gzReader.Close()
defer gzipReaderPool.Put(gzReader)
decompressedContent := bytes.NewBuffer(make([]byte, 0, sizeTrailerField))
if _, err := io.CopyN(decompressedContent, gzReader, int64(sizeTrailerField)); err != nil {
return decompressedContent.Bytes(), err
decompressed := bytes.NewBuffer(make([]byte, 0, sizeDecompressed))
if _, err = io.CopyN(decompressed, gzReader, sizeDecompressed); err != nil {
return nil, err
}
return decompressedContent.Bytes(), nil
return decompressed.Bytes(), nil
}
// Request was not compressed; return the content bytes.
return content.Bytes(), nil
return content, nil
}

View File

@@ -10,7 +10,7 @@ import (
"runtime/debug"
)
var Version = "1.9.0"
var Version = "1.10.1"
// GoVersion is the version of Go this was built with
var GoVersion = runtime.Version()

2
vendor/modules.txt vendored
View File

@@ -1249,7 +1249,7 @@ github.com/onsi/gomega/matchers/support/goraph/edge
github.com/onsi/gomega/matchers/support/goraph/node
github.com/onsi/gomega/matchers/support/goraph/util
github.com/onsi/gomega/types
# github.com/open-policy-agent/opa v1.9.0
# github.com/open-policy-agent/opa v1.10.1
## explicit; go 1.24.6
github.com/open-policy-agent/opa/ast
github.com/open-policy-agent/opa/ast/json