Merge branch 'main' into groupware

This commit is contained in:
Pascal Bleser
2026-02-06 15:08:13 +01:00
committed by GitHub
92 changed files with 20123 additions and 1045 deletions

View File

@@ -140,11 +140,12 @@ A common use case would be integrating with different 3rd party signature
providers, like key management services from various cloud providers or Hardware
Security Modules (HSMs) or to implement additional standards.
| Extension | Purpose | Repo |
| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go |
| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms |
| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc |
| Extension | Purpose | Repo |
| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------------- |
| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go |
| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms |
| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc |
| TPM | Integrates with Trusted Platform Module (TPM) | https://github.com/salrashid123/golang-jwt-tpm |
*Disclaimer*: Unless otherwise specified, these integrations are maintained by
third parties and should not be considered as a primary offer by any of the

View File

@@ -97,7 +97,7 @@ Backwards compatible API change that was missed in 2.0.0.
There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.

View File

@@ -76,13 +76,6 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
}
}
// Decode signature
token.Signature, err = p.DecodeSegment(parts[2])
if err != nil {
return token, newError("could not base64 decode signature", ErrTokenMalformed, err)
}
text := strings.Join(parts[0:2], ".")
// Lookup key(s)
if keyFunc == nil {
// keyFunc was not provided. short circuiting validation
@@ -94,11 +87,14 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
return token, newError("error while executing keyfunc", ErrTokenUnverifiable, err)
}
// Join together header and claims in order to verify them with the signature
text := strings.Join(parts[0:2], ".")
switch have := got.(type) {
case VerificationKeySet:
if len(have.Keys) == 0 {
return token, newError("keyfunc returned empty verification key set", ErrTokenUnverifiable)
}
// Iterate through keys and verify signature, skipping the rest when a match is found.
// Return the last error if no match is found.
for _, key := range have.Keys {
@@ -131,7 +127,7 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
return token, nil
}
// ParseUnverified parses the token but doesn't validate the signature.
// ParseUnverified parses the token but does not validate the signature.
//
// WARNING: Don't use this method unless you know what you're doing.
//
@@ -146,7 +142,7 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke
token = &Token{Raw: tokenString}
// parse Header
// Parse Header
var headerBytes []byte
if headerBytes, err = p.DecodeSegment(parts[0]); err != nil {
return token, parts, newError("could not base64 decode header", ErrTokenMalformed, err)
@@ -155,7 +151,7 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke
return token, parts, newError("could not JSON decode header", ErrTokenMalformed, err)
}
// parse Claims
// Parse Claims
token.Claims = claims
claimBytes, err := p.DecodeSegment(parts[1])
@@ -196,6 +192,12 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke
return token, parts, newError("signing method (alg) is unspecified", ErrTokenUnverifiable)
}
// Parse token signature
token.Signature, err = p.DecodeSegment(parts[2])
if err != nil {
return token, parts, newError("could not base64 decode signature", ErrTokenMalformed, err)
}
return token, parts, nil
}
@@ -216,7 +218,7 @@ func splitToken(token string) ([]string, bool) {
parts[1] = claims
// One more cut to ensure the signature is the last part of the token and there are no more
// delimiters. This avoids an issue where malicious input could contain additional delimiters
// causing unecessary overhead parsing tokens.
// causing unnecessary overhead parsing tokens.
signature, _, unexpected := strings.Cut(remain, tokenDelimiter)
if unexpected {
return nil, false

View File

@@ -3,9 +3,7 @@ package jwt
import "time"
// ParserOption is used to implement functional-style options that modify the
// behavior of the parser. To add new options, just create a function (ideally
// beginning with With or Without) that returns an anonymous function that takes
// a *Parser type as input and manipulates its configuration accordingly.
// behavior of the parser.
type ParserOption func(*Parser)
// WithValidMethods is an option to supply algorithm methods that the parser
@@ -66,6 +64,14 @@ func WithExpirationRequired() ParserOption {
}
}
// WithNotBeforeRequired returns a ParserOption that makes the nbf (not
// before) claim mandatory. By default, the nbf claim is optional.
func WithNotBeforeRequired() ParserOption {
	return func(parser *Parser) {
		parser.validator.requireNbf = true
	}
}
// WithAudience configures the validator to require any of the specified
// audiences in the `aud` claim. Validation will fail if the audience is not
// listed in the token or the `aud` claim is missing.

View File

@@ -32,8 +32,8 @@ type Token struct {
Method SigningMethod // Method is the signing method used or to be used
Header map[string]any // Header is the first segment of the token in decoded form
Claims Claims // Claims is the second segment of the token in decoded form
Signature []byte // Signature is the third segment of the token in decoded form. Populated when you Parse a token
Valid bool // Valid specifies if the token is valid. Populated when you Parse/Verify a token
Signature []byte // Signature is the third segment of the token in decoded form. Populated when you [Parse] or sign a token
Valid bool // Valid specifies if the token is valid. Populated when you [Parse] a token
}
// New creates a new [Token] with the specified signing method and an empty map
@@ -71,6 +71,8 @@ func (t *Token) SignedString(key any) (string, error) {
return "", err
}
t.Signature = sig
return sstr + "." + t.EncodeSegment(sig), nil
}

View File

@@ -44,6 +44,9 @@ type Validator struct {
// requireExp specifies whether the exp claim is required
requireExp bool
// requireNbf specifies whether the nbf claim is required
requireNbf bool
// verifyIat specifies whether the iat (Issued At) claim will be verified.
// According to https://www.rfc-editor.org/rfc/rfc7519#section-4.1.6 this
// only specifies the age of the token, but no validation check is
@@ -111,8 +114,9 @@ func (v *Validator) Validate(claims Claims) error {
}
// We always need to check not-before, but usage of the claim itself is
// OPTIONAL.
if err = v.verifyNotBefore(claims, now, false); err != nil {
// OPTIONAL by default. requireNbf overrides this behavior and makes
// the nbf claim mandatory.
if err = v.verifyNotBefore(claims, now, v.requireNbf); err != nil {
errs = append(errs, err)
}

View File

@@ -17,6 +17,7 @@ package profile
import (
"encoding/binary"
"fmt"
"slices"
"sort"
"strconv"
"strings"
@@ -78,12 +79,10 @@ func Merge(srcs []*Profile) (*Profile, error) {
}
}
for _, s := range p.Sample {
if isZeroSample(s) {
// If there are any zero samples, re-merge the profile to GC
// them.
return Merge([]*Profile{p})
}
if slices.ContainsFunc(p.Sample, isZeroSample) {
// If there are any zero samples, re-merge the profile to GC
// them.
return Merge([]*Profile{p})
}
return p, nil

View File

@@ -24,6 +24,7 @@ import (
"math"
"path/filepath"
"regexp"
"slices"
"sort"
"strings"
"sync"
@@ -277,7 +278,7 @@ func (p *Profile) massageMappings() {
// Use heuristics to identify main binary and move it to the top of the list of mappings
for i, m := range p.Mapping {
file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1))
file := strings.TrimSpace(strings.ReplaceAll(m.File, "(deleted)", ""))
if len(file) == 0 {
continue
}
@@ -734,12 +735,7 @@ func (p *Profile) RemoveLabel(key string) {
// HasLabel returns true if a sample has a label with indicated key and value.
func (s *Sample) HasLabel(key, value string) bool {
for _, v := range s.Label[key] {
if v == value {
return true
}
}
return false
return slices.Contains(s.Label[key], value)
}
// SetNumLabel sets the specified key to the specified value for all samples in the
@@ -852,7 +848,17 @@ func (p *Profile) HasFileLines() bool {
// "[vdso]", "[vsyscall]" and some others, see the code.
func (m *Mapping) Unsymbolizable() bool {
name := filepath.Base(m.File)
return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon"
switch {
case strings.HasPrefix(name, "["):
case strings.HasPrefix(name, "linux-vdso"):
case strings.HasPrefix(m.File, "/dev/dri/"):
case m.File == "//anon":
case m.File == "":
case strings.HasPrefix(m.File, "/memfd:"):
default:
return false
}
return true
}
// Copy makes a fully independent copy of a profile.

View File

@@ -36,6 +36,7 @@ package profile
import (
"errors"
"fmt"
"slices"
)
type buffer struct {
@@ -187,6 +188,16 @@ func le32(p []byte) uint32 {
return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
}
// peekNumVarints reports how many varints can be decoded from data before
// the buffer is exhausted or a malformed varint is encountered. It is used
// to pre-size destination slices before the real decode pass.
func peekNumVarints(data []byte) int {
	count := 0
	rest := data
	for len(rest) > 0 {
		var err error
		if _, rest, err = decodeVarint(rest); err != nil {
			break
		}
		count++
	}
	return count
}
func decodeVarint(data []byte) (uint64, []byte, error) {
var u uint64
for i := 0; ; i++ {
@@ -286,6 +297,9 @@ func decodeInt64(b *buffer, x *int64) error {
func decodeInt64s(b *buffer, x *[]int64) error {
if b.typ == 2 {
// Packed encoding
dataLen := peekNumVarints(b.data)
*x = slices.Grow(*x, dataLen)
data := b.data
for len(data) > 0 {
var u uint64
@@ -316,8 +330,11 @@ func decodeUint64(b *buffer, x *uint64) error {
func decodeUint64s(b *buffer, x *[]uint64) error {
if b.typ == 2 {
data := b.data
// Packed encoding
dataLen := peekNumVarints(b.data)
*x = slices.Grow(*x, dataLen)
data := b.data
for len(data) > 0 {
var u uint64
var err error

View File

@@ -19,6 +19,7 @@ package profile
import (
"fmt"
"regexp"
"slices"
"strings"
)
@@ -40,13 +41,7 @@ func simplifyFunc(f string) string {
// Account for unsimplified names -- try to remove the argument list by trimming
// starting from the first '(', but skipping reserved names that have '('.
for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) {
foundReserved := false
for _, res := range reservedNames {
if funcName[ind[0]:ind[1]] == res {
foundReserved = true
break
}
}
foundReserved := slices.Contains(reservedNames, funcName[ind[0]:ind[1]])
if !foundReserved {
funcName = funcName[:ind[0]]
break

View File

@@ -1,3 +1,21 @@
## 2.28.0
Ginkgo's SemVer filter now supports filtering multiple components by SemVer version:
```go
It("should work in a specific version range (1.0.0, 2.0.0) and third-party dependency redis in [8.0.0, ~)", SemVerConstraint("> 1.0.0, < 2.0.0"), ComponentSemVerConstraint("redis", ">= 8.0.0"), func() {
// This test will only run when version is between 1.0.0 (exclusive) and 2.0.0 (exclusive) and redis version is >= 8.0.0
})
```
can be filtered in or out with an invocation like:
```bash
ginkgo --sem-ver-filter="2.1.1, redis=8.2.0"
```
Huge thanks to @Icarus9913 for working on this!
## 2.27.5
### Fixes

View File

@@ -20,6 +20,7 @@ import (
"io"
"os"
"path/filepath"
"slices"
"strings"
"github.com/go-logr/logr"
@@ -268,7 +269,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...any) bool {
}
defer global.PopClone()
suiteLabels, suiteSemVerConstraints, suiteAroundNodes := extractSuiteConfiguration(args)
suiteLabels, suiteSemVerConstraints, suiteComponentSemVerConstraints, suiteAroundNodes := extractSuiteConfiguration(args)
var reporter reporters.Reporter
if suiteConfig.ParallelTotal == 1 {
@@ -311,7 +312,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...any) bool {
suitePath, err = filepath.Abs(suitePath)
exitIfErr(err)
passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suiteSemVerConstraints, suiteAroundNodes, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suiteSemVerConstraints, suiteComponentSemVerConstraints, suiteAroundNodes, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
outputInterceptor.Shutdown()
flagSet.ValidateDeprecations(deprecationTracker)
@@ -330,9 +331,10 @@ func RunSpecs(t GinkgoTestingT, description string, args ...any) bool {
return passed
}
func extractSuiteConfiguration(args []any) (Labels, SemVerConstraints, types.AroundNodes) {
func extractSuiteConfiguration(args []any) (Labels, SemVerConstraints, ComponentSemVerConstraints, types.AroundNodes) {
suiteLabels := Labels{}
suiteSemVerConstraints := SemVerConstraints{}
suiteComponentSemVerConstraints := ComponentSemVerConstraints{}
aroundNodes := types.AroundNodes{}
configErrors := []error{}
for _, arg := range args {
@@ -345,6 +347,11 @@ func extractSuiteConfiguration(args []any) (Labels, SemVerConstraints, types.Aro
suiteLabels = append(suiteLabels, arg...)
case SemVerConstraints:
suiteSemVerConstraints = append(suiteSemVerConstraints, arg...)
case ComponentSemVerConstraints:
for component, constraints := range arg {
suiteComponentSemVerConstraints[component] = append(suiteComponentSemVerConstraints[component], constraints...)
suiteComponentSemVerConstraints[component] = slices.Compact(suiteComponentSemVerConstraints[component])
}
case types.AroundNodeDecorator:
aroundNodes = append(aroundNodes, arg)
default:
@@ -362,7 +369,7 @@ func extractSuiteConfiguration(args []any) (Labels, SemVerConstraints, types.Aro
os.Exit(1)
}
return suiteLabels, suiteSemVerConstraints, aroundNodes
return suiteLabels, suiteSemVerConstraints, suiteComponentSemVerConstraints, aroundNodes
}
func getwd() (string, error) {
@@ -385,7 +392,7 @@ func PreviewSpecs(description string, args ...any) Report {
}
defer global.PopClone()
suiteLabels, suiteSemVerConstraints, suiteAroundNodes := extractSuiteConfiguration(args)
suiteLabels, suiteSemVerConstraints, suiteComponentSemVerConstraints, suiteAroundNodes := extractSuiteConfiguration(args)
priorDryRun, priorParallelTotal, priorParallelProcess := suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess
suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess = true, 1, 1
defer func() {
@@ -403,7 +410,7 @@ func PreviewSpecs(description string, args ...any) Report {
suitePath, err = filepath.Abs(suitePath)
exitIfErr(err)
global.Suite.Run(description, suiteLabels, suiteSemVerConstraints, suiteAroundNodes, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
global.Suite.Run(description, suiteLabels, suiteSemVerConstraints, suiteComponentSemVerConstraints, suiteAroundNodes, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
return global.Suite.GetPreviewReport()
}

View File

@@ -117,6 +117,27 @@ You can learn more here: https://onsi.github.io/ginkgo/#spec-semantic-version-fi
*/
type SemVerConstraints = internal.SemVerConstraints
/*
ComponentSemVerConstraint decorates specs with ComponentSemVerConstraints. Semantic version constraints for multiple components can be passed to ComponentSemVerConstraint; the component name must not be empty, and the version strings must follow the semantic version constraint rules.
ComponentSemVerConstraints can be applied to container and subject nodes, but not setup nodes. You can provide multiple ComponentSemVerConstraints to a given node, and a spec's component semantic version constraints are the union of all component semantic version constraints in its node hierarchy.
You can learn more here: https://onsi.github.io/ginkgo/#spec-semantic-version-filtering
You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
*/
func ComponentSemVerConstraint(component string, semVerConstraints ...string) ComponentSemVerConstraints {
	// A single-entry map binding the component name to its constraints.
	return ComponentSemVerConstraints{component: semVerConstraints}
}
/*
ComponentSemVerConstraints are the type for spec ComponentSemVerConstraint decorators. Use ComponentSemVerConstraint(...) to construct ComponentSemVerConstraints.
You can learn more here: https://onsi.github.io/ginkgo/#spec-semantic-version-filtering
*/
type ComponentSemVerConstraints = internal.ComponentSemVerConstraints
/*
PollProgressAfter allows you to override the configured value for --poll-progress-after for a particular node.

View File

@@ -33,7 +33,7 @@ func BuildRunCommand() command.Command {
Usage: "ginkgo run <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
ShortDoc: "Run the tests in the passed in <PACKAGES> (or the package in the current directory if left blank)",
Documentation: "Any arguments after -- will be passed to the test.",
DocLink: "running-tests",
DocLink: "running-specs",
Command: func(args []string, additionalArgs []string) {
var errors []error
cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)

View File

@@ -56,7 +56,7 @@ This function sets the `Skip` property on specs by applying Ginkgo's focus polic
*Note:* specs with pending nodes are Skipped when created by NewSpec.
*/
func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suiteConfig types.SuiteConfig) (Specs, bool) {
func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suiteComponentSemVerConstraints ComponentSemVerConstraints, suiteConfig types.SuiteConfig) (Specs, bool) {
focusString := strings.Join(suiteConfig.FocusStrings, "|")
skipString := strings.Join(suiteConfig.SkipStrings, "|")
@@ -87,7 +87,24 @@ func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suit
if suiteConfig.SemVerFilter != "" {
semVerFilter, _ := types.ParseSemVerFilter(suiteConfig.SemVerFilter)
skipChecks = append(skipChecks, func(spec Spec) bool {
return !semVerFilter(UnionOfSemVerConstraints(suiteSemVerConstraints, spec.Nodes.UnionOfSemVerConstraints()))
noRun := false
// non-component-specific constraints
constraints := UnionOfSemVerConstraints(suiteSemVerConstraints, spec.Nodes.UnionOfSemVerConstraints())
if len(constraints) != 0 && semVerFilter("", constraints) == false {
noRun = true
}
// component-specific constraints
componentConstraints := UnionOfComponentSemVerConstraints(suiteComponentSemVerConstraints, spec.Nodes.UnionOfComponentSemVerConstraints())
for component, constraints := range componentConstraints {
if semVerFilter(component, constraints) == false {
noRun = true
break
}
}
return noRun
})
}

View File

@@ -113,22 +113,24 @@ func newGroup(suite *Suite) *group {
// initialReportForSpec constructs a new SpecReport right before running the spec.
func (g *group) initialReportForSpec(spec Spec) types.SpecReport {
return types.SpecReport{
ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
ContainerHierarchySemVerConstraints: spec.Nodes.WithType(types.NodeTypeContainer).SemVerConstraints(),
LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
LeafNodeType: types.NodeTypeIt,
LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text,
LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
LeafNodeSemVerConstraints: []string(spec.FirstNodeWithType(types.NodeTypeIt).SemVerConstraints),
ParallelProcess: g.suite.config.ParallelProcess,
RunningInParallel: g.suite.isRunningInParallel(),
IsSerial: spec.Nodes.HasNodeMarkedSerial(),
IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(),
MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(),
SpecPriority: spec.Nodes.GetSpecPriority(),
ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
ContainerHierarchySemVerConstraints: spec.Nodes.WithType(types.NodeTypeContainer).SemVerConstraints(),
ContainerHierarchyComponentSemVerConstraints: spec.Nodes.WithType(types.NodeTypeContainer).ComponentSemVerConstraints(),
LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
LeafNodeType: types.NodeTypeIt,
LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text,
LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
LeafNodeSemVerConstraints: []string(spec.FirstNodeWithType(types.NodeTypeIt).SemVerConstraints),
LeafNodeComponentSemVerConstraints: map[string][]string(spec.FirstNodeWithType(types.NodeTypeIt).ComponentSemVerConstraints),
ParallelProcess: g.suite.config.ParallelProcess,
RunningInParallel: g.suite.isRunningInParallel(),
IsSerial: spec.Nodes.HasNodeMarkedSerial(),
IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(),
MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(),
SpecPriority: spec.Nodes.GetSpecPriority(),
}
}
@@ -152,6 +154,7 @@ func addNodeToReportForNode(report *types.ConstructionNodeReport, node *TreeNode
report.ContainerHierarchyLocations = append(report.ContainerHierarchyLocations, node.Node.CodeLocation)
report.ContainerHierarchyLabels = append(report.ContainerHierarchyLabels, node.Node.Labels)
report.ContainerHierarchySemVerConstraints = append(report.ContainerHierarchySemVerConstraints, node.Node.SemVerConstraints)
report.ContainerHierarchyComponentSemVerConstraints = append(report.ContainerHierarchyComponentSemVerConstraints, node.Node.ComponentSemVerConstraints)
if node.Node.MarkedSerial {
report.IsSerial = true
}

View File

@@ -57,6 +57,7 @@ type Node struct {
MustPassRepeatedly int
Labels Labels
SemVerConstraints SemVerConstraints
ComponentSemVerConstraints ComponentSemVerConstraints
PollProgressAfter time.Duration
PollProgressInterval time.Duration
NodeTimeout time.Duration
@@ -106,7 +107,24 @@ func (l Labels) MatchesLabelFilter(query string) bool {
type SemVerConstraints []string
func (svc SemVerConstraints) MatchesSemVerFilter(version string) bool {
return types.MustParseSemVerFilter(version)(svc)
return types.MustParseSemVerFilter(version)("", svc)
}
type ComponentSemVerConstraints map[string][]string
// MatchesSemVerFilter reports whether the constraints registered for the
// given component are satisfied by the supplied version. It returns false
// when the component has no registered constraints.
//
// The original implementation ranged over the map looking for the key;
// since map keys are unique, a direct lookup is equivalent and clearer.
func (csvc ComponentSemVerConstraints) MatchesSemVerFilter(component, version string) bool {
	constraints, ok := csvc[component]
	if !ok {
		return false
	}
	input := version
	if component != "" {
		// The filter parser expects "component=version" for component-scoped checks.
		input = fmt.Sprintf("%s=%s", component, version)
	}
	return types.MustParseSemVerFilter(input)(component, constraints)
}
func unionOf[S ~[]E, E comparable](slices ...S) S {
@@ -131,6 +149,16 @@ func UnionOfSemVerConstraints(semVerConstraints ...SemVerConstraints) SemVerCons
return unionOf(semVerConstraints...)
}
// UnionOfComponentSemVerConstraints merges several ComponentSemVerConstraints
// maps into one, combining (and deduplicating, via unionOf) the constraint
// list kept per component.
func UnionOfComponentSemVerConstraints(componentSemVerConstraintsSlice ...ComponentSemVerConstraints) ComponentSemVerConstraints {
	merged := ComponentSemVerConstraints{}
	for _, constraintsByComponent := range componentSemVerConstraintsSlice {
		for component, constraints := range constraintsByComponent {
			merged[component] = unionOf(merged[component], constraints)
		}
	}
	return merged
}
func PartitionDecorations(args ...any) ([]any, []any) {
decorations := []any{}
remainingArgs := []any{}
@@ -174,6 +202,8 @@ func isDecoration(arg any) bool {
return true
case t == reflect.TypeOf(SemVerConstraints{}):
return true
case t == reflect.TypeOf(ComponentSemVerConstraints{}):
return true
case t == reflect.TypeOf(PollProgressInterval(0)):
return true
case t == reflect.TypeOf(PollProgressAfter(0)):
@@ -214,16 +244,17 @@ var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...any) (Node, []error) {
baseOffset := 2
node := Node{
ID: UniqueNodeID(),
NodeType: nodeType,
Text: text,
Labels: Labels{},
SemVerConstraints: SemVerConstraints{},
CodeLocation: types.NewCodeLocation(baseOffset),
NestingLevel: -1,
PollProgressAfter: -1,
PollProgressInterval: -1,
GracePeriod: -1,
ID: UniqueNodeID(),
NodeType: nodeType,
Text: text,
Labels: Labels{},
SemVerConstraints: SemVerConstraints{},
ComponentSemVerConstraints: ComponentSemVerConstraints{},
CodeLocation: types.NewCodeLocation(baseOffset),
NestingLevel: -1,
PollProgressAfter: -1,
PollProgressInterval: -1,
GracePeriod: -1,
}
errors := []error{}
@@ -360,6 +391,36 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
appendError(err)
}
}
case t == reflect.TypeOf(ComponentSemVerConstraints{}):
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "ComponentSemVerConstraint"))
}
for component, semVerConstraints := range arg.(ComponentSemVerConstraints) {
// while using ComponentSemVerConstraints, we should not allow empty component names.
// you should use SemVerConstraints for that.
hasErr := false
if len(component) == 0 {
appendError(types.GinkgoErrors.InvalidEmptyComponentForSemVerConstraint(node.CodeLocation))
hasErr = true
}
for _, semVerConstraint := range semVerConstraints {
_, err := types.ValidateAndCleanupSemVerConstraint(semVerConstraint, node.CodeLocation)
if err != nil {
appendError(err)
hasErr = true
}
}
if !hasErr {
// merge constraints if the component already exists
constraints := slices.Clone(semVerConstraints)
if existingConstraints, exists := node.ComponentSemVerConstraints[component]; exists {
constraints = UnionOfSemVerConstraints([]string(existingConstraints), constraints)
}
node.ComponentSemVerConstraints[component] = slices.Clone(constraints)
}
}
case t.Kind() == reflect.Func:
if nodeType.Is(types.NodeTypeContainer) {
if node.Body != nil {
@@ -899,6 +960,34 @@ func (n Nodes) UnionOfSemVerConstraints() []string {
return out
}
// ComponentSemVerConstraints returns, per node, that node's component-scoped
// semantic version constraints. Nil maps are normalized to empty maps so
// callers never have to nil-check the result.
func (n Nodes) ComponentSemVerConstraints() []map[string][]string {
	out := make([]map[string][]string, len(n))
	for i := range n {
		constraints := n[i].ComponentSemVerConstraints
		if constraints == nil {
			out[i] = map[string][]string{}
			continue
		}
		out[i] = map[string][]string(constraints)
	}
	return out
}
// UnionOfComponentSemVerConstraints collapses the component constraints of
// every node into a single component -> constraints map. The first time a
// component is seen its slice is taken as-is (NOTE(review): it aliases the
// node's slice rather than copying it); subsequent occurrences are merged
// via UnionOfSemVerConstraints, which produces a fresh deduplicated slice.
func (n Nodes) UnionOfComponentSemVerConstraints() map[string][]string {
	out := map[string][]string{}
	seen := map[string]bool{}
	for _, node := range n {
		for component, constraints := range node.ComponentSemVerConstraints {
			if seen[component] {
				out[component] = UnionOfSemVerConstraints(out[component], constraints)
				continue
			}
			seen[component] = true
			out[component] = constraints
		}
	}
	return out
}
func (n Nodes) CodeLocations() []types.CodeLocation {
out := make([]types.CodeLocation, len(n))
for i := range n {

View File

@@ -83,7 +83,7 @@ func goJSONActionFromSpecState(state types.SpecState) GoJSONAction {
type gojsonReport struct {
o types.Report
// Extra calculated fields
goPkg string
goPkg string
elapsed float64
}
@@ -109,8 +109,8 @@ type gojsonSpecReport struct {
o types.SpecReport
// extra calculated fields
testName string
elapsed float64
action GoJSONAction
elapsed float64
action GoJSONAction
}
func newSpecReport(in types.SpecReport) *gojsonSpecReport {
@@ -141,18 +141,31 @@ func suitePathToPkg(dir string) (string, error) {
}
func createTestName(spec types.SpecReport) string {
name := fmt.Sprintf("[%s]", spec.LeafNodeType)
if spec.FullText() != "" {
name = name + " " + spec.FullText()
}
labels := spec.Labels()
if len(labels) > 0 {
name = name + " [" + strings.Join(labels, ", ") + "]"
}
semVerConstraints := spec.SemVerConstraints()
if len(semVerConstraints) > 0 {
name = name + " [" + strings.Join(semVerConstraints, ", ") + "]"
}
name = strings.TrimSpace(name)
return name
name := fmt.Sprintf("[%s]", spec.LeafNodeType)
if spec.FullText() != "" {
name = name + " " + spec.FullText()
}
labels := spec.Labels()
if len(labels) > 0 {
name = name + " [" + strings.Join(labels, ", ") + "]"
}
semVerConstraints := spec.SemVerConstraints()
if len(semVerConstraints) > 0 {
name = name + " [" + strings.Join(semVerConstraints, ", ") + "]"
}
componentSemVerConstraints := spec.ComponentSemVerConstraints()
if len(componentSemVerConstraints) > 0 {
name = name + " [" + formatComponentSemVerConstraintsToString(componentSemVerConstraints) + "]"
}
name = strings.TrimSpace(name)
return name
}
// formatComponentSemVerConstraintsToString renders a component->constraints
// map as "comp1: [c1 c2], comp2: [c3]".
//
// Components are emitted in sorted order: Go map iteration order is random,
// so the original implementation produced a different test name on every run
// for specs with more than one component constraint.
func formatComponentSemVerConstraintsToString(componentSemVerConstraints map[string][]string) string {
	// Collect the component names in sorted order. A simple insertion sort
	// keeps this block free of new imports; these maps are tiny.
	components := make([]string, 0, len(componentSemVerConstraints))
	for component := range componentSemVerConstraints {
		pos := len(components)
		for pos > 0 && components[pos-1] > component {
			pos--
		}
		components = append(components, "")
		copy(components[pos+1:], components[pos:])
		components[pos] = component
	}
	parts := make([]string, 0, len(components))
	for _, component := range components {
		parts = append(parts, fmt.Sprintf("%s: %s", component, componentSemVerConstraints[component]))
	}
	return strings.Join(parts, ", ")
}

View File

@@ -108,13 +108,13 @@ func (suite *Suite) BuildTree() error {
return nil
}
func (suite *Suite) Run(description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suiteAroundNodes types.AroundNodes, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) {
func (suite *Suite) Run(description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suiteComponentSemVerConstraints ComponentSemVerConstraints, suiteAroundNodes types.AroundNodes, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) {
if suite.phase != PhaseBuildTree {
panic("cannot run before building the tree = call suite.BuildTree() first")
}
ApplyNestedFocusPolicyToTree(suite.tree)
specs := GenerateSpecsFromTreeRoot(suite.tree)
specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteSemVerConstraints, suiteConfig)
specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteSemVerConstraints, suiteComponentSemVerConstraints, suiteConfig)
specs = ComputeAroundNodes(specs)
suite.phase = PhaseRun
@@ -133,7 +133,7 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suiteSemVerConst
cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal)
success := suite.runSpecs(description, suiteLabels, suiteSemVerConstraints, suitePath, hasProgrammaticFocus, specs)
success := suite.runSpecs(description, suiteLabels, suiteSemVerConstraints, suiteComponentSemVerConstraints, suitePath, hasProgrammaticFocus, specs)
cancelProgressHandler()
@@ -456,16 +456,17 @@ func (suite *Suite) processCurrentSpecReport() {
}
}
func (suite *Suite) runSpecs(description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
func (suite *Suite) runSpecs(description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suiteComponentSemVerConstraints ComponentSemVerConstraints, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
numSpecsThatWillBeRun := specs.CountWithoutSkip()
suite.report = types.Report{
SuitePath: suitePath,
SuiteDescription: description,
SuiteLabels: suiteLabels,
SuiteSemVerConstraints: suiteSemVerConstraints,
SuiteConfig: suite.config,
SuiteHasProgrammaticFocus: hasProgrammaticFocus,
SuitePath: suitePath,
SuiteDescription: description,
SuiteLabels: suiteLabels,
SuiteSemVerConstraints: suiteSemVerConstraints,
SuiteComponentSemVerConstraints: suiteComponentSemVerConstraints,
SuiteConfig: suite.config,
SuiteHasProgrammaticFocus: hasProgrammaticFocus,
PreRunStats: types.PreRunStats{
TotalSpecs: len(specs),
SpecsThatWillRun: numSpecsThatWillBeRun,

View File

@@ -75,6 +75,9 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) {
if len(report.SuiteSemVerConstraints) > 0 {
r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteSemVerConstraints, ", ")))
}
if len(report.SuiteComponentSemVerConstraints) > 0 {
r.emit(r.f("{{coral}}[Components: %s]{{/}} ", formatComponentSemVerConstraintsToString(report.SuiteComponentSemVerConstraints)))
}
r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs))
if report.SuiteConfig.ParallelTotal > 1 {
r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal))
@@ -97,6 +100,13 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) {
bannerWidth = len(semVerConstraints) + 2
}
}
if len(report.SuiteComponentSemVerConstraints) > 0 {
componentSemVerConstraints := formatComponentSemVerConstraintsToString(report.SuiteComponentSemVerConstraints)
r.emitBlock(r.f("{{coral}}[Components: %s]{{/}} ", componentSemVerConstraints))
if len(componentSemVerConstraints)+2 > bannerWidth {
bannerWidth = len(componentSemVerConstraints) + 2
}
}
r.emitBlock(strings.Repeat("=", bannerWidth))
out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed)
@@ -725,8 +735,12 @@ func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string {
}
func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string {
texts, locations, labels, semVerConstraints := []string{}, []types.CodeLocation{}, [][]string{}, [][]string{}
texts, locations, labels, semVerConstraints = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...), append(semVerConstraints, report.ContainerHierarchySemVerConstraints...)
texts, locations, labels, semVerConstraints, componentSemVerConstraints := []string{}, []types.CodeLocation{}, [][]string{}, [][]string{}, []map[string][]string{}
texts = append(texts, report.ContainerHierarchyTexts...)
locations = append(locations, report.ContainerHierarchyLocations...)
labels = append(labels, report.ContainerHierarchyLabels...)
semVerConstraints = append(semVerConstraints, report.ContainerHierarchySemVerConstraints...)
componentSemVerConstraints = append(componentSemVerConstraints, report.ContainerHierarchyComponentSemVerConstraints...)
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText))
@@ -735,6 +749,7 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
}
labels = append(labels, report.LeafNodeLabels)
semVerConstraints = append(semVerConstraints, report.LeafNodeSemVerConstraints)
componentSemVerConstraints = append(componentSemVerConstraints, report.LeafNodeComponentSemVerConstraints)
locations = append(locations, report.LeafNodeLocation)
failureLocation := report.Failure.FailureNodeLocation
@@ -749,6 +764,7 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
locations = append([]types.CodeLocation{failureLocation}, locations...)
labels = append([][]string{{}}, labels...)
semVerConstraints = append([][]string{{}}, semVerConstraints...)
componentSemVerConstraints = append([]map[string][]string{{}}, componentSemVerConstraints...)
highlightIndex = 0
case types.FailureNodeInContainer:
i := report.Failure.FailureNodeContainerIndex
@@ -779,6 +795,9 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
if len(semVerConstraints[i]) > 0 {
out += r.f(" {{coral}}[%s]{{/}}", strings.Join(semVerConstraints[i], ", "))
}
if len(componentSemVerConstraints[i]) > 0 {
out += r.f(" {{coral}}[%s]{{/}}", formatComponentSemVerConstraintsToString(componentSemVerConstraints[i]))
}
out += "\n"
out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i])
}
@@ -806,6 +825,10 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
if len(flattenedSemVerConstraints) > 0 {
out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedSemVerConstraints, ", "))
}
flattenedComponentSemVerConstraints := report.ComponentSemVerConstraints()
if len(flattenedComponentSemVerConstraints) > 0 {
out += r.f(" {{coral}}[%s]{{/}}", formatComponentSemVerConstraintsToString(flattenedComponentSemVerConstraints))
}
out += "\n"
if usePreciseFailureLocation {
out += r.f("{{gray}}%s{{/}}", failureLocation)

View File

@@ -13,9 +13,11 @@ package reporters
import (
"encoding/xml"
"fmt"
"maps"
"os"
"path"
"regexp"
"slices"
"strings"
"github.com/onsi/ginkgo/v2/config"
@@ -39,6 +41,9 @@ type JunitReportConfig struct {
// Enable OmitSpecSemVerConstraints to prevent semantic version constraints from appearing in the spec name
OmitSpecSemVerConstraints bool
// Enable OmitSpecComponentSemVerConstraints to prevent component semantic version constraints from appearing in the spec name
OmitSpecComponentSemVerConstraints bool
// Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name
OmitLeafNodeType bool
@@ -173,6 +178,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
{"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")},
{"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))},
{"SuiteSemVerConstraints", fmt.Sprintf("[%s]", strings.Join(report.SuiteSemVerConstraints, ","))},
{"SuiteComponentSemVerConstraints", fmt.Sprintf("[%s]", formatComponentSemVerConstraintsToString(report.SuiteComponentSemVerConstraints))},
{"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)},
{"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)},
{"LabelFilter", report.SuiteConfig.LabelFilter},
@@ -216,6 +222,10 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
if len(semVerConstraints) > 0 && !config.OmitSpecSemVerConstraints {
name = name + " [" + strings.Join(semVerConstraints, ", ") + "]"
}
componentSemVerConstraints := spec.ComponentSemVerConstraints()
if len(componentSemVerConstraints) > 0 && !config.OmitSpecComponentSemVerConstraints {
name = name + " [" + formatComponentSemVerConstraintsToString(componentSemVerConstraints) + "]"
}
name = strings.TrimSpace(name)
test := JUnitTestCase{
@@ -387,6 +397,16 @@ func systemOutForUnstructuredReporters(spec types.SpecReport) string {
return spec.CapturedStdOutErr
}
func formatComponentSemVerConstraintsToString(componentSemVerConstraints map[string][]string) string {
var tmpStr string
for _, key := range slices.Sorted(maps.Keys(componentSemVerConstraints)) {
tmpStr = tmpStr + fmt.Sprintf("%s: %s, ", key, componentSemVerConstraints[key])
}
tmpStr = strings.TrimSuffix(tmpStr, ", ")
return tmpStr
}
// Deprecated JUnitReporter (so folks can still compile their suites)
type JUnitReporter struct{}

View File

@@ -39,12 +39,16 @@ func GenerateTeamcityReport(report types.Report, dst string) error {
name := report.SuiteDescription
labels := report.SuiteLabels
semVerConstraints := report.SuiteSemVerConstraints
componentSemVerConstraints := report.SuiteComponentSemVerConstraints
if len(labels) > 0 {
name = name + " [" + strings.Join(labels, ", ") + "]"
}
if len(semVerConstraints) > 0 {
name = name + " [" + strings.Join(semVerConstraints, ", ") + "]"
}
if len(componentSemVerConstraints) > 0 {
name = name + " [" + formatComponentSemVerConstraintsToString(componentSemVerConstraints) + "]"
}
fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name))
for _, spec := range report.SpecReports {
name := fmt.Sprintf("[%s]", spec.LeafNodeType)
@@ -59,6 +63,10 @@ func GenerateTeamcityReport(report types.Report, dst string) error {
if len(semVerConstraints) > 0 {
name = name + " [" + strings.Join(semVerConstraints, ", ") + "]"
}
componentSemVerConstraints := spec.ComponentSemVerConstraints()
if len(componentSemVerConstraints) > 0 {
name = name + " [" + formatComponentSemVerConstraintsToString(componentSemVerConstraints) + "]"
}
name = tcEscape(name)
fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name)

View File

@@ -450,6 +450,15 @@ func (g ginkgoErrors) InvalidEmptySemVerConstraint(cl CodeLocation) error {
}
}
// InvalidEmptyComponentForSemVerConstraint is returned when a
// ComponentSemVerConstraint is constructed with an empty component name.
// cl pinpoints the offending call site so the error message can direct
// the user to it.
func (g ginkgoErrors) InvalidEmptyComponentForSemVerConstraint(cl CodeLocation) error {
	return GinkgoError{
		Heading:      "Invalid Empty Component for ComponentSemVerConstraint",
		Message:      "ComponentSemVerConstraint requires a non-empty component name",
		CodeLocation: cl,
		DocLink:      "spec-semantic-version-filtering",
	}
}
/* Table errors */
func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error {
return GinkgoError{

View File

@@ -2,11 +2,12 @@ package types
import (
"fmt"
"strings"
"github.com/Masterminds/semver/v3"
)
type SemVerFilter func([]string) bool
type SemVerFilter func(component string, constraints []string) bool
func MustParseSemVerFilter(input string) SemVerFilter {
filter, err := ParseSemVerFilter(input)
@@ -16,30 +17,90 @@ func MustParseSemVerFilter(input string) SemVerFilter {
return filter
}
func ParseSemVerFilter(filterVersion string) (SemVerFilter, error) {
if filterVersion == "" {
return func(_ []string) bool { return true }, nil
// ParseSemVerFilter parses a filter string that may contain both plain and
// component-specific semantic versions, separated by commas.
// Each component-specific entry has the form "component=version".
// A version given without a component applies to non-component-specific constraints.
func ParseSemVerFilter(componentFilterVersions string) (SemVerFilter, error) {
if componentFilterVersions == "" {
return func(_ string, _ []string) bool { return true }, nil
}
targetVersion, err := semver.NewVersion(filterVersion)
if err != nil {
return nil, fmt.Errorf("invalid filter version: %w", err)
result := map[string]*semver.Version{}
parts := strings.Split(componentFilterVersions, ",")
for _, part := range parts {
part = strings.TrimSpace(part)
if len(part) == 0 {
continue
}
if strings.Contains(part, "=") {
// validate component-specific version string
invalidPart, invalidErr := false, fmt.Errorf("invalid component filter version: %s", part)
subParts := strings.Split(part, "=")
if len(subParts) != 2 {
invalidPart = true
}
component := strings.TrimSpace(subParts[0])
versionStr := strings.TrimSpace(subParts[1])
if len(component) == 0 || len(versionStr) == 0 {
invalidPart = true
}
if invalidPart {
return nil, invalidErr
}
// validate semver
v, err := semver.NewVersion(versionStr)
if err != nil {
return nil, fmt.Errorf("invalid component filter version: %s, error: %w", part, err)
}
result[component] = v
} else {
v, err := semver.NewVersion(part)
if err != nil {
return nil, fmt.Errorf("invalid filter version: %s, error: %w", part, err)
}
result[""] = v
}
}
return func(constraints []string) bool {
return func(component string, constraints []string) bool {
// unconstrained specs always run
if len(constraints) == 0 {
if len(component) == 0 && len(constraints) == 0 {
return true
}
for _, constraintStr := range constraints {
constraint, err := semver.NewConstraint(constraintStr)
if err != nil {
return false
}
// check non-component specific version constraints
if len(component) == 0 && len(constraints) != 0 {
v := result[""]
if v != nil {
for _, constraintStr := range constraints {
constraint, err := semver.NewConstraint(constraintStr)
if err != nil {
return false
}
if !constraint.Check(targetVersion) {
return false
if !constraint.Check(v) {
return false
}
}
}
}
// check component-specific version constraints
if len(component) != 0 && len(constraints) != 0 {
v := result[component]
if v != nil {
for _, constraintStr := range constraints {
constraint, err := semver.NewConstraint(constraintStr)
if err != nil {
return false
}
if !constraint.Check(v) {
return false
}
}
}
}

View File

@@ -38,6 +38,10 @@ type ConstructionNodeReport struct {
// all Describe/Context/When containers in this spec's hierarchy
ContainerHierarchySemVerConstraints [][]string
// ContainerHierarchyComponentSemVerConstraints is a slice containing the component-specific semVerConstraints of
// all Describe/Context/When containers in this spec's hierarchy
ContainerHierarchyComponentSemVerConstraints []map[string][]string
// IsSerial captures whether the any container has the Serial decorator
IsSerial bool
@@ -85,6 +89,9 @@ type Report struct {
//SuiteSemVerConstraints captures any semVerConstraints attached to the suite by the DSL's RunSpecs() function
SuiteSemVerConstraints []string
//SuiteComponentSemVerConstraints captures any component-specific semVerConstraints attached to the suite by the DSL's RunSpecs() function
SuiteComponentSemVerConstraints map[string][]string
//SuiteSucceeded captures the success or failure status of the test run
//If true, the test run is considered successful.
//If false, the test run is considered unsuccessful
@@ -188,14 +195,19 @@ type SpecReport struct {
// all Describe/Context/When containers in this spec's hierarchy
ContainerHierarchySemVerConstraints [][]string
// ContainerHierarchyComponentSemVerConstraints is a slice containing the component-specific semVerConstraints of
// all Describe/Context/When containers in this spec's hierarchy
ContainerHierarchyComponentSemVerConstraints []map[string][]string
// LeafNodeType, LeafNodeLocation, LeafNodeLabels, LeafNodeSemVerConstraints and LeafNodeText capture the NodeType, CodeLocation, and text
// of the Ginkgo node being tested (typically an NodeTypeIt node, though this can also be
// one of the NodeTypesForSuiteLevelNodes node types)
LeafNodeType NodeType
LeafNodeLocation CodeLocation
LeafNodeLabels []string
LeafNodeSemVerConstraints []string
LeafNodeText string
LeafNodeType NodeType
LeafNodeLocation CodeLocation
LeafNodeLabels []string
LeafNodeSemVerConstraints []string
LeafNodeComponentSemVerConstraints map[string][]string
LeafNodeText string
// Captures the Spec Priority
SpecPriority int
@@ -261,52 +273,54 @@ type SpecReport struct {
func (report SpecReport) MarshalJSON() ([]byte, error) {
//All this to avoid emitting an empty Failure struct in the JSON
out := struct {
ContainerHierarchyTexts []string
ContainerHierarchyLocations []CodeLocation
ContainerHierarchyLabels [][]string
ContainerHierarchySemVerConstraints [][]string
LeafNodeType NodeType
LeafNodeLocation CodeLocation
LeafNodeLabels []string
LeafNodeSemVerConstraints []string
LeafNodeText string
State SpecState
StartTime time.Time
EndTime time.Time
RunTime time.Duration
ParallelProcess int
Failure *Failure `json:",omitempty"`
NumAttempts int
MaxFlakeAttempts int
MaxMustPassRepeatedly int
CapturedGinkgoWriterOutput string `json:",omitempty"`
CapturedStdOutErr string `json:",omitempty"`
ReportEntries ReportEntries `json:",omitempty"`
ProgressReports []ProgressReport `json:",omitempty"`
AdditionalFailures []AdditionalFailure `json:",omitempty"`
SpecEvents SpecEvents `json:",omitempty"`
ContainerHierarchyTexts []string
ContainerHierarchyLocations []CodeLocation
ContainerHierarchyLabels [][]string
ContainerHierarchySemVerConstraints [][]string
ContainerHierarchyComponentSemVerConstraints []map[string][]string
LeafNodeType NodeType
LeafNodeLocation CodeLocation
LeafNodeLabels []string
LeafNodeSemVerConstraints []string
LeafNodeText string
State SpecState
StartTime time.Time
EndTime time.Time
RunTime time.Duration
ParallelProcess int
Failure *Failure `json:",omitempty"`
NumAttempts int
MaxFlakeAttempts int
MaxMustPassRepeatedly int
CapturedGinkgoWriterOutput string `json:",omitempty"`
CapturedStdOutErr string `json:",omitempty"`
ReportEntries ReportEntries `json:",omitempty"`
ProgressReports []ProgressReport `json:",omitempty"`
AdditionalFailures []AdditionalFailure `json:",omitempty"`
SpecEvents SpecEvents `json:",omitempty"`
}{
ContainerHierarchyTexts: report.ContainerHierarchyTexts,
ContainerHierarchyLocations: report.ContainerHierarchyLocations,
ContainerHierarchyLabels: report.ContainerHierarchyLabels,
ContainerHierarchySemVerConstraints: report.ContainerHierarchySemVerConstraints,
LeafNodeType: report.LeafNodeType,
LeafNodeLocation: report.LeafNodeLocation,
LeafNodeLabels: report.LeafNodeLabels,
LeafNodeSemVerConstraints: report.LeafNodeSemVerConstraints,
LeafNodeText: report.LeafNodeText,
State: report.State,
StartTime: report.StartTime,
EndTime: report.EndTime,
RunTime: report.RunTime,
ParallelProcess: report.ParallelProcess,
Failure: nil,
ReportEntries: nil,
NumAttempts: report.NumAttempts,
MaxFlakeAttempts: report.MaxFlakeAttempts,
MaxMustPassRepeatedly: report.MaxMustPassRepeatedly,
CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput,
CapturedStdOutErr: report.CapturedStdOutErr,
ContainerHierarchyTexts: report.ContainerHierarchyTexts,
ContainerHierarchyLocations: report.ContainerHierarchyLocations,
ContainerHierarchyLabels: report.ContainerHierarchyLabels,
ContainerHierarchySemVerConstraints: report.ContainerHierarchySemVerConstraints,
ContainerHierarchyComponentSemVerConstraints: report.ContainerHierarchyComponentSemVerConstraints,
LeafNodeType: report.LeafNodeType,
LeafNodeLocation: report.LeafNodeLocation,
LeafNodeLabels: report.LeafNodeLabels,
LeafNodeSemVerConstraints: report.LeafNodeSemVerConstraints,
LeafNodeText: report.LeafNodeText,
State: report.State,
StartTime: report.StartTime,
EndTime: report.EndTime,
RunTime: report.RunTime,
ParallelProcess: report.ParallelProcess,
Failure: nil,
ReportEntries: nil,
NumAttempts: report.NumAttempts,
MaxFlakeAttempts: report.MaxFlakeAttempts,
MaxMustPassRepeatedly: report.MaxMustPassRepeatedly,
CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput,
CapturedStdOutErr: report.CapturedStdOutErr,
}
if !report.Failure.IsZero() {
@@ -404,6 +418,34 @@ func (report SpecReport) SemVerConstraints() []string {
return out
}
// ComponentSemVerConstraints returns a deduped map of all the spec's component-specific SemVerConstraints.
// Constraints are aggregated from every container in the spec's hierarchy and
// then from the leaf node. Within each component, duplicates are dropped while
// preserving first-occurrence order.
//
// Note: the previous implementation relied on slices.Compact, which removes
// only *consecutive* duplicates, and aliased the report's stored slices; this
// version fully dedupes and always appends into fresh slices.
func (report SpecReport) ComponentSemVerConstraints() map[string][]string {
	out := map[string][]string{}
	// seen tracks, per component, which constraint strings are already in out.
	seen := map[string]map[string]bool{}
	add := func(component string, constraints []string) {
		if seen[component] == nil {
			seen[component] = map[string]bool{}
		}
		for _, constraint := range constraints {
			if !seen[component][constraint] {
				seen[component][constraint] = true
				out[component] = append(out[component], constraint)
			}
		}
	}
	for _, compSemVerConstraints := range report.ContainerHierarchyComponentSemVerConstraints {
		for component, constraints := range compSemVerConstraints {
			add(component, constraints)
		}
	}
	for component, constraints := range report.LeafNodeComponentSemVerConstraints {
		add(component, constraints)
	}
	return out
}
// MatchesLabelFilter returns true if the spec satisfies the passed in label filter query
func (report SpecReport) MatchesLabelFilter(query string) (bool, error) {
filter, err := ParseLabelFilter(query)
@@ -419,7 +461,22 @@ func (report SpecReport) MatchesSemVerFilter(version string) (bool, error) {
if err != nil {
return false, err
}
return filter(report.SemVerConstraints()), nil
semVerConstraints := report.SemVerConstraints()
if len(semVerConstraints) != 0 && filter("", report.SemVerConstraints()) == false {
return false, nil
}
componentSemVerConstraints := report.ComponentSemVerConstraints()
if len(componentSemVerConstraints) != 0 {
for component, constraints := range componentSemVerConstraints {
if filter(component, constraints) == false {
return false, nil
}
}
}
return true, nil
}
// FileName() returns the name of the file containing the spec

View File

@@ -1,3 +1,3 @@
package types
const VERSION = "2.27.5"
const VERSION = "2.28.0"

View File

@@ -1,3 +1,7 @@
## 1.39.1
Update all dependencies. This auto-updated the required version of Go to 1.24, consistent with the fact that Go 1.23 has been out of support for almost six months.
## 1.39.0
### Features

View File

@@ -22,7 +22,7 @@ import (
"github.com/onsi/gomega/types"
)
const GOMEGA_VERSION = "1.39.0"
const GOMEGA_VERSION = "1.39.1"
const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
If you're using Ginkgo then you probably forgot to put your assertion in an It().

View File

@@ -21,6 +21,7 @@ package auth
import (
"context"
"fmt"
"path/filepath"
"strings"
"time"
@@ -75,11 +76,6 @@ func expandAndVerifyScope(ctx context.Context, req interface{}, tokenScope map[s
return nil
}
case strings.HasPrefix(k, "share"):
if err = resolveUserShare(ctx, ref, tokenScope[k], client, mgr); err == nil {
return nil
}
case strings.HasPrefix(k, "lightweight"):
if err = resolveLightweightScope(ctx, ref, tokenScope[k], user, client, mgr); err == nil {
return nil
@@ -130,8 +126,13 @@ func expandAndVerifyScope(ctx context.Context, req interface{}, tokenScope map[s
}
func resolveLightweightScope(ctx context.Context, ref *provider.Reference, scope *authpb.Scope, user *userpb.User, client gateway.GatewayAPIClient, mgr token.Manager) error {
refString, err := storagespace.FormatReference(ref)
if err != nil {
// cannot format reference, so cannot be valid
return errtypes.PermissionDenied("invalid reference")
}
// Check if this ref is cached
key := "lw:" + user.Id.OpaqueId + scopeDelimiter + getRefKey(ref)
key := "lw:" + user.Id.OpaqueId + scopeDelimiter + refString
if _, err := scopeExpansionCache.Get(key); err == nil {
return nil
}
@@ -164,13 +165,7 @@ func resolvePublicShare(ctx context.Context, ref *provider.Reference, scope *aut
return err
}
if err := checkCacheForNestedResource(ctx, ref, share.ResourceId, client, mgr); err == nil {
return nil
}
// Some services like wopi don't access the shared resource relative to the
// share root but instead relative to the shared resources parent.
return checkRelativeReference(ctx, ref, share.ResourceId, client)
return checkCacheForNestedResource(ctx, ref, share.ResourceId, client, mgr)
}
func resolveOCMShare(ctx context.Context, ref *provider.Reference, scope *authpb.Scope, client gateway.GatewayAPIClient, mgr token.Manager) error {
@@ -184,69 +179,18 @@ func resolveOCMShare(ctx context.Context, ref *provider.Reference, scope *authpb
ref.ResourceId = share.GetResourceId()
}
if err := checkCacheForNestedResource(ctx, ref, share.ResourceId, client, mgr); err == nil {
return nil
}
// Some services like wopi don't access the shared resource relative to the
// share root but instead relative to the shared resources parent.
return checkRelativeReference(ctx, ref, share.ResourceId, client)
}
// checkRelativeReference checks if the shared resource is being accessed via a
// relative reference, e.g.:
//
//	storage: abcd, space: efgh
//	/root (id: efgh)
//	- New file.txt (id: ijkl) <- shared resource
//
// If the requested reference looks like
// Reference{ResourceId: {StorageId: "abcd", SpaceId: "efgh"}, Path: "./New file.txt"}
// then the request is considered relative and this function succeeds (returns nil).
// Only references which are relative to the immediate parent of a resource are
// considered valid. On success the reference is recorded in the scope-expansion
// cache so subsequent checks are cheap.
func checkRelativeReference(ctx context.Context, requested *provider.Reference, sharedResourceID *provider.ResourceId, client gateway.GatewayAPIClient) error {
	// Stat the shared resource to learn its id, parent and path.
	sRes, err := client.Stat(ctx, &provider.StatRequest{Ref: &provider.Reference{ResourceId: sharedResourceID}})
	if err != nil {
		return err
	}
	if sRes.Status.Code != rpc.Code_CODE_OK {
		return statuspkg.NewErrorFromCode(sRes.Status.Code, "auth interceptor")
	}
	sharedResource := sRes.Info

	// Is this a shared space (a space root has no parent)?
	if sharedResource.ParentId == nil {
		// Is the requested resource part of the shared space?
		if requested.ResourceId.StorageId != sharedResource.Id.StorageId || requested.ResourceId.SpaceId != sharedResource.Id.SpaceId {
			return errtypes.PermissionDenied("space access forbidden via public link")
		}
	} else {
		// NOTE(review): this writes through the stat result's ParentId pointer,
		// mutating sRes.Info in place — presumably intentional since sRes is
		// local, but verify no caller reuses the response.
		parentID := sharedResource.ParentId
		parentID.StorageId = sharedResource.Id.StorageId
		// Accept only references addressed to the immediate parent, or whose
		// path matches the shared resource's own (relative) path.
		if !utils.ResourceIDEqual(parentID, requested.ResourceId) && utils.MakeRelativePath(sharedResource.Path) != requested.Path {
			return errtypes.PermissionDenied("access forbidden via public link")
		}
	}

	// Cache the positive verdict keyed by shared resource id + requested ref.
	key := storagespace.FormatResourceID(sharedResourceID) + scopeDelimiter + getRefKey(requested)
	_ = scopeExpansionCache.SetWithExpire(key, nil, scopeCacheExpiration*time.Second)
	return nil
}
// resolveUserShare decodes the user share carried in the token scope and
// verifies that ref is the shared resource itself or nested underneath it.
func resolveUserShare(ctx context.Context, ref *provider.Reference, scope *authpb.Scope, client gateway.GatewayAPIClient, mgr token.Manager) error {
	var share collaboration.Share
	if err := utils.UnmarshalJSONToProtoV1(scope.Resource.Value, &share); err != nil {
		return err
	}
	return checkCacheForNestedResource(ctx, ref, share.ResourceId, client, mgr)
}
func checkCacheForNestedResource(ctx context.Context, ref *provider.Reference, resource *provider.ResourceId, client gateway.GatewayAPIClient, mgr token.Manager) error {
refString, err := storagespace.FormatReference(ref)
if err != nil {
// cannot format reference, so cannot be valid
return errtypes.PermissionDenied("invalid reference")
}
// Check if this ref is cached
key := storagespace.FormatResourceID(resource) + scopeDelimiter + getRefKey(ref)
key := storagespace.FormatResourceID(resource) + scopeDelimiter + refString
if _, err := scopeExpansionCache.Get(key); err == nil {
return nil
}
@@ -270,40 +214,25 @@ func checkIfNestedResource(ctx context.Context, ref *provider.Reference, parent
return false, statuspkg.NewErrorFromCode(statResponse.Status.Code, "auth interceptor")
}
pathResp, err := client.GetPath(ctx, &provider.GetPathRequest{ResourceId: statResponse.GetInfo().GetId()})
if err != nil {
return false, err
}
if pathResp.Status.Code != rpc.Code_CODE_OK {
return false, statuspkg.NewErrorFromCode(pathResp.Status.Code, "auth interceptor")
}
parentPath := pathResp.Path
parentInfo := statResponse.GetInfo()
childPath := ref.GetPath()
if childPath != "" && childPath != "." && strings.HasPrefix(childPath, parentPath) {
// if the request is relative from the root, we can return directly
return true, nil
}
// The request is not relative to the root. We need to find out if the requested resource is child of the `parent` (coming from token scope)
// We need to find out if the requested resource is child of the `parent` (coming from token scope)
// We mint a token as the owner of the public share and try to stat the reference
// TODO(ishank011): We need to find a better alternative to this
// NOTE: did somebody say service accounts? ...
var user *userpb.User
if statResponse.GetInfo().GetOwner().GetType() == userpb.UserType_USER_TYPE_SPACE_OWNER {
if parentInfo.GetOwner().GetType() == userpb.UserType_USER_TYPE_SPACE_OWNER {
// fake a space owner user
user = &userpb.User{
Id: statResponse.GetInfo().GetOwner(),
Id: parentInfo.GetOwner(),
}
} else {
userResp, err := client.GetUser(ctx, &userpb.GetUserRequest{UserId: statResponse.Info.Owner, SkipFetchingUserGroups: true})
userResp, err := client.GetUser(ctx, &userpb.GetUserRequest{UserId: parentInfo.GetOwner(), SkipFetchingUserGroups: true})
if err != nil || userResp.Status.Code != rpc.Code_CODE_OK {
return false, err
}
user = userResp.User
}
scope, err := scope.AddOwnerScope(map[string]*authpb.Scope{})
if err != nil {
return false, err
@@ -329,6 +258,24 @@ func checkIfNestedResource(ctx context.Context, ref *provider.Reference, parent
if childStat.GetStatus().GetCode() != rpc.Code_CODE_OK {
return false, statuspkg.NewErrorFromCode(childStat.Status.Code, "auth interceptor")
}
childInfo := childStat.GetInfo()
// child can only be a nested resource if it is within the same space as parent
if childInfo.GetId().GetStorageId() != parentInfo.GetId().GetStorageId() ||
childInfo.GetId().GetSpaceId() != parentInfo.GetId().GetSpaceId() {
return false, nil
}
// Both resources are in the same space, now check paths
pathResp, err := client.GetPath(ctx, &provider.GetPathRequest{ResourceId: statResponse.GetInfo().GetId()})
if err != nil {
return false, err
}
if pathResp.Status.Code != rpc.Code_CODE_OK {
return false, statuspkg.NewErrorFromCode(pathResp.Status.Code, "auth interceptor")
}
parentPath := pathResp.Path
pathResp, err = client.GetPath(ctx, &provider.GetPathRequest{ResourceId: childStat.GetInfo().GetId()})
if err != nil {
return false, err
@@ -336,10 +283,12 @@ func checkIfNestedResource(ctx context.Context, ref *provider.Reference, parent
if pathResp.GetStatus().GetCode() != rpc.Code_CODE_OK {
return false, statuspkg.NewErrorFromCode(pathResp.Status.Code, "auth interceptor")
}
childPath = pathResp.Path
return strings.HasPrefix(childPath, parentPath), nil
childPath := pathResp.Path
rel, err := filepath.Rel(parentPath, childPath)
if err != nil {
return false, err
}
return !strings.HasPrefix(rel, ".."), nil
}
func extractRefFromListProvidersReq(v *registry.ListStorageProvidersRequest) (*provider.Reference, bool) {
@@ -513,17 +462,3 @@ func extractShareRef(req interface{}) (*collaboration.ShareReference, bool) {
}
return nil, false
}
// getRefKey derives a cache-key fragment from a reference: the path when one
// is set, otherwise the formatted resource id.
func getRefKey(ref *provider.Reference) string {
	switch {
	case ref.GetPath() != "":
		return ref.Path
	case ref.GetResourceId() != nil:
		return storagespace.FormatResourceID(ref.ResourceId)
	default:
		// A malicious request may carry neither a path nor a resource id;
		// return an empty key rather than panicking.
		return ""
	}
}

View File

@@ -21,6 +21,7 @@ package ocdav
import (
"context"
"fmt"
"io"
"net/http"
"path"
"path/filepath"
@@ -274,7 +275,7 @@ func (s *svc) executePathCopy(ctx context.Context, selector pool.Selectable[gate
var uploadEP, uploadToken string
for _, p := range uRes.Protocols {
if p.Protocol == "simple" {
if p.Protocol == "tus" {
uploadEP, uploadToken = p.UploadEndpoint, p.Token
}
}
@@ -303,24 +304,10 @@ func (s *svc) executePathCopy(ctx context.Context, selector pool.Selectable[gate
}
// 4. do upload
httpUploadReq, err := rhttp.NewRequest(ctx, "PUT", uploadEP, httpDownloadRes.Body)
fileid, err = s.tusUpload(ctx, uploadEP, uploadToken, httpDownloadRes.Body, int64(cp.sourceInfo.GetSize()))
if err != nil {
return err
}
httpUploadReq.Header.Set(datagateway.TokenTransportHeader, uploadToken)
httpUploadReq.ContentLength = int64(cp.sourceInfo.GetSize())
httpUploadRes, err := s.client.Do(httpUploadReq)
if err != nil {
return err
}
defer httpUploadRes.Body.Close()
if httpUploadRes.StatusCode != http.StatusOK {
return err
}
fileid = httpUploadRes.Header.Get(net.HeaderOCFileID)
}
w.Header().Set(net.HeaderOCFileID, fileid)
@@ -498,7 +485,7 @@ func (s *svc) executeSpacesCopy(ctx context.Context, w http.ResponseWriter, sele
var uploadEP, uploadToken string
for _, p := range uRes.Protocols {
if p.Protocol == "simple" {
if p.Protocol == "tus" {
uploadEP, uploadToken = p.UploadEndpoint, p.Token
}
}
@@ -530,24 +517,10 @@ func (s *svc) executeSpacesCopy(ctx context.Context, w http.ResponseWriter, sele
}
// 4. do upload
httpUploadReq, err := rhttp.NewRequest(ctx, http.MethodPut, uploadEP, httpDownloadRes.Body)
fileid, err = s.tusUpload(ctx, uploadEP, uploadToken, httpDownloadRes.Body, int64(cp.sourceInfo.GetSize()))
if err != nil {
return err
}
httpUploadReq.Header.Set(datagateway.TokenTransportHeader, uploadToken)
httpUploadReq.ContentLength = int64(cp.sourceInfo.GetSize())
httpUploadRes, err := s.client.Do(httpUploadReq)
if err != nil {
return err
}
defer httpUploadRes.Body.Close()
if httpUploadRes.StatusCode != http.StatusOK {
return err
}
fileid = httpUploadRes.Header.Get(net.HeaderOCFileID)
}
w.Header().Set(net.HeaderOCFileID, fileid)
@@ -756,3 +729,55 @@ func (s *svc) prepareCopy(ctx context.Context, w http.ResponseWriter, r *http.Re
return &copy{source: srcRef, sourceInfo: srcStatRes.Info, depth: depth, successCode: successCode, destination: dstRef}
}
// tusUpload streams size bytes from body to the TUS upload endpoint uploadEP
// in fixed-size PATCH requests, authenticating each request with uploadToken.
// It returns the file id reported by the data gateway via the OC-FileId
// response header (the value of the last response that carried one).
//
// body is a plain, non-seekable stream: bytes handed to a PATCH request can
// never be replayed. The server's Upload-Offset acknowledgement must therefore
// advance by exactly the chunk size we sent; anything else is treated as a
// fatal protocol error rather than silently continued (a short ack would
// corrupt the upload, a stale ack would loop forever).
func (s *svc) tusUpload(ctx context.Context, uploadEP, uploadToken string, body io.Reader, size int64) (string, error) {
	const chunkSize = int64(10000000) // 10 MB per PATCH request
	var offset int64
	var fileid string
	for offset < size {
		n := chunkSize
		if remaining := size - offset; remaining < n {
			n = remaining
		}
		req, err := rhttp.NewRequest(ctx, http.MethodPatch, uploadEP, io.LimitReader(body, n))
		if err != nil {
			return "", err
		}
		req.Header.Set(datagateway.TokenTransportHeader, uploadToken)
		req.Header.Set(net.HeaderTusResumable, "1.0.0")
		req.Header.Set(net.HeaderUploadOffset, strconv.FormatInt(offset, 10))
		req.Header.Set(net.HeaderContentType, "application/offset+octet-stream")
		req.ContentLength = n
		res, err := s.client.Do(req)
		if err != nil {
			return "", err
		}
		if res.StatusCode != http.StatusNoContent && res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
			res.Body.Close()
			return "", fmt.Errorf("unexpected status code during TUS upload: %d", res.StatusCode)
		}
		if id := res.Header.Get(net.HeaderOCFileID); id != "" {
			fileid = id
		}
		newOffsetStr := res.Header.Get(net.HeaderUploadOffset)
		res.Body.Close()
		if newOffsetStr == "" {
			// No acknowledgement header; assume the full chunk was accepted.
			offset += n
			continue
		}
		newOffset, err := strconv.ParseInt(newOffsetStr, 10, 64)
		if err != nil {
			return "", fmt.Errorf("invalid Upload-Offset header: %v", err)
		}
		// The n bytes just sent were consumed from body and cannot be
		// re-sent, so any acknowledgement other than offset+n is fatal.
		if newOffset != offset+n {
			return "", fmt.Errorf("server acknowledged upload offset %d, expected %d", newOffset, offset+n)
		}
		offset = newOffset
	}
	return fileid, nil
}

View File

@@ -97,10 +97,6 @@ func (s *svc) handleSpacesMkCol(w http.ResponseWriter, r *http.Request, spaceID
sublog := appctx.GetLogger(ctx).With().Str("path", r.URL.Path).Str("spaceid", spaceID).Str("handler", "mkcol").Logger()
if err := ValidateName(filename(r.URL.Path), s.nameValidators); err != nil {
return http.StatusBadRequest, err
}
parentRef, err := spacelookup.MakeStorageSpaceReference(spaceID, path.Dir(r.URL.Path))
if err != nil {
return http.StatusBadRequest, fmt.Errorf("invalid space id")

View File

@@ -131,16 +131,6 @@ func (s *svc) handleSpacesMove(w http.ResponseWriter, r *http.Request, srcSpaceI
dstSpaceID, dstRelPath := router.ShiftPath(dst)
if dstRelPath != "" && dstRelPath != "." && dstRelPath != "/" {
err := ValidateDestination(filename(dstRelPath), s.nameValidators)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
b, err := errors.Marshal(http.StatusBadRequest, "destination naming rules", "", "")
errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err)
return
}
}
dstRef, err := spacelookup.MakeStorageSpaceReference(dstSpaceID, dstRelPath)
if err != nil {
w.WriteHeader(http.StatusBadRequest)

View File

@@ -1,75 +0,0 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package scope
import (
"context"
"fmt"
authpb "github.com/cs3org/go-cs3apis/cs3/auth/provider/v1beta1"
collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1"
types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/utils"
"github.com/rs/zerolog"
)
// receivedShareScope verifies that a request made with a received-share-scoped
// token only touches that received share (or the whitelisted HTTP endpoints).
func receivedShareScope(_ context.Context, scope *authpb.Scope, resource interface{}, logger *zerolog.Logger) (bool, error) {
	var received collaboration.ReceivedShare
	if err := utils.UnmarshalJSONToProtoV1(scope.Resource.Value, &received); err != nil {
		return false, err
	}

	switch req := resource.(type) {
	case *collaboration.GetReceivedShareRequest:
		return checkShareRef(received.Share, req.GetRef()), nil
	case *collaboration.UpdateReceivedShareRequest:
		return checkShare(received.Share, req.GetShare().GetShare()), nil
	case string:
		// Plain HTTP endpoints addressed by path.
		return checkSharePath(req) || checkResourcePath(req), nil
	}

	msg := fmt.Sprintf("resource type assertion failed: %+v", resource)
	logger.Debug().Str("scope", "receivedShareScope").Msg(msg)
	return false, errtypes.InternalError(msg)
}
// AddReceivedShareScope adds the scope to allow access to a received user/group share and
// the shared resource.
func AddReceivedShareScope(share *collaboration.ReceivedShare, role authpb.Role, scopes map[string]*authpb.Scope) (map[string]*authpb.Scope, error) {
	// Expose only the fields scope verification actually needs.
	trimmed := &collaboration.Share{
		Id:         share.Share.Id,
		Owner:      share.Share.Owner,
		Creator:    share.Share.Creator,
		ResourceId: share.Share.ResourceId,
	}
	encoded, err := utils.MarshalProtoV1ToJSON(&collaboration.ReceivedShare{Share: trimmed})
	if err != nil {
		return nil, err
	}
	if scopes == nil {
		scopes = map[string]*authpb.Scope{}
	}
	scopes["receivedshare:"+share.Share.Id.OpaqueId] = &authpb.Scope{
		Resource: &types.OpaqueEntry{
			Decoder: "json",
			Value:   encoded,
		},
		Role: role,
	}
	return scopes, nil
}

View File

@@ -31,13 +31,11 @@ import (
type Verifier func(context.Context, *authpb.Scope, interface{}, *zerolog.Logger) (bool, error)
var supportedScopes = map[string]Verifier{
"user": userScope,
"publicshare": publicshareScope,
"resourceinfo": resourceinfoScope,
"share": shareScope,
"receivedshare": receivedShareScope,
"lightweight": lightweightAccountScope,
"ocmshare": ocmShareScope,
"user": userScope,
"publicshare": publicshareScope,
"resourceinfo": resourceinfoScope,
"lightweight": lightweightAccountScope,
"ocmshare": ocmShareScope,
}
// VerifyScope is the function to be called when dismantling tokens to check if

View File

@@ -1,142 +0,0 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package scope
import (
"context"
"fmt"
"strings"
authpb "github.com/cs3org/go-cs3apis/cs3/auth/provider/v1beta1"
collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
registry "github.com/cs3org/go-cs3apis/cs3/storage/registry/v1beta1"
types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/utils"
"github.com/rs/zerolog"
)
// shareScope verifies that a request made with a share-scoped token stays
// within the bounds of that share: only the shared resource may be touched,
// and only through the whitelisted request types and HTTP endpoints.
func shareScope(_ context.Context, scope *authpb.Scope, resource interface{}, logger *zerolog.Logger) (bool, error) {
	var s collaboration.Share
	if err := utils.UnmarshalJSONToProtoV1(scope.Resource.Value, &s); err != nil {
		return false, err
	}

	switch req := resource.(type) {
	// Requests a viewer role may issue.
	case *registry.GetStorageProvidersRequest:
		return checkShareStorageRef(&s, req.GetRef()), nil
	case *provider.StatRequest:
		return checkShareStorageRef(&s, req.GetRef()), nil
	case *provider.ListContainerRequest:
		return checkShareStorageRef(&s, req.GetRef()), nil
	case *provider.InitiateFileDownloadRequest:
		return checkShareStorageRef(&s, req.GetRef()), nil

	// Requests that additionally require an editor role.
	// TODO(ishank011): Add role checks,
	// need to return appropriate status codes in the ocs/ocdav layers.
	case *provider.CreateContainerRequest:
		return checkShareStorageRef(&s, req.GetRef()), nil
	case *provider.TouchFileRequest:
		return checkShareStorageRef(&s, req.GetRef()), nil
	case *provider.DeleteRequest:
		return checkShareStorageRef(&s, req.GetRef()), nil
	case *provider.MoveRequest:
		// Both endpoints of the move must stay inside the share.
		return checkShareStorageRef(&s, req.GetSource()) && checkShareStorageRef(&s, req.GetDestination()), nil
	case *provider.InitiateFileUploadRequest:
		return checkShareStorageRef(&s, req.GetRef()), nil

	// Share bookkeeping.
	case *collaboration.ListReceivedSharesRequest:
		return true, nil
	case *collaboration.GetReceivedShareRequest:
		return checkShareRef(&s, req.GetRef()), nil

	// Plain HTTP endpoints addressed by path.
	case string:
		return checkSharePath(req) || checkResourcePath(req), nil
	}

	msg := fmt.Sprintf("resource type assertion failed: %+v", resource)
	logger.Debug().Str("scope", "shareScope").Msg(msg)
	return false, errtypes.InternalError(msg)
}
// checkShareStorageRef reports whether the reference r addresses exactly the
// resource shared by s. Only pure id-based references qualify: the path must
// be empty, e.g. ref: <id:<storage_id:$storageID opaque_id:$opaqueID > >.
func checkShareStorageRef(s *collaboration.Share, r *provider.Reference) bool {
	rid := r.GetResourceId()
	if rid == nil || r.Path != "" {
		return false
	}
	return utils.ResourceIDEqual(s.ResourceId, rid)
}
// checkShareRef reports whether ref identifies the share s, either by opaque
// share id or by the (owner/creator, resource, grantee) key.
func checkShareRef(s *collaboration.Share, ref *collaboration.ShareReference) bool {
	if id := ref.GetId(); id != nil {
		return id.OpaqueId == s.Id.OpaqueId
	}
	key := ref.GetKey()
	if key == nil {
		return false
	}
	ownerMatches := utils.UserEqual(key.Owner, s.Owner) || utils.UserEqual(key.Owner, s.Creator)
	return ownerMatches &&
		utils.ResourceIDEqual(key.ResourceId, s.ResourceId) &&
		utils.GranteeEqual(key.Grantee, s.Grantee)
}
// checkShare reports whether s2 refers to the same share as s1, matched by
// opaque share id. A missing id on s2 never matches.
func checkShare(s1 *collaboration.Share, s2 *collaboration.Share) bool {
	id := s2.GetId()
	if id == nil {
		return false
	}
	return id.OpaqueId == s1.Id.OpaqueId
}
// checkSharePath reports whether path targets one of the HTTP endpoints a
// share-scoped token is allowed to reach (OCS sharing APIs and WebDAV).
func checkSharePath(path string) bool {
	allowedPrefixes := [...]string{
		"/ocs/v2.php/apps/files_sharing/api/v1/shares",
		"/ocs/v1.php/apps/files_sharing/api/v1/shares",
		"/remote.php/webdav",
		"/remote.php/dav/files",
	}
	for _, prefix := range allowedPrefixes {
		if strings.HasPrefix(path, prefix) {
			return true
		}
	}
	return false
}
// AddShareScope adds the scope to allow access to a user/group share and
// the shared resource.
func AddShareScope(share *collaboration.Share, role authpb.Role, scopes map[string]*authpb.Scope) (map[string]*authpb.Scope, error) {
	// Expose only the fields scope verification actually needs.
	trimmed := &collaboration.Share{
		Id:         share.Id,
		Owner:      share.Owner,
		Creator:    share.Creator,
		ResourceId: share.ResourceId,
	}
	encoded, err := utils.MarshalProtoV1ToJSON(trimmed)
	if err != nil {
		return nil, err
	}
	if scopes == nil {
		scopes = map[string]*authpb.Scope{}
	}
	scopes["share:"+share.Id.OpaqueId] = &authpb.Scope{
		Resource: &types.OpaqueEntry{
			Decoder: "json",
			Value:   encoded,
		},
		Role: role,
	}
	return scopes, nil
}