[full-ci] chore: bump reva to v2.43.0 (#2630)

This commit is contained in:
Viktor Scharf
2026-04-20 16:05:48 +02:00
committed by GitHub
parent 7d98f03703
commit 6099ca3658
36 changed files with 883 additions and 227 deletions

10
go.mod
View File

@@ -13,7 +13,7 @@ require (
github.com/beevik/etree v1.6.0
github.com/blevesearch/bleve/v2 v2.5.7
github.com/cenkalti/backoff v2.2.1+incompatible
github.com/coreos/go-oidc/v3 v3.17.0
github.com/coreos/go-oidc/v3 v3.18.0
github.com/cs3org/go-cs3apis v0.0.0-20260407125717-5d69ba49048b
github.com/davidbyttow/govips/v2 v2.17.0
github.com/dhowden/tag v0.0.0-20240417053706-3d75831295e8
@@ -33,7 +33,7 @@ require (
github.com/go-micro/plugins/v4/store/nats-js-kv v0.0.0-20240726082623-6831adfdcdc4
github.com/go-micro/plugins/v4/wrapper/monitoring/prometheus v1.2.0
github.com/go-micro/plugins/v4/wrapper/trace/opentelemetry v1.2.0
github.com/go-playground/validator/v10 v10.30.1
github.com/go-playground/validator/v10 v10.30.2
github.com/go-resty/resty/v2 v2.17.2
github.com/golang-jwt/jwt/v5 v5.3.1
github.com/golang/protobuf v1.5.4
@@ -56,7 +56,7 @@ require (
github.com/mna/pigeon v1.3.0
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826
github.com/nats-io/nats-server/v2 v2.12.6
github.com/nats-io/nats.go v1.50.0
github.com/nats-io/nats.go v1.51.0
github.com/oklog/run v1.2.0
github.com/olekukonko/tablewriter v1.1.4
github.com/onsi/ginkgo v1.16.5
@@ -65,7 +65,7 @@ require (
github.com/open-policy-agent/opa v1.15.1
github.com/opencloud-eu/icap-client v0.0.0-20250930132611-28a2afe62d89
github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20260310090739-853d972b282d
github.com/opencloud-eu/reva/v2 v2.42.7-0.20260413125349-61dfc72a7d60
github.com/opencloud-eu/reva/v2 v2.43.0
github.com/opensearch-project/opensearch-go/v4 v4.6.0
github.com/orcaman/concurrent-map v1.0.0
github.com/pkg/errors v0.9.1
@@ -207,7 +207,7 @@ require (
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.8.0 // indirect
github.com/go-git/go-git/v5 v5.17.1 // indirect
github.com/go-git/go-git/v5 v5.18.0 // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-jose/go-jose/v4 v4.1.4 // indirect
github.com/go-kit/log v0.2.1 // indirect

20
go.sum
View File

@@ -239,8 +239,8 @@ github.com/containerd/platforms v1.0.0-rc.2 h1:0SPgaNZPVWGEi4grZdV8VRYQn78y+nm6a
github.com/containerd/platforms v1.0.0-rc.2/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc=
github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
github.com/coreos/go-oidc/v3 v3.18.0 h1:V9orjXynvu5wiC9SemFTWnG4F45v403aIcjWo0d41+A=
github.com/coreos/go-oidc/v3 v3.18.0/go.mod h1:DYCf24+ncYi+XkIH97GY1+dqoRlbaSI26KVTCI9SrY4=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
@@ -388,8 +388,8 @@ github.com/go-git/go-billy/v5 v5.8.0 h1:I8hjc3LbBlXTtVuFNJuwYuMiHvQJDq1AT6u4DwDz
github.com/go-git/go-billy/v5 v5.8.0/go.mod h1:RpvI/rw4Vr5QA+Z60c6d6LXH0rYJo0uD5SqfmrrheCY=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
github.com/go-git/go-git/v5 v5.17.1 h1:WnljyxIzSj9BRRUlnmAU35ohDsjRK0EKmL0evDqi5Jk=
github.com/go-git/go-git/v5 v5.17.1/go.mod h1:pW/VmeqkanRFqR6AljLcs7EA7FbZaN5MQqO7oZADXpo=
github.com/go-git/go-git/v5 v5.18.0 h1:O831KI+0PR51hM2kep6T8k+w0/LIAD490gvqMCvL5hM=
github.com/go-git/go-git/v5 v5.18.0/go.mod h1:pW/VmeqkanRFqR6AljLcs7EA7FbZaN5MQqO7oZADXpo=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -451,8 +451,8 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.30.1 h1:f3zDSN/zOma+w6+1Wswgd9fLkdwy06ntQJp0BBvFG0w=
github.com/go-playground/validator/v10 v10.30.1/go.mod h1:oSuBIQzuJxL//3MelwSLD5hc2Tu889bF0Idm9Dg26cM=
github.com/go-playground/validator/v10 v10.30.2 h1:JiFIMtSSHb2/XBUbWM4i/MpeQm9ZK2xqPNk8vgvu5JQ=
github.com/go-playground/validator/v10 v10.30.2/go.mod h1:mAf2pIOVXjTEBrwUMGKkCWKKPs9NheYGabeB04txQSc=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8=
@@ -904,8 +904,8 @@ github.com/nats-io/jwt/v2 v2.8.1 h1:V0xpGuD/N8Mi+fQNDynXohVvp7ZztevW5io8CUWlPmU=
github.com/nats-io/jwt/v2 v2.8.1/go.mod h1:nWnOEEiVMiKHQpnAy4eXlizVEtSfzacZ1Q43LIRavZg=
github.com/nats-io/nats-server/v2 v2.12.6 h1:Egbx9Vl7Ch8wTtpXPGqbehkZ+IncKqShUxvrt1+Enc8=
github.com/nats-io/nats-server/v2 v2.12.6/go.mod h1:4HPlrvtmSO3yd7KcElDNMx9kv5EBJBnJJzQPptXlheo=
github.com/nats-io/nats.go v1.50.0 h1:5zAeQrTvyrKrWLJ0fu02W3br8ym57qf7csDzgLOpcds=
github.com/nats-io/nats.go v1.50.0/go.mod h1:26HypzazeOkyO3/mqd1zZd53STJN0EjCYF9Uy2ZOBno=
github.com/nats-io/nats.go v1.51.0 h1:ByW84XTz6W03GSSsygsZcA+xgKK8vPGaa/FCAAEHnAI=
github.com/nats-io/nats.go v1.51.0/go.mod h1:26HypzazeOkyO3/mqd1zZd53STJN0EjCYF9Uy2ZOBno=
github.com/nats-io/nkeys v0.4.15 h1:JACV5jRVO9V856KOapQ7x+EY8Jo3qw1vJt/9Jpwzkk4=
github.com/nats-io/nkeys v0.4.15/go.mod h1:CpMchTXC9fxA5zrMo4KpySxNjiDVvr8ANOSZdiNfUrs=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
@@ -954,8 +954,8 @@ github.com/opencloud-eu/inotifywaitgo v0.0.0-20251111171128-a390bae3c5e9 h1:dIft
github.com/opencloud-eu/inotifywaitgo v0.0.0-20251111171128-a390bae3c5e9/go.mod h1:JWyDC6H+5oZRdUJUgKuaye+8Ph5hEs6HVzVoPKzWSGI=
github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20260310090739-853d972b282d h1:JcqGDiyrcaQwVyV861TUyQgO7uEmsjkhfm7aQd84dOw=
github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20260310090739-853d972b282d/go.mod h1:pzatilMEHZFT3qV7C/X3MqOa3NlRQuYhlRhZTL+hN6Q=
github.com/opencloud-eu/reva/v2 v2.42.7-0.20260413125349-61dfc72a7d60 h1:XzEgzKHK1uj/Y5dLq3jvj+bqoD91L3JPyN8AhVUXPxs=
github.com/opencloud-eu/reva/v2 v2.42.7-0.20260413125349-61dfc72a7d60/go.mod h1:kLxe5tA2IfxRPHSHRN2KbrQ10C/I3O+WnPqUg6yD9cA=
github.com/opencloud-eu/reva/v2 v2.43.0 h1:b7YazijK2K3O6MM7wsUxE2UmlRkrE8h3NUxIl5nxQmo=
github.com/opencloud-eu/reva/v2 v2.43.0/go.mod h1:iXbAo4xX1PFucJjFxMsFk7DlhnfAskAt5zV2eYUVJYE=
github.com/opencloud-eu/secure v0.0.0-20260312082735-b6f5cb2244e4 h1:l2oB/RctH+t8r7QBj5p8thfEHCM/jF35aAY3WQ3hADI=
github.com/opencloud-eu/secure v0.0.0-20260312082735-b6f5cb2244e4/go.mod h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=

View File

@@ -34,7 +34,7 @@ var (
// LatestTag is the latest released version plus the dev meta version.
// Will be overwritten by the release pipeline
// Needs a manual change for every tagged release
LatestTag = "6.0.0+dev"
LatestTag = "6.1.0+dev"
// Date indicates the build date.
// This has been removed, it looks like you can only replace static strings with recent go versions

View File

@@ -7,7 +7,6 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
"net"
"net/http"
"net/url"
"reflect"
@@ -24,6 +23,33 @@ import (
"github.com/go-git/go-git/v5/utils/ioutil"
)
// contextKey is an unexported type for context keys defined in this
// package, preventing collisions with context values set by other packages.
type contextKey int

// initialRequestKey marks a request context as belonging to the initial
// /info/refs discovery request; read back by isInitialRequest.
const initialRequestKey contextKey = iota
// RedirectPolicy controls how the HTTP transport follows redirects.
//
// The values mirror Git's http.followRedirects config:
// "true" follows redirects for all requests, "false" treats redirects as
// errors, and "initial" follows redirects only for the initial
// /info/refs discovery request. The zero value defaults to "initial".
type RedirectPolicy string
const (
	// FollowInitialRedirects follows redirects only for the initial
	// /info/refs discovery request. This is the default, mirroring
	// Git's http.followRedirects behavior.
	FollowInitialRedirects RedirectPolicy = "initial"
	// FollowRedirects follows redirects for all requests.
	FollowRedirects RedirectPolicy = "true"
	// NoFollowRedirects treats any redirect as an error.
	NoFollowRedirects RedirectPolicy = "false"
)
// withInitialRequest returns a copy of ctx marked as belonging to the
// initial /info/refs discovery request, so the redirect check can allow
// redirects for it under the FollowInitialRedirects policy.
func withInitialRequest(ctx context.Context) context.Context {
	return context.WithValue(ctx, initialRequestKey, true)
}
// isInitialRequest reports whether req carries the marker set by
// withInitialRequest, i.e. whether it is the initial discovery request.
func isInitialRequest(req *http.Request) bool {
	flag, ok := req.Context().Value(initialRequestKey).(bool)
	return ok && flag
}
// it requires a bytes.Buffer, because we need to know the length
func applyHeadersToRequest(req *http.Request, content *bytes.Buffer, host string, requestType string) {
req.Header.Add("User-Agent", capability.DefaultAgent())
@@ -54,12 +80,15 @@ func advertisedReferences(ctx context.Context, s *session, serviceName string) (
s.ApplyAuthToRequest(req)
applyHeadersToRequest(req, nil, s.endpoint.Host, serviceName)
res, err := s.client.Do(req.WithContext(ctx))
res, err := s.client.Do(req.WithContext(withInitialRequest(ctx)))
if err != nil {
return nil, err
}
s.ModifyEndpointIfRedirect(res)
if err := s.ModifyEndpointIfRedirect(res); err != nil {
_ = res.Body.Close()
return nil, err
}
defer ioutil.CheckClose(res.Body, &err)
if err = NewErr(res); err != nil {
@@ -96,6 +125,7 @@ type client struct {
client *http.Client
transports *lru.Cache
mutex sync.RWMutex
follow RedirectPolicy
}
// ClientOptions holds user configurable options for the client.
@@ -106,6 +136,11 @@ type ClientOptions struct {
// size, will result in the least recently used transport getting deleted
// before the provided transport is added to the cache.
CacheMaxEntries int
// RedirectPolicy controls redirect handling. Supported values are
// "true", "false", and "initial". The zero value defaults to
// "initial", matching Git's http.followRedirects default.
RedirectPolicy RedirectPolicy
}
var (
@@ -150,12 +185,16 @@ func NewClientWithOptions(c *http.Client, opts *ClientOptions) transport.Transpo
}
cl := &client{
client: c,
follow: FollowInitialRedirects,
}
if opts != nil {
if opts.CacheMaxEntries > 0 {
cl.transports = lru.New(opts.CacheMaxEntries)
}
if opts.RedirectPolicy != "" {
cl.follow = opts.RedirectPolicy
}
}
return cl
}
@@ -289,14 +328,9 @@ func newSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (*
}
}
httpClient = &http.Client{
Transport: transport,
CheckRedirect: c.client.CheckRedirect,
Jar: c.client.Jar,
Timeout: c.client.Timeout,
}
httpClient = c.cloneHTTPClient(transport)
} else {
httpClient = c.client
httpClient = c.cloneHTTPClient(c.client.Transport)
}
s := &session{
@@ -324,30 +358,122 @@ func (s *session) ApplyAuthToRequest(req *http.Request) {
s.auth.SetAuth(req)
}
func (s *session) ModifyEndpointIfRedirect(res *http.Response) {
func (s *session) ModifyEndpointIfRedirect(res *http.Response) error {
if res.Request == nil {
return
return nil
}
if s.endpoint == nil {
return fmt.Errorf("http redirect: nil endpoint")
}
r := res.Request
if !strings.HasSuffix(r.URL.Path, infoRefsPath) {
return
return fmt.Errorf("http redirect: target %q does not end with %s", r.URL.Path, infoRefsPath)
}
if r.URL.Scheme != "http" && r.URL.Scheme != "https" {
return fmt.Errorf("http redirect: unsupported scheme %q", r.URL.Scheme)
}
if r.URL.Scheme != s.endpoint.Protocol &&
!(s.endpoint.Protocol == "http" && r.URL.Scheme == "https") {
return fmt.Errorf("http redirect: changes scheme from %q to %q", s.endpoint.Protocol, r.URL.Scheme)
}
h, p, err := net.SplitHostPort(r.URL.Host)
host := endpointHost(r.URL.Hostname())
port, err := endpointPort(r.URL.Port())
if err != nil {
h = r.URL.Host
return err
}
if p != "" {
port, err := strconv.Atoi(p)
if err == nil {
s.endpoint.Port = port
}
if host != s.endpoint.Host || effectivePort(r.URL.Scheme, port) != effectivePort(s.endpoint.Protocol, s.endpoint.Port) {
s.endpoint.User = ""
s.endpoint.Password = ""
s.auth = nil
}
s.endpoint.Host = h
s.endpoint.Host = host
s.endpoint.Port = port
s.endpoint.Protocol = r.URL.Scheme
s.endpoint.Path = r.URL.Path[:len(r.URL.Path)-len(infoRefsPath)]
return nil
}
// endpointHost returns host in a form suitable for a URL authority:
// a bare IPv6 literal (anything containing a colon) is wrapped in
// square brackets; every other host is returned unchanged.
func endpointHost(host string) string {
	if !strings.Contains(host, ":") {
		return host
	}
	// IPv6 literal: bracket it so it can later be joined with a port.
	return "[" + host + "]"
}
// endpointPort parses the port component of a redirect target URL.
// An empty string maps to 0, meaning "use the scheme default"; any
// value that is not a decimal integer is rejected.
func endpointPort(port string) (int, error) {
	if len(port) == 0 {
		return 0, nil
	}
	n, convErr := strconv.Atoi(port)
	if convErr != nil {
		return 0, fmt.Errorf("http redirect: invalid port %q", port)
	}
	return n, nil
}
// effectivePort resolves the port actually used for a connection: an
// explicit non-zero port wins; otherwise the scheme's well-known
// default applies (80 for http, 443 for https, 0 when unknown).
func effectivePort(scheme string, port int) int {
	if port != 0 {
		return port
	}
	normalized := strings.ToLower(scheme)
	if normalized == "http" {
		return 80
	}
	if normalized == "https" {
		return 443
	}
	return 0
}
// cloneHTTPClient builds a per-session http.Client that keeps the base
// client's cookie jar and timeout but swaps in the given transport and
// installs a redirect check enforcing the configured RedirectPolicy.
func (c *client) cloneHTTPClient(transport http.RoundTripper) *http.Client {
	base := c.client
	cloned := &http.Client{
		Transport:     transport,
		CheckRedirect: wrapCheckRedirect(c.follow, base.CheckRedirect),
		Jar:           base.Jar,
		Timeout:       base.Timeout,
	}
	return cloned
}
// wrapCheckRedirect composes the policy-based redirect check with an
// optional user-supplied CheckRedirect callback. The policy check runs
// first; only when it passes is next (if any) consulted.
func wrapCheckRedirect(policy RedirectPolicy, next func(*http.Request, []*http.Request) error) func(*http.Request, []*http.Request) error {
	return func(req *http.Request, via []*http.Request) error {
		err := checkRedirect(req, via, policy)
		if err == nil && next != nil {
			err = next(req, via)
		}
		return err
	}
}
// checkRedirect validates a single redirect hop: first the configured
// policy is applied, then the target scheme is restricted to http(s),
// and finally the hop count is capped at 10 (net/http's usual limit).
func checkRedirect(req *http.Request, via []*http.Request, policy RedirectPolicy) error {
	if policy == NoFollowRedirects {
		return fmt.Errorf("http redirect: redirects disabled to %s", req.URL)
	}
	if policy == "" || policy == FollowInitialRedirects {
		// Zero value defaults to "initial": only the discovery
		// request may be redirected.
		if !isInitialRequest(req) {
			return fmt.Errorf("http redirect: redirect on non-initial request to %s", req.URL)
		}
	} else if policy != FollowRedirects {
		return fmt.Errorf("http redirect: invalid redirect policy %q", policy)
	}
	scheme := req.URL.Scheme
	if scheme != "http" && scheme != "https" {
		return fmt.Errorf("http redirect: unsupported scheme %q", req.URL.Scheme)
	}
	if len(via) >= 10 {
		return fmt.Errorf("http redirect: too many redirects")
	}
	return nil
}
func (*session) Close() error {

View File

@@ -132,30 +132,64 @@ func (w *PackWriter) clean() error {
func (w *PackWriter) save() error {
base := w.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s", w.checksum))
idx, err := w.fs.Create(fmt.Sprintf("%s.idx", base))
// Pack files are content addressable. Each file is checked
// individually — if it already exists on disk, skip creating it.
idxPath := fmt.Sprintf("%s.idx", base)
exists, err := fileExists(w.fs, idxPath)
if err != nil {
return err
}
if !exists {
idx, err := w.fs.Create(idxPath)
if err != nil {
return err
}
if err := w.encodeIdx(idx); err != nil {
_ = idx.Close()
return err
}
if err := w.encodeIdx(idx); err != nil {
_ = idx.Close()
return err
}
if err := idx.Close(); err != nil {
return err
if err := idx.Close(); err != nil {
return err
}
fixPermissions(w.fs, idxPath)
}
fixPermissions(w.fs, fmt.Sprintf("%s.idx", base))
packPath := fmt.Sprintf("%s.pack", base)
if err := w.fs.Rename(w.fw.Name(), packPath); err != nil {
exists, err = fileExists(w.fs, packPath)
if err != nil {
return err
}
fixPermissions(w.fs, packPath)
if !exists {
if err := w.fs.Rename(w.fw.Name(), packPath); err != nil {
return err
}
fixPermissions(w.fs, packPath)
} else {
// Pack already exists, clean up the temp file.
return w.clean()
}
return nil
}
// fileExists checks whether path already exists as a regular file.
// It returns (true, nil) for an existing regular file, (false, nil)
// when the path does not exist, and (false, err) when Lstat fails for
// any other reason or the path exists but is not a regular file
// (e.g. a directory or symlink).
func fileExists(fs billy.Filesystem, path string) (bool, error) {
	fi, err := fs.Lstat(path)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		// Propagate real failures (permissions, I/O) instead of
		// silently treating them as "file absent" — the previous
		// behavior masked errors and contradicted this doc comment.
		return false, err
	}
	if !fi.Mode().IsRegular() {
		return false, fmt.Errorf("unexpected file type for %q: %s", path, fi.Mode().Type())
	}
	return true, nil
}
func (w *PackWriter) encodeIdx(writer io.Writer) error {
idx, err := w.writer.Index()
if err != nil {
@@ -235,7 +269,6 @@ func (s *syncedReader) sleep() {
atomic.StoreUint32(&s.blocked, 1)
<-s.news
}
}
func (s *syncedReader) Seek(offset int64, whence int) (int64, error) {
@@ -293,7 +326,7 @@ func (w *ObjectWriter) save() error {
// Loose objects are content addressable, if they already exist
// we can safely delete the temporary file and short-circuit the
// operation.
if _, err := w.fs.Stat(file); err == nil || os.IsExist(err) {
if _, err := w.fs.Lstat(file); err == nil || os.IsExist(err) {
return w.fs.Remove(w.f.Name())
}

View File

@@ -15,7 +15,7 @@ It has the following **unique** features:
- Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated.
- Ability to dive into both map keys and values for validation
- Handles type interface by determining its underlying type prior to validation.
- Handles custom field types such as sql driver Valuer see [Valuer](https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29)
- Handles custom field types such as sql driver [Valuer](https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29) and the [Valuer interface](https://github.com/go-playground/validator/blob/master/_examples/valuer/main.go)
- Alias validation tags, which allows for mapping of several validations to a single tag for easier defining of validations on structs
- Extraction of custom defined Field Name e.g. can specify to extract the JSON name while validating and have it available in the resulting FieldError
- Customizable i18n aware error messages.
@@ -24,7 +24,7 @@ It has the following **unique** features:
A Call for Maintainers
----------------------
Please read the discussiong started [here](https://github.com/go-playground/validator/discussions/1330) if you are interested in contributing/helping maintain this package.
Please read the discussion started [here](https://github.com/go-playground/validator/discussions/1330) if you are interested in contributing/helping maintain this package.
Installation
------------
@@ -178,13 +178,14 @@ validate := validator.New(validator.WithRequiredStructEnabled())
| spicedb | SpiceDb ObjectID/Permission/Type |
| datetime | Datetime |
| e164 | e164 formatted phone number |
| ein | U.S. Employeer Identification Number |
| ein | U.S. Employer Identification Number |
| email | E-mail String
| eth_addr | Ethereum Address |
| hexadecimal | Hexadecimal String |
| hexcolor | Hexcolor String |
| hsl | HSL String |
| hsla | HSLA String |
| cmyk | CMYK String |
| html | HTML Tags |
| html_encoded | HTML Encoded |
| isbn | International Standard Book Number |
@@ -274,7 +275,7 @@ validate := validator.New(validator.WithRequiredStructEnabled())
#### Aliases:
| Tag | Description |
| - | - |
| iscolor | hexcolor\|rgb\|rgba\|hsl\|hsla |
| iscolor | hexcolor\|rgb\|rgba\|hsl\|hsla\|cmyk |
| country_code | iso3166_1_alpha2\|iso3166_1_alpha3\|iso3166_1_alpha_numeric |
Benchmarks

View File

@@ -70,7 +70,7 @@ var (
// defines a common or complex set of validation(s) to simplify
// adding validation to structs.
bakedInAliases = map[string]string{
"iscolor": "hexcolor|rgb|rgba|hsl|hsla",
"iscolor": "hexcolor|rgb|rgba|hsl|hsla|cmyk",
"country_code": "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric",
"eu_country_code": "iso3166_1_alpha2_eu|iso3166_1_alpha3_eu|iso3166_1_alpha_numeric_eu",
}
@@ -134,6 +134,7 @@ var (
"rgba": isRGBA,
"hsl": isHSL,
"hsla": isHSLA,
"cmyk": isCMYK,
"e164": isE164,
"email": isEmail,
"url": isURL,
@@ -335,61 +336,110 @@ func isOneOfCI(fl FieldLevel) bool {
func isUnique(fl FieldLevel) bool {
field := fl.Field()
param := fl.Param()
v := reflect.ValueOf(struct{}{})
// sentinel used as map key for nil values
var nilKey = struct{}{}
switch field.Kind() {
case reflect.Slice, reflect.Array:
elem := field.Type().Elem()
if elem.Kind() == reflect.Ptr {
elem = elem.Elem()
}
seen := make(map[interface{}]struct{})
if param == "" {
m := reflect.MakeMap(reflect.MapOf(elem, v.Type()))
for i := 0; i < field.Len(); i++ {
m.SetMapIndex(reflect.Indirect(field.Index(i)), v)
}
return field.Len() == m.Len()
}
sf, ok := elem.FieldByName(param)
if !ok {
panic(fmt.Sprintf("Bad field name %s", param))
}
sfTyp := sf.Type
if sfTyp.Kind() == reflect.Ptr {
sfTyp = sfTyp.Elem()
}
m := reflect.MakeMap(reflect.MapOf(sfTyp, v.Type()))
var fieldlen int
for i := 0; i < field.Len(); i++ {
key := reflect.Indirect(reflect.Indirect(field.Index(i)).FieldByName(param))
if key.IsValid() {
fieldlen++
m.SetMapIndex(key, v)
elem := field.Index(i)
// -------- unique (no param) --------
if param == "" {
var key interface{}
if elem.Kind() == reflect.Ptr {
if elem.IsNil() {
key = nilKey
} else {
key = elem.Elem().Interface() // <-- compare underlying value
}
} else {
key = elem.Interface()
}
if _, ok := seen[key]; ok {
return false
}
seen[key] = struct{}{}
continue
}
// -------- unique=Field --------
if elem.Kind() == reflect.Ptr {
if elem.IsNil() {
if _, ok := seen[nilKey]; ok {
return false
}
seen[nilKey] = struct{}{}
continue
}
elem = elem.Elem()
}
if elem.Kind() != reflect.Struct {
panic(fmt.Sprintf("Bad field type %s", elem.Type()))
}
sf := elem.FieldByName(param)
if !sf.IsValid() {
panic(fmt.Sprintf("Bad field name %s", param))
}
var key interface{}
if sf.Kind() == reflect.Ptr {
if sf.IsNil() {
key = nilKey
} else {
key = sf.Elem().Interface()
}
} else {
key = sf.Interface()
}
if _, ok := seen[key]; ok {
return false
}
seen[key] = struct{}{}
}
return fieldlen == m.Len()
return true
case reflect.Map:
var m reflect.Value
if field.Type().Elem().Kind() == reflect.Ptr {
m = reflect.MakeMap(reflect.MapOf(field.Type().Elem().Elem(), v.Type()))
} else {
m = reflect.MakeMap(reflect.MapOf(field.Type().Elem(), v.Type()))
}
seen := make(map[interface{}]struct{})
for _, k := range field.MapKeys() {
m.SetMapIndex(reflect.Indirect(field.MapIndex(k)), v)
val := field.MapIndex(k)
var key interface{}
if val.Kind() == reflect.Ptr {
if val.IsNil() {
key = nilKey
} else {
key = val.Elem().Interface() // <-- compare underlying value
}
} else {
key = val.Interface()
}
if _, ok := seen[key]; ok {
return false
}
seen[key] = struct{}{}
}
return field.Len() == m.Len()
return true
default:
if parent := fl.Parent(); parent.Kind() == reflect.Struct {
uniqueField := parent.FieldByName(param)
if uniqueField == reflect.ValueOf(nil) {
if !uniqueField.IsValid() {
panic(fmt.Sprintf("Bad field name provided %s", param))
}
@@ -1721,6 +1771,11 @@ func isHSLA(fl FieldLevel) bool {
return hslaRegex().MatchString(fl.Field().String())
}
// isCMYK is the validation function for validating if the current field's value is a valid CMYK color.
func isCMYK(fl FieldLevel) bool {
	value := fl.Field().String()
	return cmykRegex().MatchString(value)
}
// isHSL is the validation function for validating if the current field's value is a valid HSL color.
func isHSL(fl FieldLevel) bool {
return hslRegex().MatchString(fl.Field().String())

View File

@@ -52,6 +52,40 @@ Custom Validation functions can be added. Example:
// NOTES: using the same tag name as an existing function
// will overwrite the existing one
# Valuer Interface
Custom types can implement the Valuer interface to return the value that should
be validated. This is useful when a type wraps another value and you want
validation to run against the unwrapped value.
type Nullable[T any] struct {
Data T
}
func (n Nullable[T]) ValidatorValue() any {
return n.Data
}
type Config struct {
Name string `validate:"required"`
}
type Record struct {
Config Nullable[Config] `validate:"required"`
}
r := Record{
Config: Nullable[Config]{
Data: Config{Name: "validator"},
},
}
err := validate.Struct(r)
The library also supports types like sql/driver.Valuer using
RegisterCustomTypeFunc. See _examples/valuer/main.go and
_examples/custom/main.go for both approaches.
# Cross-Field Validation
Cross-Field Validation can be done via the following tags:
@@ -886,6 +920,12 @@ This validates that a string value contains a valid hsla color
Usage: hsla
# CMYK String
This validates that a string value contains a valid cmyk color
Usage: cmyk
# E.164 Phone Number String
This validates that a string value contains a valid E.164 Phone number
@@ -1170,7 +1210,7 @@ This validates that a string value contains a valid longitude.
Usage: longitude
# Employeer Identification Number EIN
# Employer Identification Number EIN
This validates that a string value contains a valid U.S. Employer Identification Number.

View File

@@ -50,6 +50,7 @@ var postCodePatternDict = map[string]string{
"KH": `^\d{5}$`,
"CV": `^\d{4}$`,
"CL": `^\d{7}$`,
"CO": `^\d{6}$`,
"CR": `^\d{4,5}|\d{3}-\d{4}$`,
"HR": `^\d{5}$`,
"CY": `^\d{4}$`,
@@ -149,6 +150,7 @@ var postCodePatternDict = map[string]string{
"MQ": `^9[78]2\d{2}$`,
"NC": `^988\d{2}$`,
"NE": `^\d{4}$`,
"VG": `^VG\d{4}$`,
"VI": `^008(([0-4]\d)|(5[01]))([ \-]\d{4})?$`,
"VN": `^[0-9]{1,6}$`,
"PF": `^987\d{2}$`,

View File

@@ -20,6 +20,7 @@ const (
rgbaRegexString = "^rgba\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
hslRegexString = "^hsl\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*\\)$"
hslaRegexString = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
cmykRegexString = "^cmyk\\((100|[1-9]?\\d)%\\s*,\\s*(100|[1-9]?\\d)%\\s*,\\s*(100|[1-9]?\\d)%\\s*,\\s*(100|[1-9]?\\d)%\\)$"
emailRegexString = "^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22))))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
e164RegexString = "^\\+?[1-9]\\d{7,14}$"
base32RegexString = "^(?:[A-Z2-7]{8})*(?:[A-Z2-7]{2}={6}|[A-Z2-7]{4}={4}|[A-Z2-7]{5}={3}|[A-Z2-7]{7}=|[A-Z2-7]{8})$"
@@ -57,7 +58,7 @@ const (
sSNRegexString = `^[0-9]{3}[ -]?(0[1-9]|[1-9][0-9])[ -]?([1-9][0-9]{3}|[0-9][1-9][0-9]{2}|[0-9]{2}[1-9][0-9]|[0-9]{3}[1-9])$`
hostnameRegexStringRFC952 = `^[a-zA-Z]([a-zA-Z0-9\-]+[\.]?)*[a-zA-Z0-9]$` // https://tools.ietf.org/html/rfc952
hostnameRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62}){1}(\.[a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})*?$` // accepts hostname starting with a digit https://tools.ietf.org/html/rfc1123
fqdnRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})(\.[a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})*?(\.[a-zA-Z]{1}[a-zA-Z0-9]{0,62})\.?$` // same as hostnameRegexStringRFC1123 but must contain a non numerical TLD (possibly ending with '.')
fqdnRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})(\.[a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})*?(\.[a-zA-Z]{1}[a-zA-Z0-9-]{0,62})\.?$` // same as hostnameRegexStringRFC1123 but must contain a non numerical TLD (possibly ending with '.')
btcAddressRegexString = `^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$` // bitcoin address
btcAddressUpperRegexStringBech32 = `^BC1[02-9AC-HJ-NP-Z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
btcAddressLowerRegexStringBech32 = `^bc1[02-9ac-hj-np-z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
@@ -109,6 +110,7 @@ var (
rgbaRegex = lazyRegexCompile(rgbaRegexString)
hslRegex = lazyRegexCompile(hslRegexString)
hslaRegex = lazyRegexCompile(hslaRegexString)
cmykRegex = lazyRegexCompile(cmykRegexString)
e164Regex = lazyRegexCompile(e164RegexString)
emailRegex = lazyRegexCompile(emailRegexString)
base32Regex = lazyRegexCompile(base32RegexString)

View File

@@ -9,6 +9,13 @@ import (
"time"
)
// Valuer is an interface that allows you to expose a method on a type
// (including generic types) that returns a value that is supposed to be validated.
//
// When a value implements Valuer, type extraction substitutes the
// result of ValidatorValue for the wrapper, so validation rules run
// against the unwrapped value.
type Valuer interface {
	// ValidatorValue returns the value that is supposed to be validated.
	ValidatorValue() any
}
// extractTypeInternal gets the actual underlying type of field value.
// It will dive into pointers, customTypes and return you the
// underlying value and it's kind.
@@ -23,6 +30,13 @@ BEGIN:
return current, reflect.Ptr, nullable
}
if current.CanInterface() {
if v, ok := current.Interface().(Valuer); ok {
current = reflect.ValueOf(v.ValidatorValue())
goto BEGIN
}
}
current = current.Elem()
goto BEGIN
@@ -34,6 +48,13 @@ BEGIN:
return current, reflect.Interface, nullable
}
if current.CanInterface() {
if v, ok := current.Interface().(Valuer); ok {
current = reflect.ValueOf(v.ValidatorValue())
goto BEGIN
}
}
current = current.Elem()
goto BEGIN
@@ -42,6 +63,13 @@ BEGIN:
default:
if current.CanInterface() {
if v, ok := current.Interface().(Valuer); ok {
current = reflect.ValueOf(v.ValidatorValue())
goto BEGIN
}
}
if v.v.hasCustomFuncs {
if fn, ok := v.v.customFuncs[current.Type()]; ok {
current = reflect.ValueOf(fn(current))

View File

@@ -23,7 +23,7 @@ A [Go](http://golang.org) client for the [NATS messaging system](https://nats.io
go get github.com/nats-io/nats.go@latest
# To get a specific version:
go get github.com/nats-io/nats.go@v1.50.0
go get github.com/nats-io/nats.go@v1.51.0
# Note that the latest major version for NATS Server is v2:
go get github.com/nats-io/nats-server/v2@latest

View File

@@ -194,13 +194,6 @@ const (
unset = -1
)
func min(x, y int) int {
if x < y {
return x
}
return y
}
// Consume can be used to continuously receive messages and handle them
// with the provided callback function. Consume cannot be used concurrently
// when using ordered consumer.
@@ -258,14 +251,6 @@ func (p *pullConsumer) Consume(handler MessageHandler, opts ...PullConsumeOpt) (
}
return
}
defer func() {
sub.Lock()
sub.checkPending()
if sub.hbMonitor != nil {
sub.hbMonitor.Reset(2 * consumeOpts.Heartbeat)
}
sub.Unlock()
}()
if !userMsg {
// heartbeat message
if msgErr == nil {
@@ -273,15 +258,24 @@ func (p *pullConsumer) Consume(handler MessageHandler, opts ...PullConsumeOpt) (
}
sub.Lock()
err := sub.handleStatusMsg(msg, msgErr)
termErr, notifyErr := sub.handleStatusMsg(msg, msgErr)
if termErr == nil {
sub.checkPending()
if sub.hbMonitor != nil {
sub.hbMonitor.Reset(2 * consumeOpts.Heartbeat)
}
}
sub.Unlock()
if err != nil {
if sub.consumeOpts.ErrHandler != nil && notifyErr != nil {
sub.consumeOpts.ErrHandler(sub, notifyErr)
}
if termErr != nil {
if sub.closed.Load() == 1 {
return
}
if sub.consumeOpts.ErrHandler != nil {
sub.consumeOpts.ErrHandler(sub, err)
sub.consumeOpts.ErrHandler(sub, termErr)
}
sub.Stop()
}
@@ -294,6 +288,10 @@ func (p *pullConsumer) Consume(handler MessageHandler, opts ...PullConsumeOpt) (
sub.Lock()
sub.decrementPendingMsgs(msg)
sub.incrementDeliveredMsgs()
sub.checkPending()
if sub.hbMonitor != nil {
sub.hbMonitor.Reset(2 * consumeOpts.Heartbeat)
}
sub.Unlock()
if sub.consumeOpts.StopAfter > 0 && sub.consumeOpts.StopAfter == sub.delivered {
@@ -388,9 +386,6 @@ func (p *pullConsumer) Consume(handler MessageHandler, opts ...PullConsumeOpt) (
}
case err := <-sub.errs:
sub.Lock()
if sub.consumeOpts.ErrHandler != nil {
sub.consumeOpts.ErrHandler(sub, err)
}
if errors.Is(err, ErrNoHeartbeat) {
batchSize := sub.consumeOpts.MaxMessages
if sub.consumeOpts.StopAfter > 0 {
@@ -413,6 +408,9 @@ func (p *pullConsumer) Consume(handler MessageHandler, opts ...PullConsumeOpt) (
sub.resetPendingMsgs()
}
sub.Unlock()
if sub.consumeOpts.ErrHandler != nil {
sub.consumeOpts.ErrHandler(sub, err)
}
if errors.Is(err, ErrConnectionClosed) {
sub.Stop()
}
@@ -664,9 +662,9 @@ func (s *pullSubscription) Next(opts ...NextOpt) (Msg, error) {
if msgErr == nil {
continue
}
if err := s.handleStatusMsg(msg, msgErr); err != nil {
if termErr, _ := s.handleStatusMsg(msg, msgErr); termErr != nil {
s.Stop()
return nil, err
return nil, termErr
}
continue
}
@@ -715,28 +713,29 @@ func (s *pullSubscription) Next(opts ...NextOpt) (Msg, error) {
}
}
func (s *pullSubscription) handleStatusMsg(msg *nats.Msg, msgErr error) error {
// handleStatusMsg processes a status message from the server.
// It returns a terminal error (caller should stop) and a non-terminal
// error to notify the user about via ErrHandler. The caller should invoke
// ErrHandler outside the lock to avoid deadlocks.
func (s *pullSubscription) handleStatusMsg(msg *nats.Msg, msgErr error) (error, error) {
if !errors.Is(msgErr, nats.ErrTimeout) && !errors.Is(msgErr, ErrMaxBytesExceeded) && !errors.Is(msgErr, ErrBatchCompleted) {
if errors.Is(msgErr, ErrConsumerDeleted) || errors.Is(msgErr, ErrBadRequest) {
return msgErr
return msgErr, nil
}
if errors.Is(msgErr, ErrPinIDMismatch) {
s.consumer.setPinID("")
s.pending.msgCount = 0
s.pending.byteCount = 0
}
if s.consumeOpts.ErrHandler != nil {
s.consumeOpts.ErrHandler(s, msgErr)
}
if errors.Is(msgErr, ErrConsumerLeadershipChanged) {
s.pending.msgCount = 0
s.pending.byteCount = 0
}
return nil
return nil, msgErr
}
msgsLeft, bytesLeft, err := parsePending(msg)
if err != nil {
return err
return err, nil
}
s.pending.msgCount -= msgsLeft
if s.pending.msgCount < 0 {
@@ -748,7 +747,7 @@ func (s *pullSubscription) handleStatusMsg(msg *nats.Msg, msgErr error) error {
s.pending.byteCount = 0
}
}
return nil
return nil, nil
}
func (hb *hbMonitor) Stop() {

View File

@@ -2029,10 +2029,7 @@ func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync,
// If maxap is greater than the default sub's pending limit, use that.
if maxap > DefaultSubPendingMsgsLimit {
// For bytes limit, use the min of maxp*1MB or DefaultSubPendingBytesLimit
bl := maxap * 1024 * 1024
if bl < DefaultSubPendingBytesLimit {
bl = DefaultSubPendingBytesLimit
}
bl := max(maxap*1024*1024, DefaultSubPendingBytesLimit)
if err := sub.SetPendingLimits(maxap, bl); err != nil {
return nil, err
}
@@ -3114,10 +3111,7 @@ func (sub *Subscription) Fetch(batch int, opts ...PullOpt) ([]*Msg, error) {
}
// Make our request expiration a bit shorter than the current timeout.
expiresDiff := time.Duration(float64(ttl) * 0.1)
if expiresDiff > 5*time.Second {
expiresDiff = 5 * time.Second
}
expiresDiff := min(time.Duration(float64(ttl)*0.1), 5*time.Second)
expires := ttl - expiresDiff
nr.Batch = batch - len(msgs)
@@ -3398,10 +3392,7 @@ func (sub *Subscription) FetchBatch(batch int, opts ...PullOpt) (MessageBatch, e
ttl = time.Until(deadline)
// Make our request expiration a bit shorter than the current timeout.
expiresDiff := time.Duration(float64(ttl) * 0.1)
if expiresDiff > 5*time.Second {
expiresDiff = 5 * time.Second
}
expiresDiff := min(time.Duration(float64(ttl)*0.1), 5*time.Second)
expires := ttl - expiresDiff
connStatusChanged := nc.StatusChanged()

View File

@@ -49,7 +49,7 @@ import (
// Default Constants
const (
Version = "1.50.0"
Version = "1.51.0"
DefaultURL = "nats://127.0.0.1:4222"
DefaultPort = 4222
DefaultMaxReconnect = 60
@@ -61,6 +61,7 @@ const (
DefaultMaxPingOut = 2
DefaultMaxChanLen = 64 * 1024 // 64k
DefaultReconnectBufSize = 8 * 1024 * 1024 // 8MB
DefaultWriteBufSize = defaultBufSize
RequestChanLen = 8
DefaultDrainTimeout = 30 * time.Second
DefaultFlusherTimeout = time.Minute
@@ -409,6 +410,34 @@ type Options struct {
// Defaults to 1m.
FlusherTimeout time.Duration
// ReconnectOnFlusherError, when set to true, causes the client to
// trigger a reconnect if the background flusher fails to write to the
// underlying connection for any reason (timeout, broken pipe,
// connection reset, EOF etc.).
//
// This is an advanced option. Most applications do not need to enable
// it: the server-side stale connection detection (via PingInterval /
// MaxPingsOut) and the read loop's own error handling will eventually
// notice a dead connection and the client will reconnect. Enable this
// only if you need faster recovery from a stalled or broken TCP write
// — for example, in latency-sensitive setups where waiting for a ping
// timeout is unacceptable.
//
// Messages buffered at the time of the error are lost, as they are
// with any flusher write error. The purpose of this option is to
// limit the blast radius by preventing further messages from being
// buffered into a potentially corrupted connection, not to recover
// the in-flight data.
//
// When triggered, the standard DisconnectErrHandler and
// ReconnectHandler callbacks are invoked as with any other reconnect.
// The first reconnect attempt bypasses the configured ReconnectWait
// so that recovery is as fast as possible; if that attempt fails,
// subsequent attempts obey the normal backoff.
//
// Defaults to false.
ReconnectOnFlusherError bool
// PingInterval is the period at which the client will be sending ping
// commands to the server, disabled if 0 or negative.
// Defaults to 2m.
@@ -574,6 +603,14 @@ type Options struct {
// IgnoreDiscoveredServers will disable adding advertised server URLs
// from INFO messages to the server pool.
IgnoreDiscoveredServers bool
// WriteBufferSize is an advanced option that sets the flush threshold
// of the write buffer used to batch outgoing data before writing to
// the underlying connection. In most cases, the default value should
// not be changed. A smaller buffer reduces the amount of data that
// can be lost on blocked writes but may significantly reduce throughput.
// Defaults to 32768 bytes (32KB).
WriteBufferSize int
}
const (
@@ -888,6 +925,14 @@ type ServerInfo struct {
Cluster string `json:"cluster,omitempty"`
ConnectURLs []string `json:"connect_urls,omitempty"`
LameDuckMode bool `json:"ldm,omitempty"`
// JetStream indicates whether the server has JetStream enabled.
JetStream bool `json:"jetstream,omitempty"`
// IsSystemAccount indicates whether the connected client's account
// is the system account.
IsSystemAccount bool `json:"acc_is_sys,omitempty"`
// JSApiLevel is the JetStream API level advertised by the server.
// Requires nats-server v2.12.0 or later; older servers will report 0.
JSApiLevel int `json:"api_lvl,omitempty"`
}
const (
@@ -1172,6 +1217,19 @@ func ReconnectBufSize(size int) Option {
}
}
// WriteBufferSize is an advanced option that sets the flush threshold
// of the write buffer used to batch outgoing data before writing to
// the underlying connection. In most cases, the default value should
// not be changed. A smaller buffer reduces the amount of data that
// can be lost on blocked writes but may significantly reduce throughput.
// Defaults to 32768 bytes (32KB).
func WriteBufferSize(size int) Option {
	return func(opts *Options) error {
		opts.WriteBufferSize = size
		return nil
	}
}
// Timeout is an Option to set the timeout for Dial on a connection.
// Defaults to 2s.
func Timeout(t time.Duration) Option {
@@ -1189,6 +1247,17 @@ func FlusherTimeout(t time.Duration) Option {
}
}
// ReconnectOnFlusherError is an Option to automatically trigger a
// reconnect when the background flusher hits any write error. See
// [Options.ReconnectOnFlusherError] for details. This is an
// advanced option and is usually not required.
func ReconnectOnFlusherError() Option {
	return func(opts *Options) error {
		opts.ReconnectOnFlusherError = true
		return nil
	}
}
// DrainTimeout is an Option to set the timeout for draining a connection.
// Defaults to 30s.
func DrainTimeout(t time.Duration) Option {
@@ -1741,6 +1810,10 @@ func (o Options) Connect() (*Conn, error) {
if nc.Opts.ReconnectBufSize == 0 {
nc.Opts.ReconnectBufSize = DefaultReconnectBufSize
}
// Default WriteBufferSize
if nc.Opts.WriteBufferSize <= 0 {
nc.Opts.WriteBufferSize = DefaultWriteBufSize
}
// Ensure that Timeout is not 0
if nc.Opts.Timeout == 0 {
nc.Opts.Timeout = DefaultTimeout
@@ -2080,7 +2153,7 @@ func (nc *Conn) newReaderWriter() {
off: -1,
}
nc.bw = &natsWriter{
limit: defaultBufSize,
limit: nc.Opts.WriteBufferSize,
plimit: nc.Opts.ReconnectBufSize,
}
}
@@ -2414,8 +2487,10 @@ func (nc *Conn) ForceReconnect() error {
// Stop ping timer if set.
nc.stopPingTimer()
// Go ahead and make sure we have flushed the outbound
// flush any pending data and switch to pending mode to buffer new outgoing
// data until we reconnect and can flush it.
nc.bw.flush()
nc.bw.switchToPending()
nc.conn.Close()
nc.changeConnStatus(RECONNECTING)
@@ -2574,6 +2649,40 @@ func (nc *Conn) ConnectedClusterName() string {
return nc.info.Cluster
}
// ConnectedServerJetStream reports whether the connected server has
// JetStream enabled and, if so, its API level. The API level is
// advertised by nats-server v2.12.0 or later; older servers will
// report 0 even when JetStream is enabled.
func (nc *Conn) ConnectedServerJetStream() (bool, int) {
	if nc == nil {
		return false, 0
	}
	nc.mu.RLock()
	defer nc.mu.RUnlock()
	// Only report server info while actually connected; during a
	// reconnect the cached info may be stale.
	jsEnabled, apiLevel := false, 0
	if nc.status == CONNECTED {
		jsEnabled, apiLevel = nc.info.JetStream, nc.info.JSApiLevel
	}
	return jsEnabled, apiLevel
}
// IsSystemAccount reports whether the connected client's account
// is the system account. It returns false on a nil connection or
// when the client is not currently connected.
func (nc *Conn) IsSystemAccount() bool {
	if nc == nil {
		return false
	}
	nc.mu.RLock()
	defer nc.mu.RUnlock()
	return nc.status == CONNECTED && nc.info.IsSystemAccount
}
// Low level setup for structs, etc
func (nc *Conn) setup() {
nc.subs = make(map[int64]*Subscription)
@@ -3316,8 +3425,10 @@ func (nc *Conn) doReconnect(err error, forceReconnect bool) {
}
// processOpErr handles errors from reading or parsing the protocol.
// The lock should not be held entering this function.
func (nc *Conn) processOpErr(err error) bool {
// The lock should not be held entering this function. If forceReconnect
// is true, the first reconnect attempt will bypass the configured
// ReconnectWait; subsequent attempts still obey the normal backoff.
func (nc *Conn) processOpErr(err error, forceReconnect bool) bool {
nc.mu.Lock()
defer nc.mu.Unlock()
if nc.isConnecting() || nc.isClosed() || nc.isReconnecting() {
@@ -3340,7 +3451,7 @@ func (nc *Conn) processOpErr(err error) bool {
// Clear any queued pongs, e.g. pending flush calls.
nc.clearPendingFlushCalls()
go nc.doReconnect(err, false)
go nc.doReconnect(err, forceReconnect)
return false
}
@@ -3443,7 +3554,7 @@ func (nc *Conn) readLoop() {
err = nc.parse(buf)
}
if err != nil {
if shouldClose := nc.processOpErr(err); shouldClose {
if shouldClose := nc.processOpErr(err, false); shouldClose {
nc.close(CLOSED, true, nil)
}
break
@@ -3891,6 +4002,13 @@ func (nc *Conn) flusher() {
if asyncErrorCB := nc.Opts.AsyncErrorCB; asyncErrorCB != nil {
nc.ach.push(func() { asyncErrorCB(nc, nil, err) })
}
if nc.Opts.ReconnectOnFlusherError {
nc.mu.Unlock()
if shouldClose := nc.processOpErr(err, true); shouldClose {
nc.close(CLOSED, true, nil)
}
return
}
}
}
nc.mu.Unlock()
@@ -4070,11 +4188,11 @@ func (nc *Conn) processErr(ie string) {
// FIXME(dlc) - process Slow Consumer signals special.
if e == STALE_CONNECTION {
close = nc.processOpErr(ErrStaleConnection)
close = nc.processOpErr(ErrStaleConnection, false)
} else if e == MAX_CONNECTIONS_ERR {
close = nc.processOpErr(ErrMaxConnectionsExceeded)
close = nc.processOpErr(ErrMaxConnectionsExceeded, false)
} else if e == MAX_ACCOUNT_CONNECTIONS_ERR {
close = nc.processOpErr(ErrMaxAccountConnectionsExceeded)
close = nc.processOpErr(ErrMaxAccountConnectionsExceeded, false)
} else if strings.HasPrefix(e, PERMISSIONS_ERR) {
nc.processTransientError(fmt.Errorf("%w: %s", ErrPermissionViolation, ne))
} else if strings.HasPrefix(e, MAX_SUBSCRIPTIONS_ERR) {
@@ -5656,7 +5774,7 @@ func (nc *Conn) processPingTimer() {
nc.pout++
if nc.pout > nc.Opts.MaxPingsOut {
nc.mu.Unlock()
if shouldClose := nc.processOpErr(ErrStaleConnection); shouldClose {
if shouldClose := nc.processOpErr(ErrStaleConnection, false); shouldClose {
nc.close(CLOSED, true, nil)
}
return

View File

@@ -55,6 +55,12 @@ const (
wsMaxControlPayloadSize = 125
wsCloseSatusSize = 2
// wsMaxMsgPayloadMultiple is the multiplier applied to MaxPayload to
// determine the maximum WebSocket frame size.
wsMaxMsgPayloadMultiple = 8
// wsMaxMsgPayloadLimit is the absolute cap on WebSocket frame size (64MB).
wsMaxMsgPayloadLimit = 64 * 1024 * 1024
// From https://tools.ietf.org/html/rfc6455#section-11.7
wsCloseStatusNormalClosure = 1000
wsCloseStatusNoStatusReceived = 1005
@@ -114,10 +120,7 @@ func (d *wsDecompressor) Read(dst []byte) (int, error) {
copied := 0
rem := len(dst)
for buf := d.bufs[0]; buf != nil && rem > 0; {
n := len(buf[d.off:])
if n > rem {
n = rem
}
n := min(len(buf[d.off:]), rem)
copy(dst[copied:], buf[d.off:d.off+n])
copied += n
rem -= n
@@ -182,6 +185,18 @@ func wsNewReader(r io.Reader) *websocketReader {
return &websocketReader{r: r, ff: true}
}
// maxFrameSize returns the maximum allowed WebSocket frame size based on the
// negotiated MaxPayload. This mirrors the server-side wsMaxMessageSize logic.
func (r *websocketReader) maxFrameSize() uint64 {
	if r.nc == nil {
		return wsMaxMsgPayloadLimit
	}
	maxPayload := r.nc.info.MaxPayload
	// Scale MaxPayload by the multiple unless that would overflow the
	// absolute 64MB cap (or MaxPayload is unset/non-positive).
	if maxPayload <= 0 || uint64(maxPayload) > wsMaxMsgPayloadLimit/wsMaxMsgPayloadMultiple {
		return wsMaxMsgPayloadLimit
	}
	return uint64(maxPayload) * wsMaxMsgPayloadMultiple
}
// From now on, reads will be from the readLoop and we will need to
// acquire the connection lock should we have to send/write a control
// message from handleControlFrame.
@@ -288,7 +303,14 @@ func (r *websocketReader) Read(p []byte) (int, error) {
if err != nil {
return 0, err
}
rem = int(binary.BigEndian.Uint64(tmpBuf))
rem64 := binary.BigEndian.Uint64(tmpBuf)
if rem64&(1<<63) != 0 {
return 0, errors.New("invalid websocket frame: MSB set in 64-bit payload length")
}
if rem64 > r.maxFrameSize() {
return 0, fmt.Errorf("websocket frame too large: %d", rem64)
}
rem = int(rem64)
}
// Handle control messages in place...

View File

@@ -35,7 +35,7 @@ import (
var tracer trace.Tracer
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/cache")
tracer = otel.Tracer("github.com/opencloud-eu/reva/v2/pkg/storage/cache")
}
// NewStatCache creates a new StatCache

View File

@@ -53,7 +53,7 @@ var _spaceTypePersonal = "personal"
var _spaceTypeProject = "project"
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/pkg/decomposedfs/lookup")
tracer = otel.Tracer("github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup")
}
// IDCache is a cache for node ids

View File

@@ -50,7 +50,7 @@ var (
)
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/fs/posix/trashbin")
tracer = otel.Tracer("github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/trashbin")
}
type Trashbin struct {

View File

@@ -1,23 +0,0 @@
//go:build !linux
// Copyright 2025 OpenCloud GmbH <mail@opencloud.eu>
// SPDX-License-Identifier: Apache-2.0
package tree
import (
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options"
"github.com/rs/zerolog"
)
// NullWatcher is a dummy watcher that does nothing
type NullWatcher struct{}
// Watch does nothing
func (*NullWatcher) Watch(path string) {}
// NewInotifyWatcher returns a new inotify watcher
func NewInotifyWatcher(_ *Tree, _ *options.Options, _ *zerolog.Logger) (*NullWatcher, error) {
return nil, errtypes.NotSupported("inotify watcher is not supported on this platform")
}

View File

@@ -68,7 +68,7 @@ var (
)
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/pkg/decomposedfs/tree")
tracer = otel.Tracer("github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree")
}
type Watcher interface {
@@ -160,7 +160,7 @@ func New(lu node.PathLookup, bs node.Blobstore, um usermapper.Mapper, trashbin *
return nil, err
}
default:
t.watcher, err = NewInotifyWatcher(t, o, log)
t.watcher, err = NewWatcher(t, o, log)
if err != nil {
return nil, err
}
@@ -360,6 +360,9 @@ func (t *Tree) CreateDir(ctx context.Context, n *node.Node) (err error) {
// Move replaces the target with the source
func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) (err error) {
ctx, span := tracer.Start(ctx, "Move")
defer span.End()
if oldNode.SpaceID != newNode.SpaceID {
// WebDAV RFC https://www.rfc-editor.org/rfc/rfc4918#section-9.9.4 says to use
// > 502 (Bad Gateway) - This may occur when the destination is on another
@@ -382,6 +385,7 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node)
newNode.ID = oldNode.ID
}
_, subspan := tracer.Start(ctx, "os.Rename")
// rename node
err = os.Rename(
filepath.Join(oldParent, oldNode.Name),
@@ -390,7 +394,9 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node)
if err != nil {
return errors.Wrap(err, "posixfs: could not move child")
}
subspan.End()
_, subspan = tracer.Start(ctx, "update id cache and attributes")
// update the id cache
// invalidate old tree
err = t.lookup.IDCache.DeleteByPath(ctx, filepath.Join(oldNode.ParentPath(), oldNode.Name))
@@ -409,6 +415,9 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node)
return errors.Wrap(err, "posixfs: could not update node attributes")
}
subspan.End()
_, subspan = tracer.Start(ctx, "warmup id cache for moved subtree")
// update id cache for the moved subtree.
if oldNode.IsDir(ctx) {
err = t.WarmupIDCache(filepath.Join(newNode.ParentPath(), newNode.Name), false, false)
@@ -416,6 +425,7 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node)
return err
}
}
subspan.End()
// the size diff is the current treesize or blobsize of the old/source node
var sizeDiff int64
@@ -429,6 +439,7 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node)
sizeDiff = oldNode.Blobsize
}
_, subspan = tracer.Start(ctx, "propagate size changes")
err = t.Propagate(ctx, oldNode, -sizeDiff)
if err != nil {
return errors.Wrap(err, "posixfs: Move: could not propagate old node")
@@ -437,6 +448,7 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node)
if err != nil {
return errors.Wrap(err, "posixfs: Move: could not propagate new node")
}
subspan.End()
return nil
}

View File

@@ -0,0 +1,13 @@
package tree
import (
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
)
// ErrUnsupportedWatcher is returned by NewWatcher on platforms that have
// no filesystem-watching implementation.
var ErrUnsupportedWatcher = errtypes.NotSupported("watching the filesystem is not supported on this platform")

// NoopWatcher is a watcher that does nothing.
type NoopWatcher struct{}

// Watch does nothing — presumably provided so NoopWatcher can stand in
// where a Watcher is expected; TODO confirm against the Watcher interface.
func (*NoopWatcher) Watch(_ string) {}

View File

@@ -0,0 +1,226 @@
//go:build darwin && experimental_watchfs_darwin
package tree
import (
"io/fs"
"os"
"path/filepath"
"strings"
"sync"
"github.com/fsnotify/fsnotify"
"github.com/rs/zerolog"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/watcher"
)
// FSnotifyWatcher fills the gap with fsnotify on Darwin, be careful with its limitations.
// The main reason for its existence is to provide a working watcher on Darwin for development and testing purposes.
type FSnotifyWatcher struct {
	tree    *Tree            // tree that is scanned when events arrive
	options *options.Options // posix fs options
	log     *zerolog.Logger  // watcher diagnostics
	mu      sync.Mutex       // guards watched
	watched map[string]struct{} // set of directories with an active fsnotify watch
}
// NewWatcher creates a new FSnotifyWatcher which implements the Watcher
// interface for Darwin using fsnotify. It logs a warning because the
// fsnotify-based implementation is intended for development and testing only.
func NewWatcher(tree *Tree, o *options.Options, log *zerolog.Logger) (*FSnotifyWatcher, error) {
	log.Warn().Msg("fsnotify watcher on darwin has limitations and may not work as expected in all scenarios, not recommended for production use")
	w := &FSnotifyWatcher{
		tree:    tree,
		options: o,
		log:     log,
		watched: map[string]struct{}{},
	}
	return w, nil
}
// add registers watches for root and every directory below it, skipping
// ignored paths and directories that are already being watched.
func (w *FSnotifyWatcher) add(fsWatcher *fsnotify.Watcher, root string) error {
	// Bail out early when the root itself is ignored — no need to walk.
	if isPathIgnored(w.tree, root) {
		return nil
	}
	walkFn := func(path string, entry fs.DirEntry, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		// Only directories get watches; ignored paths are skipped.
		if isPathIgnored(w.tree, path) || !entry.IsDir() {
			return nil
		}
		w.mu.Lock()
		defer w.mu.Unlock()
		if _, known := w.watched[path]; known {
			// already watched, nothing to do
			return nil
		}
		if err := fsWatcher.Add(path); err != nil {
			return err
		}
		w.watched[path] = struct{}{}
		return nil
	}
	return filepath.WalkDir(root, walkFn)
}
// remove takes care of removing watches for root and its subpaths.
func (w *FSnotifyWatcher) remove(fsWatcher *fsnotify.Watcher, root string) {
w.mu.Lock()
defer w.mu.Unlock()
for p := range w.watched {
if p == root || isSubpath(root, p) {
if err := fsWatcher.Remove(p); err != nil {
w.log.Debug().Err(err).Str("path", p).Msg("failed to remove watch")
}
delete(w.watched, p)
}
}
}
// handleEvent supervises the handling of fsnotify events. It classifies the
// event, stats the path to learn whether it still exists (and is a directory),
// then translates the event into a tree.Scan call with the matching watcher
// action. Returns any error from adding watches or scanning.
func (w *FSnotifyWatcher) handleEvent(fsWatcher *fsnotify.Watcher, event fsnotify.Event) error {
	isCreate := event.Op&fsnotify.Create != 0
	isRemove := event.Op&fsnotify.Remove != 0
	isRename := event.Op&fsnotify.Rename != 0
	isWrite := event.Op&fsnotify.Write != 0
	isKnownEvent := isCreate || isRemove || isRename || isWrite
	isIgnored := isPathIgnored(w.tree, event.Name)
	// filter out unwanted events
	if isIgnored || !isKnownEvent {
		return nil
	}
	// The stat result distinguishes "moved here / created" (path exists)
	// from "moved away / deleted" (path gone). Note there is an inherent
	// race between the event and this stat.
	st, statErr := os.Stat(event.Name)
	exists := statErr == nil
	isDir := exists && st.IsDir()
	switch {
	case isRename:
		if exists {
			// rename target still present: treat as a move into place;
			// new directories need watches for their subtree.
			if isDir {
				_ = w.add(fsWatcher, event.Name)
			}
			return w.tree.Scan(event.Name, watcher.ActionMove, isDir)
		}
		// rename source disappeared: treat as moved-away.
		w.remove(fsWatcher, event.Name)
		return w.tree.Scan(event.Name, watcher.ActionMoveFrom, false)
	case isRemove:
		w.remove(fsWatcher, event.Name)
		return w.tree.Scan(event.Name, watcher.ActionDelete, false)
	case isCreate:
		if exists {
			if isDir {
				_ = w.add(fsWatcher, event.Name)
			}
			return w.tree.Scan(event.Name, watcher.ActionCreate, isDir)
		}
		// created but already gone by the time we stat: report as moved-away.
		w.remove(fsWatcher, event.Name)
		return w.tree.Scan(event.Name, watcher.ActionMoveFrom, false)
	case isWrite:
		if exists {
			return w.tree.Scan(event.Name, watcher.ActionUpdate, isDir)
		}
	default:
		// unreachable given the isKnownEvent filter above, kept as a safety net
		w.log.Warn().Interface("event", event).Msg("unhandled event")
	}
	return nil
}
// Watch starts watching the given path for changes. It creates an fsnotify
// watcher, spawns a goroutine to drain its event and error channels, adds
// initial watches under <path>/users, and then blocks forever — the method
// is expected to be run for the lifetime of the process.
func (w *FSnotifyWatcher) Watch(path string) {
	fsWatcher, err := fsnotify.NewWatcher()
	if err != nil {
		w.log.Error().Err(err).Msg("failed to create watcher")
		return
	}
	defer func() { _ = fsWatcher.Close() }()
	// Stats reporting is a feature of the inotify watcher; not available here.
	if w.options.InotifyStatsFrequency > 0 {
		w.log.Debug().Str("watcher", "not implemented on darwin").Msg("fsnotify stats")
	}
	go func() {
		for {
			select {
			case event, ok := <-fsWatcher.Events:
				if !ok {
					// events channel closed: watcher is shutting down
					return
				}
				if err := w.handleEvent(fsWatcher, event); err != nil {
					w.log.Error().Err(err).Str("path", event.Name).Msg("error scanning file")
				}
			case err, ok := <-fsWatcher.Errors:
				if !ok {
					return
				}
				w.log.Error().Err(err).Msg("fsnotify error")
			}
		}
	}()
	base := filepath.Join(path, "users")
	if err := w.add(fsWatcher, base); err != nil {
		w.log.Error().Err(err).Str("path", base).Msg("failed to add initial watches")
	}
	// Block forever; receiving from a fresh unbuffered channel never succeeds.
	<-make(chan struct{})
}
// isSubpath reports whether p lies strictly below root in the filesystem
// hierarchy; p equal to root itself does not count. Both arguments are
// normalized to absolute paths (falling back to Clean if Abs fails).
func isSubpath(root, p string) bool {
	absRoot, err := filepath.Abs(root)
	if err != nil {
		absRoot = filepath.Clean(root)
	}
	absP, err := filepath.Abs(p)
	if err != nil {
		absP = filepath.Clean(p)
	}
	// If p can be expressed relative to root without climbing out of it
	// (no leading ".." and not "." itself), it is a proper subpath.
	rel, err := filepath.Rel(absRoot, absP)
	if err != nil {
		return false
	}
	return rel != "." && !strings.HasPrefix(rel, "..")
}
// isPathIgnored reports whether path should be excluded from watching:
// empty paths, lock files, trash entries, in-flight uploads and
// tree-internal bookkeeping paths are all ignored.
func isPathIgnored(tree *Tree, path string) bool {
	if path == "" {
		return true
	}
	return isLockFile(path) ||
		isTrash(path) ||
		tree.isUpload(path) ||
		tree.isInternal(path)
}

View File

@@ -29,11 +29,12 @@ import (
"strings"
"time"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/watcher"
"github.com/pablodz/inotifywaitgo/inotifywaitgo"
"github.com/rs/zerolog"
slogzerolog "github.com/samber/slog-zerolog/v2"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/watcher"
)
type InotifyWatcher struct {
@@ -42,7 +43,7 @@ type InotifyWatcher struct {
log *zerolog.Logger
}
func NewInotifyWatcher(tree *Tree, o *options.Options, log *zerolog.Logger) (*InotifyWatcher, error) {
func NewWatcher(tree *Tree, o *options.Options, log *zerolog.Logger) (*InotifyWatcher, error) {
return &InotifyWatcher{
tree: tree,
options: o,

View File

@@ -0,0 +1,17 @@
//go:build !linux && (!darwin || !experimental_watchfs_darwin)
// Copyright 2025 OpenCloud GmbH <mail@opencloud.eu>
// SPDX-License-Identifier: Apache-2.0
package tree
import (
"github.com/rs/zerolog"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options"
)
// NewWatcher returns a NoopWatcher on unsupported platforms. It always
// returns a nil watcher together with ErrUnsupportedWatcher, so callers
// must check the error before using the result.
func NewWatcher(_ *Tree, _ *options.Options, _ *zerolog.Logger) (*NoopWatcher, error) {
	return nil, ErrUnsupportedWatcher
}

View File

@@ -86,7 +86,7 @@ var (
)
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/pkg/decomposedfs")
tracer = otel.Tracer("github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs")
}
// Session is the interface that DecomposedfsSession implements. By combining tus.Upload,

View File

@@ -48,7 +48,7 @@ const (
)
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup")
tracer = otel.Tracer("github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/lookup")
}
// Lookup implements transformations from filepath to node and back

View File

@@ -153,6 +153,10 @@ func (b HybridBackend) getAll(ctx context.Context, n MetadataNode, skipCache, sk
}
if len(attrNames) == 0 {
err = b.metaCache.PushToCache(b.cacheKey(n), attribs)
if err != nil {
return nil, err
}
return attribs, nil
}
@@ -179,6 +183,9 @@ func (b HybridBackend) getAll(ctx context.Context, n MetadataNode, skipCache, sk
// merge the attributes from the offload file
offloaded, err := xattr.Get(path, _metadataOffloadedAttr)
if err != nil && !IsAttrUnset(err) {
return nil, err
}
if !skipOffloaded && err == nil && string(offloaded) == "1" {
msgpackAttribs := map[string][]byte{}
msgBytes, err := os.ReadFile(b.MetadataPath(n))
@@ -308,16 +315,9 @@ func (b HybridBackend) SetMultiple(ctx context.Context, n MetadataNode, attribs
return fmt.Errorf("failed to set %d/%d xattrs: %w", xerrs, total, xerr)
}
attribs, err = b.getAll(ctx, n, true, false, false)
if err != nil {
return err
}
err = b.metaCache.PushToCache(b.cacheKey(n), attribs)
if err != nil {
return err
}
return nil
// Update the cache with the new values
_, err = b.getAll(ctx, n, true, false, false)
return err
}
func (b HybridBackend) offloadMetadata(ctx context.Context, n MetadataNode) error {
@@ -439,11 +439,9 @@ func (b HybridBackend) Remove(ctx context.Context, n MetadataNode, key string, a
}
}
attribs, err := b.getAll(ctx, n, true, false, false)
if err != nil {
return err
}
return b.metaCache.PushToCache(b.cacheKey(n), attribs)
// Update the cache with the new values
_, err := b.getAll(ctx, n, true, false, false)
return err
}
// IsMetaFile returns whether the given path represents a meta file
@@ -455,15 +453,10 @@ func (b HybridBackend) Purge(ctx context.Context, n MetadataNode) error {
_, err := os.Stat(path)
if err == nil {
attribs, err := b.getAll(ctx, n, true, false, true)
if err != nil {
return err
}
for attr := range attribs {
if strings.HasPrefix(attr, prefixes.OcPrefix) {
err := xattr.Remove(path, attr)
if err != nil {
return err
if err == nil {
for attr := range attribs {
if strings.HasPrefix(attr, prefixes.OcPrefix) {
_ = xattr.Remove(path, attr)
}
}
}

View File

@@ -31,7 +31,7 @@ import (
var tracer trace.Tracer
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/metadata")
tracer = otel.Tracer("github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/metadata")
}
var errUnconfiguredError = errors.New("no metadata backend configured. Bailing out")

View File

@@ -58,7 +58,7 @@ import (
var tracer trace.Tracer
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node")
tracer = otel.Tracer("github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/node")
}
// Define keys and values used in the node metadata

View File

@@ -21,7 +21,7 @@ var (
)
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/permissions")
tracer = otel.Tracer("github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/permissions")
}
const (

View File

@@ -38,7 +38,7 @@ import (
var tracer trace.Tracer
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree/propagator")
tracer = otel.Tracer("github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/tree/propagator")
}
type Propagator interface {

View File

@@ -53,7 +53,7 @@ import (
var tracer trace.Tracer
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree")
tracer = otel.Tracer("github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/tree")
}
// Tree manages a hierarchical tree

View File

@@ -57,7 +57,7 @@ var (
)
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/upload")
tracer = otel.Tracer("github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/upload")
}
// WriteChunk writes the stream from the reader to the given offset of the upload

View File

@@ -49,7 +49,7 @@ import (
var tracer trace.Tracer
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/metadata")
tracer = otel.Tracer("github.com/opencloud-eu/reva/v2/pkg/storage/utils/metadata")
}
// CS3 represents a metadata storage with a cs3 storage backend

14
vendor/modules.txt vendored
View File

@@ -289,8 +289,8 @@ github.com/containerd/log
# github.com/containerd/platforms v1.0.0-rc.2
## explicit; go 1.20
github.com/containerd/platforms
# github.com/coreos/go-oidc/v3 v3.17.0
## explicit; go 1.24.0
# github.com/coreos/go-oidc/v3 v3.18.0
## explicit; go 1.25.0
github.com/coreos/go-oidc/v3/oidc
# github.com/coreos/go-semver v0.3.1
## explicit; go 1.8
@@ -506,7 +506,7 @@ github.com/go-git/go-billy/v5/helper/polyfill
github.com/go-git/go-billy/v5/memfs
github.com/go-git/go-billy/v5/osfs
github.com/go-git/go-billy/v5/util
# github.com/go-git/go-git/v5 v5.17.1
# github.com/go-git/go-git/v5 v5.18.0
## explicit; go 1.24.0
github.com/go-git/go-git/v5
github.com/go-git/go-git/v5/config
@@ -633,8 +633,8 @@ github.com/go-playground/locales/en
# github.com/go-playground/universal-translator v0.18.1
## explicit; go 1.18
github.com/go-playground/universal-translator
# github.com/go-playground/validator/v10 v10.30.1
## explicit; go 1.24.0
# github.com/go-playground/validator/v10 v10.30.2
## explicit; go 1.25.0
github.com/go-playground/validator/v10
github.com/go-playground/validator/v10/translations/en
# github.com/go-redis/redis/v8 v8.11.5
@@ -1177,7 +1177,7 @@ github.com/nats-io/nats-server/v2/server/stree
github.com/nats-io/nats-server/v2/server/sysmem
github.com/nats-io/nats-server/v2/server/thw
github.com/nats-io/nats-server/v2/server/tpm
# github.com/nats-io/nats.go v1.50.0
# github.com/nats-io/nats.go v1.51.0
## explicit; go 1.25.0
github.com/nats-io/nats.go
github.com/nats-io/nats.go/encoders/builtin
@@ -1372,7 +1372,7 @@ github.com/opencloud-eu/icap-client
# github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20260310090739-853d972b282d
## explicit; go 1.18
github.com/opencloud-eu/libre-graph-api-go
# github.com/opencloud-eu/reva/v2 v2.42.7-0.20260413125349-61dfc72a7d60
# github.com/opencloud-eu/reva/v2 v2.43.0
## explicit; go 1.25.0
github.com/opencloud-eu/reva/v2/cmd/revad/internal/grace
github.com/opencloud-eu/reva/v2/cmd/revad/runtime