mirror of
https://github.com/opencloud-eu/opencloud.git
synced 2026-04-30 20:23:26 -04:00
Merge pull request #263 from opencloud-eu/bump-reva-4eb591e
bump reva to 4eb591e
This commit is contained in:
13
go.mod
13
go.mod
@@ -65,7 +65,7 @@ require (
|
||||
github.com/onsi/ginkgo/v2 v2.22.2
|
||||
github.com/onsi/gomega v1.36.2
|
||||
github.com/open-policy-agent/opa v1.1.0
|
||||
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250225150735-7d4559bbf520
|
||||
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250226135705-4eb591e3210d
|
||||
github.com/orcaman/concurrent-map v1.0.0
|
||||
github.com/owncloud/libre-graph-api-go v1.0.5-0.20240829135935-80dc00d6f5ea
|
||||
github.com/pkg/errors v0.9.1
|
||||
@@ -73,7 +73,7 @@ require (
|
||||
github.com/prometheus/client_golang v1.21.0
|
||||
github.com/r3labs/sse/v2 v2.10.0
|
||||
github.com/riandyrn/otelchi v0.12.0
|
||||
github.com/rogpeppe/go-internal v1.13.1
|
||||
github.com/rogpeppe/go-internal v1.14.1
|
||||
github.com/rs/cors v1.11.1
|
||||
github.com/rs/zerolog v1.33.0
|
||||
github.com/shamaton/msgpack/v2 v2.2.2
|
||||
@@ -101,7 +101,7 @@ require (
|
||||
golang.org/x/crypto v0.33.0
|
||||
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c
|
||||
golang.org/x/image v0.24.0
|
||||
golang.org/x/net v0.34.0
|
||||
golang.org/x/net v0.35.0
|
||||
golang.org/x/oauth2 v0.26.0
|
||||
golang.org/x/sync v0.11.0
|
||||
golang.org/x/term v0.29.0
|
||||
@@ -208,7 +208,7 @@ require (
|
||||
github.com/gobwas/httphead v0.1.0 // indirect
|
||||
github.com/gobwas/pool v0.2.1 // indirect
|
||||
github.com/gobwas/ws v1.2.1 // indirect
|
||||
github.com/goccy/go-json v0.10.3 // indirect
|
||||
github.com/goccy/go-json v0.10.5 // indirect
|
||||
github.com/goccy/go-yaml v1.11.2 // indirect
|
||||
github.com/gofrs/flock v0.12.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
@@ -239,7 +239,7 @@ require (
|
||||
github.com/juliangruber/go-intersect v1.1.0 // indirect
|
||||
github.com/kevinburke/ssh_config v1.2.0 // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.8 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.9 // indirect
|
||||
github.com/leodido/go-urn v1.4.0 // indirect
|
||||
github.com/libregraph/oidc-go v1.1.0 // indirect
|
||||
github.com/longsleep/go-metrics v1.0.0 // indirect
|
||||
@@ -253,9 +253,10 @@ require (
|
||||
github.com/mendsley/gojwk v0.0.0-20141217222730-4d5ec6e58103 // indirect
|
||||
github.com/miekg/dns v1.1.57 // indirect
|
||||
github.com/mileusna/useragent v1.3.5 // indirect
|
||||
github.com/minio/crc64nvme v1.0.1 // indirect
|
||||
github.com/minio/highwayhash v1.0.3 // indirect
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
github.com/minio/minio-go/v7 v7.0.78 // indirect
|
||||
github.com/minio/minio-go/v7 v7.0.87 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
|
||||
26
go.sum
26
go.sum
@@ -438,8 +438,8 @@ github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og=
|
||||
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.2.1 h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk=
|
||||
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
|
||||
github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
|
||||
github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
|
||||
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/goccy/go-yaml v1.11.2 h1:joq77SxuyIs9zzxEjgyLBugMQ9NEgTWxXfz2wVqwAaQ=
|
||||
github.com/goccy/go-yaml v1.11.2/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
@@ -687,8 +687,8 @@ github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHU
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
|
||||
github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY=
|
||||
github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
|
||||
github.com/kobergj/gowebdav v0.0.0-20250102091030-aa65266db202 h1:A1xJ2NKgiYFiaHiLl9B5yw/gUBACSs9crDykTS3GuQI=
|
||||
github.com/kobergj/gowebdav v0.0.0-20250102091030-aa65266db202/go.mod h1:bHA7t77X/QFExdeAnDzK6vKM34kEZAcE1OX4MfiwjkE=
|
||||
github.com/kobergj/plugins/v4/store/nats-js-kv v0.0.0-20240807130109-f62bb67e8c90 h1:pfI8Z5yavO6fU6vDGlWhZ4BgDlvj8c6xB7J57HfTPwA=
|
||||
@@ -781,12 +781,14 @@ github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM=
|
||||
github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk=
|
||||
github.com/mileusna/useragent v1.3.5 h1:SJM5NzBmh/hO+4LGeATKpaEX9+b4vcGg2qXGLiNGDws=
|
||||
github.com/mileusna/useragent v1.3.5/go.mod h1:3d8TOmwL/5I8pJjyVDteHtgDGcefrFUX4ccGOMKNYYc=
|
||||
github.com/minio/crc64nvme v1.0.1 h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY=
|
||||
github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
|
||||
github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q=
|
||||
github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ=
|
||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||
github.com/minio/minio-go/v7 v7.0.78 h1:LqW2zy52fxnI4gg8C2oZviTaKHcBV36scS+RzJnxUFs=
|
||||
github.com/minio/minio-go/v7 v7.0.78/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0=
|
||||
github.com/minio/minio-go/v7 v7.0.87 h1:nkr9x0u53PespfxfUqxP3UYWiE2a41gaofgNnC4Y8WQ=
|
||||
github.com/minio/minio-go/v7 v7.0.87/go.mod h1:33+O8h0tO7pCeCWwBVa07RhVVfB/3vS4kEX7rwYKmIg=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
|
||||
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
|
||||
@@ -861,8 +863,8 @@ github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
|
||||
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
|
||||
github.com/open-policy-agent/opa v1.1.0 h1:HMz2evdEMTyNqtdLjmu3Vyx06BmhNYAx67Yz3Ll9q2s=
|
||||
github.com/open-policy-agent/opa v1.1.0/go.mod h1:T1pASQ1/vwfTa+e2fYcfpLCvWgYtqtiUv+IuA/dLPQs=
|
||||
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250225150735-7d4559bbf520 h1:JCFtRPS6l0zouBBMBewWDXOBiKBhmAzsw6liG83b+Xw=
|
||||
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250225150735-7d4559bbf520/go.mod h1:ZERf8ae/ppN23rL0TwUH+oxxGY2DZ0x3o31ho3w7GeY=
|
||||
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250226135705-4eb591e3210d h1:0AldgkqIcc6K3ciTB4zaWK/PvCeq6M5J66SXGOTQyOY=
|
||||
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250226135705-4eb591e3210d/go.mod h1:BQdl4BybewOQRtKtmM57qg05IsWXETCItuHPsnYvhZg=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
|
||||
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
|
||||
@@ -974,8 +976,8 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
|
||||
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
|
||||
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||
@@ -1323,8 +1325,8 @@ golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
|
||||
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
|
||||
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
|
||||
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
||||
18
vendor/github.com/goccy/go-json/internal/decoder/compile.go
generated
vendored
18
vendor/github.com/goccy/go-json/internal/decoder/compile.go
generated
vendored
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"unicode"
|
||||
"unsafe"
|
||||
@@ -17,22 +18,27 @@ var (
|
||||
typeAddr *runtime.TypeAddr
|
||||
cachedDecoderMap unsafe.Pointer // map[uintptr]decoder
|
||||
cachedDecoder []Decoder
|
||||
initOnce sync.Once
|
||||
)
|
||||
|
||||
func init() {
|
||||
typeAddr = runtime.AnalyzeTypeAddr()
|
||||
if typeAddr == nil {
|
||||
typeAddr = &runtime.TypeAddr{}
|
||||
}
|
||||
cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1)
|
||||
func initDecoder() {
|
||||
initOnce.Do(func() {
|
||||
typeAddr = runtime.AnalyzeTypeAddr()
|
||||
if typeAddr == nil {
|
||||
typeAddr = &runtime.TypeAddr{}
|
||||
}
|
||||
cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1)
|
||||
})
|
||||
}
|
||||
|
||||
func loadDecoderMap() map[uintptr]Decoder {
|
||||
initDecoder()
|
||||
p := atomic.LoadPointer(&cachedDecoderMap)
|
||||
return *(*map[uintptr]Decoder)(unsafe.Pointer(&p))
|
||||
}
|
||||
|
||||
func storeDecoder(typ uintptr, dec Decoder, m map[uintptr]Decoder) {
|
||||
initDecoder()
|
||||
newDecoderMap := make(map[uintptr]Decoder, len(m)+1)
|
||||
newDecoderMap[typ] = dec
|
||||
|
||||
|
||||
1
vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go
generated
vendored
1
vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go
generated
vendored
@@ -10,6 +10,7 @@ import (
|
||||
)
|
||||
|
||||
func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) {
|
||||
initDecoder()
|
||||
typeptr := uintptr(unsafe.Pointer(typ))
|
||||
if typeptr > typeAddr.MaxTypeAddr {
|
||||
return compileToGetDecoderSlowPath(typeptr, typ)
|
||||
|
||||
1
vendor/github.com/goccy/go-json/internal/decoder/compile_race.go
generated
vendored
1
vendor/github.com/goccy/go-json/internal/decoder/compile_race.go
generated
vendored
@@ -13,6 +13,7 @@ import (
|
||||
var decMu sync.RWMutex
|
||||
|
||||
func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) {
|
||||
initDecoder()
|
||||
typeptr := uintptr(unsafe.Pointer(typ))
|
||||
if typeptr > typeAddr.MaxTypeAddr {
|
||||
return compileToGetDecoderSlowPath(typeptr, typ)
|
||||
|
||||
16
vendor/github.com/goccy/go-json/internal/encoder/compiler.go
generated
vendored
16
vendor/github.com/goccy/go-json/internal/encoder/compiler.go
generated
vendored
@@ -5,6 +5,7 @@ import (
|
||||
"encoding"
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
|
||||
@@ -24,14 +25,17 @@ var (
|
||||
cachedOpcodeSets []*OpcodeSet
|
||||
cachedOpcodeMap unsafe.Pointer // map[uintptr]*OpcodeSet
|
||||
typeAddr *runtime.TypeAddr
|
||||
initEncoderOnce sync.Once
|
||||
)
|
||||
|
||||
func init() {
|
||||
typeAddr = runtime.AnalyzeTypeAddr()
|
||||
if typeAddr == nil {
|
||||
typeAddr = &runtime.TypeAddr{}
|
||||
}
|
||||
cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1)
|
||||
func initEncoder() {
|
||||
initEncoderOnce.Do(func() {
|
||||
typeAddr = runtime.AnalyzeTypeAddr()
|
||||
if typeAddr == nil {
|
||||
typeAddr = &runtime.TypeAddr{}
|
||||
}
|
||||
cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1)
|
||||
})
|
||||
}
|
||||
|
||||
func loadOpcodeMap() map[uintptr]*OpcodeSet {
|
||||
|
||||
1
vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go
generated
vendored
1
vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go
generated
vendored
@@ -4,6 +4,7 @@
|
||||
package encoder
|
||||
|
||||
func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) {
|
||||
initEncoder()
|
||||
if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr {
|
||||
codeSet, err := compileToGetCodeSetSlowPath(typeptr)
|
||||
if err != nil {
|
||||
|
||||
1
vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go
generated
vendored
1
vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go
generated
vendored
@@ -10,6 +10,7 @@ import (
|
||||
var setsMu sync.RWMutex
|
||||
|
||||
func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) {
|
||||
initEncoder()
|
||||
if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr {
|
||||
codeSet, err := compileToGetCodeSetSlowPath(typeptr)
|
||||
if err != nil {
|
||||
|
||||
5
vendor/github.com/goccy/go-json/internal/encoder/encoder.go
generated
vendored
5
vendor/github.com/goccy/go-json/internal/encoder/encoder.go
generated
vendored
@@ -406,6 +406,11 @@ func AppendMarshalJSON(ctx *RuntimeContext, code *Opcode, b []byte, v interface{
|
||||
rv = newV
|
||||
}
|
||||
}
|
||||
|
||||
if rv.Kind() == reflect.Ptr && rv.IsNil() {
|
||||
return AppendNull(ctx, b), nil
|
||||
}
|
||||
|
||||
v = rv.Interface()
|
||||
var bb []byte
|
||||
if (code.Flags & MarshalerContextFlags) != 0 {
|
||||
|
||||
108
vendor/github.com/goccy/go-json/internal/runtime/type.go
generated
vendored
108
vendor/github.com/goccy/go-json/internal/runtime/type.go
generated
vendored
@@ -2,6 +2,7 @@ package runtime
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
@@ -23,8 +24,8 @@ type TypeAddr struct {
|
||||
}
|
||||
|
||||
var (
|
||||
typeAddr *TypeAddr
|
||||
alreadyAnalyzed bool
|
||||
typeAddr *TypeAddr
|
||||
once sync.Once
|
||||
)
|
||||
|
||||
//go:linkname typelinks reflect.typelinks
|
||||
@@ -34,67 +35,64 @@ func typelinks() ([]unsafe.Pointer, [][]int32)
|
||||
func rtypeOff(unsafe.Pointer, int32) unsafe.Pointer
|
||||
|
||||
func AnalyzeTypeAddr() *TypeAddr {
|
||||
defer func() {
|
||||
alreadyAnalyzed = true
|
||||
}()
|
||||
if alreadyAnalyzed {
|
||||
return typeAddr
|
||||
}
|
||||
sections, offsets := typelinks()
|
||||
if len(sections) != 1 {
|
||||
return nil
|
||||
}
|
||||
if len(offsets) != 1 {
|
||||
return nil
|
||||
}
|
||||
section := sections[0]
|
||||
offset := offsets[0]
|
||||
var (
|
||||
min uintptr = uintptr(^uint(0))
|
||||
max uintptr = 0
|
||||
isAligned64 = true
|
||||
isAligned32 = true
|
||||
)
|
||||
for i := 0; i < len(offset); i++ {
|
||||
typ := (*Type)(rtypeOff(section, offset[i]))
|
||||
addr := uintptr(unsafe.Pointer(typ))
|
||||
if min > addr {
|
||||
min = addr
|
||||
once.Do(func() {
|
||||
sections, offsets := typelinks()
|
||||
if len(sections) != 1 {
|
||||
return
|
||||
}
|
||||
if max < addr {
|
||||
max = addr
|
||||
if len(offsets) != 1 {
|
||||
return
|
||||
}
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
addr = uintptr(unsafe.Pointer(typ.Elem()))
|
||||
section := sections[0]
|
||||
offset := offsets[0]
|
||||
var (
|
||||
min uintptr = uintptr(^uint(0))
|
||||
max uintptr = 0
|
||||
isAligned64 = true
|
||||
isAligned32 = true
|
||||
)
|
||||
for i := 0; i < len(offset); i++ {
|
||||
typ := (*Type)(rtypeOff(section, offset[i]))
|
||||
addr := uintptr(unsafe.Pointer(typ))
|
||||
if min > addr {
|
||||
min = addr
|
||||
}
|
||||
if max < addr {
|
||||
max = addr
|
||||
}
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
addr = uintptr(unsafe.Pointer(typ.Elem()))
|
||||
if min > addr {
|
||||
min = addr
|
||||
}
|
||||
if max < addr {
|
||||
max = addr
|
||||
}
|
||||
}
|
||||
isAligned64 = isAligned64 && (addr-min)&63 == 0
|
||||
isAligned32 = isAligned32 && (addr-min)&31 == 0
|
||||
}
|
||||
isAligned64 = isAligned64 && (addr-min)&63 == 0
|
||||
isAligned32 = isAligned32 && (addr-min)&31 == 0
|
||||
}
|
||||
addrRange := max - min
|
||||
if addrRange == 0 {
|
||||
return nil
|
||||
}
|
||||
var addrShift uintptr
|
||||
if isAligned64 {
|
||||
addrShift = 6
|
||||
} else if isAligned32 {
|
||||
addrShift = 5
|
||||
}
|
||||
cacheSize := addrRange >> addrShift
|
||||
if cacheSize > maxAcceptableTypeAddrRange {
|
||||
return nil
|
||||
}
|
||||
typeAddr = &TypeAddr{
|
||||
BaseTypeAddr: min,
|
||||
MaxTypeAddr: max,
|
||||
AddrRange: addrRange,
|
||||
AddrShift: addrShift,
|
||||
}
|
||||
addrRange := max - min
|
||||
if addrRange == 0 {
|
||||
return
|
||||
}
|
||||
var addrShift uintptr
|
||||
if isAligned64 {
|
||||
addrShift = 6
|
||||
} else if isAligned32 {
|
||||
addrShift = 5
|
||||
}
|
||||
cacheSize := addrRange >> addrShift
|
||||
if cacheSize > maxAcceptableTypeAddrRange {
|
||||
return
|
||||
}
|
||||
typeAddr = &TypeAddr{
|
||||
BaseTypeAddr: min,
|
||||
MaxTypeAddr: max,
|
||||
AddrRange: addrRange,
|
||||
AddrShift: addrShift,
|
||||
}
|
||||
})
|
||||
|
||||
return typeAddr
|
||||
}
|
||||
|
||||
1
vendor/github.com/klauspost/cpuid/v2/README.md
generated
vendored
1
vendor/github.com/klauspost/cpuid/v2/README.md
generated
vendored
@@ -281,6 +281,7 @@ Exit Code 1
|
||||
| AMXBF16 | Tile computational operations on BFLOAT16 numbers |
|
||||
| AMXINT8 | Tile computational operations on 8-bit integers |
|
||||
| AMXFP16 | Tile computational operations on FP16 numbers |
|
||||
| AMXFP8 | Tile computational operations on FP8 numbers |
|
||||
| AMXTILE | Tile architecture |
|
||||
| APX_F | Intel APX |
|
||||
| AVX | AVX functions |
|
||||
|
||||
84
vendor/github.com/klauspost/cpuid/v2/cpuid.go
generated
vendored
84
vendor/github.com/klauspost/cpuid/v2/cpuid.go
generated
vendored
@@ -55,6 +55,12 @@ const (
|
||||
Qualcomm
|
||||
Marvell
|
||||
|
||||
QEMU
|
||||
QNX
|
||||
ACRN
|
||||
SRE
|
||||
Apple
|
||||
|
||||
lastVendor
|
||||
)
|
||||
|
||||
@@ -75,6 +81,7 @@ const (
|
||||
AMXBF16 // Tile computational operations on BFLOAT16 numbers
|
||||
AMXFP16 // Tile computational operations on FP16 numbers
|
||||
AMXINT8 // Tile computational operations on 8-bit integers
|
||||
AMXFP8 // Tile computational operations on FP8 numbers
|
||||
AMXTILE // Tile architecture
|
||||
APX_F // Intel APX
|
||||
AVX // AVX functions
|
||||
@@ -296,20 +303,22 @@ const (
|
||||
|
||||
// CPUInfo contains information about the detected system CPU.
|
||||
type CPUInfo struct {
|
||||
BrandName string // Brand name reported by the CPU
|
||||
VendorID Vendor // Comparable CPU vendor ID
|
||||
VendorString string // Raw vendor string.
|
||||
featureSet flagSet // Features of the CPU
|
||||
PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable.
|
||||
ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable.
|
||||
LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable.
|
||||
Family int // CPU family number
|
||||
Model int // CPU model number
|
||||
Stepping int // CPU stepping info
|
||||
CacheLine int // Cache line size in bytes. Will be 0 if undetectable.
|
||||
Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed.
|
||||
BoostFreq int64 // Max clock speed, if known, 0 otherwise
|
||||
Cache struct {
|
||||
BrandName string // Brand name reported by the CPU
|
||||
VendorID Vendor // Comparable CPU vendor ID
|
||||
VendorString string // Raw vendor string.
|
||||
HypervisorVendorID Vendor // Hypervisor vendor
|
||||
HypervisorVendorString string // Raw hypervisor vendor string
|
||||
featureSet flagSet // Features of the CPU
|
||||
PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable.
|
||||
ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable.
|
||||
LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable.
|
||||
Family int // CPU family number
|
||||
Model int // CPU model number
|
||||
Stepping int // CPU stepping info
|
||||
CacheLine int // Cache line size in bytes. Will be 0 if undetectable.
|
||||
Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed.
|
||||
BoostFreq int64 // Max clock speed, if known, 0 otherwise
|
||||
Cache struct {
|
||||
L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected
|
||||
L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected
|
||||
L2 int // L2 Cache (per core or shared). Will be -1 if undetected
|
||||
@@ -318,8 +327,9 @@ type CPUInfo struct {
|
||||
SGX SGXSupport
|
||||
AMDMemEncryption AMDMemEncryptionSupport
|
||||
AVX10Level uint8
|
||||
maxFunc uint32
|
||||
maxExFunc uint32
|
||||
|
||||
maxFunc uint32
|
||||
maxExFunc uint32
|
||||
}
|
||||
|
||||
var cpuid func(op uint32) (eax, ebx, ecx, edx uint32)
|
||||
@@ -503,7 +513,7 @@ func (c CPUInfo) FeatureSet() []string {
|
||||
// Uses the RDTSCP instruction. The value 0 is returned
|
||||
// if the CPU does not support the instruction.
|
||||
func (c CPUInfo) RTCounter() uint64 {
|
||||
if !c.Supports(RDTSCP) {
|
||||
if !c.Has(RDTSCP) {
|
||||
return 0
|
||||
}
|
||||
a, _, _, d := rdtscpAsm()
|
||||
@@ -515,13 +525,22 @@ func (c CPUInfo) RTCounter() uint64 {
|
||||
// about the current cpu/core the code is running on.
|
||||
// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned.
|
||||
func (c CPUInfo) Ia32TscAux() uint32 {
|
||||
if !c.Supports(RDTSCP) {
|
||||
if !c.Has(RDTSCP) {
|
||||
return 0
|
||||
}
|
||||
_, _, ecx, _ := rdtscpAsm()
|
||||
return ecx
|
||||
}
|
||||
|
||||
// SveLengths returns arm SVE vector and predicate lengths.
|
||||
// Will return 0, 0 if SVE is not enabled or otherwise unable to detect.
|
||||
func (c CPUInfo) SveLengths() (vl, pl uint64) {
|
||||
if !c.Has(SVE) {
|
||||
return 0, 0
|
||||
}
|
||||
return getVectorLength()
|
||||
}
|
||||
|
||||
// LogicalCPU will return the Logical CPU the code is currently executing on.
|
||||
// This is likely to change when the OS re-schedules the running thread
|
||||
// to another CPU.
|
||||
@@ -781,11 +800,16 @@ func threadsPerCore() int {
|
||||
_, b, _, _ := cpuidex(0xb, 0)
|
||||
if b&0xffff == 0 {
|
||||
if vend == AMD {
|
||||
// Workaround for AMD returning 0, assume 2 if >= Zen 2
|
||||
// It will be more correct than not.
|
||||
// if >= Zen 2 0x8000001e EBX 15-8 bits means threads per core.
|
||||
// The number of threads per core is ThreadsPerCore+1
|
||||
// See PPR for AMD Family 17h Models 00h-0Fh (page 82)
|
||||
fam, _, _ := familyModel()
|
||||
_, _, _, d := cpuid(1)
|
||||
if (d&(1<<28)) != 0 && fam >= 23 {
|
||||
if maxExtendedFunction() >= 0x8000001e {
|
||||
_, b, _, _ := cpuid(0x8000001e)
|
||||
return int((b>>8)&0xff) + 1
|
||||
}
|
||||
return 2
|
||||
}
|
||||
}
|
||||
@@ -877,7 +901,9 @@ var vendorMapping = map[string]Vendor{
|
||||
"GenuineTMx86": Transmeta,
|
||||
"Geode by NSC": NSC,
|
||||
"VIA VIA VIA ": VIA,
|
||||
"KVMKVMKVMKVM": KVM,
|
||||
"KVMKVMKVM": KVM,
|
||||
"Linux KVM Hv": KVM,
|
||||
"TCGTCGTCGTCG": QEMU,
|
||||
"Microsoft Hv": MSVM,
|
||||
"VMwareVMware": VMware,
|
||||
"XenVMMXenVMM": XenHVM,
|
||||
@@ -887,6 +913,10 @@ var vendorMapping = map[string]Vendor{
|
||||
"SiS SiS SiS ": SiS,
|
||||
"RiseRiseRise": SiS,
|
||||
"Genuine RDC": RDC,
|
||||
"QNXQVMBSQG": QNX,
|
||||
"ACRNACRNACRN": ACRN,
|
||||
"SRESRESRESRE": SRE,
|
||||
"Apple VZ": Apple,
|
||||
}
|
||||
|
||||
func vendorID() (Vendor, string) {
|
||||
@@ -899,6 +929,17 @@ func vendorID() (Vendor, string) {
|
||||
return vend, v
|
||||
}
|
||||
|
||||
func hypervisorVendorID() (Vendor, string) {
|
||||
// https://lwn.net/Articles/301888/
|
||||
_, b, c, d := cpuid(0x40000000)
|
||||
v := string(valAsString(b, c, d))
|
||||
vend, ok := vendorMapping[v]
|
||||
if !ok {
|
||||
return VendorUnknown, v
|
||||
}
|
||||
return vend, v
|
||||
}
|
||||
|
||||
func cacheLine() int {
|
||||
if maxFunctionID() < 0x1 {
|
||||
return 0
|
||||
@@ -1271,6 +1312,7 @@ func support() flagSet {
|
||||
fs.setIf(ebx&(1<<31) != 0, AVX512VL)
|
||||
// ecx
|
||||
fs.setIf(ecx&(1<<1) != 0, AVX512VBMI)
|
||||
fs.setIf(ecx&(1<<3) != 0, AMXFP8)
|
||||
fs.setIf(ecx&(1<<6) != 0, AVX512VBMI2)
|
||||
fs.setIf(ecx&(1<<11) != 0, AVX512VNNI)
|
||||
fs.setIf(ecx&(1<<12) != 0, AVX512BITALG)
|
||||
|
||||
10
vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s
generated
vendored
10
vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s
generated
vendored
@@ -24,3 +24,13 @@ TEXT ·getInstAttributes(SB), 7, $0
|
||||
MOVD R1, instAttrReg1+8(FP)
|
||||
RET
|
||||
|
||||
TEXT ·getVectorLength(SB), 7, $0
|
||||
WORD $0xd2800002 // mov x2, #0
|
||||
WORD $0x04225022 // addvl x2, x2, #1
|
||||
WORD $0xd37df042 // lsl x2, x2, #3
|
||||
WORD $0xd2800003 // mov x3, #0
|
||||
WORD $0x04635023 // addpl x3, x3, #1
|
||||
WORD $0xd37df063 // lsl x3, x3, #3
|
||||
MOVD R2, vl+0(FP)
|
||||
MOVD R3, pl+8(FP)
|
||||
RET
|
||||
|
||||
3
vendor/github.com/klauspost/cpuid/v2/detect_arm64.go
generated
vendored
3
vendor/github.com/klauspost/cpuid/v2/detect_arm64.go
generated
vendored
@@ -10,6 +10,7 @@ import "runtime"
|
||||
func getMidr() (midr uint64)
|
||||
func getProcFeatures() (procFeatures uint64)
|
||||
func getInstAttributes() (instAttrReg0, instAttrReg1 uint64)
|
||||
func getVectorLength() (vl, pl uint64)
|
||||
|
||||
func initCPU() {
|
||||
cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
|
||||
@@ -24,7 +25,7 @@ func addInfo(c *CPUInfo, safe bool) {
|
||||
detectOS(c)
|
||||
|
||||
// ARM64 disabled since it may crash if interrupt is not intercepted by OS.
|
||||
if safe && !c.Supports(ARMCPUID) && runtime.GOOS != "freebsd" {
|
||||
if safe && !c.Has(ARMCPUID) && runtime.GOOS != "freebsd" {
|
||||
return
|
||||
}
|
||||
midr := getMidr()
|
||||
|
||||
2
vendor/github.com/klauspost/cpuid/v2/detect_ref.go
generated
vendored
2
vendor/github.com/klauspost/cpuid/v2/detect_ref.go
generated
vendored
@@ -10,6 +10,8 @@ func initCPU() {
|
||||
cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
|
||||
xgetbv = func(uint32) (a, b uint32) { return 0, 0 }
|
||||
rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 }
|
||||
|
||||
}
|
||||
|
||||
func addInfo(info *CPUInfo, safe bool) {}
|
||||
func getVectorLength() (vl, pl uint64) { return 0, 0 }
|
||||
|
||||
3
vendor/github.com/klauspost/cpuid/v2/detect_x86.go
generated
vendored
3
vendor/github.com/klauspost/cpuid/v2/detect_x86.go
generated
vendored
@@ -32,7 +32,10 @@ func addInfo(c *CPUInfo, safe bool) {
|
||||
c.LogicalCores = logicalCores()
|
||||
c.PhysicalCores = physicalCores()
|
||||
c.VendorID, c.VendorString = vendorID()
|
||||
c.HypervisorVendorID, c.HypervisorVendorString = hypervisorVendorID()
|
||||
c.AVX10Level = c.supportAVX10()
|
||||
c.cacheSize()
|
||||
c.frequencies()
|
||||
}
|
||||
|
||||
func getVectorLength() (vl, pl uint64) { return 0, 0 }
|
||||
|
||||
440
vendor/github.com/klauspost/cpuid/v2/featureid_string.go
generated
vendored
440
vendor/github.com/klauspost/cpuid/v2/featureid_string.go
generated
vendored
@@ -15,224 +15,225 @@ func _() {
|
||||
_ = x[AMXBF16-5]
|
||||
_ = x[AMXFP16-6]
|
||||
_ = x[AMXINT8-7]
|
||||
_ = x[AMXTILE-8]
|
||||
_ = x[APX_F-9]
|
||||
_ = x[AVX-10]
|
||||
_ = x[AVX10-11]
|
||||
_ = x[AVX10_128-12]
|
||||
_ = x[AVX10_256-13]
|
||||
_ = x[AVX10_512-14]
|
||||
_ = x[AVX2-15]
|
||||
_ = x[AVX512BF16-16]
|
||||
_ = x[AVX512BITALG-17]
|
||||
_ = x[AVX512BW-18]
|
||||
_ = x[AVX512CD-19]
|
||||
_ = x[AVX512DQ-20]
|
||||
_ = x[AVX512ER-21]
|
||||
_ = x[AVX512F-22]
|
||||
_ = x[AVX512FP16-23]
|
||||
_ = x[AVX512IFMA-24]
|
||||
_ = x[AVX512PF-25]
|
||||
_ = x[AVX512VBMI-26]
|
||||
_ = x[AVX512VBMI2-27]
|
||||
_ = x[AVX512VL-28]
|
||||
_ = x[AVX512VNNI-29]
|
||||
_ = x[AVX512VP2INTERSECT-30]
|
||||
_ = x[AVX512VPOPCNTDQ-31]
|
||||
_ = x[AVXIFMA-32]
|
||||
_ = x[AVXNECONVERT-33]
|
||||
_ = x[AVXSLOW-34]
|
||||
_ = x[AVXVNNI-35]
|
||||
_ = x[AVXVNNIINT8-36]
|
||||
_ = x[AVXVNNIINT16-37]
|
||||
_ = x[BHI_CTRL-38]
|
||||
_ = x[BMI1-39]
|
||||
_ = x[BMI2-40]
|
||||
_ = x[CETIBT-41]
|
||||
_ = x[CETSS-42]
|
||||
_ = x[CLDEMOTE-43]
|
||||
_ = x[CLMUL-44]
|
||||
_ = x[CLZERO-45]
|
||||
_ = x[CMOV-46]
|
||||
_ = x[CMPCCXADD-47]
|
||||
_ = x[CMPSB_SCADBS_SHORT-48]
|
||||
_ = x[CMPXCHG8-49]
|
||||
_ = x[CPBOOST-50]
|
||||
_ = x[CPPC-51]
|
||||
_ = x[CX16-52]
|
||||
_ = x[EFER_LMSLE_UNS-53]
|
||||
_ = x[ENQCMD-54]
|
||||
_ = x[ERMS-55]
|
||||
_ = x[F16C-56]
|
||||
_ = x[FLUSH_L1D-57]
|
||||
_ = x[FMA3-58]
|
||||
_ = x[FMA4-59]
|
||||
_ = x[FP128-60]
|
||||
_ = x[FP256-61]
|
||||
_ = x[FSRM-62]
|
||||
_ = x[FXSR-63]
|
||||
_ = x[FXSROPT-64]
|
||||
_ = x[GFNI-65]
|
||||
_ = x[HLE-66]
|
||||
_ = x[HRESET-67]
|
||||
_ = x[HTT-68]
|
||||
_ = x[HWA-69]
|
||||
_ = x[HYBRID_CPU-70]
|
||||
_ = x[HYPERVISOR-71]
|
||||
_ = x[IA32_ARCH_CAP-72]
|
||||
_ = x[IA32_CORE_CAP-73]
|
||||
_ = x[IBPB-74]
|
||||
_ = x[IBPB_BRTYPE-75]
|
||||
_ = x[IBRS-76]
|
||||
_ = x[IBRS_PREFERRED-77]
|
||||
_ = x[IBRS_PROVIDES_SMP-78]
|
||||
_ = x[IBS-79]
|
||||
_ = x[IBSBRNTRGT-80]
|
||||
_ = x[IBSFETCHSAM-81]
|
||||
_ = x[IBSFFV-82]
|
||||
_ = x[IBSOPCNT-83]
|
||||
_ = x[IBSOPCNTEXT-84]
|
||||
_ = x[IBSOPSAM-85]
|
||||
_ = x[IBSRDWROPCNT-86]
|
||||
_ = x[IBSRIPINVALIDCHK-87]
|
||||
_ = x[IBS_FETCH_CTLX-88]
|
||||
_ = x[IBS_OPDATA4-89]
|
||||
_ = x[IBS_OPFUSE-90]
|
||||
_ = x[IBS_PREVENTHOST-91]
|
||||
_ = x[IBS_ZEN4-92]
|
||||
_ = x[IDPRED_CTRL-93]
|
||||
_ = x[INT_WBINVD-94]
|
||||
_ = x[INVLPGB-95]
|
||||
_ = x[KEYLOCKER-96]
|
||||
_ = x[KEYLOCKERW-97]
|
||||
_ = x[LAHF-98]
|
||||
_ = x[LAM-99]
|
||||
_ = x[LBRVIRT-100]
|
||||
_ = x[LZCNT-101]
|
||||
_ = x[MCAOVERFLOW-102]
|
||||
_ = x[MCDT_NO-103]
|
||||
_ = x[MCOMMIT-104]
|
||||
_ = x[MD_CLEAR-105]
|
||||
_ = x[MMX-106]
|
||||
_ = x[MMXEXT-107]
|
||||
_ = x[MOVBE-108]
|
||||
_ = x[MOVDIR64B-109]
|
||||
_ = x[MOVDIRI-110]
|
||||
_ = x[MOVSB_ZL-111]
|
||||
_ = x[MOVU-112]
|
||||
_ = x[MPX-113]
|
||||
_ = x[MSRIRC-114]
|
||||
_ = x[MSRLIST-115]
|
||||
_ = x[MSR_PAGEFLUSH-116]
|
||||
_ = x[NRIPS-117]
|
||||
_ = x[NX-118]
|
||||
_ = x[OSXSAVE-119]
|
||||
_ = x[PCONFIG-120]
|
||||
_ = x[POPCNT-121]
|
||||
_ = x[PPIN-122]
|
||||
_ = x[PREFETCHI-123]
|
||||
_ = x[PSFD-124]
|
||||
_ = x[RDPRU-125]
|
||||
_ = x[RDRAND-126]
|
||||
_ = x[RDSEED-127]
|
||||
_ = x[RDTSCP-128]
|
||||
_ = x[RRSBA_CTRL-129]
|
||||
_ = x[RTM-130]
|
||||
_ = x[RTM_ALWAYS_ABORT-131]
|
||||
_ = x[SBPB-132]
|
||||
_ = x[SERIALIZE-133]
|
||||
_ = x[SEV-134]
|
||||
_ = x[SEV_64BIT-135]
|
||||
_ = x[SEV_ALTERNATIVE-136]
|
||||
_ = x[SEV_DEBUGSWAP-137]
|
||||
_ = x[SEV_ES-138]
|
||||
_ = x[SEV_RESTRICTED-139]
|
||||
_ = x[SEV_SNP-140]
|
||||
_ = x[SGX-141]
|
||||
_ = x[SGXLC-142]
|
||||
_ = x[SHA-143]
|
||||
_ = x[SME-144]
|
||||
_ = x[SME_COHERENT-145]
|
||||
_ = x[SPEC_CTRL_SSBD-146]
|
||||
_ = x[SRBDS_CTRL-147]
|
||||
_ = x[SRSO_MSR_FIX-148]
|
||||
_ = x[SRSO_NO-149]
|
||||
_ = x[SRSO_USER_KERNEL_NO-150]
|
||||
_ = x[SSE-151]
|
||||
_ = x[SSE2-152]
|
||||
_ = x[SSE3-153]
|
||||
_ = x[SSE4-154]
|
||||
_ = x[SSE42-155]
|
||||
_ = x[SSE4A-156]
|
||||
_ = x[SSSE3-157]
|
||||
_ = x[STIBP-158]
|
||||
_ = x[STIBP_ALWAYSON-159]
|
||||
_ = x[STOSB_SHORT-160]
|
||||
_ = x[SUCCOR-161]
|
||||
_ = x[SVM-162]
|
||||
_ = x[SVMDA-163]
|
||||
_ = x[SVMFBASID-164]
|
||||
_ = x[SVML-165]
|
||||
_ = x[SVMNP-166]
|
||||
_ = x[SVMPF-167]
|
||||
_ = x[SVMPFT-168]
|
||||
_ = x[SYSCALL-169]
|
||||
_ = x[SYSEE-170]
|
||||
_ = x[TBM-171]
|
||||
_ = x[TDX_GUEST-172]
|
||||
_ = x[TLB_FLUSH_NESTED-173]
|
||||
_ = x[TME-174]
|
||||
_ = x[TOPEXT-175]
|
||||
_ = x[TSCRATEMSR-176]
|
||||
_ = x[TSXLDTRK-177]
|
||||
_ = x[VAES-178]
|
||||
_ = x[VMCBCLEAN-179]
|
||||
_ = x[VMPL-180]
|
||||
_ = x[VMSA_REGPROT-181]
|
||||
_ = x[VMX-182]
|
||||
_ = x[VPCLMULQDQ-183]
|
||||
_ = x[VTE-184]
|
||||
_ = x[WAITPKG-185]
|
||||
_ = x[WBNOINVD-186]
|
||||
_ = x[WRMSRNS-187]
|
||||
_ = x[X87-188]
|
||||
_ = x[XGETBV1-189]
|
||||
_ = x[XOP-190]
|
||||
_ = x[XSAVE-191]
|
||||
_ = x[XSAVEC-192]
|
||||
_ = x[XSAVEOPT-193]
|
||||
_ = x[XSAVES-194]
|
||||
_ = x[AESARM-195]
|
||||
_ = x[ARMCPUID-196]
|
||||
_ = x[ASIMD-197]
|
||||
_ = x[ASIMDDP-198]
|
||||
_ = x[ASIMDHP-199]
|
||||
_ = x[ASIMDRDM-200]
|
||||
_ = x[ATOMICS-201]
|
||||
_ = x[CRC32-202]
|
||||
_ = x[DCPOP-203]
|
||||
_ = x[EVTSTRM-204]
|
||||
_ = x[FCMA-205]
|
||||
_ = x[FP-206]
|
||||
_ = x[FPHP-207]
|
||||
_ = x[GPA-208]
|
||||
_ = x[JSCVT-209]
|
||||
_ = x[LRCPC-210]
|
||||
_ = x[PMULL-211]
|
||||
_ = x[SHA1-212]
|
||||
_ = x[SHA2-213]
|
||||
_ = x[SHA3-214]
|
||||
_ = x[SHA512-215]
|
||||
_ = x[SM3-216]
|
||||
_ = x[SM4-217]
|
||||
_ = x[SVE-218]
|
||||
_ = x[lastID-219]
|
||||
_ = x[AMXFP8-8]
|
||||
_ = x[AMXTILE-9]
|
||||
_ = x[APX_F-10]
|
||||
_ = x[AVX-11]
|
||||
_ = x[AVX10-12]
|
||||
_ = x[AVX10_128-13]
|
||||
_ = x[AVX10_256-14]
|
||||
_ = x[AVX10_512-15]
|
||||
_ = x[AVX2-16]
|
||||
_ = x[AVX512BF16-17]
|
||||
_ = x[AVX512BITALG-18]
|
||||
_ = x[AVX512BW-19]
|
||||
_ = x[AVX512CD-20]
|
||||
_ = x[AVX512DQ-21]
|
||||
_ = x[AVX512ER-22]
|
||||
_ = x[AVX512F-23]
|
||||
_ = x[AVX512FP16-24]
|
||||
_ = x[AVX512IFMA-25]
|
||||
_ = x[AVX512PF-26]
|
||||
_ = x[AVX512VBMI-27]
|
||||
_ = x[AVX512VBMI2-28]
|
||||
_ = x[AVX512VL-29]
|
||||
_ = x[AVX512VNNI-30]
|
||||
_ = x[AVX512VP2INTERSECT-31]
|
||||
_ = x[AVX512VPOPCNTDQ-32]
|
||||
_ = x[AVXIFMA-33]
|
||||
_ = x[AVXNECONVERT-34]
|
||||
_ = x[AVXSLOW-35]
|
||||
_ = x[AVXVNNI-36]
|
||||
_ = x[AVXVNNIINT8-37]
|
||||
_ = x[AVXVNNIINT16-38]
|
||||
_ = x[BHI_CTRL-39]
|
||||
_ = x[BMI1-40]
|
||||
_ = x[BMI2-41]
|
||||
_ = x[CETIBT-42]
|
||||
_ = x[CETSS-43]
|
||||
_ = x[CLDEMOTE-44]
|
||||
_ = x[CLMUL-45]
|
||||
_ = x[CLZERO-46]
|
||||
_ = x[CMOV-47]
|
||||
_ = x[CMPCCXADD-48]
|
||||
_ = x[CMPSB_SCADBS_SHORT-49]
|
||||
_ = x[CMPXCHG8-50]
|
||||
_ = x[CPBOOST-51]
|
||||
_ = x[CPPC-52]
|
||||
_ = x[CX16-53]
|
||||
_ = x[EFER_LMSLE_UNS-54]
|
||||
_ = x[ENQCMD-55]
|
||||
_ = x[ERMS-56]
|
||||
_ = x[F16C-57]
|
||||
_ = x[FLUSH_L1D-58]
|
||||
_ = x[FMA3-59]
|
||||
_ = x[FMA4-60]
|
||||
_ = x[FP128-61]
|
||||
_ = x[FP256-62]
|
||||
_ = x[FSRM-63]
|
||||
_ = x[FXSR-64]
|
||||
_ = x[FXSROPT-65]
|
||||
_ = x[GFNI-66]
|
||||
_ = x[HLE-67]
|
||||
_ = x[HRESET-68]
|
||||
_ = x[HTT-69]
|
||||
_ = x[HWA-70]
|
||||
_ = x[HYBRID_CPU-71]
|
||||
_ = x[HYPERVISOR-72]
|
||||
_ = x[IA32_ARCH_CAP-73]
|
||||
_ = x[IA32_CORE_CAP-74]
|
||||
_ = x[IBPB-75]
|
||||
_ = x[IBPB_BRTYPE-76]
|
||||
_ = x[IBRS-77]
|
||||
_ = x[IBRS_PREFERRED-78]
|
||||
_ = x[IBRS_PROVIDES_SMP-79]
|
||||
_ = x[IBS-80]
|
||||
_ = x[IBSBRNTRGT-81]
|
||||
_ = x[IBSFETCHSAM-82]
|
||||
_ = x[IBSFFV-83]
|
||||
_ = x[IBSOPCNT-84]
|
||||
_ = x[IBSOPCNTEXT-85]
|
||||
_ = x[IBSOPSAM-86]
|
||||
_ = x[IBSRDWROPCNT-87]
|
||||
_ = x[IBSRIPINVALIDCHK-88]
|
||||
_ = x[IBS_FETCH_CTLX-89]
|
||||
_ = x[IBS_OPDATA4-90]
|
||||
_ = x[IBS_OPFUSE-91]
|
||||
_ = x[IBS_PREVENTHOST-92]
|
||||
_ = x[IBS_ZEN4-93]
|
||||
_ = x[IDPRED_CTRL-94]
|
||||
_ = x[INT_WBINVD-95]
|
||||
_ = x[INVLPGB-96]
|
||||
_ = x[KEYLOCKER-97]
|
||||
_ = x[KEYLOCKERW-98]
|
||||
_ = x[LAHF-99]
|
||||
_ = x[LAM-100]
|
||||
_ = x[LBRVIRT-101]
|
||||
_ = x[LZCNT-102]
|
||||
_ = x[MCAOVERFLOW-103]
|
||||
_ = x[MCDT_NO-104]
|
||||
_ = x[MCOMMIT-105]
|
||||
_ = x[MD_CLEAR-106]
|
||||
_ = x[MMX-107]
|
||||
_ = x[MMXEXT-108]
|
||||
_ = x[MOVBE-109]
|
||||
_ = x[MOVDIR64B-110]
|
||||
_ = x[MOVDIRI-111]
|
||||
_ = x[MOVSB_ZL-112]
|
||||
_ = x[MOVU-113]
|
||||
_ = x[MPX-114]
|
||||
_ = x[MSRIRC-115]
|
||||
_ = x[MSRLIST-116]
|
||||
_ = x[MSR_PAGEFLUSH-117]
|
||||
_ = x[NRIPS-118]
|
||||
_ = x[NX-119]
|
||||
_ = x[OSXSAVE-120]
|
||||
_ = x[PCONFIG-121]
|
||||
_ = x[POPCNT-122]
|
||||
_ = x[PPIN-123]
|
||||
_ = x[PREFETCHI-124]
|
||||
_ = x[PSFD-125]
|
||||
_ = x[RDPRU-126]
|
||||
_ = x[RDRAND-127]
|
||||
_ = x[RDSEED-128]
|
||||
_ = x[RDTSCP-129]
|
||||
_ = x[RRSBA_CTRL-130]
|
||||
_ = x[RTM-131]
|
||||
_ = x[RTM_ALWAYS_ABORT-132]
|
||||
_ = x[SBPB-133]
|
||||
_ = x[SERIALIZE-134]
|
||||
_ = x[SEV-135]
|
||||
_ = x[SEV_64BIT-136]
|
||||
_ = x[SEV_ALTERNATIVE-137]
|
||||
_ = x[SEV_DEBUGSWAP-138]
|
||||
_ = x[SEV_ES-139]
|
||||
_ = x[SEV_RESTRICTED-140]
|
||||
_ = x[SEV_SNP-141]
|
||||
_ = x[SGX-142]
|
||||
_ = x[SGXLC-143]
|
||||
_ = x[SHA-144]
|
||||
_ = x[SME-145]
|
||||
_ = x[SME_COHERENT-146]
|
||||
_ = x[SPEC_CTRL_SSBD-147]
|
||||
_ = x[SRBDS_CTRL-148]
|
||||
_ = x[SRSO_MSR_FIX-149]
|
||||
_ = x[SRSO_NO-150]
|
||||
_ = x[SRSO_USER_KERNEL_NO-151]
|
||||
_ = x[SSE-152]
|
||||
_ = x[SSE2-153]
|
||||
_ = x[SSE3-154]
|
||||
_ = x[SSE4-155]
|
||||
_ = x[SSE42-156]
|
||||
_ = x[SSE4A-157]
|
||||
_ = x[SSSE3-158]
|
||||
_ = x[STIBP-159]
|
||||
_ = x[STIBP_ALWAYSON-160]
|
||||
_ = x[STOSB_SHORT-161]
|
||||
_ = x[SUCCOR-162]
|
||||
_ = x[SVM-163]
|
||||
_ = x[SVMDA-164]
|
||||
_ = x[SVMFBASID-165]
|
||||
_ = x[SVML-166]
|
||||
_ = x[SVMNP-167]
|
||||
_ = x[SVMPF-168]
|
||||
_ = x[SVMPFT-169]
|
||||
_ = x[SYSCALL-170]
|
||||
_ = x[SYSEE-171]
|
||||
_ = x[TBM-172]
|
||||
_ = x[TDX_GUEST-173]
|
||||
_ = x[TLB_FLUSH_NESTED-174]
|
||||
_ = x[TME-175]
|
||||
_ = x[TOPEXT-176]
|
||||
_ = x[TSCRATEMSR-177]
|
||||
_ = x[TSXLDTRK-178]
|
||||
_ = x[VAES-179]
|
||||
_ = x[VMCBCLEAN-180]
|
||||
_ = x[VMPL-181]
|
||||
_ = x[VMSA_REGPROT-182]
|
||||
_ = x[VMX-183]
|
||||
_ = x[VPCLMULQDQ-184]
|
||||
_ = x[VTE-185]
|
||||
_ = x[WAITPKG-186]
|
||||
_ = x[WBNOINVD-187]
|
||||
_ = x[WRMSRNS-188]
|
||||
_ = x[X87-189]
|
||||
_ = x[XGETBV1-190]
|
||||
_ = x[XOP-191]
|
||||
_ = x[XSAVE-192]
|
||||
_ = x[XSAVEC-193]
|
||||
_ = x[XSAVEOPT-194]
|
||||
_ = x[XSAVES-195]
|
||||
_ = x[AESARM-196]
|
||||
_ = x[ARMCPUID-197]
|
||||
_ = x[ASIMD-198]
|
||||
_ = x[ASIMDDP-199]
|
||||
_ = x[ASIMDHP-200]
|
||||
_ = x[ASIMDRDM-201]
|
||||
_ = x[ATOMICS-202]
|
||||
_ = x[CRC32-203]
|
||||
_ = x[DCPOP-204]
|
||||
_ = x[EVTSTRM-205]
|
||||
_ = x[FCMA-206]
|
||||
_ = x[FP-207]
|
||||
_ = x[FPHP-208]
|
||||
_ = x[GPA-209]
|
||||
_ = x[JSCVT-210]
|
||||
_ = x[LRCPC-211]
|
||||
_ = x[PMULL-212]
|
||||
_ = x[SHA1-213]
|
||||
_ = x[SHA2-214]
|
||||
_ = x[SHA3-215]
|
||||
_ = x[SHA512-216]
|
||||
_ = x[SM3-217]
|
||||
_ = x[SM4-218]
|
||||
_ = x[SVE-219]
|
||||
_ = x[lastID-220]
|
||||
_ = x[firstID-0]
|
||||
}
|
||||
|
||||
const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
|
||||
const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXFP8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
|
||||
|
||||
var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 67, 70, 75, 84, 93, 102, 106, 116, 128, 136, 144, 152, 160, 167, 177, 187, 195, 205, 216, 224, 234, 252, 267, 274, 286, 293, 300, 311, 323, 331, 335, 339, 345, 350, 358, 363, 369, 373, 382, 400, 408, 415, 419, 423, 437, 443, 447, 451, 460, 464, 468, 473, 478, 482, 486, 493, 497, 500, 506, 509, 512, 522, 532, 545, 558, 562, 573, 577, 591, 608, 611, 621, 632, 638, 646, 657, 665, 677, 693, 707, 718, 728, 743, 751, 762, 772, 779, 788, 798, 802, 805, 812, 817, 828, 835, 842, 850, 853, 859, 864, 873, 880, 888, 892, 895, 901, 908, 921, 926, 928, 935, 942, 948, 952, 961, 965, 970, 976, 982, 988, 998, 1001, 1017, 1021, 1030, 1033, 1042, 1057, 1070, 1076, 1090, 1097, 1100, 1105, 1108, 1111, 1123, 1137, 1147, 1159, 1166, 1185, 1188, 1192, 1196, 1200, 1205, 1210, 1215, 1220, 1234, 1245, 1251, 1254, 1259, 1268, 1272, 1277, 1282, 1288, 1295, 1300, 1303, 1312, 1328, 1331, 1337, 1347, 1355, 1359, 1368, 1372, 1384, 1387, 1397, 1400, 1407, 1415, 1422, 1425, 1432, 1435, 1440, 1446, 1454, 1460, 1466, 1474, 1479, 1486, 1493, 1501, 1508, 1513, 1518, 1525, 1529, 1531, 1535, 1538, 1543, 1548, 1553, 1557, 1561, 1565, 1571, 1574, 1577, 1580, 1586}
|
||||
var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 61, 68, 73, 76, 81, 90, 99, 108, 112, 122, 134, 142, 150, 158, 166, 173, 183, 193, 201, 211, 222, 230, 240, 258, 273, 280, 292, 299, 306, 317, 329, 337, 341, 345, 351, 356, 364, 369, 375, 379, 388, 406, 414, 421, 425, 429, 443, 449, 453, 457, 466, 470, 474, 479, 484, 488, 492, 499, 503, 506, 512, 515, 518, 528, 538, 551, 564, 568, 579, 583, 597, 614, 617, 627, 638, 644, 652, 663, 671, 683, 699, 713, 724, 734, 749, 757, 768, 778, 785, 794, 804, 808, 811, 818, 823, 834, 841, 848, 856, 859, 865, 870, 879, 886, 894, 898, 901, 907, 914, 927, 932, 934, 941, 948, 954, 958, 967, 971, 976, 982, 988, 994, 1004, 1007, 1023, 1027, 1036, 1039, 1048, 1063, 1076, 1082, 1096, 1103, 1106, 1111, 1114, 1117, 1129, 1143, 1153, 1165, 1172, 1191, 1194, 1198, 1202, 1206, 1211, 1216, 1221, 1226, 1240, 1251, 1257, 1260, 1265, 1274, 1278, 1283, 1288, 1294, 1301, 1306, 1309, 1318, 1334, 1337, 1343, 1353, 1361, 1365, 1374, 1378, 1390, 1393, 1403, 1406, 1413, 1421, 1428, 1431, 1438, 1441, 1446, 1452, 1460, 1466, 1472, 1480, 1485, 1492, 1499, 1507, 1514, 1519, 1524, 1531, 1535, 1537, 1541, 1544, 1549, 1554, 1559, 1563, 1567, 1571, 1577, 1580, 1583, 1586, 1592}
|
||||
|
||||
func (i FeatureID) String() string {
|
||||
if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) {
|
||||
@@ -270,12 +271,17 @@ func _() {
|
||||
_ = x[AMCC-23]
|
||||
_ = x[Qualcomm-24]
|
||||
_ = x[Marvell-25]
|
||||
_ = x[lastVendor-26]
|
||||
_ = x[QEMU-26]
|
||||
_ = x[QNX-27]
|
||||
_ = x[ACRN-28]
|
||||
_ = x[SRE-29]
|
||||
_ = x[Apple-30]
|
||||
_ = x[lastVendor-31]
|
||||
}
|
||||
|
||||
const _Vendor_name = "VendorUnknownIntelAMDVIATransmetaNSCKVMMSVMVMwareXenHVMBhyveHygonSiSRDCAmpereARMBroadcomCaviumDECFujitsuInfineonMotorolaNVIDIAAMCCQualcommMarvelllastVendor"
|
||||
const _Vendor_name = "VendorUnknownIntelAMDVIATransmetaNSCKVMMSVMVMwareXenHVMBhyveHygonSiSRDCAmpereARMBroadcomCaviumDECFujitsuInfineonMotorolaNVIDIAAMCCQualcommMarvellQEMUQNXACRNSREApplelastVendor"
|
||||
|
||||
var _Vendor_index = [...]uint8{0, 13, 18, 21, 24, 33, 36, 39, 43, 49, 55, 60, 65, 68, 71, 77, 80, 88, 94, 97, 104, 112, 120, 126, 130, 138, 145, 155}
|
||||
var _Vendor_index = [...]uint8{0, 13, 18, 21, 24, 33, 36, 39, 43, 49, 55, 60, 65, 68, 71, 77, 80, 88, 94, 97, 104, 112, 120, 126, 130, 138, 145, 149, 152, 156, 159, 164, 174}
|
||||
|
||||
func (i Vendor) String() string {
|
||||
if i < 0 || i >= Vendor(len(_Vendor_index)-1) {
|
||||
|
||||
202
vendor/github.com/minio/crc64nvme/LICENSE
generated
vendored
Normal file
202
vendor/github.com/minio/crc64nvme/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
20
vendor/github.com/minio/crc64nvme/README.md
generated
vendored
Normal file
20
vendor/github.com/minio/crc64nvme/README.md
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
|
||||
## crc64nvme
|
||||
|
||||
This Golang package calculates CRC64 checksums using carryless-multiplication accelerated with SIMD instructions for both ARM and x86. It is based on the NVME polynomial as specified in the [NVM Express® NVM Command Set Specification](https://nvmexpress.org/wp-content/uploads/NVM-Express-NVM-Command-Set-Specification-1.0d-2023.12.28-Ratified.pdf).
|
||||
|
||||
The code is based on the [crc64fast-nvme](https://github.com/awesomized/crc64fast-nvme.git) package in Rust and is released under the Apache 2.0 license.
|
||||
|
||||
For more background on the exact technique used, see this [Fast CRC Computation for Generic Polynomials Using PCLMULQDQ Instruction](https://web.archive.org/web/20131224125630/https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf) paper.
|
||||
|
||||
### Performance
|
||||
|
||||
To follow.
|
||||
|
||||
### Requirements
|
||||
|
||||
All Go versions >= 1.22 are supported.
|
||||
|
||||
### Contributing
|
||||
|
||||
Contributions are welcome, please send PRs for any enhancements.
|
||||
180
vendor/github.com/minio/crc64nvme/crc64.go
generated
vendored
Normal file
180
vendor/github.com/minio/crc64nvme/crc64.go
generated
vendored
Normal file
@@ -0,0 +1,180 @@
|
||||
// Copyright (c) 2025 Minio Inc. All rights reserved.
|
||||
// Use of this source code is governed by a license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Package crc64nvme implements the 64-bit cyclic redundancy check with NVME polynomial.
|
||||
package crc64nvme
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"hash"
|
||||
"sync"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// The size of a CRC-64 checksum in bytes.
|
||||
Size = 8
|
||||
|
||||
// The NVME polynoimial (reversed, as used by Go)
|
||||
NVME = 0x9a6c9329ac4bc9b5
|
||||
)
|
||||
|
||||
var (
|
||||
// precalculated table.
|
||||
nvmeTable = makeTable(NVME)
|
||||
)
|
||||
|
||||
// table is a 256-word table representing the polynomial for efficient processing.
|
||||
type table [256]uint64
|
||||
|
||||
var (
|
||||
slicing8TablesBuildOnce sync.Once
|
||||
slicing8TableNVME *[8]table
|
||||
)
|
||||
|
||||
func buildSlicing8TablesOnce() {
|
||||
slicing8TablesBuildOnce.Do(buildSlicing8Tables)
|
||||
}
|
||||
|
||||
func buildSlicing8Tables() {
|
||||
slicing8TableNVME = makeSlicingBy8Table(makeTable(NVME))
|
||||
}
|
||||
|
||||
func makeTable(poly uint64) *table {
|
||||
t := new(table)
|
||||
for i := 0; i < 256; i++ {
|
||||
crc := uint64(i)
|
||||
for j := 0; j < 8; j++ {
|
||||
if crc&1 == 1 {
|
||||
crc = (crc >> 1) ^ poly
|
||||
} else {
|
||||
crc >>= 1
|
||||
}
|
||||
}
|
||||
t[i] = crc
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func makeSlicingBy8Table(t *table) *[8]table {
|
||||
var helperTable [8]table
|
||||
helperTable[0] = *t
|
||||
for i := 0; i < 256; i++ {
|
||||
crc := t[i]
|
||||
for j := 1; j < 8; j++ {
|
||||
crc = t[crc&0xff] ^ (crc >> 8)
|
||||
helperTable[j][i] = crc
|
||||
}
|
||||
}
|
||||
return &helperTable
|
||||
}
|
||||
|
||||
// digest represents the partial evaluation of a checksum.
|
||||
type digest struct {
|
||||
crc uint64
|
||||
}
|
||||
|
||||
// New creates a new hash.Hash64 computing the CRC-64 checksum using the
|
||||
// NVME polynomial. Its Sum method will lay the
|
||||
// value out in big-endian byte order. The returned Hash64 also
|
||||
// implements [encoding.BinaryMarshaler] and [encoding.BinaryUnmarshaler] to
|
||||
// marshal and unmarshal the internal state of the hash.
|
||||
func New() hash.Hash64 { return &digest{0} }
|
||||
|
||||
func (d *digest) Size() int { return Size }
|
||||
|
||||
func (d *digest) BlockSize() int { return 1 }
|
||||
|
||||
func (d *digest) Reset() { d.crc = 0 }
|
||||
|
||||
const (
|
||||
magic = "crc\x02"
|
||||
marshaledSize = len(magic) + 8 + 8
|
||||
)
|
||||
|
||||
func (d *digest) MarshalBinary() ([]byte, error) {
|
||||
b := make([]byte, 0, marshaledSize)
|
||||
b = append(b, magic...)
|
||||
b = binary.BigEndian.AppendUint64(b, tableSum)
|
||||
b = binary.BigEndian.AppendUint64(b, d.crc)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (d *digest) UnmarshalBinary(b []byte) error {
|
||||
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
|
||||
return errors.New("hash/crc64: invalid hash state identifier")
|
||||
}
|
||||
if len(b) != marshaledSize {
|
||||
return errors.New("hash/crc64: invalid hash state size")
|
||||
}
|
||||
if tableSum != binary.BigEndian.Uint64(b[4:]) {
|
||||
return errors.New("hash/crc64: tables do not match")
|
||||
}
|
||||
d.crc = binary.BigEndian.Uint64(b[12:])
|
||||
return nil
|
||||
}
|
||||
|
||||
func update(crc uint64, p []byte) uint64 {
|
||||
if hasAsm && len(p) > 127 {
|
||||
ptr := unsafe.Pointer(&p[0])
|
||||
if align := (uintptr(ptr)+15)&^0xf - uintptr(ptr); align > 0 {
|
||||
// Align to 16-byte boundary.
|
||||
crc = update(crc, p[:align])
|
||||
p = p[align:]
|
||||
}
|
||||
runs := len(p) / 128
|
||||
crc = updateAsm(crc, p[:128*runs])
|
||||
return update(crc, p[128*runs:])
|
||||
}
|
||||
|
||||
buildSlicing8TablesOnce()
|
||||
crc = ^crc
|
||||
// table comparison is somewhat expensive, so avoid it for small sizes
|
||||
for len(p) >= 64 {
|
||||
var helperTable = slicing8TableNVME
|
||||
// Update using slicing-by-8
|
||||
for len(p) > 8 {
|
||||
crc ^= binary.LittleEndian.Uint64(p)
|
||||
crc = helperTable[7][crc&0xff] ^
|
||||
helperTable[6][(crc>>8)&0xff] ^
|
||||
helperTable[5][(crc>>16)&0xff] ^
|
||||
helperTable[4][(crc>>24)&0xff] ^
|
||||
helperTable[3][(crc>>32)&0xff] ^
|
||||
helperTable[2][(crc>>40)&0xff] ^
|
||||
helperTable[1][(crc>>48)&0xff] ^
|
||||
helperTable[0][crc>>56]
|
||||
p = p[8:]
|
||||
}
|
||||
}
|
||||
// For reminders or small sizes
|
||||
for _, v := range p {
|
||||
crc = nvmeTable[byte(crc)^v] ^ (crc >> 8)
|
||||
}
|
||||
return ^crc
|
||||
}
|
||||
|
||||
// Update returns the result of adding the bytes in p to the crc.
|
||||
func Update(crc uint64, p []byte) uint64 {
|
||||
return update(crc, p)
|
||||
}
|
||||
|
||||
func (d *digest) Write(p []byte) (n int, err error) {
|
||||
d.crc = update(d.crc, p)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (d *digest) Sum64() uint64 { return d.crc }
|
||||
|
||||
func (d *digest) Sum(in []byte) []byte {
|
||||
s := d.Sum64()
|
||||
return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
|
||||
}
|
||||
|
||||
// Checksum returns the CRC-64 checksum of data
|
||||
// using the NVME polynomial.
|
||||
func Checksum(data []byte) uint64 { return update(0, data) }
|
||||
|
||||
// ISO tablesum of NVME poly
|
||||
const tableSum = 0x8ddd9ee4402c7163
|
||||
15
vendor/github.com/minio/crc64nvme/crc64_amd64.go
generated
vendored
Normal file
15
vendor/github.com/minio/crc64nvme/crc64_amd64.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
// Copyright (c) 2025 Minio Inc. All rights reserved.
|
||||
// Use of this source code is governed by a license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
//go:build !noasm && !appengine && !gccgo
|
||||
|
||||
package crc64nvme
|
||||
|
||||
import (
|
||||
"github.com/klauspost/cpuid/v2"
|
||||
)
|
||||
|
||||
var hasAsm = cpuid.CPU.Supports(cpuid.SSE2, cpuid.CLMUL, cpuid.SSE4)
|
||||
|
||||
func updateAsm(crc uint64, p []byte) (checksum uint64)
|
||||
157
vendor/github.com/minio/crc64nvme/crc64_amd64.s
generated
vendored
Normal file
157
vendor/github.com/minio/crc64nvme/crc64_amd64.s
generated
vendored
Normal file
@@ -0,0 +1,157 @@
|
||||
// Copyright (c) 2025 Minio Inc. All rights reserved.
|
||||
// Use of this source code is governed by a license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
//go:build !noasm && !appengine && !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
TEXT ·updateAsm(SB), $0-40
|
||||
MOVQ crc+0(FP), AX // checksum
|
||||
MOVQ p_base+8(FP), SI // start pointer
|
||||
MOVQ p_len+16(FP), CX // length of buffer
|
||||
NOTQ AX
|
||||
SHRQ $7, CX
|
||||
CMPQ CX, $1
|
||||
JLT skip128
|
||||
|
||||
VMOVDQA 0x00(SI), X0
|
||||
VMOVDQA 0x10(SI), X1
|
||||
VMOVDQA 0x20(SI), X2
|
||||
VMOVDQA 0x30(SI), X3
|
||||
VMOVDQA 0x40(SI), X4
|
||||
VMOVDQA 0x50(SI), X5
|
||||
VMOVDQA 0x60(SI), X6
|
||||
VMOVDQA 0x70(SI), X7
|
||||
MOVQ AX, X8
|
||||
PXOR X8, X0
|
||||
CMPQ CX, $1
|
||||
JE tail128
|
||||
|
||||
MOVQ $0xa1ca681e733f9c40, AX
|
||||
MOVQ AX, X8
|
||||
MOVQ $0x5f852fb61e8d92dc, AX
|
||||
PINSRQ $0x1, AX, X9
|
||||
|
||||
loop128:
|
||||
ADDQ $128, SI
|
||||
SUBQ $1, CX
|
||||
VMOVDQA X0, X10
|
||||
PCLMULQDQ $0x00, X8, X10
|
||||
PCLMULQDQ $0x11, X9, X0
|
||||
PXOR X10, X0
|
||||
PXOR 0(SI), X0
|
||||
VMOVDQA X1, X10
|
||||
PCLMULQDQ $0x00, X8, X10
|
||||
PCLMULQDQ $0x11, X9, X1
|
||||
PXOR X10, X1
|
||||
PXOR 0x10(SI), X1
|
||||
VMOVDQA X2, X10
|
||||
PCLMULQDQ $0x00, X8, X10
|
||||
PCLMULQDQ $0x11, X9, X2
|
||||
PXOR X10, X2
|
||||
PXOR 0x20(SI), X2
|
||||
VMOVDQA X3, X10
|
||||
PCLMULQDQ $0x00, X8, X10
|
||||
PCLMULQDQ $0x11, X9, X3
|
||||
PXOR X10, X3
|
||||
PXOR 0x30(SI), X3
|
||||
VMOVDQA X4, X10
|
||||
PCLMULQDQ $0x00, X8, X10
|
||||
PCLMULQDQ $0x11, X9, X4
|
||||
PXOR X10, X4
|
||||
PXOR 0x40(SI), X4
|
||||
VMOVDQA X5, X10
|
||||
PCLMULQDQ $0x00, X8, X10
|
||||
PCLMULQDQ $0x11, X9, X5
|
||||
PXOR X10, X5
|
||||
PXOR 0x50(SI), X5
|
||||
VMOVDQA X6, X10
|
||||
PCLMULQDQ $0x00, X8, X10
|
||||
PCLMULQDQ $0x11, X9, X6
|
||||
PXOR X10, X6
|
||||
PXOR 0x60(SI), X6
|
||||
VMOVDQA X7, X10
|
||||
PCLMULQDQ $0x00, X8, X10
|
||||
PCLMULQDQ $0x11, X9, X7
|
||||
PXOR X10, X7
|
||||
PXOR 0x70(SI), X7
|
||||
CMPQ CX, $1
|
||||
JGT loop128
|
||||
|
||||
tail128:
|
||||
MOVQ $0xd083dd594d96319d, AX
|
||||
MOVQ AX, X11
|
||||
PCLMULQDQ $0x00, X0, X11
|
||||
MOVQ $0x946588403d4adcbc, AX
|
||||
PINSRQ $0x1, AX, X12
|
||||
PCLMULQDQ $0x11, X12, X0
|
||||
PXOR X11, X7
|
||||
PXOR X0, X7
|
||||
MOVQ $0x3c255f5ebc414423, AX
|
||||
MOVQ AX, X11
|
||||
PCLMULQDQ $0x00, X1, X11
|
||||
MOVQ $0x34f5a24e22d66e90, AX
|
||||
PINSRQ $0x1, AX, X12
|
||||
PCLMULQDQ $0x11, X12, X1
|
||||
PXOR X11, X1
|
||||
PXOR X7, X1
|
||||
MOVQ $0x7b0ab10dd0f809fe, AX
|
||||
MOVQ AX, X11
|
||||
PCLMULQDQ $0x00, X2, X11
|
||||
MOVQ $0x03363823e6e791e5, AX
|
||||
PINSRQ $0x1, AX, X12
|
||||
PCLMULQDQ $0x11, X12, X2
|
||||
PXOR X11, X2
|
||||
PXOR X1, X2
|
||||
MOVQ $0x0c32cdb31e18a84a, AX
|
||||
MOVQ AX, X11
|
||||
PCLMULQDQ $0x00, X3, X11
|
||||
MOVQ $0x62242240ace5045a, AX
|
||||
PINSRQ $0x1, AX, X12
|
||||
PCLMULQDQ $0x11, X12, X3
|
||||
PXOR X11, X3
|
||||
PXOR X2, X3
|
||||
MOVQ $0xbdd7ac0ee1a4a0f0, AX
|
||||
MOVQ AX, X11
|
||||
PCLMULQDQ $0x00, X4, X11
|
||||
MOVQ $0xa3ffdc1fe8e82a8b, AX
|
||||
PINSRQ $0x1, AX, X12
|
||||
PCLMULQDQ $0x11, X12, X4
|
||||
PXOR X11, X4
|
||||
PXOR X3, X4
|
||||
MOVQ $0xb0bc2e589204f500, AX
|
||||
MOVQ AX, X11
|
||||
PCLMULQDQ $0x00, X5, X11
|
||||
MOVQ $0xe1e0bb9d45d7a44c, AX
|
||||
PINSRQ $0x1, AX, X12
|
||||
PCLMULQDQ $0x11, X12, X5
|
||||
PXOR X11, X5
|
||||
PXOR X4, X5
|
||||
MOVQ $0xeadc41fd2ba3d420, AX
|
||||
MOVQ AX, X11
|
||||
PCLMULQDQ $0x00, X6, X11
|
||||
MOVQ $0x21e9761e252621ac, AX
|
||||
PINSRQ $0x1, AX, X12
|
||||
PCLMULQDQ $0x11, X12, X6
|
||||
PXOR X11, X6
|
||||
PXOR X5, X6
|
||||
MOVQ AX, X5
|
||||
PCLMULQDQ $0x00, X6, X5
|
||||
PSHUFD $0xee, X6, X6
|
||||
PXOR X5, X6
|
||||
MOVQ $0x27ecfa329aef9f77, AX
|
||||
MOVQ AX, X4
|
||||
PCLMULQDQ $0x00, X4, X6
|
||||
PEXTRQ $0, X6, BX
|
||||
MOVQ $0x34d926535897936b, AX
|
||||
MOVQ AX, X4
|
||||
PCLMULQDQ $0x00, X4, X6
|
||||
PXOR X5, X6
|
||||
PEXTRQ $1, X6, AX
|
||||
XORQ BX, AX
|
||||
|
||||
skip128:
|
||||
NOTQ AX
|
||||
MOVQ AX, checksum+32(FP)
|
||||
RET
|
||||
15
vendor/github.com/minio/crc64nvme/crc64_arm64.go
generated
vendored
Normal file
15
vendor/github.com/minio/crc64nvme/crc64_arm64.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
// Copyright (c) 2025 Minio Inc. All rights reserved.
|
||||
// Use of this source code is governed by a license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
//go:build !noasm && !appengine && !gccgo
|
||||
|
||||
package crc64nvme
|
||||
|
||||
import (
|
||||
"github.com/klauspost/cpuid/v2"
|
||||
)
|
||||
|
||||
var hasAsm = cpuid.CPU.Supports(cpuid.ASIMD) && cpuid.CPU.Supports(cpuid.PMULL)
|
||||
|
||||
func updateAsm(crc uint64, p []byte) (checksum uint64)
|
||||
157
vendor/github.com/minio/crc64nvme/crc64_arm64.s
generated
vendored
Normal file
157
vendor/github.com/minio/crc64nvme/crc64_arm64.s
generated
vendored
Normal file
@@ -0,0 +1,157 @@
|
||||
// Copyright (c) 2025 Minio Inc. All rights reserved.
|
||||
// Use of this source code is governed by a license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
//go:build !noasm && !appengine && !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
TEXT ·updateAsm(SB), $0-40
|
||||
MOVD crc+0(FP), R0 // checksum
|
||||
MOVD p_base+8(FP), R1 // start pointer
|
||||
MOVD p_len+16(FP), R2 // length of buffer
|
||||
MOVD $·const(SB), R3 // constants
|
||||
MVN R0, R0
|
||||
LSR $7, R2, R2
|
||||
CMP $1, R2
|
||||
BLT skip128
|
||||
|
||||
FLDPQ (R1), (F0, F1)
|
||||
FLDPQ 32(R1), (F2, F3)
|
||||
FLDPQ 64(R1), (F4, F5)
|
||||
FLDPQ 96(R1), (F6, F7)
|
||||
FMOVD R0, F8
|
||||
VMOVI $0, V9.B16
|
||||
VMOV V9.D[0], V8.D[1]
|
||||
VEOR V8.B16, V0.B16, V0.B16
|
||||
CMP $1, R2
|
||||
BEQ tail128
|
||||
|
||||
MOVD 112(R3), R4
|
||||
MOVD 120(R3), R5
|
||||
FMOVD R4, F8
|
||||
VDUP R5, V9.D2
|
||||
|
||||
loop128:
|
||||
ADD $128, R1, R1
|
||||
SUB $1, R2, R2
|
||||
VPMULL V0.D1, V8.D1, V10.Q1
|
||||
VPMULL2 V0.D2, V9.D2, V0.Q1
|
||||
FLDPQ (R1), (F11, F12)
|
||||
VEOR3 V0.B16, V11.B16, V10.B16, V0.B16
|
||||
VPMULL V1.D1, V8.D1, V10.Q1
|
||||
VPMULL2 V1.D2, V9.D2, V1.Q1
|
||||
VEOR3 V1.B16, V12.B16, V10.B16, V1.B16
|
||||
VPMULL V2.D1, V8.D1, V10.Q1
|
||||
VPMULL2 V2.D2, V9.D2, V2.Q1
|
||||
FLDPQ 32(R1), (F11, F12)
|
||||
VEOR3 V2.B16, V11.B16, V10.B16, V2.B16
|
||||
VPMULL V3.D1, V8.D1, V10.Q1
|
||||
VPMULL2 V3.D2, V9.D2, V3.Q1
|
||||
VEOR3 V3.B16, V12.B16, V10.B16, V3.B16
|
||||
VPMULL V4.D1, V8.D1, V10.Q1
|
||||
VPMULL2 V4.D2, V9.D2, V4.Q1
|
||||
FLDPQ 64(R1), (F11, F12)
|
||||
VEOR3 V4.B16, V11.B16, V10.B16, V4.B16
|
||||
VPMULL V5.D1, V8.D1, V10.Q1
|
||||
VPMULL2 V5.D2, V9.D2, V5.Q1
|
||||
VEOR3 V5.B16, V12.B16, V10.B16, V5.B16
|
||||
VPMULL V6.D1, V8.D1, V10.Q1
|
||||
VPMULL2 V6.D2, V9.D2, V6.Q1
|
||||
FLDPQ 96(R1), (F11, F12)
|
||||
VEOR3 V6.B16, V11.B16, V10.B16, V6.B16
|
||||
VPMULL V7.D1, V8.D1, V10.Q1
|
||||
VPMULL2 V7.D2, V9.D2, V7.Q1
|
||||
VEOR3 V7.B16, V12.B16, V10.B16, V7.B16
|
||||
CMP $1, R2
|
||||
BHI loop128
|
||||
|
||||
tail128:
|
||||
MOVD (R3), R4
|
||||
FMOVD R4, F11
|
||||
VPMULL V0.D1, V11.D1, V11.Q1
|
||||
MOVD 8(R3), R4
|
||||
VDUP R4, V12.D2
|
||||
VPMULL2 V0.D2, V12.D2, V0.Q1
|
||||
VEOR3 V0.B16, V7.B16, V11.B16, V7.B16
|
||||
MOVD 16(R3), R4
|
||||
FMOVD R4, F11
|
||||
VPMULL V1.D1, V11.D1, V11.Q1
|
||||
MOVD 24(R3), R4
|
||||
VDUP R4, V12.D2
|
||||
VPMULL2 V1.D2, V12.D2, V1.Q1
|
||||
VEOR3 V1.B16, V11.B16, V7.B16, V1.B16
|
||||
MOVD 32(R3), R4
|
||||
FMOVD R4, F11
|
||||
VPMULL V2.D1, V11.D1, V11.Q1
|
||||
MOVD 40(R3), R4
|
||||
VDUP R4, V12.D2
|
||||
VPMULL2 V2.D2, V12.D2, V2.Q1
|
||||
VEOR3 V2.B16, V11.B16, V1.B16, V2.B16
|
||||
MOVD 48(R3), R4
|
||||
FMOVD R4, F11
|
||||
VPMULL V3.D1, V11.D1, V11.Q1
|
||||
MOVD 56(R3), R4
|
||||
VDUP R4, V12.D2
|
||||
VPMULL2 V3.D2, V12.D2, V3.Q1
|
||||
VEOR3 V3.B16, V11.B16, V2.B16, V3.B16
|
||||
MOVD 64(R3), R4
|
||||
FMOVD R4, F11
|
||||
VPMULL V4.D1, V11.D1, V11.Q1
|
||||
MOVD 72(R3), R4
|
||||
VDUP R4, V12.D2
|
||||
VPMULL2 V4.D2, V12.D2, V4.Q1
|
||||
VEOR3 V4.B16, V11.B16, V3.B16, V4.B16
|
||||
MOVD 80(R3), R4
|
||||
FMOVD R4, F11
|
||||
VPMULL V5.D1, V11.D1, V11.Q1
|
||||
MOVD 88(R3), R4
|
||||
VDUP R4, V12.D2
|
||||
VPMULL2 V5.D2, V12.D2, V5.Q1
|
||||
VEOR3 V5.B16, V11.B16, V4.B16, V5.B16
|
||||
MOVD 96(R3), R4
|
||||
FMOVD R4, F11
|
||||
VPMULL V6.D1, V11.D1, V11.Q1
|
||||
MOVD 104(R3), R4
|
||||
VDUP R4, V12.D2
|
||||
VPMULL2 V6.D2, V12.D2, V6.Q1
|
||||
VEOR3 V6.B16, V11.B16, V5.B16, V6.B16
|
||||
FMOVD R4, F5
|
||||
VPMULL V6.D1, V5.D1, V5.Q1
|
||||
VDUP V6.D[1], V6.D2
|
||||
VEOR V5.B8, V6.B8, V6.B8
|
||||
MOVD 128(R3), R4
|
||||
FMOVD R4, F4
|
||||
VPMULL V4.D1, V6.D1, V6.Q1
|
||||
FMOVD F6, R4
|
||||
MOVD 136(R3), R5
|
||||
FMOVD R5, F4
|
||||
VPMULL V4.D1, V6.D1, V6.Q1
|
||||
VEOR V6.B16, V5.B16, V6.B16
|
||||
VMOV V6.D[1], R5
|
||||
EOR R4, R5, R0
|
||||
|
||||
skip128:
|
||||
MVN R0, R0
|
||||
MOVD R0, checksum+32(FP)
|
||||
RET
|
||||
|
||||
DATA ·const+0x000(SB)/8, $0xd083dd594d96319d // K_959
|
||||
DATA ·const+0x008(SB)/8, $0x946588403d4adcbc // K_895
|
||||
DATA ·const+0x010(SB)/8, $0x3c255f5ebc414423 // K_831
|
||||
DATA ·const+0x018(SB)/8, $0x34f5a24e22d66e90 // K_767
|
||||
DATA ·const+0x020(SB)/8, $0x7b0ab10dd0f809fe // K_703
|
||||
DATA ·const+0x028(SB)/8, $0x03363823e6e791e5 // K_639
|
||||
DATA ·const+0x030(SB)/8, $0x0c32cdb31e18a84a // K_575
|
||||
DATA ·const+0x038(SB)/8, $0x62242240ace5045a // K_511
|
||||
DATA ·const+0x040(SB)/8, $0xbdd7ac0ee1a4a0f0 // K_447
|
||||
DATA ·const+0x048(SB)/8, $0xa3ffdc1fe8e82a8b // K_383
|
||||
DATA ·const+0x050(SB)/8, $0xb0bc2e589204f500 // K_319
|
||||
DATA ·const+0x058(SB)/8, $0xe1e0bb9d45d7a44c // K_255
|
||||
DATA ·const+0x060(SB)/8, $0xeadc41fd2ba3d420 // K_191
|
||||
DATA ·const+0x068(SB)/8, $0x21e9761e252621ac // K_127
|
||||
DATA ·const+0x070(SB)/8, $0xa1ca681e733f9c40 // K_1087
|
||||
DATA ·const+0x078(SB)/8, $0x5f852fb61e8d92dc // K_1023
|
||||
DATA ·const+0x080(SB)/8, $0x27ecfa329aef9f77 // MU
|
||||
DATA ·const+0x088(SB)/8, $0x34d926535897936b // POLY
|
||||
GLOBL ·const(SB), (NOPTR+RODATA), $144
|
||||
11
vendor/github.com/minio/crc64nvme/crc64_other.go
generated
vendored
Normal file
11
vendor/github.com/minio/crc64nvme/crc64_other.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
// Copyright (c) 2025 Minio Inc. All rights reserved.
|
||||
// Use of this source code is governed by a license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
//go:build (!amd64 || noasm || appengine || gccgo) && (!arm64 || noasm || appengine || gccgo)
|
||||
|
||||
package crc64nvme
|
||||
|
||||
var hasAsm = false
|
||||
|
||||
func updateAsm(crc uint64, p []byte) (checksum uint64) { panic("should not be reached") }
|
||||
2
vendor/github.com/minio/minio-go/v7/README.md
generated
vendored
2
vendor/github.com/minio/minio-go/v7/README.md
generated
vendored
@@ -253,7 +253,7 @@ The full API Reference is available here.
|
||||
|
||||
* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go)
|
||||
* [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go)
|
||||
* [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go)
|
||||
* [removebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketencryption.go)
|
||||
|
||||
### Full Examples : Bucket replication Operations
|
||||
|
||||
|
||||
9
vendor/github.com/minio/minio-go/v7/api-compose-object.go
generated
vendored
9
vendor/github.com/minio/minio-go/v7/api-compose-object.go
generated
vendored
@@ -30,6 +30,7 @@ import (
|
||||
"github.com/google/uuid"
|
||||
"github.com/minio/minio-go/v7/pkg/encrypt"
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
"github.com/minio/minio-go/v7/pkg/tags"
|
||||
)
|
||||
|
||||
// CopyDestOptions represents options specified by user for CopyObject/ComposeObject APIs
|
||||
@@ -98,8 +99,8 @@ func (opts CopyDestOptions) Marshal(header http.Header) {
|
||||
const replaceDirective = "REPLACE"
|
||||
if opts.ReplaceTags {
|
||||
header.Set(amzTaggingHeaderDirective, replaceDirective)
|
||||
if tags := s3utils.TagEncode(opts.UserTags); tags != "" {
|
||||
header.Set(amzTaggingHeader, tags)
|
||||
if tags, _ := tags.NewTags(opts.UserTags, true); tags != nil {
|
||||
header.Set(amzTaggingHeader, tags.String())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -236,7 +237,9 @@ func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuc
|
||||
}
|
||||
|
||||
if len(dstOpts.UserTags) != 0 {
|
||||
headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags))
|
||||
if tags, _ := tags.NewTags(dstOpts.UserTags, true); tags != nil {
|
||||
headers.Set(amzTaggingHeader, tags.String())
|
||||
}
|
||||
}
|
||||
|
||||
reqMetadata := requestMetadata{
|
||||
|
||||
2
vendor/github.com/minio/minio-go/v7/api-copy-object.go
generated
vendored
2
vendor/github.com/minio/minio-go/v7/api-copy-object.go
generated
vendored
@@ -68,7 +68,7 @@ func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySr
|
||||
Bucket: dst.Bucket,
|
||||
Key: dst.Object,
|
||||
LastModified: cpObjRes.LastModified,
|
||||
ETag: trimEtag(resp.Header.Get("ETag")),
|
||||
ETag: trimEtag(cpObjRes.ETag),
|
||||
VersionID: resp.Header.Get(amzVersionID),
|
||||
Expiration: expTime,
|
||||
ExpirationRuleID: ruleID,
|
||||
|
||||
18
vendor/github.com/minio/minio-go/v7/api-datatypes.go
generated
vendored
18
vendor/github.com/minio/minio-go/v7/api-datatypes.go
generated
vendored
@@ -143,10 +143,11 @@ type UploadInfo struct {
|
||||
// Verified checksum values, if any.
|
||||
// Values are base64 (standard) encoded.
|
||||
// For multipart objects this is a checksum of the checksum of each part.
|
||||
ChecksumCRC32 string
|
||||
ChecksumCRC32C string
|
||||
ChecksumSHA1 string
|
||||
ChecksumSHA256 string
|
||||
ChecksumCRC32 string
|
||||
ChecksumCRC32C string
|
||||
ChecksumSHA1 string
|
||||
ChecksumSHA256 string
|
||||
ChecksumCRC64NVME string
|
||||
}
|
||||
|
||||
// RestoreInfo contains information of the restore operation of an archived object
|
||||
@@ -215,10 +216,11 @@ type ObjectInfo struct {
|
||||
Restore *RestoreInfo
|
||||
|
||||
// Checksum values
|
||||
ChecksumCRC32 string
|
||||
ChecksumCRC32C string
|
||||
ChecksumSHA1 string
|
||||
ChecksumSHA256 string
|
||||
ChecksumCRC32 string
|
||||
ChecksumCRC32C string
|
||||
ChecksumSHA1 string
|
||||
ChecksumSHA256 string
|
||||
ChecksumCRC64NVME string
|
||||
|
||||
Internal *struct {
|
||||
K int // Data blocks
|
||||
|
||||
4
vendor/github.com/minio/minio-go/v7/api-get-object.go
generated
vendored
4
vendor/github.com/minio/minio-go/v7/api-get-object.go
generated
vendored
@@ -318,7 +318,7 @@ func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
|
||||
response := <-o.resCh
|
||||
|
||||
// Return any error to the top level.
|
||||
if response.Error != nil {
|
||||
if response.Error != nil && response.Error != io.EOF {
|
||||
return response, response.Error
|
||||
}
|
||||
|
||||
@@ -340,7 +340,7 @@ func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
|
||||
// Data are ready on the wire, no need to reinitiate connection in lower level
|
||||
o.seekData = false
|
||||
|
||||
return response, nil
|
||||
return response, response.Error
|
||||
}
|
||||
|
||||
// setOffset - handles the setting of offsets for
|
||||
|
||||
2
vendor/github.com/minio/minio-go/v7/api-presigned.go
generated
vendored
2
vendor/github.com/minio/minio-go/v7/api-presigned.go
generated
vendored
@@ -140,7 +140,7 @@ func (c *Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url
|
||||
}
|
||||
|
||||
// Get credentials from the configured credentials provider.
|
||||
credValues, err := c.credsProvider.Get()
|
||||
credValues, err := c.credsProvider.GetWithContext(c.CredContext())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
78
vendor/github.com/minio/minio-go/v7/api-prompt-object.go
generated
vendored
Normal file
78
vendor/github.com/minio/minio-go/v7/api-prompt-object.go
generated
vendored
Normal file
@@ -0,0 +1,78 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2024 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/goccy/go-json"
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// PromptObject performs language model inference with the prompt and referenced object as context.
|
||||
// Inference is performed using a Lambda handler that can process the prompt and object.
|
||||
// Currently, this functionality is limited to certain MinIO servers.
|
||||
func (c *Client) PromptObject(ctx context.Context, bucketName, objectName, prompt string, opts PromptObjectOptions) (io.ReadCloser, error) {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return nil, ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "InvalidBucketName",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
if err := s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return nil, ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "XMinioInvalidObjectName",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
opts.AddLambdaArnToReqParams(opts.LambdaArn)
|
||||
opts.SetHeader("Content-Type", "application/json")
|
||||
opts.AddPromptArg("prompt", prompt)
|
||||
promptReqBytes, err := json.Marshal(opts.PromptArgs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Execute POST on bucket/object.
|
||||
resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
objectName: objectName,
|
||||
queryValues: opts.toQueryValues(),
|
||||
customHeader: opts.Header(),
|
||||
contentSHA256Hex: sum256Hex(promptReqBytes),
|
||||
contentBody: bytes.NewReader(promptReqBytes),
|
||||
contentLength: int64(len(promptReqBytes)),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
defer closeResponse(resp)
|
||||
return nil, httpRespToErrorResponse(resp, bucketName, objectName)
|
||||
}
|
||||
|
||||
return resp.Body, nil
|
||||
}
|
||||
84
vendor/github.com/minio/minio-go/v7/api-prompt-options.go
generated
vendored
Normal file
84
vendor/github.com/minio/minio-go/v7/api-prompt-options.go
generated
vendored
Normal file
@@ -0,0 +1,84 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2024 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// PromptObjectOptions provides options to PromptObject call.
|
||||
// LambdaArn is the ARN of the Prompt Lambda to be invoked.
|
||||
// PromptArgs is a map of key-value pairs to be passed to the inference action on the Prompt Lambda.
|
||||
// "prompt" is a reserved key and should not be used as a key in PromptArgs.
|
||||
type PromptObjectOptions struct {
|
||||
LambdaArn string
|
||||
PromptArgs map[string]any
|
||||
headers map[string]string
|
||||
reqParams url.Values
|
||||
}
|
||||
|
||||
// Header returns the http.Header representation of the POST options.
|
||||
func (o PromptObjectOptions) Header() http.Header {
|
||||
headers := make(http.Header, len(o.headers))
|
||||
for k, v := range o.headers {
|
||||
headers.Set(k, v)
|
||||
}
|
||||
return headers
|
||||
}
|
||||
|
||||
// AddPromptArg Add a key value pair to the prompt arguments where the key is a string and
|
||||
// the value is a JSON serializable.
|
||||
func (o *PromptObjectOptions) AddPromptArg(key string, value any) {
|
||||
if o.PromptArgs == nil {
|
||||
o.PromptArgs = make(map[string]any)
|
||||
}
|
||||
o.PromptArgs[key] = value
|
||||
}
|
||||
|
||||
// AddLambdaArnToReqParams adds the lambdaArn to the request query string parameters.
|
||||
func (o *PromptObjectOptions) AddLambdaArnToReqParams(lambdaArn string) {
|
||||
if o.reqParams == nil {
|
||||
o.reqParams = make(url.Values)
|
||||
}
|
||||
o.reqParams.Add("lambdaArn", lambdaArn)
|
||||
}
|
||||
|
||||
// SetHeader adds a key value pair to the options. The
|
||||
// key-value pair will be part of the HTTP POST request
|
||||
// headers.
|
||||
func (o *PromptObjectOptions) SetHeader(key, value string) {
|
||||
if o.headers == nil {
|
||||
o.headers = make(map[string]string)
|
||||
}
|
||||
o.headers[http.CanonicalHeaderKey(key)] = value
|
||||
}
|
||||
|
||||
// toQueryValues - Convert the reqParams in Options to query string parameters.
|
||||
func (o *PromptObjectOptions) toQueryValues() url.Values {
|
||||
urlValues := make(url.Values)
|
||||
if o.reqParams != nil {
|
||||
for key, values := range o.reqParams {
|
||||
for _, value := range values {
|
||||
urlValues.Add(key, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return urlValues
|
||||
}
|
||||
5
vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go
generated
vendored
5
vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go
generated
vendored
@@ -85,7 +85,10 @@ func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, fanOutData
|
||||
policy.SetEncryption(fanOutReq.SSE)
|
||||
|
||||
// Set checksum headers if any.
|
||||
policy.SetChecksum(fanOutReq.Checksum)
|
||||
err := policy.SetChecksum(fanOutReq.Checksum)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
url, formData, err := c.PresignedPostPolicy(ctx, policy)
|
||||
if err != nil {
|
||||
|
||||
48
vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
generated
vendored
48
vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
generated
vendored
@@ -83,10 +83,7 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
|
||||
// HTTPS connection.
|
||||
hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256)
|
||||
if len(hashSums) == 0 {
|
||||
if opts.UserMetadata == nil {
|
||||
opts.UserMetadata = make(map[string]string, 1)
|
||||
}
|
||||
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
|
||||
addAutoChecksumHeaders(&opts)
|
||||
}
|
||||
|
||||
// Initiate a new multipart upload.
|
||||
@@ -113,7 +110,6 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
|
||||
|
||||
// Create checksums
|
||||
// CRC32C is ~50% faster on AMD64 @ 30GB/s
|
||||
var crcBytes []byte
|
||||
customHeader := make(http.Header)
|
||||
crc := opts.AutoChecksum.Hasher()
|
||||
for partNumber <= totalPartsCount {
|
||||
@@ -154,7 +150,6 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
|
||||
crc.Write(buf[:length])
|
||||
cSum := crc.Sum(nil)
|
||||
customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
|
||||
crcBytes = append(crcBytes, cSum...)
|
||||
}
|
||||
|
||||
p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
|
||||
@@ -182,18 +177,21 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
|
||||
|
||||
// Loop over total uploaded parts to save them in
|
||||
// Parts array before completing the multipart request.
|
||||
allParts := make([]ObjectPart, 0, len(partsInfo))
|
||||
for i := 1; i < partNumber; i++ {
|
||||
part, ok := partsInfo[i]
|
||||
if !ok {
|
||||
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
|
||||
}
|
||||
allParts = append(allParts, part)
|
||||
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
|
||||
ETag: part.ETag,
|
||||
PartNumber: part.PartNumber,
|
||||
ChecksumCRC32: part.ChecksumCRC32,
|
||||
ChecksumCRC32C: part.ChecksumCRC32C,
|
||||
ChecksumSHA1: part.ChecksumSHA1,
|
||||
ChecksumSHA256: part.ChecksumSHA256,
|
||||
ETag: part.ETag,
|
||||
PartNumber: part.PartNumber,
|
||||
ChecksumCRC32: part.ChecksumCRC32,
|
||||
ChecksumCRC32C: part.ChecksumCRC32C,
|
||||
ChecksumSHA1: part.ChecksumSHA1,
|
||||
ChecksumSHA256: part.ChecksumSHA256,
|
||||
ChecksumCRC64NVME: part.ChecksumCRC64NVME,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -203,12 +201,8 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
|
||||
ServerSideEncryption: opts.ServerSideEncryption,
|
||||
AutoChecksum: opts.AutoChecksum,
|
||||
}
|
||||
if len(crcBytes) > 0 {
|
||||
// Add hash of hashes.
|
||||
crc.Reset()
|
||||
crc.Write(crcBytes)
|
||||
opts.UserMetadata = map[string]string{opts.AutoChecksum.Key(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
|
||||
}
|
||||
applyAutoChecksum(&opts, allParts)
|
||||
|
||||
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
@@ -354,10 +348,11 @@ func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart
|
||||
// Once successfully uploaded, return completed part.
|
||||
h := resp.Header
|
||||
objPart := ObjectPart{
|
||||
ChecksumCRC32: h.Get("x-amz-checksum-crc32"),
|
||||
ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"),
|
||||
ChecksumSHA1: h.Get("x-amz-checksum-sha1"),
|
||||
ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
|
||||
ChecksumCRC32: h.Get(ChecksumCRC32.Key()),
|
||||
ChecksumCRC32C: h.Get(ChecksumCRC32C.Key()),
|
||||
ChecksumSHA1: h.Get(ChecksumSHA1.Key()),
|
||||
ChecksumSHA256: h.Get(ChecksumSHA256.Key()),
|
||||
ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()),
|
||||
}
|
||||
objPart.Size = p.size
|
||||
objPart.PartNumber = p.partNumber
|
||||
@@ -457,9 +452,10 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
|
||||
Expiration: expTime,
|
||||
ExpirationRuleID: ruleID,
|
||||
|
||||
ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256,
|
||||
ChecksumSHA1: completeMultipartUploadResult.ChecksumSHA1,
|
||||
ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32,
|
||||
ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C,
|
||||
ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256,
|
||||
ChecksumSHA1: completeMultipartUploadResult.ChecksumSHA1,
|
||||
ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32,
|
||||
ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C,
|
||||
ChecksumCRC64NVME: completeMultipartUploadResult.ChecksumCRC64NVME,
|
||||
}, nil
|
||||
}
|
||||
|
||||
101
vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
generated
vendored
101
vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
generated
vendored
@@ -52,7 +52,7 @@ func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objec
|
||||
} else {
|
||||
info, err = c.putObjectMultipartStreamOptionalChecksum(ctx, bucketName, objectName, reader, size, opts)
|
||||
}
|
||||
if err != nil {
|
||||
if err != nil && s3utils.IsGoogleEndpoint(*c.endpointURL) {
|
||||
errResp := ToErrorResponse(err)
|
||||
// Verify if multipart functionality is not available, if not
|
||||
// fall back to single PutObject operation.
|
||||
@@ -113,10 +113,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
|
||||
}
|
||||
withChecksum := c.trailingHeaderSupport
|
||||
if withChecksum {
|
||||
if opts.UserMetadata == nil {
|
||||
opts.UserMetadata = make(map[string]string, 1)
|
||||
}
|
||||
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
|
||||
addAutoChecksumHeaders(&opts)
|
||||
}
|
||||
// Initiate a new multipart upload.
|
||||
uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
|
||||
@@ -240,6 +237,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
|
||||
|
||||
// Gather the responses as they occur and update any
|
||||
// progress bar.
|
||||
allParts := make([]ObjectPart, 0, totalPartsCount)
|
||||
for u := 1; u <= totalPartsCount; u++ {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
@@ -248,16 +246,17 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
|
||||
if uploadRes.Error != nil {
|
||||
return UploadInfo{}, uploadRes.Error
|
||||
}
|
||||
|
||||
allParts = append(allParts, uploadRes.Part)
|
||||
// Update the totalUploadedSize.
|
||||
totalUploadedSize += uploadRes.Size
|
||||
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
|
||||
ETag: uploadRes.Part.ETag,
|
||||
PartNumber: uploadRes.Part.PartNumber,
|
||||
ChecksumCRC32: uploadRes.Part.ChecksumCRC32,
|
||||
ChecksumCRC32C: uploadRes.Part.ChecksumCRC32C,
|
||||
ChecksumSHA1: uploadRes.Part.ChecksumSHA1,
|
||||
ChecksumSHA256: uploadRes.Part.ChecksumSHA256,
|
||||
ETag: uploadRes.Part.ETag,
|
||||
PartNumber: uploadRes.Part.PartNumber,
|
||||
ChecksumCRC32: uploadRes.Part.ChecksumCRC32,
|
||||
ChecksumCRC32C: uploadRes.Part.ChecksumCRC32C,
|
||||
ChecksumSHA1: uploadRes.Part.ChecksumSHA1,
|
||||
ChecksumSHA256: uploadRes.Part.ChecksumSHA256,
|
||||
ChecksumCRC64NVME: uploadRes.Part.ChecksumCRC64NVME,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -275,15 +274,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
|
||||
AutoChecksum: opts.AutoChecksum,
|
||||
}
|
||||
if withChecksum {
|
||||
// Add hash of hashes.
|
||||
crc := opts.AutoChecksum.Hasher()
|
||||
for _, part := range complMultipartUpload.Parts {
|
||||
cs, err := base64.StdEncoding.DecodeString(part.Checksum(opts.AutoChecksum))
|
||||
if err == nil {
|
||||
crc.Write(cs)
|
||||
}
|
||||
}
|
||||
opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
|
||||
applyAutoChecksum(&opts, allParts)
|
||||
}
|
||||
|
||||
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
|
||||
@@ -312,10 +303,7 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
|
||||
}
|
||||
|
||||
if !opts.SendContentMd5 {
|
||||
if opts.UserMetadata == nil {
|
||||
opts.UserMetadata = make(map[string]string, 1)
|
||||
}
|
||||
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
|
||||
addAutoChecksumHeaders(&opts)
|
||||
}
|
||||
|
||||
// Calculate the optimal parts info for a given size.
|
||||
@@ -342,7 +330,6 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
|
||||
|
||||
// Create checksums
|
||||
// CRC32C is ~50% faster on AMD64 @ 30GB/s
|
||||
var crcBytes []byte
|
||||
customHeader := make(http.Header)
|
||||
crc := opts.AutoChecksum.Hasher()
|
||||
md5Hash := c.md5Hasher()
|
||||
@@ -389,7 +376,6 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
|
||||
crc.Write(buf[:length])
|
||||
cSum := crc.Sum(nil)
|
||||
customHeader.Set(opts.AutoChecksum.KeyCapitalized(), base64.StdEncoding.EncodeToString(cSum))
|
||||
crcBytes = append(crcBytes, cSum...)
|
||||
}
|
||||
|
||||
// Update progress reader appropriately to the latest offset
|
||||
@@ -420,18 +406,21 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
|
||||
|
||||
// Loop over total uploaded parts to save them in
|
||||
// Parts array before completing the multipart request.
|
||||
allParts := make([]ObjectPart, 0, len(partsInfo))
|
||||
for i := 1; i < partNumber; i++ {
|
||||
part, ok := partsInfo[i]
|
||||
if !ok {
|
||||
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
|
||||
}
|
||||
allParts = append(allParts, part)
|
||||
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
|
||||
ETag: part.ETag,
|
||||
PartNumber: part.PartNumber,
|
||||
ChecksumCRC32: part.ChecksumCRC32,
|
||||
ChecksumCRC32C: part.ChecksumCRC32C,
|
||||
ChecksumSHA1: part.ChecksumSHA1,
|
||||
ChecksumSHA256: part.ChecksumSHA256,
|
||||
ETag: part.ETag,
|
||||
PartNumber: part.PartNumber,
|
||||
ChecksumCRC32: part.ChecksumCRC32,
|
||||
ChecksumCRC32C: part.ChecksumCRC32C,
|
||||
ChecksumSHA1: part.ChecksumSHA1,
|
||||
ChecksumSHA256: part.ChecksumSHA256,
|
||||
ChecksumCRC64NVME: part.ChecksumCRC64NVME,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -442,12 +431,7 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
|
||||
ServerSideEncryption: opts.ServerSideEncryption,
|
||||
AutoChecksum: opts.AutoChecksum,
|
||||
}
|
||||
if len(crcBytes) > 0 {
|
||||
// Add hash of hashes.
|
||||
crc.Reset()
|
||||
crc.Write(crcBytes)
|
||||
opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
|
||||
}
|
||||
applyAutoChecksum(&opts, allParts)
|
||||
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
@@ -475,10 +459,7 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
|
||||
opts.AutoChecksum = opts.Checksum
|
||||
}
|
||||
if !opts.SendContentMd5 {
|
||||
if opts.UserMetadata == nil {
|
||||
opts.UserMetadata = make(map[string]string, 1)
|
||||
}
|
||||
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
|
||||
addAutoChecksumHeaders(&opts)
|
||||
}
|
||||
|
||||
// Cancel all when an error occurs.
|
||||
@@ -510,7 +491,6 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
|
||||
|
||||
// Create checksums
|
||||
// CRC32C is ~50% faster on AMD64 @ 30GB/s
|
||||
var crcBytes []byte
|
||||
crc := opts.AutoChecksum.Hasher()
|
||||
|
||||
// Total data read and written to server. should be equal to 'size' at the end of the call.
|
||||
@@ -570,7 +550,6 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
|
||||
crc.Write(buf[:length])
|
||||
cSum := crc.Sum(nil)
|
||||
customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
|
||||
crcBytes = append(crcBytes, cSum...)
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
@@ -630,18 +609,21 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
|
||||
|
||||
// Loop over total uploaded parts to save them in
|
||||
// Parts array before completing the multipart request.
|
||||
allParts := make([]ObjectPart, 0, len(partsInfo))
|
||||
for i := 1; i < partNumber; i++ {
|
||||
part, ok := partsInfo[i]
|
||||
if !ok {
|
||||
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
|
||||
}
|
||||
allParts = append(allParts, part)
|
||||
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
|
||||
ETag: part.ETag,
|
||||
PartNumber: part.PartNumber,
|
||||
ChecksumCRC32: part.ChecksumCRC32,
|
||||
ChecksumCRC32C: part.ChecksumCRC32C,
|
||||
ChecksumSHA1: part.ChecksumSHA1,
|
||||
ChecksumSHA256: part.ChecksumSHA256,
|
||||
ETag: part.ETag,
|
||||
PartNumber: part.PartNumber,
|
||||
ChecksumCRC32: part.ChecksumCRC32,
|
||||
ChecksumCRC32C: part.ChecksumCRC32C,
|
||||
ChecksumSHA1: part.ChecksumSHA1,
|
||||
ChecksumSHA256: part.ChecksumSHA256,
|
||||
ChecksumCRC64NVME: part.ChecksumCRC64NVME,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -652,12 +634,8 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
|
||||
ServerSideEncryption: opts.ServerSideEncryption,
|
||||
AutoChecksum: opts.AutoChecksum,
|
||||
}
|
||||
if len(crcBytes) > 0 {
|
||||
// Add hash of hashes.
|
||||
crc.Reset()
|
||||
crc.Write(crcBytes)
|
||||
opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
|
||||
}
|
||||
applyAutoChecksum(&opts, allParts)
|
||||
|
||||
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
@@ -823,9 +801,10 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string,
|
||||
ExpirationRuleID: ruleID,
|
||||
|
||||
// Checksum values
|
||||
ChecksumCRC32: h.Get("x-amz-checksum-crc32"),
|
||||
ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"),
|
||||
ChecksumSHA1: h.Get("x-amz-checksum-sha1"),
|
||||
ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
|
||||
ChecksumCRC32: h.Get(ChecksumCRC32.Key()),
|
||||
ChecksumCRC32C: h.Get(ChecksumCRC32C.Key()),
|
||||
ChecksumSHA1: h.Get(ChecksumSHA1.Key()),
|
||||
ChecksumSHA256: h.Get(ChecksumSHA256.Key()),
|
||||
ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
35
vendor/github.com/minio/minio-go/v7/api-put-object.go
generated
vendored
35
vendor/github.com/minio/minio-go/v7/api-put-object.go
generated
vendored
@@ -30,6 +30,7 @@ import (
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/encrypt"
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
"github.com/minio/minio-go/v7/pkg/tags"
|
||||
"golang.org/x/net/http/httpguts"
|
||||
)
|
||||
|
||||
@@ -229,7 +230,9 @@ func (opts PutObjectOptions) Header() (header http.Header) {
|
||||
}
|
||||
|
||||
if len(opts.UserTags) != 0 {
|
||||
header.Set(amzTaggingHeader, s3utils.TagEncode(opts.UserTags))
|
||||
if tags, _ := tags.NewTags(opts.UserTags, true); tags != nil {
|
||||
header.Set(amzTaggingHeader, tags.String())
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range opts.UserMetadata {
|
||||
@@ -387,10 +390,7 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
|
||||
opts.AutoChecksum = opts.Checksum
|
||||
}
|
||||
if !opts.SendContentMd5 {
|
||||
if opts.UserMetadata == nil {
|
||||
opts.UserMetadata = make(map[string]string, 1)
|
||||
}
|
||||
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
|
||||
addAutoChecksumHeaders(&opts)
|
||||
}
|
||||
|
||||
// Initiate a new multipart upload.
|
||||
@@ -417,7 +417,6 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
|
||||
|
||||
// Create checksums
|
||||
// CRC32C is ~50% faster on AMD64 @ 30GB/s
|
||||
var crcBytes []byte
|
||||
customHeader := make(http.Header)
|
||||
crc := opts.AutoChecksum.Hasher()
|
||||
|
||||
@@ -443,7 +442,6 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
|
||||
crc.Write(buf[:length])
|
||||
cSum := crc.Sum(nil)
|
||||
customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
|
||||
crcBytes = append(crcBytes, cSum...)
|
||||
}
|
||||
|
||||
// Update progress reader appropriately to the latest offset
|
||||
@@ -475,18 +473,21 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
|
||||
|
||||
// Loop over total uploaded parts to save them in
|
||||
// Parts array before completing the multipart request.
|
||||
allParts := make([]ObjectPart, 0, len(partsInfo))
|
||||
for i := 1; i < partNumber; i++ {
|
||||
part, ok := partsInfo[i]
|
||||
if !ok {
|
||||
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
|
||||
}
|
||||
allParts = append(allParts, part)
|
||||
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
|
||||
ETag: part.ETag,
|
||||
PartNumber: part.PartNumber,
|
||||
ChecksumCRC32: part.ChecksumCRC32,
|
||||
ChecksumCRC32C: part.ChecksumCRC32C,
|
||||
ChecksumSHA1: part.ChecksumSHA1,
|
||||
ChecksumSHA256: part.ChecksumSHA256,
|
||||
ETag: part.ETag,
|
||||
PartNumber: part.PartNumber,
|
||||
ChecksumCRC32: part.ChecksumCRC32,
|
||||
ChecksumCRC32C: part.ChecksumCRC32C,
|
||||
ChecksumSHA1: part.ChecksumSHA1,
|
||||
ChecksumSHA256: part.ChecksumSHA256,
|
||||
ChecksumCRC64NVME: part.ChecksumCRC64NVME,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -497,12 +498,8 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
|
||||
ServerSideEncryption: opts.ServerSideEncryption,
|
||||
AutoChecksum: opts.AutoChecksum,
|
||||
}
|
||||
if len(crcBytes) > 0 {
|
||||
// Add hash of hashes.
|
||||
crc.Reset()
|
||||
crc.Write(crcBytes)
|
||||
opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
|
||||
}
|
||||
applyAutoChecksum(&opts, allParts)
|
||||
|
||||
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
|
||||
8
vendor/github.com/minio/minio-go/v7/api-remove.go
generated
vendored
8
vendor/github.com/minio/minio-go/v7/api-remove.go
generated
vendored
@@ -213,6 +213,14 @@ type RemoveObjectError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
func (err *RemoveObjectError) Error() string {
|
||||
// This should never happen as we will have a non-nil error with no underlying error.
|
||||
if err.Err == nil {
|
||||
return "unexpected remove object error result"
|
||||
}
|
||||
return err.Err.Error()
|
||||
}
|
||||
|
||||
// RemoveObjectResult - container of Multi Delete S3 API result
|
||||
type RemoveObjectResult struct {
|
||||
ObjectName string
|
||||
|
||||
70
vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
generated
vendored
70
vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
generated
vendored
@@ -18,6 +18,7 @@
|
||||
package minio
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"io"
|
||||
@@ -276,10 +277,45 @@ type ObjectPart struct {
|
||||
Size int64
|
||||
|
||||
// Checksum values of each part.
|
||||
ChecksumCRC32 string
|
||||
ChecksumCRC32C string
|
||||
ChecksumSHA1 string
|
||||
ChecksumSHA256 string
|
||||
ChecksumCRC32 string
|
||||
ChecksumCRC32C string
|
||||
ChecksumSHA1 string
|
||||
ChecksumSHA256 string
|
||||
ChecksumCRC64NVME string
|
||||
}
|
||||
|
||||
// Checksum will return the checksum for the given type.
|
||||
// Will return the empty string if not set.
|
||||
func (c ObjectPart) Checksum(t ChecksumType) string {
|
||||
switch {
|
||||
case t.Is(ChecksumCRC32C):
|
||||
return c.ChecksumCRC32C
|
||||
case t.Is(ChecksumCRC32):
|
||||
return c.ChecksumCRC32
|
||||
case t.Is(ChecksumSHA1):
|
||||
return c.ChecksumSHA1
|
||||
case t.Is(ChecksumSHA256):
|
||||
return c.ChecksumSHA256
|
||||
case t.Is(ChecksumCRC64NVME):
|
||||
return c.ChecksumCRC64NVME
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// ChecksumRaw returns the decoded checksum from the part.
|
||||
func (c ObjectPart) ChecksumRaw(t ChecksumType) ([]byte, error) {
|
||||
b64 := c.Checksum(t)
|
||||
if b64 == "" {
|
||||
return nil, errors.New("no checksum set")
|
||||
}
|
||||
decoded, err := base64.StdEncoding.DecodeString(b64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(decoded) != t.RawByteLen() {
|
||||
return nil, errors.New("checksum length mismatch")
|
||||
}
|
||||
return decoded, nil
|
||||
}
|
||||
|
||||
// ListObjectPartsResult container for ListObjectParts response.
|
||||
@@ -296,6 +332,12 @@ type ListObjectPartsResult struct {
|
||||
NextPartNumberMarker int
|
||||
MaxParts int
|
||||
|
||||
// ChecksumAlgorithm will be CRC32, CRC32C, etc.
|
||||
ChecksumAlgorithm string
|
||||
|
||||
// ChecksumType is FULL_OBJECT or COMPOSITE (assume COMPOSITE when unset)
|
||||
ChecksumType string
|
||||
|
||||
// Indicates whether the returned list of parts is truncated.
|
||||
IsTruncated bool
|
||||
ObjectParts []ObjectPart `xml:"Part"`
|
||||
@@ -320,10 +362,11 @@ type completeMultipartUploadResult struct {
|
||||
ETag string
|
||||
|
||||
// Checksum values, hash of hashes of parts.
|
||||
ChecksumCRC32 string
|
||||
ChecksumCRC32C string
|
||||
ChecksumSHA1 string
|
||||
ChecksumSHA256 string
|
||||
ChecksumCRC32 string
|
||||
ChecksumCRC32C string
|
||||
ChecksumSHA1 string
|
||||
ChecksumSHA256 string
|
||||
ChecksumCRC64NVME string
|
||||
}
|
||||
|
||||
// CompletePart sub container lists individual part numbers and their
|
||||
@@ -334,10 +377,11 @@ type CompletePart struct {
|
||||
ETag string
|
||||
|
||||
// Checksum values
|
||||
ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"`
|
||||
ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
|
||||
ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"`
|
||||
ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
|
||||
ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"`
|
||||
ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
|
||||
ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"`
|
||||
ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
|
||||
ChecksumCRC64NVME string `xml:",omitempty"`
|
||||
}
|
||||
|
||||
// Checksum will return the checksum for the given type.
|
||||
@@ -352,6 +396,8 @@ func (c CompletePart) Checksum(t ChecksumType) string {
|
||||
return c.ChecksumSHA1
|
||||
case t.Is(ChecksumSHA256):
|
||||
return c.ChecksumSHA256
|
||||
case t.Is(ChecksumCRC64NVME):
|
||||
return c.ChecksumCRC64NVME
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
66
vendor/github.com/minio/minio-go/v7/api.go
generated
vendored
66
vendor/github.com/minio/minio-go/v7/api.go
generated
vendored
@@ -92,6 +92,9 @@ type Client struct {
|
||||
// default to Auto.
|
||||
lookup BucketLookupType
|
||||
|
||||
// lookupFn is a custom function to return URL lookup type supported by the server.
|
||||
lookupFn func(u url.URL, bucketName string) BucketLookupType
|
||||
|
||||
// Factory for MD5 hash functions.
|
||||
md5Hasher func() md5simd.Hasher
|
||||
sha256Hasher func() md5simd.Hasher
|
||||
@@ -99,6 +102,7 @@ type Client struct {
|
||||
healthStatus int32
|
||||
|
||||
trailingHeaderSupport bool
|
||||
maxRetries int
|
||||
}
|
||||
|
||||
// Options for New method
|
||||
@@ -116,6 +120,25 @@ type Options struct {
|
||||
// function to perform region lookups appropriately.
|
||||
CustomRegionViaURL func(u url.URL) string
|
||||
|
||||
// Provide a custom function that returns BucketLookupType based
|
||||
// on the input URL, this is just like s3utils.IsVirtualHostSupported()
|
||||
// function but allows users to provide their own implementation.
|
||||
// Once this is set it overrides all settings for opts.BucketLookup
|
||||
// if this function returns BucketLookupAuto then default detection
|
||||
// via s3utils.IsVirtualHostSupported() is used, otherwise the
|
||||
// function is expected to return appropriate value as expected for
|
||||
// the URL the user wishes to honor.
|
||||
//
|
||||
// BucketName is passed additionally for the caller to ensure
|
||||
// handle situations where `bucketNames` have multiple `.` separators
|
||||
// in such case HTTPs certs will not work properly for *.<domain>
|
||||
// wildcards, so you need to specifically handle these situations
|
||||
// and not return bucket as part of DNS since those requests may fail.
|
||||
//
|
||||
// For better understanding look at s3utils.IsVirtualHostSupported()
|
||||
// implementation.
|
||||
BucketLookupViaURL func(u url.URL, bucketName string) BucketLookupType
|
||||
|
||||
// TrailingHeaders indicates server support of trailing headers.
|
||||
// Only supported for v4 signatures.
|
||||
TrailingHeaders bool
|
||||
@@ -123,12 +146,16 @@ type Options struct {
|
||||
// Custom hash routines. Leave nil to use standard.
|
||||
CustomMD5 func() md5simd.Hasher
|
||||
CustomSHA256 func() md5simd.Hasher
|
||||
|
||||
// Number of times a request is retried. Defaults to 10 retries if this option is not configured.
|
||||
// Set to 1 to disable retries.
|
||||
MaxRetries int
|
||||
}
|
||||
|
||||
// Global constants.
|
||||
const (
|
||||
libraryName = "minio-go"
|
||||
libraryVersion = "v7.0.78"
|
||||
libraryVersion = "v7.0.87"
|
||||
)
|
||||
|
||||
// User Agent should always following the below style.
|
||||
@@ -274,10 +301,16 @@ func privateNew(endpoint string, opts *Options) (*Client, error) {
|
||||
// Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined
|
||||
// by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints.
|
||||
clnt.lookup = opts.BucketLookup
|
||||
clnt.lookupFn = opts.BucketLookupViaURL
|
||||
|
||||
// healthcheck is not initialized
|
||||
clnt.healthStatus = unknown
|
||||
|
||||
clnt.maxRetries = MaxRetry
|
||||
if opts.MaxRetries > 0 {
|
||||
clnt.maxRetries = opts.MaxRetries
|
||||
}
|
||||
|
||||
// Return.
|
||||
return clnt, nil
|
||||
}
|
||||
@@ -592,7 +625,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
|
||||
|
||||
var retryable bool // Indicates if request can be retried.
|
||||
var bodySeeker io.Seeker // Extracted seeker from io.Reader.
|
||||
reqRetry := MaxRetry // Indicates how many times we can retry the request
|
||||
reqRetry := c.maxRetries // Indicates how many times we can retry the request
|
||||
|
||||
if metadata.contentBody != nil {
|
||||
// Check if body is seekable then it is retryable.
|
||||
@@ -798,7 +831,7 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request
|
||||
}
|
||||
|
||||
// Get credentials from the configured credentials provider.
|
||||
value, err := c.credsProvider.Get()
|
||||
value, err := c.credsProvider.GetWithContext(c.CredContext())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -993,6 +1026,18 @@ func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, is
|
||||
|
||||
// returns true if virtual hosted style requests are to be used.
|
||||
func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool {
|
||||
if c.lookupFn != nil {
|
||||
lookup := c.lookupFn(url, bucketName)
|
||||
switch lookup {
|
||||
case BucketLookupDNS:
|
||||
return true
|
||||
case BucketLookupPath:
|
||||
return false
|
||||
}
|
||||
// if its auto then we fallback to default detection.
|
||||
return s3utils.IsVirtualHostSupported(url, bucketName)
|
||||
}
|
||||
|
||||
if bucketName == "" {
|
||||
return false
|
||||
}
|
||||
@@ -1000,11 +1045,24 @@ func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool
|
||||
if c.lookup == BucketLookupDNS {
|
||||
return true
|
||||
}
|
||||
|
||||
if c.lookup == BucketLookupPath {
|
||||
return false
|
||||
}
|
||||
|
||||
// default to virtual only for Amazon/Google storage. In all other cases use
|
||||
// default to virtual only for Amazon/Google storage. In all other cases use
|
||||
// path style requests
|
||||
return s3utils.IsVirtualHostSupported(url, bucketName)
|
||||
}
|
||||
|
||||
// CredContext returns the context for fetching credentials
|
||||
func (c *Client) CredContext() *credentials.CredContext {
|
||||
httpClient := c.httpClient
|
||||
if httpClient == nil {
|
||||
httpClient = http.DefaultClient
|
||||
}
|
||||
return &credentials.CredContext{
|
||||
Client: httpClient,
|
||||
Endpoint: c.endpointURL.String(),
|
||||
}
|
||||
}
|
||||
|
||||
2
vendor/github.com/minio/minio-go/v7/bucket-cache.go
generated
vendored
2
vendor/github.com/minio/minio-go/v7/bucket-cache.go
generated
vendored
@@ -212,7 +212,7 @@ func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string
|
||||
c.setUserAgent(req)
|
||||
|
||||
// Get credentials from the configured credentials provider.
|
||||
value, err := c.credsProvider.Get()
|
||||
value, err := c.credsProvider.GetWithContext(c.CredContext())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
209
vendor/github.com/minio/minio-go/v7/checksum.go
generated
vendored
209
vendor/github.com/minio/minio-go/v7/checksum.go
generated
vendored
@@ -21,11 +21,17 @@ import (
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"hash/crc64"
|
||||
"io"
|
||||
"math/bits"
|
||||
"net/http"
|
||||
"sort"
|
||||
|
||||
"github.com/minio/crc64nvme"
|
||||
)
|
||||
|
||||
// ChecksumType contains information about the checksum type.
|
||||
@@ -41,23 +47,41 @@ const (
|
||||
ChecksumCRC32
|
||||
// ChecksumCRC32C indicates a CRC32 checksum with Castagnoli table.
|
||||
ChecksumCRC32C
|
||||
// ChecksumCRC64NVME indicates CRC64 with 0xad93d23594c93659 polynomial.
|
||||
ChecksumCRC64NVME
|
||||
|
||||
// Keep after all valid checksums
|
||||
checksumLast
|
||||
|
||||
// ChecksumFullObject is a modifier that can be used on CRC32 and CRC32C
|
||||
// to indicate full object checksums.
|
||||
ChecksumFullObject
|
||||
|
||||
// checksumMask is a mask for valid checksum types.
|
||||
checksumMask = checksumLast - 1
|
||||
|
||||
// ChecksumNone indicates no checksum.
|
||||
ChecksumNone ChecksumType = 0
|
||||
|
||||
amzChecksumAlgo = "x-amz-checksum-algorithm"
|
||||
amzChecksumCRC32 = "x-amz-checksum-crc32"
|
||||
amzChecksumCRC32C = "x-amz-checksum-crc32c"
|
||||
amzChecksumSHA1 = "x-amz-checksum-sha1"
|
||||
amzChecksumSHA256 = "x-amz-checksum-sha256"
|
||||
// ChecksumFullObjectCRC32 indicates full object CRC32
|
||||
ChecksumFullObjectCRC32 = ChecksumCRC32 | ChecksumFullObject
|
||||
|
||||
// ChecksumFullObjectCRC32C indicates full object CRC32C
|
||||
ChecksumFullObjectCRC32C = ChecksumCRC32C | ChecksumFullObject
|
||||
|
||||
amzChecksumAlgo = "x-amz-checksum-algorithm"
|
||||
amzChecksumCRC32 = "x-amz-checksum-crc32"
|
||||
amzChecksumCRC32C = "x-amz-checksum-crc32c"
|
||||
amzChecksumSHA1 = "x-amz-checksum-sha1"
|
||||
amzChecksumSHA256 = "x-amz-checksum-sha256"
|
||||
amzChecksumCRC64NVME = "x-amz-checksum-crc64nvme"
|
||||
)
|
||||
|
||||
// Base returns the base type, without modifiers.
|
||||
func (c ChecksumType) Base() ChecksumType {
|
||||
return c & checksumMask
|
||||
}
|
||||
|
||||
// Is returns if c is all of t.
|
||||
func (c ChecksumType) Is(t ChecksumType) bool {
|
||||
return c&t == t
|
||||
@@ -75,10 +99,39 @@ func (c ChecksumType) Key() string {
|
||||
return amzChecksumSHA1
|
||||
case ChecksumSHA256:
|
||||
return amzChecksumSHA256
|
||||
case ChecksumCRC64NVME:
|
||||
return amzChecksumCRC64NVME
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// CanComposite will return if the checksum type can be used for composite multipart upload on AWS.
|
||||
func (c ChecksumType) CanComposite() bool {
|
||||
switch c & checksumMask {
|
||||
case ChecksumSHA256, ChecksumSHA1, ChecksumCRC32, ChecksumCRC32C:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// CanMergeCRC will return if the checksum type can be used for multipart upload on AWS.
|
||||
func (c ChecksumType) CanMergeCRC() bool {
|
||||
switch c & checksumMask {
|
||||
case ChecksumCRC32, ChecksumCRC32C, ChecksumCRC64NVME:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// FullObjectRequested will return if the checksum type indicates full object checksum was requested.
|
||||
func (c ChecksumType) FullObjectRequested() bool {
|
||||
switch c & (ChecksumFullObject | checksumMask) {
|
||||
case ChecksumFullObjectCRC32C, ChecksumFullObjectCRC32, ChecksumCRC64NVME:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// KeyCapitalized returns the capitalized key as used in HTTP headers.
|
||||
func (c ChecksumType) KeyCapitalized() string {
|
||||
return http.CanonicalHeaderKey(c.Key())
|
||||
@@ -93,10 +146,14 @@ func (c ChecksumType) RawByteLen() int {
|
||||
return sha1.Size
|
||||
case ChecksumSHA256:
|
||||
return sha256.Size
|
||||
case ChecksumCRC64NVME:
|
||||
return crc64.Size
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
const crc64NVMEPolynomial = 0xad93d23594c93659
|
||||
|
||||
// Hasher returns a hasher corresponding to the checksum type.
|
||||
// Returns nil if no checksum.
|
||||
func (c ChecksumType) Hasher() hash.Hash {
|
||||
@@ -109,13 +166,15 @@ func (c ChecksumType) Hasher() hash.Hash {
|
||||
return sha1.New()
|
||||
case ChecksumSHA256:
|
||||
return sha256.New()
|
||||
case ChecksumCRC64NVME:
|
||||
return crc64nvme.New()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsSet returns whether the type is valid and known.
|
||||
func (c ChecksumType) IsSet() bool {
|
||||
return bits.OnesCount32(uint32(c)) == 1
|
||||
return bits.OnesCount32(uint32(c&checksumMask)) == 1
|
||||
}
|
||||
|
||||
// SetDefault will set the checksum if not already set.
|
||||
@@ -125,6 +184,16 @@ func (c *ChecksumType) SetDefault(t ChecksumType) {
|
||||
}
|
||||
}
|
||||
|
||||
// EncodeToString the encoded hash value of the content provided in b.
|
||||
func (c ChecksumType) EncodeToString(b []byte) string {
|
||||
if !c.IsSet() {
|
||||
return ""
|
||||
}
|
||||
h := c.Hasher()
|
||||
h.Write(b)
|
||||
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
// String returns the type as a string.
|
||||
// CRC32, CRC32C, SHA1, and SHA256 for valid values.
|
||||
// Empty string for unset and "<invalid>" if not valid.
|
||||
@@ -140,6 +209,8 @@ func (c ChecksumType) String() string {
|
||||
return "SHA256"
|
||||
case ChecksumNone:
|
||||
return ""
|
||||
case ChecksumCRC64NVME:
|
||||
return "CRC64NVME"
|
||||
}
|
||||
return "<invalid>"
|
||||
}
|
||||
@@ -221,3 +292,129 @@ func (c Checksum) Raw() []byte {
|
||||
}
|
||||
return c.r
|
||||
}
|
||||
|
||||
// CompositeChecksum returns the composite checksum of all provided parts.
|
||||
func (c ChecksumType) CompositeChecksum(p []ObjectPart) (*Checksum, error) {
|
||||
if !c.CanComposite() {
|
||||
return nil, errors.New("cannot do composite checksum")
|
||||
}
|
||||
sort.Slice(p, func(i, j int) bool {
|
||||
return p[i].PartNumber < p[j].PartNumber
|
||||
})
|
||||
c = c.Base()
|
||||
crcBytes := make([]byte, 0, len(p)*c.RawByteLen())
|
||||
for _, part := range p {
|
||||
pCrc, err := part.ChecksumRaw(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
crcBytes = append(crcBytes, pCrc...)
|
||||
}
|
||||
h := c.Hasher()
|
||||
h.Write(crcBytes)
|
||||
return &Checksum{Type: c, r: h.Sum(nil)}, nil
|
||||
}
|
||||
|
||||
// FullObjectChecksum will return the full object checksum from provided parts.
|
||||
func (c ChecksumType) FullObjectChecksum(p []ObjectPart) (*Checksum, error) {
|
||||
if !c.CanMergeCRC() {
|
||||
return nil, errors.New("cannot merge this checksum type")
|
||||
}
|
||||
c = c.Base()
|
||||
sort.Slice(p, func(i, j int) bool {
|
||||
return p[i].PartNumber < p[j].PartNumber
|
||||
})
|
||||
|
||||
switch len(p) {
|
||||
case 0:
|
||||
return nil, errors.New("no parts given")
|
||||
case 1:
|
||||
check, err := p[0].ChecksumRaw(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Checksum{
|
||||
Type: c,
|
||||
r: check,
|
||||
}, nil
|
||||
}
|
||||
var merged uint32
|
||||
var merged64 uint64
|
||||
first, err := p[0].ChecksumRaw(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sz := p[0].Size
|
||||
switch c {
|
||||
case ChecksumCRC32, ChecksumCRC32C:
|
||||
merged = binary.BigEndian.Uint32(first)
|
||||
case ChecksumCRC64NVME:
|
||||
merged64 = binary.BigEndian.Uint64(first)
|
||||
}
|
||||
|
||||
poly32 := uint32(crc32.IEEE)
|
||||
if c.Is(ChecksumCRC32C) {
|
||||
poly32 = crc32.Castagnoli
|
||||
}
|
||||
for _, part := range p[1:] {
|
||||
if part.Size == 0 {
|
||||
continue
|
||||
}
|
||||
sz += part.Size
|
||||
pCrc, err := part.ChecksumRaw(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch c {
|
||||
case ChecksumCRC32, ChecksumCRC32C:
|
||||
merged = crc32Combine(poly32, merged, binary.BigEndian.Uint32(pCrc), part.Size)
|
||||
case ChecksumCRC64NVME:
|
||||
merged64 = crc64Combine(bits.Reverse64(crc64NVMEPolynomial), merged64, binary.BigEndian.Uint64(pCrc), part.Size)
|
||||
}
|
||||
}
|
||||
var tmp [8]byte
|
||||
switch c {
|
||||
case ChecksumCRC32, ChecksumCRC32C:
|
||||
binary.BigEndian.PutUint32(tmp[:], merged)
|
||||
return &Checksum{
|
||||
Type: c,
|
||||
r: tmp[:4],
|
||||
}, nil
|
||||
case ChecksumCRC64NVME:
|
||||
binary.BigEndian.PutUint64(tmp[:], merged64)
|
||||
return &Checksum{
|
||||
Type: c,
|
||||
r: tmp[:8],
|
||||
}, nil
|
||||
default:
|
||||
return nil, errors.New("unknown checksum type")
|
||||
}
|
||||
}
|
||||
|
||||
func addAutoChecksumHeaders(opts *PutObjectOptions) {
|
||||
if opts.UserMetadata == nil {
|
||||
opts.UserMetadata = make(map[string]string, 1)
|
||||
}
|
||||
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
|
||||
if opts.AutoChecksum.FullObjectRequested() {
|
||||
opts.UserMetadata["X-Amz-Checksum-Type"] = "FULL_OBJECT"
|
||||
}
|
||||
}
|
||||
|
||||
func applyAutoChecksum(opts *PutObjectOptions, allParts []ObjectPart) {
|
||||
if !opts.AutoChecksum.IsSet() {
|
||||
return
|
||||
}
|
||||
if opts.AutoChecksum.CanComposite() && !opts.AutoChecksum.Is(ChecksumFullObject) {
|
||||
// Add composite hash of hashes.
|
||||
crc, err := opts.AutoChecksum.CompositeChecksum(allParts)
|
||||
if err == nil {
|
||||
opts.UserMetadata = map[string]string{opts.AutoChecksum.Key(): crc.Encoded()}
|
||||
}
|
||||
} else if opts.AutoChecksum.CanMergeCRC() {
|
||||
crc, err := opts.AutoChecksum.FullObjectChecksum(allParts)
|
||||
if err == nil {
|
||||
opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): crc.Encoded(), "X-Amz-Checksum-Type": "FULL_OBJECT"}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
1986
vendor/github.com/minio/minio-go/v7/functional_tests.go
generated
vendored
1986
vendor/github.com/minio/minio-go/v7/functional_tests.go
generated
vendored
File diff suppressed because it is too large
Load Diff
43
vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
generated
vendored
43
vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
generated
vendored
@@ -76,7 +76,8 @@ type AssumeRoleResult struct {
|
||||
type STSAssumeRole struct {
|
||||
Expiry
|
||||
|
||||
// Required http Client to use when connecting to MinIO STS service.
|
||||
// Optional http Client to use when connecting to MinIO STS service
|
||||
// (overrides default client in CredContext)
|
||||
Client *http.Client
|
||||
|
||||
// STS endpoint to fetch STS credentials.
|
||||
@@ -108,16 +109,10 @@ type STSAssumeRoleOptions struct {
|
||||
// NewSTSAssumeRole returns a pointer to a new
|
||||
// Credentials object wrapping the STSAssumeRole.
|
||||
func NewSTSAssumeRole(stsEndpoint string, opts STSAssumeRoleOptions) (*Credentials, error) {
|
||||
if stsEndpoint == "" {
|
||||
return nil, errors.New("STS endpoint cannot be empty")
|
||||
}
|
||||
if opts.AccessKey == "" || opts.SecretKey == "" {
|
||||
return nil, errors.New("AssumeRole credentials access/secretkey is mandatory")
|
||||
}
|
||||
return New(&STSAssumeRole{
|
||||
Client: &http.Client{
|
||||
Transport: http.DefaultTransport,
|
||||
},
|
||||
STSEndpoint: stsEndpoint,
|
||||
Options: opts,
|
||||
}), nil
|
||||
@@ -222,10 +217,30 @@ func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssume
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the MinIO service.
|
||||
// Error will be returned if the request fails.
|
||||
func (m *STSAssumeRole) Retrieve() (Value, error) {
|
||||
a, err := getAssumeRoleCredentials(m.Client, m.STSEndpoint, m.Options)
|
||||
// RetrieveWithCredContext retrieves credentials from the MinIO service.
|
||||
// Error will be returned if the request fails, optional cred context.
|
||||
func (m *STSAssumeRole) RetrieveWithCredContext(cc *CredContext) (Value, error) {
|
||||
if cc == nil {
|
||||
cc = defaultCredContext
|
||||
}
|
||||
|
||||
client := m.Client
|
||||
if client == nil {
|
||||
client = cc.Client
|
||||
}
|
||||
if client == nil {
|
||||
client = defaultCredContext.Client
|
||||
}
|
||||
|
||||
stsEndpoint := m.STSEndpoint
|
||||
if stsEndpoint == "" {
|
||||
stsEndpoint = cc.Endpoint
|
||||
}
|
||||
if stsEndpoint == "" {
|
||||
return Value{}, errors.New("STS endpoint unknown")
|
||||
}
|
||||
|
||||
a, err := getAssumeRoleCredentials(client, stsEndpoint, m.Options)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
@@ -241,3 +256,9 @@ func (m *STSAssumeRole) Retrieve() (Value, error) {
|
||||
SignerType: SignatureV4,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the MinIO service.
|
||||
// Error will be returned if the request fails.
|
||||
func (m *STSAssumeRole) Retrieve() (Value, error) {
|
||||
return m.RetrieveWithCredContext(nil)
|
||||
}
|
||||
|
||||
18
vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go
generated
vendored
18
vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go
generated
vendored
@@ -55,6 +55,24 @@ func NewChainCredentials(providers []Provider) *Credentials {
|
||||
})
|
||||
}
|
||||
|
||||
// RetrieveWithCredContext is like Retrieve with CredContext
|
||||
func (c *Chain) RetrieveWithCredContext(cc *CredContext) (Value, error) {
|
||||
for _, p := range c.Providers {
|
||||
creds, _ := p.RetrieveWithCredContext(cc)
|
||||
// Always prioritize non-anonymous providers, if any.
|
||||
if creds.AccessKeyID == "" && creds.SecretAccessKey == "" {
|
||||
continue
|
||||
}
|
||||
c.curr = p
|
||||
return creds, nil
|
||||
}
|
||||
// At this point we have exhausted all the providers and
|
||||
// are left without any credentials return anonymous.
|
||||
return Value{
|
||||
SignerType: SignatureAnonymous,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve returns the credentials value, returns no credentials(anonymous)
|
||||
// if no credentials provider returned any value.
|
||||
//
|
||||
|
||||
48
vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
generated
vendored
48
vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
generated
vendored
@@ -18,6 +18,7 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
@@ -30,6 +31,10 @@ const (
|
||||
defaultExpiryWindow = 0.8
|
||||
)
|
||||
|
||||
// defaultCredContext is used when the credential context doesn't
|
||||
// actually matter or the default context is suitable.
|
||||
var defaultCredContext = &CredContext{Client: http.DefaultClient}
|
||||
|
||||
// A Value is the S3 credentials value for individual credential fields.
|
||||
type Value struct {
|
||||
// S3 Access key ID
|
||||
@@ -52,8 +57,17 @@ type Value struct {
|
||||
// Value. A provider is required to manage its own Expired state, and what to
|
||||
// be expired means.
|
||||
type Provider interface {
|
||||
// RetrieveWithCredContext returns nil if it successfully retrieved the
|
||||
// value. Error is returned if the value were not obtainable, or empty.
|
||||
// optionally takes CredContext for additional context to retrieve credentials.
|
||||
RetrieveWithCredContext(cc *CredContext) (Value, error)
|
||||
|
||||
// Retrieve returns nil if it successfully retrieved the value.
|
||||
// Error is returned if the value were not obtainable, or empty.
|
||||
//
|
||||
// Deprecated: Retrieve() exists for historical compatibility and should not
|
||||
// be used. To get new credentials use the RetrieveWithCredContext function
|
||||
// to ensure the proper context (i.e. HTTP client) will be used.
|
||||
Retrieve() (Value, error)
|
||||
|
||||
// IsExpired returns if the credentials are no longer valid, and need
|
||||
@@ -61,6 +75,18 @@ type Provider interface {
|
||||
IsExpired() bool
|
||||
}
|
||||
|
||||
// CredContext is passed to the Retrieve function of a provider to provide
|
||||
// some additional context to retrieve credentials.
|
||||
type CredContext struct {
|
||||
// Client specifies the HTTP client that should be used if an HTTP
|
||||
// request is to be made to fetch the credentials.
|
||||
Client *http.Client
|
||||
|
||||
// Endpoint specifies the MinIO endpoint that will be used if no
|
||||
// explicit endpoint is provided.
|
||||
Endpoint string
|
||||
}
|
||||
|
||||
// A Expiry provides shared expiration logic to be used by credentials
|
||||
// providers to implement expiry functionality.
|
||||
//
|
||||
@@ -146,16 +172,36 @@ func New(provider Provider) *Credentials {
|
||||
//
|
||||
// If Credentials.Expire() was called the credentials Value will be force
|
||||
// expired, and the next call to Get() will cause them to be refreshed.
|
||||
//
|
||||
// Deprecated: Get() exists for historical compatibility and should not be
|
||||
// used. To get new credentials use the Credentials.GetWithContext function
|
||||
// to ensure the proper context (i.e. HTTP client) will be used.
|
||||
func (c *Credentials) Get() (Value, error) {
|
||||
return c.GetWithContext(nil)
|
||||
}
|
||||
|
||||
// GetWithContext returns the credentials value, or error if the
|
||||
// credentials Value failed to be retrieved.
|
||||
//
|
||||
// Will return the cached credentials Value if it has not expired. If the
|
||||
// credentials Value has expired the Provider's Retrieve() will be called
|
||||
// to refresh the credentials.
|
||||
//
|
||||
// If Credentials.Expire() was called the credentials Value will be force
|
||||
// expired, and the next call to Get() will cause them to be refreshed.
|
||||
func (c *Credentials) GetWithContext(cc *CredContext) (Value, error) {
|
||||
if c == nil {
|
||||
return Value{}, nil
|
||||
}
|
||||
if cc == nil {
|
||||
cc = defaultCredContext
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
if c.isExpired() {
|
||||
creds, err := c.provider.Retrieve()
|
||||
creds, err := c.provider.RetrieveWithCredContext(cc)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
|
||||
13
vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
generated
vendored
13
vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
generated
vendored
@@ -37,8 +37,7 @@ func NewEnvAWS() *Credentials {
|
||||
return New(&EnvAWS{})
|
||||
}
|
||||
|
||||
// Retrieve retrieves the keys from the environment.
|
||||
func (e *EnvAWS) Retrieve() (Value, error) {
|
||||
func (e *EnvAWS) retrieve() (Value, error) {
|
||||
e.retrieved = false
|
||||
|
||||
id := os.Getenv("AWS_ACCESS_KEY_ID")
|
||||
@@ -65,6 +64,16 @@ func (e *EnvAWS) Retrieve() (Value, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves the keys from the environment.
|
||||
func (e *EnvAWS) Retrieve() (Value, error) {
|
||||
return e.retrieve()
|
||||
}
|
||||
|
||||
// RetrieveWithCredContext is like Retrieve (no-op input of Cred Context)
|
||||
func (e *EnvAWS) RetrieveWithCredContext(_ *CredContext) (Value, error) {
|
||||
return e.retrieve()
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials have been retrieved.
|
||||
func (e *EnvAWS) IsExpired() bool {
|
||||
return !e.retrieved
|
||||
|
||||
13
vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
generated
vendored
13
vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
generated
vendored
@@ -38,8 +38,7 @@ func NewEnvMinio() *Credentials {
|
||||
return New(&EnvMinio{})
|
||||
}
|
||||
|
||||
// Retrieve retrieves the keys from the environment.
|
||||
func (e *EnvMinio) Retrieve() (Value, error) {
|
||||
func (e *EnvMinio) retrieve() (Value, error) {
|
||||
e.retrieved = false
|
||||
|
||||
id := os.Getenv("MINIO_ROOT_USER")
|
||||
@@ -62,6 +61,16 @@ func (e *EnvMinio) Retrieve() (Value, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves the keys from the environment.
|
||||
func (e *EnvMinio) Retrieve() (Value, error) {
|
||||
return e.retrieve()
|
||||
}
|
||||
|
||||
// RetrieveWithCredContext is like Retrieve() (no-op input cred context)
|
||||
func (e *EnvMinio) RetrieveWithCredContext(_ *CredContext) (Value, error) {
|
||||
return e.retrieve()
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials have been retrieved.
|
||||
func (e *EnvMinio) IsExpired() bool {
|
||||
return !e.retrieved
|
||||
|
||||
15
vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go
generated
vendored
15
vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go
generated
vendored
@@ -71,9 +71,7 @@ func NewFileAWSCredentials(filename, profile string) *Credentials {
|
||||
})
|
||||
}
|
||||
|
||||
// Retrieve reads and extracts the shared credentials from the current
|
||||
// users home directory.
|
||||
func (p *FileAWSCredentials) Retrieve() (Value, error) {
|
||||
func (p *FileAWSCredentials) retrieve() (Value, error) {
|
||||
if p.Filename == "" {
|
||||
p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
|
||||
if p.Filename == "" {
|
||||
@@ -142,6 +140,17 @@ func (p *FileAWSCredentials) Retrieve() (Value, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve reads and extracts the shared credentials from the current
|
||||
// users home directory.
|
||||
func (p *FileAWSCredentials) Retrieve() (Value, error) {
|
||||
return p.retrieve()
|
||||
}
|
||||
|
||||
// RetrieveWithCredContext is like Retrieve(), cred context is no-op for File credentials
|
||||
func (p *FileAWSCredentials) RetrieveWithCredContext(_ *CredContext) (Value, error) {
|
||||
return p.retrieve()
|
||||
}
|
||||
|
||||
// loadProfiles loads from the file pointed to by shared credentials filename for profile.
|
||||
// The credentials retrieved from the profile will be returned or error. Error will be
|
||||
// returned if it fails to read from the file, or the data is invalid.
|
||||
|
||||
15
vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
generated
vendored
15
vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
generated
vendored
@@ -56,9 +56,7 @@ func NewFileMinioClient(filename, alias string) *Credentials {
|
||||
})
|
||||
}
|
||||
|
||||
// Retrieve reads and extracts the shared credentials from the current
|
||||
// users home directory.
|
||||
func (p *FileMinioClient) Retrieve() (Value, error) {
|
||||
func (p *FileMinioClient) retrieve() (Value, error) {
|
||||
if p.Filename == "" {
|
||||
if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok {
|
||||
p.Filename = value
|
||||
@@ -96,6 +94,17 @@ func (p *FileMinioClient) Retrieve() (Value, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve reads and extracts the shared credentials from the current
|
||||
// users home directory.
|
||||
func (p *FileMinioClient) Retrieve() (Value, error) {
|
||||
return p.retrieve()
|
||||
}
|
||||
|
||||
// RetrieveWithCredContext - is like Retrieve()
|
||||
func (p *FileMinioClient) RetrieveWithCredContext(_ *CredContext) (Value, error) {
|
||||
return p.retrieve()
|
||||
}
|
||||
|
||||
// IsExpired returns if the shared credentials have expired.
|
||||
func (p *FileMinioClient) IsExpired() bool {
|
||||
return !p.retrieved
|
||||
|
||||
44
vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
generated
vendored
44
vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
generated
vendored
@@ -49,7 +49,8 @@ const DefaultExpiryWindow = -1
|
||||
type IAM struct {
|
||||
Expiry
|
||||
|
||||
// Required http Client to use when connecting to IAM metadata service.
|
||||
// Optional http Client to use when connecting to IAM metadata service
|
||||
// (overrides default client in CredContext)
|
||||
Client *http.Client
|
||||
|
||||
// Custom endpoint to fetch IAM role credentials.
|
||||
@@ -90,17 +91,16 @@ const (
|
||||
// NewIAM returns a pointer to a new Credentials object wrapping the IAM.
|
||||
func NewIAM(endpoint string) *Credentials {
|
||||
return New(&IAM{
|
||||
Client: &http.Client{
|
||||
Transport: http.DefaultTransport,
|
||||
},
|
||||
Endpoint: endpoint,
|
||||
})
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the EC2 service.
|
||||
// Error will be returned if the request fails, or unable to extract
|
||||
// the desired
|
||||
func (m *IAM) Retrieve() (Value, error) {
|
||||
// RetrieveWithCredContext is like Retrieve with Cred Context
|
||||
func (m *IAM) RetrieveWithCredContext(cc *CredContext) (Value, error) {
|
||||
if cc == nil {
|
||||
cc = defaultCredContext
|
||||
}
|
||||
|
||||
token := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN")
|
||||
if token == "" {
|
||||
token = m.Container.AuthorizationToken
|
||||
@@ -144,7 +144,16 @@ func (m *IAM) Retrieve() (Value, error) {
|
||||
var roleCreds ec2RoleCredRespBody
|
||||
var err error
|
||||
|
||||
client := m.Client
|
||||
if client == nil {
|
||||
client = cc.Client
|
||||
}
|
||||
if client == nil {
|
||||
client = defaultCredContext.Client
|
||||
}
|
||||
|
||||
endpoint := m.Endpoint
|
||||
|
||||
switch {
|
||||
case identityFile != "":
|
||||
if len(endpoint) == 0 {
|
||||
@@ -160,7 +169,7 @@ func (m *IAM) Retrieve() (Value, error) {
|
||||
}
|
||||
|
||||
creds := &STSWebIdentity{
|
||||
Client: m.Client,
|
||||
Client: client,
|
||||
STSEndpoint: endpoint,
|
||||
GetWebIDTokenExpiry: func() (*WebIdentityToken, error) {
|
||||
token, err := os.ReadFile(identityFile)
|
||||
@@ -174,7 +183,7 @@ func (m *IAM) Retrieve() (Value, error) {
|
||||
roleSessionName: roleSessionName,
|
||||
}
|
||||
|
||||
stsWebIdentityCreds, err := creds.Retrieve()
|
||||
stsWebIdentityCreds, err := creds.RetrieveWithCredContext(cc)
|
||||
if err == nil {
|
||||
m.SetExpiration(creds.Expiration(), DefaultExpiryWindow)
|
||||
}
|
||||
@@ -185,11 +194,11 @@ func (m *IAM) Retrieve() (Value, error) {
|
||||
endpoint = fmt.Sprintf("%s%s", DefaultECSRoleEndpoint, relativeURI)
|
||||
}
|
||||
|
||||
roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
|
||||
roleCreds, err = getEcsTaskCredentials(client, endpoint, token)
|
||||
|
||||
case tokenFile != "" && fullURI != "":
|
||||
endpoint = fullURI
|
||||
roleCreds, err = getEKSPodIdentityCredentials(m.Client, endpoint, tokenFile)
|
||||
roleCreds, err = getEKSPodIdentityCredentials(client, endpoint, tokenFile)
|
||||
|
||||
case fullURI != "":
|
||||
if len(endpoint) == 0 {
|
||||
@@ -203,10 +212,10 @@ func (m *IAM) Retrieve() (Value, error) {
|
||||
}
|
||||
}
|
||||
|
||||
roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
|
||||
roleCreds, err = getEcsTaskCredentials(client, endpoint, token)
|
||||
|
||||
default:
|
||||
roleCreds, err = getCredentials(m.Client, endpoint)
|
||||
roleCreds, err = getCredentials(client, endpoint)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
@@ -224,6 +233,13 @@ func (m *IAM) Retrieve() (Value, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the EC2 service.
|
||||
// Error will be returned if the request fails, or unable to extract
|
||||
// the desired
|
||||
func (m *IAM) Retrieve() (Value, error) {
|
||||
return m.RetrieveWithCredContext(nil)
|
||||
}
|
||||
|
||||
// A ec2RoleCredRespBody provides the shape for unmarshaling credential
|
||||
// request responses.
|
||||
type ec2RoleCredRespBody struct {
|
||||
|
||||
5
vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go
generated
vendored
5
vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go
generated
vendored
@@ -59,6 +59,11 @@ func (s *Static) Retrieve() (Value, error) {
|
||||
return s.Value, nil
|
||||
}
|
||||
|
||||
// RetrieveWithCredContext returns the static credentials.
|
||||
func (s *Static) RetrieveWithCredContext(_ *CredContext) (Value, error) {
|
||||
return s.Retrieve()
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials are expired.
|
||||
//
|
||||
// For Static, the credentials never expired.
|
||||
|
||||
42
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
generated
vendored
42
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
generated
vendored
@@ -72,7 +72,8 @@ type ClientGrantsToken struct {
|
||||
type STSClientGrants struct {
|
||||
Expiry
|
||||
|
||||
// Required http Client to use when connecting to MinIO STS service.
|
||||
// Optional http Client to use when connecting to MinIO STS service.
|
||||
// (overrides default client in CredContext)
|
||||
Client *http.Client
|
||||
|
||||
// MinIO endpoint to fetch STS credentials.
|
||||
@@ -90,16 +91,10 @@ type STSClientGrants struct {
|
||||
// NewSTSClientGrants returns a pointer to a new
|
||||
// Credentials object wrapping the STSClientGrants.
|
||||
func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) {
|
||||
if stsEndpoint == "" {
|
||||
return nil, errors.New("STS endpoint cannot be empty")
|
||||
}
|
||||
if getClientGrantsTokenExpiry == nil {
|
||||
return nil, errors.New("Client grants access token and expiry retrieval function should be defined")
|
||||
}
|
||||
return New(&STSClientGrants{
|
||||
Client: &http.Client{
|
||||
Transport: http.DefaultTransport,
|
||||
},
|
||||
STSEndpoint: stsEndpoint,
|
||||
GetClientGrantsTokenExpiry: getClientGrantsTokenExpiry,
|
||||
}), nil
|
||||
@@ -162,10 +157,29 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string,
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the MinIO service.
|
||||
// Error will be returned if the request fails.
|
||||
func (m *STSClientGrants) Retrieve() (Value, error) {
|
||||
a, err := getClientGrantsCredentials(m.Client, m.STSEndpoint, m.GetClientGrantsTokenExpiry)
|
||||
// RetrieveWithCredContext is like Retrieve() with cred context
|
||||
func (m *STSClientGrants) RetrieveWithCredContext(cc *CredContext) (Value, error) {
|
||||
if cc == nil {
|
||||
cc = defaultCredContext
|
||||
}
|
||||
|
||||
client := m.Client
|
||||
if client == nil {
|
||||
client = cc.Client
|
||||
}
|
||||
if client == nil {
|
||||
client = defaultCredContext.Client
|
||||
}
|
||||
|
||||
stsEndpoint := m.STSEndpoint
|
||||
if stsEndpoint == "" {
|
||||
stsEndpoint = cc.Endpoint
|
||||
}
|
||||
if stsEndpoint == "" {
|
||||
return Value{}, errors.New("STS endpoint unknown")
|
||||
}
|
||||
|
||||
a, err := getClientGrantsCredentials(client, stsEndpoint, m.GetClientGrantsTokenExpiry)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
@@ -181,3 +195,9 @@ func (m *STSClientGrants) Retrieve() (Value, error) {
|
||||
SignerType: SignatureV4,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the MinIO service.
|
||||
// Error will be returned if the request fails.
|
||||
func (m *STSClientGrants) Retrieve() (Value, error) {
|
||||
return m.RetrieveWithCredContext(nil)
|
||||
}
|
||||
|
||||
36
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go
generated
vendored
36
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go
generated
vendored
@@ -53,6 +53,8 @@ type AssumeRoleWithCustomTokenResponse struct {
|
||||
type CustomTokenIdentity struct {
|
||||
Expiry
|
||||
|
||||
// Optional http Client to use when connecting to MinIO STS service.
|
||||
// (overrides default client in CredContext)
|
||||
Client *http.Client
|
||||
|
||||
// MinIO server STS endpoint to fetch STS credentials.
|
||||
@@ -69,9 +71,21 @@ type CustomTokenIdentity struct {
|
||||
RequestedExpiry time.Duration
|
||||
}
|
||||
|
||||
// Retrieve - to satisfy Provider interface; fetches credentials from MinIO.
|
||||
func (c *CustomTokenIdentity) Retrieve() (value Value, err error) {
|
||||
u, err := url.Parse(c.STSEndpoint)
|
||||
// RetrieveWithCredContext with Retrieve optionally cred context
|
||||
func (c *CustomTokenIdentity) RetrieveWithCredContext(cc *CredContext) (value Value, err error) {
|
||||
if cc == nil {
|
||||
cc = defaultCredContext
|
||||
}
|
||||
|
||||
stsEndpoint := c.STSEndpoint
|
||||
if stsEndpoint == "" {
|
||||
stsEndpoint = cc.Endpoint
|
||||
}
|
||||
if stsEndpoint == "" {
|
||||
return Value{}, errors.New("STS endpoint unknown")
|
||||
}
|
||||
|
||||
u, err := url.Parse(stsEndpoint)
|
||||
if err != nil {
|
||||
return value, err
|
||||
}
|
||||
@@ -92,7 +106,15 @@ func (c *CustomTokenIdentity) Retrieve() (value Value, err error) {
|
||||
return value, err
|
||||
}
|
||||
|
||||
resp, err := c.Client.Do(req)
|
||||
client := c.Client
|
||||
if client == nil {
|
||||
client = cc.Client
|
||||
}
|
||||
if client == nil {
|
||||
client = defaultCredContext.Client
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return value, err
|
||||
}
|
||||
@@ -118,11 +140,15 @@ func (c *CustomTokenIdentity) Retrieve() (value Value, err error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve - to satisfy Provider interface; fetches credentials from MinIO.
|
||||
func (c *CustomTokenIdentity) Retrieve() (value Value, err error) {
|
||||
return c.RetrieveWithCredContext(nil)
|
||||
}
|
||||
|
||||
// NewCustomTokenCredentials - returns credentials using the
|
||||
// AssumeRoleWithCustomToken STS API.
|
||||
func NewCustomTokenCredentials(stsEndpoint, token, roleArn string, optFuncs ...CustomTokenOpt) (*Credentials, error) {
|
||||
c := CustomTokenIdentity{
|
||||
Client: &http.Client{Transport: http.DefaultTransport},
|
||||
STSEndpoint: stsEndpoint,
|
||||
Token: token,
|
||||
RoleArn: roleArn,
|
||||
|
||||
40
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
generated
vendored
40
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
generated
vendored
@@ -20,6 +20,7 @@ package credentials
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
@@ -55,7 +56,8 @@ type LDAPIdentityResult struct {
|
||||
type LDAPIdentity struct {
|
||||
Expiry
|
||||
|
||||
// Required http Client to use when connecting to MinIO STS service.
|
||||
// Optional http Client to use when connecting to MinIO STS service.
|
||||
// (overrides default client in CredContext)
|
||||
Client *http.Client
|
||||
|
||||
// Exported STS endpoint to fetch STS credentials.
|
||||
@@ -77,7 +79,6 @@ type LDAPIdentity struct {
|
||||
// Identity.
|
||||
func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string, optFuncs ...LDAPIdentityOpt) (*Credentials, error) {
|
||||
l := LDAPIdentity{
|
||||
Client: &http.Client{Transport: http.DefaultTransport},
|
||||
STSEndpoint: stsEndpoint,
|
||||
LDAPUsername: ldapUsername,
|
||||
LDAPPassword: ldapPassword,
|
||||
@@ -113,7 +114,6 @@ func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt {
|
||||
// Deprecated: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead.
|
||||
func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, policy string) (*Credentials, error) {
|
||||
return New(&LDAPIdentity{
|
||||
Client: &http.Client{Transport: http.DefaultTransport},
|
||||
STSEndpoint: stsEndpoint,
|
||||
LDAPUsername: ldapUsername,
|
||||
LDAPPassword: ldapPassword,
|
||||
@@ -121,10 +121,22 @@ func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, p
|
||||
}), nil
|
||||
}
|
||||
|
||||
// Retrieve gets the credential by calling the MinIO STS API for
|
||||
// RetrieveWithCredContext gets the credential by calling the MinIO STS API for
|
||||
// LDAP on the configured stsEndpoint.
|
||||
func (k *LDAPIdentity) Retrieve() (value Value, err error) {
|
||||
u, err := url.Parse(k.STSEndpoint)
|
||||
func (k *LDAPIdentity) RetrieveWithCredContext(cc *CredContext) (value Value, err error) {
|
||||
if cc == nil {
|
||||
cc = defaultCredContext
|
||||
}
|
||||
|
||||
stsEndpoint := k.STSEndpoint
|
||||
if stsEndpoint == "" {
|
||||
stsEndpoint = cc.Endpoint
|
||||
}
|
||||
if stsEndpoint == "" {
|
||||
return Value{}, errors.New("STS endpoint unknown")
|
||||
}
|
||||
|
||||
u, err := url.Parse(stsEndpoint)
|
||||
if err != nil {
|
||||
return value, err
|
||||
}
|
||||
@@ -148,7 +160,15 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) {
|
||||
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
|
||||
resp, err := k.Client.Do(req)
|
||||
client := k.Client
|
||||
if client == nil {
|
||||
client = cc.Client
|
||||
}
|
||||
if client == nil {
|
||||
client = defaultCredContext.Client
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return value, err
|
||||
}
|
||||
@@ -188,3 +208,9 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) {
|
||||
SignerType: SignatureV4,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve gets the credential by calling the MinIO STS API for
|
||||
// LDAP on the configured stsEndpoint.
|
||||
func (k *LDAPIdentity) Retrieve() (value Value, err error) {
|
||||
return k.RetrieveWithCredContext(defaultCredContext)
|
||||
}
|
||||
|
||||
100
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
generated
vendored
100
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
generated
vendored
@@ -20,8 +20,8 @@ import (
|
||||
"crypto/tls"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
@@ -36,7 +36,12 @@ type CertificateIdentityOption func(*STSCertificateIdentity)
|
||||
// CertificateIdentityWithTransport returns a CertificateIdentityOption that
|
||||
// customizes the STSCertificateIdentity with the given http.RoundTripper.
|
||||
func CertificateIdentityWithTransport(t http.RoundTripper) CertificateIdentityOption {
|
||||
return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.Client.Transport = t })
|
||||
return CertificateIdentityOption(func(i *STSCertificateIdentity) {
|
||||
if i.Client == nil {
|
||||
i.Client = &http.Client{}
|
||||
}
|
||||
i.Client.Transport = t
|
||||
})
|
||||
}
|
||||
|
||||
// CertificateIdentityWithExpiry returns a CertificateIdentityOption that
|
||||
@@ -53,6 +58,10 @@ func CertificateIdentityWithExpiry(livetime time.Duration) CertificateIdentityOp
|
||||
type STSCertificateIdentity struct {
|
||||
Expiry
|
||||
|
||||
// Optional http Client to use when connecting to MinIO STS service.
|
||||
// (overrides default client in CredContext)
|
||||
Client *http.Client
|
||||
|
||||
// STSEndpoint is the base URL endpoint of the STS API.
|
||||
// For example, https://minio.local:9000
|
||||
STSEndpoint string
|
||||
@@ -68,50 +77,18 @@ type STSCertificateIdentity struct {
|
||||
// The default livetime is one hour.
|
||||
S3CredentialLivetime time.Duration
|
||||
|
||||
// Client is the HTTP client used to authenticate and fetch
|
||||
// S3 credentials.
|
||||
//
|
||||
// A custom TLS client configuration can be specified by
|
||||
// using a custom http.Transport:
|
||||
// Client: http.Client {
|
||||
// Transport: &http.Transport{
|
||||
// TLSClientConfig: &tls.Config{},
|
||||
// },
|
||||
// }
|
||||
Client http.Client
|
||||
// Certificate is the client certificate that is used for
|
||||
// STS authentication.
|
||||
Certificate tls.Certificate
|
||||
}
|
||||
|
||||
var _ Provider = (*STSWebIdentity)(nil) // compiler check
|
||||
|
||||
// NewSTSCertificateIdentity returns a STSCertificateIdentity that authenticates
|
||||
// to the given STS endpoint with the given TLS certificate and retrieves and
|
||||
// rotates S3 credentials.
|
||||
func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, options ...CertificateIdentityOption) (*Credentials, error) {
|
||||
if endpoint == "" {
|
||||
return nil, errors.New("STS endpoint cannot be empty")
|
||||
}
|
||||
if _, err := url.Parse(endpoint); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
identity := &STSCertificateIdentity{
|
||||
STSEndpoint: endpoint,
|
||||
Client: http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
DialContext: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).DialContext,
|
||||
ForceAttemptHTTP2: true,
|
||||
MaxIdleConns: 100,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
ExpectContinueTimeout: 5 * time.Second,
|
||||
TLSClientConfig: &tls.Config{
|
||||
Certificates: []tls.Certificate{certificate},
|
||||
},
|
||||
},
|
||||
},
|
||||
Certificate: certificate,
|
||||
}
|
||||
for _, option := range options {
|
||||
option(identity)
|
||||
@@ -119,10 +96,21 @@ func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, opt
|
||||
return New(identity), nil
|
||||
}
|
||||
|
||||
// Retrieve fetches a new set of S3 credentials from the configured
|
||||
// STS API endpoint.
|
||||
func (i *STSCertificateIdentity) Retrieve() (Value, error) {
|
||||
endpointURL, err := url.Parse(i.STSEndpoint)
|
||||
// RetrieveWithCredContext is Retrieve with cred context
|
||||
func (i *STSCertificateIdentity) RetrieveWithCredContext(cc *CredContext) (Value, error) {
|
||||
if cc == nil {
|
||||
cc = defaultCredContext
|
||||
}
|
||||
|
||||
stsEndpoint := i.STSEndpoint
|
||||
if stsEndpoint == "" {
|
||||
stsEndpoint = cc.Endpoint
|
||||
}
|
||||
if stsEndpoint == "" {
|
||||
return Value{}, errors.New("STS endpoint unknown")
|
||||
}
|
||||
|
||||
endpointURL, err := url.Parse(stsEndpoint)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
@@ -145,7 +133,28 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) {
|
||||
}
|
||||
req.Form.Add("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10))
|
||||
|
||||
resp, err := i.Client.Do(req)
|
||||
client := i.Client
|
||||
if client == nil {
|
||||
client = cc.Client
|
||||
}
|
||||
if client == nil {
|
||||
client = defaultCredContext.Client
|
||||
}
|
||||
|
||||
tr, ok := client.Transport.(*http.Transport)
|
||||
if !ok {
|
||||
return Value{}, fmt.Errorf("CredContext should contain an http.Transport value")
|
||||
}
|
||||
|
||||
// Clone the HTTP transport (patch the TLS client certificate)
|
||||
trCopy := tr.Clone()
|
||||
trCopy.TLSClientConfig.Certificates = []tls.Certificate{i.Certificate}
|
||||
|
||||
// Clone the HTTP client (patch the HTTP transport)
|
||||
clientCopy := *client
|
||||
clientCopy.Transport = trCopy
|
||||
|
||||
resp, err := clientCopy.Do(req)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
@@ -193,6 +202,11 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve fetches a new set of S3 credentials from the configured STS API endpoint.
|
||||
func (i *STSCertificateIdentity) Retrieve() (Value, error) {
|
||||
return i.RetrieveWithCredContext(defaultCredContext)
|
||||
}
|
||||
|
||||
// Expiration returns the expiration time of the current S3 credentials.
|
||||
func (i *STSCertificateIdentity) Expiration() time.Time { return i.expiration }
|
||||
|
||||
|
||||
53
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
generated
vendored
53
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
generated
vendored
@@ -58,9 +58,10 @@ type WebIdentityResult struct {
|
||||
|
||||
// WebIdentityToken - web identity token with expiry.
|
||||
type WebIdentityToken struct {
|
||||
Token string
|
||||
AccessToken string
|
||||
Expiry int
|
||||
Token string
|
||||
AccessToken string
|
||||
RefreshToken string
|
||||
Expiry int
|
||||
}
|
||||
|
||||
// A STSWebIdentity retrieves credentials from MinIO service, and keeps track if
|
||||
@@ -68,7 +69,8 @@ type WebIdentityToken struct {
|
||||
type STSWebIdentity struct {
|
||||
Expiry
|
||||
|
||||
// Required http Client to use when connecting to MinIO STS service.
|
||||
// Optional http Client to use when connecting to MinIO STS service.
|
||||
// (overrides default client in CredContext)
|
||||
Client *http.Client
|
||||
|
||||
// Exported STS endpoint to fetch STS credentials.
|
||||
@@ -96,16 +98,10 @@ type STSWebIdentity struct {
|
||||
// NewSTSWebIdentity returns a pointer to a new
|
||||
// Credentials object wrapping the STSWebIdentity.
|
||||
func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error), opts ...func(*STSWebIdentity)) (*Credentials, error) {
|
||||
if stsEndpoint == "" {
|
||||
return nil, errors.New("STS endpoint cannot be empty")
|
||||
}
|
||||
if getWebIDTokenExpiry == nil {
|
||||
return nil, errors.New("Web ID token and expiry retrieval function should be defined")
|
||||
}
|
||||
i := &STSWebIdentity{
|
||||
Client: &http.Client{
|
||||
Transport: http.DefaultTransport,
|
||||
},
|
||||
STSEndpoint: stsEndpoint,
|
||||
GetWebIDTokenExpiry: getWebIDTokenExpiry,
|
||||
}
|
||||
@@ -161,6 +157,10 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
|
||||
// Usually set when server is using extended userInfo endpoint.
|
||||
v.Set("WebIdentityAccessToken", idToken.AccessToken)
|
||||
}
|
||||
if idToken.RefreshToken != "" {
|
||||
// Usually set when server is using extended userInfo endpoint.
|
||||
v.Set("WebIdentityRefreshToken", idToken.RefreshToken)
|
||||
}
|
||||
if idToken.Expiry > 0 {
|
||||
v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry))
|
||||
}
|
||||
@@ -214,10 +214,29 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the MinIO service.
|
||||
// Error will be returned if the request fails.
|
||||
func (m *STSWebIdentity) Retrieve() (Value, error) {
|
||||
a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.Policy, m.GetWebIDTokenExpiry)
|
||||
// RetrieveWithCredContext is like Retrieve with optional cred context.
|
||||
func (m *STSWebIdentity) RetrieveWithCredContext(cc *CredContext) (Value, error) {
|
||||
if cc == nil {
|
||||
cc = defaultCredContext
|
||||
}
|
||||
|
||||
client := m.Client
|
||||
if client == nil {
|
||||
client = cc.Client
|
||||
}
|
||||
if client == nil {
|
||||
client = defaultCredContext.Client
|
||||
}
|
||||
|
||||
stsEndpoint := m.STSEndpoint
|
||||
if stsEndpoint == "" {
|
||||
stsEndpoint = cc.Endpoint
|
||||
}
|
||||
if stsEndpoint == "" {
|
||||
return Value{}, errors.New("STS endpoint unknown")
|
||||
}
|
||||
|
||||
a, err := getWebIdentityCredentials(client, stsEndpoint, m.RoleARN, m.roleSessionName, m.Policy, m.GetWebIDTokenExpiry)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
@@ -234,6 +253,12 @@ func (m *STSWebIdentity) Retrieve() (Value, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the MinIO service.
|
||||
// Error will be returned if the request fails.
|
||||
func (m *STSWebIdentity) Retrieve() (Value, error) {
|
||||
return m.RetrieveWithCredContext(nil)
|
||||
}
|
||||
|
||||
// Expiration returns the expiration time of the credentials
|
||||
func (m *STSWebIdentity) Expiration() time.Time {
|
||||
return m.expiration
|
||||
|
||||
26
vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
generated
vendored
26
vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
generated
vendored
@@ -434,12 +434,34 @@ func (de DelMarkerExpiration) MarshalXML(enc *xml.Encoder, start xml.StartElemen
|
||||
return enc.EncodeElement(delMarkerExp(de), start)
|
||||
}
|
||||
|
||||
// AllVersionsExpiration represents AllVersionsExpiration actions element in an ILM policy
|
||||
type AllVersionsExpiration struct {
|
||||
XMLName xml.Name `xml:"AllVersionsExpiration" json:"-"`
|
||||
Days int `xml:"Days,omitempty" json:"Days,omitempty"`
|
||||
DeleteMarker ExpireDeleteMarker `xml:"DeleteMarker,omitempty" json:"DeleteMarker,omitempty"`
|
||||
}
|
||||
|
||||
// IsNull returns true if days field is 0
|
||||
func (e AllVersionsExpiration) IsNull() bool {
|
||||
return e.Days == 0
|
||||
}
|
||||
|
||||
// MarshalXML satisfies xml.Marshaler to provide custom encoding
|
||||
func (e AllVersionsExpiration) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
|
||||
if e.IsNull() {
|
||||
return nil
|
||||
}
|
||||
type allVersionsExp AllVersionsExpiration
|
||||
return enc.EncodeElement(allVersionsExp(e), start)
|
||||
}
|
||||
|
||||
// MarshalJSON customizes json encoding by omitting empty values
|
||||
func (r Rule) MarshalJSON() ([]byte, error) {
|
||||
type rule struct {
|
||||
AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"`
|
||||
Expiration *Expiration `json:"Expiration,omitempty"`
|
||||
DelMarkerExpiration *DelMarkerExpiration `json:"DelMarkerExpiration,omitempty"`
|
||||
AllVersionsExpiration *AllVersionsExpiration `json:"AllVersionsExpiration,omitempty"`
|
||||
ID string `json:"ID"`
|
||||
RuleFilter *Filter `json:"Filter,omitempty"`
|
||||
NoncurrentVersionExpiration *NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration,omitempty"`
|
||||
@@ -475,6 +497,9 @@ func (r Rule) MarshalJSON() ([]byte, error) {
|
||||
if !r.NoncurrentVersionTransition.isNull() {
|
||||
newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition
|
||||
}
|
||||
if !r.AllVersionsExpiration.IsNull() {
|
||||
newr.AllVersionsExpiration = &r.AllVersionsExpiration
|
||||
}
|
||||
|
||||
return json.Marshal(newr)
|
||||
}
|
||||
@@ -485,6 +510,7 @@ type Rule struct {
|
||||
AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"`
|
||||
Expiration Expiration `xml:"Expiration,omitempty" json:"Expiration,omitempty"`
|
||||
DelMarkerExpiration DelMarkerExpiration `xml:"DelMarkerExpiration,omitempty" json:"DelMarkerExpiration,omitempty"`
|
||||
AllVersionsExpiration AllVersionsExpiration `xml:"AllVersionsExpiration,omitempty" json:"AllVersionsExpiration,omitempty"`
|
||||
ID string `xml:"ID" json:"ID"`
|
||||
RuleFilter Filter `xml:"Filter,omitempty" json:"Filter,omitempty"`
|
||||
NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"`
|
||||
|
||||
60
vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
generated
vendored
60
vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
generated
vendored
@@ -118,53 +118,53 @@ func GetRegionFromURL(endpointURL url.URL) string {
|
||||
if endpointURL == sentinelURL {
|
||||
return ""
|
||||
}
|
||||
if endpointURL.Host == "s3-external-1.amazonaws.com" {
|
||||
if endpointURL.Hostname() == "s3-external-1.amazonaws.com" {
|
||||
return ""
|
||||
}
|
||||
|
||||
// if elb's are used we cannot calculate which region it may be, just return empty.
|
||||
if elbAmazonRegex.MatchString(endpointURL.Host) || elbAmazonCnRegex.MatchString(endpointURL.Host) {
|
||||
if elbAmazonRegex.MatchString(endpointURL.Hostname()) || elbAmazonCnRegex.MatchString(endpointURL.Hostname()) {
|
||||
return ""
|
||||
}
|
||||
|
||||
// We check for FIPS dualstack matching first to avoid the non-greedy
|
||||
// regex for FIPS non-dualstack matching a dualstack URL
|
||||
parts := amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Host)
|
||||
parts := amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Hostname())
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
|
||||
parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Host)
|
||||
parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Hostname())
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
|
||||
parts = amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
|
||||
parts = amazonS3HostDualStack.FindStringSubmatch(endpointURL.Hostname())
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
|
||||
parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host)
|
||||
parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Hostname())
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
|
||||
parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host)
|
||||
parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Hostname())
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
|
||||
parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Host)
|
||||
parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Hostname())
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
|
||||
parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host)
|
||||
parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Hostname())
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
|
||||
parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Host)
|
||||
parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Hostname())
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
@@ -218,7 +218,7 @@ func IsAmazonPrivateLinkEndpoint(endpointURL url.URL) bool {
|
||||
if endpointURL == sentinelURL {
|
||||
return false
|
||||
}
|
||||
return amazonS3HostPrivateLink.MatchString(endpointURL.Host)
|
||||
return amazonS3HostPrivateLink.MatchString(endpointURL.Hostname())
|
||||
}
|
||||
|
||||
// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
|
||||
@@ -261,44 +261,6 @@ func QueryEncode(v url.Values) string {
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// TagDecode - decodes canonical tag into map of key and value.
|
||||
func TagDecode(ctag string) map[string]string {
|
||||
if ctag == "" {
|
||||
return map[string]string{}
|
||||
}
|
||||
tags := strings.Split(ctag, "&")
|
||||
tagMap := make(map[string]string, len(tags))
|
||||
var err error
|
||||
for _, tag := range tags {
|
||||
kvs := strings.SplitN(tag, "=", 2)
|
||||
if len(kvs) == 0 {
|
||||
return map[string]string{}
|
||||
}
|
||||
if len(kvs) == 1 {
|
||||
return map[string]string{}
|
||||
}
|
||||
tagMap[kvs[0]], err = url.PathUnescape(kvs[1])
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
return tagMap
|
||||
}
|
||||
|
||||
// TagEncode - encodes tag values in their URL encoded form. In
|
||||
// addition to the percent encoding performed by urlEncodePath() used
|
||||
// here, it also percent encodes '/' (forward slash)
|
||||
func TagEncode(tags map[string]string) string {
|
||||
if tags == nil {
|
||||
return ""
|
||||
}
|
||||
values := url.Values{}
|
||||
for k, v := range tags {
|
||||
values[k] = []string{v}
|
||||
}
|
||||
return QueryEncode(values)
|
||||
}
|
||||
|
||||
// if object matches reserved string, no need to encode them
|
||||
var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
|
||||
|
||||
|
||||
71
vendor/github.com/minio/minio-go/v7/post-policy.go
generated
vendored
71
vendor/github.com/minio/minio-go/v7/post-policy.go
generated
vendored
@@ -85,7 +85,7 @@ func (p *PostPolicy) SetExpires(t time.Time) error {
|
||||
|
||||
// SetKey - Sets an object name for the policy based upload.
|
||||
func (p *PostPolicy) SetKey(key string) error {
|
||||
if strings.TrimSpace(key) == "" || key == "" {
|
||||
if strings.TrimSpace(key) == "" {
|
||||
return errInvalidArgument("Object name is empty.")
|
||||
}
|
||||
policyCond := policyCondition{
|
||||
@@ -118,7 +118,7 @@ func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
|
||||
|
||||
// SetBucket - Sets bucket at which objects will be uploaded to.
|
||||
func (p *PostPolicy) SetBucket(bucketName string) error {
|
||||
if strings.TrimSpace(bucketName) == "" || bucketName == "" {
|
||||
if strings.TrimSpace(bucketName) == "" {
|
||||
return errInvalidArgument("Bucket name is empty.")
|
||||
}
|
||||
policyCond := policyCondition{
|
||||
@@ -135,7 +135,7 @@ func (p *PostPolicy) SetBucket(bucketName string) error {
|
||||
|
||||
// SetCondition - Sets condition for credentials, date and algorithm
|
||||
func (p *PostPolicy) SetCondition(matchType, condition, value string) error {
|
||||
if strings.TrimSpace(value) == "" || value == "" {
|
||||
if strings.TrimSpace(value) == "" {
|
||||
return errInvalidArgument("No value specified for condition")
|
||||
}
|
||||
|
||||
@@ -156,7 +156,7 @@ func (p *PostPolicy) SetCondition(matchType, condition, value string) error {
|
||||
|
||||
// SetTagging - Sets tagging for the object for this policy based upload.
|
||||
func (p *PostPolicy) SetTagging(tagging string) error {
|
||||
if strings.TrimSpace(tagging) == "" || tagging == "" {
|
||||
if strings.TrimSpace(tagging) == "" {
|
||||
return errInvalidArgument("No tagging specified.")
|
||||
}
|
||||
_, err := tags.ParseObjectXML(strings.NewReader(tagging))
|
||||
@@ -178,7 +178,7 @@ func (p *PostPolicy) SetTagging(tagging string) error {
|
||||
// SetContentType - Sets content-type of the object for this policy
|
||||
// based upload.
|
||||
func (p *PostPolicy) SetContentType(contentType string) error {
|
||||
if strings.TrimSpace(contentType) == "" || contentType == "" {
|
||||
if strings.TrimSpace(contentType) == "" {
|
||||
return errInvalidArgument("No content type specified.")
|
||||
}
|
||||
policyCond := policyCondition{
|
||||
@@ -211,7 +211,7 @@ func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) erro
|
||||
|
||||
// SetContentDisposition - Sets content-disposition of the object for this policy
|
||||
func (p *PostPolicy) SetContentDisposition(contentDisposition string) error {
|
||||
if strings.TrimSpace(contentDisposition) == "" || contentDisposition == "" {
|
||||
if strings.TrimSpace(contentDisposition) == "" {
|
||||
return errInvalidArgument("No content disposition specified.")
|
||||
}
|
||||
policyCond := policyCondition{
|
||||
@@ -226,27 +226,44 @@ func (p *PostPolicy) SetContentDisposition(contentDisposition string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetContentEncoding - Sets content-encoding of the object for this policy
|
||||
func (p *PostPolicy) SetContentEncoding(contentEncoding string) error {
|
||||
if strings.TrimSpace(contentEncoding) == "" {
|
||||
return errInvalidArgument("No content encoding specified.")
|
||||
}
|
||||
policyCond := policyCondition{
|
||||
matchType: "eq",
|
||||
condition: "$Content-Encoding",
|
||||
value: contentEncoding,
|
||||
}
|
||||
if err := p.addNewPolicy(policyCond); err != nil {
|
||||
return err
|
||||
}
|
||||
p.formData["Content-Encoding"] = contentEncoding
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetContentLengthRange - Set new min and max content length
|
||||
// condition for all incoming uploads.
|
||||
func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
|
||||
if min > max {
|
||||
func (p *PostPolicy) SetContentLengthRange(minLen, maxLen int64) error {
|
||||
if minLen > maxLen {
|
||||
return errInvalidArgument("Minimum limit is larger than maximum limit.")
|
||||
}
|
||||
if min < 0 {
|
||||
if minLen < 0 {
|
||||
return errInvalidArgument("Minimum limit cannot be negative.")
|
||||
}
|
||||
if max <= 0 {
|
||||
if maxLen <= 0 {
|
||||
return errInvalidArgument("Maximum limit cannot be non-positive.")
|
||||
}
|
||||
p.contentLengthRange.min = min
|
||||
p.contentLengthRange.max = max
|
||||
p.contentLengthRange.min = minLen
|
||||
p.contentLengthRange.max = maxLen
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetSuccessActionRedirect - Sets the redirect success url of the object for this policy
|
||||
// based upload.
|
||||
func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error {
|
||||
if strings.TrimSpace(redirect) == "" || redirect == "" {
|
||||
if strings.TrimSpace(redirect) == "" {
|
||||
return errInvalidArgument("Redirect is empty")
|
||||
}
|
||||
policyCond := policyCondition{
|
||||
@@ -264,7 +281,7 @@ func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error {
|
||||
// SetSuccessStatusAction - Sets the status success code of the object for this policy
|
||||
// based upload.
|
||||
func (p *PostPolicy) SetSuccessStatusAction(status string) error {
|
||||
if strings.TrimSpace(status) == "" || status == "" {
|
||||
if strings.TrimSpace(status) == "" {
|
||||
return errInvalidArgument("Status is empty")
|
||||
}
|
||||
policyCond := policyCondition{
|
||||
@@ -282,10 +299,10 @@ func (p *PostPolicy) SetSuccessStatusAction(status string) error {
|
||||
// SetUserMetadata - Set user metadata as a key/value couple.
|
||||
// Can be retrieved through a HEAD request or an event.
|
||||
func (p *PostPolicy) SetUserMetadata(key, value string) error {
|
||||
if strings.TrimSpace(key) == "" || key == "" {
|
||||
if strings.TrimSpace(key) == "" {
|
||||
return errInvalidArgument("Key is empty")
|
||||
}
|
||||
if strings.TrimSpace(value) == "" || value == "" {
|
||||
if strings.TrimSpace(value) == "" {
|
||||
return errInvalidArgument("Value is empty")
|
||||
}
|
||||
headerName := fmt.Sprintf("x-amz-meta-%s", key)
|
||||
@@ -304,7 +321,7 @@ func (p *PostPolicy) SetUserMetadata(key, value string) error {
|
||||
// SetUserMetadataStartsWith - Set how an user metadata should starts with.
|
||||
// Can be retrieved through a HEAD request or an event.
|
||||
func (p *PostPolicy) SetUserMetadataStartsWith(key, value string) error {
|
||||
if strings.TrimSpace(key) == "" || key == "" {
|
||||
if strings.TrimSpace(key) == "" {
|
||||
return errInvalidArgument("Key is empty")
|
||||
}
|
||||
headerName := fmt.Sprintf("x-amz-meta-%s", key)
|
||||
@@ -321,11 +338,29 @@ func (p *PostPolicy) SetUserMetadataStartsWith(key, value string) error {
|
||||
}
|
||||
|
||||
// SetChecksum sets the checksum of the request.
|
||||
func (p *PostPolicy) SetChecksum(c Checksum) {
|
||||
func (p *PostPolicy) SetChecksum(c Checksum) error {
|
||||
if c.IsSet() {
|
||||
p.formData[amzChecksumAlgo] = c.Type.String()
|
||||
p.formData[c.Type.Key()] = c.Encoded()
|
||||
|
||||
policyCond := policyCondition{
|
||||
matchType: "eq",
|
||||
condition: fmt.Sprintf("$%s", amzChecksumAlgo),
|
||||
value: c.Type.String(),
|
||||
}
|
||||
if err := p.addNewPolicy(policyCond); err != nil {
|
||||
return err
|
||||
}
|
||||
policyCond = policyCondition{
|
||||
matchType: "eq",
|
||||
condition: fmt.Sprintf("$%s", c.Type.Key()),
|
||||
value: c.Encoded(),
|
||||
}
|
||||
if err := p.addNewPolicy(policyCond); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetEncryption - sets encryption headers for POST API
|
||||
|
||||
10
vendor/github.com/minio/minio-go/v7/retry-continous.go
generated
vendored
10
vendor/github.com/minio/minio-go/v7/retry-continous.go
generated
vendored
@@ -20,7 +20,7 @@ package minio
|
||||
import "time"
|
||||
|
||||
// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
|
||||
func (c *Client) newRetryTimerContinous(unit, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
|
||||
func (c *Client) newRetryTimerContinous(baseSleep, maxSleep time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
|
||||
attemptCh := make(chan int)
|
||||
|
||||
// normalize jitter to the range [0, 1.0]
|
||||
@@ -39,10 +39,10 @@ func (c *Client) newRetryTimerContinous(unit, cap time.Duration, jitter float64,
|
||||
if attempt > maxAttempt {
|
||||
attempt = maxAttempt
|
||||
}
|
||||
// sleep = random_between(0, min(cap, base * 2 ** attempt))
|
||||
sleep := unit * time.Duration(1<<uint(attempt))
|
||||
if sleep > cap {
|
||||
sleep = cap
|
||||
// sleep = random_between(0, min(maxSleep, base * 2 ** attempt))
|
||||
sleep := baseSleep * time.Duration(1<<uint(attempt))
|
||||
if sleep > maxSleep {
|
||||
sleep = maxSleep
|
||||
}
|
||||
if jitter != NoJitter {
|
||||
sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
|
||||
|
||||
11
vendor/github.com/minio/minio-go/v7/retry.go
generated
vendored
11
vendor/github.com/minio/minio-go/v7/retry.go
generated
vendored
@@ -45,7 +45,7 @@ var DefaultRetryCap = time.Second
|
||||
|
||||
// newRetryTimer creates a timer with exponentially increasing
|
||||
// delays until the maximum retry attempts are reached.
|
||||
func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit, cap time.Duration, jitter float64) <-chan int {
|
||||
func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, maxSleep time.Duration, jitter float64) <-chan int {
|
||||
attemptCh := make(chan int)
|
||||
|
||||
// computes the exponential backoff duration according to
|
||||
@@ -59,10 +59,10 @@ func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit, cap time
|
||||
jitter = MaxJitter
|
||||
}
|
||||
|
||||
// sleep = random_between(0, min(cap, base * 2 ** attempt))
|
||||
sleep := unit * time.Duration(1<<uint(attempt))
|
||||
if sleep > cap {
|
||||
sleep = cap
|
||||
// sleep = random_between(0, min(maxSleep, base * 2 ** attempt))
|
||||
sleep := baseSleep * time.Duration(1<<uint(attempt))
|
||||
if sleep > maxSleep {
|
||||
sleep = maxSleep
|
||||
}
|
||||
if jitter != NoJitter {
|
||||
sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
|
||||
@@ -112,6 +112,7 @@ func isS3CodeRetryable(s3Code string) (ok bool) {
|
||||
|
||||
// List of HTTP status codes which are retryable.
|
||||
var retryableHTTPStatusCodes = map[int]struct{}{
|
||||
http.StatusRequestTimeout: {},
|
||||
429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
|
||||
499: {}, // client closed request, retry. A non-standard status code introduced by nginx.
|
||||
http.StatusInternalServerError: {},
|
||||
|
||||
12
vendor/github.com/minio/minio-go/v7/s3-endpoints.go
generated
vendored
12
vendor/github.com/minio/minio-go/v7/s3-endpoints.go
generated
vendored
@@ -32,6 +32,18 @@ var awsS3EndpointMap = map[string]awsS3Endpoint{
|
||||
"s3.us-east-2.amazonaws.com",
|
||||
"s3.dualstack.us-east-2.amazonaws.com",
|
||||
},
|
||||
"us-iso-east-1": {
|
||||
"s3.us-iso-east-1.c2s.ic.gov",
|
||||
"s3.dualstack.us-iso-east-1.c2s.ic.gov",
|
||||
},
|
||||
"us-isob-east-1": {
|
||||
"s3.us-isob-east-1.sc2s.sgov.gov",
|
||||
"s3.dualstack.us-isob-east-1.sc2s.sgov.gov",
|
||||
},
|
||||
"us-iso-west-1": {
|
||||
"s3.us-iso-west-1.c2s.ic.gov",
|
||||
"s3.dualstack.us-iso-west-1.c2s.ic.gov",
|
||||
},
|
||||
"us-west-2": {
|
||||
"s3.us-west-2.amazonaws.com",
|
||||
"s3.dualstack.us-west-2.amazonaws.com",
|
||||
|
||||
163
vendor/github.com/minio/minio-go/v7/utils.go
generated
vendored
163
vendor/github.com/minio/minio-go/v7/utils.go
generated
vendored
@@ -41,6 +41,7 @@ import (
|
||||
|
||||
md5simd "github.com/minio/md5-simd"
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
"github.com/minio/minio-go/v7/pkg/tags"
|
||||
)
|
||||
|
||||
func trimEtag(etag string) string {
|
||||
@@ -322,7 +323,13 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err
|
||||
userMetadata[strings.TrimPrefix(k, "X-Amz-Meta-")] = v[0]
|
||||
}
|
||||
}
|
||||
userTags := s3utils.TagDecode(h.Get(amzTaggingHeader))
|
||||
|
||||
userTags, err := tags.ParseObjectTags(h.Get(amzTaggingHeader))
|
||||
if err != nil {
|
||||
return ObjectInfo{}, ErrorResponse{
|
||||
Code: "InternalError",
|
||||
}
|
||||
}
|
||||
|
||||
var tagCount int
|
||||
if count := h.Get(amzTaggingCount); count != "" {
|
||||
@@ -373,15 +380,16 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err
|
||||
// which are not part of object metadata.
|
||||
Metadata: metadata,
|
||||
UserMetadata: userMetadata,
|
||||
UserTags: userTags,
|
||||
UserTags: userTags.ToMap(),
|
||||
UserTagCount: tagCount,
|
||||
Restore: restore,
|
||||
|
||||
// Checksum values
|
||||
ChecksumCRC32: h.Get("x-amz-checksum-crc32"),
|
||||
ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"),
|
||||
ChecksumSHA1: h.Get("x-amz-checksum-sha1"),
|
||||
ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
|
||||
ChecksumCRC32: h.Get(ChecksumCRC32.Key()),
|
||||
ChecksumCRC32C: h.Get(ChecksumCRC32C.Key()),
|
||||
ChecksumSHA1: h.Get(ChecksumSHA1.Key()),
|
||||
ChecksumSHA256: h.Get(ChecksumSHA256.Key()),
|
||||
ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -698,3 +706,146 @@ func (h *hashReaderWrapper) Read(p []byte) (n int, err error) {
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Following is ported from C to Go in 2016 by Justin Ruggles, with minimal alteration.
|
||||
// Used uint for unsigned long. Used uint32 for input arguments in order to match
|
||||
// the Go hash/crc32 package. zlib CRC32 combine (https://github.com/madler/zlib)
|
||||
// Modified for hash/crc64 by Klaus Post, 2024.
|
||||
func gf2MatrixTimes(mat []uint64, vec uint64) uint64 {
|
||||
var sum uint64
|
||||
|
||||
for vec != 0 {
|
||||
if vec&1 != 0 {
|
||||
sum ^= mat[0]
|
||||
}
|
||||
vec >>= 1
|
||||
mat = mat[1:]
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
func gf2MatrixSquare(square, mat []uint64) {
|
||||
if len(square) != len(mat) {
|
||||
panic("square matrix size mismatch")
|
||||
}
|
||||
for n := range mat {
|
||||
square[n] = gf2MatrixTimes(mat, mat[n])
|
||||
}
|
||||
}
|
||||
|
||||
// crc32Combine returns the combined CRC-32 hash value of the two passed CRC-32
|
||||
// hash values crc1 and crc2. poly represents the generator polynomial
|
||||
// and len2 specifies the byte length that the crc2 hash covers.
|
||||
func crc32Combine(poly uint32, crc1, crc2 uint32, len2 int64) uint32 {
|
||||
// degenerate case (also disallow negative lengths)
|
||||
if len2 <= 0 {
|
||||
return crc1
|
||||
}
|
||||
|
||||
even := make([]uint64, 32) // even-power-of-two zeros operator
|
||||
odd := make([]uint64, 32) // odd-power-of-two zeros operator
|
||||
|
||||
// put operator for one zero bit in odd
|
||||
odd[0] = uint64(poly) // CRC-32 polynomial
|
||||
row := uint64(1)
|
||||
for n := 1; n < 32; n++ {
|
||||
odd[n] = row
|
||||
row <<= 1
|
||||
}
|
||||
|
||||
// put operator for two zero bits in even
|
||||
gf2MatrixSquare(even, odd)
|
||||
|
||||
// put operator for four zero bits in odd
|
||||
gf2MatrixSquare(odd, even)
|
||||
|
||||
// apply len2 zeros to crc1 (first square will put the operator for one
|
||||
// zero byte, eight zero bits, in even)
|
||||
crc1n := uint64(crc1)
|
||||
for {
|
||||
// apply zeros operator for this bit of len2
|
||||
gf2MatrixSquare(even, odd)
|
||||
if len2&1 != 0 {
|
||||
crc1n = gf2MatrixTimes(even, crc1n)
|
||||
}
|
||||
len2 >>= 1
|
||||
|
||||
// if no more bits set, then done
|
||||
if len2 == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// another iteration of the loop with odd and even swapped
|
||||
gf2MatrixSquare(odd, even)
|
||||
if len2&1 != 0 {
|
||||
crc1n = gf2MatrixTimes(odd, crc1n)
|
||||
}
|
||||
len2 >>= 1
|
||||
|
||||
// if no more bits set, then done
|
||||
if len2 == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// return combined crc
|
||||
crc1n ^= uint64(crc2)
|
||||
return uint32(crc1n)
|
||||
}
|
||||
|
||||
func crc64Combine(poly uint64, crc1, crc2 uint64, len2 int64) uint64 {
|
||||
// degenerate case (also disallow negative lengths)
|
||||
if len2 <= 0 {
|
||||
return crc1
|
||||
}
|
||||
|
||||
even := make([]uint64, 64) // even-power-of-two zeros operator
|
||||
odd := make([]uint64, 64) // odd-power-of-two zeros operator
|
||||
|
||||
// put operator for one zero bit in odd
|
||||
odd[0] = poly // CRC-64 polynomial
|
||||
row := uint64(1)
|
||||
for n := 1; n < 64; n++ {
|
||||
odd[n] = row
|
||||
row <<= 1
|
||||
}
|
||||
|
||||
// put operator for two zero bits in even
|
||||
gf2MatrixSquare(even, odd)
|
||||
|
||||
// put operator for four zero bits in odd
|
||||
gf2MatrixSquare(odd, even)
|
||||
|
||||
// apply len2 zeros to crc1 (first square will put the operator for one
|
||||
// zero byte, eight zero bits, in even)
|
||||
crc1n := crc1
|
||||
for {
|
||||
// apply zeros operator for this bit of len2
|
||||
gf2MatrixSquare(even, odd)
|
||||
if len2&1 != 0 {
|
||||
crc1n = gf2MatrixTimes(even, crc1n)
|
||||
}
|
||||
len2 >>= 1
|
||||
|
||||
// if no more bits set, then done
|
||||
if len2 == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// another iteration of the loop with odd and even swapped
|
||||
gf2MatrixSquare(odd, even)
|
||||
if len2&1 != 0 {
|
||||
crc1n = gf2MatrixTimes(odd, crc1n)
|
||||
}
|
||||
len2 >>= 1
|
||||
|
||||
// if no more bits set, then done
|
||||
if len2 == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// return combined crc
|
||||
crc1n ^= crc2
|
||||
return crc1n
|
||||
}
|
||||
|
||||
3
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/recycle.go
generated
vendored
3
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/recycle.go
generated
vendored
@@ -475,5 +475,6 @@ func (tb *DecomposedfsTrashbin) EmptyRecycle(ctx context.Context, ref *provider.
|
||||
}
|
||||
|
||||
func (tb *DecomposedfsTrashbin) getRecycleRoot(spaceID string) string {
|
||||
return filepath.Join(tb.fs.getSpaceRoot(spaceID), "trash")
|
||||
rootNode := node.NewBaseNode(spaceID, spaceID, tb.fs.lu)
|
||||
return filepath.Join(rootNode.InternalPath(), "trash")
|
||||
}
|
||||
|
||||
7
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/spaces.go
generated
vendored
7
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/spaces.go
generated
vendored
@@ -42,7 +42,6 @@ import (
|
||||
"github.com/opencloud-eu/reva/v2/pkg/rgrpc/status"
|
||||
"github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool"
|
||||
sdk "github.com/opencloud-eu/reva/v2/pkg/sdk/common"
|
||||
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup"
|
||||
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
|
||||
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
|
||||
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/permissions"
|
||||
@@ -745,7 +744,7 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De
|
||||
return err
|
||||
}
|
||||
|
||||
root := fs.getSpaceRoot(spaceID)
|
||||
root := n.InternalPath()
|
||||
|
||||
// walkfn will delete the blob if the node has one
|
||||
walkfn := func(path string, info os.FileInfo, err error) error {
|
||||
@@ -1103,10 +1102,6 @@ func isGrantExpired(g *provider.Grant) bool {
|
||||
return time.Now().After(time.Unix(int64(g.Expiration.Seconds), int64(g.Expiration.Nanos)))
|
||||
}
|
||||
|
||||
func (fs *Decomposedfs) getSpaceRoot(spaceID string) string {
|
||||
return filepath.Join(fs.o.Root, "spaces", lookup.Pathify(spaceID, 1, 2))
|
||||
}
|
||||
|
||||
// Space deletion can be tricky as there are lots of different cases:
|
||||
// - spaces of type personal can only be disabled and deleted by users with the "delete-all-home-spaces" permission
|
||||
// - a user with the "delete-all-spaces" permission may delete but not enable/disable any project space
|
||||
|
||||
39
vendor/golang.org/x/net/http2/http2.go
generated
vendored
39
vendor/golang.org/x/net/http2/http2.go
generated
vendored
@@ -34,11 +34,19 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
VerboseLogs bool
|
||||
logFrameWrites bool
|
||||
logFrameReads bool
|
||||
inTests bool
|
||||
disableExtendedConnectProtocol bool
|
||||
VerboseLogs bool
|
||||
logFrameWrites bool
|
||||
logFrameReads bool
|
||||
inTests bool
|
||||
|
||||
// Enabling extended CONNECT by causes browsers to attempt to use
|
||||
// WebSockets-over-HTTP/2. This results in problems when the server's websocket
|
||||
// package doesn't support extended CONNECT.
|
||||
//
|
||||
// Disable extended CONNECT by default for now.
|
||||
//
|
||||
// Issue #71128.
|
||||
disableExtendedConnectProtocol = true
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -51,8 +59,8 @@ func init() {
|
||||
logFrameWrites = true
|
||||
logFrameReads = true
|
||||
}
|
||||
if strings.Contains(e, "http2xconnect=0") {
|
||||
disableExtendedConnectProtocol = true
|
||||
if strings.Contains(e, "http2xconnect=1") {
|
||||
disableExtendedConnectProtocol = false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -407,23 +415,6 @@ func (s *sorter) SortStrings(ss []string) {
|
||||
s.v = save
|
||||
}
|
||||
|
||||
// validPseudoPath reports whether v is a valid :path pseudo-header
|
||||
// value. It must be either:
|
||||
//
|
||||
// - a non-empty string starting with '/'
|
||||
// - the string '*', for OPTIONS requests.
|
||||
//
|
||||
// For now this is only used a quick check for deciding when to clean
|
||||
// up Opaque URLs before sending requests from the Transport.
|
||||
// See golang.org/issue/16847
|
||||
//
|
||||
// We used to enforce that the path also didn't start with "//", but
|
||||
// Google's GFE accepts such paths and Chrome sends them, so ignore
|
||||
// that part of the spec. See golang.org/issue/19103.
|
||||
func validPseudoPath(v string) bool {
|
||||
return (len(v) > 0 && v[0] == '/') || v == "*"
|
||||
}
|
||||
|
||||
// incomparable is a zero-width, non-comparable type. Adding it to a struct
|
||||
// makes that struct also non-comparable, and generally doesn't add
|
||||
// any size (as long as it's first).
|
||||
|
||||
4
vendor/golang.org/x/net/http2/server.go
generated
vendored
4
vendor/golang.org/x/net/http2/server.go
generated
vendored
@@ -50,6 +50,7 @@ import (
|
||||
|
||||
"golang.org/x/net/http/httpguts"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"golang.org/x/net/internal/httpcommon"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -812,8 +813,7 @@ const maxCachedCanonicalHeadersKeysSize = 2048
|
||||
|
||||
func (sc *serverConn) canonicalHeader(v string) string {
|
||||
sc.serveG.check()
|
||||
buildCommonHeaderMapsOnce()
|
||||
cv, ok := commonCanonHeader[v]
|
||||
cv, ok := httpcommon.CachedCanonicalHeader(v)
|
||||
if ok {
|
||||
return cv
|
||||
}
|
||||
|
||||
330
vendor/golang.org/x/net/http2/transport.go
generated
vendored
330
vendor/golang.org/x/net/http2/transport.go
generated
vendored
@@ -25,7 +25,6 @@ import (
|
||||
"net/http"
|
||||
"net/http/httptrace"
|
||||
"net/textproto"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -35,6 +34,7 @@ import (
|
||||
"golang.org/x/net/http/httpguts"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"golang.org/x/net/idna"
|
||||
"golang.org/x/net/internal/httpcommon"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -1275,23 +1275,6 @@ func (cc *ClientConn) closeForLostPing() {
|
||||
// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests.
|
||||
var errRequestCanceled = errors.New("net/http: request canceled")
|
||||
|
||||
func commaSeparatedTrailers(req *http.Request) (string, error) {
|
||||
keys := make([]string, 0, len(req.Trailer))
|
||||
for k := range req.Trailer {
|
||||
k = canonicalHeader(k)
|
||||
switch k {
|
||||
case "Transfer-Encoding", "Trailer", "Content-Length":
|
||||
return "", fmt.Errorf("invalid Trailer key %q", k)
|
||||
}
|
||||
keys = append(keys, k)
|
||||
}
|
||||
if len(keys) > 0 {
|
||||
sort.Strings(keys)
|
||||
return strings.Join(keys, ","), nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (cc *ClientConn) responseHeaderTimeout() time.Duration {
|
||||
if cc.t.t1 != nil {
|
||||
return cc.t.t1.ResponseHeaderTimeout
|
||||
@@ -1303,35 +1286,6 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration {
|
||||
return 0
|
||||
}
|
||||
|
||||
// checkConnHeaders checks whether req has any invalid connection-level headers.
|
||||
// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
|
||||
// Certain headers are special-cased as okay but not transmitted later.
|
||||
func checkConnHeaders(req *http.Request) error {
|
||||
if v := req.Header.Get("Upgrade"); v != "" {
|
||||
return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
|
||||
}
|
||||
if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
|
||||
return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
|
||||
}
|
||||
if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
|
||||
return fmt.Errorf("http2: invalid Connection request header: %q", vv)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// actualContentLength returns a sanitized version of
|
||||
// req.ContentLength, where 0 actually means zero (not unknown) and -1
|
||||
// means unknown.
|
||||
func actualContentLength(req *http.Request) int64 {
|
||||
if req.Body == nil || req.Body == http.NoBody {
|
||||
return 0
|
||||
}
|
||||
if req.ContentLength != 0 {
|
||||
return req.ContentLength
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func (cc *ClientConn) decrStreamReservations() {
|
||||
cc.mu.Lock()
|
||||
defer cc.mu.Unlock()
|
||||
@@ -1356,7 +1310,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
|
||||
reqCancel: req.Cancel,
|
||||
isHead: req.Method == "HEAD",
|
||||
reqBody: req.Body,
|
||||
reqBodyContentLength: actualContentLength(req),
|
||||
reqBodyContentLength: httpcommon.ActualContentLength(req),
|
||||
trace: httptrace.ContextClientTrace(ctx),
|
||||
peerClosed: make(chan struct{}),
|
||||
abort: make(chan struct{}),
|
||||
@@ -1364,25 +1318,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
|
||||
donec: make(chan struct{}),
|
||||
}
|
||||
|
||||
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
|
||||
if !cc.t.disableCompression() &&
|
||||
req.Header.Get("Accept-Encoding") == "" &&
|
||||
req.Header.Get("Range") == "" &&
|
||||
!cs.isHead {
|
||||
// Request gzip only, not deflate. Deflate is ambiguous and
|
||||
// not as universally supported anyway.
|
||||
// See: https://zlib.net/zlib_faq.html#faq39
|
||||
//
|
||||
// Note that we don't request this for HEAD requests,
|
||||
// due to a bug in nginx:
|
||||
// http://trac.nginx.org/nginx/ticket/358
|
||||
// https://golang.org/issue/5522
|
||||
//
|
||||
// We don't request gzip if the request is for a range, since
|
||||
// auto-decoding a portion of a gzipped document will just fail
|
||||
// anyway. See https://golang.org/issue/8923
|
||||
cs.requestedGzip = true
|
||||
}
|
||||
cs.requestedGzip = httpcommon.IsRequestGzip(req, cc.t.disableCompression())
|
||||
|
||||
go cs.doRequest(req, streamf)
|
||||
|
||||
@@ -1413,7 +1349,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
|
||||
}
|
||||
res.Request = req
|
||||
res.TLS = cc.tlsState
|
||||
if res.Body == noBody && actualContentLength(req) == 0 {
|
||||
if res.Body == noBody && httpcommon.ActualContentLength(req) == 0 {
|
||||
// If there isn't a request or response body still being
|
||||
// written, then wait for the stream to be closed before
|
||||
// RoundTrip returns.
|
||||
@@ -1496,10 +1432,6 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre
|
||||
cc := cs.cc
|
||||
ctx := cs.ctx
|
||||
|
||||
if err := checkConnHeaders(req); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// wait for setting frames to be received, a server can change this value later,
|
||||
// but we just wait for the first settings frame
|
||||
var isExtendedConnect bool
|
||||
@@ -1663,20 +1595,22 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error {
|
||||
// we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
|
||||
// sent by writeRequestBody below, along with any Trailers,
|
||||
// again in form HEADERS{1}, CONTINUATION{0,})
|
||||
trailers, err := commaSeparatedTrailers(req)
|
||||
cc.hbuf.Reset()
|
||||
res, err := httpcommon.EncodeHeaders(httpcommon.EncodeHeadersParam{
|
||||
Request: req,
|
||||
AddGzipHeader: cs.requestedGzip,
|
||||
PeerMaxHeaderListSize: cc.peerMaxHeaderListSize,
|
||||
DefaultUserAgent: defaultUserAgent,
|
||||
}, func(name, value string) {
|
||||
cc.writeHeader(name, value)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hasTrailers := trailers != ""
|
||||
contentLen := actualContentLength(req)
|
||||
hasBody := contentLen != 0
|
||||
hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("http2: %w", err)
|
||||
}
|
||||
hdrs := cc.hbuf.Bytes()
|
||||
|
||||
// Write the request.
|
||||
endStream := !hasBody && !hasTrailers
|
||||
endStream := !res.HasBody && !res.HasTrailers
|
||||
cs.sentHeaders = true
|
||||
err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
|
||||
traceWroteHeaders(cs.trace)
|
||||
@@ -2070,218 +2004,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
|
||||
}
|
||||
}
|
||||
|
||||
func validateHeaders(hdrs http.Header) string {
|
||||
for k, vv := range hdrs {
|
||||
if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
|
||||
return fmt.Sprintf("name %q", k)
|
||||
}
|
||||
for _, v := range vv {
|
||||
if !httpguts.ValidHeaderFieldValue(v) {
|
||||
// Don't include the value in the error,
|
||||
// because it may be sensitive.
|
||||
return fmt.Sprintf("value for header %q", k)
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var errNilRequestURL = errors.New("http2: Request.URI is nil")
|
||||
|
||||
func isNormalConnect(req *http.Request) bool {
|
||||
return req.Method == "CONNECT" && req.Header.Get(":protocol") == ""
|
||||
}
|
||||
|
||||
// requires cc.wmu be held.
|
||||
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
|
||||
cc.hbuf.Reset()
|
||||
if req.URL == nil {
|
||||
return nil, errNilRequestURL
|
||||
}
|
||||
|
||||
host := req.Host
|
||||
if host == "" {
|
||||
host = req.URL.Host
|
||||
}
|
||||
host, err := httpguts.PunycodeHostPort(host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !httpguts.ValidHostHeader(host) {
|
||||
return nil, errors.New("http2: invalid Host header")
|
||||
}
|
||||
|
||||
var path string
|
||||
if !isNormalConnect(req) {
|
||||
path = req.URL.RequestURI()
|
||||
if !validPseudoPath(path) {
|
||||
orig := path
|
||||
path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
|
||||
if !validPseudoPath(path) {
|
||||
if req.URL.Opaque != "" {
|
||||
return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
|
||||
} else {
|
||||
return nil, fmt.Errorf("invalid request :path %q", orig)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check for any invalid headers+trailers and return an error before we
|
||||
// potentially pollute our hpack state. (We want to be able to
|
||||
// continue to reuse the hpack encoder for future requests)
|
||||
if err := validateHeaders(req.Header); err != "" {
|
||||
return nil, fmt.Errorf("invalid HTTP header %s", err)
|
||||
}
|
||||
if err := validateHeaders(req.Trailer); err != "" {
|
||||
return nil, fmt.Errorf("invalid HTTP trailer %s", err)
|
||||
}
|
||||
|
||||
enumerateHeaders := func(f func(name, value string)) {
|
||||
// 8.1.2.3 Request Pseudo-Header Fields
|
||||
// The :path pseudo-header field includes the path and query parts of the
|
||||
// target URI (the path-absolute production and optionally a '?' character
|
||||
// followed by the query production, see Sections 3.3 and 3.4 of
|
||||
// [RFC3986]).
|
||||
f(":authority", host)
|
||||
m := req.Method
|
||||
if m == "" {
|
||||
m = http.MethodGet
|
||||
}
|
||||
f(":method", m)
|
||||
if !isNormalConnect(req) {
|
||||
f(":path", path)
|
||||
f(":scheme", req.URL.Scheme)
|
||||
}
|
||||
if trailers != "" {
|
||||
f("trailer", trailers)
|
||||
}
|
||||
|
||||
var didUA bool
|
||||
for k, vv := range req.Header {
|
||||
if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
|
||||
// Host is :authority, already sent.
|
||||
// Content-Length is automatic, set below.
|
||||
continue
|
||||
} else if asciiEqualFold(k, "connection") ||
|
||||
asciiEqualFold(k, "proxy-connection") ||
|
||||
asciiEqualFold(k, "transfer-encoding") ||
|
||||
asciiEqualFold(k, "upgrade") ||
|
||||
asciiEqualFold(k, "keep-alive") {
|
||||
// Per 8.1.2.2 Connection-Specific Header
|
||||
// Fields, don't send connection-specific
|
||||
// fields. We have already checked if any
|
||||
// are error-worthy so just ignore the rest.
|
||||
continue
|
||||
} else if asciiEqualFold(k, "user-agent") {
|
||||
// Match Go's http1 behavior: at most one
|
||||
// User-Agent. If set to nil or empty string,
|
||||
// then omit it. Otherwise if not mentioned,
|
||||
// include the default (below).
|
||||
didUA = true
|
||||
if len(vv) < 1 {
|
||||
continue
|
||||
}
|
||||
vv = vv[:1]
|
||||
if vv[0] == "" {
|
||||
continue
|
||||
}
|
||||
} else if asciiEqualFold(k, "cookie") {
|
||||
// Per 8.1.2.5 To allow for better compression efficiency, the
|
||||
// Cookie header field MAY be split into separate header fields,
|
||||
// each with one or more cookie-pairs.
|
||||
for _, v := range vv {
|
||||
for {
|
||||
p := strings.IndexByte(v, ';')
|
||||
if p < 0 {
|
||||
break
|
||||
}
|
||||
f("cookie", v[:p])
|
||||
p++
|
||||
// strip space after semicolon if any.
|
||||
for p+1 <= len(v) && v[p] == ' ' {
|
||||
p++
|
||||
}
|
||||
v = v[p:]
|
||||
}
|
||||
if len(v) > 0 {
|
||||
f("cookie", v)
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
for _, v := range vv {
|
||||
f(k, v)
|
||||
}
|
||||
}
|
||||
if shouldSendReqContentLength(req.Method, contentLength) {
|
||||
f("content-length", strconv.FormatInt(contentLength, 10))
|
||||
}
|
||||
if addGzipHeader {
|
||||
f("accept-encoding", "gzip")
|
||||
}
|
||||
if !didUA {
|
||||
f("user-agent", defaultUserAgent)
|
||||
}
|
||||
}
|
||||
|
||||
// Do a first pass over the headers counting bytes to ensure
|
||||
// we don't exceed cc.peerMaxHeaderListSize. This is done as a
|
||||
// separate pass before encoding the headers to prevent
|
||||
// modifying the hpack state.
|
||||
hlSize := uint64(0)
|
||||
enumerateHeaders(func(name, value string) {
|
||||
hf := hpack.HeaderField{Name: name, Value: value}
|
||||
hlSize += uint64(hf.Size())
|
||||
})
|
||||
|
||||
if hlSize > cc.peerMaxHeaderListSize {
|
||||
return nil, errRequestHeaderListSize
|
||||
}
|
||||
|
||||
trace := httptrace.ContextClientTrace(req.Context())
|
||||
traceHeaders := traceHasWroteHeaderField(trace)
|
||||
|
||||
// Header list size is ok. Write the headers.
|
||||
enumerateHeaders(func(name, value string) {
|
||||
name, ascii := lowerHeader(name)
|
||||
if !ascii {
|
||||
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
|
||||
// field names have to be ASCII characters (just as in HTTP/1.x).
|
||||
return
|
||||
}
|
||||
cc.writeHeader(name, value)
|
||||
if traceHeaders {
|
||||
traceWroteHeaderField(trace, name, value)
|
||||
}
|
||||
})
|
||||
|
||||
return cc.hbuf.Bytes(), nil
|
||||
}
|
||||
|
||||
// shouldSendReqContentLength reports whether the http2.Transport should send
|
||||
// a "content-length" request header. This logic is basically a copy of the net/http
|
||||
// transferWriter.shouldSendContentLength.
|
||||
// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
|
||||
// -1 means unknown.
|
||||
func shouldSendReqContentLength(method string, contentLength int64) bool {
|
||||
if contentLength > 0 {
|
||||
return true
|
||||
}
|
||||
if contentLength < 0 {
|
||||
return false
|
||||
}
|
||||
// For zero bodies, whether we send a content-length depends on the method.
|
||||
// It also kinda doesn't matter for http2 either way, with END_STREAM.
|
||||
switch method {
|
||||
case "POST", "PUT", "PATCH":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// requires cc.wmu be held.
|
||||
func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
|
||||
cc.hbuf.Reset()
|
||||
@@ -2298,7 +2020,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
|
||||
}
|
||||
|
||||
for k, vv := range trailer {
|
||||
lowKey, ascii := lowerHeader(k)
|
||||
lowKey, ascii := httpcommon.LowerHeader(k)
|
||||
if !ascii {
|
||||
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
|
||||
// field names have to be ASCII characters (just as in HTTP/1.x).
|
||||
@@ -2653,7 +2375,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
|
||||
Status: status + " " + http.StatusText(statusCode),
|
||||
}
|
||||
for _, hf := range regularFields {
|
||||
key := canonicalHeader(hf.Name)
|
||||
key := httpcommon.CanonicalHeader(hf.Name)
|
||||
if key == "Trailer" {
|
||||
t := res.Trailer
|
||||
if t == nil {
|
||||
@@ -2661,7 +2383,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
|
||||
res.Trailer = t
|
||||
}
|
||||
foreachHeaderElement(hf.Value, func(v string) {
|
||||
t[canonicalHeader(v)] = nil
|
||||
t[httpcommon.CanonicalHeader(v)] = nil
|
||||
})
|
||||
} else {
|
||||
vv := header[key]
|
||||
@@ -2785,7 +2507,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr
|
||||
|
||||
trailer := make(http.Header)
|
||||
for _, hf := range f.RegularFields() {
|
||||
key := canonicalHeader(hf.Name)
|
||||
key := httpcommon.CanonicalHeader(hf.Name)
|
||||
trailer[key] = append(trailer[key], hf.Value)
|
||||
}
|
||||
cs.trailer = trailer
|
||||
@@ -3331,7 +3053,7 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool,
|
||||
|
||||
var (
|
||||
errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
|
||||
errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit")
|
||||
errRequestHeaderListSize = httpcommon.ErrRequestHeaderListSize
|
||||
)
|
||||
|
||||
func (cc *ClientConn) logf(format string, args ...interface{}) {
|
||||
@@ -3515,16 +3237,6 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) {
|
||||
}
|
||||
}
|
||||
|
||||
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
|
||||
return trace != nil && trace.WroteHeaderField != nil
|
||||
}
|
||||
|
||||
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
|
||||
if trace != nil && trace.WroteHeaderField != nil {
|
||||
trace.WroteHeaderField(k, []string{v})
|
||||
}
|
||||
}
|
||||
|
||||
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
|
||||
if trace != nil {
|
||||
return trace.Got1xxResponse
|
||||
|
||||
3
vendor/golang.org/x/net/http2/write.go
generated
vendored
3
vendor/golang.org/x/net/http2/write.go
generated
vendored
@@ -13,6 +13,7 @@ import (
|
||||
|
||||
"golang.org/x/net/http/httpguts"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"golang.org/x/net/internal/httpcommon"
|
||||
)
|
||||
|
||||
// writeFramer is implemented by any type that is used to write frames.
|
||||
@@ -351,7 +352,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
|
||||
}
|
||||
for _, k := range keys {
|
||||
vv := h[k]
|
||||
k, ascii := lowerHeader(k)
|
||||
k, ascii := httpcommon.LowerHeader(k)
|
||||
if !ascii {
|
||||
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
|
||||
// field names have to be ASCII characters (just as in HTTP/1.x).
|
||||
|
||||
53
vendor/golang.org/x/net/internal/httpcommon/ascii.go
generated
vendored
Normal file
53
vendor/golang.org/x/net/internal/httpcommon/ascii.go
generated
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package httpcommon
|
||||
|
||||
import "strings"
|
||||
|
||||
// The HTTP protocols are defined in terms of ASCII, not Unicode. This file
|
||||
// contains helper functions which may use Unicode-aware functions which would
|
||||
// otherwise be unsafe and could introduce vulnerabilities if used improperly.
|
||||
|
||||
// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
|
||||
// are equal, ASCII-case-insensitively.
|
||||
func asciiEqualFold(s, t string) bool {
|
||||
if len(s) != len(t) {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < len(s); i++ {
|
||||
if lower(s[i]) != lower(t[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// lower returns the ASCII lowercase version of b.
|
||||
func lower(b byte) byte {
|
||||
if 'A' <= b && b <= 'Z' {
|
||||
return b + ('a' - 'A')
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// isASCIIPrint returns whether s is ASCII and printable according to
|
||||
// https://tools.ietf.org/html/rfc20#section-4.2.
|
||||
func isASCIIPrint(s string) bool {
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] < ' ' || s[i] > '~' {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// asciiToLower returns the lowercase version of s if s is ASCII and printable,
|
||||
// and whether or not it was.
|
||||
func asciiToLower(s string) (lower string, ok bool) {
|
||||
if !isASCIIPrint(s) {
|
||||
return "", false
|
||||
}
|
||||
return strings.ToLower(s), true
|
||||
}
|
||||
@@ -1,8 +1,8 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
package httpcommon
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
@@ -88,7 +88,9 @@ func buildCommonHeaderMaps() {
|
||||
}
|
||||
}
|
||||
|
||||
func lowerHeader(v string) (lower string, ascii bool) {
|
||||
// LowerHeader returns the lowercase form of a header name,
|
||||
// used on the wire for HTTP/2 and HTTP/3 requests.
|
||||
func LowerHeader(v string) (lower string, ascii bool) {
|
||||
buildCommonHeaderMapsOnce()
|
||||
if s, ok := commonLowerHeader[v]; ok {
|
||||
return s, true
|
||||
@@ -96,10 +98,18 @@ func lowerHeader(v string) (lower string, ascii bool) {
|
||||
return asciiToLower(v)
|
||||
}
|
||||
|
||||
func canonicalHeader(v string) string {
|
||||
// CanonicalHeader canonicalizes a header name. (For example, "host" becomes "Host".)
|
||||
func CanonicalHeader(v string) string {
|
||||
buildCommonHeaderMapsOnce()
|
||||
if s, ok := commonCanonHeader[v]; ok {
|
||||
return s
|
||||
}
|
||||
return http.CanonicalHeaderKey(v)
|
||||
}
|
||||
|
||||
// CachedCanonicalHeader returns the canonical form of a well-known header name.
|
||||
func CachedCanonicalHeader(v string) (string, bool) {
|
||||
buildCommonHeaderMapsOnce()
|
||||
s, ok := commonCanonHeader[v]
|
||||
return s, ok
|
||||
}
|
||||
379
vendor/golang.org/x/net/internal/httpcommon/request.go
generated
vendored
Normal file
379
vendor/golang.org/x/net/internal/httpcommon/request.go
generated
vendored
Normal file
@@ -0,0 +1,379 @@
|
||||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package httpcommon
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptrace"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/http/httpguts"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit")
|
||||
)
|
||||
|
||||
// EncodeHeadersParam is parameters to EncodeHeaders.
|
||||
type EncodeHeadersParam struct {
|
||||
Request *http.Request
|
||||
|
||||
// AddGzipHeader indicates that an "accept-encoding: gzip" header should be
|
||||
// added to the request.
|
||||
AddGzipHeader bool
|
||||
|
||||
// PeerMaxHeaderListSize, when non-zero, is the peer's MAX_HEADER_LIST_SIZE setting.
|
||||
PeerMaxHeaderListSize uint64
|
||||
|
||||
// DefaultUserAgent is the User-Agent header to send when the request
|
||||
// neither contains a User-Agent nor disables it.
|
||||
DefaultUserAgent string
|
||||
}
|
||||
|
||||
// EncodeHeadersParam is the result of EncodeHeaders.
|
||||
type EncodeHeadersResult struct {
|
||||
HasBody bool
|
||||
HasTrailers bool
|
||||
}
|
||||
|
||||
// EncodeHeaders constructs request headers common to HTTP/2 and HTTP/3.
|
||||
// It validates a request and calls headerf with each pseudo-header and header
|
||||
// for the request.
|
||||
// The headerf function is called with the validated, canonicalized header name.
|
||||
func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) {
|
||||
req := param.Request
|
||||
|
||||
// Check for invalid connection-level headers.
|
||||
if err := checkConnHeaders(req); err != nil {
|
||||
return res, err
|
||||
}
|
||||
|
||||
if req.URL == nil {
|
||||
return res, errors.New("Request.URL is nil")
|
||||
}
|
||||
|
||||
host := req.Host
|
||||
if host == "" {
|
||||
host = req.URL.Host
|
||||
}
|
||||
host, err := httpguts.PunycodeHostPort(host)
|
||||
if err != nil {
|
||||
return res, err
|
||||
}
|
||||
if !httpguts.ValidHostHeader(host) {
|
||||
return res, errors.New("invalid Host header")
|
||||
}
|
||||
|
||||
// isNormalConnect is true if this is a non-extended CONNECT request.
|
||||
isNormalConnect := false
|
||||
protocol := req.Header.Get(":protocol")
|
||||
if req.Method == "CONNECT" && protocol == "" {
|
||||
isNormalConnect = true
|
||||
} else if protocol != "" && req.Method != "CONNECT" {
|
||||
return res, errors.New("invalid :protocol header in non-CONNECT request")
|
||||
}
|
||||
|
||||
// Validate the path, except for non-extended CONNECT requests which have no path.
|
||||
var path string
|
||||
if !isNormalConnect {
|
||||
path = req.URL.RequestURI()
|
||||
if !validPseudoPath(path) {
|
||||
orig := path
|
||||
path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
|
||||
if !validPseudoPath(path) {
|
||||
if req.URL.Opaque != "" {
|
||||
return res, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
|
||||
} else {
|
||||
return res, fmt.Errorf("invalid request :path %q", orig)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check for any invalid headers+trailers and return an error before we
|
||||
// potentially pollute our hpack state. (We want to be able to
|
||||
// continue to reuse the hpack encoder for future requests)
|
||||
if err := validateHeaders(req.Header); err != "" {
|
||||
return res, fmt.Errorf("invalid HTTP header %s", err)
|
||||
}
|
||||
if err := validateHeaders(req.Trailer); err != "" {
|
||||
return res, fmt.Errorf("invalid HTTP trailer %s", err)
|
||||
}
|
||||
|
||||
contentLength := ActualContentLength(req)
|
||||
|
||||
trailers, err := commaSeparatedTrailers(req)
|
||||
if err != nil {
|
||||
return res, err
|
||||
}
|
||||
|
||||
enumerateHeaders := func(f func(name, value string)) {
|
||||
// 8.1.2.3 Request Pseudo-Header Fields
|
||||
// The :path pseudo-header field includes the path and query parts of the
|
||||
// target URI (the path-absolute production and optionally a '?' character
|
||||
// followed by the query production, see Sections 3.3 and 3.4 of
|
||||
// [RFC3986]).
|
||||
f(":authority", host)
|
||||
m := req.Method
|
||||
if m == "" {
|
||||
m = http.MethodGet
|
||||
}
|
||||
f(":method", m)
|
||||
if !isNormalConnect {
|
||||
f(":path", path)
|
||||
f(":scheme", req.URL.Scheme)
|
||||
}
|
||||
if protocol != "" {
|
||||
f(":protocol", protocol)
|
||||
}
|
||||
if trailers != "" {
|
||||
f("trailer", trailers)
|
||||
}
|
||||
|
||||
var didUA bool
|
||||
for k, vv := range req.Header {
|
||||
if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
|
||||
// Host is :authority, already sent.
|
||||
// Content-Length is automatic, set below.
|
||||
continue
|
||||
} else if asciiEqualFold(k, "connection") ||
|
||||
asciiEqualFold(k, "proxy-connection") ||
|
||||
asciiEqualFold(k, "transfer-encoding") ||
|
||||
asciiEqualFold(k, "upgrade") ||
|
||||
asciiEqualFold(k, "keep-alive") {
|
||||
// Per 8.1.2.2 Connection-Specific Header
|
||||
// Fields, don't send connection-specific
|
||||
// fields. We have already checked if any
|
||||
// are error-worthy so just ignore the rest.
|
||||
continue
|
||||
} else if asciiEqualFold(k, "user-agent") {
|
||||
// Match Go's http1 behavior: at most one
|
||||
// User-Agent. If set to nil or empty string,
|
||||
// then omit it. Otherwise if not mentioned,
|
||||
// include the default (below).
|
||||
didUA = true
|
||||
if len(vv) < 1 {
|
||||
continue
|
||||
}
|
||||
vv = vv[:1]
|
||||
if vv[0] == "" {
|
||||
continue
|
||||
}
|
||||
} else if asciiEqualFold(k, "cookie") {
|
||||
// Per 8.1.2.5 To allow for better compression efficiency, the
|
||||
// Cookie header field MAY be split into separate header fields,
|
||||
// each with one or more cookie-pairs.
|
||||
for _, v := range vv {
|
||||
for {
|
||||
p := strings.IndexByte(v, ';')
|
||||
if p < 0 {
|
||||
break
|
||||
}
|
||||
f("cookie", v[:p])
|
||||
p++
|
||||
// strip space after semicolon if any.
|
||||
for p+1 <= len(v) && v[p] == ' ' {
|
||||
p++
|
||||
}
|
||||
v = v[p:]
|
||||
}
|
||||
if len(v) > 0 {
|
||||
f("cookie", v)
|
||||
}
|
||||
}
|
||||
continue
|
||||
} else if k == ":protocol" {
|
||||
// :protocol pseudo-header was already sent above.
|
||||
continue
|
||||
}
|
||||
|
||||
for _, v := range vv {
|
||||
f(k, v)
|
||||
}
|
||||
}
|
||||
if shouldSendReqContentLength(req.Method, contentLength) {
|
||||
f("content-length", strconv.FormatInt(contentLength, 10))
|
||||
}
|
||||
if param.AddGzipHeader {
|
||||
f("accept-encoding", "gzip")
|
||||
}
|
||||
if !didUA {
|
||||
f("user-agent", param.DefaultUserAgent)
|
||||
}
|
||||
}
|
||||
|
||||
// Do a first pass over the headers counting bytes to ensure
|
||||
// we don't exceed cc.peerMaxHeaderListSize. This is done as a
|
||||
// separate pass before encoding the headers to prevent
|
||||
// modifying the hpack state.
|
||||
if param.PeerMaxHeaderListSize > 0 {
|
||||
hlSize := uint64(0)
|
||||
enumerateHeaders(func(name, value string) {
|
||||
hf := hpack.HeaderField{Name: name, Value: value}
|
||||
hlSize += uint64(hf.Size())
|
||||
})
|
||||
|
||||
if hlSize > param.PeerMaxHeaderListSize {
|
||||
return res, ErrRequestHeaderListSize
|
||||
}
|
||||
}
|
||||
|
||||
trace := httptrace.ContextClientTrace(req.Context())
|
||||
|
||||
// Header list size is ok. Write the headers.
|
||||
enumerateHeaders(func(name, value string) {
|
||||
name, ascii := LowerHeader(name)
|
||||
if !ascii {
|
||||
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
|
||||
// field names have to be ASCII characters (just as in HTTP/1.x).
|
||||
return
|
||||
}
|
||||
|
||||
headerf(name, value)
|
||||
|
||||
if trace != nil && trace.WroteHeaderField != nil {
|
||||
trace.WroteHeaderField(name, []string{value})
|
||||
}
|
||||
})
|
||||
|
||||
res.HasBody = contentLength != 0
|
||||
res.HasTrailers = trailers != ""
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header
|
||||
// for a request.
|
||||
func IsRequestGzip(req *http.Request, disableCompression bool) bool {
|
||||
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
|
||||
if !disableCompression &&
|
||||
req.Header.Get("Accept-Encoding") == "" &&
|
||||
req.Header.Get("Range") == "" &&
|
||||
req.Method != "HEAD" {
|
||||
// Request gzip only, not deflate. Deflate is ambiguous and
|
||||
// not as universally supported anyway.
|
||||
// See: https://zlib.net/zlib_faq.html#faq39
|
||||
//
|
||||
// Note that we don't request this for HEAD requests,
|
||||
// due to a bug in nginx:
|
||||
// http://trac.nginx.org/nginx/ticket/358
|
||||
// https://golang.org/issue/5522
|
||||
//
|
||||
// We don't request gzip if the request is for a range, since
|
||||
// auto-decoding a portion of a gzipped document will just fail
|
||||
// anyway. See https://golang.org/issue/8923
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// checkConnHeaders checks whether req has any invalid connection-level headers.
|
||||
//
|
||||
// https://www.rfc-editor.org/rfc/rfc9114.html#section-4.2-3
|
||||
// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.2-1
|
||||
//
|
||||
// Certain headers are special-cased as okay but not transmitted later.
|
||||
// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding.
|
||||
func checkConnHeaders(req *http.Request) error {
|
||||
if v := req.Header.Get("Upgrade"); v != "" {
|
||||
return fmt.Errorf("invalid Upgrade request header: %q", req.Header["Upgrade"])
|
||||
}
|
||||
if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
|
||||
return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv)
|
||||
}
|
||||
if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
|
||||
return fmt.Errorf("invalid Connection request header: %q", vv)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func commaSeparatedTrailers(req *http.Request) (string, error) {
|
||||
keys := make([]string, 0, len(req.Trailer))
|
||||
for k := range req.Trailer {
|
||||
k = CanonicalHeader(k)
|
||||
switch k {
|
||||
case "Transfer-Encoding", "Trailer", "Content-Length":
|
||||
return "", fmt.Errorf("invalid Trailer key %q", k)
|
||||
}
|
||||
keys = append(keys, k)
|
||||
}
|
||||
if len(keys) > 0 {
|
||||
sort.Strings(keys)
|
||||
return strings.Join(keys, ","), nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// ActualContentLength returns a sanitized version of
|
||||
// req.ContentLength, where 0 actually means zero (not unknown) and -1
|
||||
// means unknown.
|
||||
func ActualContentLength(req *http.Request) int64 {
|
||||
if req.Body == nil || req.Body == http.NoBody {
|
||||
return 0
|
||||
}
|
||||
if req.ContentLength != 0 {
|
||||
return req.ContentLength
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
//   - a non-empty string starting with '/'
//   - the string '*', for OPTIONS requests.
//
// For now this is only used a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847
//
// We used to enforce that the path also didn't start with "//", but
// Google's GFE accepts such paths and Chrome sends them, so ignore
// that part of the spec. See golang.org/issue/19103.
func validPseudoPath(v string) bool {
	if v == "*" {
		return true
	}
	return len(v) > 0 && v[0] == '/'
}
|
||||
|
||||
func validateHeaders(hdrs http.Header) string {
|
||||
for k, vv := range hdrs {
|
||||
if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
|
||||
return fmt.Sprintf("name %q", k)
|
||||
}
|
||||
for _, v := range vv {
|
||||
if !httpguts.ValidHeaderFieldValue(v) {
|
||||
// Don't include the value in the error,
|
||||
// because it may be sensitive.
|
||||
return fmt.Sprintf("value for header %q", k)
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// shouldSendReqContentLength reports whether we should send
// a "content-length" request header. This logic is basically a copy of the net/http
// transferWriter.shouldSendContentLength.
// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
// -1 means unknown.
func shouldSendReqContentLength(method string, contentLength int64) bool {
	// Non-zero lengths are decided by their sign alone: positive is a
	// known length (send it), negative is unknown (don't).
	if contentLength != 0 {
		return contentLength > 0
	}
	// For zero bodies, whether we send a content-length depends on the method.
	// It also kinda doesn't matter for http2 either way, with END_STREAM.
	switch method {
	case "POST", "PUT", "PATCH":
		return true
	}
	return false
}
|
||||
20
vendor/modules.txt
vendored
20
vendor/modules.txt
vendored
@@ -634,7 +634,7 @@ github.com/gobwas/pool/pbytes
|
||||
## explicit; go 1.15
|
||||
github.com/gobwas/ws
|
||||
github.com/gobwas/ws/wsutil
|
||||
# github.com/goccy/go-json v0.10.3
|
||||
# github.com/goccy/go-json v0.10.5
|
||||
## explicit; go 1.19
|
||||
github.com/goccy/go-json
|
||||
github.com/goccy/go-json/internal/decoder
|
||||
@@ -841,8 +841,8 @@ github.com/klauspost/compress/s2
|
||||
github.com/klauspost/compress/snappy
|
||||
github.com/klauspost/compress/zstd
|
||||
github.com/klauspost/compress/zstd/internal/xxhash
|
||||
# github.com/klauspost/cpuid/v2 v2.2.8
|
||||
## explicit; go 1.15
|
||||
# github.com/klauspost/cpuid/v2 v2.2.9
|
||||
## explicit; go 1.20
|
||||
github.com/klauspost/cpuid/v2
|
||||
# github.com/kovidgoyal/imaging v1.6.3
|
||||
## explicit; go 1.21
|
||||
@@ -935,13 +935,16 @@ github.com/miekg/dns
|
||||
# github.com/mileusna/useragent v1.3.5
|
||||
## explicit; go 1.14
|
||||
github.com/mileusna/useragent
|
||||
# github.com/minio/crc64nvme v1.0.1
|
||||
## explicit; go 1.22
|
||||
github.com/minio/crc64nvme
|
||||
# github.com/minio/highwayhash v1.0.3
|
||||
## explicit; go 1.15
|
||||
github.com/minio/highwayhash
|
||||
# github.com/minio/md5-simd v1.1.2
|
||||
## explicit; go 1.14
|
||||
github.com/minio/md5-simd
|
||||
# github.com/minio/minio-go/v7 v7.0.78
|
||||
# github.com/minio/minio-go/v7 v7.0.87
|
||||
## explicit; go 1.22
|
||||
github.com/minio/minio-go/v7
|
||||
github.com/minio/minio-go/v7/pkg/cors
|
||||
@@ -1189,7 +1192,7 @@ github.com/open-policy-agent/opa/v1/types
|
||||
github.com/open-policy-agent/opa/v1/util
|
||||
github.com/open-policy-agent/opa/v1/util/decoding
|
||||
github.com/open-policy-agent/opa/v1/version
|
||||
# github.com/opencloud-eu/reva/v2 v2.27.3-0.20250225150735-7d4559bbf520
|
||||
# github.com/opencloud-eu/reva/v2 v2.27.3-0.20250226135705-4eb591e3210d
|
||||
## explicit; go 1.23.1
|
||||
github.com/opencloud-eu/reva/v2/cmd/revad/internal/grace
|
||||
github.com/opencloud-eu/reva/v2/cmd/revad/runtime
|
||||
@@ -1671,8 +1674,8 @@ github.com/riandyrn/otelchi/version
|
||||
# github.com/rivo/uniseg v0.4.7
|
||||
## explicit; go 1.18
|
||||
github.com/rivo/uniseg
|
||||
# github.com/rogpeppe/go-internal v1.13.1
|
||||
## explicit; go 1.22
|
||||
# github.com/rogpeppe/go-internal v1.14.1
|
||||
## explicit; go 1.23
|
||||
github.com/rogpeppe/go-internal/internal/syscall/windows
|
||||
github.com/rogpeppe/go-internal/internal/syscall/windows/sysdll
|
||||
github.com/rogpeppe/go-internal/lockedfile
|
||||
@@ -2145,7 +2148,7 @@ golang.org/x/image/vector
|
||||
golang.org/x/mod/internal/lazyregexp
|
||||
golang.org/x/mod/module
|
||||
golang.org/x/mod/semver
|
||||
# golang.org/x/net v0.34.0
|
||||
# golang.org/x/net v0.35.0
|
||||
## explicit; go 1.18
|
||||
golang.org/x/net/bpf
|
||||
golang.org/x/net/context
|
||||
@@ -2158,6 +2161,7 @@ golang.org/x/net/http2
|
||||
golang.org/x/net/http2/h2c
|
||||
golang.org/x/net/http2/hpack
|
||||
golang.org/x/net/idna
|
||||
golang.org/x/net/internal/httpcommon
|
||||
golang.org/x/net/internal/iana
|
||||
golang.org/x/net/internal/socket
|
||||
golang.org/x/net/internal/socks
|
||||
|
||||
Reference in New Issue
Block a user