build(deps): bump google.golang.org/grpc from 1.76.0 to 1.77.0

Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.76.0 to 1.77.0.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.76.0...v1.77.0)
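The same update can be reproduced locally with `go get google.golang.org/grpc@v1.77.0` followed by `go mod tidy`.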

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-version: 1.77.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Authored by dependabot[bot] on 2025-11-26 14:17:16 +00:00, committed by Ralf Haferkamp
parent 8a70a65597
commit 288dd8c220
38 changed files with 1679 additions and 1507 deletions

6 go.mod

@@ -109,8 +109,8 @@ require (
golang.org/x/sync v0.18.0
golang.org/x/term v0.37.0
golang.org/x/text v0.31.0
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4
google.golang.org/grpc v1.76.0
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8
google.golang.org/grpc v1.77.0
google.golang.org/protobuf v1.36.10
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
@@ -389,7 +389,7 @@ require (
golang.org/x/time v0.14.0 // indirect
golang.org/x/tools v0.38.0 // indirect
google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 // indirect
gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect

12 go.sum

@@ -1728,10 +1728,10 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE=
google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE=
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU=
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 h1:i8QOKZfYg6AbGVZzUAY3LrNWCKF8O6zFisU9Wl9RER4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ=
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4=
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 h1:M1rk8KBnUsBDg1oPGHNCxG4vc1f49epmTO7xscSajMk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -1747,8 +1747,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A=
google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c=
google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
google.golang.org/grpc/examples v0.0.0-20211102180624-670c133e568e h1:m7aQHHqd0q89mRwhwS9Bx2rjyl/hsFAeta+uGrHsQaU=
google.golang.org/grpc/examples v0.0.0-20211102180624-670c133e568e/go.mod h1:gID3PKrg7pWKntu9Ss6zTLJ0ttC0X9IHgREOCZwbCVU=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=


File diff suppressed because it is too large


@@ -1,913 +0,0 @@
/*
*
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package pickfirstleaf contains the pick_first load balancing policy which
// will be the universal leaf policy after dualstack changes are implemented.
//
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.
package pickfirstleaf
import (
"encoding/json"
"errors"
"fmt"
"net"
"net/netip"
"sync"
"time"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/pickfirst/internal"
"google.golang.org/grpc/connectivity"
expstats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/envconfig"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
)
func init() {
if envconfig.NewPickFirstEnabled {
// Register as the default pick_first balancer.
Name = "pick_first"
}
balancer.Register(pickfirstBuilder{})
}
// enableHealthListenerKeyType is a unique key type used in resolver
// attributes to indicate whether the health listener usage is enabled.
type enableHealthListenerKeyType struct{}
var (
logger = grpclog.Component("pick-first-leaf-lb")
// Name is the name of the pick_first_leaf balancer.
// It is changed to "pick_first" in init() if this balancer is to be
// registered as the default pickfirst.
Name = "pick_first_leaf"
disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
Name: "grpc.lb.pick_first.disconnections",
Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
Unit: "{disconnection}",
Labels: []string{"grpc.target"},
Default: false,
})
connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
Name: "grpc.lb.pick_first.connection_attempts_succeeded",
Description: "EXPERIMENTAL. Number of successful connection attempts.",
Unit: "{attempt}",
Labels: []string{"grpc.target"},
Default: false,
})
connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
Name: "grpc.lb.pick_first.connection_attempts_failed",
Description: "EXPERIMENTAL. Number of failed connection attempts.",
Unit: "{attempt}",
Labels: []string{"grpc.target"},
Default: false,
})
)
const (
// TODO: change to pick-first when this becomes the default pick_first policy.
logPrefix = "[pick-first-leaf-lb %p] "
// connectionDelayInterval is the time to wait during the happy eyeballs
// pass before starting the next connection attempt.
connectionDelayInterval = 250 * time.Millisecond
)
type ipAddrFamily int
const (
// ipAddrFamilyUnknown represents strings that can't be parsed as an IP
// address.
ipAddrFamilyUnknown ipAddrFamily = iota
ipAddrFamilyV4
ipAddrFamilyV6
)
type pickfirstBuilder struct{}
func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer {
b := &pickfirstBalancer{
cc: cc,
target: bo.Target.String(),
metricsRecorder: cc.MetricsRecorder(),
subConns: resolver.NewAddressMapV2[*scData](),
state: connectivity.Connecting,
cancelConnectionTimer: func() {},
}
b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
return b
}
func (b pickfirstBuilder) Name() string {
return Name
}
func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
var cfg pfConfig
if err := json.Unmarshal(js, &cfg); err != nil {
return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
}
return cfg, nil
}
// EnableHealthListener updates the state to configure pickfirst for using a
// generic health listener.
func EnableHealthListener(state resolver.State) resolver.State {
state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true)
return state
}
type pfConfig struct {
serviceconfig.LoadBalancingConfig `json:"-"`
// If set to true, instructs the LB policy to shuffle the order of the list
// of endpoints received from the name resolver before attempting to
// connect to them.
ShuffleAddressList bool `json:"shuffleAddressList"`
}
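For illustration: ParseConfig above receives only the inner policy object from the service config's loadBalancingConfig list (per gRFC A62). A minimal in-package sketch, assuming the encoding/json and fmt imports:

// Service config excerpt: {"loadBalancingConfig": [{"pick_first": {"shuffleAddressList": true}}]}
cfg, err := pickfirstBuilder{}.ParseConfig(json.RawMessage(`{"shuffleAddressList": true}`))
if err != nil {
	// Malformed or mistyped config.
}
fmt.Println(cfg.(pfConfig).ShuffleAddressList) // true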
// scData keeps track of the current state of the subConn.
// It is not safe for concurrent access.
type scData struct {
// The following fields are initialized at build time and read-only after
// that.
subConn balancer.SubConn
addr resolver.Address
rawConnectivityState connectivity.State
// The effective connectivity state based on raw connectivity, health state,
// and the sticky TransientFailure behaviour defined in A62.
effectiveState connectivity.State
lastErr error
connectionFailedInFirstPass bool
}
func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
sd := &scData{
rawConnectivityState: connectivity.Idle,
effectiveState: connectivity.Idle,
addr: addr,
}
sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
StateListener: func(state balancer.SubConnState) {
b.updateSubConnState(sd, state)
},
})
if err != nil {
return nil, err
}
sd.subConn = sc
return sd, nil
}
type pickfirstBalancer struct {
// The following fields are initialized at build time and read-only after
// that and therefore do not need to be guarded by a mutex.
logger *internalgrpclog.PrefixLogger
cc balancer.ClientConn
target string
metricsRecorder expstats.MetricsRecorder // guaranteed to be non-nil
// The mutex synchronizes updates triggered from the idle picker with the
// already-serialized resolver and SubConn state updates.
mu sync.Mutex
// State reported to the channel based on SubConn states and resolver
// updates.
state connectivity.State
// scData for active subconns mapped by address.
subConns *resolver.AddressMapV2[*scData]
addressList addressList
firstPass bool
numTF int
cancelConnectionTimer func()
healthCheckingEnabled bool
}
// ResolverError is called by the ClientConn when the name resolver produces
// an error or when pickfirst determines the resolver update to be invalid.
func (b *pickfirstBalancer) ResolverError(err error) {
b.mu.Lock()
defer b.mu.Unlock()
b.resolverErrorLocked(err)
}
func (b *pickfirstBalancer) resolverErrorLocked(err error) {
if b.logger.V(2) {
b.logger.Infof("Received error from the name resolver: %v", err)
}
// The picker will not change since the balancer does not currently
// report an error. If the balancer hasn't received a single good resolver
// update yet, transition to TRANSIENT_FAILURE.
if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
if b.logger.V(2) {
b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
}
return
}
b.updateBalancerState(balancer.State{
ConnectivityState: connectivity.TransientFailure,
Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
})
}
func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
b.mu.Lock()
defer b.mu.Unlock()
b.cancelConnectionTimer()
if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
// Cleanup state pertaining to the previous resolver state.
// Treat an empty address list like an error by calling b.ResolverError.
b.closeSubConnsLocked()
b.addressList.updateAddrs(nil)
b.resolverErrorLocked(errors.New("produced zero addresses"))
return balancer.ErrBadResolverState
}
b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil
cfg, ok := state.BalancerConfig.(pfConfig)
if state.BalancerConfig != nil && !ok {
return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
}
if b.logger.V(2) {
b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
}
var newAddrs []resolver.Address
if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
// Perform the optional shuffling described in gRFC A62. The shuffling
// will change the order of endpoints but not touch the order of the
// addresses within each endpoint. - A61
if cfg.ShuffleAddressList {
endpoints = append([]resolver.Endpoint{}, endpoints...)
internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
}
// "Flatten the list by concatenating the ordered list of addresses for
// each of the endpoints, in order." - A61
for _, endpoint := range endpoints {
newAddrs = append(newAddrs, endpoint.Addresses...)
}
} else {
// Endpoints are not set, so process addresses until resolver emissions
// migrate fully to Endpoints. The top channel does wrap emitted addresses
// with endpoints; however, some balancers, such as weighted target, do not
// forward the correct endpoints down or split endpoints properly. Once all
// balancers correctly forward endpoints down, this else branch can be
// deleted.
newAddrs = state.ResolverState.Addresses
if cfg.ShuffleAddressList {
newAddrs = append([]resolver.Address{}, newAddrs...)
internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
}
}
// If an address appears in multiple endpoints or in the same endpoint
// multiple times, we keep it only once. We will create only one SubConn
// for the address because an AddressMap is used to store SubConns.
// Not de-duplicating would result in attempting to connect to the same
// SubConn multiple times in the same pass. We don't want this.
newAddrs = deDupAddresses(newAddrs)
newAddrs = interleaveAddresses(newAddrs)
prevAddr := b.addressList.currentAddress()
prevSCData, found := b.subConns.Get(prevAddr)
prevAddrsCount := b.addressList.size()
isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready
b.addressList.updateAddrs(newAddrs)
// If the previously ready SubConn exists in the new address list,
// keep this connection and don't create new SubConns.
if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) {
return nil
}
b.reconcileSubConnsLocked(newAddrs)
// If it's the first resolver update or the balancer was already READY
// (but the new address list does not contain the ready SubConn) or
// CONNECTING, enter CONNECTING.
// We may be in TRANSIENT_FAILURE due to a previous empty address list;
// we should still enter CONNECTING because the sticky TF behaviour
// mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
// due to connectivity failures.
if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 {
// Start connection attempt at first address.
b.forceUpdateConcludedStateLocked(balancer.State{
ConnectivityState: connectivity.Connecting,
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
b.startFirstPassLocked()
} else if b.state == connectivity.TransientFailure {
// If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
// we're READY. See A62.
b.startFirstPassLocked()
}
return nil
}
// UpdateSubConnState is unused as a StateListener is always registered when
// creating SubConns.
func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
}
func (b *pickfirstBalancer) Close() {
b.mu.Lock()
defer b.mu.Unlock()
b.closeSubConnsLocked()
b.cancelConnectionTimer()
b.state = connectivity.Shutdown
}
// ExitIdle moves the balancer out of idle state. It can be called concurrently
// by the idlePicker and clientConn so access to variables should be
// synchronized.
func (b *pickfirstBalancer) ExitIdle() {
b.mu.Lock()
defer b.mu.Unlock()
if b.state == connectivity.Idle {
// Move the balancer into CONNECTING state immediately. This is done to
// avoid staying in IDLE if a resolver update arrives before the first
// SubConn reports CONNECTING.
b.updateBalancerState(balancer.State{
ConnectivityState: connectivity.Connecting,
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
b.startFirstPassLocked()
}
}
func (b *pickfirstBalancer) startFirstPassLocked() {
b.firstPass = true
b.numTF = 0
// Reset the connection attempt record for existing SubConns.
for _, sd := range b.subConns.Values() {
sd.connectionFailedInFirstPass = false
}
b.requestConnectionLocked()
}
func (b *pickfirstBalancer) closeSubConnsLocked() {
for _, sd := range b.subConns.Values() {
sd.subConn.Shutdown()
}
b.subConns = resolver.NewAddressMapV2[*scData]()
}
// deDupAddresses ensures that each address appears only once in the slice.
func deDupAddresses(addrs []resolver.Address) []resolver.Address {
seenAddrs := resolver.NewAddressMapV2[bool]()
retAddrs := []resolver.Address{}
for _, addr := range addrs {
if _, ok := seenAddrs.Get(addr); ok {
continue
}
// Mark the address as seen so that later duplicates are skipped.
seenAddrs.Set(addr, true)
retAddrs = append(retAddrs, addr)
}
return retAddrs
}
// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6)
// as per RFC-8305 section 4.
// Whichever address family is first in the list is followed by an address of
// the other address family; that is, if the first address in the list is IPv6,
// then the first IPv4 address should be moved up in the list to be second in
// the list. It doesn't support configuring "First Address Family Count", i.e.
// there will always be a single member of the first address family at the
// beginning of the interleaved list.
// Addresses that are neither IPv4 nor IPv6 are treated as part of a third
// "unknown" family for interleaving.
// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6
func interleaveAddresses(addrs []resolver.Address) []resolver.Address {
familyAddrsMap := map[ipAddrFamily][]resolver.Address{}
interleavingOrder := []ipAddrFamily{}
for _, addr := range addrs {
family := addressFamily(addr.Addr)
if _, found := familyAddrsMap[family]; !found {
interleavingOrder = append(interleavingOrder, family)
}
familyAddrsMap[family] = append(familyAddrsMap[family], addr)
}
interleavedAddrs := make([]resolver.Address, 0, len(addrs))
for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) {
// Some IP types may have fewer addresses than others, so we look for
// the next type that has a remaining member to add to the interleaved
// list.
family := interleavingOrder[curFamilyIdx]
remainingMembers := familyAddrsMap[family]
if len(remainingMembers) > 0 {
interleavedAddrs = append(interleavedAddrs, remainingMembers[0])
familyAddrsMap[family] = remainingMembers[1:]
}
}
return interleavedAddrs
}
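A small illustration of the interleaving, using hypothetical addresses:

// Flattened input:    [2001:db8::1]:443, [2001:db8::2]:443, 192.0.2.1:443, 192.0.2.2:443
// Interleaved output: [2001:db8::1]:443, 192.0.2.1:443, [2001:db8::2]:443, 192.0.2.2:443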
// addressFamily returns the ipAddrFamily after parsing the address string.
// If the address isn't of the format "ip-address:port", it returns
// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when
// using a resolver like passthrough where the address may be a hostname in
// some format that the dialer can resolve.
func addressFamily(address string) ipAddrFamily {
// Parse the IP after removing the port.
host, _, err := net.SplitHostPort(address)
if err != nil {
return ipAddrFamilyUnknown
}
ip, err := netip.ParseAddr(host)
if err != nil {
return ipAddrFamilyUnknown
}
switch {
case ip.Is4() || ip.Is4In6():
return ipAddrFamilyV4
case ip.Is6():
return ipAddrFamilyV6
default:
return ipAddrFamilyUnknown
}
}
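Some example classifications implied by the parsing above (hypothetical inputs):

// addressFamily("192.0.2.1:443")          == ipAddrFamilyV4
// addressFamily("[2001:db8::1]:443")      == ipAddrFamilyV6
// addressFamily("[::ffff:192.0.2.1]:443") == ipAddrFamilyV4 (IPv4-mapped, via Is4In6)
// addressFamily("example.com:443")        == ipAddrFamilyUnknown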
// reconcileSubConnsLocked updates the active subchannels based on a new address
// list from the resolver. It does this by:
// - closing subchannels: any existing subchannels associated with addresses
// that are no longer in the updated list are shut down.
// - removing subchannels: entries for these closed subchannels are removed
// from the subchannel map.
//
// This ensures that the subchannel map accurately reflects the current set of
// addresses received from the name resolver.
func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
newAddrsMap := resolver.NewAddressMapV2[bool]()
for _, addr := range newAddrs {
newAddrsMap.Set(addr, true)
}
for _, oldAddr := range b.subConns.Keys() {
if _, ok := newAddrsMap.Get(oldAddr); ok {
continue
}
val, _ := b.subConns.Get(oldAddr)
val.subConn.Shutdown()
b.subConns.Delete(oldAddr)
}
}
// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
// becomes ready, which means that all other SubConns must be shut down.
func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
b.cancelConnectionTimer()
for _, sd := range b.subConns.Values() {
if sd.subConn != selected.subConn {
sd.subConn.Shutdown()
}
}
b.subConns = resolver.NewAddressMapV2[*scData]()
b.subConns.Set(selected.addr, selected)
}
// requestConnectionLocked starts connecting on the subchannel corresponding to
// the current address. If no subchannel exists, one is created. If the current
// subchannel is in TransientFailure, a connection to the next address is
// attempted until a subchannel is found.
func (b *pickfirstBalancer) requestConnectionLocked() {
if !b.addressList.isValid() {
return
}
var lastErr error
for valid := true; valid; valid = b.addressList.increment() {
curAddr := b.addressList.currentAddress()
sd, ok := b.subConns.Get(curAddr)
if !ok {
var err error
// We want to assign the new scData to sd from the outer scope,
// hence we can't use := below.
sd, err = b.newSCData(curAddr)
if err != nil {
// This should never happen, unless the clientConn is being shut
// down.
if b.logger.V(2) {
b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
}
// Do nothing, the LB policy will be closed soon.
return
}
b.subConns.Set(curAddr, sd)
}
switch sd.rawConnectivityState {
case connectivity.Idle:
sd.subConn.Connect()
b.scheduleNextConnectionLocked()
return
case connectivity.TransientFailure:
// The SubConn is being re-used and failed during a previous pass
// over the addressList. It has not completed backoff yet.
// Mark it as having failed and try the next address.
sd.connectionFailedInFirstPass = true
lastErr = sd.lastErr
continue
case connectivity.Connecting:
// Wait for the connection attempt to complete or the timer to fire
// before attempting the next address.
b.scheduleNextConnectionLocked()
return
default:
b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState)
return
}
}
// All the remaining addresses in the list are in TRANSIENT_FAILURE; end the
// first pass if possible.
b.endFirstPassIfPossibleLocked(lastErr)
}
func (b *pickfirstBalancer) scheduleNextConnectionLocked() {
b.cancelConnectionTimer()
if !b.addressList.hasNext() {
return
}
curAddr := b.addressList.currentAddress()
cancelled := false // Access to this is protected by the balancer's mutex.
closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() {
b.mu.Lock()
defer b.mu.Unlock()
// If the scheduled task is cancelled while acquiring the mutex, return.
if cancelled {
return
}
if b.logger.V(2) {
b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr)
}
if b.addressList.increment() {
b.requestConnectionLocked()
}
})
// Access to the cancellation callback held by the balancer is guarded by
// the balancer's mutex, so it's safe to set the boolean from the callback.
b.cancelConnectionTimer = sync.OnceFunc(func() {
cancelled = true
closeFn()
})
}
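In isolation, the timer/cancellation pattern above looks roughly like the following sketch, with the stdlib time.AfterFunc standing in for internal.TimeAfterFunc and the locking arranged for standalone use:

// scheduleWithCancel is a hypothetical standalone version of the pattern: a
// delayed action plus an idempotent cancel that remains safe even when the
// timer has already fired and its callback is waiting on the mutex.
func scheduleWithCancel(mu *sync.Mutex, d time.Duration, next func()) (cancel func()) {
	cancelled := false
	t := time.AfterFunc(d, func() {
		mu.Lock()
		defer mu.Unlock()
		if cancelled { // cancel() won the race; do nothing.
			return
		}
		next()
	})
	return sync.OnceFunc(func() {
		mu.Lock()
		cancelled = true
		mu.Unlock()
		t.Stop()
	})
}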
func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
b.mu.Lock()
defer b.mu.Unlock()
oldState := sd.rawConnectivityState
sd.rawConnectivityState = newState.ConnectivityState
// Previously relevant SubConns can still call back with state updates.
// To prevent pickers from returning these obsolete SubConns, this logic
// is included to check if the current list of active SubConns includes this
// SubConn.
if !b.isActiveSCData(sd) {
return
}
if newState.ConnectivityState == connectivity.Shutdown {
sd.effectiveState = connectivity.Shutdown
return
}
// Record a connection attempt when exiting CONNECTING.
if newState.ConnectivityState == connectivity.TransientFailure {
sd.connectionFailedInFirstPass = true
connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)
}
if newState.ConnectivityState == connectivity.Ready {
connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
b.shutdownRemainingLocked(sd)
if !b.addressList.seekTo(sd.addr) {
// This should not fail as we should have only one SubConn after
// entering READY. The SubConn should be present in the addressList.
b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses)
return
}
if !b.healthCheckingEnabled {
if b.logger.V(2) {
b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn)
}
sd.effectiveState = connectivity.Ready
b.updateBalancerState(balancer.State{
ConnectivityState: connectivity.Ready,
Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
})
return
}
if b.logger.V(2) {
b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn)
}
// Send a CONNECTING update to take the SubConn out of sticky-TF if
// required.
sd.effectiveState = connectivity.Connecting
b.updateBalancerState(balancer.State{
ConnectivityState: connectivity.Connecting,
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) {
b.updateSubConnHealthState(sd, scs)
})
return
}
// If the LB policy is READY, and it receives a subchannel state change,
// it means that the READY subchannel has failed.
// A SubConn can also transition from CONNECTING directly to IDLE when
// a transport is successfully created, but the connection fails
// before the SubConn can send the notification for READY. We treat
// this as a successful connection and transition to IDLE.
// TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
// part of the if condition below once the issue is fixed.
if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
// Once a transport fails, the balancer enters IDLE and starts from
// the first address when the picker is used.
b.shutdownRemainingLocked(sd)
sd.effectiveState = newState.ConnectivityState
// A READY SubConn was interleaved between CONNECTING and IDLE; we need to
// account for that.
if oldState == connectivity.Connecting {
// A known issue (https://github.com/grpc/grpc-go/issues/7862)
// causes a race that prevents the READY state change notification.
// This works around it.
connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
}
disconnectionsMetric.Record(b.metricsRecorder, 1, b.target)
b.addressList.reset()
b.updateBalancerState(balancer.State{
ConnectivityState: connectivity.Idle,
Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
})
return
}
if b.firstPass {
switch newState.ConnectivityState {
case connectivity.Connecting:
// The effective state can be in either IDLE, CONNECTING or
// TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in
// TRANSIENT_FAILURE until it's READY. See A62.
if sd.effectiveState != connectivity.TransientFailure {
sd.effectiveState = connectivity.Connecting
b.updateBalancerState(balancer.State{
ConnectivityState: connectivity.Connecting,
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
}
case connectivity.TransientFailure:
sd.lastErr = newState.ConnectionError
sd.effectiveState = connectivity.TransientFailure
// Since we're re-using common SubConns while handling resolver
// updates, we could receive an out of turn TRANSIENT_FAILURE from
// a pass over the previous address list. Happy Eyeballs will also
// cause out of order updates to arrive.
if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
b.cancelConnectionTimer()
if b.addressList.increment() {
b.requestConnectionLocked()
return
}
}
// End the first pass if we've seen a TRANSIENT_FAILURE from all
// SubConns once.
b.endFirstPassIfPossibleLocked(newState.ConnectionError)
}
return
}
// We have finished the first pass, keep re-connecting failing SubConns.
switch newState.ConnectivityState {
case connectivity.TransientFailure:
b.numTF = (b.numTF + 1) % b.subConns.Len()
sd.lastErr = newState.ConnectionError
if b.numTF%b.subConns.Len() == 0 {
b.updateBalancerState(balancer.State{
ConnectivityState: connectivity.TransientFailure,
Picker: &picker{err: newState.ConnectionError},
})
}
// We don't need to request re-resolution since the SubConn already
// does that before reporting TRANSIENT_FAILURE.
// TODO: #7534 - Move re-resolution requests from SubConn into
// pick_first.
case connectivity.Idle:
sd.subConn.Connect()
}
}
// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the
// addresses are tried and their SubConns have reported a failure.
func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
// An optimization to avoid iterating over the entire SubConn map.
if b.addressList.isValid() {
return
}
// Connect() has been called on all the SubConns. The first pass can be
// ended if all the SubConns have reported a failure.
for _, sd := range b.subConns.Values() {
if !sd.connectionFailedInFirstPass {
return
}
}
b.firstPass = false
b.updateBalancerState(balancer.State{
ConnectivityState: connectivity.TransientFailure,
Picker: &picker{err: lastErr},
})
// Start re-connecting all the SubConns that are already in IDLE.
for _, sd := range b.subConns.Values() {
if sd.rawConnectivityState == connectivity.Idle {
sd.subConn.Connect()
}
}
}
func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
activeSD, found := b.subConns.Get(sd.addr)
return found && activeSD == sd
}
func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
b.mu.Lock()
defer b.mu.Unlock()
// Previously relevant SubConns can still call back with state updates.
// To prevent pickers from returning these obsolete SubConns, this logic
// is included to check if the current list of active SubConns includes
// this SubConn.
if !b.isActiveSCData(sd) {
return
}
sd.effectiveState = state.ConnectivityState
switch state.ConnectivityState {
case connectivity.Ready:
b.updateBalancerState(balancer.State{
ConnectivityState: connectivity.Ready,
Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
})
case connectivity.TransientFailure:
b.updateBalancerState(balancer.State{
ConnectivityState: connectivity.TransientFailure,
Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
})
case connectivity.Connecting:
b.updateBalancerState(balancer.State{
ConnectivityState: connectivity.Connecting,
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
default:
b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state)
}
}
// updateBalancerState stores the state reported to the channel and calls
// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
// updates to the channel.
func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
// In the case of TRANSIENT_FAILURE, allow the picker to be updated so it
// carries the latest connectivity error; in all other cases, don't send
// duplicate state updates.
if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
return
}
b.forceUpdateConcludedStateLocked(newState)
}
// forceUpdateConcludedStateLocked stores the state reported to the channel and
// calls ClientConn.UpdateState().
// A separate function is defined to force an update of the ClientConn state
// because the channel does not assume that LB policies start in CONNECTING;
// it relies on the LB policy to send an initial CONNECTING update.
func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
b.state = newState.ConnectivityState
b.cc.UpdateState(newState)
}
type picker struct {
result balancer.PickResult
err error
}
func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
return p.result, p.err
}
// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
// CONNECTING when Pick is called.
type idlePicker struct {
exitIdle func()
}
func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
i.exitIdle()
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}
// addressList manages sequentially iterating over addresses present in a list
// of endpoints. It provides a one-dimensional view of the addresses present in
// the endpoints.
// This type is not safe for concurrent access.
type addressList struct {
addresses []resolver.Address
idx int
}
func (al *addressList) isValid() bool {
return al.idx < len(al.addresses)
}
func (al *addressList) size() int {
return len(al.addresses)
}
// increment moves to the next index in the address list.
// This method returns false if it went off the list, true otherwise.
func (al *addressList) increment() bool {
if !al.isValid() {
return false
}
al.idx++
return al.idx < len(al.addresses)
}
// currentAddress returns the current address pointed to in the addressList.
// If the list is in an invalid state, it returns an empty address instead.
func (al *addressList) currentAddress() resolver.Address {
if !al.isValid() {
return resolver.Address{}
}
return al.addresses[al.idx]
}
func (al *addressList) reset() {
al.idx = 0
}
func (al *addressList) updateAddrs(addrs []resolver.Address) {
al.addresses = addrs
al.reset()
}
// seekTo moves the index to the first occurrence of the given needle. If the
// needle is not found, it returns false and leaves the current index
// unchanged.
func (al *addressList) seekTo(needle resolver.Address) bool {
for ai, addr := range al.addresses {
if !equalAddressIgnoringBalAttributes(&addr, &needle) {
continue
}
al.idx = ai
return true
}
return false
}
// hasNext returns whether there is another address after the current one,
// i.e. whether incrementing will keep the index within the list. If the list
// has already moved past the end, it returns false.
func (al *addressList) hasNext() bool {
if !al.isValid() {
return false
}
return al.idx+1 < len(al.addresses)
}
// equalAddressIgnoringBalAttributes returns true if a and b are considered
// equal. This is different from the Equal method on the resolver.Address type
// which considers all fields to determine equality. Here, we only consider
// fields that are meaningful to the SubConn.
func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
return a.Addr == b.Addr && a.ServerName == b.ServerName &&
a.Attributes.Equal(b.Attributes)
}


@@ -26,7 +26,7 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/endpointsharding"
"google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
"google.golang.org/grpc/balancer/pickfirst"
"google.golang.org/grpc/grpclog"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
)
@@ -47,7 +47,7 @@ func (bb builder) Name() string {
}
func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
childBuilder := balancer.Get(pickfirstleaf.Name).Build
childBuilder := balancer.Get(pickfirst.Name).Build
bal := &rrBalancer{
cc: cc,
Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}),
@@ -67,6 +67,6 @@ func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error {
return b.Balancer.UpdateClientConnState(balancer.ClientConnState{
// Enable the health listener in pickfirst children for client side health
// checks and outlier detection, if configured.
ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState),
ResolverState: pickfirst.EnableHealthListener(ccs.ResolverState),
})
}


@@ -450,13 +450,14 @@ func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(
if acbw.ccb.cc.dopts.disableHealthCheck {
return noOpRegisterHealthListenerFn
}
cfg := acbw.ac.cc.healthCheckConfig()
if cfg == nil {
return noOpRegisterHealthListenerFn
}
regHealthLisFn := internal.RegisterClientHealthCheckListener
if regHealthLisFn == nil {
// The health package is not imported.
return noOpRegisterHealthListenerFn
}
cfg := acbw.ac.cc.healthCheckConfig()
if cfg == nil {
channelz.Error(logger, acbw.ac.channelz, "Health check is requested but health package is not imported.")
return noOpRegisterHealthListenerFn
}
return func(ctx context.Context, listener func(balancer.SubConnState)) func() {


@@ -18,7 +18,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc-gen-go v1.36.10
// protoc v5.27.1
// source: grpc/binlog/v1/binarylog.proto


@@ -40,11 +40,12 @@ import (
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/idle"
iresolver "google.golang.org/grpc/internal/resolver"
"google.golang.org/grpc/internal/stats"
istats "google.golang.org/grpc/internal/stats"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
@@ -210,7 +211,8 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error)
cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
cc.pickerWrapper = newPickerWrapper()
cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
cc.metricsRecorderList = istats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
cc.statsHandler = istats.NewCombinedHandler(cc.dopts.copts.StatsHandlers...)
cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
@@ -621,7 +623,8 @@ type ClientConn struct {
channelz *channelz.Channel // Channelz object.
resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder().
idlenessMgr *idle.Manager
metricsRecorderList *stats.MetricsRecorderList
metricsRecorderList *istats.MetricsRecorderList
statsHandler stats.Handler
// The following provide their own synchronization, and therefore don't
// require cc.mu to be held to access them.

View File

@@ -44,8 +44,7 @@ type PerRPCCredentials interface {
// A54). uri is the URI of the entry point for the request. When supported
// by the underlying implementation, ctx can be used for timeout and
// cancellation. Additionally, RequestInfo data will be available via ctx
// to this call. TODO(zhaoq): Define the set of the qualified keys instead
// of leaving it as an arbitrary string.
// to this call.
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
// RequireTransportSecurity indicates whether the credentials requires
// transport security.
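A minimal sketch of an implementation of this interface (hypothetical static-token credentials, not part of this diff):

// tokenCreds attaches a fixed bearer token to every RPC.
type tokenCreds struct{ token string }

func (c tokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

// RequireTransportSecurity reports that the token must only be sent over a
// secure connection.
func (c tokenCreds) RequireTransportSecurity() bool { return true }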


@@ -27,8 +27,10 @@ package encoding
import (
"io"
"slices"
"strings"
"google.golang.org/grpc/encoding/internal"
"google.golang.org/grpc/internal/grpcutil"
)
@@ -36,6 +38,24 @@ import (
// It is intended for grpc internal use only.
const Identity = "identity"
func init() {
internal.RegisterCompressorForTesting = func(c Compressor) func() {
name := c.Name()
curCompressor, found := registeredCompressor[name]
RegisterCompressor(c)
return func() {
if found {
registeredCompressor[name] = curCompressor
return
}
delete(registeredCompressor, name)
grpcutil.RegisteredCompressorNames = slices.DeleteFunc(grpcutil.RegisteredCompressorNames, func(s string) bool {
return s == name
})
}
}
}
// Compressor is used for compressing and decompressing when sending or
// receiving messages.
//


@@ -0,0 +1,28 @@
/*
*
* Copyright 2025 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package internal contains code internal to the encoding package.
package internal
// RegisterCompressorForTesting registers a compressor in the global compressor
// registry. It returns a cleanup function that should be called at the end
// of the test to unregister the compressor.
//
// This prevents compressors registered in one test from appearing in the
// encoding headers of subsequent tests.
var RegisterCompressorForTesting any // func(c Compressor) func()
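A hedged sketch of the intended test-side usage, assuming the function type noted in the comment above and a test-local compressor c:

register := internal.RegisterCompressorForTesting.(func(encoding.Compressor) func())
cleanup := register(c)
defer cleanup() // restore the registry after the test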


@@ -75,6 +75,7 @@ const (
MetricTypeIntHisto
MetricTypeFloatHisto
MetricTypeIntGauge
MetricTypeIntUpDownCount
)
// Int64CountHandle is a typed handle for a int count metric. This handle
@@ -93,6 +94,23 @@ func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels .
recorder.RecordInt64Count(h, incr, labels...)
}
// Int64UpDownCountHandle is a typed handle for an int up-down counter metric.
// This handle is passed at the recording point in order to know which metric
// to record on.
type Int64UpDownCountHandle MetricDescriptor
// Descriptor returns the int64 up-down counter handle typecast to a pointer to a
// MetricDescriptor.
func (h *Int64UpDownCountHandle) Descriptor() *MetricDescriptor {
return (*MetricDescriptor)(h)
}
// Record records the int64 up-down counter value on the metrics recorder provided.
// The value 'v' can be positive to increment or negative to decrement.
func (h *Int64UpDownCountHandle) Record(recorder MetricsRecorder, v int64, labels ...string) {
recorder.RecordInt64UpDownCount(h, v, labels...)
}
// Float64CountHandle is a typed handle for a float count metric. This handle is
// passed at the recording point in order to know which metric to record on.
type Float64CountHandle MetricDescriptor
@@ -249,6 +267,21 @@ func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
return (*Int64GaugeHandle)(descPtr)
}
// RegisterInt64UpDownCount registers the metric description onto the global registry.
// It returns a typed handle to use for recording data.
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple metrics are
// registered with the same name, this function will panic.
func RegisterInt64UpDownCount(descriptor MetricDescriptor) *Int64UpDownCountHandle {
registerMetric(descriptor.Name, descriptor.Default)
// Set the specific metric type for the up-down counter
descriptor.Type = MetricTypeIntUpDownCount
descPtr := &descriptor
metricsRegistry[descriptor.Name] = descPtr
return (*Int64UpDownCountHandle)(descPtr)
}
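A sketch of how a caller might register and record such a metric (hypothetical metric name, mirroring the counter registrations elsewhere in this diff; in-package, so names are unqualified):

var openStreamsMetric = RegisterInt64UpDownCount(MetricDescriptor{
	Name:        "example.open_streams",
	Description: "EXPERIMENTAL. Number of currently open streams.",
	Unit:        "{stream}",
	Labels:      []string{"grpc.target"},
	Default:     false,
})

// At the recording points: +1 when a stream opens, -1 when it closes.
// openStreamsMetric.Record(recorder, 1, target)
// openStreamsMetric.Record(recorder, -1, target)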
// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
// registry. Returns a cleanup function that sets the metrics registry to its
// original state.


@@ -38,6 +38,9 @@ type MetricsRecorder interface {
// RecordInt64Gauge records the measurement alongside labels on the int
// gauge associated with the provided handle.
RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
// RecordInt64UpDownCount records the measurement alongside labels on the int
// up-down counter associated with the provided handle.
RecordInt64UpDownCount(handle *Int64UpDownCountHandle, incr int64, labels ...string)
}
// Metrics is an experimental legacy alias of the now-stable stats.MetricSet.


@@ -17,7 +17,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc-gen-go v1.36.10
// protoc v5.27.1
// source: grpc/health/v1/health.proto


@@ -52,12 +52,6 @@ var (
// or "false".
EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
// NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
// instead of the existing pickfirst implementation. This can be disabled by
// setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
// to "false".
NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true)
// XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash
// key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by
// setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". When the
@@ -75,6 +69,14 @@ var (
// ALTSHandshakerKeepaliveParams is set if we should add the
// KeepaliveParams when dial the ALTS handshaker service.
ALTSHandshakerKeepaliveParams = boolFromEnv("GRPC_EXPERIMENTAL_ALTS_HANDSHAKER_KEEPALIVE_PARAMS", false)
// EnableDefaultPortForProxyTarget controls whether the resolver adds a default port 443
// to a target address that lacks one. This flag only has an effect when all of
// the following conditions are met:
// - A connect proxy is being used.
// - Target resolution is disabled.
// - The DNS resolver is being used.
EnableDefaultPortForProxyTarget = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_DEFAULT_PORT_FOR_PROXY_TARGET", true)
)
func boolFromEnv(envVar string, def bool) bool {


@@ -74,4 +74,9 @@ var (
// For more details, see:
// https://github.com/grpc/proposal/blob/master/A86-xds-http-connect.md
XDSHTTPConnectEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_HTTP_CONNECT", false)
// XDSBootstrapCallCredsEnabled controls if call credentials can be used in
// xDS bootstrap configuration via the `call_creds` field. For more details,
// see: https://github.com/grpc/proposal/blob/master/A97-xds-jwt-call-creds.md
XDSBootstrapCallCredsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_BOOTSTRAP_CALL_CREDS", false)
)


@@ -22,11 +22,13 @@ package delegatingresolver
import (
"fmt"
"net"
"net/http"
"net/url"
"sync"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/envconfig"
"google.golang.org/grpc/internal/proxyattributes"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/internal/transport/networktype"
@@ -40,6 +42,8 @@ var (
HTTPSProxyFromEnvironment = http.ProxyFromEnvironment
)
const defaultPort = "443"
// delegatingResolver manages both target URI and proxy address resolution by
// delegating these tasks to separate child resolvers. Essentially, it acts as
// an intermediary between the gRPC ClientConn and the child resolvers.
@@ -107,10 +111,18 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti
targetResolver: nopResolver{},
}
addr := target.Endpoint()
var err error
r.proxyURL, err = proxyURLForTarget(target.Endpoint())
if target.URL.Scheme == "dns" && !targetResolutionEnabled && envconfig.EnableDefaultPortForProxyTarget {
addr, err = parseTarget(addr)
if err != nil {
return nil, fmt.Errorf("delegating_resolver: invalid target address %q: %v", target.Endpoint(), err)
}
}
r.proxyURL, err = proxyURLForTarget(addr)
if err != nil {
return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err)
return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %q: %v", target, err)
}
// proxy is not configured or proxy address excluded using `NO_PROXY` env
@@ -132,8 +144,8 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti
// bypass the target resolver and store the unresolved target address.
if target.URL.Scheme == "dns" && !targetResolutionEnabled {
r.targetResolverState = &resolver.State{
Addresses: []resolver.Address{{Addr: target.Endpoint()}},
Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}},
Addresses: []resolver.Address{{Addr: addr}},
Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: addr}}}},
}
r.updateTargetResolverState(*r.targetResolverState)
return r, nil
@@ -202,6 +214,44 @@ func needsProxyResolver(state *resolver.State) bool {
return false
}
// parseTarget takes a target string and ensures it is a valid "host:port" target.
//
// It does the following:
// 1. If the target already has a port (e.g., "host:port", "[ipv6]:port"),
// it is returned as is.
// 2. If the host part is empty (e.g., ":80"), it defaults to "localhost",
// returning "localhost:80".
// 3. If the target is missing a port (e.g., "host", "ipv6"), the defaultPort
// is added.
//
// An error is returned for empty targets or targets with a trailing colon
// but no port (e.g., "host:").
func parseTarget(target string) (string, error) {
if target == "" {
return "", fmt.Errorf("missing address")
}
host, port, err := net.SplitHostPort(target)
if err != nil {
// If SplitHostPort fails, it's likely because the port is missing.
// We append the default port and return the result.
return net.JoinHostPort(target, defaultPort), nil
}
// If SplitHostPort succeeds, we check for edge cases.
if port == "" {
// A success with an empty port means the target had a trailing colon,
// e.g., "host:", which is an error.
return "", fmt.Errorf("missing port after port-separator colon")
}
if host == "" {
// A success with an empty host means the target was like ":80".
// We default the host to "localhost".
host = "localhost"
}
return net.JoinHostPort(host, port), nil
}
func skipProxy(address resolver.Address) bool {
// Avoid proxy when network is not tcp.
networkType, ok := networktype.Get(address)


@@ -64,6 +64,16 @@ func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle,
}
}
// RecordInt64UpDownCount records the measurement alongside labels on the int
// up-down counter associated with the provided handle.
func (l *MetricsRecorderList) RecordInt64UpDownCount(handle *estats.Int64UpDownCountHandle, incr int64, labels ...string) {
verifyLabels(handle.Descriptor(), labels...)
for _, metricRecorder := range l.metricsRecorders {
metricRecorder.RecordInt64UpDownCount(handle, incr, labels...)
}
}
// RecordFloat64Count records the measurement alongside labels on the float
// count associated with the provided handle.
func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {

70 vendor/google.golang.org/grpc/internal/stats/stats.go (generated, vendored, new file)

@@ -0,0 +1,70 @@
/*
*
* Copyright 2025 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package stats
import (
"context"
"google.golang.org/grpc/stats"
)
type combinedHandler struct {
handlers []stats.Handler
}
// NewCombinedHandler combines multiple stats.Handlers into a single handler.
//
// It returns nil if no handlers are provided. If only one handler is
// provided, it is returned directly without wrapping.
func NewCombinedHandler(handlers ...stats.Handler) stats.Handler {
switch len(handlers) {
case 0:
return nil
case 1:
return handlers[0]
default:
return &combinedHandler{handlers: handlers}
}
}
func (ch *combinedHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
for _, h := range ch.handlers {
ctx = h.TagRPC(ctx, info)
}
return ctx
}
func (ch *combinedHandler) HandleRPC(ctx context.Context, stats stats.RPCStats) {
for _, h := range ch.handlers {
h.HandleRPC(ctx, stats)
}
}
func (ch *combinedHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
for _, h := range ch.handlers {
ctx = h.TagConn(ctx, info)
}
return ctx
}
func (ch *combinedHandler) HandleConn(ctx context.Context, stats stats.ConnStats) {
for _, h := range ch.handlers {
h.HandleConn(ctx, stats)
}
}
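A minimal usage sketch, where handlerA and handlerB are hypothetical stats.Handler implementations:

h := NewCombinedHandler(handlerA, handlerB)
if h != nil {
	ctx := h.TagRPC(context.Background(), &stats.RPCTagInfo{FullMethodName: "/pkg.Service/Method"})
	h.HandleRPC(ctx, &stats.Begin{}) // fans out to handlerA and handlerB in order
}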


@@ -29,25 +29,27 @@ import (
// ClientStream implements streaming functionality for a gRPC client.
type ClientStream struct {
*Stream // Embed for common stream functionality.
Stream // Embed for common stream functionality.
ct *http2Client
done chan struct{} // closed at the end of stream to unblock writers.
doneFunc func() // invoked at the end of stream.
headerChan chan struct{} // closed to indicate the end of header metadata.
headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
headerChan chan struct{} // closed to indicate the end of header metadata.
header metadata.MD // the received header metadata
status *status.Status // the status error received from the server
// Non-pointer fields are at the end to optimize GC allocations.
// headerValid indicates whether a valid header was received. Only
// meaningful after headerChan is closed (always call waitOnHeader() before
// reading its value).
headerValid bool
header metadata.MD // the received header metadata
noHeaders bool // set if the client never received headers (set only after the stream is done).
bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream
unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream
status *status.Status // the status error received from the server
headerValid bool
noHeaders bool // set if the client never received headers (set only after the stream is done).
headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream
unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream
}
// Read reads an n byte message from the input stream.
@@ -142,3 +144,11 @@ func (s *ClientStream) TrailersOnly() bool {
func (s *ClientStream) Status() *status.Status {
return s.status
}
func (s *ClientStream) requestRead(n int) {
s.ct.adjustWindow(s, uint32(n))
}
func (s *ClientStream) updateWindow(n int) {
s.ct.updateWindow(s, uint32(n))
}


@@ -496,6 +496,16 @@ const (
serverSide
)
// maxWriteBufSize is the maximum length (number of elements) the cached
// writeBuf can grow to. The length depends on the number of buffers
// contained within the BufferSlice produced by the codec, which is
// generally small.
//
// If a writeBuf larger than this limit is required, it will be allocated
// and freed after use, rather than being cached. This avoids holding
// on to large amounts of memory.
const maxWriteBufSize = 64
// Loopy receives frames from the control buffer.
// Each frame is handled individually; most of the work done by loopy goes
// into handling data frames. Loopy maintains a queue of active streams, and each
@@ -530,6 +540,8 @@ type loopyWriter struct {
// Side-specific handlers
ssGoAwayHandler func(*goAway) (bool, error)
writeBuf [][]byte // cached slice to avoid heap allocations for calls to mem.Reader.Peek.
}
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter {
@@ -665,11 +677,10 @@ func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
func (l *loopyWriter) registerStreamHandler(h *registerStream) {
str := &outStream{
id: h.streamID,
state: empty,
itl: &itemList{},
wq: h.wq,
reader: mem.BufferSlice{}.Reader(),
id: h.streamID,
state: empty,
itl: &itemList{},
wq: h.wq,
}
l.estdStreams[h.streamID] = str
}
@@ -701,11 +712,10 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
}
// Case 2: Client wants to originate stream.
str := &outStream{
id: h.streamID,
state: empty,
itl: &itemList{},
wq: h.wq,
reader: mem.BufferSlice{}.Reader(),
id: h.streamID,
state: empty,
itl: &itemList{},
wq: h.wq,
}
return l.originateStream(str, h)
}
@@ -948,11 +958,11 @@ func (l *loopyWriter) processData() (bool, error) {
if str == nil {
return true, nil
}
reader := str.reader
reader := &str.reader
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item for this stream.
if !dataItem.processing {
dataItem.processing = true
str.reader.Reset(dataItem.data)
reader.Reset(dataItem.data)
dataItem.data.Free()
}
// A data item is represented by a dataFrame, since it later translates into
@@ -964,11 +974,11 @@ func (l *loopyWriter) processData() (bool, error) {
if len(dataItem.h) == 0 && reader.Remaining() == 0 { // Empty data frame
// Client sends out empty data frame with endStream = true
if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
if err := l.framer.writeData(dataItem.streamID, dataItem.endStream, nil); err != nil {
return false, err
}
str.itl.dequeue() // remove the empty data item from stream
_ = reader.Close()
reader.Close()
if str.itl.isEmpty() {
str.state = empty
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
@@ -1001,25 +1011,20 @@ func (l *loopyWriter) processData() (bool, error) {
remainingBytes := len(dataItem.h) + reader.Remaining() - hSize - dSize
size := hSize + dSize
var buf *[]byte
if hSize != 0 && dSize == 0 {
buf = &dataItem.h
} else {
// Note: this is only necessary because the http2.Framer does not support
// partially writing a frame, so the sequence must be materialized into a buffer.
// TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed.
pool := l.bufferPool
if pool == nil {
// Note that this is only supposed to be nil in tests. Otherwise, stream is
// always initialized with a BufferPool.
pool = mem.DefaultBufferPool()
l.writeBuf = l.writeBuf[:0]
if hSize > 0 {
l.writeBuf = append(l.writeBuf, dataItem.h[:hSize])
}
if dSize > 0 {
var err error
l.writeBuf, err = reader.Peek(dSize, l.writeBuf)
if err != nil {
// This must never happen since the reader must have at least dSize
// bytes.
// Log an error to fail tests.
l.logger.Errorf("unexpected error while reading Data frame payload: %v", err)
return false, err
}
buf = pool.Get(size)
defer pool.Put(buf)
copy((*buf)[:hSize], dataItem.h)
_, _ = reader.Read((*buf)[hSize:])
}
// Now that outgoing flow controls are checked we can replenish str's write quota
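
Where the old path materialized header and payload into a single pooled buffer, the new path gathers [][]byte views via reader.Peek and hands them to the framer in one call. A rough sketch of the gather-write idea using net.Buffers, which performs a vectored write where the platform supports it (an analogy, not the transport's actual code):

package main

import (
	"net"
	"os"
)

func main() {
	// net.Buffers writes several byte slices in one call (writev where
	// available), so header and payload need not be copied together.
	bufs := net.Buffers{
		[]byte("frame-header:"),
		[]byte("payload"),
		[]byte("\n"),
	}
	if _, err := bufs.WriteTo(os.Stdout); err != nil {
		panic(err)
	}
}
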
@@ -1032,7 +1037,14 @@ func (l *loopyWriter) processData() (bool, error) {
if dataItem.onEachWrite != nil {
dataItem.onEachWrite()
}
if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil {
err := l.framer.writeData(dataItem.streamID, endStream, l.writeBuf)
reader.Discard(dSize)
if cap(l.writeBuf) > maxWriteBufSize {
l.writeBuf = nil
} else {
clear(l.writeBuf)
}
if err != nil {
return false, err
}
str.bytesOutStanding += size
@@ -1040,7 +1052,7 @@ func (l *loopyWriter) processData() (bool, error) {
dataItem.h = dataItem.h[hSize:]
if remainingBytes == 0 { // All the data from that message was written out.
_ = reader.Close()
reader.Close()
str.itl.dequeue()
}
if str.itl.isEmpty() {

View File

@@ -28,7 +28,7 @@ import (
// writeQuota is a soft limit on the amount of data a stream can
// schedule before some of it is written out.
type writeQuota struct {
quota int32
_ noCopy
// get waits on a read from ch when quota goes less than or equal to zero.
// replenish writes on it when quota goes positive again.
ch chan struct{}
@@ -38,16 +38,17 @@ type writeQuota struct {
// It is implemented as a field so that it can be updated
// by tests.
replenish func(n int)
quota int32
}
func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
w := &writeQuota{
quota: sz,
ch: make(chan struct{}, 1),
done: done,
}
// init allows a writeQuota to be initialized in-place, which is useful for
// resetting the quota or for avoiding a heap allocation when the writeQuota
// is embedded in another struct.
func (w *writeQuota) init(sz int32, done <-chan struct{}) {
w.quota = sz
w.ch = make(chan struct{}, 1)
w.done = done
w.replenish = w.realReplenish
return w
}
func (w *writeQuota) get(sz int32) error {
@@ -67,9 +68,9 @@ func (w *writeQuota) get(sz int32) error {
func (w *writeQuota) realReplenish(n int) {
sz := int32(n)
a := atomic.AddInt32(&w.quota, sz)
b := a - sz
if b <= 0 && a > 0 {
newQuota := atomic.AddInt32(&w.quota, sz)
previousQuota := newQuota - sz
if previousQuota <= 0 && newQuota > 0 {
select {
case w.ch <- struct{}{}:
default:
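
realReplenish signals the channel only when the quota crosses from non-positive to positive, so a blocked writer is woken exactly when progress becomes possible; the 1-capacity channel coalesces redundant wakeups. A self-contained sketch of that signaling pattern (simplified, no cancellation):

package main

import (
	"fmt"
	"sync/atomic"
)

type quota struct {
	n  atomic.Int32
	ch chan struct{} // capacity 1: at most one pending wakeup
}

func (q *quota) get(sz int32) {
	for {
		if q.n.Load() > 0 {
			q.n.Add(-sz) // soft limit: may go negative, like writeQuota
			return
		}
		<-q.ch // block until replenish signals positive quota
	}
}

func (q *quota) replenish(sz int32) {
	now := q.n.Add(sz)
	if now-sz <= 0 && now > 0 { // crossed from <=0 to >0: wake a waiter
		select {
		case q.ch <- struct{}{}:
		default: // a wakeup is already pending
		}
	}
}

func main() {
	q := &quota{ch: make(chan struct{}, 1)}
	done := make(chan struct{})
	go func() { q.get(10); close(done) }()
	q.replenish(32)
	<-done
	fmt.Println("remaining quota:", q.n.Load())
}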

View File

@@ -50,7 +50,7 @@ import (
// NewServerHandlerTransport returns a ServerTransport handling gRPC from
// inside an http.Handler, or writes an HTTP error to w and returns an error.
// It requires that the http Server supports HTTP/2.
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
if r.Method != http.MethodPost {
w.Header().Set("Allow", http.MethodPost)
msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
@@ -170,7 +170,7 @@ type serverHandlerTransport struct {
// TODO make sure this is consistent across handler_server and http2_server
contentSubtype string
stats []stats.Handler
stats stats.Handler
logger *grpclog.PrefixLogger
bufferPool mem.BufferPool
@@ -274,15 +274,13 @@ func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status
}
})
if err == nil { // transport has not been closed
if err == nil && ht.stats != nil { // transport has not been closed
// Note: The trailer fields are compressed with hpack after this call returns.
// No WireLength field is set here.
s.hdrMu.Lock()
for _, sh := range ht.stats {
sh.HandleRPC(s.Context(), &stats.OutTrailer{
Trailer: s.trailer.Copy(),
})
}
ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{
Trailer: s.trailer.Copy(),
})
s.hdrMu.Unlock()
}
ht.Close(errors.New("finished writing status"))
@@ -374,19 +372,23 @@ func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) e
ht.rw.(http.Flusher).Flush()
})
if err == nil {
for _, sh := range ht.stats {
// Note: The header fields are compressed with hpack after this call returns.
// No WireLength field is set here.
sh.HandleRPC(s.Context(), &stats.OutHeader{
Header: md.Copy(),
Compression: s.sendCompress,
})
}
if err == nil && ht.stats != nil {
// Note: The header fields are compressed with hpack after this call returns.
// No WireLength field is set here.
ht.stats.HandleRPC(s.Context(), &stats.OutHeader{
Header: md.Copy(),
Compression: s.sendCompress,
})
}
return err
}
func (ht *serverHandlerTransport) adjustWindow(*ServerStream, uint32) {
}
func (ht *serverHandlerTransport) updateWindow(*ServerStream, uint32) {
}
func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) {
// With this transport type there will be exactly 1 stream: this HTTP request.
var cancel context.CancelFunc
@@ -411,11 +413,9 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
req := ht.req
s := &ServerStream{
Stream: &Stream{
Stream: Stream{
id: 0, // irrelevant
ctx: ctx,
requestRead: func(int) {},
buf: newRecvBuffer(),
method: req.URL.Path,
recvCompress: req.Header.Get("grpc-encoding"),
contentSubtype: ht.contentSubtype,
@@ -424,9 +424,11 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
st: ht,
headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
}
s.trReader = &transportReader{
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
windowHandler: func(int) {},
s.Stream.buf.init()
s.readRequester = s
s.trReader = transportReader{
reader: recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: &s.buf},
windowHandler: s,
}
// readerDone is closed when the Body.Read-ing goroutine exits.
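
Across this diff, []stats.Handler fields collapse into a single stats.Handler produced by istats.NewCombinedHandler, so call sites test one nil value instead of looping. Presumably the combiner is a simple fan-out along these lines (a sketch with a stand-in Handler interface, not the actual internal/stats code):

package main

import (
	"context"
	"fmt"
)

// Handler loosely mirrors the shape of grpc's stats.Handler for this sketch.
type Handler interface {
	HandleRPC(ctx context.Context, event string)
}

type printHandler struct{ name string }

func (p printHandler) HandleRPC(_ context.Context, event string) {
	fmt.Println(p.name, "saw", event)
}

type combined []Handler

func (c combined) HandleRPC(ctx context.Context, event string) {
	for _, h := range c {
		h.HandleRPC(ctx, event)
	}
}

// newCombined returns nil for zero handlers and the handler itself for one,
// so the common cases pay no fan-out cost.
func newCombined(hs ...Handler) Handler {
	switch len(hs) {
	case 0:
		return nil
	case 1:
		return hs[0]
	default:
		return combined(hs)
	}
}

func main() {
	h := newCombined(printHandler{"a"}, printHandler{"b"})
	if h != nil {
		h.HandleRPC(context.Background(), "OutHeader")
	}
}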

View File

@@ -44,6 +44,7 @@ import (
"google.golang.org/grpc/internal/grpcutil"
imetadata "google.golang.org/grpc/internal/metadata"
"google.golang.org/grpc/internal/proxyattributes"
istats "google.golang.org/grpc/internal/stats"
istatus "google.golang.org/grpc/internal/status"
isyscall "google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/internal/transport/networktype"
@@ -105,7 +106,7 @@ type http2Client struct {
kp keepalive.ClientParameters
keepaliveEnabled bool
statsHandlers []stats.Handler
statsHandler stats.Handler
initialWindowSize int32
@@ -335,14 +336,14 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
writerDone: make(chan struct{}),
goAway: make(chan struct{}),
keepaliveDone: make(chan struct{}),
framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize, opts.BufferPool),
fc: &trInFlow{limit: uint32(icwz)},
scheme: scheme,
activeStreams: make(map[uint32]*ClientStream),
isSecure: isSecure,
perRPCCreds: perRPCCreds,
kp: kp,
statsHandlers: opts.StatsHandlers,
statsHandler: istats.NewCombinedHandler(opts.StatsHandlers...),
initialWindowSize: initialWindowSize,
nextID: 1,
maxConcurrentStreams: defaultMaxStreamsClient,
@@ -386,15 +387,14 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
updateFlowControl: t.updateFlowControl,
}
}
for _, sh := range t.statsHandlers {
t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
if t.statsHandler != nil {
t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
})
connBegin := &stats.ConnBegin{
t.statsHandler.HandleConn(t.ctx, &stats.ConnBegin{
Client: true,
}
sh.HandleConn(t.ctx, connBegin)
})
}
if t.keepaliveEnabled {
t.kpDormancyCond = sync.NewCond(&t.mu)
@@ -481,10 +481,9 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream {
// TODO(zhaoq): Handle uint32 overflow of Stream.id.
s := &ClientStream{
Stream: &Stream{
Stream: Stream{
method: callHdr.Method,
sendCompress: callHdr.SendCompress,
buf: newRecvBuffer(),
contentSubtype: callHdr.ContentSubtype,
},
ct: t,
@@ -492,26 +491,21 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientSt
headerChan: make(chan struct{}),
doneFunc: callHdr.DoneFunc,
}
s.wq = newWriteQuota(defaultWriteQuota, s.done)
s.requestRead = func(n int) {
t.adjustWindow(s, uint32(n))
}
s.Stream.buf.init()
s.Stream.wq.init(defaultWriteQuota, s.done)
s.readRequester = s
// The client side stream context should have exactly the same life cycle with the user provided context.
// That means, s.ctx should be read-only. And s.ctx is done iff ctx is done.
// So we use the original context here instead of creating a copy.
s.ctx = ctx
s.trReader = &transportReader{
reader: &recvBufferReader{
ctx: s.ctx,
ctxDone: s.ctx.Done(),
recv: s.buf,
closeStream: func(err error) {
s.Close(err)
},
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
s.trReader = transportReader{
reader: recvBufferReader{
ctx: s.ctx,
ctxDone: s.ctx.Done(),
recv: &s.buf,
clientStream: s,
},
windowHandler: s,
}
return s
}
@@ -823,7 +817,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS
return nil
},
onOrphaned: cleanup,
wq: s.wq,
wq: &s.wq,
}
firstTry := true
var ch chan struct{}
@@ -854,7 +848,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS
transportDrainRequired = t.nextID > MaxStreamID
s.id = hdr.streamID
s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
s.fc = inFlow{limit: uint32(t.initialWindowSize)}
t.activeStreams[s.id] = s
t.mu.Unlock()
@@ -905,27 +899,23 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS
return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
}
}
if len(t.statsHandlers) != 0 {
if t.statsHandler != nil {
header, ok := metadata.FromOutgoingContext(ctx)
if ok {
header.Set("user-agent", t.userAgent)
} else {
header = metadata.Pairs("user-agent", t.userAgent)
}
for _, sh := range t.statsHandlers {
// Note: The header fields are compressed with hpack after this call returns.
// No WireLength field is set here.
// Note: Creating a new stats object to prevent pollution.
outHeader := &stats.OutHeader{
Client: true,
FullMethod: callHdr.Method,
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
Compression: callHdr.SendCompress,
Header: header,
}
sh.HandleRPC(s.ctx, outHeader)
}
// Note: The header fields are compressed with hpack after this call returns.
// No WireLength field is set here.
t.statsHandler.HandleRPC(s.ctx, &stats.OutHeader{
Client: true,
FullMethod: callHdr.Method,
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
Compression: callHdr.SendCompress,
Header: header,
})
}
if transportDrainRequired {
if t.logger.V(logLevel) {
@@ -1002,6 +992,9 @@ func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode
// accessed anymore.
func (t *http2Client) Close(err error) {
t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10))
// For background on the deadline value chosen here, see
// https://github.com/grpc/grpc-go/issues/8425#issuecomment-3057938248 .
t.conn.SetReadDeadline(time.Now().Add(time.Second))
t.mu.Lock()
// Make sure we only close once.
if t.state == closing {
@@ -1063,11 +1056,10 @@ func (t *http2Client) Close(err error) {
for _, s := range streams {
t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
}
for _, sh := range t.statsHandlers {
connEnd := &stats.ConnEnd{
if t.statsHandler != nil {
t.statsHandler.HandleConn(t.ctx, &stats.ConnEnd{
Client: true,
}
sh.HandleConn(t.ctx, connEnd)
})
}
}
@@ -1178,7 +1170,7 @@ func (t *http2Client) updateFlowControl(n uint32) {
})
}
func (t *http2Client) handleData(f *http2.DataFrame) {
func (t *http2Client) handleData(f *parsedDataFrame) {
size := f.Header().Length
var sendBDPPing bool
if t.bdpEst != nil {
@@ -1222,22 +1214,15 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false)
return
}
dataLen := f.data.Len()
if f.Header().Flags.Has(http2.FlagDataPadded) {
if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
if w := s.fc.onRead(size - uint32(dataLen)); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
}
}
// TODO(bradfitz, zhaoq): A copy is required here because there is no
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
pool := t.bufferPool
if pool == nil {
// Note that this is only supposed to be nil in tests. Otherwise, stream is
// always initialized with a BufferPool.
pool = mem.DefaultBufferPool()
}
s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
if dataLen > 0 {
f.data.Ref()
s.write(recvMsg{buffer: f.data})
}
}
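
handleData now takes an extra reference on the frame's pooled buffer before handing it to the stream, and the reader loop frees its own reference after dispatch (frame.data.Free() further down). A minimal reference-counting sketch showing why the two sides balance:

package main

import (
	"fmt"
	"sync/atomic"
)

type buffer struct {
	refs atomic.Int32
	data []byte
}

func newBuffer(data []byte) *buffer {
	b := &buffer{data: data}
	b.refs.Store(1) // the creator (the reader loop) holds one reference
	return b
}

func (b *buffer) Ref() { b.refs.Add(1) }

func (b *buffer) Free() {
	if b.refs.Add(-1) == 0 {
		fmt.Println("returning buffer to pool")
		b.data = nil
	}
}

func main() {
	buf := newBuffer([]byte("payload"))
	buf.Ref()  // handleData: the stream now co-owns the buffer
	buf.Free() // reader loop: done dispatching the frame
	buf.Free() // stream consumer: done reading the message
}
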
// The server has closed the stream without sending trailers. Record that
@@ -1477,17 +1462,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
contentTypeErr = "malformed header: missing HTTP content-type"
grpcMessage string
recvCompress string
httpStatusCode *int
httpStatusErr string
rawStatusCode = codes.Unknown
// the code from the grpc-status header, if present
grpcStatusCode = codes.Unknown
// headerError is set if an error is encountered while parsing the headers
headerError string
httpStatus string
)
if initialHeader {
httpStatusErr = "malformed header: missing HTTP status"
}
for _, hf := range frame.Fields {
switch hf.Name {
case "content-type":
@@ -1507,36 +1489,11 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
return
}
rawStatusCode = codes.Code(uint32(code))
grpcStatusCode = codes.Code(uint32(code))
case "grpc-message":
grpcMessage = decodeGrpcMessage(hf.Value)
case ":status":
c, err := strconv.ParseInt(hf.Value, 10, 32)
if err != nil {
se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err))
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
return
}
statusCode := int(c)
if statusCode >= 100 && statusCode < 200 {
if endStream {
se := status.New(codes.Internal, fmt.Sprintf(
"protocol error: informational header with status code %d must not have END_STREAM set", statusCode))
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
}
return
}
httpStatusCode = &statusCode
if statusCode == 200 {
httpStatusErr = ""
break
}
httpStatusErr = fmt.Sprintf(
"unexpected HTTP status code received from server: %d (%s)",
statusCode,
http.StatusText(statusCode),
)
httpStatus = hf.Value
default:
if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
break
@@ -1551,25 +1508,52 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
}
}
if !isGRPC || httpStatusErr != "" {
var code = codes.Internal // when header does not include HTTP status, return INTERNAL
if httpStatusCode != nil {
// If a non-gRPC response is received, evaluate the HTTP status to
// process the response and close the stream.
// If the HTTP status provides no error information (status 200),
// evaluate the response code as Unknown.
if !isGRPC {
var grpcErrorCode = codes.Internal
if httpStatus == "" {
httpStatusErr = "malformed header: missing HTTP status"
} else {
// Parse the status code (e.g. "200", "404").
statusCode, err := strconv.Atoi(httpStatus)
if err != nil {
se := status.New(grpcErrorCode, fmt.Sprintf("transport: malformed http-status: %v", err))
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
return
}
if statusCode >= 100 && statusCode < 200 {
if endStream {
se := status.New(codes.Internal, fmt.Sprintf(
"protocol error: informational header with status code %d must not have END_STREAM set", statusCode))
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
}
// In case of informational headers, return.
return
}
httpStatusErr = fmt.Sprintf(
"unexpected HTTP status code received from server: %d (%s)",
statusCode,
http.StatusText(statusCode),
)
var ok bool
code, ok = HTTPStatusConvTab[*httpStatusCode]
grpcErrorCode, ok = HTTPStatusConvTab[statusCode]
if !ok {
code = codes.Unknown
grpcErrorCode = codes.Unknown
}
}
var errs []string
if httpStatusErr != "" {
errs = append(errs, httpStatusErr)
}
if contentTypeErr != "" {
errs = append(errs, contentTypeErr)
}
// Verify the HTTP response is a 200.
se := status.New(code, strings.Join(errs, "; "))
se := status.New(grpcErrorCode, strings.Join(errs, "; "))
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
return
}
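
The rewritten block defers HTTP status parsing until a non-gRPC response is actually seen, then maps the status through HTTPStatusConvTab with an Unknown fallback (and Internal when the status header is missing or malformed). A compact sketch of that decision logic; the table entries below are illustrative stand-ins for the real conversion table:

package main

import (
	"fmt"
	"strconv"
)

// conv mirrors the idea of HTTPStatusConvTab: known statuses get a
// specific code, everything else falls back to "Unknown".
var conv = map[int]string{
	401: "Unauthenticated",
	403: "PermissionDenied",
	404: "Unimplemented",
	503: "Unavailable",
}

func codeForHTTPStatus(raw string) string {
	if raw == "" {
		return "Internal" // malformed header: missing HTTP status
	}
	status, err := strconv.Atoi(raw)
	if err != nil {
		return "Internal" // malformed http-status value
	}
	if c, ok := conv[status]; ok {
		return c
	}
	return "Unknown"
}

func main() {
	for _, s := range []string{"", "abc", "404", "418"} {
		fmt.Printf("%q -> %s\n", s, codeForHTTPStatus(s))
	}
}
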
@@ -1600,22 +1584,20 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
}
}
for _, sh := range t.statsHandlers {
if t.statsHandler != nil {
if !endStream {
inHeader := &stats.InHeader{
t.statsHandler.HandleRPC(s.ctx, &stats.InHeader{
Client: true,
WireLength: int(frame.Header().Length),
Header: metadata.MD(mdata).Copy(),
Compression: s.recvCompress,
}
sh.HandleRPC(s.ctx, inHeader)
})
} else {
inTrailer := &stats.InTrailer{
t.statsHandler.HandleRPC(s.ctx, &stats.InTrailer{
Client: true,
WireLength: int(frame.Header().Length),
Trailer: metadata.MD(mdata).Copy(),
}
sh.HandleRPC(s.ctx, inTrailer)
})
}
}
@@ -1623,7 +1605,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
return
}
status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
status := istatus.NewWithProto(grpcStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
// If client received END_STREAM from server while stream was still active,
// send RST_STREAM.
@@ -1670,7 +1652,7 @@ func (t *http2Client) reader(errCh chan<- error) {
// loop to keep reading incoming messages on this transport.
for {
t.controlBuf.throttle()
frame, err := t.framer.fr.ReadFrame()
frame, err := t.framer.readFrame()
if t.keepaliveEnabled {
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
}
@@ -1685,7 +1667,7 @@ func (t *http2Client) reader(errCh chan<- error) {
if s != nil {
// use error detail to provide better err message
code := http2ErrConvTab[se.Code]
errorDetail := t.framer.fr.ErrorDetail()
errorDetail := t.framer.errorDetail()
var msg string
if errorDetail != nil {
msg = errorDetail.Error()
@@ -1703,8 +1685,9 @@ func (t *http2Client) reader(errCh chan<- error) {
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
t.operateHeaders(frame)
case *http2.DataFrame:
case *parsedDataFrame:
t.handleData(frame)
frame.data.Free()
case *http2.RSTStreamFrame:
t.handleRSTStream(frame)
case *http2.SettingsFrame:

View File

@@ -35,6 +35,8 @@ import (
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/protobuf/proto"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
@@ -42,7 +44,6 @@ import (
istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/mem"
"google.golang.org/protobuf/proto"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
@@ -86,7 +87,7 @@ type http2Server struct {
// updates, reset streams, and various settings) to the controller.
controlBuf *controlBuffer
fc *trInFlow
stats []stats.Handler
stats stats.Handler
// Keepalive and max-age parameters for the server.
kp keepalive.ServerParameters
// Keepalive enforcement policy.
@@ -168,7 +169,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
if config.MaxHeaderListSize != nil {
maxHeaderListSize = *config.MaxHeaderListSize
}
framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize)
framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize, config.BufferPool)
// Send initial settings as connection preface to client.
isettings := []http2.Setting{{
ID: http2.SettingMaxFrameSize,
@@ -260,7 +261,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
fc: &trInFlow{limit: uint32(icwz)},
state: reachable,
activeStreams: make(map[uint32]*ServerStream),
stats: config.StatsHandlers,
stats: config.StatsHandler,
kp: kp,
idle: time.Now(),
kep: kep,
@@ -390,16 +391,15 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
}
t.maxStreamID = streamID
buf := newRecvBuffer()
s := &ServerStream{
Stream: &Stream{
id: streamID,
buf: buf,
fc: &inFlow{limit: uint32(t.initialWindowSize)},
Stream: Stream{
id: streamID,
fc: inFlow{limit: uint32(t.initialWindowSize)},
},
st: t,
headerWireLength: int(frame.Header().Length),
}
s.Stream.buf.init()
var (
// if false, content-type was missing or invalid
isGRPC = false
@@ -640,25 +640,21 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
t.channelz.SocketMetrics.StreamsStarted.Add(1)
t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano())
}
s.requestRead = func(n int) {
t.adjustWindow(s, uint32(n))
}
s.readRequester = s
s.ctxDone = s.ctx.Done()
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
s.trReader = &transportReader{
reader: &recvBufferReader{
s.Stream.wq.init(defaultWriteQuota, s.ctxDone)
s.trReader = transportReader{
reader: recvBufferReader{
ctx: s.ctx,
ctxDone: s.ctxDone,
recv: s.buf,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
recv: &s.buf,
},
windowHandler: s,
}
// Register the stream with loopy.
t.controlBuf.put(&registerStream{
streamID: s.id,
wq: s.wq,
wq: &s.wq,
})
handle(s)
return nil
@@ -674,7 +670,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre
}()
for {
t.controlBuf.throttle()
frame, err := t.framer.fr.ReadFrame()
frame, err := t.framer.readFrame()
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
if err != nil {
if se, ok := err.(http2.StreamError); ok {
@@ -711,8 +707,9 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre
})
continue
}
case *http2.DataFrame:
case *parsedDataFrame:
t.handleData(frame)
frame.data.Free()
case *http2.RSTStreamFrame:
t.handleRSTStream(frame)
case *http2.SettingsFrame:
@@ -792,7 +789,7 @@ func (t *http2Server) updateFlowControl(n uint32) {
}
func (t *http2Server) handleData(f *http2.DataFrame) {
func (t *http2Server) handleData(f *parsedDataFrame) {
size := f.Header().Length
var sendBDPPing bool
if t.bdpEst != nil {
@@ -837,22 +834,15 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
t.closeStream(s, true, http2.ErrCodeFlowControl, false)
return
}
dataLen := f.data.Len()
if f.Header().Flags.Has(http2.FlagDataPadded) {
if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
if w := s.fc.onRead(size - uint32(dataLen)); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
}
}
// TODO(bradfitz, zhaoq): A copy is required here because there is no
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
pool := t.bufferPool
if pool == nil {
// Note that this is only supposed to be nil in tests. Otherwise, stream is
// always initialized with a BufferPool.
pool = mem.DefaultBufferPool()
}
s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
if dataLen > 0 {
f.data.Ref()
s.write(recvMsg{buffer: f.data})
}
}
if f.StreamEnded() {
@@ -1059,14 +1049,13 @@ func (t *http2Server) writeHeaderLocked(s *ServerStream) error {
t.closeStream(s, true, http2.ErrCodeInternal, false)
return ErrHeaderListSizeLimitViolation
}
for _, sh := range t.stats {
if t.stats != nil {
// Note: Headers are compressed with hpack after this call returns.
// No WireLength field is set here.
outHeader := &stats.OutHeader{
t.stats.HandleRPC(s.Context(), &stats.OutHeader{
Header: s.header.Copy(),
Compression: s.sendCompress,
}
sh.HandleRPC(s.Context(), outHeader)
})
}
return nil
}
@@ -1134,10 +1123,10 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error {
// Send a RST_STREAM after the trailers if the client has not already half-closed.
rst := s.getState() == streamActive
t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
for _, sh := range t.stats {
if t.stats != nil {
// Note: The trailer fields are compressed with hpack after this call returns.
// No WireLength field is set here.
sh.HandleRPC(s.Context(), &stats.OutTrailer{
t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
Trailer: s.trailer.Copy(),
})
}
@@ -1305,7 +1294,8 @@ func (t *http2Server) Close(err error) {
// deleteStream deletes the stream s from transport's active streams.
func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
t.mu.Lock()
if _, ok := t.activeStreams[s.id]; ok {
_, isActive := t.activeStreams[s.id]
if isActive {
delete(t.activeStreams, s.id)
if len(t.activeStreams) == 0 {
t.idle = time.Now()
@@ -1313,7 +1303,7 @@ func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
}
t.mu.Unlock()
if channelz.IsOn() {
if isActive && channelz.IsOn() {
if eosReceived {
t.channelz.SocketMetrics.StreamsSucceeded.Add(1)
} else {

View File

@@ -25,7 +25,6 @@ import (
"fmt"
"io"
"math"
"net"
"net/http"
"net/url"
"strconv"
@@ -37,6 +36,7 @@ import (
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/mem"
)
const (
@@ -300,11 +300,11 @@ type bufWriter struct {
buf []byte
offset int
batchSize int
conn net.Conn
conn io.Writer
err error
}
func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
func newBufWriter(conn io.Writer, batchSize int, pool *sync.Pool) *bufWriter {
w := &bufWriter{
batchSize: batchSize,
conn: conn,
@@ -388,15 +388,35 @@ func toIOError(err error) error {
return ioError{error: err}
}
type parsedDataFrame struct {
http2.FrameHeader
data mem.Buffer
}
func (df *parsedDataFrame) StreamEnded() bool {
return df.FrameHeader.Flags.Has(http2.FlagDataEndStream)
}
type framer struct {
writer *bufWriter
fr *http2.Framer
writer *bufWriter
fr *http2.Framer
headerBuf []byte // cached slice for framer headers to reduce heap allocs.
reader io.Reader
dataFrame parsedDataFrame // Cached data frame to avoid heap allocations.
pool mem.BufferPool
errDetail error
}
var writeBufferPoolMap = make(map[int]*sync.Pool)
var writeBufferMutex sync.Mutex
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {
func newFramer(conn io.ReadWriter, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32, memPool mem.BufferPool) *framer {
if memPool == nil {
// Note that this is only supposed to be nil in tests. Otherwise, the
// framer is always initialized with a BufferPool.
memPool = mem.DefaultBufferPool()
}
if writeBufferSize < 0 {
writeBufferSize = 0
}
@@ -412,6 +432,8 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu
f := &framer{
writer: w,
fr: http2.NewFramer(w, r),
reader: r,
pool: memPool,
}
f.fr.SetMaxReadFrameSize(http2MaxFrameLen)
// Opt-in to Frame reuse API on framer to reduce garbage.
@@ -422,6 +444,146 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu
return f
}
// writeData writes a DATA frame.
//
// It is the caller's responsibility not to violate the maximum frame size.
func (f *framer) writeData(streamID uint32, endStream bool, data [][]byte) error {
var flags http2.Flags
if endStream {
flags = http2.FlagDataEndStream
}
length := uint32(0)
for _, d := range data {
length += uint32(len(d))
}
// TODO: Replace the header write with the framer API being added in
// https://github.com/golang/go/issues/66655.
f.headerBuf = append(f.headerBuf[:0],
byte(length>>16),
byte(length>>8),
byte(length),
byte(http2.FrameData),
byte(flags),
byte(streamID>>24),
byte(streamID>>16),
byte(streamID>>8),
byte(streamID))
if _, err := f.writer.Write(f.headerBuf); err != nil {
return err
}
for _, d := range data {
if _, err := f.writer.Write(d); err != nil {
return err
}
}
return nil
}
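
writeData assembles the 9-byte HTTP/2 frame header by hand: a 24-bit big-endian length, one type byte, one flags byte, and a 31-bit stream ID. A small standalone check of that layout:

package main

import (
	"encoding/binary"
	"fmt"
)

func frameHeader(length, streamID uint32, typ, flags byte) []byte {
	// HTTP/2 frame header: 3-byte length, 1-byte type, 1-byte flags,
	// 4-byte stream ID (high bit reserved, always zero here).
	return []byte{
		byte(length >> 16), byte(length >> 8), byte(length),
		typ, flags,
		byte(streamID >> 24), byte(streamID >> 16),
		byte(streamID >> 8), byte(streamID),
	}
}

func main() {
	h := frameHeader(5, 1, 0x0 /* DATA */, 0x1 /* END_STREAM */)
	fmt.Printf("%x\n", h) // 000005000100000001
	// Cross-check the stream ID bytes with encoding/binary.
	fmt.Println(binary.BigEndian.Uint32(h[5:9]) == 1)
}
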
// readFrame reads a single frame. The returned Frame is only valid
// until the next call to readFrame.
func (f *framer) readFrame() (any, error) {
f.errDetail = nil
fh, err := f.fr.ReadFrameHeader()
if err != nil {
f.errDetail = f.fr.ErrorDetail()
return nil, err
}
// Read the data frame directly from the underlying io.Reader to avoid
// copies.
if fh.Type == http2.FrameData {
err = f.readDataFrame(fh)
return &f.dataFrame, err
}
fr, err := f.fr.ReadFrameForHeader(fh)
if err != nil {
f.errDetail = f.fr.ErrorDetail()
return nil, err
}
return fr, err
}
// errorDetail returns a more detailed error of the last error
// returned by framer.readFrame. For instance, if readFrame
// returns a StreamError with code PROTOCOL_ERROR, errorDetail
// will say exactly what was invalid. errorDetail is not guaranteed
// to return a non-nil value.
// errorDetail is reset after the next call to readFrame.
func (f *framer) errorDetail() error {
return f.errDetail
}
func (f *framer) readDataFrame(fh http2.FrameHeader) (err error) {
if fh.StreamID == 0 {
// DATA frames MUST be associated with a stream. If a
// DATA frame is received whose stream identifier
// field is 0x0, the recipient MUST respond with a
// connection error (Section 5.4.1) of type
// PROTOCOL_ERROR.
f.errDetail = errors.New("DATA frame with stream ID 0")
return http2.ConnectionError(http2.ErrCodeProtocol)
}
// Converting a *[]byte to a mem.SliceBuffer incurs a heap allocation. This
// conversion is performed by mem.NewBuffer. To avoid the extra allocation
// a []byte is allocated directly if required and cast to a mem.SliceBuffer.
var buf []byte
// poolHandle is the pointer returned by the buffer pool (if it is used).
var poolHandle *[]byte
useBufferPool := !mem.IsBelowBufferPoolingThreshold(int(fh.Length))
if useBufferPool {
poolHandle = f.pool.Get(int(fh.Length))
buf = *poolHandle
defer func() {
if err != nil {
f.pool.Put(poolHandle)
}
}()
} else {
buf = make([]byte, int(fh.Length))
}
if fh.Flags.Has(http2.FlagDataPadded) {
if fh.Length == 0 {
return io.ErrUnexpectedEOF
}
// This initial 1-byte read can be inefficient for unbuffered readers,
// but it allows the rest of the payload to be read directly to the
// start of the destination slice. This makes it easy to return the
// original slice back to the buffer pool.
if _, err := io.ReadFull(f.reader, buf[:1]); err != nil {
return err
}
padSize := buf[0]
buf = buf[:len(buf)-1]
if int(padSize) > len(buf) {
// If the length of the padding is greater than the
// length of the frame payload, the recipient MUST
// treat this as a connection error.
// Filed: https://github.com/http2/http2-spec/issues/610
f.errDetail = errors.New("pad size larger than data payload")
return http2.ConnectionError(http2.ErrCodeProtocol)
}
if _, err := io.ReadFull(f.reader, buf); err != nil {
return err
}
buf = buf[:len(buf)-int(padSize)]
} else if _, err := io.ReadFull(f.reader, buf); err != nil {
return err
}
f.dataFrame.FrameHeader = fh
if useBufferPool {
// Update the handle to point to the (potentially re-sliced) buf.
*poolHandle = buf
f.dataFrame.data = mem.NewBuffer(poolHandle, f.pool)
} else {
f.dataFrame.data = mem.SliceBuffer(buf)
}
return nil
}
func (df *parsedDataFrame) Header() http2.FrameHeader {
return df.FrameHeader
}
func getWriteBufferPool(size int) *sync.Pool {
writeBufferMutex.Lock()
defer writeBufferMutex.Unlock()

View File

@@ -32,7 +32,7 @@ import (
// ServerStream implements streaming functionality for a gRPC server.
type ServerStream struct {
*Stream // Embed for common stream functionality.
Stream // Embed for common stream functionality.
st internalServerTransport
ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance)
@@ -43,12 +43,13 @@ type ServerStream struct {
// Holds compressor names passed in grpc-accept-encoding metadata from the
// client.
clientAdvertisedCompressors string
headerWireLength int
// hdrMu protects outgoing header and trailer metadata.
hdrMu sync.Mutex
header metadata.MD // the outgoing header metadata. Updated by WriteHeader.
headerSent atomic.Bool // atomically set when the headers are sent out.
headerWireLength int
}
// Read reads an n byte message from the input stream.
@@ -178,3 +179,11 @@ func (s *ServerStream) SetTrailer(md metadata.MD) error {
s.hdrMu.Unlock()
return nil
}
func (s *ServerStream) requestRead(n int) {
s.st.adjustWindow(s, uint32(n))
}
func (s *ServerStream) updateWindow(n int) {
s.st.updateWindow(s, uint32(n))
}

View File

@@ -68,11 +68,11 @@ type recvBuffer struct {
err error
}
func newRecvBuffer() *recvBuffer {
b := &recvBuffer{
c: make(chan recvMsg, 1),
}
return b
// init allows a recvBuffer to be initialized in-place, which is useful
// for resetting a buffer or for avoiding a heap allocation when the buffer
// is embedded in another struct.
func (b *recvBuffer) init() {
b.c = make(chan recvMsg, 1)
}
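
Replacing the newRecvBuffer constructor with an init method lets the buffer live as a value inside Stream, so a single stream allocation covers what used to be several small heap objects. The embed-and-init pattern in isolation:

package main

import "fmt"

type recvQueue struct {
	c chan string
}

// init prepares a zero-value recvQueue in place; no separate allocation.
func (q *recvQueue) init() { q.c = make(chan string, 1) }

type stream struct {
	id  uint32
	buf recvQueue // embedded by value, not *recvQueue
}

func main() {
	s := &stream{id: 1} // one allocation for the whole stream
	s.buf.init()
	s.buf.c <- "hello"
	fmt.Println(<-s.buf.c)
}
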
func (b *recvBuffer) put(r recvMsg) {
@@ -123,12 +123,13 @@ func (b *recvBuffer) get() <-chan recvMsg {
// recvBufferReader implements io.Reader interface to read the data from
// recvBuffer.
type recvBufferReader struct {
closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
ctx context.Context
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
recv *recvBuffer
last mem.Buffer // Stores the remaining data in the previous calls.
err error
_ noCopy
clientStream *ClientStream // The client transport stream is closed with a status representing ctx.Err() and nil trailer metadata.
ctx context.Context
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
recv *recvBuffer
last mem.Buffer // Stores the remaining data in the previous calls.
err error
}
func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) {
@@ -139,7 +140,7 @@ func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) {
n, r.last = mem.ReadUnsafe(header, r.last)
return n, nil
}
if r.closeStream != nil {
if r.clientStream != nil {
n, r.err = r.readMessageHeaderClient(header)
} else {
n, r.err = r.readMessageHeader(header)
@@ -164,7 +165,7 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
}
return buf, nil
}
if r.closeStream != nil {
if r.clientStream != nil {
buf, r.err = r.readClient(n)
} else {
buf, r.err = r.read(n)
@@ -209,7 +210,7 @@ func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err er
// TODO: delaying ctx error seems like an unnecessary side effect. What
// we really want is to mark the stream as done, and return ctx error
// faster.
r.closeStream(ContextErr(r.ctx.Err()))
r.clientStream.Close(ContextErr(r.ctx.Err()))
m := <-r.recv.get()
return r.readMessageHeaderAdditional(m, header)
case m := <-r.recv.get():
@@ -236,7 +237,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
// TODO: delaying ctx error seems like an unnecessary side effect. What
// we really want is to mark the stream as done, and return ctx error
// faster.
r.closeStream(ContextErr(r.ctx.Err()))
r.clientStream.Close(ContextErr(r.ctx.Err()))
m := <-r.recv.get()
return r.readAdditional(m, n)
case m := <-r.recv.get():
@@ -285,27 +286,32 @@ const (
// Stream represents an RPC in the transport layer.
type Stream struct {
id uint32
ctx context.Context // the associated context of the stream
method string // the associated RPC method of the stream
recvCompress string
sendCompress string
buf *recvBuffer
trReader *transportReader
fc *inFlow
wq *writeQuota
// Callback to state application's intentions to read data. This
// is used to adjust flow control, if needed.
requestRead func(int)
state streamState
readRequester readRequester
// contentSubtype is the content-subtype for requests.
// this must be lowercase or the behavior is undefined.
contentSubtype string
trailer metadata.MD // the key-value map of trailer metadata.
// Non-pointer fields are at the end to optimize GC performance.
state streamState
id uint32
buf recvBuffer
trReader transportReader
fc inFlow
wq writeQuota
}
// readRequester is used to signal the application's intention to read data.
// This is used to adjust flow control, if needed.
type readRequester interface {
requestRead(int)
}
func (s *Stream) swapState(st streamState) streamState {
@@ -355,7 +361,7 @@ func (s *Stream) ReadMessageHeader(header []byte) (err error) {
if er := s.trReader.er; er != nil {
return er
}
s.requestRead(len(header))
s.readRequester.requestRead(len(header))
for len(header) != 0 {
n, err := s.trReader.ReadMessageHeader(header)
header = header[n:]
@@ -378,7 +384,7 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) {
if er := s.trReader.er; er != nil {
return nil, er
}
s.requestRead(n)
s.readRequester.requestRead(n)
for n != 0 {
buf, err := s.trReader.Read(n)
var bufLen int
@@ -401,16 +407,34 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) {
return data, nil
}
// noCopy may be embedded into structs which must not be copied
// after the first use.
//
// See https://golang.org/issues/8005#issuecomment-190753527
// for details.
type noCopy struct {
}
func (*noCopy) Lock() {}
func (*noCopy) Unlock() {}
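
noCopy is the usual vet trick: it satisfies sync.Locker, and go vet's copylocks check reports any value copy of a struct that contains a Locker. For example, vet flags the assignment in main below:

package main

import "fmt"

// noCopy triggers go vet's copylocks check when embedded in a struct.
type noCopy struct{}

func (*noCopy) Lock()   {}
func (*noCopy) Unlock() {}

type reader struct {
	_   noCopy
	pos int
}

func main() {
	r := reader{pos: 3}
	bad := r // go vet reports: assignment copies lock value
	fmt.Println(bad.pos)
}
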
// transportReader reads all the data available for this Stream from the transport and
// passes them into the decoder, which converts them into a gRPC message stream.
// The error is io.EOF when the stream is done or another non-nil error if
// the stream broke.
type transportReader struct {
reader *recvBufferReader
_ noCopy
// The handler to control the window update procedure for both this
// particular stream and the associated transport.
windowHandler func(int)
windowHandler windowHandler
er error
reader recvBufferReader
}
// windowHandler controls the window update procedure for both this
// particular stream and the associated transport.
type windowHandler interface {
updateWindow(int)
}
func (t *transportReader) ReadMessageHeader(header []byte) (int, error) {
@@ -419,7 +443,7 @@ func (t *transportReader) ReadMessageHeader(header []byte) (int, error) {
t.er = err
return 0, err
}
t.windowHandler(n)
t.windowHandler.updateWindow(n)
return n, nil
}
@@ -429,7 +453,7 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) {
t.er = err
return buf, err
}
t.windowHandler(buf.Len())
t.windowHandler.updateWindow(buf.Len())
return buf, nil
}
@@ -454,7 +478,7 @@ type ServerConfig struct {
ConnectionTimeout time.Duration
Credentials credentials.TransportCredentials
InTapHandle tap.ServerInHandle
StatsHandlers []stats.Handler
StatsHandler stats.Handler
KeepaliveParams keepalive.ServerParameters
KeepalivePolicy keepalive.EnforcementPolicy
InitialWindowSize int32
@@ -615,6 +639,8 @@ type internalServerTransport interface {
write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error
writeStatus(s *ServerStream, st *status.Status) error
incrMsgRecv()
adjustWindow(s *ServerStream, n uint32)
updateWindow(s *ServerStream, n uint32)
}
// connectionErrorf creates an ConnectionError with the specified error description.

View File

@@ -32,6 +32,9 @@ type BufferPool interface {
Get(length int) *[]byte
// Put returns a buffer to the pool.
//
// The provided pointer must hold a prefix of the buffer obtained via
// BufferPool.Get to ensure the buffer's entire capacity can be re-used.
Put(*[]byte)
}
@@ -118,7 +121,11 @@ type sizedBufferPool struct {
}
func (p *sizedBufferPool) Get(size int) *[]byte {
buf := p.pool.Get().(*[]byte)
buf, ok := p.pool.Get().(*[]byte)
if !ok {
buf := make([]byte, size, p.defaultSize)
return &buf
}
b := *buf
clear(b[:cap(b)])
*buf = b[:size]
@@ -137,12 +144,6 @@ func (p *sizedBufferPool) Put(buf *[]byte) {
func newSizedBufferPool(size int) *sizedBufferPool {
return &sizedBufferPool{
pool: sync.Pool{
New: func() any {
buf := make([]byte, size)
return &buf
},
},
defaultSize: size,
}
}
@@ -160,6 +161,7 @@ type simpleBufferPool struct {
func (p *simpleBufferPool) Get(size int) *[]byte {
bs, ok := p.pool.Get().(*[]byte)
if ok && cap(*bs) >= size {
clear((*bs)[:cap(*bs)])
*bs = (*bs)[:size]
return bs
}
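
The expanded Put documentation explains why readDataFrame (earlier in this diff) writes the re-sliced buf back through poolHandle: Put must receive a pointer to a prefix of the original allocation so the full capacity survives the round trip. A sketch of honoring that contract with a plain sync.Pool:

package main

import (
	"fmt"
	"sync"
)

var pool = sync.Pool{New: func() any { b := make([]byte, 1024); return &b }}

func main() {
	handle := pool.Get().(*[]byte)
	buf := (*handle)[:16] // use only a prefix of the pooled buffer
	copy(buf, "frame payload...")

	// Write the prefix back through the handle before returning it, so
	// the pool keeps a slice whose capacity is the full allocation.
	*handle = buf
	pool.Put(handle)

	reused := pool.Get().(*[]byte)
	fmt.Println("capacity preserved:", cap(*reused) == 1024)
}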

View File

@@ -19,6 +19,7 @@
package mem
import (
"fmt"
"io"
)
@@ -117,43 +118,36 @@ func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
// Reader returns a new Reader for the input slice after taking references to
// each underlying buffer.
func (s BufferSlice) Reader() Reader {
func (s BufferSlice) Reader() *Reader {
s.Ref()
return &sliceReader{
return &Reader{
data: s,
len: s.Len(),
}
}
// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
// with other parts systems. It also provides an additional convenience method
// Remaining(), which returns the number of unread bytes remaining in the slice.
// with other systems.
//
// Buffers will be freed as they are read.
type Reader interface {
io.Reader
io.ByteReader
// Close frees the underlying BufferSlice and never returns an error. Subsequent
// calls to Read will return (0, io.EOF).
Close() error
// Remaining returns the number of unread bytes remaining in the slice.
Remaining() int
// Reset frees the currently held buffer slice and starts reading from the
// provided slice. This allows reusing the reader object.
Reset(s BufferSlice)
}
type sliceReader struct {
//
// A Reader can be constructed from a BufferSlice; alternatively the zero value
// of a Reader may be used after calling Reset on it.
type Reader struct {
data BufferSlice
len int
// The index into data[0].ReadOnlyData().
bufferIdx int
}
func (r *sliceReader) Remaining() int {
// Remaining returns the number of unread bytes remaining in the slice.
func (r *Reader) Remaining() int {
return r.len
}
func (r *sliceReader) Reset(s BufferSlice) {
// Reset frees the currently held buffer slice and starts reading from the
// provided slice. This allows reusing the reader object.
func (r *Reader) Reset(s BufferSlice) {
r.data.Free()
s.Ref()
r.data = s
@@ -161,14 +155,16 @@ func (r *sliceReader) Reset(s BufferSlice) {
r.bufferIdx = 0
}
func (r *sliceReader) Close() error {
// Close frees the underlying BufferSlice and never returns an error. Subsequent
// calls to Read will return (0, io.EOF).
func (r *Reader) Close() error {
r.data.Free()
r.data = nil
r.len = 0
return nil
}
func (r *sliceReader) freeFirstBufferIfEmpty() bool {
func (r *Reader) freeFirstBufferIfEmpty() bool {
if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
return false
}
@@ -179,7 +175,7 @@ func (r *sliceReader) freeFirstBufferIfEmpty() bool {
return true
}
func (r *sliceReader) Read(buf []byte) (n int, _ error) {
func (r *Reader) Read(buf []byte) (n int, _ error) {
if r.len == 0 {
return 0, io.EOF
}
@@ -202,7 +198,8 @@ func (r *sliceReader) Read(buf []byte) (n int, _ error) {
return n, nil
}
func (r *sliceReader) ReadByte() (byte, error) {
// ReadByte reads a single byte.
func (r *Reader) ReadByte() (byte, error) {
if r.len == 0 {
return 0, io.EOF
}
@@ -290,3 +287,59 @@ nextBuffer:
}
}
}
// Discard skips the next n bytes, returning the number of bytes discarded.
//
// It frees buffers as they are fully consumed.
//
// If Discard skips fewer than n bytes, it also returns an error.
func (r *Reader) Discard(n int) (discarded int, err error) {
total := n
for n > 0 && r.len > 0 {
curData := r.data[0].ReadOnlyData()
curSize := min(n, len(curData)-r.bufferIdx)
n -= curSize
r.len -= curSize
r.bufferIdx += curSize
if r.bufferIdx >= len(curData) {
r.data[0].Free()
r.data = r.data[1:]
r.bufferIdx = 0
}
}
discarded = total - n
if n > 0 {
return discarded, fmt.Errorf("insufficient bytes in reader")
}
return discarded, nil
}
// Peek returns the next n bytes without advancing the reader.
//
// Peek appends results to the provided res slice and returns the updated slice.
// This pattern allows re-using the storage of res if it has sufficient
// capacity.
//
// The returned subslices are views into the underlying buffers and are only
// valid until the reader is advanced past the corresponding buffer.
//
// If Peek returns fewer than n bytes, it also returns an error.
func (r *Reader) Peek(n int, res [][]byte) ([][]byte, error) {
for i := 0; n > 0 && i < len(r.data); i++ {
curData := r.data[i].ReadOnlyData()
start := 0
if i == 0 {
start = r.bufferIdx
}
curSize := min(n, len(curData)-start)
if curSize == 0 {
continue
}
res = append(res, curData[start:start+curSize])
n -= curSize
}
if n > 0 {
return nil, fmt.Errorf("insufficient bytes in reader")
}
return res, nil
}
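
Peek and Discard pair naturally: Peek exposes zero-copy views for a gather write, and Discard then advances past (and frees) exactly the bytes written, which is how loopyWriter.processData uses the Reader earlier in this diff. A toy multi-buffer reader demonstrating the same mechanics (freeing is reduced to dropping the slice reference):

package main

import "fmt"

type reader struct {
	bufs [][]byte
	idx  int // offset into bufs[0]
}

// peek appends up to n unread bytes as views, without advancing.
func (r *reader) peek(n int, res [][]byte) [][]byte {
	for i := 0; n > 0 && i < len(r.bufs); i++ {
		start := 0
		if i == 0 {
			start = r.idx
		}
		take := min(n, len(r.bufs[i])-start)
		if take > 0 {
			res = append(res, r.bufs[i][start:start+take])
			n -= take
		}
	}
	return res
}

// discard advances past n bytes, dropping fully consumed buffers.
func (r *reader) discard(n int) {
	for n > 0 && len(r.bufs) > 0 {
		take := min(n, len(r.bufs[0])-r.idx)
		n -= take
		r.idx += take
		if r.idx == len(r.bufs[0]) {
			r.bufs = r.bufs[1:] // a real impl would free the buffer here
			r.idx = 0
		}
	}
}

func main() {
	r := &reader{bufs: [][]byte{[]byte("hel"), []byte("lo world")}}
	views := r.peek(5, nil) // gather "hello" across two buffers
	for _, v := range views {
		fmt.Printf("%s", v)
	}
	fmt.Println()
	r.discard(5)
	fmt.Printf("%s\n", r.bufs[0][r.idx:]) // " world"
}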

View File

@@ -47,9 +47,6 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error {
}
// check if the context has the relevant information to prepareMsg
if rpcInfo.preloaderInfo == nil {
return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil")
}
if rpcInfo.preloaderInfo.codec == nil {
return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil")
}

View File

@@ -21,7 +21,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc-gen-go v1.36.10
// protoc v5.27.1
// source: grpc/reflection/v1/reflection.proto

View File

@@ -18,7 +18,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc-gen-go v1.36.10
// protoc v5.27.1
// grpc/reflection/v1alpha/reflection.proto is a deprecated file.

View File

@@ -657,8 +657,20 @@ type streamReader interface {
Read(n int) (mem.BufferSlice, error)
}
// noCopy may be embedded into structs which must not be copied
// after the first use.
//
// See https://golang.org/issues/8005#issuecomment-190753527
// for details.
type noCopy struct {
}
func (*noCopy) Lock() {}
func (*noCopy) Unlock() {}
// parser reads complete gRPC messages from the underlying reader.
type parser struct {
_ noCopy
// r is the underlying reader.
// See the comment on recvMsg for the permissible
// error types.
@@ -949,7 +961,7 @@ func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxR
// Information about RPC
type rpcInfo struct {
failfast bool
preloaderInfo *compressorInfo
preloaderInfo compressorInfo
}
// Information about Preloader
@@ -968,7 +980,7 @@ type rpcInfoContextKey struct{}
func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context {
return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{
failfast: failfast,
preloaderInfo: &compressorInfo{
preloaderInfo: compressorInfo{
codec: codec,
cp: cp,
comp: comp,

View File

@@ -124,7 +124,8 @@ type serviceInfo struct {
// Server is a gRPC server to serve RPC requests.
type Server struct {
opts serverOptions
opts serverOptions
statsHandler stats.Handler
mu sync.Mutex // guards following
lis map[net.Listener]bool
@@ -692,13 +693,14 @@ func NewServer(opt ...ServerOption) *Server {
o.apply(&opts)
}
s := &Server{
lis: make(map[net.Listener]bool),
opts: opts,
conns: make(map[string]map[transport.ServerTransport]bool),
services: make(map[string]*serviceInfo),
quit: grpcsync.NewEvent(),
done: grpcsync.NewEvent(),
channelz: channelz.RegisterServer(""),
lis: make(map[net.Listener]bool),
opts: opts,
statsHandler: istats.NewCombinedHandler(opts.statsHandlers...),
conns: make(map[string]map[transport.ServerTransport]bool),
services: make(map[string]*serviceInfo),
quit: grpcsync.NewEvent(),
done: grpcsync.NewEvent(),
channelz: channelz.RegisterServer(""),
}
chainUnaryServerInterceptors(s)
chainStreamServerInterceptors(s)
@@ -999,7 +1001,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
ConnectionTimeout: s.opts.connectionTimeout,
Credentials: s.opts.creds,
InTapHandle: s.opts.inTapHandle,
StatsHandlers: s.opts.statsHandlers,
StatsHandler: s.statsHandler,
KeepaliveParams: s.opts.keepaliveParams,
KeepalivePolicy: s.opts.keepalivePolicy,
InitialWindowSize: s.opts.initialWindowSize,
@@ -1036,18 +1038,18 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) {
ctx = transport.SetConnection(ctx, rawConn)
ctx = peer.NewContext(ctx, st.Peer())
for _, sh := range s.opts.statsHandlers {
ctx = sh.TagConn(ctx, &stats.ConnTagInfo{
if s.statsHandler != nil {
ctx = s.statsHandler.TagConn(ctx, &stats.ConnTagInfo{
RemoteAddr: st.Peer().Addr,
LocalAddr: st.Peer().LocalAddr,
})
sh.HandleConn(ctx, &stats.ConnBegin{})
s.statsHandler.HandleConn(ctx, &stats.ConnBegin{})
}
defer func() {
st.Close(errors.New("finished serving streams for the server transport"))
for _, sh := range s.opts.statsHandlers {
sh.HandleConn(ctx, &stats.ConnEnd{})
if s.statsHandler != nil {
s.statsHandler.HandleConn(ctx, &stats.ConnEnd{})
}
}()
@@ -1104,7 +1106,7 @@ var _ http.Handler = (*Server)(nil)
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
st, err := transport.NewServerHandlerTransport(w, r, s.statsHandler, s.opts.bufferPool)
if err != nil {
// Errors returned from transport.NewServerHandlerTransport have
// already been written to w.
@@ -1198,12 +1200,8 @@ func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStrea
return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
}
err = stream.Write(hdr, payload, opts)
if err == nil {
if len(s.opts.statsHandlers) != 0 {
for _, sh := range s.opts.statsHandlers {
sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
}
}
if err == nil && s.statsHandler != nil {
s.statsHandler.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
}
return err
}
@@ -1245,16 +1243,15 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info
}
func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
shs := s.opts.statsHandlers
if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
sh := s.statsHandler
if sh != nil || trInfo != nil || channelz.IsOn() {
if channelz.IsOn() {
s.incrCallsStarted()
}
var statsBegin *stats.Begin
for _, sh := range shs {
beginTime := time.Now()
if sh != nil {
statsBegin = &stats.Begin{
BeginTime: beginTime,
BeginTime: time.Now(),
IsClientStream: false,
IsServerStream: false,
}
@@ -1282,7 +1279,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt
trInfo.tr.Finish()
}
for _, sh := range shs {
if sh != nil {
end := &stats.End{
BeginTime: statsBegin.BeginTime,
EndTime: time.Now(),
@@ -1379,7 +1376,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt
}
var payInfo *payloadInfo
if len(shs) != 0 || len(binlogs) != 0 {
if sh != nil || len(binlogs) != 0 {
payInfo = &payloadInfo{}
defer payInfo.free()
}
@@ -1405,7 +1402,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
}
for _, sh := range shs {
if sh != nil {
sh.HandleRPC(ctx, &stats.InPayload{
RecvTime: time.Now(),
Payload: v,
@@ -1579,33 +1576,30 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
if channelz.IsOn() {
s.incrCallsStarted()
}
shs := s.opts.statsHandlers
sh := s.statsHandler
var statsBegin *stats.Begin
if len(shs) != 0 {
beginTime := time.Now()
if sh != nil {
statsBegin = &stats.Begin{
BeginTime: beginTime,
BeginTime: time.Now(),
IsClientStream: sd.ClientStreams,
IsServerStream: sd.ServerStreams,
}
for _, sh := range shs {
sh.HandleRPC(ctx, statsBegin)
}
sh.HandleRPC(ctx, statsBegin)
}
ctx = NewContextWithServerTransportStream(ctx, stream)
ss := &serverStream{
ctx: ctx,
s: stream,
p: &parser{r: stream, bufferPool: s.opts.bufferPool},
p: parser{r: stream, bufferPool: s.opts.bufferPool},
codec: s.getCodec(stream.ContentSubtype()),
desc: sd,
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
maxSendMessageSize: s.opts.maxSendMessageSize,
trInfo: trInfo,
statsHandler: shs,
statsHandler: sh,
}
if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
if sh != nil || trInfo != nil || channelz.IsOn() {
// See comment in processUnaryRPC on defers.
defer func() {
if trInfo != nil {
@@ -1619,7 +1613,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
ss.mu.Unlock()
}
if len(shs) != 0 {
if sh != nil {
end := &stats.End{
BeginTime: statsBegin.BeginTime,
EndTime: time.Now(),
@@ -1627,9 +1621,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
if err != nil && err != io.EOF {
end.Error = toRPCErr(err)
}
for _, sh := range shs {
sh.HandleRPC(ctx, end)
}
sh.HandleRPC(ctx, end)
}
if channelz.IsOn() {
@@ -1818,19 +1810,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser
method := sm[pos+1:]
// FromIncomingContext is expensive: skip if there is no stats handler
if len(s.opts.statsHandlers) > 0 {
if s.statsHandler != nil {
md, _ := metadata.FromIncomingContext(ctx)
for _, sh := range s.opts.statsHandlers {
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
sh.HandleRPC(ctx, &stats.InHeader{
FullMethod: stream.Method(),
RemoteAddr: t.Peer().Addr,
LocalAddr: t.Peer().LocalAddr,
Compression: stream.RecvCompress(),
WireLength: stream.HeaderWireLength(),
Header: md,
})
}
ctx = s.statsHandler.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
s.statsHandler.HandleRPC(ctx, &stats.InHeader{
FullMethod: stream.Method(),
RemoteAddr: t.Peer().Addr,
LocalAddr: t.Peer().LocalAddr,
Compression: stream.RecvCompress(),
WireLength: stream.HeaderWireLength(),
Header: md,
})
}
// To have calls in stream callouts work. Will delete once all stats handler
// calls come from the gRPC layer.

View File

@@ -177,6 +177,8 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
return cc.NewStream(ctx, desc, method, opts...)
}
var emptyMethodConfig = serviceconfig.MethodConfig{}
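
The package-level emptyMethodConfig lets newClientStream pass a *MethodConfig without copying the struct for every RPC that has no service config; the shared zero value must stay strictly read-only. The sentinel pattern in isolation (illustrative types):

package main

import "fmt"

type methodConfig struct {
	WaitForReady *bool
	MaxReqSize   *int
}

// emptyConfig is a shared, never-mutated zero value; returning its address
// avoids a per-call copy or allocation when no config is found.
var emptyConfig = methodConfig{}

func configFor(table map[string]*methodConfig, method string) *methodConfig {
	if mc, ok := table[method]; ok {
		return mc
	}
	return &emptyConfig
}

func main() {
	mc := configFor(nil, "/pkg.Service/Method")
	fmt.Println(mc.WaitForReady == nil) // true: defaults apply
}
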
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
// Start tracking the RPC for idleness purposes. This is where a stream is
// created for both streaming and unary RPCs, and hence is a good place to
@@ -217,7 +219,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
return nil, err
}
var mc serviceconfig.MethodConfig
mc := &emptyMethodConfig
var onCommit func()
newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, nameResolutionDelayed, opts...)
@@ -240,7 +242,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
if rpcConfig.Context != nil {
ctx = rpcConfig.Context
}
mc = rpcConfig.MethodConfig
mc = &rpcConfig.MethodConfig
onCommit = rpcConfig.OnCommitted
if rpcConfig.Interceptor != nil {
rpcInfo.Context = nil
@@ -258,7 +260,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
return newStream(ctx, func() {})
}
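newClientStream now hands a *serviceconfig.MethodConfig down to newClientStreamWithParams (see the signature change just below), aliasing a shared package-level zero value instead of copying the struct on every RPC. A toy sketch of the pattern; bigConfig and lookup are stand-in names:

package main

import "fmt"

// bigConfig stands in for serviceconfig.MethodConfig: large enough
// that a per-call copy is worth avoiding.
type bigConfig struct {
	WaitForReady *bool
	Timeout      *int
	Other        [8]string
}

// emptyConfig mirrors emptyMethodConfig in the diff: allocated once,
// aliased read-only by every call that resolves no real config.
var emptyConfig = bigConfig{}

// lookup stands in for service-config resolution; nil means "none".
func lookup() *bigConfig { return nil }

func main() {
	cfg := &emptyConfig
	if resolved := lookup(); resolved != nil {
		cfg = resolved
	}
	// Downstream takes *bigConfig, just as newClientStreamWithParams
	// now takes *serviceconfig.MethodConfig.
	fmt.Println(cfg.WaitForReady == nil)
}

Sharing the zero value this way is only safe while callees treat the config as read-only; any write through the pointer would leak into every later RPC.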
func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) {
func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc *serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) {
callInfo := defaultCallInfo()
if mc.WaitForReady != nil {
callInfo.failFast = !*mc.WaitForReady
@@ -325,7 +327,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
cs := &clientStream{
callHdr: callHdr,
ctx: ctx,
methodConfig: &mc,
methodConfig: mc,
opts: opts,
callInfo: callInfo,
cc: cc,
@@ -418,19 +420,21 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1)
method := cs.callHdr.Method
var beginTime time.Time
shs := cs.cc.dopts.copts.StatsHandlers
for _, sh := range shs {
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast, NameResolutionDelay: cs.nameResolutionDelay})
sh := cs.cc.statsHandler
if sh != nil {
beginTime = time.Now()
begin := &stats.Begin{
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{
FullMethodName: method, FailFast: cs.callInfo.failFast,
NameResolutionDelay: cs.nameResolutionDelay,
})
sh.HandleRPC(ctx, &stats.Begin{
Client: true,
BeginTime: beginTime,
FailFast: cs.callInfo.failFast,
IsClientStream: cs.desc.ClientStreams,
IsServerStream: cs.desc.ServerStreams,
IsTransparentRetryAttempt: isTransparent,
}
sh.HandleRPC(ctx, begin)
})
}
var trInfo *traceInfo
@@ -461,7 +465,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
beginTime: beginTime,
cs: cs,
decompressorV0: cs.cc.dopts.dc,
statsHandlers: shs,
statsHandler: sh,
trInfo: trInfo,
}, nil
}
@@ -482,10 +486,8 @@ func (a *csAttempt) getTransport() error {
if a.trInfo != nil {
a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr())
}
if pick.blocked {
for _, sh := range a.statsHandlers {
sh.HandleRPC(a.ctx, &stats.DelayedPickComplete{})
}
if pick.blocked && a.statsHandler != nil {
a.statsHandler.HandleRPC(a.ctx, &stats.DelayedPickComplete{})
}
return nil
}
@@ -529,7 +531,7 @@ func (a *csAttempt) newStream() error {
}
a.transportStream = s
a.ctx = s.Context()
a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
a.parser = parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
return nil
}
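csAttempt, and later in the diff addrConnStream and serverStream, now store the parser as a value field and take its address only at the recv call sites, trading a per-stream heap allocation for an inline field. A toy sketch of the value-field pattern with made-up names (reader, attempt):

package main

import (
	"bytes"
	"fmt"
	"io"
)

// reader stands in for grpc's parser; its method keeps a pointer
// receiver so it could mutate internal state.
type reader struct {
	r io.Reader
}

func (p *reader) next(buf []byte) (int, error) { return p.r.Read(buf) }

// attempt holds the reader by value: it is laid out inside the
// struct and allocated together with it, no separate new(reader).
type attempt struct {
	parser reader
}

func main() {
	a := &attempt{parser: reader{r: bytes.NewBufferString("hi")}}
	buf := make([]byte, 2)
	// &a.parser is the address of the inline field, so the pointer
	// receiver works without any extra allocation.
	n, _ := a.parser.next(buf)
	fmt.Println(n, string(buf))
}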
@@ -601,7 +603,7 @@ type csAttempt struct {
cs *clientStream
transport transport.ClientTransport
transportStream *transport.ClientStream
parser *parser
parser parser
pickResult balancer.PickResult
finished bool
@@ -615,8 +617,8 @@ type csAttempt struct {
// and cleared when the finish method is called.
trInfo *traceInfo
statsHandlers []stats.Handler
beginTime time.Time
statsHandler stats.Handler
beginTime time.Time
// set for newStream errors that may be transparently retried
allowTransparentRetry bool
@@ -1110,17 +1112,15 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength
}
return io.EOF
}
if len(a.statsHandlers) != 0 {
for _, sh := range a.statsHandlers {
sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
}
if a.statsHandler != nil {
a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
}
return nil
}
func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
cs := a.cs
if len(a.statsHandlers) != 0 && payInfo == nil {
if a.statsHandler != nil && payInfo == nil {
payInfo = &payloadInfo{}
defer payInfo.free()
}
@@ -1141,7 +1141,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
// Only initialize this state once per stream.
a.decompressorSet = true
}
if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil {
if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil {
if err == io.EOF {
if statusErr := a.transportStream.Status().Err(); statusErr != nil {
return statusErr
@@ -1163,8 +1163,8 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
}
a.mu.Unlock()
}
for _, sh := range a.statsHandlers {
sh.HandleRPC(a.ctx, &stats.InPayload{
if a.statsHandler != nil {
a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{
Client: true,
RecvTime: time.Now(),
Payload: m,
@@ -1179,7 +1179,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
}
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success
} else if err != nil {
return toRPCErr(err)
@@ -1217,15 +1217,14 @@ func (a *csAttempt) finish(err error) {
ServerLoad: balancerload.Parse(tr),
})
}
for _, sh := range a.statsHandlers {
end := &stats.End{
if a.statsHandler != nil {
a.statsHandler.HandleRPC(a.ctx, &stats.End{
Client: true,
BeginTime: a.beginTime,
EndTime: time.Now(),
Trailer: tr,
Error: err,
}
sh.HandleRPC(a.ctx, end)
})
}
if a.trInfo != nil && a.trInfo.tr != nil {
if err == nil {
@@ -1331,7 +1330,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
return nil, err
}
as.transportStream = s
as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
as.parser = parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
ac.incrCallsStarted()
if desc != unaryStreamDesc {
// Listen on stream context to cleanup when the stream context is
@@ -1374,7 +1373,7 @@ type addrConnStream struct {
decompressorSet bool
decompressorV0 Decompressor
decompressorV1 encoding.Compressor
parser *parser
parser parser
// mu guards finished and is held for the entire finish method.
mu sync.Mutex
@@ -1487,7 +1486,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Only initialize this state once per stream.
as.decompressorSet = true
}
if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
if err == io.EOF {
if statusErr := as.transportStream.Status().Err(); statusErr != nil {
return statusErr
@@ -1509,7 +1508,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success
} else if err != nil {
return toRPCErr(err)
@@ -1597,7 +1596,7 @@ type ServerStream interface {
type serverStream struct {
ctx context.Context
s *transport.ServerStream
p *parser
p parser
codec baseCodec
desc *StreamDesc
@@ -1614,7 +1613,7 @@ type serverStream struct {
maxSendMessageSize int
trInfo *traceInfo
statsHandler []stats.Handler
statsHandler stats.Handler
binlogs []binarylog.MethodLogger
// serverHeaderBinlogged indicates whether server header has been logged. It
@@ -1750,10 +1749,8 @@ func (ss *serverStream) SendMsg(m any) (err error) {
binlog.Log(ss.ctx, sm)
}
}
if len(ss.statsHandler) != 0 {
for _, sh := range ss.statsHandler {
sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
}
if ss.statsHandler != nil {
ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
}
return nil
}
@@ -1784,11 +1781,11 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
}
}()
var payInfo *payloadInfo
if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
if ss.statsHandler != nil || len(ss.binlogs) != 0 {
payInfo = &payloadInfo{}
defer payInfo.free()
}
if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
if err == io.EOF {
if len(ss.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{}
@@ -1808,16 +1805,14 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
return toRPCErr(err)
}
ss.recvFirstMsg = true
if len(ss.statsHandler) != 0 {
for _, sh := range ss.statsHandler {
sh.HandleRPC(ss.s.Context(), &stats.InPayload{
RecvTime: time.Now(),
Payload: m,
Length: payInfo.uncompressedBytes.Len(),
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
})
}
if ss.statsHandler != nil {
ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
RecvTime: time.Now(),
Payload: m,
Length: payInfo.uncompressedBytes.Len(),
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
})
}
if len(ss.binlogs) != 0 {
cm := &binarylog.ClientMessage{
@@ -1834,7 +1829,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
}
// Special handling for non-client-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF {
if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF {
return nil
} else if err != nil {
return err

vendor/google.golang.org/grpc/version.go

@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
const Version = "1.76.0"
const Version = "1.77.0"

8
vendor/modules.txt vendored

@@ -2554,16 +2554,16 @@ golang.org/x/tools/internal/versions
# google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb
## explicit; go 1.23.0
google.golang.org/genproto/protobuf/field_mask
# google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4
# google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8
## explicit; go 1.24.0
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/annotations
google.golang.org/genproto/googleapis/api/httpbody
# google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4
# google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8
## explicit; go 1.24.0
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.76.0
# google.golang.org/grpc v1.77.0
## explicit; go 1.24.0
google.golang.org/grpc
google.golang.org/grpc/attributes
@@ -2574,7 +2574,6 @@ google.golang.org/grpc/balancer/endpointsharding
google.golang.org/grpc/balancer/grpclb/state
google.golang.org/grpc/balancer/pickfirst
google.golang.org/grpc/balancer/pickfirst/internal
google.golang.org/grpc/balancer/pickfirst/pickfirstleaf
google.golang.org/grpc/balancer/roundrobin
google.golang.org/grpc/binarylog/grpc_binarylog_v1
google.golang.org/grpc/channelz
@@ -2584,6 +2583,7 @@ google.golang.org/grpc/credentials
google.golang.org/grpc/credentials/insecure
google.golang.org/grpc/encoding
google.golang.org/grpc/encoding/gzip
google.golang.org/grpc/encoding/internal
google.golang.org/grpc/encoding/proto
google.golang.org/grpc/experimental/stats
google.golang.org/grpc/grpclog