Bump go dependencies with known vulnerabilities

This bumps the following go dependencies:

google.golang.org/grpc@v1.79.3
go.opentelemetry.io/otel/sdk@v1.42.0
golang.org/x/image@v0.38.0
This commit is contained in:
Ralf Haferkamp
2026-04-02 10:56:23 +02:00
committed by Ralf Haferkamp
parent 6ba2cdf7b6
commit d7f87eeb0b
129 changed files with 22918 additions and 1275 deletions

18
go.mod
View File

@@ -96,21 +96,21 @@ require (
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0
go.opentelemetry.io/contrib/zpages v0.63.0
go.opentelemetry.io/otel v1.38.0
go.opentelemetry.io/otel v1.42.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0
go.opentelemetry.io/otel/sdk v1.38.0
go.opentelemetry.io/otel/trace v1.38.0
go.opentelemetry.io/otel/sdk v1.42.0
go.opentelemetry.io/otel/trace v1.42.0
golang.org/x/crypto v0.49.0
golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac
golang.org/x/image v0.33.0
golang.org/x/image v0.38.0
golang.org/x/net v0.51.0
golang.org/x/oauth2 v0.33.0
golang.org/x/oauth2 v0.34.0
golang.org/x/sync v0.20.0
golang.org/x/term v0.41.0
golang.org/x/text v0.35.0
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8
google.golang.org/grpc v1.77.0
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217
google.golang.org/grpc v1.79.3
google.golang.org/protobuf v1.36.10
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
@@ -379,7 +379,7 @@ require (
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
go.opentelemetry.io/otel/metric v1.38.0 // indirect
go.opentelemetry.io/otel/metric v1.42.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
@@ -390,7 +390,7 @@ require (
golang.org/x/time v0.15.0 // indirect
golang.org/x/tools v0.42.0 // indirect
google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect
gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect

40
go.sum
View File

@@ -1301,8 +1301,8 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
go.opentelemetry.io/contrib/zpages v0.63.0 h1:TppOKuZGbqXMgsfjqq3i09N5Vbo1JLtLImUqiTPGnX4=
go.opentelemetry.io/contrib/zpages v0.63.0/go.mod h1:5F8uugz75ay/MMhRRhxAXY33FuaI8dl7jTxefrIy5qk=
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel v1.42.0 h1:lSQGzTgVR3+sgJDAU/7/ZMjN9Z+vUip7leaqBKy4sho=
go.opentelemetry.io/otel v1.42.0/go.mod h1:lJNsdRMxCUIWuMlVJWzecSMuNjE7dOYyWlqOXWkdqCc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
@@ -1311,14 +1311,14 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.opentelemetry.io/otel/metric v1.42.0 h1:2jXG+3oZLNXEPfNmnpxKDeZsFI5o4J+nz6xUlaFdF/4=
go.opentelemetry.io/otel/metric v1.42.0/go.mod h1:RlUN/7vTU7Ao/diDkEpQpnz3/92J9ko05BIwxYa2SSI=
go.opentelemetry.io/otel/sdk v1.42.0 h1:LyC8+jqk6UJwdrI/8VydAq/hvkFKNHZVIWuslJXYsDo=
go.opentelemetry.io/otel/sdk v1.42.0/go.mod h1:rGHCAxd9DAph0joO4W6OPwxjNTYWghRWmkHuGbayMts=
go.opentelemetry.io/otel/sdk/metric v1.42.0 h1:D/1QR46Clz6ajyZ3G8SgNlTJKBdGp84q9RKCAZ3YGuA=
go.opentelemetry.io/otel/sdk/metric v1.42.0/go.mod h1:Ua6AAlDKdZ7tdvaQKfSmnFTdHx37+J4ba8MwVCYM5hc=
go.opentelemetry.io/otel/trace v1.42.0 h1:OUCgIPt+mzOnaUTpOQcBiM/PLQ/Op7oq6g4LenLmOYY=
go.opentelemetry.io/otel/trace v1.42.0/go.mod h1:f3K9S+IFqnumBkKhRJMeaZeNk9epyhnCmQh/EysQCdc=
go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@@ -1378,8 +1378,8 @@ golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac/go.mod h1:hH+7mtFmImwwcMvScy
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E=
golang.org/x/image v0.33.0 h1:LXRZRnv1+zGd5XBUVRFmYEphyyKJjQjCRiOuAP3sZfQ=
golang.org/x/image v0.33.0/go.mod h1:DD3OsTYT9chzuzTQt+zMcOlBHgfoKQb1gry8p76Y1sc=
golang.org/x/image v0.38.0 h1:5l+q+Y9JDC7mBOMjo4/aPhMDcxEptsX+Tt3GgRQRPuE=
golang.org/x/image v0.38.0/go.mod h1:/3f6vaXC+6CEanU4KJxbcUZyEePbyKbaLoDOe4ehFYY=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1468,8 +1468,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1730,10 +1730,10 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE=
google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE=
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4=
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 h1:M1rk8KBnUsBDg1oPGHNCxG4vc1f49epmTO7xscSajMk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls=
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -1749,8 +1749,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE=
google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
google.golang.org/grpc/examples v0.0.0-20211102180624-670c133e568e h1:m7aQHHqd0q89mRwhwS9Bx2rjyl/hsFAeta+uGrHsQaU=
google.golang.org/grpc/examples v0.0.0-20211102180624-670c133e568e/go.mod h1:gID3PKrg7pWKntu9Ss6zTLJ0ttC0X9IHgREOCZwbCVU=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=

View File

@@ -8,3 +8,4 @@ nam
valu
thirdparty
addOpt
observ

View File

@@ -16,6 +16,7 @@ linters:
- govet
- ineffassign
- misspell
- modernize
- perfsprint
- revive
- staticcheck
@@ -111,6 +112,9 @@ linters:
locale: US
ignore-rules:
- cancelled
modernize:
disable:
- omitzero
perfsprint:
int-conversion: true
err-error: true
@@ -190,6 +194,7 @@ linters:
arguments:
- ["ID"] # AllowList
- ["Otel", "Aws", "Gcp"] # DenyList
- - skip-package-name-collision-with-go-std: true
- name: waitgroup-by-value
testifylint:
enable-all: true
@@ -197,6 +202,9 @@ linters:
- float-compare
- go-require
- require-error
usetesting:
context-background: true
context-todo: true
exclusions:
generated: lax
presets:

View File

@@ -1,4 +1,5 @@
http://localhost
https://localhost
http://jaeger-collector
https://github.com/open-telemetry/opentelemetry-go/milestone/
https://github.com/open-telemetry/opentelemetry-go/projects
@@ -6,4 +7,7 @@ https://github.com/open-telemetry/opentelemetry-go/projects
https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+]
file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries
file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual
http://4.3.2.1:78/user/123
http://4.3.2.1:78/user/123
file:///home/runner/work/opentelemetry-go/opentelemetry-go/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/dns:/:4317
# URL works, but it has blocked link checkers.
https://dl.acm.org/doi/10.1145/198429.198435

View File

@@ -11,6 +11,152 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
<!-- Released section -->
<!-- Don't change this section unless doing release -->
## [1.42.0/0.64.0/0.18.0/0.0.16] 2026-03-06
### Added
- Add `go.opentelemetry.io/otel/semconv/v1.40.0` package.
The package contains semantic conventions from the `v1.40.0` version of the OpenTelemetry Semantic Conventions.
See the [migration documentation](./semconv/v1.40.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.39.0`. (#7985)
- Add `Err` and `SetErr` on `Record` in `go.opentelemetry.io/otel/log` to attach an error and set record exception attributes in `go.opentelemetry.io/otel/log/sdk`. (#7924)
### Changed
- `TracerProvider.ForceFlush` in `go.opentelemetry.io/otel/sdk/trace` joins errors together and continues iteration through SpanProcessors as opposed to returning the first encountered error without attempting exports on subsequent SpanProcessors. (#7856)
### Fixed
- Fix missing `request.GetBody` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` to correctly handle HTTP2 GOAWAY frame. (#7931)
- Fix semconv v1.39.0 generated metric helpers skipping required attributes when extra attributes were empty. (#7964)
- Preserve W3C TraceFlags bitmask (including the random Trace ID flag) during trace context extraction and injection in `go.opentelemetry.io/otel/propagation`. (#7834)
### Removed
- Drop support for [Go 1.24]. (#7984)
## [1.41.0/0.63.0/0.17.0/0.0.15] 2026-03-02
This release is the last to support [Go 1.24].
The next release will require at least [Go 1.25].
### Added
- Support testing of [Go 1.26]. (#7902)
### Fixed
- Update `Baggage` in `go.opentelemetry.io/otel/propagation` and `Parse` and `New` in `go.opentelemetry.io/otel/baggage` to comply with W3C Baggage specification limits.
`New` and `Parse` now return partial baggage along with an error when limits are exceeded.
Errors from baggage extraction are reported to the global error handler. (#7880)
- Return an error when the endpoint is configured as insecure and with TLS configuration in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7914)
- Return an error when the endpoint is configured as insecure and with TLS configuration in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#7914)
- Return an error when the endpoint is configured as insecure and with TLS configuration in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7914)
## [1.40.0/0.62.0/0.16.0] 2026-02-02
### Added
- Add `AlwaysRecord` sampler in `go.opentelemetry.io/otel/sdk/trace`. (#7724)
- Add `Enabled` method to all synchronous instrument interfaces (`Float64Counter`, `Float64UpDownCounter`, `Float64Histogram`, `Float64Gauge`, `Int64Counter`, `Int64UpDownCounter`, `Int64Histogram`, `Int64Gauge`) in `go.opentelemetry.io/otel/metric`.
This stabilizes the synchronous instrument enabled feature, allowing users to check if an instrument will process measurements before performing computationally expensive operations. (#7763)
- Add `go.opentelemetry.io/otel/semconv/v1.39.0` package.
The package contains semantic conventions from the `v1.39.0` version of the OpenTelemetry Semantic Conventions.
See the [migration documentation](./semconv/v1.39.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.38.0`. (#7783, #7789)
### Changed
- Improve the concurrent performance of `HistogramReservoir` in `go.opentelemetry.io/otel/sdk/metric/exemplar` by 4x. (#7443)
- Improve the concurrent performance of `FixedSizeReservoir` in `go.opentelemetry.io/otel/sdk/metric/exemplar`. (#7447)
- Improve performance of concurrent histogram measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7474)
- Improve performance of concurrent synchronous gauge measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7478)
- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`. (#7492)
- `Exporter` in `go.opentelemetry.io/otel/exporters/prometheus` ignores metrics with the scope `go.opentelemetry.io/contrib/bridges/prometheus`.
This prevents scrape failures when the Prometheus exporter is misconfigured to get data from the Prometheus bridge. (#7688)
- Improve performance of concurrent exponential histogram measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7702)
- The `rpc.grpc.status_code` attribute in the experimental metrics emitted from `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` is replaced with the `rpc.response.status_code` attribute to align with the semantic conventions. (#7854)
- The `rpc.grpc.status_code` attribute in the experimental metrics emitted from `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` is replaced with the `rpc.response.status_code` attribute to align with the semantic conventions. (#7854)
### Fixed
- Fix bad log message when key-value pairs are dropped because of key duplication in `go.opentelemetry.io/otel/sdk/log`. (#7662)
- Fix `DroppedAttributes` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not count the non-attribute key-value pairs dropped because of key duplication. (#7662)
- Fix `SetAttributes` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not log that attributes are dropped when they are actually not dropped. (#7662)
- Fix missing `request.GetBody` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` to correctly handle HTTP/2 `GOAWAY` frame. (#7794)
- `WithHostID` detector in `go.opentelemetry.io/otel/sdk/resource` to use full path for `ioreg` command on Darwin (macOS). (#7818)
### Deprecated
- Deprecate `go.opentelemetry.io/otel/exporters/zipkin`.
For more information, see the [OTel blog post deprecating the Zipkin exporter](https://opentelemetry.io/blog/2025/deprecating-zipkin-exporters/). (#7670)
## [1.39.0/0.61.0/0.15.0/0.0.14] 2025-12-05
### Added
- Greatly reduce the cost of recording metrics in `go.opentelemetry.io/otel/sdk/metric` using hashing for map keys. (#7175)
- Add `WithInstrumentationAttributeSet` option to `go.opentelemetry.io/otel/log`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/trace` packages.
This provides a concurrent-safe and performant alternative to `WithInstrumentationAttributes` by accepting a pre-constructed `attribute.Set`. (#7287)
- Add experimental observability for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`.
Check the `go.opentelemetry.io/otel/exporters/prometheus/internal/x` package documentation for more information. (#7345)
- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7353)
- Add temporality selector functions `DeltaTemporalitySelector`, `CumulativeTemporalitySelector`, `LowMemoryTemporalitySelector` to `go.opentelemetry.io/otel/sdk/metric`. (#7434)
- Add experimental observability metrics for simple log processor in `go.opentelemetry.io/otel/sdk/log`. (#7548)
- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7459)
- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7486)
- Add experimental observability metrics for simple span processor in `go.opentelemetry.io/otel/sdk/trace`. (#7374)
- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7512)
- Add experimental observability metrics for manual reader in `go.opentelemetry.io/otel/sdk/metric`. (#7524)
- Add experimental observability metrics for periodic reader in `go.opentelemetry.io/otel/sdk/metric`. (#7571)
- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environmental variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7608)
- Add `Enabled` method to the `Processor` interface in `go.opentelemetry.io/otel/sdk/log`.
All `Processor` implementations now include an `Enabled` method. (#7639)
- The `go.opentelemetry.io/otel/semconv/v1.38.0` package.
The package contains semantic conventions from the `v1.38.0` version of the OpenTelemetry Semantic Conventions.
See the [migration documentation](./semconv/v1.38.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.37.0`. (#7648)
### Changed
- `Distinct` in `go.opentelemetry.io/otel/attribute` is no longer guaranteed to uniquely identify an attribute set.
Collisions between `Distinct` values for different Sets are possible with extremely high cardinality (billions of series per instrument), but are highly unlikely. (#7175)
- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/trace` synchronously de-duplicates the passed attributes instead of delegating it to the returned `TracerOption`. (#7266)
- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/meter` synchronously de-duplicates the passed attributes instead of delegating it to the returned `MeterOption`. (#7266)
- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/log` synchronously de-duplicates the passed attributes instead of delegating it to the returned `LoggerOption`. (#7266)
- Rename the `OTEL_GO_X_SELF_OBSERVABILITY` environment variable to `OTEL_GO_X_OBSERVABILITY` in `go.opentelemetry.io/otel/sdk/trace`, `go.opentelemetry.io/otel/sdk/log`, and `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7302)
- Improve performance of histogram `Record` in `go.opentelemetry.io/otel/sdk/metric` when min and max are disabled using `NoMinMax`. (#7306)
- Improve error handling for dropped data during translation by using `prometheus.NewInvalidMetric` in `go.opentelemetry.io/otel/exporters/prometheus`.
⚠️ **Breaking Change:** Previously, these cases were only logged and scrapes succeeded.
Now, when translation would drop data (e.g., invalid label/value), the exporter emits a `NewInvalidMetric`, and Prometheus scrapes **fail with HTTP 500** by default.
To preserve the prior behavior (scrapes succeed while errors are logged), configure your Prometheus HTTP handler with: `promhttp.HandlerOpts{ ErrorHandling: promhttp.ContinueOnError }`. (#7363)
- Replace fnv hash with xxhash in `go.opentelemetry.io/otel/attribute` for better performance. (#7371)
- The default `TranslationStrategy` in `go.opentelemetry.io/exporters/prometheus` is changed from `otlptranslator.NoUTF8EscapingWithSuffixes` to `otlptranslator.UnderscoreEscapingWithSuffixes`. (#7421)
- Improve performance of concurrent measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7427)
- Include W3C TraceFlags (bits 0–7) in the OTLP `Span.Flags` field in `go.opentelemetry.io/exporters/otlp/otlptrace/otlptracehttp` and `go.opentelemetry.io/exporters/otlp/otlptrace/otlptracegrpc`. (#7438)
- The `ErrorType` function in `go.opentelemetry.io/otel/semconv/v1.37.0` now handles custom error types.
If an error implements an `ErrorType() string` method, the return value of that method will be used as the error type. (#7442)
### Fixed
- Fix `WithInstrumentationAttributes` options in `go.opentelemetry.io/otel/trace`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/log` to properly merge attributes when passed multiple times instead of replacing them.
Attributes with duplicate keys will use the last value passed. (#7300)
- The equality of `attribute.Set` when using the `Equal` method is not affected by the user overriding the empty set pointed to by `attribute.EmptySet` in `go.opentelemetry.io/otel/attribute`. (#7357)
- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7372)
- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7372)
- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#7372)
- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#7372)
- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7372)
- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7372)
- Fix `AddAttributes`, `SetAttributes`, `SetBody` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not mutate input. (#7403)
- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.37.0`. (#7655)
- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.36.0`. (#7656)
### Removed
- Drop support for [Go 1.23]. (#7274)
- Remove the `FilterProcessor` interface in `go.opentelemetry.io/otel/sdk/log`.
The `Enabled` method has been added to the `Processor` interface instead.
All `Processor` implementations must now implement the `Enabled` method.
Custom processors that do not filter records can implement `Enabled` to return `true`. (#7639)
## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29
This release is the last to support [Go 1.23].
@@ -3430,8 +3576,13 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.38.0...HEAD
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.42.0...HEAD
[1.42.0/0.64.0/0.18.0/0.0.16]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.42.0
[1.41.0/0.63.0/0.17.0/0.0.15]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.41.0
[1.40.0/0.62.0/0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.40.0
[1.39.0/0.61.0/0.15.0/0.0.14]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.39.0
[1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0
[0.59.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/exporters/prometheus/v0.59.1
[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0
[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2
[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1
@@ -3527,6 +3678,7 @@ It contains api and sdk for trace and meter.
<!-- Released section ended -->
[Go 1.26]: https://go.dev/doc/go1.26
[Go 1.25]: https://go.dev/doc/go1.25
[Go 1.24]: https://go.dev/doc/go1.24
[Go 1.23]: https://go.dev/doc/go1.23

View File

@@ -54,8 +54,8 @@ go get -d go.opentelemetry.io/otel
(This may print some warning about "build constraints exclude all Go
files", just ignore it.)
This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
can alternatively use `git` directly with:
This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`.
Alternatively, you can use `git` directly with:
```sh
git clone https://github.com/open-telemetry/opentelemetry-go
@@ -65,8 +65,7 @@ git clone https://github.com/open-telemetry/opentelemetry-go
that name is a kind of a redirector to GitHub that `go get` can
understand, but `git` does not.)
This would put the project in the `opentelemetry-go` directory in
current working directory.
This will add the project as `opentelemetry-go` within the current directory.
Enter the newly created directory and add your fork as a new remote:
@@ -109,7 +108,7 @@ A PR is considered **ready to merge** when:
This is not enforced through automation, but needs to be validated by the
maintainer merging.
* At least one of the qualified approvals need to be from an
* At least one of the qualified approvals needs to be from an
[Approver]/[Maintainer] affiliated with a different company than the author
of the PR.
* PRs introducing changes that have already been discussed and consensus
@@ -166,11 +165,11 @@ guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines).
### Focus on Capabilities, Not Structure Compliance
OpenTelemetry is an evolving specification, one where the desires and
use cases are clear, but the method to satisfy those uses cases are
use cases are clear, but the methods to satisfy those use cases are
not.
As such, Contributions should provide functionality and behavior that
conforms to the specification, but the interface and structure is
conforms to the specification, but the interface and structure are
flexible.
It is preferable to have contributions follow the idioms of the
@@ -217,7 +216,7 @@ about dependency compatibility.
This project does not partition dependencies based on the environment (i.e.
`development`, `staging`, `production`).
Only the dependencies explicitly included in the released modules have be
Only the dependencies explicitly included in the released modules have been
tested and verified to work with the released code. No other guarantee is made
about the compatibility of other dependencies.
@@ -635,8 +634,8 @@ is not in their root name.
The use of internal packages should be scoped to a single module. A sub-module
should never import from a parent internal package. This creates a coupling
between the two modules where a user can upgrade the parent without the child
and if the internal package API has changed it will fail to upgrade[^3].
between the two modules where a user can upgrade the parent without the child,
and if the internal package API has changed, it will fail to upgrade[^3].
There are two known exceptions to this rule:
@@ -657,7 +656,7 @@ this.
### Ignoring context cancellation
OpenTelemetry API implementations need to ignore the cancellation of the context that are
OpenTelemetry API implementations need to ignore the cancellation of the context that is
passed when recording a value (e.g. starting a span, recording a measurement, emitting a log).
Recording methods should not return an error describing the cancellation state of the context
when they complete, nor should they abort any work.
@@ -675,6 +674,441 @@ force flushing telemetry, shutting down a signal provider) the context cancellat
should be honored. This means all work done on behalf of the user provided context
should be canceled.
### Observability
OpenTelemetry Go SDK components should be instrumented to give users observability into the health and performance of the telemetry pipeline itself.
This allows operators to understand how well their observability infrastructure is functioning and to identify potential issues before they impact their applications.
This section outlines the best practices for building instrumentation in OpenTelemetry Go SDK components.
#### Environment Variable Activation
Observability features are currently experimental.
They should be disabled by default and activated through the `OTEL_GO_X_OBSERVABILITY` environment variable.
This follows the established experimental feature pattern used throughout the SDK.
Components should check for this environment variable using a consistent pattern:
```go
import "go.opentelemetry.io/otel/*/internal/x"
if x.Observability.Enabled() {
// Initialize observability metrics
}
```
**References**:
- [stdouttrace exporter](./exporters/stdout/stdouttrace/internal/x/x.go)
- [sdk](./sdk/internal/x/x.go)
#### Encapsulation
Instrumentation should be encapsulated within a dedicated `struct` (e.g. `instrumentation`).
It should not be mixed into the instrumented component.
Prefer this:
```go
type SDKComponent struct {
inst *instrumentation
}
type instrumentation struct {
inflight otelconv.SDKComponentInflight
exported otelconv.SDKComponentExported
}
```
To this:
```go
// ❌ Avoid this pattern.
type SDKComponent struct {
/* other SDKComponent fields... */
inflight otelconv.SDKComponentInflight
exported otelconv.SDKComponentExported
}
```
The instrumentation code should not bloat the code being instrumented.
Likely, this means its own file, or its own package if it is complex or reused.
#### Initialization
Instrumentation setup should be explicit, side-effect free, and local to the relevant component.
Avoid relying on global or implicit [side effects][side-effect] for initialization.
Encapsulate setup in constructor functions, ensuring clear ownership and scope:
```go
import (
"errors"
semconv "go.opentelemetry.io/otel/semconv/v1.40.0"
"go.opentelemetry.io/otel/semconv/v1.40.0/otelconv"
)
type SDKComponent struct {
inst *instrumentation
}
func NewSDKComponent(config Config) (*SDKComponent, error) {
inst, err := newInstrumentation()
if err != nil {
return nil, err
}
return &SDKComponent{inst: inst}, nil
}
type instrumentation struct {
inflight otelconv.SDKComponentInflight
exported otelconv.SDKComponentExported
}
func newInstrumentation() (*instrumentation, error) {
if !x.Observability.Enabled() {
return nil, nil
}
meter := otel.GetMeterProvider().Meter(
"<component-package-name>",
metric.WithInstrumentationVersion(sdk.Version()),
metric.WithSchemaURL(semconv.SchemaURL),
)
inst := &instrumentation{}
var err, e error
inst.inflight, e = otelconv.NewSDKComponentInflight(meter)
err = errors.Join(err, e)
inst.exported, e = otelconv.NewSDKComponentExported(meter)
err = errors.Join(err, e)
return inst, err
}
```
```go
// ❌ Avoid this pattern.
func (c *Component) initObservability() {
// Initialize observability metrics
if !x.Observability.Enabled() {
return
}
// Initialize observability metrics
c.inst = &instrumentation{/* ... */}
}
```
[side-effect]: https://en.wikipedia.org/wiki/Side_effect_(computer_science)
#### Performance
When observability is disabled there should be little to no overhead.
```go
func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
if e.inst != nil {
attrs := expensiveOperation()
e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...)
}
// Export spans...
}
```
```go
// ❌ Avoid this pattern.
func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
attrs := expensiveOperation()
e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...)
// Export spans...
}
func (i *instrumentation) recordSpanInflight(ctx context.Context, count int64, attrs ...attribute.KeyValue) {
if i == nil || i.inflight == nil {
return
}
i.inflight.Add(ctx, count, metric.WithAttributes(attrs...))
}
```
When observability is enabled, the instrumentation code paths should be optimized to reduce allocation and computation overhead.
##### Attribute and Option Allocation Management
Pool attribute slices and options with [`sync.Pool`] to minimize allocations in measurement calls with dynamic attributes.
```go
var (
attrPool = sync.Pool{
New: func() any {
// Pre-allocate common capacity
knownCap := 8 // Adjust based on expected usage
s := make([]attribute.KeyValue, 0, knownCap)
// Return a pointer to avoid extra allocation on Put().
return &s
},
}
addOptPool = &sync.Pool{
New: func() any {
const n = 1 // WithAttributeSet
o := make([]metric.AddOption, 0, n)
// Return a pointer to avoid extra allocation on Put().
return &o
},
}
)
func (i *instrumentation) record(ctx context.Context, value int64, baseAttrs ...attribute.KeyValue) {
attrs := attrPool.Get().(*[]attribute.KeyValue)
defer func() {
*attrs = (*attrs)[:0] // Reset.
attrPool.Put(attrs)
}()
*attrs = append(*attrs, baseAttrs...)
// Add any dynamic attributes.
*attrs = append(*attrs, semconv.OTelComponentName("exporter-1"))
addOpt := addOptPool.Get().(*[]metric.AddOption)
defer func() {
*addOpt = (*addOpt)[:0]
addOptPool.Put(addOpt)
}()
set := attribute.NewSet(*attrs...)
*addOpt = append(*addOpt, metric.WithAttributeSet(set))
i.counter.Add(ctx, value, *addOpt...)
}
```
Pools are most effective when there are many pooled objects of the same sufficiently large size, and the objects are repeatedly used.
This amortizes the cost of allocation and synchronization.
Ideally, the pools should be scoped to be used as widely as possible within the component to maximize this efficiency while still ensuring correctness.
[`sync.Pool`]: https://pkg.go.dev/sync#Pool
##### Cache common attribute sets for repeated measurements
If a static set of attributes is used for measurements and it is known at compile time, pre-compute and cache these attributes.
```go
type spanLiveSetKey struct {
sampled bool
}
var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{
{true}: attribute.NewSet(
otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
otelconv.SpanSamplingResultRecordAndSample,
),
),
{false}: attribute.NewSet(
otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
otelconv.SpanSamplingResultRecordOnly,
),
),
}
func spanLiveSet(sampled bool) attribute.Set {
key := spanLiveSetKey{sampled: sampled}
return spanLiveSetCache[key]
}
```
##### Benchmarking
Always provide benchmarks when introducing or refactoring instrumentation.
Demonstrate the impact (allocs/op, B/op, ns/op) in enabled/disabled scenarios:
```go
func BenchmarkExportSpans(b *testing.B) {
scenarios := []struct {
name string
obsEnabled bool
}{
{"ObsDisabled", false},
{"ObsEnabled", true},
}
for _, scenario := range scenarios {
b.Run(scenario.name, func(b *testing.B) {
b.Setenv(
"OTEL_GO_X_OBSERVABILITY",
strconv.FormatBool(scenario.obsEnabled),
)
exporter := NewExporter()
spans := generateTestSpans(100)
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
_ = exporter.ExportSpans(context.Background(), spans)
}
})
}
}
```
#### Error Handling and Robustness
Errors should be reported back to the caller if possible, and partial failures should be handled as gracefully as possible.
```go
func newInstrumentation() (*instrumentation, error) {
if !x.Observability.Enabled() {
return nil, nil
}
m := otel.GetMeterProvider().Meter(/* initialize meter */)
counter, err := otelconv.NewSDKComponentCounter(m)
// Use the partially initialized counter if available.
i := &instrumentation{counter: counter}
// Return any error to the caller.
return i, err
}
```
```go
// ❌ Avoid this pattern.
func newInstrumentation() *instrumentation {
if !x.Observability.Enabled() {
return nil
}
m := otel.GetMeterProvider().Meter(/* initialize meter */)
counter, err := otelconv.NewSDKComponentCounter(m)
if err != nil {
// ❌ Do not dump the error to the OTel Handler. Return it to the
// caller.
otel.Handle(err)
// ❌ Do not return nil if we can still use the partially initialized
// counter.
return nil
}
return &instrumentation{counter: counter}
}
```
If the instrumented component cannot report the error to the user, let it report the error to `otel.Handle`.
#### Context Propagation
Ensure observability measurements receive the correct context, especially for trace exemplars and distributed context:
```go
func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
// Use the provided context for observability measurements
e.inst.recordSpanExportStarted(ctx, len(spans))
err := e.doExport(ctx, spans)
if err != nil {
e.inst.recordSpanExportFailed(ctx, len(spans), err)
} else {
e.inst.recordSpanExportSucceeded(ctx, len(spans))
}
return err
}
```
```go
// ❌ Avoid this pattern.
func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
// ❌ Do not break the context propagation.
e.inst.recordSpanExportStarted(context.Background(), len(spans))
err := e.doExport(ctx, spans)
/* ... */
return err
}
```
#### Semantic Conventions Compliance
All observability metrics should follow the [OpenTelemetry Semantic Conventions for SDK metrics](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/otel/sdk-metrics.md).
Use the metric semantic conventions convenience package [otelconv](./semconv/v1.40.0/otelconv/metric.go).
##### Component Identification
Component names and types should follow [semantic convention](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/registry/attributes/otel.md#otel-component-attributes).
If a component is not a well-known type specified in the semantic conventions, use the package path scope type as a stable identifier.
```go
componentType := "go.opentelemetry.io/otel/sdk/trace.Span"
```
```go
// ❌ Do not do this.
componentType := "trace-span"
```
The component name should be a stable unique identifier for the specific instance of the component.
Use a global counter to ensure uniqueness if necessary.
```go
// Unique 0-based ID counter for component instances.
var componentIDCounter atomic.Int64
// nextID returns the next unique ID for a component.
func nextID() int64 {
return componentIDCounter.Add(1) - 1
}
// componentName returns a unique name for the component instance.
func componentName() attribute.KeyValue {
id := nextID()
name := fmt.Sprintf("%s/%d", componentType, id)
return semconv.OTelComponentName(name)
}
```
The component ID will need to be resettable for deterministic testing.
If tests are in a different package than the component being tested (i.e. a `<component package>_test` package name), use a generated `counter` internal package to manage the counter.
See [stdouttrace exporter example](./exporters/stdout/stdouttrace/internal/gen.go) for reference.
#### Testing
Use deterministic testing with isolated state:
```go
func TestObservability(t *testing.T) {
// Restore state after test to ensure this does not affect other tests.
prev := otel.GetMeterProvider()
t.Cleanup(func() { otel.SetMeterProvider(prev) })
// Isolate the meter provider for deterministic testing
reader := metric.NewManualReader()
meterProvider := metric.NewMeterProvider(metric.WithReader(reader))
otel.SetMeterProvider(meterProvider)
// Use t.Setenv to ensure environment variable is restored after test.
t.Setenv("OTEL_GO_X_OBSERVABILITY", "true")
// Reset component ID counter to ensure deterministic component names.
componentIDCounter.Store(0)
/* ... test code ... */
}
```
Test order should not affect results.
Ensure that any global state (e.g. component ID counters) is reset between tests.
## Approvers and Maintainers
### Maintainers
@@ -696,7 +1130,6 @@ For more information about the approver role, see the [community repository](htt
### Triagers
- [Alex Kats](https://github.com/akats7), Capital One
- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager).
@@ -704,6 +1137,7 @@ For more information about the triager role, see the [community repository](http
- [Aaron Clawson](https://github.com/MadVikingGod)
- [Anthony Mirabella](https://github.com/Aneurysm9)
- [Cheng-Zhen Yang](https://github.com/scorpionknifes)
- [Chester Cheung](https://github.com/hanyuancheung)
- [Evan Torrie](https://github.com/evantorrie)
- [Gustavo Silva Paiva](https://github.com/paivagustavo)

View File

@@ -146,11 +146,12 @@ build-tests/%:
# Tests
TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe
TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe test-fuzz
.PHONY: $(TEST_TARGETS) test
test-default test-race: ARGS=-race
test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
test-short: ARGS=-short
test-fuzz: ARGS=-fuzztime=10s -fuzz
test-verbose: ARGS=-v -race
test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race
test-concurrent-safe: TIMEOUT=120
@@ -184,11 +185,10 @@ test-coverage: $(GOCOVMERGE)
.PHONY: benchmark
benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%)
benchmark/%:
@echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \
&& cd $* \
&& $(GO) list ./... \
| grep -v third_party \
| xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=.
cd $* && $(GO) test -run='^$$' -bench=. $(ARGS) ./...
print-sharded-benchmarks:
@echo $(OTEL_GO_MOD_DIRS) | jq -cR 'split(" ")'
.PHONY: golangci-lint golangci-lint-fix
golangci-lint-fix: ARGS=--fix
@@ -214,7 +214,7 @@ go-mod-tidy/%: crosslink
&& $(GO) mod tidy -compat=1.21
.PHONY: lint
lint: misspell go-mod-tidy golangci-lint govulncheck
lint: misspell go-mod-tidy golangci-lint
.PHONY: vanity-import-check
vanity-import-check: $(PORTO)

View File

@@ -53,27 +53,20 @@ Currently, this project supports the following environments.
| OS | Go Version | Architecture |
|----------|------------|--------------|
| Ubuntu | 1.26 | amd64 |
| Ubuntu | 1.25 | amd64 |
| Ubuntu | 1.24 | amd64 |
| Ubuntu | 1.23 | amd64 |
| Ubuntu | 1.26 | 386 |
| Ubuntu | 1.25 | 386 |
| Ubuntu | 1.24 | 386 |
| Ubuntu | 1.23 | 386 |
| Ubuntu | 1.26 | arm64 |
| Ubuntu | 1.25 | arm64 |
| Ubuntu | 1.24 | arm64 |
| Ubuntu | 1.23 | arm64 |
| macOS 13 | 1.25 | amd64 |
| macOS 13 | 1.24 | amd64 |
| macOS 13 | 1.23 | amd64 |
| macOS | 1.26 | amd64 |
| macOS | 1.25 | amd64 |
| macOS | 1.26 | arm64 |
| macOS | 1.25 | arm64 |
| macOS | 1.24 | arm64 |
| macOS | 1.23 | arm64 |
| Windows | 1.26 | amd64 |
| Windows | 1.25 | amd64 |
| Windows | 1.24 | amd64 |
| Windows | 1.23 | amd64 |
| Windows | 1.26 | 386 |
| Windows | 1.25 | 386 |
| Windows | 1.24 | 386 |
| Windows | 1.23 | 386 |
While this project should work for other systems, no compatibility guarantees
are made for those systems currently.

View File

@@ -24,7 +24,7 @@ Ensure things look correct before submitting a pull request to include the addit
## Breaking changes validation
You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API.
You can run `make gorelease` which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes made in the public API.
You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
@@ -62,7 +62,7 @@ Update go.mod for submodules to depend on the new release which will happen in t
```
3. Update the [Changelog](./CHANGELOG.md).
- Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
- Make sure all relevant changes for this release are included and are written in language that non-contributors to the project can understand.
To verify this, you can look directly at the commits since the `<last tag>`.
```
@@ -107,33 +107,49 @@ It is critical you make sure the version you push upstream is correct.
...
```
## Sign artifacts
To ensure we comply with CNCF best practices, we need to sign the release artifacts.
Download the `.tar.gz` and `.zip` archives from the [tags page](https://github.com/open-telemetry/opentelemetry-go/tags) for the new release tag.
Both archives need to be signed with your GPG key.
You can use [this script] to verify the contents of the archives before signing them.
To find your GPG key ID, run:
```terminal
gpg --list-secret-keys --keyid-format=long
```
The key ID is the 16-character string after `sec rsa4096/` (or similar).
Set environment variables and sign both artifacts:
```terminal
export VERSION="<version>" # e.g., v1.32.0
export KEY_ID="<your-gpg-key-id>"
gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.tar.gz
gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.zip
```
You can verify the signatures with:
```terminal
gpg --verify opentelemetry-go-$VERSION.tar.gz.asc opentelemetry-go-$VERSION.tar.gz
gpg --verify opentelemetry-go-$VERSION.zip.asc opentelemetry-go-$VERSION.zip
```
[this script]: https://github.com/MrAlias/attest-sh
## Release
Finally create a Release for the new `<new tag>` on GitHub.
The release body should include all the release notes from the Changelog for this release.
### Sign the Release Artifact
To ensure we comply with CNCF best practices, we need to sign the release artifact.
The tarball attached to the GitHub release needs to be signed with your GPG key.
Follow [these steps] to sign the release artifact and upload it to GitHub.
You can use [this script] to verify the contents of the tarball before signing it.
Be sure to use the correct GPG key when signing the release artifact.
```terminal
gpg --local-user <key-id> --armor --detach-sign opentelemetry-go-<version>.tar.gz
```
You can verify the signature with:
```terminal
gpg --verify opentelemetry-go-<version>.tar.gz.asc opentelemetry-go-<version>.tar.gz
```
[these steps]: https://wiki.debian.org/Creating%20signed%20GitHub%20releases
[this script]: https://github.com/MrAlias/attest-sh
***IMPORTANT***: GitHub Releases are immutable once created.
You must upload the signed artifacts (`.tar.gz`, `.tar.gz.asc`, `.zip`, and `.zip.asc`) when creating the release, as they cannot be added or modified later.
## Post-Release
@@ -160,14 +176,6 @@ This helps track what changes were included in each release.
Once all related issues and PRs have been added to the milestone, close the milestone.
### Demo Repository
Bump the dependencies in the following Go services:
- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting)
- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout)
- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog)
### Close the `Version Release` issue
Once the todo list in the `Version Release` issue is complete, close the issue.

View File

@@ -83,7 +83,7 @@ is designed so the following goals can be achieved.
in either the module path or the import path.
* In addition to public APIs, telemetry produced by stable instrumentation
will remain stable and backwards compatible. This is to avoid breaking
alerts and dashboard.
alerts and dashboards.
* Modules will be used to encapsulate instrumentation, detectors, exporters,
propagators, and any other independent sets of related components.
* Experimental modules still under active development will be versioned at

View File

@@ -16,7 +16,7 @@ type (
// set into a wire representation.
Encoder interface {
// Encode returns the serialized encoding of the attribute set using
// its Iterator. This result may be cached by an attribute.Set.
Encode(iterator Iterator) string
// ID returns a value that is unique for each class of attribute

92
vendor/go.opentelemetry.io/otel/attribute/hash.go generated vendored Normal file
View File

@@ -0,0 +1,92 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package attribute // import "go.opentelemetry.io/otel/attribute"
import (
"fmt"
"reflect"
"go.opentelemetry.io/otel/attribute/internal/xxhash"
)
// Type identifiers. These identifiers are hashed before the value of the
// corresponding type. This is done to distinguish values that are hashed with
// the same value representation (e.g. `int64(1)` and `true`, []int64{0} and
// int64(0)).
//
// These are all 8 byte length strings converted to a uint64 representation. A
// uint64 is used instead of the string directly as an optimization, it avoids
// the for loop in [xxhash] which adds minor overhead.
const (
boolID uint64 = 7953749933313450591 // "_boolean" (little endian)
int64ID uint64 = 7592915492740740150 // "64_bit_i" (little endian)
float64ID uint64 = 7376742710626956342 // "64_bit_f" (little endian)
stringID uint64 = 6874584755375207263 // "_string_" (little endian)
boolSliceID uint64 = 6875993255270243167 // "_[]bool_" (little endian)
int64SliceID uint64 = 3762322556277578591 // "_[]int64" (little endian)
float64SliceID uint64 = 7308324551835016539 // "[]double" (little endian)
stringSliceID uint64 = 7453010373645655387 // "[]string" (little endian)
)
// hashKVs returns a new xxHash64 hash of kvs.
//
// The hash folds in every key-value pair in order, so two slices with the
// same pairs in the same order produce the same hash.
func hashKVs(kvs []KeyValue) uint64 {
	h := xxhash.New()
	for i := range kvs {
		h = hashKV(h, kvs[i])
	}
	return h.Sum64()
}
// hashKV returns the xxHash64 hash of kv with h as the base.
//
// The key is hashed first, then a per-type identifier, then the value
// itself. The type identifier prevents collisions between values with the
// same bit representation but different types (e.g. int64(1) and true).
func hashKV(h xxhash.Hash, kv KeyValue) xxhash.Hash {
	h = h.String(string(kv.Key))
	switch kv.Value.Type() {
	case BOOL:
		h = h.Uint64(boolID)
		h = h.Uint64(kv.Value.numeric)
	case INT64:
		h = h.Uint64(int64ID)
		h = h.Uint64(kv.Value.numeric)
	case FLOAT64:
		h = h.Uint64(float64ID)
		// Assumes numeric stored with math.Float64bits.
		h = h.Uint64(kv.Value.numeric)
	case STRING:
		h = h.Uint64(stringID)
		h = h.String(kv.Value.stringly)
	case BOOLSLICE:
		h = h.Uint64(boolSliceID)
		// Slice values are stored as arrays inside an any; reflection is
		// required to iterate their elements.
		rv := reflect.ValueOf(kv.Value.slice)
		for i := 0; i < rv.Len(); i++ {
			h = h.Bool(rv.Index(i).Bool())
		}
	case INT64SLICE:
		h = h.Uint64(int64SliceID)
		rv := reflect.ValueOf(kv.Value.slice)
		for i := 0; i < rv.Len(); i++ {
			h = h.Int64(rv.Index(i).Int())
		}
	case FLOAT64SLICE:
		h = h.Uint64(float64SliceID)
		rv := reflect.ValueOf(kv.Value.slice)
		for i := 0; i < rv.Len(); i++ {
			h = h.Float64(rv.Index(i).Float())
		}
	case STRINGSLICE:
		h = h.Uint64(stringSliceID)
		rv := reflect.ValueOf(kv.Value.slice)
		for i := 0; i < rv.Len(); i++ {
			h = h.String(rv.Index(i).String())
		}
	case INVALID:
		// An invalid value contributes only its key to the hash.
	default:
		// Logging is an alternative, but using the internal logger here
		// causes an import cycle so it is not done.
		v := kv.Value.AsInterface()
		msg := fmt.Sprintf("unknown value type: %[1]v (%[1]T)", v)
		panic(msg)
	}
	return h
}

View File

@@ -13,32 +13,28 @@ import (
// BoolSliceValue converts a bool slice into an array with same elements as slice.
func BoolSliceValue(v []bool) any {
var zero bool
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[bool]())).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
return cp.Interface()
}
// Int64SliceValue converts an int64 slice into an array with same elements as slice.
func Int64SliceValue(v []int64) any {
var zero int64
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[int64]())).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
return cp.Interface()
}
// Float64SliceValue converts a float64 slice into an array with same elements as slice.
func Float64SliceValue(v []float64) any {
var zero float64
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[float64]())).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
return cp.Interface()
}
// StringSliceValue converts a string slice into an array with same elements as slice.
func StringSliceValue(v []string) any {
var zero string
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[string]())).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
return cp.Interface()
}

View File

@@ -0,0 +1,64 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package xxhash provides a wrapper around the xxhash library for attribute hashing.
package xxhash // import "go.opentelemetry.io/otel/attribute/internal/xxhash"
import (
"encoding/binary"
"math"
"github.com/cespare/xxhash/v2"
)
// Hash wraps xxhash.Digest to provide an API friendly for hashing attribute values.
//
// Hash is a small value type holding a pointer to the underlying digest;
// copies of a Hash share the same digest state. Methods return the receiver
// so calls can be chained.
type Hash struct {
	d *xxhash.Digest
}
// New returns a new initialized xxHash64 hasher.
func New() Hash {
	var h Hash
	h.d = xxhash.New()
	return h
}
// Uint64 folds val into the hash as 8 little-endian bytes and returns the
// updated Hash.
func (h Hash) Uint64(val uint64) Hash {
	var b [8]byte
	binary.LittleEndian.PutUint64(b[:], val)
	// xxhash's Write never returns a non-nil error; panic if that ever
	// changes so the failure is not silently ignored.
	if _, err := h.d.Write(b[:]); err != nil {
		panic("xxhash write of uint64 failed: " + err.Error())
	}
	return h
}
// Bool folds val into the hash as a uint64 (1 for true, 0 for false) and
// returns the updated Hash.
func (h Hash) Bool(val bool) Hash { // nolint:revive // This is a hashing function.
	var v uint64
	if val {
		v = 1
	}
	return h.Uint64(v)
}
// Float64 folds val into the hash using its IEEE 754 bit pattern and returns
// the updated Hash.
func (h Hash) Float64(val float64) Hash {
	return h.Uint64(math.Float64bits(val))
}
// Int64 folds val into the hash via a bit-preserving uint64 conversion and
// returns the updated Hash.
func (h Hash) Int64(val int64) Hash {
	return h.Uint64(uint64(val)) // nolint:gosec // Overflow doesn't matter since we are hashing.
}
// String folds the bytes of val into the hash and returns the updated Hash.
func (h Hash) String(val string) Hash {
	// xxhash's WriteString never returns a non-nil error; panic if that
	// ever changes so the failure is not silently ignored.
	if _, err := h.d.WriteString(val); err != nil {
		panic("xxhash write of string failed: " + err.Error())
	}
	return h
}
// Sum64 returns the current hash value.
//
// It reports the hash of everything folded in so far; more values may still
// be added afterward.
func (h Hash) Sum64() uint64 {
	return h.d.Sum64()
}

View File

@@ -9,6 +9,8 @@ import (
"reflect"
"slices"
"sort"
"go.opentelemetry.io/otel/attribute/internal/xxhash"
)
type (
@@ -23,19 +25,19 @@ type (
// the Equals method to ensure stable equivalence checking.
//
// Users should also use the Distinct returned from Equivalent as a map key
// instead of a Set directly. In addition to that type providing guarantees
// on stable equivalence, it may also provide performance improvements.
// instead of a Set directly. Set has relatively poor performance when used
// as a map key compared to Distinct.
Set struct {
equivalent Distinct
hash uint64
data any
}
// Distinct is a unique identifier of a Set.
// Distinct is an identifier of a Set which is very likely to be unique.
//
// Distinct is designed to ensure equivalence stability: comparisons will
// return the same value across versions. For this reason, Distinct should
// always be used as a map key instead of a Set.
// Distinct should be used as a map key instead of a Set to provide better
// performance for map operations.
Distinct struct {
iface any
hash uint64
}
// Sortable implements sort.Interface, used for sorting KeyValue.
@@ -46,15 +48,34 @@ type (
Sortable []KeyValue
)
// Compile time check these types remain comparable.
var (
_ = isComparable(Set{})
_ = isComparable(Distinct{})
)
func isComparable[T comparable](t T) T { return t }
var (
// keyValueType is used in computeDistinctReflect.
keyValueType = reflect.TypeOf(KeyValue{})
keyValueType = reflect.TypeFor[KeyValue]()
// emptySet is returned for empty attribute sets.
emptySet = &Set{
equivalent: Distinct{
iface: [0]KeyValue{},
},
// emptyHash is the hash of an empty set.
emptyHash = xxhash.New().Sum64()
// userDefinedEmptySet is an empty set. It was mistakenly exposed to users
// as something they can assign to, so it must remain addressable and
// mutable.
//
// This is kept for backwards compatibility, but should not be used in new code.
userDefinedEmptySet = &Set{
hash: emptyHash,
data: [0]KeyValue{},
}
emptySet = Set{
hash: emptyHash,
data: [0]KeyValue{},
}
)
@@ -62,33 +83,35 @@ var (
//
// This is a convenience provided for optimized calling utility.
func EmptySet() *Set {
return emptySet
}
// reflectValue abbreviates reflect.ValueOf(d).
func (d Distinct) reflectValue() reflect.Value {
return reflect.ValueOf(d.iface)
// Continue to return the pointer to the user-defined empty set for
// backwards-compatibility.
//
// New code should not use this, instead use emptySet.
return userDefinedEmptySet
}
// Valid reports whether this value refers to a valid Set.
func (d Distinct) Valid() bool {
return d.iface != nil
func (d Distinct) Valid() bool { return d.hash != 0 }
// reflectValue abbreviates reflect.ValueOf(d).
func (l Set) reflectValue() reflect.Value {
return reflect.ValueOf(l.data)
}
// Len returns the number of attributes in this set.
func (l *Set) Len() int {
if l == nil || !l.equivalent.Valid() {
if l == nil || l.hash == 0 {
return 0
}
return l.equivalent.reflectValue().Len()
return l.reflectValue().Len()
}
// Get returns the KeyValue at ordered position idx in this set.
func (l *Set) Get(idx int) (KeyValue, bool) {
if l == nil || !l.equivalent.Valid() {
if l == nil || l.hash == 0 {
return KeyValue{}, false
}
value := l.equivalent.reflectValue()
value := l.reflectValue()
if idx >= 0 && idx < value.Len() {
// Note: The Go compiler successfully avoids an allocation for
@@ -101,10 +124,10 @@ func (l *Set) Get(idx int) (KeyValue, bool) {
// Value returns the value of a specified key in this set.
func (l *Set) Value(k Key) (Value, bool) {
if l == nil || !l.equivalent.Valid() {
if l == nil || l.hash == 0 {
return Value{}, false
}
rValue := l.equivalent.reflectValue()
rValue := l.reflectValue()
vlen := rValue.Len()
idx := sort.Search(vlen, func(idx int) bool {
@@ -144,20 +167,29 @@ func (l *Set) ToSlice() []KeyValue {
return iter.ToSlice()
}
// Equivalent returns a value that may be used as a map key. The Distinct type
// guarantees that the result will equal the equivalent. Distinct value of any
// Equivalent returns a value that may be used as a map key. Equal Distinct
// values are very likely to be equivalent attribute Sets. Distinct value of any
// attribute set with the same elements as this, where sets are made unique by
// choosing the last value in the input for any given key.
func (l *Set) Equivalent() Distinct {
if l == nil || !l.equivalent.Valid() {
return emptySet.equivalent
if l == nil || l.hash == 0 {
return Distinct{hash: emptySet.hash}
}
return l.equivalent
return Distinct{hash: l.hash}
}
// Equals reports whether the argument set is equivalent to this set.
func (l *Set) Equals(o *Set) bool {
return l.Equivalent() == o.Equivalent()
if l.Equivalent() != o.Equivalent() {
return false
}
if l == nil || l.hash == 0 {
l = &emptySet
}
if o == nil || o.hash == 0 {
o = &emptySet
}
return l.data == o.data
}
// Encoded returns the encoded form of this set, according to encoder.
@@ -169,12 +201,6 @@ func (l *Set) Encoded(encoder Encoder) string {
return encoder.Encode(l.Iter())
}
func empty() Set {
return Set{
equivalent: emptySet.equivalent,
}
}
// NewSet returns a new Set. See the documentation for
// NewSetWithSortableFiltered for more details.
//
@@ -204,7 +230,7 @@ func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set {
func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
// Check for empty set.
if len(kvs) == 0 {
return empty(), nil
return emptySet, nil
}
// Stable sort so the following de-duplication can implement
@@ -233,10 +259,10 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
if filter != nil {
if div := filteredToFront(kvs, filter); div != 0 {
return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div]
return newSet(kvs[div:]), kvs[:div]
}
}
return Set{equivalent: computeDistinct(kvs)}, nil
return newSet(kvs), nil
}
// NewSetWithSortableFiltered returns a new Set.
@@ -316,7 +342,7 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) {
if first == 0 {
// It is safe to assume len(slice) >= 1 given we found at least one
// attribute above that needs to be filtered out.
return Set{equivalent: computeDistinct(slice[1:])}, slice[:1]
return newSet(slice[1:]), slice[:1]
}
// Move the filtered slice[first] to the front (preserving order).
@@ -326,25 +352,24 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) {
// Do not re-evaluate re(slice[first+1:]).
div := filteredToFront(slice[1:first+1], re) + 1
return Set{equivalent: computeDistinct(slice[div:])}, slice[:div]
return newSet(slice[div:]), slice[:div]
}
// computeDistinct returns a Distinct using either the fixed- or
// reflect-oriented code path, depending on the size of the input. The input
// slice is assumed to already be sorted and de-duplicated.
func computeDistinct(kvs []KeyValue) Distinct {
iface := computeDistinctFixed(kvs)
if iface == nil {
iface = computeDistinctReflect(kvs)
// newSet returns a new set based on the sorted and uniqued kvs.
func newSet(kvs []KeyValue) Set {
s := Set{
hash: hashKVs(kvs),
data: computeDataFixed(kvs),
}
return Distinct{
iface: iface,
if s.data == nil {
s.data = computeDataReflect(kvs)
}
return s
}
// computeDistinctFixed computes a Distinct for small slices. It returns nil
// if the input is too large for this code path.
func computeDistinctFixed(kvs []KeyValue) any {
// computeDataFixed computes a Set data for small slices. It returns nil if the
// input is too large for this code path.
func computeDataFixed(kvs []KeyValue) any {
switch len(kvs) {
case 1:
return [1]KeyValue(kvs)
@@ -371,9 +396,9 @@ func computeDistinctFixed(kvs []KeyValue) any {
}
}
// computeDistinctReflect computes a Distinct using reflection, works for any
// size input.
func computeDistinctReflect(kvs []KeyValue) any {
// computeDataReflect computes a Set data using reflection, works for any size
// input.
func computeDataReflect(kvs []KeyValue) any {
at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
for i, keyValue := range kvs {
*(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
@@ -383,7 +408,7 @@ func computeDistinctReflect(kvs []KeyValue) any {
// MarshalJSON returns the JSON encoding of the Set.
func (l *Set) MarshalJSON() ([]byte, error) {
return json.Marshal(l.equivalent.iface)
return json.Marshal(l.data)
}
// MarshalLog is the marshaling function used by the logging system to represent this Set.

View File

@@ -24,8 +24,9 @@ const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICE
var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71}
func (i Type) String() string {
if i < 0 || i >= Type(len(_Type_index)-1) {
idx := int(i) - 0
if i < 0 || idx >= len(_Type_index)-1 {
return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _Type_name[_Type_index[i]:_Type_index[i+1]]
return _Type_name[_Type_index[idx]:_Type_index[idx+1]]
}

View File

@@ -66,8 +66,7 @@ func IntValue(v int) Value {
// IntSliceValue creates an INTSLICE Value.
func IntSliceValue(v []int) Value {
var int64Val int64
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val)))
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[int64]()))
for i, val := range v {
cp.Elem().Index(i).SetInt(int64(val))
}

View File

@@ -14,8 +14,7 @@ import (
)
const (
maxMembers = 180
maxBytesPerMembers = 4096
maxMembers = 64
maxBytesPerBaggageString = 8192
listDelimiter = ","
@@ -29,7 +28,6 @@ var (
errInvalidProperty = errors.New("invalid baggage list-member property")
errInvalidMember = errors.New("invalid baggage list-member")
errMemberNumber = errors.New("too many list-members in baggage-string")
errMemberBytes = errors.New("list-member too large")
errBaggageBytes = errors.New("baggage-string too large")
)
@@ -309,15 +307,11 @@ func newInvalidMember() Member {
// an error if the input is invalid according to the W3C Baggage
// specification.
func parseMember(member string) (Member, error) {
if n := len(member); n > maxBytesPerMembers {
return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
}
var props properties
keyValue, properties, found := strings.Cut(member, propertyDelimiter)
if found {
// Parse the member properties.
for _, pStr := range strings.Split(properties, propertyDelimiter) {
for pStr := range strings.SplitSeq(properties, propertyDelimiter) {
p, err := parseProperty(pStr)
if err != nil {
return newInvalidMember(), err
@@ -430,6 +424,10 @@ type Baggage struct { //nolint:golint
// New returns a new valid Baggage. It returns an error if it results in a
// Baggage exceeding limits set in that specification.
//
// If the resulting Baggage exceeds the maximum allowed members or bytes,
// members are dropped until the limits are satisfied and an error is returned
// along with the partial result.
//
// It expects all the provided members to have already been validated.
func New(members ...Member) (Baggage, error) {
if len(members) == 0 {
@@ -441,7 +439,6 @@ func New(members ...Member) (Baggage, error) {
if !m.hasData {
return Baggage{}, errInvalidMember
}
// OpenTelemetry resolves duplicates by last-one-wins.
b[m.key] = baggage.Item{
Value: m.value,
@@ -449,17 +446,42 @@ func New(members ...Member) (Baggage, error) {
}
}
// Check member numbers after deduplication.
var truncateErr error
// Check member count after deduplication.
if len(b) > maxMembers {
return Baggage{}, errMemberNumber
truncateErr = errors.Join(truncateErr, errMemberNumber)
for k := range b {
if len(b) <= maxMembers {
break
}
delete(b, k)
}
}
bag := Baggage{b}
if n := len(bag.String()); n > maxBytesPerBaggageString {
return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
// Check byte size and drop members if necessary.
totalBytes := 0
first := true
for k := range b {
m := Member{
key: k,
value: b[k].Value,
properties: fromInternalProperties(b[k].Properties),
}
memberSize := len(m.String())
if !first {
memberSize++ // comma separator
}
if totalBytes+memberSize > maxBytesPerBaggageString {
truncateErr = errors.Join(truncateErr, fmt.Errorf("%w: %d", errBaggageBytes, totalBytes+memberSize))
delete(b, k)
continue
}
totalBytes += memberSize
first = false
}
return bag, nil
return Baggage{b}, truncateErr
}
// Parse attempts to decode a baggage-string from the passed string. It
@@ -470,36 +492,71 @@ func New(members ...Member) (Baggage, error) {
// defined (reading left-to-right) will be the only one kept. This diverges
// from the W3C Baggage specification which allows duplicate list-members, but
// conforms to the OpenTelemetry Baggage specification.
//
// If the baggage-string exceeds the maximum allowed members (64) or bytes
// (8192), members are dropped until the limits are satisfied and an error is
// returned along with the partial result.
//
// Invalid members are skipped and the error is returned along with the
// partial result containing the valid members.
func Parse(bStr string) (Baggage, error) {
if bStr == "" {
return Baggage{}, nil
}
if n := len(bStr); n > maxBytesPerBaggageString {
return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
}
b := make(baggage.List)
for _, memberStr := range strings.Split(bStr, listDelimiter) {
sizes := make(map[string]int) // Track per-key byte sizes
var totalBytes int
var truncateErr error
for memberStr := range strings.SplitSeq(bStr, listDelimiter) {
// Check member count limit.
if len(b) >= maxMembers {
truncateErr = errors.Join(truncateErr, errMemberNumber)
break
}
m, err := parseMember(memberStr)
if err != nil {
return Baggage{}, err
truncateErr = errors.Join(truncateErr, err)
continue // skip invalid member, keep processing
}
// Check byte size limit.
// Account for comma separator between members.
memberBytes := len(m.String())
_, existingKey := b[m.key]
if !existingKey && len(b) > 0 {
memberBytes++ // comma separator only for new keys
}
// Calculate new totalBytes if we add/overwrite this key
var newTotalBytes int
if oldSize, exists := sizes[m.key]; exists {
// Overwriting existing key: subtract old size, add new size
newTotalBytes = totalBytes - oldSize + memberBytes
} else {
// New key
newTotalBytes = totalBytes + memberBytes
}
if newTotalBytes > maxBytesPerBaggageString {
truncateErr = errors.Join(truncateErr, errBaggageBytes)
break
}
// OpenTelemetry resolves duplicates by last-one-wins.
b[m.key] = baggage.Item{
Value: m.value,
Properties: m.properties.asInternal(),
}
sizes[m.key] = memberBytes
totalBytes = newTotalBytes
}
// OpenTelemetry does not allow for duplicate list-members, but the W3C
// specification does. Now that we have deduplicated, ensure the baggage
// does not exceed list-member limits.
if len(b) > maxMembers {
return Baggage{}, errMemberNumber
if len(b) == 0 {
return Baggage{}, truncateErr
}
return Baggage{b}, nil
return Baggage{b}, truncateErr
}
// Member returns the baggage list-member identified by key.
@@ -648,7 +705,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
// If we couldn't find any valid key character,
// it means the key is either empty or invalid.
if keyStart == keyEnd {
return
return p, ok
}
// Skip spaces after the key: " key< >= value ".
@@ -658,13 +715,13 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
// A key can have no value, like: " key ".
ok = true
p.key = s[keyStart:keyEnd]
return
return p, ok
}
// If we have not reached the end and we can't find the '=' delimiter,
// it means the property is invalid.
if s[index] != keyValueDelimiter[0] {
return
return p, ok
}
// Attempting to parse the value.
@@ -690,14 +747,14 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
// we have not reached the end, it means the property is
// invalid, something like: " key = value value1".
if index != len(s) {
return
return p, ok
}
// Decode a percent-encoded value.
rawVal := s[valueStart:valueEnd]
unescapeVal, err := url.PathUnescape(rawVal)
if err != nil {
return
return p, ok
}
value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
@@ -706,7 +763,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
p.hasValue = true
p.value = value
return
return p, ok
}
func skipSpace(s string, offset int) int {

View File

@@ -1,4 +1,4 @@
# This is a renovate-friendly source of Docker images.
FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python
FROM otel/weaver:v0.17.1@sha256:32523b5e44fb44418786347e9f7dde187d8797adb6d57a2ee99c245346c3cdfe AS weaver
FROM otel/weaver:v0.21.2@sha256:2401de985c38bdb98b43918e2f43aa36b2afed4aa5669ac1c1de0a17301cd36d AS weaver
FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown

View File

@@ -0,0 +1,96 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package errorhandler provides the global error handler for OpenTelemetry.
//
// This package has no OTel dependencies, allowing it to be imported by any
// package in the module without creating import cycles.
package errorhandler // import "go.opentelemetry.io/otel/internal/errorhandler"
import (
"errors"
"log"
"sync"
"sync/atomic"
)
// ErrorHandler handles irremediable events.
type ErrorHandler interface {
// Handle handles any error deemed irremediable by an OpenTelemetry
// component.
Handle(error)
}
type ErrDelegator struct {
delegate atomic.Pointer[ErrorHandler]
}
// Compile-time check that delegator implements ErrorHandler.
var _ ErrorHandler = (*ErrDelegator)(nil)
func (d *ErrDelegator) Handle(err error) {
if eh := d.delegate.Load(); eh != nil {
(*eh).Handle(err)
return
}
log.Print(err)
}
// setDelegate sets the ErrorHandler delegate.
func (d *ErrDelegator) setDelegate(eh ErrorHandler) {
d.delegate.Store(&eh)
}
type errorHandlerHolder struct {
eh ErrorHandler
}
var (
globalErrorHandler = defaultErrorHandler()
delegateErrorHandlerOnce sync.Once
)
// GetErrorHandler returns the global ErrorHandler instance.
//
// The default ErrorHandler instance returned will log all errors to STDERR
// until an override ErrorHandler is set with SetErrorHandler. All
// ErrorHandler returned prior to this will automatically forward errors to
// the set instance instead of logging.
//
// Subsequent calls to SetErrorHandler after the first will not forward errors
// to the new ErrorHandler for prior returned instances.
func GetErrorHandler() ErrorHandler {
return globalErrorHandler.Load().(errorHandlerHolder).eh
}
// SetErrorHandler sets the global ErrorHandler to h.
//
// The first time this is called all ErrorHandler previously returned from
// GetErrorHandler will send errors to h instead of the default logging
// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
// delegate errors to h.
func SetErrorHandler(h ErrorHandler) {
current := GetErrorHandler()
if _, cOk := current.(*ErrDelegator); cOk {
if _, ehOk := h.(*ErrDelegator); ehOk && current == h {
// Do not assign to the delegate of the default ErrDelegator to be
// itself.
log.Print(errors.New("no ErrorHandler delegate configured"), " ErrorHandler remains its current value.")
return
}
}
delegateErrorHandlerOnce.Do(func() {
if def, ok := current.(*ErrDelegator); ok {
def.setDelegate(h)
}
})
globalErrorHandler.Store(errorHandlerHolder{eh: h})
}
func defaultErrorHandler() *atomic.Value {
v := &atomic.Value{}
v.Store(errorHandlerHolder{eh: &ErrDelegator{}})
return v
}

View File

@@ -5,33 +5,13 @@
package global // import "go.opentelemetry.io/otel/internal/global"
import (
"log"
"sync/atomic"
"go.opentelemetry.io/otel/internal/errorhandler"
)
// ErrorHandler handles irremediable events.
type ErrorHandler interface {
// Handle handles any error deemed irremediable by an OpenTelemetry
// component.
Handle(error)
}
// ErrorHandler is an alias for errorhandler.ErrorHandler, kept for backward
// compatibility with existing callers of internal/global.
type ErrorHandler = errorhandler.ErrorHandler
type ErrDelegator struct {
delegate atomic.Pointer[ErrorHandler]
}
// Compile-time check that delegator implements ErrorHandler.
var _ ErrorHandler = (*ErrDelegator)(nil)
func (d *ErrDelegator) Handle(err error) {
if eh := d.delegate.Load(); eh != nil {
(*eh).Handle(err)
return
}
log.Print(err)
}
// setDelegate sets the ErrorHandler delegate.
func (d *ErrDelegator) setDelegate(eh ErrorHandler) {
d.delegate.Store(&eh)
}
// ErrDelegator is an alias for errorhandler.ErrDelegator, kept for backward
// compatibility with existing callers of internal/global.
type ErrDelegator = errorhandler.ErrDelegator

View File

@@ -229,6 +229,13 @@ func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOpt
}
}
func (i *sfCounter) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64Counter).Enabled(ctx)
}
return false
}
type sfUpDownCounter struct {
embedded.Float64UpDownCounter
@@ -255,6 +262,13 @@ func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.
}
}
func (i *sfUpDownCounter) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64UpDownCounter).Enabled(ctx)
}
return false
}
type sfHistogram struct {
embedded.Float64Histogram
@@ -281,6 +295,13 @@ func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.Reco
}
}
func (i *sfHistogram) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64Histogram).Enabled(ctx)
}
return false
}
type sfGauge struct {
embedded.Float64Gauge
@@ -307,6 +328,13 @@ func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOp
}
}
func (i *sfGauge) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64Gauge).Enabled(ctx)
}
return false
}
type siCounter struct {
embedded.Int64Counter
@@ -333,6 +361,13 @@ func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption)
}
}
func (i *siCounter) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64Counter).Enabled(ctx)
}
return false
}
type siUpDownCounter struct {
embedded.Int64UpDownCounter
@@ -359,6 +394,13 @@ func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOp
}
}
func (i *siUpDownCounter) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64UpDownCounter).Enabled(ctx)
}
return false
}
type siHistogram struct {
embedded.Int64Histogram
@@ -385,6 +427,13 @@ func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.Record
}
}
func (i *siHistogram) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64Histogram).Enabled(ctx)
}
return false
}
type siGauge struct {
embedded.Int64Gauge
@@ -410,3 +459,10 @@ func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOpti
ctr.(metric.Int64Gauge).Record(ctx, x, opts...)
}
}
func (i *siGauge) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64Gauge).Enabled(ctx)
}
return false
}

View File

@@ -105,7 +105,7 @@ type delegatedInstrument interface {
setDelegate(metric.Meter)
}
// instID are the identifying properties of a instrument.
// instID are the identifying properties of an instrument.
type instID struct {
// name is the name of the stream.
name string
@@ -157,7 +157,7 @@ func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption)
cfg := metric.NewInt64CounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*siCounter)(nil)),
kind: reflect.TypeFor[*siCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -183,7 +183,7 @@ func (m *meter) Int64UpDownCounter(
cfg := metric.NewInt64UpDownCounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*siUpDownCounter)(nil)),
kind: reflect.TypeFor[*siUpDownCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -206,7 +206,7 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti
cfg := metric.NewInt64HistogramConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*siHistogram)(nil)),
kind: reflect.TypeFor[*siHistogram](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -229,7 +229,7 @@ func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (met
cfg := metric.NewInt64GaugeConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*siGauge)(nil)),
kind: reflect.TypeFor[*siGauge](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -255,7 +255,7 @@ func (m *meter) Int64ObservableCounter(
cfg := metric.NewInt64ObservableCounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*aiCounter)(nil)),
kind: reflect.TypeFor[*aiCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -281,7 +281,7 @@ func (m *meter) Int64ObservableUpDownCounter(
cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*aiUpDownCounter)(nil)),
kind: reflect.TypeFor[*aiUpDownCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -307,7 +307,7 @@ func (m *meter) Int64ObservableGauge(
cfg := metric.NewInt64ObservableGaugeConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*aiGauge)(nil)),
kind: reflect.TypeFor[*aiGauge](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -330,7 +330,7 @@ func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOpti
cfg := metric.NewFloat64CounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*sfCounter)(nil)),
kind: reflect.TypeFor[*sfCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -356,7 +356,7 @@ func (m *meter) Float64UpDownCounter(
cfg := metric.NewFloat64UpDownCounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*sfUpDownCounter)(nil)),
kind: reflect.TypeFor[*sfUpDownCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -382,7 +382,7 @@ func (m *meter) Float64Histogram(
cfg := metric.NewFloat64HistogramConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*sfHistogram)(nil)),
kind: reflect.TypeFor[*sfHistogram](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -405,7 +405,7 @@ func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption)
cfg := metric.NewFloat64GaugeConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*sfGauge)(nil)),
kind: reflect.TypeFor[*sfGauge](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -431,7 +431,7 @@ func (m *meter) Float64ObservableCounter(
cfg := metric.NewFloat64ObservableCounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*afCounter)(nil)),
kind: reflect.TypeFor[*afCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -457,7 +457,7 @@ func (m *meter) Float64ObservableUpDownCounter(
cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*afUpDownCounter)(nil)),
kind: reflect.TypeFor[*afUpDownCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -483,7 +483,7 @@ func (m *meter) Float64ObservableGauge(
cfg := metric.NewFloat64ObservableGaugeConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*afGauge)(nil)),
kind: reflect.TypeFor[*afGauge](),
description: cfg.Description(),
unit: cfg.Unit(),
}

View File

@@ -8,16 +8,13 @@ import (
"sync"
"sync/atomic"
"go.opentelemetry.io/otel/internal/errorhandler"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
)
type (
errorHandlerHolder struct {
eh ErrorHandler
}
tracerProviderHolder struct {
tp trace.TracerProvider
}
@@ -32,12 +29,10 @@ type (
)
var (
globalErrorHandler = defaultErrorHandler()
globalTracer = defaultTracerValue()
globalPropagators = defaultPropagatorsValue()
globalMeterProvider = defaultMeterProvider()
delegateErrorHandlerOnce sync.Once
delegateTraceOnce sync.Once
delegateTextMapPropagatorOnce sync.Once
delegateMeterOnce sync.Once
@@ -53,7 +48,7 @@ var (
// Subsequent calls to SetErrorHandler after the first will not forward errors
// to the new ErrorHandler for prior returned instances.
func GetErrorHandler() ErrorHandler {
return globalErrorHandler.Load().(errorHandlerHolder).eh
return errorhandler.GetErrorHandler()
}
// SetErrorHandler sets the global ErrorHandler to h.
@@ -63,26 +58,7 @@ func GetErrorHandler() ErrorHandler {
// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
// delegate errors to h.
func SetErrorHandler(h ErrorHandler) {
current := GetErrorHandler()
if _, cOk := current.(*ErrDelegator); cOk {
if _, ehOk := h.(*ErrDelegator); ehOk && current == h {
// Do not assign to the delegate of the default ErrDelegator to be
// itself.
Error(
errors.New("no ErrorHandler delegate configured"),
"ErrorHandler remains its current value.",
)
return
}
}
delegateErrorHandlerOnce.Do(func() {
if def, ok := current.(*ErrDelegator); ok {
def.setDelegate(h)
}
})
globalErrorHandler.Store(errorHandlerHolder{eh: h})
errorhandler.SetErrorHandler(h)
}
// TracerProvider is the internal implementation for global.TracerProvider.
@@ -174,12 +150,6 @@ func SetMeterProvider(mp metric.MeterProvider) {
globalMeterProvider.Store(meterProviderHolder{mp: mp})
}
func defaultErrorHandler() *atomic.Value {
v := &atomic.Value{}
v.Store(errorHandlerHolder{eh: &ErrDelegator{}})
return v
}
func defaultTracerValue() *atomic.Value {
v := &atomic.Value{}
v.Store(tracerProviderHolder{tp: &tracerProvider{}})

View File

@@ -11,7 +11,7 @@ import (
// Meter returns a Meter from the global MeterProvider. The name must be the
// name of the library providing instrumentation. This name may be the same as
// the instrumented code only if that code provides built-in instrumentation.
// If the name is empty, then a implementation defined default name will be
// If the name is empty, then an implementation defined default name will be
// used instead.
//
// If this is called before a global MeterProvider is registered the returned

View File

@@ -211,6 +211,9 @@ type Float64Observer interface {
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Observe(value float64, options ...ObserveOption)
}
@@ -227,7 +230,11 @@ type Float64Observer interface {
// attributes as another Float64Callbacks also registered for the same
// instrument.
//
// The function needs to be concurrent safe.
// The function needs to be reentrant and concurrent safe.
//
// Note that Go's mutexes are not reentrant, and locking a mutex takes
// an indefinite amount of time. It is therefore advised to avoid
// using mutexes inside callbacks.
type Float64Callback func(context.Context, Float64Observer) error
// Float64ObservableOption applies options to float64 Observer instruments.

View File

@@ -210,6 +210,9 @@ type Int64Observer interface {
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Observe(value int64, options ...ObserveOption)
}
@@ -225,7 +228,11 @@ type Int64Observer interface {
// attributes as another Int64Callbacks also registered for the same
// instrument.
//
// The function needs to be concurrent safe.
// The function needs to be reentrant and concurrent safe.
//
// Note that Go's mutexes are not reentrant, and locking a mutex takes
// an indefinite amount of time. It is therefore advised to avoid
// using mutexes inside callbacks.
type Int64Callback func(context.Context, Int64Observer) error
// Int64ObservableOption applies options to int64 Observer instruments.

View File

@@ -3,7 +3,11 @@
package metric // import "go.opentelemetry.io/otel/metric"
import "go.opentelemetry.io/otel/attribute"
import (
"slices"
"go.opentelemetry.io/otel/attribute"
)
// MeterConfig contains options for Meters.
type MeterConfig struct {
@@ -62,12 +66,38 @@ func WithInstrumentationVersion(version string) MeterOption {
})
}
// WithInstrumentationAttributes sets the instrumentation attributes.
// WithInstrumentationAttributes adds the instrumentation attributes.
//
// The passed attributes will be de-duplicated.
// This is equivalent to calling [WithInstrumentationAttributeSet] with an
// [attribute.Set] created from a clone of the passed attributes.
// [WithInstrumentationAttributeSet] is recommended for more control.
//
// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
// options are passed, the attributes will be merged together in the order
// they are passed. Attributes with duplicate keys will use the last value passed.
func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption {
set := attribute.NewSet(slices.Clone(attr)...)
return WithInstrumentationAttributeSet(set)
}
// WithInstrumentationAttributeSet adds the instrumentation attributes.
//
// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
// options are passed, the attributes will be merged together in the order
// they are passed. Attributes with duplicate keys will use the last value passed.
func WithInstrumentationAttributeSet(set attribute.Set) MeterOption {
if set.Len() == 0 {
return meterOptionFunc(func(config MeterConfig) MeterConfig {
return config
})
}
return meterOptionFunc(func(config MeterConfig) MeterConfig {
config.attrs = attribute.NewSet(attr...)
if config.attrs.Len() == 0 {
config.attrs = set
} else {
config.attrs = mergeSets(config.attrs, set)
}
return config
})
}

View File

@@ -30,6 +30,9 @@ type MeterProvider interface {
//
// If the name is empty, then an implementation defined default name will
// be used instead.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Meter(name string, opts ...MeterOption) Meter
}
@@ -51,6 +54,9 @@ type Meter interface {
// The name needs to conform to the OpenTelemetry instrument name syntax.
// See the Instrument Name section of the package documentation for more
// information.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
// Int64UpDownCounter returns a new Int64UpDownCounter instrument
@@ -61,6 +67,9 @@ type Meter interface {
// The name needs to conform to the OpenTelemetry instrument name syntax.
// See the Instrument Name section of the package documentation for more
// information.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
// Int64Histogram returns a new Int64Histogram instrument identified by
@@ -71,6 +80,9 @@ type Meter interface {
// The name needs to conform to the OpenTelemetry instrument name syntax.
// See the Instrument Name section of the package documentation for more
// information.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
// Int64Gauge returns a new Int64Gauge instrument identified by name and
@@ -80,6 +92,9 @@ type Meter interface {
// The name needs to conform to the OpenTelemetry instrument name syntax.
// See the Instrument Name section of the package documentation for more
// information.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error)
// Int64ObservableCounter returns a new Int64ObservableCounter identified
@@ -95,6 +110,9 @@ type Meter interface {
// The name needs to conform to the OpenTelemetry instrument name syntax.
// See the Instrument Name section of the package documentation for more
// information.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
// Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
@@ -110,6 +128,9 @@ type Meter interface {
// The name needs to conform to the OpenTelemetry instrument name syntax.
// See the Instrument Name section of the package documentation for more
// information.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Int64ObservableUpDownCounter(
name string,
options ...Int64ObservableUpDownCounterOption,
@@ -128,6 +149,9 @@ type Meter interface {
// The name needs to conform to the OpenTelemetry instrument name syntax.
// See the Instrument Name section of the package documentation for more
// information.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error)
// Float64Counter returns a new Float64Counter instrument identified by
@@ -148,6 +172,9 @@ type Meter interface {
// The name needs to conform to the OpenTelemetry instrument name syntax.
// See the Instrument Name section of the package documentation for more
// information.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
// Float64Histogram returns a new Float64Histogram instrument identified by
@@ -158,6 +185,9 @@ type Meter interface {
// The name needs to conform to the OpenTelemetry instrument name syntax.
// See the Instrument Name section of the package documentation for more
// information.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
// Float64Gauge returns a new Float64Gauge instrument identified by name and
@@ -167,6 +197,9 @@ type Meter interface {
// The name needs to conform to the OpenTelemetry instrument name syntax.
// See the Instrument Name section of the package documentation for more
// information.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error)
// Float64ObservableCounter returns a new Float64ObservableCounter
@@ -182,6 +215,9 @@ type Meter interface {
// The name needs to conform to the OpenTelemetry instrument name syntax.
// See the Instrument Name section of the package documentation for more
// information.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
// Float64ObservableUpDownCounter returns a new
@@ -197,6 +233,9 @@ type Meter interface {
// The name needs to conform to the OpenTelemetry instrument name syntax.
// See the Instrument Name section of the package documentation for more
// information.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Float64ObservableUpDownCounter(
name string,
options ...Float64ObservableUpDownCounterOption,
@@ -215,6 +254,9 @@ type Meter interface {
// The name needs to conform to the OpenTelemetry instrument name syntax.
// See the Instrument Name section of the package documentation for more
// information.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error)
// RegisterCallback registers f to be called during the collection of a
@@ -229,6 +271,9 @@ type Meter interface {
// If no instruments are passed, f should not be registered nor called
// during collection.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
//
// The function f needs to be concurrent safe.
RegisterCallback(f Callback, instruments ...Observable) (Registration, error)
}
@@ -244,7 +289,11 @@ type Meter interface {
// Callbacks. Meaning, it should not report measurements for an instrument with
// the same attributes as another Callback will report.
//
// The function needs to be concurrent safe.
// The function needs to be reentrant and concurrent safe.
//
// Note that Go's mutexes are not reentrant, and locking a mutex takes
// an indefinite amount of time. It is therefore advised to avoid
// using mutexes inside callbacks.
type Callback func(context.Context, Observer) error
// Observer records measurements for multiple instruments in a Callback.
@@ -259,9 +308,15 @@ type Observer interface {
embedded.Observer
// ObserveFloat64 records the float64 value for obsrv.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
// ObserveInt64 records the int64 value for obsrv.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
}
@@ -279,6 +334,7 @@ type Registration interface {
// Unregister removes the callback registration from a Meter.
//
// This method needs to be idempotent and concurrent safe.
// Implementations of this method need to be idempotent and safe for a user
// to call concurrently.
Unregister() error
}

View File

@@ -191,6 +191,9 @@ type Int64Counter struct{ embedded.Int64Counter }
// Add performs no operation.
func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}
// Enabled performs no operation.
func (Int64Counter) Enabled(context.Context) bool { return false }
// Float64Counter is an OpenTelemetry Counter used to record float64
// measurements. It produces no telemetry.
type Float64Counter struct{ embedded.Float64Counter }
@@ -198,6 +201,9 @@ type Float64Counter struct{ embedded.Float64Counter }
// Add performs no operation.
func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}
// Enabled performs no operation.
func (Float64Counter) Enabled(context.Context) bool { return false }
// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
// measurements. It produces no telemetry.
type Int64UpDownCounter struct{ embedded.Int64UpDownCounter }
@@ -205,6 +211,9 @@ type Int64UpDownCounter struct{ embedded.Int64UpDownCounter }
// Add performs no operation.
func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {}
// Enabled performs no operation.
func (Int64UpDownCounter) Enabled(context.Context) bool { return false }
// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record
// float64 measurements. It produces no telemetry.
type Float64UpDownCounter struct{ embedded.Float64UpDownCounter }
@@ -212,6 +221,9 @@ type Float64UpDownCounter struct{ embedded.Float64UpDownCounter }
// Add performs no operation.
func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {}
// Enabled performs no operation.
func (Float64UpDownCounter) Enabled(context.Context) bool { return false }
// Int64Histogram is an OpenTelemetry Histogram used to record int64
// measurements. It produces no telemetry.
type Int64Histogram struct{ embedded.Int64Histogram }
@@ -219,6 +231,9 @@ type Int64Histogram struct{ embedded.Int64Histogram }
// Record performs no operation.
func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {}
// Enabled performs no operation.
func (Int64Histogram) Enabled(context.Context) bool { return false }
// Float64Histogram is an OpenTelemetry Histogram used to record float64
// measurements. It produces no telemetry.
type Float64Histogram struct{ embedded.Float64Histogram }
@@ -226,6 +241,9 @@ type Float64Histogram struct{ embedded.Float64Histogram }
// Record performs no operation.
func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {}
// Enabled performs no operation.
func (Float64Histogram) Enabled(context.Context) bool { return false }
// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64
// measurements. It produces no telemetry.
type Int64Gauge struct{ embedded.Int64Gauge }
@@ -233,6 +251,9 @@ type Int64Gauge struct{ embedded.Int64Gauge }
// Record performs no operation.
func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {}
// Enabled performs no operation.
func (Int64Gauge) Enabled(context.Context) bool { return false }
// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64
// measurements. It produces no telemetry.
type Float64Gauge struct{ embedded.Float64Gauge }
@@ -240,6 +261,9 @@ type Float64Gauge struct{ embedded.Float64Gauge }
// Record performs no operation.
func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {}
// Enabled performs no operation.
func (Float64Gauge) Enabled(context.Context) bool { return false }
// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record
// int64 measurements. It produces no telemetry.
type Int64ObservableCounter struct {

View File

@@ -24,7 +24,19 @@ type Float64Counter interface {
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Add(ctx context.Context, incr float64, options ...AddOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Enabled(context.Context) bool
}
// Float64CounterConfig contains options for synchronous counter instruments that
@@ -77,7 +89,19 @@ type Float64UpDownCounter interface {
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Add(ctx context.Context, incr float64, options ...AddOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Enabled(context.Context) bool
}
// Float64UpDownCounterConfig contains options for synchronous counter
@@ -130,7 +154,19 @@ type Float64Histogram interface {
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Record(ctx context.Context, incr float64, options ...RecordOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Enabled(context.Context) bool
}
// Float64HistogramConfig contains options for synchronous histogram
@@ -188,7 +224,19 @@ type Float64Gauge interface {
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Record(ctx context.Context, value float64, options ...RecordOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Enabled(context.Context) bool
}
// Float64GaugeConfig contains options for synchronous gauge instruments that

View File

@@ -24,7 +24,19 @@ type Int64Counter interface {
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Add(ctx context.Context, incr int64, options ...AddOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Enabled(context.Context) bool
}
// Int64CounterConfig contains options for synchronous counter instruments that
@@ -77,7 +89,19 @@ type Int64UpDownCounter interface {
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Add(ctx context.Context, incr int64, options ...AddOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Enabled(context.Context) bool
}
// Int64UpDownCounterConfig contains options for synchronous counter
@@ -130,7 +154,19 @@ type Int64Histogram interface {
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Record(ctx context.Context, incr int64, options ...RecordOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Enabled(context.Context) bool
}
// Int64HistogramConfig contains options for synchronous histogram instruments
@@ -188,7 +224,19 @@ type Int64Gauge interface {
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Record(ctx context.Context, value int64, options ...RecordOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
//
// Implementations of this method need to be safe for a user to call
// concurrently.
Enabled(context.Context) bool
}
// Int64GaugeConfig contains options for synchronous gauge instruments that

View File

@@ -7,9 +7,16 @@ import (
"context"
"go.opentelemetry.io/otel/baggage"
"go.opentelemetry.io/otel/internal/errorhandler"
)
const baggageHeader = "baggage"
const (
baggageHeader = "baggage"
// W3C Baggage specification limits.
// https://www.w3.org/TR/baggage/#limits
maxMembers = 64
)
// Baggage is a propagator that supports the W3C Baggage format.
//
@@ -50,6 +57,9 @@ func extractSingleBaggage(parent context.Context, carrier TextMapCarrier) contex
bag, err := baggage.Parse(bStr)
if err != nil {
errorhandler.GetErrorHandler().Handle(err)
}
if bag.Len() == 0 {
return parent
}
return baggage.ContextWithBaggage(parent, bag)
@@ -60,17 +70,27 @@ func extractMultiBaggage(parent context.Context, carrier ValuesGetter) context.C
if len(bVals) == 0 {
return parent
}
var members []baggage.Member
for _, bStr := range bVals {
currBag, err := baggage.Parse(bStr)
if err != nil {
errorhandler.GetErrorHandler().Handle(err)
}
if currBag.Len() == 0 {
continue
}
members = append(members, currBag.Members()...)
if len(members) >= maxMembers {
break
}
}
b, err := baggage.New(members...)
if err != nil || b.Len() == 0 {
if err != nil {
errorhandler.GetErrorHandler().Handle(err)
}
if b.Len() == 0 {
return parent
}
return baggage.ContextWithBaggage(parent, b)

View File

@@ -46,8 +46,8 @@ func (TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
carrier.Set(tracestateHeader, ts)
}
// Clear all flags other than the trace-context supported sampling bit.
flags := sc.TraceFlags() & trace.FlagsSampled
// Preserve only the spec-defined flags: sampled (0x01) and random (0x02).
flags := sc.TraceFlags() & (trace.FlagsSampled | trace.FlagsRandom)
var sb strings.Builder
sb.Grow(2 + 32 + 16 + 2 + 3)
@@ -104,14 +104,13 @@ func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
if !extractPart(opts[:], &h, 2) {
return trace.SpanContext{}
}
if version == 0 && (h != "" || opts[0] > 2) {
// version 0 not allow extra
// version 0 not allow other flag
if version == 0 && (h != "" || opts[0] > 3) {
// version 0 does not allow extra fields or reserved flag bits.
return trace.SpanContext{}
}
// Clear all flags other than the trace-context supported sampling bit.
scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
scc.TraceFlags = trace.TraceFlags(opts[0]) & //nolint:gosec // slice size already checked.
(trace.FlagsSampled | trace.FlagsRandom)
// Ignore the error returned here. Failure to parse tracestate MUST NOT
// affect the parsing of traceparent according to the W3C tracecontext

View File

@@ -1 +1 @@
codespell==2.4.1
codespell==2.4.2

View File

@@ -0,0 +1,39 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package x documents experimental features for [go.opentelemetry.io/otel/sdk].
package x // import "go.opentelemetry.io/otel/sdk/internal/x"
import "strings"
// Resource is an experimental feature flag that defines if resource detectors
// should include experimental semantic conventions.
//
// To enable this feature set the OTEL_GO_X_RESOURCE environment variable
// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
// will also enable this).
var Resource = newFeature(
[]string{"RESOURCE"},
func(v string) (string, bool) {
if strings.EqualFold(v, "true") {
return v, true
}
return "", false
},
)
// Observability is an experimental feature flag that determines if SDK
// observability metrics are enabled.
//
// To enable this feature set the OTEL_GO_X_OBSERVABILITY environment variable
// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
// will also enable this).
var Observability = newFeature(
[]string{"OBSERVABILITY", "SELF_OBSERVABILITY"},
func(v string) (string, bool) {
if strings.EqualFold(v, "true") {
return v, true
}
return "", false
},
)

View File

@@ -1,48 +1,38 @@
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/x/x.go.tmpl
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package x contains support for OTel SDK experimental features.
//
// This package should only be used for features defined in the specification.
// It should not be used for experiments or new project ideas.
// Package x documents experimental features for [go.opentelemetry.io/otel/sdk].
package x // import "go.opentelemetry.io/otel/sdk/internal/x"
import (
"os"
"strings"
)
// Resource is an experimental feature flag that defines if resource detectors
// should include experimental semantic conventions.
//
// To enable this feature set the OTEL_GO_X_RESOURCE environment variable
// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
// will also enable this).
var Resource = newFeature("RESOURCE", func(v string) (string, bool) {
if strings.EqualFold(v, "true") {
return v, true
}
return "", false
})
// Feature is an experimental feature control flag. It provides a uniform way
// to interact with these feature flags and parse their values.
type Feature[T any] struct {
key string
keys []string
parse func(v string) (T, bool)
}
func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
func newFeature[T any](suffix []string, parse func(string) (T, bool)) Feature[T] {
const envKeyRoot = "OTEL_GO_X_"
keys := make([]string, 0, len(suffix))
for _, s := range suffix {
keys = append(keys, envKeyRoot+s)
}
return Feature[T]{
key: envKeyRoot + suffix,
keys: keys,
parse: parse,
}
}
// Key returns the environment variable key that needs to be set to enable the
// Keys returns the environment variable keys that can be set to enable the
// feature.
func (f Feature[T]) Key() string { return f.key }
func (f Feature[T]) Keys() []string { return f.keys }
// Lookup returns the user configured value for the feature and true if the
// user has enabled the feature. Otherwise, if the feature is not enabled, a
@@ -52,11 +42,13 @@ func (f Feature[T]) Lookup() (v T, ok bool) {
//
// > The SDK MUST interpret an empty value of an environment variable the
// > same way as when the variable is unset.
vRaw := os.Getenv(f.key)
if vRaw == "" {
return v, ok
for _, key := range f.keys {
vRaw := os.Getenv(key)
if vRaw != "" {
return f.parse(vRaw)
}
}
return f.parse(vRaw)
return v, ok
}
// Enabled reports whether the feature is enabled.

View File

@@ -13,7 +13,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk"
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
semconv "go.opentelemetry.io/otel/semconv/v1.40.0"
)
type (

View File

@@ -11,7 +11,7 @@ import (
"os"
"regexp"
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
semconv "go.opentelemetry.io/otel/semconv/v1.40.0"
)
type containerIDProvider func() (string, error)

View File

@@ -12,7 +12,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
semconv "go.opentelemetry.io/otel/semconv/v1.40.0"
)
const (

View File

@@ -8,7 +8,7 @@ import (
"errors"
"strings"
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
semconv "go.opentelemetry.io/otel/semconv/v1.40.0"
)
type hostIDProvider func() (string, error)
@@ -51,17 +51,16 @@ type hostIDReaderDarwin struct {
execCommand commandExecutor
}
// read executes `ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id
// read executes `/usr/sbin/ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id
// from the IOPlatformUUID line. If the command fails or the uuid cannot be
// parsed an error will be returned.
func (r *hostIDReaderDarwin) read() (string, error) {
result, err := r.execCommand("ioreg", "-rd1", "-c", "IOPlatformExpertDevice")
result, err := r.execCommand("/usr/sbin/ioreg", "-rd1", "-c", "IOPlatformExpertDevice")
if err != nil {
return "", err
}
lines := strings.Split(result, "\n")
for _, line := range lines {
for line := range strings.SplitSeq(result, "\n") {
if strings.Contains(line, "IOPlatformUUID") {
parts := strings.Split(line, " = ")
if len(parts) == 2 {

View File

@@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//go:build dragonfly || freebsd || netbsd || openbsd || solaris
// +build dragonfly freebsd netbsd openbsd solaris
package resource // import "go.opentelemetry.io/otel/sdk/resource"

View File

@@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//go:build linux
// +build linux
package resource // import "go.opentelemetry.io/otel/sdk/resource"

View File

@@ -8,7 +8,7 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource"
import "os"
func readFile(filename string) (string, error) {
b, err := os.ReadFile(filename)
b, err := os.ReadFile(filename) // nolint:gosec // false positive
if err != nil {
return "", err
}

View File

@@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
package resource // import "go.opentelemetry.io/otel/sdk/resource"

View File

@@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//go:build windows
// +build windows
package resource // import "go.opentelemetry.io/otel/sdk/resource"

View File

@@ -8,7 +8,7 @@ import (
"strings"
"go.opentelemetry.io/otel/attribute"
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
semconv "go.opentelemetry.io/otel/semconv/v1.40.0"
)
type osDescriptionProvider func() (string, error)

View File

@@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
// +build aix dragonfly freebsd linux netbsd openbsd solaris zos
package resource // import "go.opentelemetry.io/otel/sdk/resource"

View File

@@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
package resource // import "go.opentelemetry.io/otel/sdk/resource"

View File

@@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
package resource // import "go.opentelemetry.io/otel/sdk/resource"

View File

@@ -11,7 +11,7 @@ import (
"path/filepath"
"runtime"
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
semconv "go.opentelemetry.io/otel/semconv/v1.40.0"
)
type (

View File

@@ -6,20 +6,14 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"
import (
"context"
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/sdk"
"go.opentelemetry.io/otel/sdk/internal/env"
"go.opentelemetry.io/otel/sdk/trace/internal/x"
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
"go.opentelemetry.io/otel/sdk/trace/internal/env"
"go.opentelemetry.io/otel/sdk/trace/internal/observ"
"go.opentelemetry.io/otel/trace"
)
@@ -33,8 +27,6 @@ const (
DefaultMaxExportBatchSize = 512
)
var queueFull = otelconv.ErrorTypeAttr("queue_full")
// BatchSpanProcessorOption configures a BatchSpanProcessor.
type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions)
@@ -78,10 +70,7 @@ type batchSpanProcessor struct {
queue chan ReadOnlySpan
dropped uint32
selfObservabilityEnabled bool
callbackRegistration metric.Registration
spansProcessedCounter otelconv.SDKProcessorSpanProcessed
componentNameAttr attribute.KeyValue
inst *observ.BSP
batch []ReadOnlySpan
batchMutex sync.Mutex
@@ -124,27 +113,20 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO
stopCh: make(chan struct{}),
}
if x.SelfObservability.Enabled() {
bsp.selfObservabilityEnabled = true
bsp.componentNameAttr = componentName()
var err error
bsp.spansProcessedCounter, bsp.callbackRegistration, err = newBSPObs(
bsp.componentNameAttr,
func() int64 { return int64(len(bsp.queue)) },
int64(bsp.o.MaxQueueSize),
)
if err != nil {
otel.Handle(err)
}
var err error
bsp.inst, err = observ.NewBSP(
nextProcessorID(),
func() int64 { return int64(len(bsp.queue)) },
int64(bsp.o.MaxQueueSize),
)
if err != nil {
otel.Handle(err)
}
bsp.stopWait.Add(1)
go func() {
defer bsp.stopWait.Done()
bsp.stopWait.Go(func() {
bsp.processQueue()
bsp.drainQueue()
}()
})
return bsp
}
@@ -157,51 +139,6 @@ func nextProcessorID() int64 {
return processorIDCounter.Add(1) - 1
}
func componentName() attribute.KeyValue {
id := nextProcessorID()
name := fmt.Sprintf("%s/%d", otelconv.ComponentTypeBatchingSpanProcessor, id)
return semconv.OTelComponentName(name)
}
// newBSPObs creates and returns a new set of metrics instruments and a
// registration for a BatchSpanProcessor. It is the caller's responsibility
// to unregister the registration when it is no longer needed.
func newBSPObs(
cmpnt attribute.KeyValue,
qLen func() int64,
qMax int64,
) (otelconv.SDKProcessorSpanProcessed, metric.Registration, error) {
meter := otel.GetMeterProvider().Meter(
selfObsScopeName,
metric.WithInstrumentationVersion(sdk.Version()),
metric.WithSchemaURL(semconv.SchemaURL),
)
qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter)
qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter)
err = errors.Join(err, e)
spansProcessed, e := otelconv.NewSDKProcessorSpanProcessed(meter)
err = errors.Join(err, e)
cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor
attrs := metric.WithAttributes(cmpnt, cmpntT)
reg, e := meter.RegisterCallback(
func(_ context.Context, o metric.Observer) error {
o.ObserveInt64(qSize.Inst(), qLen(), attrs)
o.ObserveInt64(qCap.Inst(), qMax, attrs)
return nil
},
qSize.Inst(),
qCap.Inst(),
)
err = errors.Join(err, e)
return spansProcessed, reg, err
}
// OnStart method does nothing.
func (*batchSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
@@ -242,8 +179,8 @@ func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error {
case <-ctx.Done():
err = ctx.Err()
}
if bsp.selfObservabilityEnabled {
err = errors.Join(err, bsp.callbackRegistration.Unregister())
if bsp.inst != nil {
err = errors.Join(err, bsp.inst.Shutdown())
}
})
return err
@@ -357,10 +294,8 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
if l := len(bsp.batch); l > 0 {
global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped))
if bsp.selfObservabilityEnabled {
bsp.spansProcessedCounter.Add(ctx, int64(l),
bsp.componentNameAttr,
bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor))
if bsp.inst != nil {
bsp.inst.Processed(ctx, int64(l))
}
err := bsp.e.ExportSpans(ctx, bsp.batch)
@@ -470,11 +405,8 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R
case bsp.queue <- sd:
return true
case <-ctx.Done():
if bsp.selfObservabilityEnabled {
bsp.spansProcessedCounter.Add(ctx, 1,
bsp.componentNameAttr,
bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor),
bsp.spansProcessedCounter.AttrErrorType(queueFull))
if bsp.inst != nil {
bsp.inst.ProcessedQueueFull(ctx, 1)
}
return false
}
@@ -490,11 +422,8 @@ func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan)
return true
default:
atomic.AddUint32(&bsp.dropped, 1)
if bsp.selfObservabilityEnabled {
bsp.spansProcessedCounter.Add(ctx, 1,
bsp.componentNameAttr,
bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor),
bsp.spansProcessedCounter.AttrErrorType(queueFull))
if bsp.inst != nil {
bsp.inst.ProcessedQueueFull(ctx, 1)
}
}
return false

View File

@@ -7,7 +7,7 @@ Package trace contains support for OpenTelemetry distributed tracing.
The following assumes a basic familiarity with OpenTelemetry concepts.
See https://opentelemetry.io.
See [go.opentelemetry.io/otel/sdk/trace/internal/x] for information about
See [go.opentelemetry.io/otel/sdk/internal/x] for information about
the experimental features.
*/
package trace // import "go.opentelemetry.io/otel/sdk/trace"

View File

@@ -3,7 +3,7 @@
// Package env provides types and functionality for environment variable support
// in the OpenTelemetry SDK.
package env // import "go.opentelemetry.io/otel/sdk/internal/env"
package env // import "go.opentelemetry.io/otel/sdk/trace/internal/env"
import (
"os"

View File

@@ -0,0 +1,119 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ"
import (
"context"
"errors"
"fmt"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/sdk"
"go.opentelemetry.io/otel/sdk/internal/x"
semconv "go.opentelemetry.io/otel/semconv/v1.40.0"
"go.opentelemetry.io/otel/semconv/v1.40.0/otelconv"
)
const (
// ScopeName is the name of the instrumentation scope.
ScopeName = "go.opentelemetry.io/otel/sdk/trace/internal/observ"
// SchemaURL is the schema URL of the instrumentation.
SchemaURL = semconv.SchemaURL
)
// ErrQueueFull is the attribute value for the "queue_full" error type.
var ErrQueueFull = otelconv.SDKProcessorSpanProcessed{}.AttrErrorType(
otelconv.ErrorTypeAttr("queue_full"),
)
// BSPComponentName returns the component name attribute for a
// BatchSpanProcessor with the given ID.
func BSPComponentName(id int64) attribute.KeyValue {
t := otelconv.ComponentTypeBatchingSpanProcessor
name := fmt.Sprintf("%s/%d", t, id)
return semconv.OTelComponentName(name)
}
// BSP is the instrumentation for an OTel SDK BatchSpanProcessor.
type BSP struct {
reg metric.Registration
processed metric.Int64Counter
processedOpts []metric.AddOption
processedQueueFullOpts []metric.AddOption
}
func NewBSP(id int64, qLen func() int64, qMax int64) (*BSP, error) {
if !x.Observability.Enabled() {
return nil, nil
}
meter := otel.GetMeterProvider().Meter(
ScopeName,
metric.WithInstrumentationVersion(sdk.Version()),
metric.WithSchemaURL(SchemaURL),
)
qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter)
if err != nil {
err = fmt.Errorf("failed to create BSP queue capacity metric: %w", err)
}
qCapInst := qCap.Inst()
qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter)
if e != nil {
e := fmt.Errorf("failed to create BSP queue size metric: %w", e)
err = errors.Join(err, e)
}
qSizeInst := qSize.Inst()
cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor
cmpnt := BSPComponentName(id)
set := attribute.NewSet(cmpnt, cmpntT)
obsOpts := []metric.ObserveOption{metric.WithAttributeSet(set)}
reg, e := meter.RegisterCallback(
func(_ context.Context, o metric.Observer) error {
o.ObserveInt64(qSizeInst, qLen(), obsOpts...)
o.ObserveInt64(qCapInst, qMax, obsOpts...)
return nil
},
qSizeInst,
qCapInst,
)
if e != nil {
e := fmt.Errorf("failed to register BSP queue size/capacity callback: %w", e)
err = errors.Join(err, e)
}
processed, e := otelconv.NewSDKProcessorSpanProcessed(meter)
if e != nil {
e := fmt.Errorf("failed to create BSP processed spans metric: %w", e)
err = errors.Join(err, e)
}
processedOpts := []metric.AddOption{metric.WithAttributeSet(set)}
set = attribute.NewSet(cmpnt, cmpntT, ErrQueueFull)
processedQueueFullOpts := []metric.AddOption{metric.WithAttributeSet(set)}
return &BSP{
reg: reg,
processed: processed.Inst(),
processedOpts: processedOpts,
processedQueueFullOpts: processedQueueFullOpts,
}, err
}
func (b *BSP) Shutdown() error { return b.reg.Unregister() }
func (b *BSP) Processed(ctx context.Context, n int64) {
b.processed.Add(ctx, n, b.processedOpts...)
}
func (b *BSP) ProcessedQueueFull(ctx context.Context, n int64) {
b.processed.Add(ctx, n, b.processedQueueFullOpts...)
}

View File

@@ -0,0 +1,6 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package observ provides observability instrumentation for the OTel trace SDK
// package.
package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ"

View File

@@ -0,0 +1,97 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ"
import (
"context"
"fmt"
"sync"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/sdk"
"go.opentelemetry.io/otel/sdk/internal/x"
semconv "go.opentelemetry.io/otel/semconv/v1.40.0"
"go.opentelemetry.io/otel/semconv/v1.40.0/otelconv"
)
// measureAttrsPool recycles scratch attribute slices used when a measurement
// needs the base attributes plus an error.type attribute.
var measureAttrsPool = sync.Pool{
	New: func() any {
		// Capacity for "component.name", "component.type", and "error.type".
		const capacity = 3
		kvs := make([]attribute.KeyValue, 0, capacity)
		// A pointer to the slice is pooled (rather than the slice itself)
		// so Get/Put do not allocate a fresh interface box per call.
		return &kvs
	},
}
// SSP is the instrumentation for an OTel SDK SimpleSpanProcessor.
type SSP struct {
	// spansProcessedCounter counts spans handled by the processor.
	spansProcessedCounter metric.Int64Counter
	// addOpts are the cached options (component name/type attribute set)
	// used for the error-free case.
	addOpts []metric.AddOption
	// attrs holds the base attributes, reused when an error.type attribute
	// must be appended.
	attrs []attribute.KeyValue
}
// SSPComponentName returns the component name attribute for a
// SimpleSpanProcessor with the given ID. The name combines the
// simple-span-processor component type with the ID.
func SSPComponentName(id int64) attribute.KeyValue {
	componentType := otelconv.ComponentTypeSimpleSpanProcessor
	return semconv.OTelComponentName(fmt.Sprintf("%s/%d", componentType, id))
}
// NewSSP returns instrumentation for an OTel SDK SimpleSpanProcessor with the
// provided ID.
//
// If the experimental observability is disabled, nil is returned.
func NewSSP(id int64) (*SSP, error) {
	if !x.Observability.Enabled() {
		return nil, nil
	}

	meter := otel.GetMeterProvider().Meter(
		ScopeName,
		metric.WithInstrumentationVersion(sdk.Version()),
		metric.WithSchemaURL(SchemaURL),
	)

	counter, err := otelconv.NewSDKProcessorSpanProcessed(meter)
	if err != nil {
		err = fmt.Errorf("failed to create SSP processed spans metric: %w", err)
	}

	// Base attributes shared by every measurement for this processor.
	baseAttrs := []attribute.KeyValue{
		SSPComponentName(id),
		counter.AttrComponentType(otelconv.ComponentTypeSimpleSpanProcessor),
	}
	ssp := &SSP{
		spansProcessedCounter: counter.Inst(),
		addOpts: []metric.AddOption{
			metric.WithAttributeSet(attribute.NewSet(baseAttrs...)),
		},
		attrs: baseAttrs,
	}
	return ssp, err
}
// SpanProcessed records that a span has been processed by the SimpleSpanProcessor.
// If err is non-nil, it records the processing error as an attribute.
func (ssp *SSP) SpanProcessed(ctx context.Context, err error) {
	// addOption selects cached options, or builds an error.type-augmented
	// set when err is non-nil.
	ssp.spansProcessedCounter.Add(ctx, 1, ssp.addOption(err)...)
}
// addOption returns the AddOptions for recording a processed span. The
// cached options are returned for a nil error; otherwise a new attribute set
// including error.type is built using a pooled scratch slice.
func (ssp *SSP) addOption(err error) []metric.AddOption {
	if err == nil {
		return ssp.addOpts
	}

	kvs := measureAttrsPool.Get().(*[]attribute.KeyValue)
	defer func() {
		// Truncate (keeping capacity) before returning to the pool.
		*kvs = (*kvs)[:0]
		measureAttrsPool.Put(kvs)
	}()

	*kvs = append(append(*kvs, ssp.attrs...), semconv.ErrorType(err))
	// WithAttributeSet avoids the defensive copy WithAttributes would make.
	return []metric.AddOption{metric.WithAttributeSet(attribute.NewSet(*kvs...))}
}

View File

@@ -0,0 +1,223 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ"
import (
"context"
"errors"
"fmt"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/sdk"
"go.opentelemetry.io/otel/sdk/internal/x"
"go.opentelemetry.io/otel/semconv/v1.40.0/otelconv"
"go.opentelemetry.io/otel/trace"
)
// meterOpts configure every Meter created by this package with the SDK
// version and the semantic-convention schema URL.
var meterOpts = []metric.MeterOption{
	metric.WithInstrumentationVersion(sdk.Version()),
	metric.WithSchemaURL(SchemaURL),
}
// Tracer is instrumentation for an OTel SDK Tracer.
type Tracer struct {
	// enabled reports whether self-observability was on at creation; the
	// zero value is a disabled, no-op Tracer.
	enabled bool

	// live tracks spans that have started but not yet ended.
	live metric.Int64UpDownCounter
	// started counts all started spans.
	started metric.Int64Counter
}
// NewTracer returns instrumentation for an OTel SDK Tracer. If the
// experimental observability feature is disabled, a disabled Tracer and a
// nil error are returned.
func NewTracer() (Tracer, error) {
	if !x.Observability.Enabled() {
		return Tracer{}, nil
	}

	meter := otel.GetMeterProvider().Meter(ScopeName, meterOpts...)

	live, liveErr := otelconv.NewSDKSpanLive(meter)
	if liveErr != nil {
		liveErr = fmt.Errorf("failed to create span live metric: %w", liveErr)
	}
	started, startedErr := otelconv.NewSDKSpanStarted(meter)
	if startedErr != nil {
		startedErr = fmt.Errorf("failed to create span started metric: %w", startedErr)
	}

	// Even on error the Tracer is returned enabled with whatever
	// instruments were created, matching the partial-failure behavior of
	// the other constructors in this package.
	tracer := Tracer{enabled: true, live: live.Inst(), started: started.Inst()}
	return tracer, errors.Join(liveErr, startedErr)
}
// Enabled reports whether this Tracer records self-observability metrics.
func (t Tracer) Enabled() bool { return t.enabled }
// SpanStarted records that a span was started. The measurement is attributed
// with the parent origin (none/local/remote) derived from psc and the
// sampling result derived from span's recording and sampled state.
func (t Tracer) SpanStarted(ctx context.Context, psc trace.SpanContext, span trace.Span) {
	key := spanStartedKey{
		parent:   parentStateNoParent,
		sampling: samplingStateDrop,
	}
	switch {
	case !psc.IsValid():
		// No parent: keep parentStateNoParent.
	case psc.IsRemote():
		key.parent = parentStateRemoteParent
	default:
		key.parent = parentStateLocalParent
	}
	switch {
	case !span.IsRecording():
		// Dropped: keep samplingStateDrop.
	case span.SpanContext().IsSampled():
		key.sampling = samplingStateRecordAndSample
	default:
		key.sampling = samplingStateRecordOnly
	}
	t.started.Add(ctx, 1, spanStartedOpts[key]...)
}
// SpanLive records that a span created by the Tracer is now live (started
// and not yet ended).
func (t Tracer) SpanLive(ctx context.Context, span trace.Span) {
	t.spanLive(ctx, 1, span)
}

// SpanEnded records that a live span created by the Tracer has ended.
func (t Tracer) SpanEnded(ctx context.Context, span trace.Span) {
	t.spanLive(ctx, -1, span)
}
// spanLive adjusts the live-span up/down counter by value, attributing the
// measurement by whether span's context is sampled.
func (t Tracer) spanLive(ctx context.Context, value int64, span trace.Span) {
	sampled := span.SpanContext().IsSampled()
	t.live.Add(ctx, value, spanLiveOpts[spanLiveKey{sampled: sampled}]...)
}
// parentState enumerates the origin of a started span's parent.
type parentState int

const (
	parentStateNoParent parentState = iota // span has no parent
	parentStateLocalParent                 // parent is in-process
	parentStateRemoteParent                // parent is remote
)

// samplingState enumerates the sampling result of a started span.
type samplingState int

const (
	samplingStateDrop samplingState = iota // not recording
	samplingStateRecordOnly                // recording, not sampled
	samplingStateRecordAndSample           // recording and sampled
)

// spanStartedKey indexes the precomputed AddOptions for the span-started
// metric by parent origin and sampling result.
type spanStartedKey struct {
	parent   parentState
	sampling samplingState
}
// spanStartedOpts precomputes the metric AddOptions for every combination of
// parent origin and sampling result so SpanStarted performs no allocation on
// the hot path.
//
// The map is built programmatically at package init instead of as nine
// near-identical hand-written literal entries: the nested loops guarantee
// all parent x sampling combinations are present and keep each attribute
// value spelled exactly once.
var spanStartedOpts = func() map[spanStartedKey][]metric.AddOption {
	inst := otelconv.SDKSpanStarted{}
	// Indexed by the parentState constant values.
	parents := []attribute.KeyValue{
		parentStateNoParent:     inst.AttrSpanParentOrigin(otelconv.SpanParentOriginNone),
		parentStateLocalParent:  inst.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal),
		parentStateRemoteParent: inst.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote),
	}
	// Indexed by the samplingState constant values.
	samplings := []attribute.KeyValue{
		samplingStateDrop:            inst.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop),
		samplingStateRecordOnly:      inst.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly),
		samplingStateRecordAndSample: inst.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample),
	}
	opts := make(map[spanStartedKey][]metric.AddOption, len(parents)*len(samplings))
	for p, parentAttr := range parents {
		for s, samplingAttr := range samplings {
			key := spanStartedKey{parent: parentState(p), sampling: samplingState(s)}
			opts[key] = []metric.AddOption{
				metric.WithAttributeSet(attribute.NewSet(parentAttr, samplingAttr)),
			}
		}
	}
	return opts
}()
// spanLiveKey indexes the precomputed AddOptions for the span-live metric by
// whether the span is sampled.
type spanLiveKey struct {
	sampled bool
}

// spanLiveOpts precomputes the AddOptions for the span-live metric so
// recording does not allocate. A sampled span is attributed with the
// record-and-sample sampling result, an unsampled one with record-only.
var spanLiveOpts = map[spanLiveKey][]metric.AddOption{
	{true}: {
		metric.WithAttributeSet(attribute.NewSet(
			otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
				otelconv.SpanSamplingResultRecordAndSample,
			),
		)),
	},
	{false}: {
		metric.WithAttributeSet(attribute.NewSet(
			otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
				otelconv.SpanSamplingResultRecordOnly,
			),
		)),
	},
}

View File

@@ -1,35 +0,0 @@
# Experimental Features
The Trace SDK contains features that have not yet stabilized in the OpenTelemetry specification.
These features are added to the OpenTelemetry Go Trace SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
These features may change in backwards incompatible ways as feedback is applied.
See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
## Features
- [Self-Observability](#self-observability)
### Self-Observability
The SDK provides a self-observability feature that allows you to monitor the SDK itself.
To opt-in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`.
When enabled, the SDK will create the following metrics using the global `MeterProvider`:
- `otel.sdk.span.live`
- `otel.sdk.span.started`
Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics.
[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md
## Compatibility and Stability
Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md).
These features may be removed or modified in successive version releases, including patch versions.
When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.

View File

@@ -1,63 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package x documents experimental features for [go.opentelemetry.io/otel/sdk/trace].
package x // import "go.opentelemetry.io/otel/sdk/trace/internal/x"
import (
"os"
"strings"
)
// SelfObservability is an experimental feature flag that determines if SDK
// self-observability metrics are enabled.
//
// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable
// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
// will also enable this).
var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) {
	if !strings.EqualFold(v, "true") {
		return "", false
	}
	return v, true
})
// Feature is an experimental feature control flag. It provides a uniform way
// to interact with these feature flags and parse their values.
type Feature[T any] struct {
	key   string
	parse func(v string) (T, bool)
}

// newFeature creates a Feature keyed by the OTEL_GO_X_-prefixed environment
// variable named by suffix, whose value is interpreted by parse.
func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
	const envKeyRoot = "OTEL_GO_X_"
	return Feature[T]{key: envKeyRoot + suffix, parse: parse}
}

// Key returns the environment variable key that needs to be set to enable the
// feature.
func (f Feature[T]) Key() string { return f.key }

// Lookup returns the user configured value for the feature and true if the
// user has enabled the feature. Otherwise, if the feature is not enabled, a
// zero-value and false are returned.
func (f Feature[T]) Lookup() (v T, ok bool) {
	// https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
	//
	// > The SDK MUST interpret an empty value of an environment variable the
	// > same way as when the variable is unset.
	raw := os.Getenv(f.key)
	if raw == "" {
		return v, false
	}
	return f.parse(raw)
}

// Enabled reports whether the feature is enabled.
func (f Feature[T]) Enabled() bool {
	_, ok := f.Lookup()
	return ok
}

View File

@@ -12,22 +12,15 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/sdk"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/resource"
"go.opentelemetry.io/otel/sdk/trace/internal/x"
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
"go.opentelemetry.io/otel/sdk/trace/internal/observ"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
"go.opentelemetry.io/otel/trace/noop"
)
const (
defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer"
selfObsScopeName = "go.opentelemetry.io/otel/sdk/trace"
)
const defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer"
// tracerProviderConfig.
type tracerProviderConfig struct {
@@ -163,19 +156,16 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
t, ok := p.namedTracer[is]
if !ok {
t = &tracer{
provider: p,
instrumentationScope: is,
selfObservabilityEnabled: x.SelfObservability.Enabled(),
provider: p,
instrumentationScope: is,
}
if t.selfObservabilityEnabled {
var err error
t.spanLiveMetric, t.spanStartedMetric, err = newInst()
if err != nil {
msg := "failed to create self-observability metrics for tracer: %w"
err := fmt.Errorf(msg, err)
otel.Handle(err)
}
var err error
t.inst, err = observ.NewTracer()
if err != nil {
otel.Handle(err)
}
p.namedTracer[is] = t
}
return t, ok
@@ -201,23 +191,6 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
return t
}
func newInst() (otelconv.SDKSpanLive, otelconv.SDKSpanStarted, error) {
m := otel.GetMeterProvider().Meter(
selfObsScopeName,
metric.WithInstrumentationVersion(sdk.Version()),
metric.WithSchemaURL(semconv.SchemaURL),
)
var err error
spanLiveMetric, e := otelconv.NewSDKSpanLive(m)
err = errors.Join(err, e)
spanStartedMetric, e := otelconv.NewSDKSpanStarted(m)
err = errors.Join(err, e)
return spanLiveMetric, spanStartedMetric, err
}
// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors.
func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) {
// This check prevents calls during a shutdown.
@@ -290,6 +263,7 @@ func (p *TracerProvider) ForceFlush(ctx context.Context) error {
return nil
}
var err error
for _, sps := range spss {
select {
case <-ctx.Done():
@@ -297,11 +271,9 @@ func (p *TracerProvider) ForceFlush(ctx context.Context) error {
default:
}
if err := sps.sp.ForceFlush(ctx); err != nil {
return err
}
err = errors.Join(err, sps.sp.ForceFlush(ctx))
}
return nil
return err
}
// Shutdown shuts down TracerProvider. All registered span processors are shut down

View File

@@ -280,3 +280,31 @@ func (pb parentBased) Description() string {
pb.config.localParentNotSampled.Description(),
)
}
// AlwaysRecord returns a sampler decorator which ensures that every span
// is passed to the SpanProcessor, even those that would be normally dropped.
// It converts Drop decisions from the root sampler into RecordOnly decisions,
// allowing processors to see all spans without sending them to exporters.
// This is typically used to enable accurate span-to-metrics processing.
func AlwaysRecord(root Sampler) Sampler {
	return alwaysRecord{root: root}
}

// alwaysRecord wraps a root Sampler, upgrading its Drop decisions.
type alwaysRecord struct {
	root Sampler
}

// ShouldSample delegates to the root sampler and converts a Drop decision
// into RecordOnly, preserving the parent context's trace state.
func (ar alwaysRecord) ShouldSample(p SamplingParameters) SamplingResult {
	res := ar.root.ShouldSample(p)
	if res.Decision != Drop {
		return res
	}
	return SamplingResult{
		Decision:   RecordOnly,
		Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
	}
}

// Description identifies this sampler and the root it decorates.
func (ar alwaysRecord) Description() string {
	return "AlwaysRecord{root:" + ar.root.Description() + "}"
}

View File

@@ -6,9 +6,12 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"
import (
"context"
"sync"
"sync/atomic"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/sdk/trace/internal/observ"
"go.opentelemetry.io/otel/trace"
)
// simpleSpanProcessor is a SpanProcessor that synchronously sends all
@@ -17,6 +20,8 @@ type simpleSpanProcessor struct {
exporterMu sync.Mutex
exporter SpanExporter
stopOnce sync.Once
inst *observ.SSP
}
var _ SpanProcessor = (*simpleSpanProcessor)(nil)
@@ -33,11 +38,26 @@ func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor {
ssp := &simpleSpanProcessor{
exporter: exporter,
}
var err error
ssp.inst, err = observ.NewSSP(nextSimpleProcessorID())
if err != nil {
otel.Handle(err)
}
global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.")
return ssp
}
// simpleProcessorIDCounter issues unique IDs for simple span processors.
var simpleProcessorIDCounter atomic.Int64

// nextSimpleProcessorID returns an identifier for this simple span processor,
// starting with 0 and incrementing by 1 each time it is called.
func nextSimpleProcessorID() int64 {
	next := simpleProcessorIDCounter.Add(1)
	return next - 1
}
// OnStart does nothing; the simple span processor acts only in OnEnd.
func (*simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
@@ -46,11 +66,20 @@ func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) {
ssp.exporterMu.Lock()
defer ssp.exporterMu.Unlock()
var err error
if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() {
if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil {
err = ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s})
if err != nil {
otel.Handle(err)
}
}
if ssp.inst != nil {
// Add the span to the context to ensure the metric is recorded
// with the correct span context.
ctx := trace.ContextWithSpanContext(context.Background(), s.SpanContext())
ssp.inst.SpanProcessed(ctx, err)
}
}
// Shutdown shuts down the exporter this SimpleSpanProcessor exports to.

View File

@@ -20,7 +20,7 @@ import (
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/resource"
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
semconv "go.opentelemetry.io/otel/semconv/v1.40.0"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)
@@ -151,6 +151,12 @@ type recordingSpan struct {
// tracer is the SDK tracer that created this span.
tracer *tracer
// origCtx is the context used when starting this span that has the
// recordingSpan instance set as the active span. If not nil, it is used
// when ending the span to ensure any metrics are recorded with a context
// containing this span without requiring an additional allocation.
origCtx context.Context
}
var (
@@ -158,6 +164,10 @@ var (
_ runtimeTracer = (*recordingSpan)(nil)
)
func (s *recordingSpan) setOrigCtx(ctx context.Context) {
s.origCtx = ctx
}
// SpanContext returns the SpanContext of this span.
func (s *recordingSpan) SpanContext() trace.SpanContext {
if s == nil {
@@ -496,14 +506,15 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
}
s.mu.Unlock()
if s.tracer.selfObservabilityEnabled {
defer func() {
// Add the span to the context to ensure the metric is recorded
// with the correct span context.
ctx := trace.ContextWithSpan(context.Background(), s)
set := spanLiveSet(s.spanContext.IsSampled())
s.tracer.spanLiveMetric.AddSet(ctx, -1, set)
}()
if s.tracer.inst.Enabled() {
ctx := s.origCtx
if ctx == nil {
// This should not happen as the origCtx should be set, but
// ensure trace information is propagated in the case of an
// error.
ctx = trace.ContextWithSpan(context.Background(), s)
}
defer s.tracer.inst.SpanEnded(ctx, s)
}
sps := s.tracer.provider.getSpanProcessors()

View File

@@ -3,7 +3,7 @@
package trace // import "go.opentelemetry.io/otel/sdk/trace"
import "go.opentelemetry.io/otel/sdk/internal/env"
import "go.opentelemetry.io/otel/sdk/trace/internal/env"
const (
// DefaultAttributeValueLengthLimit is the default maximum allowed

View File

@@ -7,9 +7,8 @@ import (
"context"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
"go.opentelemetry.io/otel/sdk/trace/internal/observ"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)
@@ -20,9 +19,7 @@ type tracer struct {
provider *TracerProvider
instrumentationScope instrumentation.Scope
selfObservabilityEnabled bool
spanLiveMetric otelconv.SDKSpanLive
spanStartedMetric otelconv.SDKSpanStarted
inst observ.Tracer
}
var _ trace.Tracer = &tracer{}
@@ -53,10 +50,17 @@ func (tr *tracer) Start(
s := tr.newSpan(ctx, name, &config)
newCtx := trace.ContextWithSpan(ctx, s)
if tr.selfObservabilityEnabled {
if tr.inst.Enabled() {
if o, ok := s.(interface{ setOrigCtx(context.Context) }); ok {
// If this is a recording span, store the original context.
// This allows later retrieval of baggage and other information
// that may have been stored in the context at span start time and
// to avoid the allocation of repeatedly calling
// trace.ContextWithSpan.
o.setOrigCtx(newCtx)
}
psc := trace.SpanContextFromContext(ctx)
set := spanStartedSet(psc, s)
tr.spanStartedMetric.AddSet(newCtx, 1, set)
tr.inst.SpanStarted(newCtx, psc, s)
}
if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() {
@@ -168,12 +172,11 @@ func (tr *tracer) newRecordingSpan(
s.SetAttributes(sr.Attributes...)
s.SetAttributes(config.Attributes()...)
if tr.selfObservabilityEnabled {
if tr.inst.Enabled() {
// Propagate any existing values from the context with the new span to
// the measurement context.
ctx = trace.ContextWithSpan(ctx, s)
set := spanLiveSet(s.spanContext.IsSampled())
tr.spanLiveMetric.AddSet(ctx, 1, set)
tr.inst.SpanLive(ctx, s)
}
return s
@@ -183,112 +186,3 @@ func (tr *tracer) newRecordingSpan(
func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan {
return nonRecordingSpan{tracer: tr, sc: sc}
}
type parentState int
const (
parentStateNoParent parentState = iota
parentStateLocalParent
parentStateRemoteParent
)
type samplingState int
const (
samplingStateDrop samplingState = iota
samplingStateRecordOnly
samplingStateRecordAndSample
)
type spanStartedSetKey struct {
parent parentState
sampling samplingState
}
var spanStartedSetCache = map[spanStartedSetKey]attribute.Set{
{parentStateNoParent, samplingStateDrop}: attribute.NewSet(
otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone),
otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop),
),
{parentStateLocalParent, samplingStateDrop}: attribute.NewSet(
otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal),
otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop),
),
{parentStateRemoteParent, samplingStateDrop}: attribute.NewSet(
otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote),
otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop),
),
{parentStateNoParent, samplingStateRecordOnly}: attribute.NewSet(
otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone),
otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly),
),
{parentStateLocalParent, samplingStateRecordOnly}: attribute.NewSet(
otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal),
otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly),
),
{parentStateRemoteParent, samplingStateRecordOnly}: attribute.NewSet(
otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote),
otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly),
),
{parentStateNoParent, samplingStateRecordAndSample}: attribute.NewSet(
otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone),
otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample),
),
{parentStateLocalParent, samplingStateRecordAndSample}: attribute.NewSet(
otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal),
otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample),
),
{parentStateRemoteParent, samplingStateRecordAndSample}: attribute.NewSet(
otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote),
otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample),
),
}
func spanStartedSet(psc trace.SpanContext, span trace.Span) attribute.Set {
key := spanStartedSetKey{
parent: parentStateNoParent,
sampling: samplingStateDrop,
}
if psc.IsValid() {
if psc.IsRemote() {
key.parent = parentStateRemoteParent
} else {
key.parent = parentStateLocalParent
}
}
if span.IsRecording() {
if span.SpanContext().IsSampled() {
key.sampling = samplingStateRecordAndSample
} else {
key.sampling = samplingStateRecordOnly
}
}
return spanStartedSetCache[key]
}
type spanLiveSetKey struct {
sampled bool
}
var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{
{true}: attribute.NewSet(
otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
otelconv.SpanSamplingResultRecordAndSample,
),
),
{false}: attribute.NewSet(
otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
otelconv.SpanSamplingResultRecordOnly,
),
),
}
func spanLiveSet(sampled bool) attribute.Set {
key := spanLiveSetKey{sampled: sampled}
return spanLiveSetCache[key]
}

View File

@@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk"
// Version is the current release version of the OpenTelemetry SDK in use.
func Version() string {
return "1.38.0"
return "1.42.0"
}

View File

@@ -286,7 +286,7 @@ func firstHostPort(source ...string) (host string, port int) {
break
}
}
return
return host, port
}
// RequestHeader returns the contents of h as OpenTelemetry attributes.

View File

@@ -287,27 +287,27 @@ func splitHostPort(hostport string) (host string, port int) {
addrEnd := strings.LastIndex(hostport, "]")
if addrEnd < 0 {
// Invalid hostport.
return
return host, port
}
if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
host = hostport[1:addrEnd]
return
return host, port
}
} else {
if i := strings.LastIndex(hostport, ":"); i < 0 {
host = hostport
return
return host, port
}
}
host, pStr, err := net.SplitHostPort(hostport)
if err != nil {
return
return host, port
}
p, err := strconv.ParseUint(pStr, 10, 16)
if err != nil {
return
return host, port
}
return host, int(p) // nolint: gosec // Bit size of 16 checked above.
}

View File

@@ -4,28 +4,53 @@
package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
import (
"fmt"
"reflect"
"go.opentelemetry.io/otel/attribute"
)
// ErrorType returns an [attribute.KeyValue] identifying the error type of err.
//
// If err is nil, the returned attribute has the default value
// [ErrorTypeOther].
//
// If err's type has the method
//
// ErrorType() string
//
// then the returned attribute has the value of err.ErrorType(). Otherwise, the
// returned attribute has a value derived from the concrete type of err.
//
// The key of the returned attribute is [ErrorTypeKey].
func ErrorType(err error) attribute.KeyValue {
if err == nil {
return ErrorTypeOther
}
t := reflect.TypeOf(err)
var value string
if t.PkgPath() == "" && t.Name() == "" {
// Likely a builtin type.
value = t.String()
} else {
value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
}
if value == "" {
return ErrorTypeOther
}
return ErrorTypeKey.String(value)
return ErrorTypeKey.String(errorType(err))
}
// errorType derives a string identifying err's type. A non-empty result of
// an ErrorType() string method takes precedence; otherwise the concrete
// type's package path and name are used, falling back to reflect's type
// string for types without both (predeclared, not-defined, or aliases for
// not-defined types — a best-effort, not guaranteed unique).
func errorType(err error) string {
	if et, ok := err.(interface{ ErrorType() string }); ok {
		if s := et.ErrorType(); s != "" {
			return s
		}
	}
	t := reflect.TypeOf(err)
	if pkg, name := t.PkgPath(), t.Name(); pkg != "" && name != "" {
		return pkg + "." + name
	}
	return t.String()
}

View File

@@ -91,6 +91,11 @@ type ClientActiveRequests struct {
metric.Int64UpDownCounter
}
// newClientActiveRequestsOpts holds the default description and unit for the
// http.client.active_requests instrument.
var newClientActiveRequestsOpts = []metric.Int64UpDownCounterOption{
	metric.WithDescription("Number of active HTTP requests."),
	metric.WithUnit("{request}"),
}
// NewClientActiveRequests returns a new ClientActiveRequests instrument.
func NewClientActiveRequests(
m metric.Meter,
@@ -101,15 +106,18 @@ func NewClientActiveRequests(
return ClientActiveRequests{noop.Int64UpDownCounter{}}, nil
}
if len(opt) == 0 {
opt = newClientActiveRequestsOpts
} else {
opt = append(opt, newClientActiveRequestsOpts...)
}
i, err := m.Int64UpDownCounter(
"http.client.active_requests",
append([]metric.Int64UpDownCounterOption{
metric.WithDescription("Number of active HTTP requests."),
metric.WithUnit("{request}"),
}, opt...)...,
opt...,
)
if err != nil {
return ClientActiveRequests{noop.Int64UpDownCounter{}}, err
return ClientActiveRequests{noop.Int64UpDownCounter{}}, err
}
return ClientActiveRequests{i}, nil
}
@@ -223,6 +231,11 @@ type ClientConnectionDuration struct {
metric.Float64Histogram
}
// newClientConnectionDurationOpts holds the default description and unit for
// the http.client.connection.duration instrument.
var newClientConnectionDurationOpts = []metric.Float64HistogramOption{
	metric.WithDescription("The duration of the successfully established outbound HTTP connections."),
	metric.WithUnit("s"),
}
// NewClientConnectionDuration returns a new ClientConnectionDuration instrument.
func NewClientConnectionDuration(
m metric.Meter,
@@ -233,15 +246,18 @@ func NewClientConnectionDuration(
return ClientConnectionDuration{noop.Float64Histogram{}}, nil
}
if len(opt) == 0 {
opt = newClientConnectionDurationOpts
} else {
opt = append(opt, newClientConnectionDurationOpts...)
}
i, err := m.Float64Histogram(
"http.client.connection.duration",
append([]metric.Float64HistogramOption{
metric.WithDescription("The duration of the successfully established outbound HTTP connections."),
metric.WithUnit("s"),
}, opt...)...,
opt...,
)
if err != nil {
return ClientConnectionDuration{noop.Float64Histogram{}}, err
return ClientConnectionDuration{noop.Float64Histogram{}}, err
}
return ClientConnectionDuration{i}, nil
}
@@ -310,6 +326,7 @@ func (m ClientConnectionDuration) Record(
func (m ClientConnectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
if set.Len() == 0 {
m.Float64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -353,6 +370,11 @@ type ClientOpenConnections struct {
metric.Int64UpDownCounter
}
// newClientOpenConnectionsOpts holds the default description and unit for
// the http.client.open_connections instrument.
var newClientOpenConnectionsOpts = []metric.Int64UpDownCounterOption{
	metric.WithDescription("Number of outbound HTTP connections that are currently active or idle on the client."),
	metric.WithUnit("{connection}"),
}
// NewClientOpenConnections returns a new ClientOpenConnections instrument.
func NewClientOpenConnections(
m metric.Meter,
@@ -363,15 +385,18 @@ func NewClientOpenConnections(
return ClientOpenConnections{noop.Int64UpDownCounter{}}, nil
}
if len(opt) == 0 {
opt = newClientOpenConnectionsOpts
} else {
opt = append(opt, newClientOpenConnectionsOpts...)
}
i, err := m.Int64UpDownCounter(
"http.client.open_connections",
append([]metric.Int64UpDownCounterOption{
metric.WithDescription("Number of outbound HTTP connections that are currently active or idle on the client."),
metric.WithUnit("{connection}"),
}, opt...)...,
opt...,
)
if err != nil {
return ClientOpenConnections{noop.Int64UpDownCounter{}}, err
return ClientOpenConnections{noop.Int64UpDownCounter{}}, err
}
return ClientOpenConnections{i}, nil
}
@@ -488,6 +513,11 @@ type ClientRequestBodySize struct {
metric.Int64Histogram
}
// newClientRequestBodySizeOpts holds the default description and unit for
// the http.client.request.body.size instrument.
var newClientRequestBodySizeOpts = []metric.Int64HistogramOption{
	metric.WithDescription("Size of HTTP client request bodies."),
	metric.WithUnit("By"),
}
// NewClientRequestBodySize returns a new ClientRequestBodySize instrument.
func NewClientRequestBodySize(
m metric.Meter,
@@ -498,15 +528,18 @@ func NewClientRequestBodySize(
return ClientRequestBodySize{noop.Int64Histogram{}}, nil
}
if len(opt) == 0 {
opt = newClientRequestBodySizeOpts
} else {
opt = append(opt, newClientRequestBodySizeOpts...)
}
i, err := m.Int64Histogram(
"http.client.request.body.size",
append([]metric.Int64HistogramOption{
metric.WithDescription("Size of HTTP client request bodies."),
metric.WithUnit("By"),
}, opt...)...,
opt...,
)
if err != nil {
return ClientRequestBodySize{noop.Int64Histogram{}}, err
return ClientRequestBodySize{noop.Int64Histogram{}}, err
}
return ClientRequestBodySize{i}, nil
}
@@ -593,6 +626,7 @@ func (m ClientRequestBodySize) Record(
func (m ClientRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -662,6 +696,11 @@ type ClientRequestDuration struct {
metric.Float64Histogram
}
var newClientRequestDurationOpts = []metric.Float64HistogramOption{
metric.WithDescription("Duration of HTTP client requests."),
metric.WithUnit("s"),
}
// NewClientRequestDuration returns a new ClientRequestDuration instrument.
func NewClientRequestDuration(
m metric.Meter,
@@ -672,15 +711,18 @@ func NewClientRequestDuration(
return ClientRequestDuration{noop.Float64Histogram{}}, nil
}
if len(opt) == 0 {
opt = newClientRequestDurationOpts
} else {
opt = append(opt, newClientRequestDurationOpts...)
}
i, err := m.Float64Histogram(
"http.client.request.duration",
append([]metric.Float64HistogramOption{
metric.WithDescription("Duration of HTTP client requests."),
metric.WithUnit("s"),
}, opt...)...,
opt...,
)
if err != nil {
return ClientRequestDuration{noop.Float64Histogram{}}, err
return ClientRequestDuration{noop.Float64Histogram{}}, err
}
return ClientRequestDuration{i}, nil
}
@@ -753,6 +795,7 @@ func (m ClientRequestDuration) Record(
func (m ClientRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
if set.Len() == 0 {
m.Float64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -822,6 +865,11 @@ type ClientResponseBodySize struct {
metric.Int64Histogram
}
var newClientResponseBodySizeOpts = []metric.Int64HistogramOption{
metric.WithDescription("Size of HTTP client response bodies."),
metric.WithUnit("By"),
}
// NewClientResponseBodySize returns a new ClientResponseBodySize instrument.
func NewClientResponseBodySize(
m metric.Meter,
@@ -832,15 +880,18 @@ func NewClientResponseBodySize(
return ClientResponseBodySize{noop.Int64Histogram{}}, nil
}
if len(opt) == 0 {
opt = newClientResponseBodySizeOpts
} else {
opt = append(opt, newClientResponseBodySizeOpts...)
}
i, err := m.Int64Histogram(
"http.client.response.body.size",
append([]metric.Int64HistogramOption{
metric.WithDescription("Size of HTTP client response bodies."),
metric.WithUnit("By"),
}, opt...)...,
opt...,
)
if err != nil {
return ClientResponseBodySize{noop.Int64Histogram{}}, err
return ClientResponseBodySize{noop.Int64Histogram{}}, err
}
return ClientResponseBodySize{i}, nil
}
@@ -927,6 +978,7 @@ func (m ClientResponseBodySize) Record(
func (m ClientResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -996,6 +1048,11 @@ type ServerActiveRequests struct {
metric.Int64UpDownCounter
}
var newServerActiveRequestsOpts = []metric.Int64UpDownCounterOption{
metric.WithDescription("Number of active HTTP server requests."),
metric.WithUnit("{request}"),
}
// NewServerActiveRequests returns a new ServerActiveRequests instrument.
func NewServerActiveRequests(
m metric.Meter,
@@ -1006,15 +1063,18 @@ func NewServerActiveRequests(
return ServerActiveRequests{noop.Int64UpDownCounter{}}, nil
}
if len(opt) == 0 {
opt = newServerActiveRequestsOpts
} else {
opt = append(opt, newServerActiveRequestsOpts...)
}
i, err := m.Int64UpDownCounter(
"http.server.active_requests",
append([]metric.Int64UpDownCounterOption{
metric.WithDescription("Number of active HTTP server requests."),
metric.WithUnit("{request}"),
}, opt...)...,
opt...,
)
if err != nil {
return ServerActiveRequests{noop.Int64UpDownCounter{}}, err
return ServerActiveRequests{noop.Int64UpDownCounter{}}, err
}
return ServerActiveRequests{i}, nil
}
@@ -1118,6 +1178,11 @@ type ServerRequestBodySize struct {
metric.Int64Histogram
}
var newServerRequestBodySizeOpts = []metric.Int64HistogramOption{
metric.WithDescription("Size of HTTP server request bodies."),
metric.WithUnit("By"),
}
// NewServerRequestBodySize returns a new ServerRequestBodySize instrument.
func NewServerRequestBodySize(
m metric.Meter,
@@ -1128,15 +1193,18 @@ func NewServerRequestBodySize(
return ServerRequestBodySize{noop.Int64Histogram{}}, nil
}
if len(opt) == 0 {
opt = newServerRequestBodySizeOpts
} else {
opt = append(opt, newServerRequestBodySizeOpts...)
}
i, err := m.Int64Histogram(
"http.server.request.body.size",
append([]metric.Int64HistogramOption{
metric.WithDescription("Size of HTTP server request bodies."),
metric.WithUnit("By"),
}, opt...)...,
opt...,
)
if err != nil {
return ServerRequestBodySize{noop.Int64Histogram{}}, err
return ServerRequestBodySize{noop.Int64Histogram{}}, err
}
return ServerRequestBodySize{i}, nil
}
@@ -1220,6 +1288,7 @@ func (m ServerRequestBodySize) Record(
func (m ServerRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -1299,6 +1368,11 @@ type ServerRequestDuration struct {
metric.Float64Histogram
}
var newServerRequestDurationOpts = []metric.Float64HistogramOption{
metric.WithDescription("Duration of HTTP server requests."),
metric.WithUnit("s"),
}
// NewServerRequestDuration returns a new ServerRequestDuration instrument.
func NewServerRequestDuration(
m metric.Meter,
@@ -1309,15 +1383,18 @@ func NewServerRequestDuration(
return ServerRequestDuration{noop.Float64Histogram{}}, nil
}
if len(opt) == 0 {
opt = newServerRequestDurationOpts
} else {
opt = append(opt, newServerRequestDurationOpts...)
}
i, err := m.Float64Histogram(
"http.server.request.duration",
append([]metric.Float64HistogramOption{
metric.WithDescription("Duration of HTTP server requests."),
metric.WithUnit("s"),
}, opt...)...,
opt...,
)
if err != nil {
return ServerRequestDuration{noop.Float64Histogram{}}, err
return ServerRequestDuration{noop.Float64Histogram{}}, err
}
return ServerRequestDuration{i}, nil
}
@@ -1387,6 +1464,7 @@ func (m ServerRequestDuration) Record(
func (m ServerRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
if set.Len() == 0 {
m.Float64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -1466,6 +1544,11 @@ type ServerResponseBodySize struct {
metric.Int64Histogram
}
var newServerResponseBodySizeOpts = []metric.Int64HistogramOption{
metric.WithDescription("Size of HTTP server response bodies."),
metric.WithUnit("By"),
}
// NewServerResponseBodySize returns a new ServerResponseBodySize instrument.
func NewServerResponseBodySize(
m metric.Meter,
@@ -1476,15 +1559,18 @@ func NewServerResponseBodySize(
return ServerResponseBodySize{noop.Int64Histogram{}}, nil
}
if len(opt) == 0 {
opt = newServerResponseBodySizeOpts
} else {
opt = append(opt, newServerResponseBodySizeOpts...)
}
i, err := m.Int64Histogram(
"http.server.response.body.size",
append([]metric.Int64HistogramOption{
metric.WithDescription("Size of HTTP server response bodies."),
metric.WithUnit("By"),
}, opt...)...,
opt...,
)
if err != nil {
return ServerResponseBodySize{noop.Int64Histogram{}}, err
return ServerResponseBodySize{noop.Int64Histogram{}}, err
}
return ServerResponseBodySize{i}, nil
}
@@ -1568,6 +1654,7 @@ func (m ServerResponseBodySize) Record(
func (m ServerResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -1638,4 +1725,4 @@ func (ServerResponseBodySize) AttrServerPort(val int) attribute.KeyValue {
// the category of synthetic traffic, such as tests or bots.
func (ServerResponseBodySize) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue {
return attribute.String("user_agent.synthetic.type", string(val))
}
}

View File

@@ -3,7 +3,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package httpconv provides types and functionality for OpenTelemetry semantic
// Package otelconv provides types and functionality for OpenTelemetry semantic
// conventions in the "otel" namespace.
package otelconv
@@ -172,6 +172,11 @@ type SDKExporterLogExported struct {
metric.Int64Counter
}
var newSDKExporterLogExportedOpts = []metric.Int64CounterOption{
metric.WithDescription("The number of log records for which the export has finished, either successful or failed."),
metric.WithUnit("{log_record}"),
}
// NewSDKExporterLogExported returns a new SDKExporterLogExported instrument.
func NewSDKExporterLogExported(
m metric.Meter,
@@ -182,15 +187,18 @@ func NewSDKExporterLogExported(
return SDKExporterLogExported{noop.Int64Counter{}}, nil
}
if len(opt) == 0 {
opt = newSDKExporterLogExportedOpts
} else {
opt = append(opt, newSDKExporterLogExportedOpts...)
}
i, err := m.Int64Counter(
"otel.sdk.exporter.log.exported",
append([]metric.Int64CounterOption{
metric.WithDescription("The number of log records for which the export has finished, either successful or failed."),
metric.WithUnit("{log_record}"),
}, opt...)...,
opt...,
)
if err != nil {
return SDKExporterLogExported{noop.Int64Counter{}}, err
return SDKExporterLogExported{noop.Int64Counter{}}, err
}
return SDKExporterLogExported{i}, nil
}
@@ -319,6 +327,11 @@ type SDKExporterLogInflight struct {
metric.Int64UpDownCounter
}
var newSDKExporterLogInflightOpts = []metric.Int64UpDownCounterOption{
metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
metric.WithUnit("{log_record}"),
}
// NewSDKExporterLogInflight returns a new SDKExporterLogInflight instrument.
func NewSDKExporterLogInflight(
m metric.Meter,
@@ -329,15 +342,18 @@ func NewSDKExporterLogInflight(
return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, nil
}
if len(opt) == 0 {
opt = newSDKExporterLogInflightOpts
} else {
opt = append(opt, newSDKExporterLogInflightOpts...)
}
i, err := m.Int64UpDownCounter(
"otel.sdk.exporter.log.inflight",
append([]metric.Int64UpDownCounterOption{
metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
metric.WithUnit("{log_record}"),
}, opt...)...,
opt...,
)
if err != nil {
return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err
return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err
}
return SDKExporterLogInflight{i}, nil
}
@@ -449,6 +465,11 @@ type SDKExporterMetricDataPointExported struct {
metric.Int64Counter
}
var newSDKExporterMetricDataPointExportedOpts = []metric.Int64CounterOption{
metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."),
metric.WithUnit("{data_point}"),
}
// NewSDKExporterMetricDataPointExported returns a new
// SDKExporterMetricDataPointExported instrument.
func NewSDKExporterMetricDataPointExported(
@@ -460,15 +481,18 @@ func NewSDKExporterMetricDataPointExported(
return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, nil
}
if len(opt) == 0 {
opt = newSDKExporterMetricDataPointExportedOpts
} else {
opt = append(opt, newSDKExporterMetricDataPointExportedOpts...)
}
i, err := m.Int64Counter(
"otel.sdk.exporter.metric_data_point.exported",
append([]metric.Int64CounterOption{
metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."),
metric.WithUnit("{data_point}"),
}, opt...)...,
opt...,
)
if err != nil {
return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err
return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err
}
return SDKExporterMetricDataPointExported{i}, nil
}
@@ -598,6 +622,11 @@ type SDKExporterMetricDataPointInflight struct {
metric.Int64UpDownCounter
}
var newSDKExporterMetricDataPointInflightOpts = []metric.Int64UpDownCounterOption{
metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
metric.WithUnit("{data_point}"),
}
// NewSDKExporterMetricDataPointInflight returns a new
// SDKExporterMetricDataPointInflight instrument.
func NewSDKExporterMetricDataPointInflight(
@@ -609,15 +638,18 @@ func NewSDKExporterMetricDataPointInflight(
return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, nil
}
if len(opt) == 0 {
opt = newSDKExporterMetricDataPointInflightOpts
} else {
opt = append(opt, newSDKExporterMetricDataPointInflightOpts...)
}
i, err := m.Int64UpDownCounter(
"otel.sdk.exporter.metric_data_point.inflight",
append([]metric.Int64UpDownCounterOption{
metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
metric.WithUnit("{data_point}"),
}, opt...)...,
opt...,
)
if err != nil {
return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err
return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err
}
return SDKExporterMetricDataPointInflight{i}, nil
}
@@ -728,6 +760,11 @@ type SDKExporterOperationDuration struct {
metric.Float64Histogram
}
var newSDKExporterOperationDurationOpts = []metric.Float64HistogramOption{
metric.WithDescription("The duration of exporting a batch of telemetry records."),
metric.WithUnit("s"),
}
// NewSDKExporterOperationDuration returns a new SDKExporterOperationDuration
// instrument.
func NewSDKExporterOperationDuration(
@@ -739,15 +776,18 @@ func NewSDKExporterOperationDuration(
return SDKExporterOperationDuration{noop.Float64Histogram{}}, nil
}
if len(opt) == 0 {
opt = newSDKExporterOperationDurationOpts
} else {
opt = append(opt, newSDKExporterOperationDurationOpts...)
}
i, err := m.Float64Histogram(
"otel.sdk.exporter.operation.duration",
append([]metric.Float64HistogramOption{
metric.WithDescription("The duration of exporting a batch of telemetry records."),
metric.WithUnit("s"),
}, opt...)...,
opt...,
)
if err != nil {
return SDKExporterOperationDuration{noop.Float64Histogram{}}, err
return SDKExporterOperationDuration{noop.Float64Histogram{}}, err
}
return SDKExporterOperationDuration{i}, nil
}
@@ -825,6 +865,7 @@ func (m SDKExporterOperationDuration) Record(
func (m SDKExporterOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
if set.Len() == 0 {
m.Float64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -893,6 +934,11 @@ type SDKExporterSpanExported struct {
metric.Int64Counter
}
var newSDKExporterSpanExportedOpts = []metric.Int64CounterOption{
metric.WithDescription("The number of spans for which the export has finished, either successful or failed."),
metric.WithUnit("{span}"),
}
// NewSDKExporterSpanExported returns a new SDKExporterSpanExported instrument.
func NewSDKExporterSpanExported(
m metric.Meter,
@@ -903,15 +949,18 @@ func NewSDKExporterSpanExported(
return SDKExporterSpanExported{noop.Int64Counter{}}, nil
}
if len(opt) == 0 {
opt = newSDKExporterSpanExportedOpts
} else {
opt = append(opt, newSDKExporterSpanExportedOpts...)
}
i, err := m.Int64Counter(
"otel.sdk.exporter.span.exported",
append([]metric.Int64CounterOption{
metric.WithDescription("The number of spans for which the export has finished, either successful or failed."),
metric.WithUnit("{span}"),
}, opt...)...,
opt...,
)
if err != nil {
return SDKExporterSpanExported{noop.Int64Counter{}}, err
return SDKExporterSpanExported{noop.Int64Counter{}}, err
}
return SDKExporterSpanExported{i}, nil
}
@@ -1040,6 +1089,11 @@ type SDKExporterSpanInflight struct {
metric.Int64UpDownCounter
}
var newSDKExporterSpanInflightOpts = []metric.Int64UpDownCounterOption{
metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
metric.WithUnit("{span}"),
}
// NewSDKExporterSpanInflight returns a new SDKExporterSpanInflight instrument.
func NewSDKExporterSpanInflight(
m metric.Meter,
@@ -1050,15 +1104,18 @@ func NewSDKExporterSpanInflight(
return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, nil
}
if len(opt) == 0 {
opt = newSDKExporterSpanInflightOpts
} else {
opt = append(opt, newSDKExporterSpanInflightOpts...)
}
i, err := m.Int64UpDownCounter(
"otel.sdk.exporter.span.inflight",
append([]metric.Int64UpDownCounterOption{
metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
metric.WithUnit("{span}"),
}, opt...)...,
opt...,
)
if err != nil {
return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err
return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err
}
return SDKExporterSpanInflight{i}, nil
}
@@ -1169,6 +1226,11 @@ type SDKLogCreated struct {
metric.Int64Counter
}
var newSDKLogCreatedOpts = []metric.Int64CounterOption{
metric.WithDescription("The number of logs submitted to enabled SDK Loggers."),
metric.WithUnit("{log_record}"),
}
// NewSDKLogCreated returns a new SDKLogCreated instrument.
func NewSDKLogCreated(
m metric.Meter,
@@ -1179,15 +1241,18 @@ func NewSDKLogCreated(
return SDKLogCreated{noop.Int64Counter{}}, nil
}
if len(opt) == 0 {
opt = newSDKLogCreatedOpts
} else {
opt = append(opt, newSDKLogCreatedOpts...)
}
i, err := m.Int64Counter(
"otel.sdk.log.created",
append([]metric.Int64CounterOption{
metric.WithDescription("The number of logs submitted to enabled SDK Loggers."),
metric.WithUnit("{log_record}"),
}, opt...)...,
opt...,
)
if err != nil {
return SDKLogCreated{noop.Int64Counter{}}, err
return SDKLogCreated{noop.Int64Counter{}}, err
}
return SDKLogCreated{i}, nil
}
@@ -1254,6 +1319,11 @@ type SDKMetricReaderCollectionDuration struct {
metric.Float64Histogram
}
var newSDKMetricReaderCollectionDurationOpts = []metric.Float64HistogramOption{
metric.WithDescription("The duration of the collect operation of the metric reader."),
metric.WithUnit("s"),
}
// NewSDKMetricReaderCollectionDuration returns a new
// SDKMetricReaderCollectionDuration instrument.
func NewSDKMetricReaderCollectionDuration(
@@ -1265,15 +1335,18 @@ func NewSDKMetricReaderCollectionDuration(
return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, nil
}
if len(opt) == 0 {
opt = newSDKMetricReaderCollectionDurationOpts
} else {
opt = append(opt, newSDKMetricReaderCollectionDurationOpts...)
}
i, err := m.Float64Histogram(
"otel.sdk.metric_reader.collection.duration",
append([]metric.Float64HistogramOption{
metric.WithDescription("The duration of the collect operation of the metric reader."),
metric.WithUnit("s"),
}, opt...)...,
opt...,
)
if err != nil {
return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err
return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err
}
return SDKMetricReaderCollectionDuration{i}, nil
}
@@ -1343,6 +1416,7 @@ func (m SDKMetricReaderCollectionDuration) Record(
func (m SDKMetricReaderCollectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
if set.Len() == 0 {
m.Float64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -1384,6 +1458,11 @@ type SDKProcessorLogProcessed struct {
metric.Int64Counter
}
var newSDKProcessorLogProcessedOpts = []metric.Int64CounterOption{
metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."),
metric.WithUnit("{log_record}"),
}
// NewSDKProcessorLogProcessed returns a new SDKProcessorLogProcessed instrument.
func NewSDKProcessorLogProcessed(
m metric.Meter,
@@ -1394,15 +1473,18 @@ func NewSDKProcessorLogProcessed(
return SDKProcessorLogProcessed{noop.Int64Counter{}}, nil
}
if len(opt) == 0 {
opt = newSDKProcessorLogProcessedOpts
} else {
opt = append(opt, newSDKProcessorLogProcessedOpts...)
}
i, err := m.Int64Counter(
"otel.sdk.processor.log.processed",
append([]metric.Int64CounterOption{
metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."),
metric.WithUnit("{log_record}"),
}, opt...)...,
opt...,
)
if err != nil {
return SDKProcessorLogProcessed{noop.Int64Counter{}}, err
return SDKProcessorLogProcessed{noop.Int64Counter{}}, err
}
return SDKProcessorLogProcessed{i}, nil
}
@@ -1515,6 +1597,11 @@ type SDKProcessorLogQueueCapacity struct {
metric.Int64ObservableUpDownCounter
}
var newSDKProcessorLogQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{
metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."),
metric.WithUnit("{log_record}"),
}
// NewSDKProcessorLogQueueCapacity returns a new SDKProcessorLogQueueCapacity
// instrument.
func NewSDKProcessorLogQueueCapacity(
@@ -1526,15 +1613,18 @@ func NewSDKProcessorLogQueueCapacity(
return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil
}
if len(opt) == 0 {
opt = newSDKProcessorLogQueueCapacityOpts
} else {
opt = append(opt, newSDKProcessorLogQueueCapacityOpts...)
}
i, err := m.Int64ObservableUpDownCounter(
"otel.sdk.processor.log.queue.capacity",
append([]metric.Int64ObservableUpDownCounterOption{
metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."),
metric.WithUnit("{log_record}"),
}, opt...)...,
opt...,
)
if err != nil {
return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
}
return SDKProcessorLogQueueCapacity{i}, nil
}
@@ -1581,6 +1671,11 @@ type SDKProcessorLogQueueSize struct {
metric.Int64ObservableUpDownCounter
}
var newSDKProcessorLogQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{
metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."),
metric.WithUnit("{log_record}"),
}
// NewSDKProcessorLogQueueSize returns a new SDKProcessorLogQueueSize instrument.
func NewSDKProcessorLogQueueSize(
m metric.Meter,
@@ -1591,15 +1686,18 @@ func NewSDKProcessorLogQueueSize(
return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, nil
}
if len(opt) == 0 {
opt = newSDKProcessorLogQueueSizeOpts
} else {
opt = append(opt, newSDKProcessorLogQueueSizeOpts...)
}
i, err := m.Int64ObservableUpDownCounter(
"otel.sdk.processor.log.queue.size",
append([]metric.Int64ObservableUpDownCounterOption{
metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."),
metric.WithUnit("{log_record}"),
}, opt...)...,
opt...,
)
if err != nil {
return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err
return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err
}
return SDKProcessorLogQueueSize{i}, nil
}
@@ -1646,6 +1744,11 @@ type SDKProcessorSpanProcessed struct {
metric.Int64Counter
}
var newSDKProcessorSpanProcessedOpts = []metric.Int64CounterOption{
metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."),
metric.WithUnit("{span}"),
}
// NewSDKProcessorSpanProcessed returns a new SDKProcessorSpanProcessed
// instrument.
func NewSDKProcessorSpanProcessed(
@@ -1657,15 +1760,18 @@ func NewSDKProcessorSpanProcessed(
return SDKProcessorSpanProcessed{noop.Int64Counter{}}, nil
}
if len(opt) == 0 {
opt = newSDKProcessorSpanProcessedOpts
} else {
opt = append(opt, newSDKProcessorSpanProcessedOpts...)
}
i, err := m.Int64Counter(
"otel.sdk.processor.span.processed",
append([]metric.Int64CounterOption{
metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."),
metric.WithUnit("{span}"),
}, opt...)...,
opt...,
)
if err != nil {
return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err
return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err
}
return SDKProcessorSpanProcessed{i}, nil
}
@@ -1778,6 +1884,11 @@ type SDKProcessorSpanQueueCapacity struct {
metric.Int64ObservableUpDownCounter
}
var newSDKProcessorSpanQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{
metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."),
metric.WithUnit("{span}"),
}
// NewSDKProcessorSpanQueueCapacity returns a new SDKProcessorSpanQueueCapacity
// instrument.
func NewSDKProcessorSpanQueueCapacity(
@@ -1789,15 +1900,18 @@ func NewSDKProcessorSpanQueueCapacity(
return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil
}
if len(opt) == 0 {
opt = newSDKProcessorSpanQueueCapacityOpts
} else {
opt = append(opt, newSDKProcessorSpanQueueCapacityOpts...)
}
i, err := m.Int64ObservableUpDownCounter(
"otel.sdk.processor.span.queue.capacity",
append([]metric.Int64ObservableUpDownCounterOption{
metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."),
metric.WithUnit("{span}"),
}, opt...)...,
opt...,
)
if err != nil {
return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
}
return SDKProcessorSpanQueueCapacity{i}, nil
}
@@ -1844,6 +1958,11 @@ type SDKProcessorSpanQueueSize struct {
metric.Int64ObservableUpDownCounter
}
var newSDKProcessorSpanQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{
metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."),
metric.WithUnit("{span}"),
}
// NewSDKProcessorSpanQueueSize returns a new SDKProcessorSpanQueueSize
// instrument.
func NewSDKProcessorSpanQueueSize(
@@ -1855,15 +1974,18 @@ func NewSDKProcessorSpanQueueSize(
return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, nil
}
if len(opt) == 0 {
opt = newSDKProcessorSpanQueueSizeOpts
} else {
opt = append(opt, newSDKProcessorSpanQueueSizeOpts...)
}
i, err := m.Int64ObservableUpDownCounter(
"otel.sdk.processor.span.queue.size",
append([]metric.Int64ObservableUpDownCounterOption{
metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."),
metric.WithUnit("{span}"),
}, opt...)...,
opt...,
)
if err != nil {
return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err
return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err
}
return SDKProcessorSpanQueueSize{i}, nil
}
@@ -1910,6 +2032,11 @@ type SDKSpanLive struct {
metric.Int64UpDownCounter
}
var newSDKSpanLiveOpts = []metric.Int64UpDownCounterOption{
metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."),
metric.WithUnit("{span}"),
}
// NewSDKSpanLive returns a new SDKSpanLive instrument.
func NewSDKSpanLive(
m metric.Meter,
@@ -1920,15 +2047,18 @@ func NewSDKSpanLive(
return SDKSpanLive{noop.Int64UpDownCounter{}}, nil
}
if len(opt) == 0 {
opt = newSDKSpanLiveOpts
} else {
opt = append(opt, newSDKSpanLiveOpts...)
}
i, err := m.Int64UpDownCounter(
"otel.sdk.span.live",
append([]metric.Int64UpDownCounterOption{
metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."),
metric.WithUnit("{span}"),
}, opt...)...,
opt...,
)
if err != nil {
return SDKSpanLive{noop.Int64UpDownCounter{}}, err
return SDKSpanLive{noop.Int64UpDownCounter{}}, err
}
return SDKSpanLive{i}, nil
}
@@ -2013,6 +2143,11 @@ type SDKSpanStarted struct {
metric.Int64Counter
}
var newSDKSpanStartedOpts = []metric.Int64CounterOption{
metric.WithDescription("The number of created spans."),
metric.WithUnit("{span}"),
}
// NewSDKSpanStarted returns a new SDKSpanStarted instrument.
func NewSDKSpanStarted(
m metric.Meter,
@@ -2023,15 +2158,18 @@ func NewSDKSpanStarted(
return SDKSpanStarted{noop.Int64Counter{}}, nil
}
if len(opt) == 0 {
opt = newSDKSpanStartedOpts
} else {
opt = append(opt, newSDKSpanStartedOpts...)
}
i, err := m.Int64Counter(
"otel.sdk.span.started",
append([]metric.Int64CounterOption{
metric.WithDescription("The number of created spans."),
metric.WithUnit("{span}"),
}, opt...)...,
opt...,
)
if err != nil {
return SDKSpanStarted{noop.Int64Counter{}}, err
return SDKSpanStarted{noop.Int64Counter{}}, err
}
return SDKSpanStarted{i}, nil
}
@@ -2123,4 +2261,4 @@ func (SDKSpanStarted) AttrSpanParentOrigin(val SpanParentOriginAttr) attribute.K
// value of the sampler for this span.
func (SDKSpanStarted) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue {
return attribute.String("otel.span.sampling_result", string(val))
}
}

View File

@@ -3,7 +3,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package httpconv provides types and functionality for OpenTelemetry semantic
// Package rpcconv provides types and functionality for OpenTelemetry semantic
// conventions in the "rpc" namespace.
package rpcconv
@@ -28,6 +28,11 @@ type ClientDuration struct {
metric.Float64Histogram
}
var newClientDurationOpts = []metric.Float64HistogramOption{
metric.WithDescription("Measures the duration of outbound RPC."),
metric.WithUnit("ms"),
}
// NewClientDuration returns a new ClientDuration instrument.
func NewClientDuration(
m metric.Meter,
@@ -38,15 +43,18 @@ func NewClientDuration(
return ClientDuration{noop.Float64Histogram{}}, nil
}
if len(opt) == 0 {
opt = newClientDurationOpts
} else {
opt = append(opt, newClientDurationOpts...)
}
i, err := m.Float64Histogram(
"rpc.client.duration",
append([]metric.Float64HistogramOption{
metric.WithDescription("Measures the duration of outbound RPC."),
metric.WithUnit("ms"),
}, opt...)...,
opt...,
)
if err != nil {
return ClientDuration{noop.Float64Histogram{}}, err
return ClientDuration{noop.Float64Histogram{}}, err
}
return ClientDuration{i}, nil
}
@@ -102,6 +110,7 @@ func (m ClientDuration) Record(ctx context.Context, val float64, attrs ...attrib
func (m ClientDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
if set.Len() == 0 {
m.Float64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -121,6 +130,11 @@ type ClientRequestSize struct {
metric.Int64Histogram
}
var newClientRequestSizeOpts = []metric.Int64HistogramOption{
metric.WithDescription("Measures the size of RPC request messages (uncompressed)."),
metric.WithUnit("By"),
}
// NewClientRequestSize returns a new ClientRequestSize instrument.
func NewClientRequestSize(
m metric.Meter,
@@ -131,15 +145,18 @@ func NewClientRequestSize(
return ClientRequestSize{noop.Int64Histogram{}}, nil
}
if len(opt) == 0 {
opt = newClientRequestSizeOpts
} else {
opt = append(opt, newClientRequestSizeOpts...)
}
i, err := m.Int64Histogram(
"rpc.client.request.size",
append([]metric.Int64HistogramOption{
metric.WithDescription("Measures the size of RPC request messages (uncompressed)."),
metric.WithUnit("By"),
}, opt...)...,
opt...,
)
if err != nil {
return ClientRequestSize{noop.Int64Histogram{}}, err
return ClientRequestSize{noop.Int64Histogram{}}, err
}
return ClientRequestSize{i}, nil
}
@@ -189,6 +206,7 @@ func (m ClientRequestSize) Record(ctx context.Context, val int64, attrs ...attri
func (m ClientRequestSize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -208,6 +226,11 @@ type ClientRequestsPerRPC struct {
metric.Int64Histogram
}
var newClientRequestsPerRPCOpts = []metric.Int64HistogramOption{
metric.WithDescription("Measures the number of messages received per RPC."),
metric.WithUnit("{count}"),
}
// NewClientRequestsPerRPC returns a new ClientRequestsPerRPC instrument.
func NewClientRequestsPerRPC(
m metric.Meter,
@@ -218,15 +241,18 @@ func NewClientRequestsPerRPC(
return ClientRequestsPerRPC{noop.Int64Histogram{}}, nil
}
if len(opt) == 0 {
opt = newClientRequestsPerRPCOpts
} else {
opt = append(opt, newClientRequestsPerRPCOpts...)
}
i, err := m.Int64Histogram(
"rpc.client.requests_per_rpc",
append([]metric.Int64HistogramOption{
metric.WithDescription("Measures the number of messages received per RPC."),
metric.WithUnit("{count}"),
}, opt...)...,
opt...,
)
if err != nil {
return ClientRequestsPerRPC{noop.Int64Histogram{}}, err
return ClientRequestsPerRPC{noop.Int64Histogram{}}, err
}
return ClientRequestsPerRPC{i}, nil
}
@@ -280,6 +306,7 @@ func (m ClientRequestsPerRPC) Record(ctx context.Context, val int64, attrs ...at
func (m ClientRequestsPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -299,6 +326,11 @@ type ClientResponseSize struct {
metric.Int64Histogram
}
var newClientResponseSizeOpts = []metric.Int64HistogramOption{
metric.WithDescription("Measures the size of RPC response messages (uncompressed)."),
metric.WithUnit("By"),
}
// NewClientResponseSize returns a new ClientResponseSize instrument.
func NewClientResponseSize(
m metric.Meter,
@@ -309,15 +341,18 @@ func NewClientResponseSize(
return ClientResponseSize{noop.Int64Histogram{}}, nil
}
if len(opt) == 0 {
opt = newClientResponseSizeOpts
} else {
opt = append(opt, newClientResponseSizeOpts...)
}
i, err := m.Int64Histogram(
"rpc.client.response.size",
append([]metric.Int64HistogramOption{
metric.WithDescription("Measures the size of RPC response messages (uncompressed)."),
metric.WithUnit("By"),
}, opt...)...,
opt...,
)
if err != nil {
return ClientResponseSize{noop.Int64Histogram{}}, err
return ClientResponseSize{noop.Int64Histogram{}}, err
}
return ClientResponseSize{i}, nil
}
@@ -367,6 +402,7 @@ func (m ClientResponseSize) Record(ctx context.Context, val int64, attrs ...attr
func (m ClientResponseSize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -386,6 +422,11 @@ type ClientResponsesPerRPC struct {
metric.Int64Histogram
}
var newClientResponsesPerRPCOpts = []metric.Int64HistogramOption{
metric.WithDescription("Measures the number of messages sent per RPC."),
metric.WithUnit("{count}"),
}
// NewClientResponsesPerRPC returns a new ClientResponsesPerRPC instrument.
func NewClientResponsesPerRPC(
m metric.Meter,
@@ -396,15 +437,18 @@ func NewClientResponsesPerRPC(
return ClientResponsesPerRPC{noop.Int64Histogram{}}, nil
}
if len(opt) == 0 {
opt = newClientResponsesPerRPCOpts
} else {
opt = append(opt, newClientResponsesPerRPCOpts...)
}
i, err := m.Int64Histogram(
"rpc.client.responses_per_rpc",
append([]metric.Int64HistogramOption{
metric.WithDescription("Measures the number of messages sent per RPC."),
metric.WithUnit("{count}"),
}, opt...)...,
opt...,
)
if err != nil {
return ClientResponsesPerRPC{noop.Int64Histogram{}}, err
return ClientResponsesPerRPC{noop.Int64Histogram{}}, err
}
return ClientResponsesPerRPC{i}, nil
}
@@ -458,6 +502,7 @@ func (m ClientResponsesPerRPC) Record(ctx context.Context, val int64, attrs ...a
func (m ClientResponsesPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -477,6 +522,11 @@ type ServerDuration struct {
metric.Float64Histogram
}
var newServerDurationOpts = []metric.Float64HistogramOption{
metric.WithDescription("Measures the duration of inbound RPC."),
metric.WithUnit("ms"),
}
// NewServerDuration returns a new ServerDuration instrument.
func NewServerDuration(
m metric.Meter,
@@ -487,15 +537,18 @@ func NewServerDuration(
return ServerDuration{noop.Float64Histogram{}}, nil
}
if len(opt) == 0 {
opt = newServerDurationOpts
} else {
opt = append(opt, newServerDurationOpts...)
}
i, err := m.Float64Histogram(
"rpc.server.duration",
append([]metric.Float64HistogramOption{
metric.WithDescription("Measures the duration of inbound RPC."),
metric.WithUnit("ms"),
}, opt...)...,
opt...,
)
if err != nil {
return ServerDuration{noop.Float64Histogram{}}, err
return ServerDuration{noop.Float64Histogram{}}, err
}
return ServerDuration{i}, nil
}
@@ -551,6 +604,7 @@ func (m ServerDuration) Record(ctx context.Context, val float64, attrs ...attrib
func (m ServerDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
if set.Len() == 0 {
m.Float64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -570,6 +624,11 @@ type ServerRequestSize struct {
metric.Int64Histogram
}
var newServerRequestSizeOpts = []metric.Int64HistogramOption{
metric.WithDescription("Measures the size of RPC request messages (uncompressed)."),
metric.WithUnit("By"),
}
// NewServerRequestSize returns a new ServerRequestSize instrument.
func NewServerRequestSize(
m metric.Meter,
@@ -580,15 +639,18 @@ func NewServerRequestSize(
return ServerRequestSize{noop.Int64Histogram{}}, nil
}
if len(opt) == 0 {
opt = newServerRequestSizeOpts
} else {
opt = append(opt, newServerRequestSizeOpts...)
}
i, err := m.Int64Histogram(
"rpc.server.request.size",
append([]metric.Int64HistogramOption{
metric.WithDescription("Measures the size of RPC request messages (uncompressed)."),
metric.WithUnit("By"),
}, opt...)...,
opt...,
)
if err != nil {
return ServerRequestSize{noop.Int64Histogram{}}, err
return ServerRequestSize{noop.Int64Histogram{}}, err
}
return ServerRequestSize{i}, nil
}
@@ -638,6 +700,7 @@ func (m ServerRequestSize) Record(ctx context.Context, val int64, attrs ...attri
func (m ServerRequestSize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -657,6 +720,11 @@ type ServerRequestsPerRPC struct {
metric.Int64Histogram
}
var newServerRequestsPerRPCOpts = []metric.Int64HistogramOption{
metric.WithDescription("Measures the number of messages received per RPC."),
metric.WithUnit("{count}"),
}
// NewServerRequestsPerRPC returns a new ServerRequestsPerRPC instrument.
func NewServerRequestsPerRPC(
m metric.Meter,
@@ -667,15 +735,18 @@ func NewServerRequestsPerRPC(
return ServerRequestsPerRPC{noop.Int64Histogram{}}, nil
}
if len(opt) == 0 {
opt = newServerRequestsPerRPCOpts
} else {
opt = append(opt, newServerRequestsPerRPCOpts...)
}
i, err := m.Int64Histogram(
"rpc.server.requests_per_rpc",
append([]metric.Int64HistogramOption{
metric.WithDescription("Measures the number of messages received per RPC."),
metric.WithUnit("{count}"),
}, opt...)...,
opt...,
)
if err != nil {
return ServerRequestsPerRPC{noop.Int64Histogram{}}, err
return ServerRequestsPerRPC{noop.Int64Histogram{}}, err
}
return ServerRequestsPerRPC{i}, nil
}
@@ -729,6 +800,7 @@ func (m ServerRequestsPerRPC) Record(ctx context.Context, val int64, attrs ...at
func (m ServerRequestsPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -748,6 +820,11 @@ type ServerResponseSize struct {
metric.Int64Histogram
}
var newServerResponseSizeOpts = []metric.Int64HistogramOption{
metric.WithDescription("Measures the size of RPC response messages (uncompressed)."),
metric.WithUnit("By"),
}
// NewServerResponseSize returns a new ServerResponseSize instrument.
func NewServerResponseSize(
m metric.Meter,
@@ -758,15 +835,18 @@ func NewServerResponseSize(
return ServerResponseSize{noop.Int64Histogram{}}, nil
}
if len(opt) == 0 {
opt = newServerResponseSizeOpts
} else {
opt = append(opt, newServerResponseSizeOpts...)
}
i, err := m.Int64Histogram(
"rpc.server.response.size",
append([]metric.Int64HistogramOption{
metric.WithDescription("Measures the size of RPC response messages (uncompressed)."),
metric.WithUnit("By"),
}, opt...)...,
opt...,
)
if err != nil {
return ServerResponseSize{noop.Int64Histogram{}}, err
return ServerResponseSize{noop.Int64Histogram{}}, err
}
return ServerResponseSize{i}, nil
}
@@ -816,6 +896,7 @@ func (m ServerResponseSize) Record(ctx context.Context, val int64, attrs ...attr
func (m ServerResponseSize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -835,6 +916,11 @@ type ServerResponsesPerRPC struct {
metric.Int64Histogram
}
var newServerResponsesPerRPCOpts = []metric.Int64HistogramOption{
metric.WithDescription("Measures the number of messages sent per RPC."),
metric.WithUnit("{count}"),
}
// NewServerResponsesPerRPC returns a new ServerResponsesPerRPC instrument.
func NewServerResponsesPerRPC(
m metric.Meter,
@@ -845,15 +931,18 @@ func NewServerResponsesPerRPC(
return ServerResponsesPerRPC{noop.Int64Histogram{}}, nil
}
if len(opt) == 0 {
opt = newServerResponsesPerRPCOpts
} else {
opt = append(opt, newServerResponsesPerRPCOpts...)
}
i, err := m.Int64Histogram(
"rpc.server.responses_per_rpc",
append([]metric.Int64HistogramOption{
metric.WithDescription("Measures the number of messages sent per RPC."),
metric.WithUnit("{count}"),
}, opt...)...,
opt...,
)
if err != nil {
return ServerResponsesPerRPC{noop.Int64Histogram{}}, err
return ServerResponsesPerRPC{noop.Int64Histogram{}}, err
}
return ServerResponsesPerRPC{i}, nil
}
@@ -907,6 +996,7 @@ func (m ServerResponsesPerRPC) Record(ctx context.Context, val int64, attrs ...a
func (m ServerResponsesPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -917,4 +1007,4 @@ func (m ServerResponsesPerRPC) RecordSet(ctx context.Context, val int64, set att
*o = append(*o, metric.WithAttributeSet(set))
m.Int64Histogram.Record(ctx, val, *o...)
}
}

View File

@@ -0,0 +1,27 @@
<!-- Generated. DO NOT MODIFY. -->
# Migration from v1.39.0 to v1.40.0
The `go.opentelemetry.io/otel/semconv/v1.40.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.39.0` with the following exceptions.
## Removed
The following declarations have been removed.
Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions.
If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use.
If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case.
- `ErrorMessage`
- `ErrorMessageKey`
- `RPCMessageCompressedSize`
- `RPCMessageCompressedSizeKey`
- `RPCMessageID`
- `RPCMessageIDKey`
- `RPCMessageTypeKey`
- `RPCMessageTypeReceived`
- `RPCMessageTypeSent`
- `RPCMessageUncompressedSize`
- `RPCMessageUncompressedSizeKey`
[OpenTelemetry Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions
[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue

View File

@@ -0,0 +1,3 @@
# Semconv v1.40.0
[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.40.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.40.0)

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,9 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package semconv implements OpenTelemetry semantic conventions.
//
// OpenTelemetry semantic conventions are agreed standardized naming
// patterns for OpenTelemetry things. This package represents the v1.40.0
// version of the OpenTelemetry semantic conventions.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.40.0"

View File

@@ -0,0 +1,66 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.40.0"
import (
"errors"
"reflect"
"go.opentelemetry.io/otel/attribute"
)
// ErrorType returns an [attribute.KeyValue] identifying the error type of err.
//
// If err is nil, the returned attribute has the default value
// [ErrorTypeOther].
//
// If err or one of the errors in its chain has the method
//
// ErrorType() string
//
// the returned attribute has that method's return value. If multiple errors in
// the chain implement this method, the value from the first match found by
// [errors.As] is used. Otherwise, the returned attribute has a value derived
// from the concrete type of err.
//
// The key of the returned attribute is [ErrorTypeKey].
func ErrorType(err error) attribute.KeyValue {
if err == nil {
return ErrorTypeOther
}
return ErrorTypeKey.String(errorType(err))
}
func errorType(err error) string {
var s string
if et, ok := err.(interface{ ErrorType() string }); ok {
// Fast path: check the top-level error first.
s = et.ErrorType()
} else {
// Fallback: search the error chain for an ErrorType method.
var et interface{ ErrorType() string }
if errors.As(err, &et) {
// Prioritize the ErrorType method if available.
s = et.ErrorType()
}
}
if s == "" {
// Fallback to reflection if the ErrorType method is not supported or
// returns an empty value.
t := reflect.TypeOf(err)
pkg, name := t.PkgPath(), t.Name()
if pkg != "" && name != "" {
s = pkg + "." + name
} else {
// The type has no package path or name (predeclared, not-defined,
// or alias for a not-defined type).
//
// This is not guaranteed to be unique, but is a best effort.
s = t.String()
}
}
return s
}

View File

@@ -0,0 +1,9 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.40.0"
const (
// ExceptionEventName is the name of the Span event representing an exception.
ExceptionEventName = "exception"
)

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,9 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.40.0"
// SchemaURL is the schema URL that matches the version of the semantic conventions
// that this package defines. Semconv packages starting from v1.4.0 must declare
// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
const SchemaURL = "https://opentelemetry.io/schemas/1.40.0"

View File

@@ -20,7 +20,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
semconv "go.opentelemetry.io/otel/semconv/v1.40.0"
"go.opentelemetry.io/otel/trace/embedded"
"go.opentelemetry.io/otel/trace/internal/telemetry"
)

View File

@@ -4,6 +4,7 @@
package trace // import "go.opentelemetry.io/otel/trace"
import (
"slices"
"time"
"go.opentelemetry.io/otel/attribute"
@@ -304,12 +305,50 @@ func WithInstrumentationVersion(version string) TracerOption {
})
}
// WithInstrumentationAttributes sets the instrumentation attributes.
// mergeSets returns the union of keys between a and b. Any duplicate keys will
// use the value associated with b.
func mergeSets(a, b attribute.Set) attribute.Set {
// NewMergeIterator uses the first value for any duplicates.
iter := attribute.NewMergeIterator(&b, &a)
merged := make([]attribute.KeyValue, 0, a.Len()+b.Len())
for iter.Next() {
merged = append(merged, iter.Attribute())
}
return attribute.NewSet(merged...)
}
// WithInstrumentationAttributes adds the instrumentation attributes.
//
// The passed attributes will be de-duplicated.
// This is equivalent to calling [WithInstrumentationAttributeSet] with an
// [attribute.Set] created from a clone of the passed attributes.
// [WithInstrumentationAttributeSet] is recommended for more control.
//
// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
// options are passed, the attributes will be merged together in the order
// they are passed. Attributes with duplicate keys will use the last value passed.
func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption {
set := attribute.NewSet(slices.Clone(attr)...)
return WithInstrumentationAttributeSet(set)
}
// WithInstrumentationAttributeSet adds the instrumentation attributes.
//
// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
// options are passed, the attributes will be merged together in the order
// they are passed. Attributes with duplicate keys will use the last value passed.
func WithInstrumentationAttributeSet(set attribute.Set) TracerOption {
if set.Len() == 0 {
return tracerOptionFunc(func(config TracerConfig) TracerConfig {
return config
})
}
return tracerOptionFunc(func(config TracerConfig) TracerConfig {
config.attrs = attribute.NewSet(attr...)
if config.attrs.Len() == 0 {
config.attrs = set
} else {
config.attrs = mergeSets(config.attrs, set)
}
return config
})
}

View File

@@ -66,6 +66,10 @@ type Span interface {
// SetAttributes sets kv as attributes of the Span. If a key from kv
// already exists for an attribute of the Span it will be overwritten with
// the value contained in kv.
//
// Note that adding attributes at span creation using [WithAttributes] is preferred
// to calling SetAttribute later, as samplers can only consider information
// already present during span creation.
SetAttributes(kv ...attribute.KeyValue)
// TracerProvider returns a TracerProvider that can be used to generate

View File

@@ -12,6 +12,11 @@ const (
// with the sampling bit set means the span is sampled.
FlagsSampled = TraceFlags(0x01)
// FlagsRandom is a bitmask with the random trace ID flag set. When
// set, it signals that the trace ID was generated randomly with at
// least 56 bits of randomness (W3C Trace Context Level 2).
FlagsRandom = TraceFlags(0x02)
errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase"
errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equals to 32"

View File

@@ -61,7 +61,10 @@ func checkValue(val string) bool {
func checkKeyRemain(key string) bool {
// ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
for _, v := range key {
if isAlphaNum(byte(v)) {
if v > 127 {
return false
}
if isAlphaNumASCII(v) {
continue
}
switch v {
@@ -89,7 +92,7 @@ func checkKeyPart(key string, n int) bool {
return ret && checkKeyRemain(key[1:])
}
func isAlphaNum(c byte) bool {
func isAlphaNumASCII[T rune | byte](c T) bool {
if c >= 'a' && c <= 'z' {
return true
}
@@ -105,7 +108,7 @@ func checkKeyTenant(key string, n int) bool {
if key == "" {
return false
}
return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:])
return isAlphaNumASCII(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:])
}
// based on the W3C Trace Context specification

View File

@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
return "1.38.0"
return "1.42.0"
}

View File

@@ -3,7 +3,7 @@
module-sets:
stable-v1:
version: v1.38.0
version: v1.42.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opencensus
@@ -22,11 +22,11 @@ module-sets:
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
version: v0.60.0
version: v0.64.0
modules:
- go.opentelemetry.io/otel/exporters/prometheus
experimental-logs:
version: v0.14.0
version: v0.18.0
modules:
- go.opentelemetry.io/otel/log
- go.opentelemetry.io/otel/log/logtest
@@ -36,9 +36,31 @@ module-sets:
- go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
- go.opentelemetry.io/otel/exporters/stdout/stdoutlog
experimental-schema:
version: v0.0.13
version: v0.0.16
modules:
- go.opentelemetry.io/otel/schema
excluded-modules:
- go.opentelemetry.io/otel/internal/tools
- go.opentelemetry.io/otel/trace/internal/telemetry/test
modules:
go.opentelemetry.io/otel/exporters/stdout/stdouttrace:
version-refs:
- ./internal/version.go
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric:
version-refs:
- ./internal/version.go
go.opentelemetry.io/otel/exporters/prometheus:
version-refs:
- ./internal/version.go
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc:
version-refs:
- ./internal/version.go
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc:
version-refs:
- ./internal/version.go
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp:
version-refs:
- ./internal/version.go
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp:
version-refs:
- ./internal/version.go

View File

@@ -4,7 +4,10 @@
package tiff
import "io"
import (
"io"
"slices"
)
// buffer buffers an io.Reader to satisfy io.ReaderAt.
type buffer struct {
@@ -12,24 +15,19 @@ type buffer struct {
buf []byte
}
const fillChunkSize = 10 << 20 // 10 MB
// fill reads data from b.r until the buffer contains at least end bytes.
func (b *buffer) fill(end int) error {
m := len(b.buf)
if end > m {
if end > cap(b.buf) {
newcap := 1024
for newcap < end {
newcap *= 2
}
newbuf := make([]byte, end, newcap)
copy(newbuf, b.buf)
b.buf = newbuf
} else {
b.buf = b.buf[:end]
}
if n, err := io.ReadFull(b.r, b.buf[m:end]); err != nil {
end = m + n
b.buf = b.buf[:end]
for m < end {
next := min(end-m, fillChunkSize)
b.buf = slices.Grow(b.buf, next)
b.buf = b.buf[:m+next]
n, err := io.ReadFull(b.r, b.buf[m:m+next])
m += n
b.buf = b.buf[:m]
if err != nil {
return err
}
}
@@ -44,7 +42,8 @@ func (b *buffer) ReadAt(p []byte, off int64) (int, error) {
}
err := b.fill(end)
return copy(p, b.buf[o:end]), err
end = min(end, len(b.buf))
return copy(p, b.buf[min(o, end):end]), err
}
// Slice returns a slice of the underlying buffer. The slice contains

View File

@@ -103,7 +103,7 @@ func decode(r io.Reader, configOnly bool) (image.Image, image.Config, error) {
return m, image.Config{}, nil
case fccVP8L:
if wantAlpha || alpha != nil {
if alpha != nil {
return nil, image.Config{}, errInvalidFormat
}
if configOnly {

View File

@@ -75,8 +75,6 @@ func unregisterForTesting(name string) {
func init() {
internal.BalancerUnregister = unregisterForTesting
internal.ConnectedAddress = connectedAddress
internal.SetConnectedAddress = setConnectedAddress
}
// Get returns the resolver builder registered with the given name.

View File

@@ -26,6 +26,8 @@ import (
var (
// RandShuffle pseudo-randomizes the order of addresses.
RandShuffle = rand.Shuffle
// RandFloat64 returns, as a float64, a pseudo-random number in [0.0,1.0).
RandFloat64 = rand.Float64
// TimeAfterFunc allows mocking the timer for testing connection delay
// related functionality.
TimeAfterFunc = func(d time.Duration, f func()) func() {

View File

@@ -21,11 +21,14 @@
package pickfirst
import (
"cmp"
"encoding/json"
"errors"
"fmt"
"math"
"net"
"net/netip"
"slices"
"sync"
"time"
@@ -34,6 +37,8 @@ import (
"google.golang.org/grpc/connectivity"
expstats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/balancer/weight"
"google.golang.org/grpc/internal/envconfig"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/resolver"
@@ -258,8 +263,42 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
// will change the order of endpoints but not touch the order of the
// addresses within each endpoint. - A61
if cfg.ShuffleAddressList {
endpoints = append([]resolver.Endpoint{}, endpoints...)
internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
if envconfig.PickFirstWeightedShuffling {
type weightedEndpoint struct {
endpoint resolver.Endpoint
weight float64
}
// For each endpoint, compute a key as described in A113 and
// https://utopia.duth.gr/~pefraimi/research/data/2007EncOfAlg.pdf:
var weightedEndpoints []weightedEndpoint
for _, endpoint := range endpoints {
u := internal.RandFloat64() // Random number in [0.0, 1.0)
weight := weightAttribute(endpoint)
weightedEndpoints = append(weightedEndpoints, weightedEndpoint{
endpoint: endpoint,
weight: math.Pow(u, 1.0/float64(weight)),
})
}
// Sort endpoints by key in descending order and reconstruct the
// endpoints slice.
slices.SortFunc(weightedEndpoints, func(a, b weightedEndpoint) int {
return cmp.Compare(b.weight, a.weight)
})
// Here, and in the "else" block below, we clone the endpoints
// slice to avoid mutating the resolver state. Doing the latter
// would lead to data races if the caller is accessing the same
// slice concurrently.
sortedEndpoints := make([]resolver.Endpoint, len(endpoints))
for i, we := range weightedEndpoints {
sortedEndpoints[i] = we.endpoint
}
endpoints = sortedEndpoints
} else {
endpoints = slices.Clone(endpoints)
internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
}
}
// "Flatten the list by concatenating the ordered list of addresses for
@@ -906,3 +945,17 @@ func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
return a.Addr == b.Addr && a.ServerName == b.ServerName &&
a.Attributes.Equal(b.Attributes)
}
// weightAttribute is a convenience function which returns the value of the
// weight endpoint Attribute.
//
// When used in the xDS context, the weight attribute is guaranteed to be
// non-zero. But, when used in a non-xDS context, the weight attribute could be
// unset. A Default of 1 is used in the latter case.
func weightAttribute(e resolver.Endpoint) uint32 {
w := weight.FromEndpoint(e).Weight
if w == 0 {
return 1
}
return w
}

View File

@@ -111,20 +111,6 @@ type SubConnState struct {
// ConnectionError is set if the ConnectivityState is TransientFailure,
// describing the reason the SubConn failed. Otherwise, it is nil.
ConnectionError error
// connectedAddr contains the connected address when ConnectivityState is
// Ready. Otherwise, it is indeterminate.
connectedAddress resolver.Address
}
// connectedAddress returns the connected address for a SubConnState. The
// address is only valid if the state is READY.
func connectedAddress(scs SubConnState) resolver.Address {
return scs.connectedAddress
}
// setConnectedAddress sets the connected address for a SubConnState.
func setConnectedAddress(scs *SubConnState, addr resolver.Address) {
scs.connectedAddress = addr
}
// A Producer is a type shared among potentially many consumers. It is

View File

@@ -36,7 +36,6 @@ import (
)
var (
setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
// noOpRegisterHealthListenerFn is used when client side health checking is
// disabled. It sends a single READY update on the registered listener.
noOpRegisterHealthListenerFn = func(_ context.Context, listener func(balancer.SubConnState)) func() {
@@ -305,7 +304,7 @@ func newHealthData(s connectivity.State) *healthData {
// updateState is invoked by grpc to push a subConn state update to the
// underlying balancer.
func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) {
func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) {
acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || acbw.ccb.balancer == nil {
return
@@ -317,9 +316,6 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve
// opts.StateListener is set, so this cannot ever be nil.
// TODO: delete this comment when UpdateSubConnState is removed.
scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err}
if s == connectivity.Ready {
setConnectedAddress(&scs, curAddr)
}
// Invalidate the health listener by updating the healthData.
acbw.healthMu.Lock()
// A race may occur if a health listener is registered soon after the

View File

@@ -35,6 +35,8 @@ import (
"google.golang.org/grpc/balancer/pickfirst"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
expstats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
@@ -98,6 +100,41 @@ var (
errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
)
var (
disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
Name: "grpc.subchannel.disconnections",
Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
Unit: "{disconnection}",
Labels: []string{"grpc.target"},
OptionalLabels: []string{"grpc.lb.backend_service", "grpc.lb.locality", "grpc.disconnect_error"},
Default: false,
})
connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
Name: "grpc.subchannel.connection_attempts_succeeded",
Description: "EXPERIMENTAL. Number of successful connection attempts.",
Unit: "{attempt}",
Labels: []string{"grpc.target"},
OptionalLabels: []string{"grpc.lb.backend_service", "grpc.lb.locality"},
Default: false,
})
connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
Name: "grpc.subchannel.connection_attempts_failed",
Description: "EXPERIMENTAL. Number of failed connection attempts.",
Unit: "{attempt}",
Labels: []string{"grpc.target"},
OptionalLabels: []string{"grpc.lb.backend_service", "grpc.lb.locality"},
Default: false,
})
openConnectionsMetric = expstats.RegisterInt64UpDownCount(expstats.MetricDescriptor{
Name: "grpc.subchannel.open_connections",
Description: "EXPERIMENTAL. Number of open connections.",
Unit: "{attempt}",
Labels: []string{"grpc.target"},
OptionalLabels: []string{"grpc.lb.backend_service", "grpc.security_level", "grpc.lb.locality"},
Default: false,
})
)
const (
defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
defaultClientMaxSendMessageSize = math.MaxInt32
@@ -262,9 +299,10 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}()
// This creates the name resolver, load balancer, etc.
if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
return nil, err
if err := cc.exitIdleMode(); err != nil {
return nil, fmt.Errorf("failed to exit idle mode: %w", err)
}
cc.idlenessMgr.UnsafeSetNotIdle()
// Return now for non-blocking dials.
if !cc.dopts.block {
@@ -332,7 +370,7 @@ func (cc *ClientConn) addTraceEvent(msg string) {
Severity: channelz.CtInfo,
}
}
channelz.AddTraceEvent(logger, cc.channelz, 0, ted)
channelz.AddTraceEvent(logger, cc.channelz, 1, ted)
}
type idler ClientConn
@@ -341,14 +379,17 @@ func (i *idler) EnterIdleMode() {
(*ClientConn)(i).enterIdleMode()
}
func (i *idler) ExitIdleMode() error {
return (*ClientConn)(i).exitIdleMode()
func (i *idler) ExitIdleMode() {
// Ignore the error returned from this method, because from the perspective
// of the caller (idleness manager), the channel would have always moved out
// of IDLE by the time this method returns.
(*ClientConn)(i).exitIdleMode()
}
// exitIdleMode moves the channel out of idle mode by recreating the name
// resolver and load balancer. This should never be called directly; use
// cc.idlenessMgr.ExitIdleMode instead.
func (cc *ClientConn) exitIdleMode() (err error) {
func (cc *ClientConn) exitIdleMode() error {
cc.mu.Lock()
if cc.conns == nil {
cc.mu.Unlock()
@@ -356,11 +397,23 @@ func (cc *ClientConn) exitIdleMode() (err error) {
}
cc.mu.Unlock()
// Set state to CONNECTING before building the name resolver
// so the channel does not remain in IDLE.
cc.csMgr.updateState(connectivity.Connecting)
// This needs to be called without cc.mu because this builds a new resolver
// which might update state or report error inline, which would then need to
// acquire cc.mu.
if err := cc.resolverWrapper.start(); err != nil {
return err
// If resolver creation fails, treat it like an error reported by the
// resolver before any valid updates. Set channel's state to
// TransientFailure, and set an erroring picker with the resolver build
// error, which will be returned as part of any subsequent RPCs.
logger.Warningf("Failed to start resolver: %v", err)
cc.csMgr.updateState(connectivity.TransientFailure)
cc.mu.Lock()
cc.updateResolverStateAndUnlock(resolver.State{}, err)
return fmt.Errorf("failed to start resolver: %w", err)
}
cc.addTraceEvent("exiting idle mode")
@@ -681,10 +734,8 @@ func (cc *ClientConn) GetState() connectivity.State {
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
// release.
func (cc *ClientConn) Connect() {
if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
cc.addTraceEvent(err.Error())
return
}
cc.idlenessMgr.ExitIdleMode()
// If the ClientConn was not in idle mode, we need to call ExitIdle on the
// LB policy so that connections can be created.
cc.mu.Lock()
@@ -735,8 +786,8 @@ func init() {
internal.EnterIdleModeForTesting = func(cc *ClientConn) {
cc.idlenessMgr.EnterIdleModeForTesting()
}
internal.ExitIdleModeForTesting = func(cc *ClientConn) error {
return cc.idlenessMgr.ExitIdleMode()
internal.ExitIdleModeForTesting = func(cc *ClientConn) {
cc.idlenessMgr.ExitIdleMode()
}
}
@@ -861,6 +912,7 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.
channelz: channelz.RegisterSubChannel(cc.channelz, ""),
resetBackoff: make(chan struct{}),
}
ac.updateTelemetryLabelsLocked()
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
// Start with our address set to the first address; this may be updated if
// we connect to different addresses.
@@ -925,25 +977,24 @@ func (cc *ClientConn) incrCallsFailed() {
// connect starts creating a transport.
// It does nothing if the ac is not IDLE.
// TODO(bar) Move this to the addrConn section.
func (ac *addrConn) connect() error {
func (ac *addrConn) connect() {
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
if logger.V(2) {
logger.Infof("connect called on shutdown addrConn; ignoring.")
}
ac.mu.Unlock()
return errConnClosing
return
}
if ac.state != connectivity.Idle {
if logger.V(2) {
logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state)
}
ac.mu.Unlock()
return nil
return
}
ac.resetTransportAndUnlock()
return nil
}
// equalAddressIgnoringBalAttributes returns true if a and b are considered equal.
@@ -977,7 +1028,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
}
ac.addrs = addrs
ac.updateTelemetryLabelsLocked()
if ac.state == connectivity.Shutdown ||
ac.state == connectivity.TransientFailure ||
ac.state == connectivity.Idle {
@@ -1216,6 +1267,9 @@ type addrConn struct {
resetBackoff chan struct{}
channelz *channelz.SubChannel
localityLabel string
backendServiceLabel string
}
// Note: this requires a lock on ac.mu.
@@ -1223,6 +1277,18 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
if ac.state == s {
return
}
// If we are transitioning out of Ready, it means there is a disconnection.
// A SubConn can also transition from CONNECTING directly to IDLE when
// a transport is successfully created, but the connection fails
// before the SubConn can send the notification for READY. We treat
// this as a successful connection and transition to IDLE.
// TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
// part of the if condition below once the issue is fixed.
if ac.state == connectivity.Ready || (ac.state == connectivity.Connecting && s == connectivity.Idle) {
disconnectionsMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.localityLabel, "unknown")
openConnectionsMetric.Record(ac.cc.metricsRecorderList, -1, ac.cc.target, ac.backendServiceLabel, ac.securityLevelLocked(), ac.localityLabel)
}
ac.state = s
ac.channelz.ChannelMetrics.State.Store(&s)
if lastErr == nil {
@@ -1230,7 +1296,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
} else {
channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
}
ac.acbw.updateState(s, ac.curAddr, lastErr)
ac.acbw.updateState(s, lastErr)
}
// adjustParams updates parameters used to create transports upon
@@ -1280,6 +1346,15 @@ func (ac *addrConn) resetTransportAndUnlock() {
ac.mu.Unlock()
if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
if !errors.Is(err, context.Canceled) {
connectionAttemptsFailedMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.localityLabel)
} else {
if logger.V(2) {
// This records cancelled connection attempts which can be later
// replaced by a metric.
logger.Infof("Context cancellation detected; not recording this as a failed connection attempt.")
}
}
// TODO: #7534 - Move re-resolution requests into the pick_first LB policy
// to ensure one resolution request per pass instead of per subconn failure.
ac.cc.resolveNow(resolver.ResolveNowOptions{})
@@ -1319,10 +1394,50 @@ func (ac *addrConn) resetTransportAndUnlock() {
}
// Success; reset backoff.
ac.mu.Lock()
connectionAttemptsSucceededMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.localityLabel)
openConnectionsMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.securityLevelLocked(), ac.localityLabel)
ac.backoffIdx = 0
ac.mu.Unlock()
}
// updateTelemetryLabelsLocked computes and caches the telemetry labels
// (locality and backend service) derived from the first address in the
// addrConn's address list. Requires ac.mu to be held.
func (ac *addrConn) updateTelemetryLabelsLocked() {
	toLabels, ok := internal.AddressToTelemetryLabels.(func(resolver.Address) map[string]string)
	if ok && len(ac.addrs) > 0 {
		l := toLabels(ac.addrs[0])
		ac.localityLabel = l["grpc.lb.locality"]
		ac.backendServiceLabel = l["grpc.lb.backend_service"]
		return
	}
	// No conversion hook or no addresses: fall back to empty labels.
	ac.localityLabel = ""
	ac.backendServiceLabel = ""
}
// securityLevelKey is the attribute key under which securityLevelLocked caches
// the connection's security level in ac.curAddr.Attributes, so the value
// remains available (e.g. for disconnection metrics) after the transport is
// closed and ac.transport becomes nil.
type securityLevelKey struct{}
// securityLevelLocked returns the security level of the current connection as
// a string, caching it in the current address' attributes so it survives
// transport teardown. Returns the empty string when the level is unknown.
// Requires ac.mu to be held.
func (ac *addrConn) securityLevelLocked() string {
	if ac.transport == nil {
		// During disconnection, ac.transport is nil. Fall back to the security
		// level stored in the current address during connection.
		lvl, _ := ac.curAddr.Attributes.Value(securityLevelKey{}).(string)
		return lvl
	}
	ci, ok := ac.transport.Peer().AuthInfo.(interface {
		GetCommonAuthInfo() credentials.CommonAuthInfo
	})
	if !ok {
		return ""
	}
	lvl := ci.GetCommonAuthInfo().SecurityLevel.String()
	// Store the security level in the current address' attributes so that it
	// remains available for disconnection metrics after the transport is
	// closed.
	ac.curAddr.Attributes = ac.curAddr.Attributes.WithValue(securityLevelKey{}, lvl)
	return lvl
}
// tryAllAddrs tries to create a connection to the addresses, and stop when at
// the first successful one. It returns an error if no address was successfully
// connected, or updates ac appropriately with the new transport.
@@ -1412,25 +1527,26 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address,
}
ac.mu.Lock()
defer ac.mu.Unlock()
if ctx.Err() != nil {
// This can happen if the subConn was removed while in `Connecting`
// state. tearDown() would have set the state to `Shutdown`, but
// would not have closed the transport since ac.transport would not
// have been set at that point.
//
// We run this in a goroutine because newTr.Close() calls onClose()
// We unlock ac.mu because newTr.Close() calls onClose()
// inline, which requires locking ac.mu.
//
ac.mu.Unlock()
// The error we pass to Close() is immaterial since there are no open
// streams at this point, so no trailers with error details will be sent
// out. We just need to pass a non-nil error.
//
// This can also happen when updateAddrs is called during a connection
// attempt.
go newTr.Close(transport.ErrConnClosing)
newTr.Close(transport.ErrConnClosing)
return nil
}
defer ac.mu.Unlock()
if hctx.Err() != nil {
// onClose was already called for this connection, but the connection
// was successfully established first. Consider it a success and set

View File

@@ -56,9 +56,13 @@ func (t TLSInfo) AuthType() string {
// non-nil error if the validation fails.
func (t TLSInfo) ValidateAuthority(authority string) error {
var errs []error
host, _, err := net.SplitHostPort(authority)
if err != nil {
host = authority
}
for _, cert := range t.State.PeerCertificates {
var err error
if err = cert.VerifyHostname(authority); err == nil {
if err = cert.VerifyHostname(host); err == nil {
return nil
}
errs = append(errs, err)

View File

@@ -58,10 +58,6 @@ func init() {
// Compressor is used for compressing and decompressing when sending or
// receiving messages.
//
// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`,
// gRPC will invoke it to determine the size of the buffer allocated for the
// result of decompression. A return value of -1 indicates unknown size.
type Compressor interface {
// Compress writes the data written to wc to w after compressing it. If an
// error occurs while initializing the compressor, that error is returned

View File

@@ -27,7 +27,6 @@ package gzip
import (
"compress/gzip"
"encoding/binary"
"fmt"
"io"
"sync"
@@ -111,17 +110,6 @@ func (z *reader) Read(p []byte) (n int, err error) {
return n, err
}
// DecompressedSize reports the uncompressed size of buf, read from the gzip
// trailer. RFC1952 specifies that the last four bytes "contains the size of
// the original (uncompressed) input data modulo 2^32"; gRPC has a max message
// size of 2GB so wraparound is not a concern. Returns -1 when buf is too
// short to contain a trailer.
func (c *compressor) DecompressedSize(buf []byte) int {
	if n := len(buf); n >= 4 {
		return int(binary.LittleEndian.Uint32(buf[n-4:]))
	}
	return -1
}
// Name returns the registered name of this compressor (the package-level Name
// constant), satisfying the encoding.Compressor interface.
func (c *compressor) Name() string {
	return Name
}

Some files were not shown because too many files have changed in this diff Show More