From a3ef7f6d799ab7706d578fa95049b510bfb7b733 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Friedrich=20Dreyer?= Date: Tue, 25 Nov 2025 15:18:23 +0100 Subject: [PATCH] update otlp tracing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jörn Friedrich Dreyer --- go.mod | 2 +- go.sum | 4 +- opencloud/pkg/runtime/service/service.go | 2 +- pkg/config/config.go | 1 - pkg/config/parser/parse.go | 4 - pkg/shared/shared_types.go | 10 +- pkg/tracing/config.go | 14 - pkg/tracing/tracing.go | 205 +- pkg/tracing/tracing_test.go | 77 - services/activitylog/pkg/command/server.go | 2 +- services/activitylog/pkg/config/config.go | 5 +- .../pkg/config/defaults/defaultconfig.go | 13 - services/activitylog/pkg/config/tracing.go | 21 - services/antivirus/pkg/command/server.go | 2 +- services/antivirus/pkg/config/config.go | 9 +- .../pkg/config/defaults/defaultconfig.go | 4 - services/antivirus/pkg/config/tracing.go | 21 - services/app-provider/pkg/command/server.go | 2 +- services/app-provider/pkg/config/config.go | 1 - .../pkg/config/defaults/defaultconfig.go | 11 - services/app-provider/pkg/config/tracing.go | 21 - services/app-registry/pkg/command/server.go | 2 +- services/app-registry/pkg/config/config.go | 7 +- .../pkg/config/defaults/defaultconfig.go | 11 - services/app-registry/pkg/config/tracing.go | 21 - .../app-registry/pkg/revaconfig/config.go | 7 - services/audit/pkg/config/config.go | 5 +- .../pkg/config/defaults/defaultconfig.go | 12 - services/auth-app/pkg/command/create.go | 2 +- services/auth-app/pkg/command/server.go | 2 +- services/auth-app/pkg/config/config.go | 1 - .../pkg/config/defaults/defaultconfig.go | 11 - services/auth-app/pkg/config/tracing.go | 21 - services/auth-basic/pkg/command/server.go | 2 +- services/auth-basic/pkg/config/config.go | 1 - .../pkg/config/defaults/defaultconfig.go | 11 - services/auth-basic/pkg/config/tracing.go | 21 - services/auth-bearer/pkg/command/server.go | 2 +- services/auth-bearer/pkg/config/config.go | 1 - .../pkg/config/defaults/defaultconfig.go | 11 - services/auth-bearer/pkg/config/tracing.go | 21 - services/auth-machine/pkg/command/server.go | 2 +- services/auth-machine/pkg/config/config.go | 1 - .../pkg/config/defaults/defaultconfig.go | 11 - services/auth-machine/pkg/config/tracing.go | 21 - services/auth-service/pkg/command/server.go | 2 +- services/auth-service/pkg/config/config.go | 1 - .../pkg/config/defaults/defaultconfig.go | 11 - services/auth-service/pkg/config/tracing.go | 21 - .../auth-service/pkg/revaconfig/config.go | 7 - services/clientlog/pkg/command/server.go | 2 +- services/clientlog/pkg/config/config.go | 5 +- .../pkg/config/defaults/defaultconfig.go | 11 - services/clientlog/pkg/config/tracing.go | 21 - services/collaboration/pkg/command/server.go | 2 +- services/collaboration/pkg/config/config.go | 24 +- .../pkg/config/defaults/defaultconfig.go | 12 - services/eventhistory/pkg/command/server.go | 2 +- services/eventhistory/pkg/config/config.go | 5 +- .../pkg/config/defaults/defaultconfig.go | 11 - services/eventhistory/pkg/config/tracing.go | 21 - services/frontend/pkg/command/events.go | 2 +- services/frontend/pkg/command/server.go | 2 +- services/frontend/pkg/config/config.go | 1 - .../pkg/config/defaults/defaultconfig.go | 11 - services/frontend/pkg/config/tracing.go | 21 - services/gateway/pkg/command/server.go | 2 +- services/gateway/pkg/config/config.go | 7 +- .../pkg/config/defaults/defaultconfig.go | 11 - services/gateway/pkg/config/tracing.go | 21 - 
services/gateway/pkg/revaconfig/config.go | 7 - services/graph/pkg/command/server.go | 2 +- services/graph/pkg/config/config.go | 7 +- .../pkg/config/defaults/defaultconfig.go | 11 - services/graph/pkg/config/tracing.go | 21 - services/groups/pkg/command/server.go | 2 +- services/groups/pkg/config/config.go | 1 - .../pkg/config/defaults/defaultconfig.go | 11 - services/groups/pkg/config/tracing.go | 21 - services/groups/pkg/revaconfig/config.go | 7 - services/idm/pkg/config/config.go | 5 +- .../idm/pkg/config/defaults/defaultconfig.go | 11 - services/idm/pkg/config/tracing.go | 9 - services/idp/pkg/command/server.go | 2 +- services/idp/pkg/config/config.go | 5 +- .../idp/pkg/config/defaults/defaultconfig.go | 11 - services/idp/pkg/config/tracing.go | 21 - services/invitations/pkg/command/server.go | 2 +- services/invitations/pkg/config/config.go | 5 +- .../pkg/config/defaults/defaultconfig.go | 11 - services/invitations/pkg/config/tracing.go | 21 - services/nats/pkg/config/config.go | 9 - .../nats/pkg/config/defaults/defaultconfig.go | 11 - services/notifications/pkg/command/server.go | 2 +- services/notifications/pkg/config/config.go | 5 +- .../pkg/config/defaults/defaultconfig.go | 12 - services/notifications/pkg/config/tracing.go | 21 - services/ocdav/pkg/command/server.go | 3 +- services/ocdav/pkg/config/config.go | 1 - .../pkg/config/defaults/defaultconfig.go | 11 - services/ocdav/pkg/config/tracing.go | 21 - services/ocm/pkg/command/server.go | 2 +- services/ocm/pkg/config/config.go | 5 +- .../ocm/pkg/config/defaults/defaultconfig.go | 11 - services/ocm/pkg/config/tracing.go | 21 - services/ocs/pkg/command/server.go | 2 +- services/ocs/pkg/config/config.go | 5 +- .../ocs/pkg/config/defaults/defaultconfig.go | 11 - services/ocs/pkg/config/tracing.go | 21 - services/policies/pkg/command/server.go | 2 +- services/policies/pkg/config/config.go | 1 - .../pkg/config/defaults/defaultconfig.go | 11 - services/policies/pkg/config/tracing.go | 21 - services/postprocessing/pkg/command/server.go | 2 +- services/postprocessing/pkg/config/config.go | 5 +- .../pkg/config/defaults/defaultconfig.go | 11 - services/postprocessing/pkg/config/tracing.go | 21 - services/proxy/pkg/command/server.go | 2 +- services/proxy/pkg/config/config.go | 5 +- .../pkg/config/defaults/defaultconfig.go | 11 - services/proxy/pkg/config/tracing.go | 21 - .../proxy/pkg/proxy/proxy_integration_test.go | 1 - services/search/pkg/command/index.go | 2 +- services/search/pkg/command/server.go | 2 +- services/search/pkg/config/config.go | 5 +- .../pkg/config/defaults/defaultconfig.go | 11 - services/search/pkg/config/tracing.go | 21 - services/settings/pkg/command/server.go | 2 +- services/settings/pkg/config/config.go | 5 +- .../pkg/config/defaults/defaultconfig.go | 11 - services/settings/pkg/config/tracing.go | 21 - services/sharing/pkg/command/server.go | 2 +- services/sharing/pkg/config/config.go | 1 - .../pkg/config/defaults/defaultconfig.go | 11 - services/sharing/pkg/config/tracing.go | 21 - services/sse/pkg/command/server.go | 2 +- services/sse/pkg/config/config.go | 3 +- .../sse/pkg/config/defaults/defaultconfig.go | 11 - services/sse/pkg/config/tracing.go | 21 - .../storage-publiclink/pkg/command/server.go | 2 +- .../storage-publiclink/pkg/config/config.go | 1 - .../pkg/config/defaults/defaultconfig.go | 11 - .../storage-publiclink/pkg/config/tracing.go | 21 - services/storage-shares/pkg/command/server.go | 2 +- services/storage-shares/pkg/config/config.go | 1 - .../pkg/config/defaults/defaultconfig.go | 11 - 
services/storage-shares/pkg/config/tracing.go | 21 - services/storage-system/pkg/command/server.go | 2 +- services/storage-system/pkg/config/config.go | 1 - .../pkg/config/defaults/defaultconfig.go | 11 - services/storage-system/pkg/config/tracing.go | 21 - services/storage-users/pkg/command/server.go | 2 +- services/storage-users/pkg/config/config.go | 1 - .../pkg/config/defaults/defaultconfig.go | 11 - services/storage-users/pkg/config/tracing.go | 21 - services/thumbnails/pkg/command/server.go | 2 +- services/thumbnails/pkg/config/config.go | 5 +- .../pkg/config/defaults/defaultconfig.go | 11 - services/thumbnails/pkg/config/tracing.go | 21 - services/userlog/pkg/command/server.go | 2 +- services/userlog/pkg/config/config.go | 5 +- .../pkg/config/defaults/defaultconfig.go | 11 - services/userlog/pkg/config/tracing.go | 21 - services/users/pkg/command/server.go | 2 +- services/users/pkg/config/config.go | 1 - .../pkg/config/defaults/defaultconfig.go | 11 - services/users/pkg/config/tracing.go | 21 - services/web/pkg/command/server.go | 2 +- services/web/pkg/config/config.go | 5 +- .../web/pkg/config/defaults/defaultconfig.go | 11 - services/web/pkg/config/tracing.go | 21 - services/webdav/pkg/command/server.go | 2 +- services/webdav/pkg/config/config.go | 5 +- .../pkg/config/defaults/defaultconfig.go | 11 - services/webdav/pkg/config/tracing.go | 21 - services/webfinger/pkg/command/server.go | 2 +- services/webfinger/pkg/config/config.go | 5 +- .../pkg/config/defaults/defaultconfig.go | 11 - services/webfinger/pkg/config/tracing.go | 21 - .../otel/exporters/jaeger/README.md | 56 - .../otel/exporters/jaeger/agent.go | 213 -- .../otel/exporters/jaeger/doc.go | 22 - .../otel/exporters/jaeger/env.go | 44 - .../gen-go/agent/GoUnusedProtection__.go | 6 - .../internal/gen-go/agent/agent-consts.go | 27 - .../jaeger/internal/gen-go/agent/agent.go | 412 --- .../gen-go/jaeger/GoUnusedProtection__.go | 6 - .../internal/gen-go/jaeger/jaeger-consts.go | 22 - .../jaeger/internal/gen-go/jaeger/jaeger.go | 3022 ----------------- .../gen-go/zipkincore/GoUnusedProtection__.go | 6 - .../gen-go/zipkincore/zipkincore-consts.go | 39 - .../internal/gen-go/zipkincore/zipkincore.go | 2067 ----------- .../internal/third_party/thrift/LICENSE | 306 -- .../jaeger/internal/third_party/thrift/NOTICE | 5 - .../lib/go/thrift/application_exception.go | 180 - .../thrift/lib/go/thrift/binary_protocol.go | 555 --- .../lib/go/thrift/buffered_transport.go | 99 - .../thrift/lib/go/thrift/client.go | 109 - .../thrift/lib/go/thrift/compact_protocol.go | 865 ----- .../thrift/lib/go/thrift/configuration.go | 378 --- .../thrift/lib/go/thrift/context.go | 24 - .../thrift/lib/go/thrift/debug_protocol.go | 447 --- .../thrift/lib/go/thrift/deserializer.go | 121 - .../thrift/lib/go/thrift/exception.go | 116 - .../thrift/lib/go/thrift/framed_transport.go | 223 -- .../thrift/lib/go/thrift/header_context.go | 110 - .../thrift/lib/go/thrift/header_protocol.go | 351 -- .../thrift/lib/go/thrift/header_transport.go | 809 ----- .../thrift/lib/go/thrift/http_client.go | 257 -- .../thrift/lib/go/thrift/http_transport.go | 74 - .../lib/go/thrift/iostream_transport.go | 222 -- .../thrift/lib/go/thrift/json_protocol.go | 591 ---- .../thrift/lib/go/thrift/logger.go | 69 - .../thrift/lib/go/thrift/memory_buffer.go | 80 - .../thrift/lib/go/thrift/messagetype.go | 31 - .../thrift/lib/go/thrift/middleware.go | 109 - .../lib/go/thrift/multiplexed_protocol.go | 235 -- .../thrift/lib/go/thrift/numeric.go | 164 - .../thrift/lib/go/thrift/pointerize.go | 52 - 
.../thrift/lib/go/thrift/processor_factory.go | 80 - .../thrift/lib/go/thrift/protocol.go | 177 - .../lib/go/thrift/protocol_exception.go | 104 - .../thrift/lib/go/thrift/protocol_factory.go | 25 - .../thrift/lib/go/thrift/response_helper.go | 94 - .../thrift/lib/go/thrift/rich_transport.go | 71 - .../thrift/lib/go/thrift/serializer.go | 136 - .../thrift/lib/go/thrift/server.go | 35 - .../thrift/lib/go/thrift/server_socket.go | 137 - .../thrift/lib/go/thrift/server_transport.go | 34 - .../lib/go/thrift/simple_json_protocol.go | 1373 -------- .../thrift/lib/go/thrift/simple_server.go | 332 -- .../thrift/lib/go/thrift/socket.go | 238 -- .../thrift/lib/go/thrift/socket_conn.go | 102 - .../thrift/lib/go/thrift/socket_unix_conn.go | 83 - .../lib/go/thrift/socket_windows_conn.go | 34 - .../thrift/lib/go/thrift/ssl_server_socket.go | 112 - .../thrift/lib/go/thrift/ssl_socket.go | 258 -- .../thrift/lib/go/thrift/transport.go | 70 - .../lib/go/thrift/transport_exception.go | 131 - .../thrift/lib/go/thrift/transport_factory.go | 39 - .../third_party/thrift/lib/go/thrift/type.go | 69 - .../thrift/lib/go/thrift/zlib_transport.go | 137 - .../otel/exporters/jaeger/jaeger.go | 360 -- .../jaeger/reconnecting_udp_client.go | 204 -- .../otel/exporters/jaeger/uploader.go | 339 -- .../{jaeger => stdout/stdouttrace}/LICENSE | 30 + .../exporters/stdout/stdouttrace/README.md | 3 + .../exporters/stdout/stdouttrace/config.go | 85 + .../otel/exporters/stdout/stdouttrace/doc.go | 9 + .../stdouttrace/internal/counter/counter.go | 31 + .../stdout/stdouttrace/internal/x/README.md | 36 + .../stdout/stdouttrace/internal/x/x.go | 63 + .../exporters/stdout/stdouttrace/trace.go | 255 ++ .../otel/sdk/trace/tracetest/README.md | 3 + .../otel/sdk/trace/tracetest/exporter.go | 74 + .../otel/sdk/trace/tracetest/recorder.go | 94 + .../otel/sdk/trace/tracetest/span.go | 166 + .../otel/semconv/internal/http.go | 338 -- .../otel/semconv/v1.21.0/README.md | 3 - .../otel/semconv/v1.21.0/attribute_group.go | 1866 ---------- .../otel/semconv/v1.21.0/doc.go | 9 - .../otel/semconv/v1.21.0/event.go | 188 - .../otel/semconv/v1.21.0/exception.go | 9 - .../otel/semconv/v1.21.0/resource.go | 2299 ------------- .../otel/semconv/v1.21.0/schema.go | 9 - .../otel/semconv/v1.21.0/trace.go | 2484 -------------- .../otel/semconv/v1.4.0/README.md | 3 - .../otel/semconv/v1.4.0/doc.go | 9 - .../otel/semconv/v1.4.0/exception.go | 9 - .../otel/semconv/v1.4.0/http.go | 103 - .../otel/semconv/v1.4.0/resource.go | 895 ----- .../otel/semconv/v1.4.0/schema.go | 9 - .../otel/semconv/v1.4.0/trace.go | 1367 -------- vendor/modules.txt | 16 +- 274 files changed, 1032 insertions(+), 28608 deletions(-) delete mode 100644 pkg/tracing/config.go delete mode 100644 pkg/tracing/tracing_test.go delete mode 100644 services/activitylog/pkg/config/tracing.go delete mode 100644 services/antivirus/pkg/config/tracing.go delete mode 100644 services/app-provider/pkg/config/tracing.go delete mode 100644 services/app-registry/pkg/config/tracing.go delete mode 100644 services/auth-app/pkg/config/tracing.go delete mode 100644 services/auth-basic/pkg/config/tracing.go delete mode 100644 services/auth-bearer/pkg/config/tracing.go delete mode 100644 services/auth-machine/pkg/config/tracing.go delete mode 100644 services/auth-service/pkg/config/tracing.go delete mode 100644 services/clientlog/pkg/config/tracing.go delete mode 100644 services/eventhistory/pkg/config/tracing.go delete mode 100644 services/frontend/pkg/config/tracing.go delete mode 100644 
services/gateway/pkg/config/tracing.go delete mode 100644 services/graph/pkg/config/tracing.go delete mode 100644 services/groups/pkg/config/tracing.go delete mode 100644 services/idm/pkg/config/tracing.go delete mode 100644 services/idp/pkg/config/tracing.go delete mode 100644 services/invitations/pkg/config/tracing.go delete mode 100644 services/notifications/pkg/config/tracing.go delete mode 100644 services/ocdav/pkg/config/tracing.go delete mode 100644 services/ocm/pkg/config/tracing.go delete mode 100644 services/ocs/pkg/config/tracing.go delete mode 100644 services/policies/pkg/config/tracing.go delete mode 100644 services/postprocessing/pkg/config/tracing.go delete mode 100644 services/proxy/pkg/config/tracing.go delete mode 100644 services/search/pkg/config/tracing.go delete mode 100644 services/settings/pkg/config/tracing.go delete mode 100644 services/sharing/pkg/config/tracing.go delete mode 100644 services/sse/pkg/config/tracing.go delete mode 100644 services/storage-publiclink/pkg/config/tracing.go delete mode 100644 services/storage-shares/pkg/config/tracing.go delete mode 100644 services/storage-system/pkg/config/tracing.go delete mode 100644 services/storage-users/pkg/config/tracing.go delete mode 100644 services/thumbnails/pkg/config/tracing.go delete mode 100644 services/userlog/pkg/config/tracing.go delete mode 100644 services/users/pkg/config/tracing.go delete mode 100644 services/web/pkg/config/tracing.go delete mode 100644 services/webdav/pkg/config/tracing.go delete mode 100644 services/webfinger/pkg/config/tracing.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go delete mode 100644 
vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go delete mode 100644 
vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go rename vendor/go.opentelemetry.io/otel/exporters/{jaeger => stdout/stdouttrace}/LICENSE (88%) create mode 100644 vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/README.md create mode 100644 vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/config.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter/counter.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/README.md create mode 100644 vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/x.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/README.md create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/http.go 
delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/README.md delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.4.0/README.md delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.4.0/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.4.0/exception.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.4.0/http.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.4.0/resource.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.4.0/schema.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.4.0/trace.go diff --git a/go.mod b/go.mod index 1ffd714788..efc22e00a6 100644 --- a/go.mod +++ b/go.mod @@ -97,8 +97,8 @@ require ( go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 go.opentelemetry.io/contrib/zpages v0.63.0 go.opentelemetry.io/otel v1.38.0 - go.opentelemetry.io/otel/exporters/jaeger v1.17.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 go.opentelemetry.io/otel/sdk v1.38.0 go.opentelemetry.io/otel/trace v1.38.0 golang.org/x/crypto v0.44.0 diff --git a/go.sum b/go.sum index 5d593b627f..1a023d07e3 100644 --- a/go.sum +++ b/go.sum @@ -1299,14 +1299,14 @@ go.opentelemetry.io/contrib/zpages v0.63.0 h1:TppOKuZGbqXMgsfjqq3i09N5Vbo1JLtLIm go.opentelemetry.io/contrib/zpages v0.63.0/go.mod h1:5F8uugz75ay/MMhRRhxAXY33FuaI8dl7jTxefrIy5qk= go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= -go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= -go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE= go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= go.opentelemetry.io/otel/sdk v1.38.0 
h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= diff --git a/opencloud/pkg/runtime/service/service.go b/opencloud/pkg/runtime/service/service.go index d3ff48dab2..a02bab58cf 100644 --- a/opencloud/pkg/runtime/service/service.go +++ b/opencloud/pkg/runtime/service/service.go @@ -320,7 +320,7 @@ func NewService(ctx context.Context, options ...Option) (*Service, error) { } areg(opts.Config.Antivirus.Service.Name, func(ctx context.Context, cfg *occfg.Config) error { cfg.Antivirus.Context = ctx - // cfg.Antivirus.Commons = cfg.Commons // antivirus holds no Commons atm + cfg.Antivirus.Commons = cfg.Commons return antivirus.Execute(cfg.Antivirus) }) areg(opts.Config.Audit.Service.Name, func(ctx context.Context, cfg *occfg.Config) error { diff --git a/pkg/config/config.go b/pkg/config/config.go index 131ac2bcdb..73325a8bac 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -62,7 +62,6 @@ type Runtime struct { type Config struct { *shared.Commons `yaml:"shared"` - Tracing *shared.Tracing `yaml:"tracing"` Log *shared.Log `yaml:"log"` Cache *shared.Cache `yaml:"cache"` GRPCClientTLS *shared.GRPCClientTLS `yaml:"grpc_client_tls"` diff --git a/pkg/config/parser/parse.go b/pkg/config/parser/parse.go index aaa53af1b6..52e971636e 100644 --- a/pkg/config/parser/parse.go +++ b/pkg/config/parser/parse.go @@ -40,9 +40,6 @@ func ParseConfig(cfg *config.Config, skipValidate bool) error { // EnsureDefaults ensures that all pointers in the // OpenCloud config (not the services configs) are initialized func EnsureDefaults(cfg *config.Config) { - if cfg.Tracing == nil { - cfg.Tracing = &shared.Tracing{} - } if cfg.Log == nil { cfg.Log = &shared.Log{} } @@ -71,7 +68,6 @@ func EnsureCommons(cfg *config.Config) { } cfg.Commons.Log = structs.CopyOrZeroValue(cfg.Log) - cfg.Commons.Tracing = structs.CopyOrZeroValue(cfg.Tracing) cfg.Commons.Cache = structs.CopyOrZeroValue(cfg.Cache) if cfg.GRPCClientTLS != nil { diff --git a/pkg/shared/shared_types.go b/pkg/shared/shared_types.go index 2f9170017d..5a17fd3b39 100644 --- a/pkg/shared/shared_types.go +++ b/pkg/shared/shared_types.go @@ -18,14 +18,6 @@ type Log struct { File string `yaml:"file" env:"OC_LOG_FILE" desc:"The path to the log file. Activates logging to this file if set." introductionVersion:"1.0.0"` } -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - // TokenManager is the config for using the reva token manager type TokenManager struct { JWTSecret string `mask:"password" yaml:"jwt_secret" env:"OC_JWT_SECRET" desc:"The secret to mint and validate jwt tokens." introductionVersion:"1.0.0"` @@ -69,8 +61,8 @@ type Cache struct { // Commons holds configuration that are common to all extensions. Each extension can then decide whether // to overwrite its values. 
type Commons struct { + TracesExporter string `yaml:"traces_exporter" env:"OTEL_TRACES_EXPORTER" desc:"The exporter used for traces. Supports 'otlp', 'console' and 'none' (default)." introductionVersion:"%%NEXT%%"` Log *Log `yaml:"log"` - Tracing *Tracing `yaml:"tracing"` Cache *Cache `yaml:"cache"` GRPCClientTLS *GRPCClientTLS `yaml:"grpc_client_tls"` GRPCServiceTLS *GRPCServiceTLS `yaml:"grpc_service_tls"` diff --git a/pkg/tracing/config.go b/pkg/tracing/config.go deleted file mode 100644 index 5988e5fbe6..0000000000 --- a/pkg/tracing/config.go +++ /dev/null @@ -1,14 +0,0 @@ -package tracing - -// ConfigConverter is the interface for external configuration. -type ConfigConverter interface { - Convert() Config -} - -// Tracing defines the available tracing configuration. -type Config struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE" desc:"The type of tracing. Defaults to \"\", which is the same as \"jaeger\". Allowed tracing types are \"jaeger\" and \"\" as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} diff --git a/pkg/tracing/tracing.go b/pkg/tracing/tracing.go index 734116bfac..2fa4e168e9 100644 --- a/pkg/tracing/tracing.go +++ b/pkg/tracing/tracing.go @@ -3,22 +3,16 @@ package tracing import ( "context" "fmt" - "net/url" - "reflect" - "strings" - "time" rtrace "github.com/opencloud-eu/reva/v2/pkg/trace" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/exporters/jaeger" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.4.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" ) // Propagator ensures the importer module uses the same trace propagation strategy. @@ -27,25 +21,9 @@ var Propagator = propagation.NewCompositeTextMapPropagator( propagation.TraceContext{}, ) -// GetServiceTraceProvider returns a configured open-telemetry trace provider. -func GetServiceTraceProvider(c ConfigConverter, serviceName string) (trace.TracerProvider, error) { - var cfg Config - if c == nil || reflect.ValueOf(c).IsNil() { - cfg = Config{Enabled: false} - } else { - cfg = c.Convert() - } - - if cfg.Enabled { - return GetTraceProvider(cfg.Endpoint, cfg.Collector, serviceName, cfg.Type) - } - - tp := sdktrace.NewTracerProvider( - sdktrace.WithSampler(sdktrace.NeverSample()), - ) - rtrace.SetDefaultTracerProvider(tp) - - return tp, nil +// Deprecated: GetServiceTraceProvider returns a configured open-telemetry trace provider. Use GetTraceProvider. +func GetServiceTraceProvider(exporter, serviceName string) (trace.TracerProvider, error) { + return GetTraceProvider(context.Background(), exporter, serviceName) } // GetPropagator gets a configured propagator. 
@@ -57,116 +35,83 @@ func GetPropagator() propagation.TextMapPropagator { } // GetTraceProvider returns a configured open-telemetry trace provider. -func GetTraceProvider(endpoint, collector, serviceName, traceType string) (*sdktrace.TracerProvider, error) { - switch t := traceType; t { - case "", "jaeger": - var ( - exp *jaeger.Exporter - err error - ) +func GetTraceProvider(ctx context.Context, exporter, serviceName string) (*sdktrace.TracerProvider, error) { - if endpoint != "" { - var agentHost string - var agentPort string + // Create resource - shared across all exporters + resources, err := createResource(ctx, serviceName) + if err != nil { + return nil, fmt.Errorf("failed to create resource: %w", err) + } - agentHost, agentPort, err = parseAgentConfig(endpoint) - if err != nil { - return nil, err - } + var tp *sdktrace.TracerProvider - exp, err = jaeger.New( - jaeger.WithAgentEndpoint( - jaeger.WithAgentHost(agentHost), - jaeger.WithAgentPort(agentPort), - ), - ) - } else if collector != "" { - exp, err = jaeger.New( - jaeger.WithCollectorEndpoint( - jaeger.WithEndpoint(collector), - ), - ) - } else { - return sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.NeverSample())), nil - } - if err != nil { - return nil, err - } - - tp := sdktrace.NewTracerProvider( - sdktrace.WithBatcher(exp), - sdktrace.WithResource(resource.NewWithAttributes( - semconv.SchemaURL, - semconv.ServiceNameKey.String(serviceName)), - ), - ) - rtrace.SetDefaultTracerProvider(tp) - return tp, nil - case "otlp": - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - conn, err := grpc.DialContext(ctx, endpoint, - // Note the use of insecure transport here. TLS is recommended in production. - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithBlock(), - ) - if err != nil { - return nil, fmt.Errorf("failed to create gRPC connection to collector: %w", err) - } - exporter, err := otlptracegrpc.New( - context.Background(), - otlptracegrpc.WithGRPCConn(conn), - ) - if err != nil { - return nil, err - } - resources, err := resource.New( - context.Background(), - resource.WithAttributes( - attribute.String("service.name", serviceName), - attribute.String("library.language", "go"), - ), - ) - if err != nil { - return nil, err - } - - tp := sdktrace.NewTracerProvider( - sdktrace.WithSampler(sdktrace.AlwaysSample()), - sdktrace.WithBatcher(exporter), + switch exporter { + case "", "none": + // No-op exporter - never sample + tp = sdktrace.NewTracerProvider( + sdktrace.WithSampler(sdktrace.NeverSample()), sdktrace.WithResource(resources), ) - rtrace.SetDefaultTracerProvider(tp) - return tp, nil - case "agent": - fallthrough - case "zipkin": - fallthrough + + case "console": + // Console exporter - prints to stdout (useful for debugging) + consoleExporter, err := stdouttrace.New( + stdouttrace.WithPrettyPrint(), + ) + if err != nil { + return nil, fmt.Errorf("failed to create console exporter: %w", err) + } + + // Use SimpleSpanProcessor for console to get immediate output + tp = sdktrace.NewTracerProvider( + sdktrace.WithSpanProcessor(sdktrace.NewSimpleSpanProcessor(consoleExporter)), + sdktrace.WithResource(resources), + ) + + case "otlp": + // OTLP exporter - connects to collector + // This automatically reads: + // - OTEL_EXPORTER_OTLP_ENDPOINT + // - OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (takes precedence) + // - OTEL_EXPORTER_OTLP_HEADERS + // - OTEL_EXPORTER_OTLP_INSECURE + // - OTEL_EXPORTER_OTLP_CERTIFICATE (for custom CA) + otlpExporter, err := 
otlptracegrpc.New(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create OTLP exporter: %w", err) + } + + // Create tracer provider + // This automatically reads: + // - OTEL_TRACES_SAMPLER + // - OTEL_TRACES_SAMPLER_ARG + tp = sdktrace.NewTracerProvider( + sdktrace.WithBatcher(otlpExporter), + sdktrace.WithResource(resources), + ) + default: - return nil, fmt.Errorf("unknown trace type %s", traceType) + return nil, fmt.Errorf("unsupported trace exporter: %q (supported: none, console, otlp)", exporter) } + + // Set as global default + rtrace.SetDefaultTracerProvider(tp) + + return tp, nil } -func parseAgentConfig(ae string) (string, string, error) { - u, err := url.Parse(ae) - // as per url.go: - // [...] Trying to parse a hostname and path - // without a scheme is invalid but may not necessarily return an - // error, due to parsing ambiguities. - if err == nil && u.Hostname() != "" && u.Port() != "" { - return u.Hostname(), u.Port(), nil - } - - p := strings.Split(ae, ":") - if len(p) != 2 { - return "", "", fmt.Errorf("invalid agent endpoint `%s`. expected format: `hostname:port`", ae) - } - - switch { - case p[0] == "" && p[1] == "": // case ae = ":" - return "", "", fmt.Errorf("invalid agent endpoint `%s`. expected format: `hostname:port`", ae) - case p[0] == "": - return "", "", fmt.Errorf("invalid agent endpoint `%s`. expected format: `hostname:port`", ae) - } - return p[0], p[1], nil +// createResource creates a resource with service information +func createResource(ctx context.Context, serviceName string) (*resource.Resource, error) { + return resource.New(ctx, + // Reads OTEL_RESOURCE_ATTRIBUTES and OTEL_SERVICE_NAME + resource.WithFromEnv(), + // Host and process information + resource.WithHost(), + resource.WithProcess(), + // Service attributes + resource.WithAttributes( + semconv.ServiceName(serviceName), + attribute.String("library.language", "go"), + ), + ) } diff --git a/pkg/tracing/tracing_test.go b/pkg/tracing/tracing_test.go deleted file mode 100644 index a2509bfe37..0000000000 --- a/pkg/tracing/tracing_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package tracing - -import "testing" - -func Test_parseAgentConfig(t *testing.T) { - type args struct { - ae string - } - tests := []struct { - name string - args args - want string - want1 string - wantErr bool - }{ - { - name: "docker-style config", - args: args{ - ae: "docker-jaeger:6666", - }, - want: "docker-jaeger", - want1: "6666", - wantErr: false, - }, - { - name: "agent in an url config", - args: args{ - ae: "https://example-agent.com:6666", - }, - want: "example-agent.com", - want1: "6666", - wantErr: false, - }, - { - name: "agent as ipv4", - args: args{ - ae: "127.0.0.1:6666", - }, - want: "127.0.0.1", - want1: "6666", - wantErr: false, - }, - { - name: "no hostname config should error", - args: args{ - ae: ":6666", - }, - want: "", - want1: "", - wantErr: true, - }, - { - name: "no hostname nor port but separator should error", - args: args{ - ae: ":", - }, - want: "", - want1: "", - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, got1, err := parseAgentConfig(tt.args.ae) - if (err != nil) != tt.wantErr { - t.Errorf("parseAgentConfig() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("parseAgentConfig() got = %v, want %v", got, tt.want) - } - if got1 != tt.want1 { - t.Errorf("parseAgentConfig() got1 = %v, want %v", got1, tt.want1) - } - }) - } -} diff --git a/services/activitylog/pkg/command/server.go 
b/services/activitylog/pkg/command/server.go index 4c479956c8..ae925779b0 100644 --- a/services/activitylog/pkg/command/server.go +++ b/services/activitylog/pkg/command/server.go @@ -57,7 +57,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - tracerProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + tracerProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { logger.Error().Err(err).Msg("Failed to initialize tracer") return err diff --git a/services/activitylog/pkg/config/config.go b/services/activitylog/pkg/config/config.go index 48e9ae7a9d..2441f52188 100644 --- a/services/activitylog/pkg/config/config.go +++ b/services/activitylog/pkg/config/config.go @@ -13,9 +13,8 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` Events Events `yaml:"events"` Store Store `yaml:"store"` diff --git a/services/activitylog/pkg/config/defaults/defaultconfig.go b/services/activitylog/pkg/config/defaults/defaultconfig.go index 043646ce93..a6da0cf297 100644 --- a/services/activitylog/pkg/config/defaults/defaultconfig.go +++ b/services/activitylog/pkg/config/defaults/defaultconfig.go @@ -86,19 +86,6 @@ func EnsureDefaults(cfg *config.Config) { if cfg.Commons != nil { cfg.HTTP.TLS = cfg.Commons.HTTPServiceTLS } - - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } - } // Sanitize sanitizes the config diff --git a/services/activitylog/pkg/config/tracing.go b/services/activitylog/pkg/config/tracing.go deleted file mode 100644 index 394f2de3fc..0000000000 --- a/services/activitylog/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;ACTIVITYLOG_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;ACTIVITYLOG_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;ACTIVITYLOG_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;ACTIVITYLOG_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. 
-func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/antivirus/pkg/command/server.go b/services/antivirus/pkg/command/server.go index 17eb51cf55..24566d431b 100644 --- a/services/antivirus/pkg/command/server.go +++ b/services/antivirus/pkg/command/server.go @@ -42,7 +42,7 @@ func Server(cfg *config.Config) *cli.Command { log.File(cfg.Log.File), ) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/antivirus/pkg/config/config.go b/services/antivirus/pkg/config/config.go index 69f5b1950e..275d2cbc9e 100644 --- a/services/antivirus/pkg/config/config.go +++ b/services/antivirus/pkg/config/config.go @@ -3,6 +3,8 @@ package config import ( "context" "time" + + "github.com/opencloud-eu/opencloud/pkg/shared" ) // ScannerType gives info which scanner is used @@ -27,15 +29,14 @@ const ( // Config combines all available configuration parts. type Config struct { - File string - Log *Log + Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service + File string + Log *Log Debug Debug `yaml:"debug" mask:"struct"` Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - InfectedFileHandling string `yaml:"infected-file-handling" env:"ANTIVIRUS_INFECTED_FILE_HANDLING" desc:"Defines the behaviour when a virus has been found. Supported options are: 'delete', 'continue' and 'abort '. Delete will delete the file. Continue will mark the file as infected but continues further processing. Abort will keep the file in the uploads folder for further admin inspection and will not move it to its final destination." introductionVersion:"1.0.0"` Events Events Workers int `yaml:"workers" env:"ANTIVIRUS_WORKERS" desc:"The number of concurrent go routines that fetch events from the event queue." introductionVersion:"1.0.0"` diff --git a/services/antivirus/pkg/config/defaults/defaultconfig.go b/services/antivirus/pkg/config/defaults/defaultconfig.go index 992ac9b15f..446e7c215b 100644 --- a/services/antivirus/pkg/config/defaults/defaultconfig.go +++ b/services/antivirus/pkg/config/defaults/defaultconfig.go @@ -54,10 +54,6 @@ func EnsureDefaults(cfg *config.Config) { if cfg.Log == nil { cfg.Log = &config.Log{} } - - if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } } // Sanitize sanitizes the configuration diff --git a/services/antivirus/pkg/config/tracing.go b/services/antivirus/pkg/config/tracing.go deleted file mode 100644 index 375b4024db..0000000000 --- a/services/antivirus/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;ANTIVIRUS_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;ANTIVIRUS_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;ANTIVIRUS_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." 
introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;ANTIVIRUS_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/app-provider/pkg/command/server.go b/services/app-provider/pkg/command/server.go index 88e8ef0732..ddef630133 100644 --- a/services/app-provider/pkg/command/server.go +++ b/services/app-provider/pkg/command/server.go @@ -31,7 +31,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/app-provider/pkg/config/config.go b/services/app-provider/pkg/config/config.go index 58e5181ca3..ee89e56083 100644 --- a/services/app-provider/pkg/config/config.go +++ b/services/app-provider/pkg/config/config.go @@ -9,7 +9,6 @@ import ( type Config struct { Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` Log *Log `yaml:"log"` Debug Debug `yaml:"debug"` diff --git a/services/app-provider/pkg/config/defaults/defaultconfig.go b/services/app-provider/pkg/config/defaults/defaultconfig.go index 4c1da4b889..c33e6db4ca 100644 --- a/services/app-provider/pkg/config/defaults/defaultconfig.go +++ b/services/app-provider/pkg/config/defaults/defaultconfig.go @@ -56,17 +56,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Reva == nil && cfg.Commons != nil { cfg.Reva = structs.CopyOrZeroValue(cfg.Commons.Reva) diff --git a/services/app-provider/pkg/config/tracing.go b/services/app-provider/pkg/config/tracing.go deleted file mode 100644 index e755079a33..0000000000 --- a/services/app-provider/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the configuration options for tracing. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;APP_PROVIDER_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;APP_PROVIDER_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;APP_PROVIDER_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." 
introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;APP_PROVIDER_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/app-registry/pkg/command/server.go b/services/app-registry/pkg/command/server.go index bd9540acfb..7958ed977c 100644 --- a/services/app-registry/pkg/command/server.go +++ b/services/app-registry/pkg/command/server.go @@ -30,7 +30,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/app-registry/pkg/config/config.go b/services/app-registry/pkg/config/config.go index 2074ec9192..a32386e8f3 100644 --- a/services/app-registry/pkg/config/config.go +++ b/services/app-registry/pkg/config/config.go @@ -9,10 +9,9 @@ import ( type Config struct { Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service - Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Service Service `yaml:"-"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` GRPC GRPCConfig `yaml:"grpc"` diff --git a/services/app-registry/pkg/config/defaults/defaultconfig.go b/services/app-registry/pkg/config/defaults/defaultconfig.go index 9a4c52f788..8bab2a9fb9 100644 --- a/services/app-registry/pkg/config/defaults/defaultconfig.go +++ b/services/app-registry/pkg/config/defaults/defaultconfig.go @@ -133,17 +133,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Reva == nil && cfg.Commons != nil { cfg.Reva = structs.CopyOrZeroValue(cfg.Commons.Reva) diff --git a/services/app-registry/pkg/config/tracing.go b/services/app-registry/pkg/config/tracing.go deleted file mode 100644 index 319e55364f..0000000000 --- a/services/app-registry/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing contains the tracing config parameters. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;APP_REGISTRY_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;APP_REGISTRY_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." 
introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;APP_REGISTRY_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;APP_REGISTRY_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/app-registry/pkg/revaconfig/config.go b/services/app-registry/pkg/revaconfig/config.go index 218c0f55b5..6797b693e6 100644 --- a/services/app-registry/pkg/revaconfig/config.go +++ b/services/app-registry/pkg/revaconfig/config.go @@ -9,13 +9,6 @@ import ( // AppRegistryConfigFromStruct will adapt an OpenCloud config struct into a reva mapstructure to start a reva service. func AppRegistryConfigFromStruct(cfg *config.Config, logger log.Logger) map[string]interface{} { rcfg := map[string]interface{}{ - "core": map[string]interface{}{ - "tracing_enabled": cfg.Tracing.Enabled, - "tracing_exporter": cfg.Tracing.Type, - "tracing_endpoint": cfg.Tracing.Endpoint, - "tracing_collector": cfg.Tracing.Collector, - "tracing_service_name": cfg.Service.Name, - }, "shared": map[string]interface{}{ "jwt_secret": cfg.TokenManager.JWTSecret, "gatewaysvc": cfg.Reva.Address, diff --git a/services/audit/pkg/config/config.go b/services/audit/pkg/config/config.go index 66a0986708..74dd6a66b0 100644 --- a/services/audit/pkg/config/config.go +++ b/services/audit/pkg/config/config.go @@ -12,9 +12,8 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` Events Events `yaml:"events"` Auditlog Auditlog `yaml:"auditlog"` diff --git a/services/audit/pkg/config/defaults/defaultconfig.go b/services/audit/pkg/config/defaults/defaultconfig.go index f4bed88ac7..20d8818928 100644 --- a/services/audit/pkg/config/defaults/defaultconfig.go +++ b/services/audit/pkg/config/defaults/defaultconfig.go @@ -48,18 +48,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". 
- if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } } // Sanitize sanitized the configuration diff --git a/services/auth-app/pkg/command/create.go b/services/auth-app/pkg/command/create.go index e67b10fe3f..a037e04aa1 100644 --- a/services/auth-app/pkg/command/create.go +++ b/services/auth-app/pkg/command/create.go @@ -47,7 +47,7 @@ func Create(cfg *config.Config) *cli.Command { return configlog.ReturnError(parser.ParseConfig(cfg)) }, Action: func(c *cli.Context) error { - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/auth-app/pkg/command/server.go b/services/auth-app/pkg/command/server.go index 8ca8b6befd..2c3bb774f6 100644 --- a/services/auth-app/pkg/command/server.go +++ b/services/auth-app/pkg/command/server.go @@ -38,7 +38,7 @@ func Server(cfg *config.Config) *cli.Command { } logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/auth-app/pkg/config/config.go b/services/auth-app/pkg/config/config.go index 656f0a4087..44de3fb68a 100644 --- a/services/auth-app/pkg/config/config.go +++ b/services/auth-app/pkg/config/config.go @@ -10,7 +10,6 @@ import ( type Config struct { Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` Log *Log `yaml:"log"` Debug Debug `yaml:"debug"` diff --git a/services/auth-app/pkg/config/defaults/defaultconfig.go b/services/auth-app/pkg/config/defaults/defaultconfig.go index d083dcdca0..3acce5d06c 100644 --- a/services/auth-app/pkg/config/defaults/defaultconfig.go +++ b/services/auth-app/pkg/config/defaults/defaultconfig.go @@ -74,17 +74,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.GRPCClientTLS == nil && cfg.Commons != nil { cfg.GRPCClientTLS = structs.CopyOrZeroValue(cfg.Commons.GRPCClientTLS) diff --git a/services/auth-app/pkg/config/tracing.go b/services/auth-app/pkg/config/tracing.go deleted file mode 100644 index b3b9d4992f..0000000000 --- a/services/auth-app/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;AUTH_APP_TRACING_ENABLED" desc:"Activates tracing." 
introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;AUTH_APP_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;AUTH_APP_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;AUTH_APP_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/auth-basic/pkg/command/server.go b/services/auth-basic/pkg/command/server.go index 0d6576dd79..44d3281500 100644 --- a/services/auth-basic/pkg/command/server.go +++ b/services/auth-basic/pkg/command/server.go @@ -31,7 +31,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/auth-basic/pkg/config/config.go b/services/auth-basic/pkg/config/config.go index 5e39cbf37f..786446208a 100644 --- a/services/auth-basic/pkg/config/config.go +++ b/services/auth-basic/pkg/config/config.go @@ -9,7 +9,6 @@ import ( type Config struct { Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` Log *Log `yaml:"log"` Debug Debug `yaml:"debug"` diff --git a/services/auth-basic/pkg/config/defaults/defaultconfig.go b/services/auth-basic/pkg/config/defaults/defaultconfig.go index 65b805c2bd..01f53a27ae 100644 --- a/services/auth-basic/pkg/config/defaults/defaultconfig.go +++ b/services/auth-basic/pkg/config/defaults/defaultconfig.go @@ -97,17 +97,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Reva == nil && cfg.Commons != nil { cfg.Reva = structs.CopyOrZeroValue(cfg.Commons.Reva) diff --git a/services/auth-basic/pkg/config/tracing.go b/services/auth-basic/pkg/config/tracing.go deleted file mode 100644 index 195d742bb3..0000000000 --- a/services/auth-basic/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;AUTH_BASIC_TRACING_ENABLED" desc:"Activates tracing." 
introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;AUTH_BASIC_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;AUTH_BASIC_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;AUTH_BASIC_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/auth-bearer/pkg/command/server.go b/services/auth-bearer/pkg/command/server.go index d78b93bbbc..172fb9be76 100644 --- a/services/auth-bearer/pkg/command/server.go +++ b/services/auth-bearer/pkg/command/server.go @@ -30,7 +30,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/auth-bearer/pkg/config/config.go b/services/auth-bearer/pkg/config/config.go index 9491bec1ce..6887362892 100644 --- a/services/auth-bearer/pkg/config/config.go +++ b/services/auth-bearer/pkg/config/config.go @@ -9,7 +9,6 @@ import ( type Config struct { Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` Log *Log `yaml:"log"` Debug Debug `yaml:"debug"` diff --git a/services/auth-bearer/pkg/config/defaults/defaultconfig.go b/services/auth-bearer/pkg/config/defaults/defaultconfig.go index b35d063463..8bc9c99d9f 100644 --- a/services/auth-bearer/pkg/config/defaults/defaultconfig.go +++ b/services/auth-bearer/pkg/config/defaults/defaultconfig.go @@ -53,17 +53,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Reva == nil && cfg.Commons != nil { cfg.Reva = structs.CopyOrZeroValue(cfg.Commons.Reva) diff --git a/services/auth-bearer/pkg/config/tracing.go b/services/auth-bearer/pkg/config/tracing.go deleted file mode 100644 index 49a2d7a75b..0000000000 --- a/services/auth-bearer/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the tracing parameters. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;AUTH_BEARER_TRACING_ENABLED" desc:"Activates tracing." 
introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;AUTH_BEARER_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;AUTH_BEARER_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;AUTH_BEARER_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/auth-machine/pkg/command/server.go b/services/auth-machine/pkg/command/server.go index d3d78682a7..4cbab5be6b 100644 --- a/services/auth-machine/pkg/command/server.go +++ b/services/auth-machine/pkg/command/server.go @@ -30,7 +30,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/auth-machine/pkg/config/config.go b/services/auth-machine/pkg/config/config.go index 71737637cf..be007ab264 100644 --- a/services/auth-machine/pkg/config/config.go +++ b/services/auth-machine/pkg/config/config.go @@ -9,7 +9,6 @@ import ( type Config struct { Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` Log *Log `yaml:"log"` Debug Debug `yaml:"debug"` diff --git a/services/auth-machine/pkg/config/defaults/defaultconfig.go b/services/auth-machine/pkg/config/defaults/defaultconfig.go index 7bb753874c..17dcf313f4 100644 --- a/services/auth-machine/pkg/config/defaults/defaultconfig.go +++ b/services/auth-machine/pkg/config/defaults/defaultconfig.go @@ -48,17 +48,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Reva == nil && cfg.Commons != nil { cfg.Reva = structs.CopyOrZeroValue(cfg.Commons.Reva) diff --git a/services/auth-machine/pkg/config/tracing.go b/services/auth-machine/pkg/config/tracing.go deleted file mode 100644 index 5217db8a10..0000000000 --- a/services/auth-machine/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing is the config for tracing parameters -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;AUTH_MACHINE_TRACING_ENABLED" desc:"Activates tracing." 
introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;AUTH_MACHINE_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;AUTH_MACHINE_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;AUTH_MACHINE_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/auth-service/pkg/command/server.go b/services/auth-service/pkg/command/server.go index 03b4181f65..1ab9fe87af 100644 --- a/services/auth-service/pkg/command/server.go +++ b/services/auth-service/pkg/command/server.go @@ -30,7 +30,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/auth-service/pkg/config/config.go b/services/auth-service/pkg/config/config.go index be7413e82d..231f6bcd3d 100644 --- a/services/auth-service/pkg/config/config.go +++ b/services/auth-service/pkg/config/config.go @@ -9,7 +9,6 @@ import ( type Config struct { Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` Log *Log `yaml:"log"` Debug Debug `yaml:"debug"` diff --git a/services/auth-service/pkg/config/defaults/defaultconfig.go b/services/auth-service/pkg/config/defaults/defaultconfig.go index de1e960eea..d1a9f7d08f 100644 --- a/services/auth-service/pkg/config/defaults/defaultconfig.go +++ b/services/auth-service/pkg/config/defaults/defaultconfig.go @@ -48,17 +48,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Reva == nil && cfg.Commons != nil { cfg.Reva = structs.CopyOrZeroValue(cfg.Commons.Reva) diff --git a/services/auth-service/pkg/config/tracing.go b/services/auth-service/pkg/config/tracing.go deleted file mode 100644 index 6026b5a5f0..0000000000 --- a/services/auth-service/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing is the config for tracing parameters -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;AUTH_SERVICE_TRACING_ENABLED" desc:"Activates tracing." 
introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;AUTH_SERVICE_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;AUTH_SERVICE_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;AUTH_SERVICE_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/auth-service/pkg/revaconfig/config.go b/services/auth-service/pkg/revaconfig/config.go index 4f970742aa..1a49488b0e 100644 --- a/services/auth-service/pkg/revaconfig/config.go +++ b/services/auth-service/pkg/revaconfig/config.go @@ -7,13 +7,6 @@ import ( // AuthMachineConfigFromStruct will adapt an OpenCloud config struct into a reva mapstructure to start a reva service. func AuthMachineConfigFromStruct(cfg *config.Config) map[string]interface{} { return map[string]interface{}{ - "core": map[string]interface{}{ - "tracing_enabled": cfg.Tracing.Enabled, - "tracing_exporter": cfg.Tracing.Type, - "tracing_endpoint": cfg.Tracing.Endpoint, - "tracing_collector": cfg.Tracing.Collector, - "tracing_service_name": cfg.Service.Name, - }, "shared": map[string]interface{}{ "jwt_secret": cfg.TokenManager.JWTSecret, "gatewaysvc": cfg.Reva.Address, diff --git a/services/clientlog/pkg/command/server.go b/services/clientlog/pkg/command/server.go index a82fadd6ae..41bc8b24da 100644 --- a/services/clientlog/pkg/command/server.go +++ b/services/clientlog/pkg/command/server.go @@ -57,7 +57,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - tracerProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + tracerProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/clientlog/pkg/config/config.go b/services/clientlog/pkg/config/config.go index bf3d5637d6..5544265ad2 100644 --- a/services/clientlog/pkg/config/config.go +++ b/services/clientlog/pkg/config/config.go @@ -12,9 +12,8 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` GRPCClientTLS *shared.GRPCClientTLS `yaml:"grpc_client_tls"` diff --git a/services/clientlog/pkg/config/defaults/defaultconfig.go b/services/clientlog/pkg/config/defaults/defaultconfig.go index 4f7ba91be6..5c615e374b 100644 --- a/services/clientlog/pkg/config/defaults/defaultconfig.go +++ b/services/clientlog/pkg/config/defaults/defaultconfig.go @@ -61,17 +61,6 @@ func EnsureDefaults(cfg *config.Config) { cfg.TokenManager = &config.TokenManager{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". 
- if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } } // Sanitize sanitizes the config diff --git a/services/clientlog/pkg/config/tracing.go b/services/clientlog/pkg/config/tracing.go deleted file mode 100644 index 494201ba85..0000000000 --- a/services/clientlog/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;CLIENTLOG_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;CLIENTLOG_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;CLIENTLOG_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;CLIENTLOG_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/collaboration/pkg/command/server.go b/services/collaboration/pkg/command/server.go index 36f660a256..e6f3e135dc 100644 --- a/services/collaboration/pkg/command/server.go +++ b/services/collaboration/pkg/command/server.go @@ -37,7 +37,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/collaboration/pkg/config/config.go b/services/collaboration/pkg/config/config.go index 85aeca287b..bdf62a8c4d 100644 --- a/services/collaboration/pkg/config/config.go +++ b/services/collaboration/pkg/config/config.go @@ -4,7 +4,6 @@ import ( "context" "github.com/opencloud-eu/opencloud/pkg/shared" - "github.com/opencloud-eu/opencloud/pkg/tracing" ) // Config combines all available configuration parts. @@ -23,27 +22,8 @@ type Config struct { Wopi Wopi `yaml:"wopi"` CS3Api CS3Api `yaml:"cs3api"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` Context context.Context `yaml:"-"` } - -// Tracing defines the available tracing configuration. Not used at the moment -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;COLLABORATION_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;COLLABORATION_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. 
Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;COLLABORATION_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;COLLABORATION_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/collaboration/pkg/config/defaults/defaultconfig.go b/services/collaboration/pkg/config/defaults/defaultconfig.go index a25a96d6cd..c8119dab37 100644 --- a/services/collaboration/pkg/config/defaults/defaultconfig.go +++ b/services/collaboration/pkg/config/defaults/defaultconfig.go @@ -84,18 +84,6 @@ func EnsureDefaults(cfg *config.Config) { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } - if cfg.TokenManager == nil && cfg.Commons != nil && cfg.Commons.TokenManager != nil { cfg.TokenManager = &config.TokenManager{ JWTSecret: cfg.Commons.TokenManager.JWTSecret, diff --git a/services/eventhistory/pkg/command/server.go b/services/eventhistory/pkg/command/server.go index 245b77bb07..12200b4072 100644 --- a/services/eventhistory/pkg/command/server.go +++ b/services/eventhistory/pkg/command/server.go @@ -35,7 +35,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/eventhistory/pkg/config/config.go b/services/eventhistory/pkg/config/config.go index 73e4ccbdd1..b4a46ce2b5 100644 --- a/services/eventhistory/pkg/config/config.go +++ b/services/eventhistory/pkg/config/config.go @@ -14,9 +14,8 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` GRPC GRPCConfig `yaml:"grpc"` GRPCClientTLS *shared.GRPCClientTLS `yaml:"grpc_client_tls"` diff --git a/services/eventhistory/pkg/config/defaults/defaultconfig.go b/services/eventhistory/pkg/config/defaults/defaultconfig.go index 82369db88e..f8180be7dc 100644 --- a/services/eventhistory/pkg/config/defaults/defaultconfig.go +++ b/services/eventhistory/pkg/config/defaults/defaultconfig.go @@ -60,17 +60,6 @@ func EnsureDefaults(cfg *config.Config) { cfg.Log = &config.Log{} } - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - 
Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } - if cfg.GRPCClientTLS == nil && cfg.Commons != nil { cfg.GRPCClientTLS = structs.CopyOrZeroValue(cfg.Commons.GRPCClientTLS) } diff --git a/services/eventhistory/pkg/config/tracing.go b/services/eventhistory/pkg/config/tracing.go deleted file mode 100644 index d3a062a9aa..0000000000 --- a/services/eventhistory/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;EVENTHISTORY_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;EVENTHISTORY_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;EVENTHISTORY_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;EVENTHISTORY_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/frontend/pkg/command/events.go b/services/frontend/pkg/command/events.go index d4ee94d0e6..24f618d8f1 100644 --- a/services/frontend/pkg/command/events.go +++ b/services/frontend/pkg/command/events.go @@ -53,7 +53,7 @@ func ListenForEvents(ctx context.Context, cfg *config.Config, l log.Logger) erro return err } - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(ctx, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { l.Error().Err(err).Msg("cannot initialize tracing") return err diff --git a/services/frontend/pkg/command/server.go b/services/frontend/pkg/command/server.go index 909475f369..355f040a01 100644 --- a/services/frontend/pkg/command/server.go +++ b/services/frontend/pkg/command/server.go @@ -31,7 +31,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/frontend/pkg/config/config.go b/services/frontend/pkg/config/config.go index 4583d221ca..34ea42cce3 100644 --- a/services/frontend/pkg/config/config.go +++ b/services/frontend/pkg/config/config.go @@ -10,7 +10,6 @@ import ( type Config struct { Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` Log *Log `yaml:"log"` Debug Debug `yaml:"debug"` diff --git a/services/frontend/pkg/config/defaults/defaultconfig.go b/services/frontend/pkg/config/defaults/defaultconfig.go index aed80f2cd3..709cca9727 100644 --- 
a/services/frontend/pkg/config/defaults/defaultconfig.go +++ b/services/frontend/pkg/config/defaults/defaultconfig.go @@ -158,17 +158,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Reva == nil && cfg.Commons != nil { cfg.Reva = structs.CopyOrZeroValue(cfg.Commons.Reva) diff --git a/services/frontend/pkg/config/tracing.go b/services/frontend/pkg/config/tracing.go deleted file mode 100644 index e5a7350d35..0000000000 --- a/services/frontend/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing sets the tracing parameters for the frontend service. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;FRONTEND_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;FRONTEND_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;FRONTEND_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;FRONTEND_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. 
-func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/gateway/pkg/command/server.go b/services/gateway/pkg/command/server.go index b0dca37d1a..f9f2a846a1 100644 --- a/services/gateway/pkg/command/server.go +++ b/services/gateway/pkg/command/server.go @@ -30,7 +30,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/gateway/pkg/config/config.go b/services/gateway/pkg/config/config.go index 5ba9addd32..4f6a50f15f 100644 --- a/services/gateway/pkg/config/config.go +++ b/services/gateway/pkg/config/config.go @@ -10,10 +10,9 @@ import ( type Config struct { Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service - Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Service Service `yaml:"-"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` GRPC GRPCConfig `yaml:"grpc"` diff --git a/services/gateway/pkg/config/defaults/defaultconfig.go b/services/gateway/pkg/config/defaults/defaultconfig.go index f3036ed170..c0fc844284 100644 --- a/services/gateway/pkg/config/defaults/defaultconfig.go +++ b/services/gateway/pkg/config/defaults/defaultconfig.go @@ -85,17 +85,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Reva == nil && cfg.Commons != nil { cfg.Reva = structs.CopyOrZeroValue(cfg.Commons.Reva) diff --git a/services/gateway/pkg/config/tracing.go b/services/gateway/pkg/config/tracing.go deleted file mode 100644 index b7c1afd3fc..0000000000 --- a/services/gateway/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the configuration options for tracing. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;GATEWAY_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;GATEWAY_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;GATEWAY_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;GATEWAY_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. 
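
The new tracing.GetTraceProvider(ctx, cfg.Commons.TracesExporter, serviceName) helper lives in pkg/tracing/tracing.go, which is rewritten earlier in this patch but not shown in these hunks. For orientation only, an OTLP-based provider built with the standard OpenTelemetry Go SDK might look roughly like the sketch below; the exporter options and the shape of the TracesExporter settings are assumptions, and only the function name and argument order come from the call sites above.

// Sketch, not the actual pkg/tracing implementation: build a TracerProvider
// that exports spans via OTLP/HTTP. Endpoint handling and sampling are
// assumptions for illustration.
package tracingsketch

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
	sdkresource "go.opentelemetry.io/otel/sdk/resource"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/noop"
)

// ExporterSettings is a stand-in for whatever shared.Commons.TracesExporter
// actually carries; the field names here are hypothetical.
type ExporterSettings struct {
	Enabled  bool
	Endpoint string // e.g. "otel-collector:4318" for OTLP/HTTP
}

// GetTraceProvider returns a span-dropping provider when tracing is disabled
// and an OTLP-exporting provider otherwise.
func GetTraceProvider(ctx context.Context, cfg ExporterSettings, serviceName string) (trace.TracerProvider, error) {
	if !cfg.Enabled {
		return noop.NewTracerProvider(), nil
	}

	exp, err := otlptracehttp.New(ctx,
		otlptracehttp.WithEndpoint(cfg.Endpoint),
		otlptracehttp.WithInsecure(),
	)
	if err != nil {
		return nil, err
	}

	res := sdkresource.NewSchemaless(attribute.String("service.name", serviceName))

	return sdktrace.NewTracerProvider(
		sdktrace.WithBatcher(exp),
		sdktrace.WithResource(res),
	), nil
}
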
-func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/gateway/pkg/revaconfig/config.go b/services/gateway/pkg/revaconfig/config.go index 9969d9790d..8bd8e20806 100644 --- a/services/gateway/pkg/revaconfig/config.go +++ b/services/gateway/pkg/revaconfig/config.go @@ -16,13 +16,6 @@ func GatewayConfigFromStruct(cfg *config.Config, logger log.Logger) map[string]i localEndpoint := pkgconfig.LocalEndpoint(cfg.GRPC.Protocol, cfg.GRPC.Addr) rcfg := map[string]interface{}{ - "core": map[string]interface{}{ - "tracing_enabled": cfg.Tracing.Enabled, - "tracing_exporter": cfg.Tracing.Type, - "tracing_endpoint": cfg.Tracing.Endpoint, - "tracing_collector": cfg.Tracing.Collector, - "tracing_service_name": cfg.Service.Name, - }, "shared": map[string]interface{}{ "jwt_secret": cfg.TokenManager.JWTSecret, "gatewaysvc": cfg.Reva.Address, diff --git a/services/graph/pkg/command/server.go b/services/graph/pkg/command/server.go index c96e413795..4642f88023 100644 --- a/services/graph/pkg/command/server.go +++ b/services/graph/pkg/command/server.go @@ -33,7 +33,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/graph/pkg/config/config.go b/services/graph/pkg/config/config.go index e95c3e6523..7841f82543 100644 --- a/services/graph/pkg/config/config.go +++ b/services/graph/pkg/config/config.go @@ -13,10 +13,9 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Cache *Cache `yaml:"cache"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Cache *Cache `yaml:"cache"` + Debug Debug `yaml:"debug"` HTTP HTTP `yaml:"http"` diff --git a/services/graph/pkg/config/defaults/defaultconfig.go b/services/graph/pkg/config/defaults/defaultconfig.go index d840868bfc..0a97411a82 100644 --- a/services/graph/pkg/config/defaults/defaultconfig.go +++ b/services/graph/pkg/config/defaults/defaultconfig.go @@ -151,17 +151,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Cache == nil && cfg.Commons != nil && cfg.Commons.Cache != nil { cfg.Cache = &config.Cache{ diff --git a/services/graph/pkg/config/tracing.go b/services/graph/pkg/config/tracing.go deleted file mode 100644 index 720a868329..0000000000 --- a/services/graph/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;GRAPH_TRACING_ENABLED" desc:"Activates tracing." 
introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;GRAPH_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;GRAPH_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;GRAPH_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/groups/pkg/command/server.go b/services/groups/pkg/command/server.go index 270400fc17..2c570d7ae5 100644 --- a/services/groups/pkg/command/server.go +++ b/services/groups/pkg/command/server.go @@ -31,7 +31,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/groups/pkg/config/config.go b/services/groups/pkg/config/config.go index 9b08053ace..ae7f59b75f 100644 --- a/services/groups/pkg/config/config.go +++ b/services/groups/pkg/config/config.go @@ -9,7 +9,6 @@ import ( type Config struct { Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` Log *Log `yaml:"log"` Debug Debug `yaml:"debug"` diff --git a/services/groups/pkg/config/defaults/defaultconfig.go b/services/groups/pkg/config/defaults/defaultconfig.go index 71c099fdf7..568addf4cb 100644 --- a/services/groups/pkg/config/defaults/defaultconfig.go +++ b/services/groups/pkg/config/defaults/defaultconfig.go @@ -95,17 +95,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Reva == nil && cfg.Commons != nil { cfg.Reva = structs.CopyOrZeroValue(cfg.Commons.Reva) diff --git a/services/groups/pkg/config/tracing.go b/services/groups/pkg/config/tracing.go deleted file mode 100644 index aa6aa42959..0000000000 --- a/services/groups/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing contains the tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;GROUPS_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;GROUPS_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. 
Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;GROUPS_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;GROUPS_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/groups/pkg/revaconfig/config.go b/services/groups/pkg/revaconfig/config.go index 49443c5610..9fb5a4ffc5 100644 --- a/services/groups/pkg/revaconfig/config.go +++ b/services/groups/pkg/revaconfig/config.go @@ -7,13 +7,6 @@ import ( // GroupsConfigFromStruct will adapt an OpenCloud config struct into a reva mapstructure to start a reva service. func GroupsConfigFromStruct(cfg *config.Config) map[string]interface{} { return map[string]interface{}{ - "core": map[string]interface{}{ - "tracing_enabled": cfg.Tracing.Enabled, - "tracing_exporter": cfg.Tracing.Type, - "tracing_endpoint": cfg.Tracing.Endpoint, - "tracing_collector": cfg.Tracing.Collector, - "tracing_service_name": cfg.Service.Name, - }, "shared": map[string]interface{}{ "jwt_secret": cfg.TokenManager.JWTSecret, "gatewaysvc": cfg.Reva.Address, diff --git a/services/idm/pkg/config/config.go b/services/idm/pkg/config/config.go index 225ebbfb29..173c6f4803 100644 --- a/services/idm/pkg/config/config.go +++ b/services/idm/pkg/config/config.go @@ -12,9 +12,8 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` IDM Settings `yaml:"idm"` CreateDemoUsers bool `yaml:"create_demo_users" env:"IDM_CREATE_DEMO_USERS" desc:"Flag to enable or disable the creation of the demo users." introductionVersion:"1.0.0"` diff --git a/services/idm/pkg/config/defaults/defaultconfig.go b/services/idm/pkg/config/defaults/defaultconfig.go index b85c83fce0..0bf7b8afb1 100644 --- a/services/idm/pkg/config/defaults/defaultconfig.go +++ b/services/idm/pkg/config/defaults/defaultconfig.go @@ -51,17 +51,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.AdminUserID == "" && cfg.Commons != nil { cfg.AdminUserID = cfg.Commons.AdminUserID diff --git a/services/idm/pkg/config/tracing.go b/services/idm/pkg/config/tracing.go deleted file mode 100644 index 19e026e5ce..0000000000 --- a/services/idm/pkg/config/tracing.go +++ /dev/null @@ -1,9 +0,0 @@ -package config - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;IDM_TRACING_ENABLED" desc:"Activates tracing." 
introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;IDM_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;IDM_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;IDM_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} diff --git a/services/idp/pkg/command/server.go b/services/idp/pkg/command/server.go index 5240e488da..9112a481a6 100644 --- a/services/idp/pkg/command/server.go +++ b/services/idp/pkg/command/server.go @@ -54,7 +54,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/idp/pkg/config/config.go b/services/idp/pkg/config/config.go index dbb6457738..af16cb0763 100644 --- a/services/idp/pkg/config/config.go +++ b/services/idp/pkg/config/config.go @@ -13,9 +13,8 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` HTTP HTTP `yaml:"http"` diff --git a/services/idp/pkg/config/defaults/defaultconfig.go b/services/idp/pkg/config/defaults/defaultconfig.go index 176a3ac5d2..09c07d957d 100644 --- a/services/idp/pkg/config/defaults/defaultconfig.go +++ b/services/idp/pkg/config/defaults/defaultconfig.go @@ -149,17 +149,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Reva == nil && cfg.Commons != nil { cfg.Reva = structs.CopyOrZeroValue(cfg.Commons.Reva) diff --git a/services/idp/pkg/config/tracing.go b/services/idp/pkg/config/tracing.go deleted file mode 100644 index 483ac85b86..0000000000 --- a/services/idp/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;IDP_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;IDP_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;IDP_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." 
introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;IDP_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/invitations/pkg/command/server.go b/services/invitations/pkg/command/server.go index 066080ebb8..5b0a6a739a 100644 --- a/services/invitations/pkg/command/server.go +++ b/services/invitations/pkg/command/server.go @@ -30,7 +30,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/invitations/pkg/config/config.go b/services/invitations/pkg/config/config.go index 03a3ddf9e0..b3b363f10a 100644 --- a/services/invitations/pkg/config/config.go +++ b/services/invitations/pkg/config/config.go @@ -12,9 +12,8 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` HTTP HTTP `yaml:"http"` diff --git a/services/invitations/pkg/config/defaults/defaultconfig.go b/services/invitations/pkg/config/defaults/defaultconfig.go index 7da0027ff5..664815e3d1 100644 --- a/services/invitations/pkg/config/defaults/defaultconfig.go +++ b/services/invitations/pkg/config/defaults/defaultconfig.go @@ -54,17 +54,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Commons != nil { cfg.HTTP.TLS = cfg.Commons.HTTPServiceTLS diff --git a/services/invitations/pkg/config/tracing.go b/services/invitations/pkg/config/tracing.go deleted file mode 100644 index b992054761..0000000000 --- a/services/invitations/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;INVITATIONS_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;INVITATIONS_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;INVITATIONS_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." 
introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;INVITATIONS_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/nats/pkg/config/config.go b/services/nats/pkg/config/config.go index 92fd4139d5..3f4cb34eb1 100644 --- a/services/nats/pkg/config/config.go +++ b/services/nats/pkg/config/config.go @@ -10,7 +10,6 @@ import ( type Config struct { Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` Log *Log `yaml:"log"` Debug Debug `yaml:"debug"` @@ -30,11 +29,3 @@ type Nats struct { TLSSkipVerifyClientCert bool `yaml:"tls_skip_verify_client_cert" env:"OC_INSECURE;NATS_TLS_SKIP_VERIFY_CLIENT_CERT" desc:"Whether the NATS server should skip the client certificate verification during the TLS handshake." introductionVersion:"1.0.0"` EnableTLS bool `yaml:"enable_tls" env:"OC_EVENTS_ENABLE_TLS;NATS_EVENTS_ENABLE_TLS" desc:"Enable TLS for the connection to the events broker. The events broker is the OpenCloud service which receives and delivers events between the services." introductionVersion:"1.0.0"` } - -// Tracing is the tracing config -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;NATS_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;NATS_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;NATS_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;NATS_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} diff --git a/services/nats/pkg/config/defaults/defaultconfig.go b/services/nats/pkg/config/defaults/defaultconfig.go index af1e0d0c35..05a2f4e341 100644 --- a/services/nats/pkg/config/defaults/defaultconfig.go +++ b/services/nats/pkg/config/defaults/defaultconfig.go @@ -56,17 +56,6 @@ func EnsureDefaults(cfg *config.Config) { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". 
- if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } } // Sanitize sanitizes the configuration diff --git a/services/notifications/pkg/command/server.go b/services/notifications/pkg/command/server.go index 6297a8fac0..91ab0863ff 100644 --- a/services/notifications/pkg/command/server.go +++ b/services/notifications/pkg/command/server.go @@ -43,7 +43,7 @@ func Server(cfg *config.Config) *cli.Command { Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/notifications/pkg/config/config.go b/services/notifications/pkg/config/config.go index d30e96acdd..8dc4905cff 100644 --- a/services/notifications/pkg/config/config.go +++ b/services/notifications/pkg/config/config.go @@ -14,9 +14,8 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` WebUIURL string `yaml:"opencloud_url" env:"OC_URL;NOTIFICATIONS_WEB_UI_URL" desc:"The public facing URL of the OpenCloud Web UI, used e.g. when sending notification eMails" introductionVersion:"1.0.0"` diff --git a/services/notifications/pkg/config/defaults/defaultconfig.go b/services/notifications/pkg/config/defaults/defaultconfig.go index b4ab58225f..3e2a07b54a 100644 --- a/services/notifications/pkg/config/defaults/defaultconfig.go +++ b/services/notifications/pkg/config/defaults/defaultconfig.go @@ -66,18 +66,6 @@ func EnsureDefaults(cfg *config.Config) { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } - if cfg.Notifications.GRPCClientTLS == nil && cfg.Commons != nil { cfg.Notifications.GRPCClientTLS = structs.CopyOrZeroValue(cfg.Commons.GRPCClientTLS) } diff --git a/services/notifications/pkg/config/tracing.go b/services/notifications/pkg/config/tracing.go deleted file mode 100644 index 40532ac4ba..0000000000 --- a/services/notifications/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;NOTIFICATIONS_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;NOTIFICATIONS_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." 
introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;NOTIFICATIONS_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;NOTIFICATIONS_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/ocdav/pkg/command/server.go b/services/ocdav/pkg/command/server.go index cc1573a6ab..300878cadf 100644 --- a/services/ocdav/pkg/command/server.go +++ b/services/ocdav/pkg/command/server.go @@ -32,7 +32,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } @@ -91,7 +91,6 @@ func Server(cfg *config.Config) *cli.Command { // ocdav.TLSConfig() // tls config for the http server ocdav.MetricsEnabled(true), ocdav.MetricsNamespace("ocis"), - ocdav.Tracing("Adding these strings is a workaround for ->", "https://github.com/cs3org/reva/issues/4131"), ocdav.WithTraceProvider(traceProvider), ocdav.RegisterTTL(registry.GetRegisterTTL()), ocdav.RegisterInterval(registry.GetRegisterInterval()), diff --git a/services/ocdav/pkg/config/config.go b/services/ocdav/pkg/config/config.go index 627a6530d9..45a46c9922 100644 --- a/services/ocdav/pkg/config/config.go +++ b/services/ocdav/pkg/config/config.go @@ -9,7 +9,6 @@ import ( type Config struct { Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` Log *Log `yaml:"log"` Debug Debug `yaml:"debug"` diff --git a/services/ocdav/pkg/config/defaults/defaultconfig.go b/services/ocdav/pkg/config/defaults/defaultconfig.go index a2eca1c8c5..e6a4130d9e 100644 --- a/services/ocdav/pkg/config/defaults/defaultconfig.go +++ b/services/ocdav/pkg/config/defaults/defaultconfig.go @@ -110,17 +110,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Reva == nil && cfg.Commons != nil { cfg.Reva = structs.CopyOrZeroValue(cfg.Commons.Reva) diff --git a/services/ocdav/pkg/config/tracing.go b/services/ocdav/pkg/config/tracing.go deleted file mode 100644 index 880de268cf..0000000000 --- a/services/ocdav/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. 
-type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;OCDAV_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;OCDAV_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;OCDAV_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;OCDAV_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/ocm/pkg/command/server.go b/services/ocm/pkg/command/server.go index 6b2cd8406d..c58d31de00 100644 --- a/services/ocm/pkg/command/server.go +++ b/services/ocm/pkg/command/server.go @@ -30,7 +30,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/ocm/pkg/config/config.go b/services/ocm/pkg/config/config.go index 4b9d7de901..26a7cab621 100644 --- a/services/ocm/pkg/config/config.go +++ b/services/ocm/pkg/config/config.go @@ -15,9 +15,8 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` HTTP HTTPConfig `yaml:"http"` Middleware Middleware `yaml:"middleware"` diff --git a/services/ocm/pkg/config/defaults/defaultconfig.go b/services/ocm/pkg/config/defaults/defaultconfig.go index ce2f4e5e7a..8fc8db5ad2 100644 --- a/services/ocm/pkg/config/defaults/defaultconfig.go +++ b/services/ocm/pkg/config/defaults/defaultconfig.go @@ -151,17 +151,6 @@ func EnsureDefaults(cfg *config.Config) { cfg.Log = &config.Log{} } - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } - if cfg.Reva == nil && cfg.Commons != nil { cfg.Reva = structs.CopyOrZeroValue(cfg.Commons.Reva) } diff --git a/services/ocm/pkg/config/tracing.go b/services/ocm/pkg/config/tracing.go deleted file mode 100644 index 1c2ffbf694..0000000000 --- a/services/ocm/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;OCM_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;OCM_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. 
Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;OCM_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;OCM_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/ocs/pkg/command/server.go b/services/ocs/pkg/command/server.go index c0df575082..67927152b3 100644 --- a/services/ocs/pkg/command/server.go +++ b/services/ocs/pkg/command/server.go @@ -30,7 +30,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/ocs/pkg/config/config.go b/services/ocs/pkg/config/config.go index c6031123dc..46c4e440b5 100644 --- a/services/ocs/pkg/config/config.go +++ b/services/ocs/pkg/config/config.go @@ -14,9 +14,8 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` HTTP HTTP `yaml:"http"` diff --git a/services/ocs/pkg/config/defaults/defaultconfig.go b/services/ocs/pkg/config/defaults/defaultconfig.go index 9d453105f5..1b0a7d55ce 100644 --- a/services/ocs/pkg/config/defaults/defaultconfig.go +++ b/services/ocs/pkg/config/defaults/defaultconfig.go @@ -60,17 +60,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.TokenManager == nil && cfg.Commons != nil && cfg.Commons.TokenManager != nil { cfg.TokenManager = &config.TokenManager{ diff --git a/services/ocs/pkg/config/tracing.go b/services/ocs/pkg/config/tracing.go deleted file mode 100644 index f8d1e1bbb1..0000000000 --- a/services/ocs/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;OCS_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;OCS_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." 
introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;OCS_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;OCS_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/policies/pkg/command/server.go b/services/policies/pkg/command/server.go index de1e8fec1c..8726bf4297 100644 --- a/services/policies/pkg/command/server.go +++ b/services/policies/pkg/command/server.go @@ -49,7 +49,7 @@ func Server(cfg *config.Config) *cli.Command { log.File(cfg.Log.File), ).SubloggerWithRequestID(ctx) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/policies/pkg/config/config.go b/services/policies/pkg/config/config.go index 92d1d59e64..c7084051a2 100644 --- a/services/policies/pkg/config/config.go +++ b/services/policies/pkg/config/config.go @@ -19,7 +19,6 @@ type Config struct { Log *Log `yaml:"log"` Engine Engine `yaml:"engine"` Postprocessing Postprocessing `yaml:"postprocessing"` - Tracing *Tracing `yaml:"tracing"` } // Service defines the available service configuration. diff --git a/services/policies/pkg/config/defaults/defaultconfig.go b/services/policies/pkg/config/defaults/defaultconfig.go index 3dc97bec52..2791396eb1 100644 --- a/services/policies/pkg/config/defaults/defaultconfig.go +++ b/services/policies/pkg/config/defaults/defaultconfig.go @@ -61,17 +61,6 @@ func EnsureDefaults(cfg *config.Config) { cfg.GRPC.TLS = structs.CopyOrZeroValue(cfg.Commons.GRPCServiceTLS) } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } } func Sanitize(_ *config.Config) {} diff --git a/services/policies/pkg/config/tracing.go b/services/policies/pkg/config/tracing.go deleted file mode 100644 index 62cb8b7705..0000000000 --- a/services/policies/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;POLICIES_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;POLICIES_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;POLICIES_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." 
introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;POLICIES_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/postprocessing/pkg/command/server.go b/services/postprocessing/pkg/command/server.go index b48e022fb9..95df483841 100644 --- a/services/postprocessing/pkg/command/server.go +++ b/services/postprocessing/pkg/command/server.go @@ -43,7 +43,7 @@ func Server(cfg *config.Config) *cli.Command { } ctx := cfg.Context - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/postprocessing/pkg/config/config.go b/services/postprocessing/pkg/config/config.go index 5ff29a7ee4..29dfd76421 100644 --- a/services/postprocessing/pkg/config/config.go +++ b/services/postprocessing/pkg/config/config.go @@ -13,9 +13,8 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` Store Store `yaml:"store"` Postprocessing Postprocessing `yaml:"postprocessing"` diff --git a/services/postprocessing/pkg/config/defaults/defaultconfig.go b/services/postprocessing/pkg/config/defaults/defaultconfig.go index 2a58db25db..09305ba7c9 100644 --- a/services/postprocessing/pkg/config/defaults/defaultconfig.go +++ b/services/postprocessing/pkg/config/defaults/defaultconfig.go @@ -60,17 +60,6 @@ func EnsureDefaults(cfg *config.Config) { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } } // Sanitize does nothing atm diff --git a/services/postprocessing/pkg/config/tracing.go b/services/postprocessing/pkg/config/tracing.go deleted file mode 100644 index e70927679d..0000000000 --- a/services/postprocessing/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;POSTPROCESSING_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;POSTPROCESSING_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;POSTPROCESSING_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." 
introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;POSTPROCESSING_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/proxy/pkg/command/server.go b/services/proxy/pkg/command/server.go index 0cecb4aa44..a1f2f842dd 100644 --- a/services/proxy/pkg/command/server.go +++ b/services/proxy/pkg/command/server.go @@ -77,7 +77,7 @@ func Server(cfg *config.Config) *cli.Command { ) logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/proxy/pkg/config/config.go b/services/proxy/pkg/config/config.go index 659c675eb7..2ec77cc364 100644 --- a/services/proxy/pkg/config/config.go +++ b/services/proxy/pkg/config/config.go @@ -14,9 +14,8 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug" mask:"struct"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug" mask:"struct"` HTTP HTTP `yaml:"http"` diff --git a/services/proxy/pkg/config/defaults/defaultconfig.go b/services/proxy/pkg/config/defaults/defaultconfig.go index e0b2bcf5c8..1d6f8c3ef1 100644 --- a/services/proxy/pkg/config/defaults/defaultconfig.go +++ b/services/proxy/pkg/config/defaults/defaultconfig.go @@ -297,17 +297,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.OIDC.UserinfoCache == nil && cfg.Commons != nil && cfg.Commons.Cache != nil { cfg.OIDC.UserinfoCache = &config.Cache{ diff --git a/services/proxy/pkg/config/tracing.go b/services/proxy/pkg/config/tracing.go deleted file mode 100644 index d404ebacb5..0000000000 --- a/services/proxy/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;PROXY_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;PROXY_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;PROXY_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." 
introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;PROXY_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/proxy/pkg/proxy/proxy_integration_test.go b/services/proxy/pkg/proxy/proxy_integration_test.go index 12189d44c0..ea32170e04 100644 --- a/services/proxy/pkg/proxy/proxy_integration_test.go +++ b/services/proxy/pkg/proxy/proxy_integration_test.go @@ -224,7 +224,6 @@ func testConfig(policy []config.Policy) *config.Config { Log: &config.Log{}, Debug: config.Debug{}, HTTP: config.HTTP{}, - Tracing: &config.Tracing{}, Policies: policy, OIDC: config.OIDC{}, PolicySelector: nil, diff --git a/services/search/pkg/command/index.go b/services/search/pkg/command/index.go index 2980ca35e3..9ea4a66161 100644 --- a/services/search/pkg/command/index.go +++ b/services/search/pkg/command/index.go @@ -43,7 +43,7 @@ func Index(cfg *config.Config) *cli.Command { return errors.New("either --space or --all-spaces is required") } - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(ctx.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/search/pkg/command/server.go b/services/search/pkg/command/server.go index 20fc32144f..85c8755227 100644 --- a/services/search/pkg/command/server.go +++ b/services/search/pkg/command/server.go @@ -46,7 +46,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/search/pkg/config/config.go b/services/search/pkg/config/config.go index c00196b5f0..0ea50423df 100644 --- a/services/search/pkg/config/config.go +++ b/services/search/pkg/config/config.go @@ -13,9 +13,8 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` GRPC GRPCConfig `yaml:"grpc"` GrpcClient client.Client `yaml:"-"` diff --git a/services/search/pkg/config/defaults/defaultconfig.go b/services/search/pkg/config/defaults/defaultconfig.go index 1a17faee72..d55702f35c 100644 --- a/services/search/pkg/config/defaults/defaultconfig.go +++ b/services/search/pkg/config/defaults/defaultconfig.go @@ -81,17 +81,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". 
- if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.TokenManager == nil && cfg.Commons != nil && cfg.Commons.TokenManager != nil { cfg.TokenManager = &config.TokenManager{ diff --git a/services/search/pkg/config/tracing.go b/services/search/pkg/config/tracing.go deleted file mode 100644 index 3542953ad7..0000000000 --- a/services/search/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;SEARCH_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;SEARCH_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;SEARCH_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;SEARCH_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/settings/pkg/command/server.go b/services/settings/pkg/command/server.go index b44dec3894..f32903c222 100644 --- a/services/settings/pkg/command/server.go +++ b/services/settings/pkg/command/server.go @@ -32,7 +32,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/settings/pkg/config/config.go b/services/settings/pkg/config/config.go index 1513481601..17305a390a 100644 --- a/services/settings/pkg/config/config.go +++ b/services/settings/pkg/config/config.go @@ -15,9 +15,8 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` HTTP HTTP `yaml:"http"` GRPC GRPCConfig `yaml:"grpc"` diff --git a/services/settings/pkg/config/defaults/defaultconfig.go b/services/settings/pkg/config/defaults/defaultconfig.go index 9a5a8ed9c4..b2c1df408e 100644 --- a/services/settings/pkg/config/defaults/defaultconfig.go +++ b/services/settings/pkg/config/defaults/defaultconfig.go @@ -80,17 +80,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". 
- if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.TokenManager == nil && cfg.Commons != nil && cfg.Commons.TokenManager != nil { cfg.TokenManager = &config.TokenManager{ diff --git a/services/settings/pkg/config/tracing.go b/services/settings/pkg/config/tracing.go deleted file mode 100644 index e9c13e5067..0000000000 --- a/services/settings/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;SETTINGS_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;SETTINGS_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;SETTINGS_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;SETTINGS_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/sharing/pkg/command/server.go b/services/sharing/pkg/command/server.go index 9f7394d606..5e5c74081c 100644 --- a/services/sharing/pkg/command/server.go +++ b/services/sharing/pkg/command/server.go @@ -32,7 +32,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/sharing/pkg/config/config.go b/services/sharing/pkg/config/config.go index 988c9485df..9c76f3ab35 100644 --- a/services/sharing/pkg/config/config.go +++ b/services/sharing/pkg/config/config.go @@ -9,7 +9,6 @@ import ( type Config struct { Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` Log *Log `yaml:"log"` Debug Debug `yaml:"debug"` diff --git a/services/sharing/pkg/config/defaults/defaultconfig.go b/services/sharing/pkg/config/defaults/defaultconfig.go index 535634ff53..cce41eae9c 100644 --- a/services/sharing/pkg/config/defaults/defaultconfig.go +++ b/services/sharing/pkg/config/defaults/defaultconfig.go @@ -100,17 +100,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". 
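With the per-service Tracing structs and their OC_TRACING_*/<SERVICE>_TRACING_* environment variables gone, call sites depend only on the trace.TracerProvider interface, which is also why the proxy integration test earlier in this patch can simply drop its Tracing field. As a generic illustration (not code from this patch), a no-op provider satisfies the same interface, so tests and disabled-tracing setups need no exporter configuration at all:

package main

import (
	"context"

	"go.opentelemetry.io/otel/trace/noop"
)

func main() {
	// Stands in for whatever tracing.GetTraceProvider returns when tracing is disabled.
	tp := noop.NewTracerProvider()

	// Call sites create spans the same way regardless of how the provider was built.
	tracer := tp.Tracer("proxy-integration-test")
	_, span := tracer.Start(context.Background(), "handle-request")
	span.End()
}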
- if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Reva == nil && cfg.Commons != nil { cfg.Reva = structs.CopyOrZeroValue(cfg.Commons.Reva) diff --git a/services/sharing/pkg/config/tracing.go b/services/sharing/pkg/config/tracing.go deleted file mode 100644 index 23dc94e44f..0000000000 --- a/services/sharing/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;SHARING_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;SHARING_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;SHARING_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;SHARING_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/sse/pkg/command/server.go b/services/sse/pkg/command/server.go index e916204b46..60708f29dc 100644 --- a/services/sse/pkg/command/server.go +++ b/services/sse/pkg/command/server.go @@ -50,7 +50,7 @@ func Server(cfg *config.Config) *cli.Command { log.File(cfg.Log.File), ) - tracerProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + tracerProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/sse/pkg/config/config.go b/services/sse/pkg/config/config.go index 6392868437..dc7904a5a5 100644 --- a/services/sse/pkg/config/config.go +++ b/services/sse/pkg/config/config.go @@ -12,8 +12,7 @@ type Config struct { Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service Log *Log - Debug Debug `mask:"struct" yaml:"debug"` - Tracing *Tracing `yaml:"tracing"` + Debug Debug `mask:"struct" yaml:"debug"` Service Service `yaml:"-"` KeepAliveInterval time.Duration `yaml:"keepalive_interval" env:"SSE_KEEPALIVE_INTERVAL" desc:"To prevent intermediate proxies from closing the SSE connection, send periodic SSE comments to keep it open." 
introductionVersion:"1.0.0"` diff --git a/services/sse/pkg/config/defaults/defaultconfig.go b/services/sse/pkg/config/defaults/defaultconfig.go index d342cfee46..8ea79910f6 100644 --- a/services/sse/pkg/config/defaults/defaultconfig.go +++ b/services/sse/pkg/config/defaults/defaultconfig.go @@ -68,17 +68,6 @@ func EnsureDefaults(cfg *config.Config) { cfg.HTTP.TLS = cfg.Commons.HTTPServiceTLS } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } } // Sanitize sanitizes the configuration diff --git a/services/sse/pkg/config/tracing.go b/services/sse/pkg/config/tracing.go deleted file mode 100644 index 1997fa0796..0000000000 --- a/services/sse/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;SSE_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;SSE_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;SSE_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;SSE_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. 
-func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/storage-publiclink/pkg/command/server.go b/services/storage-publiclink/pkg/command/server.go index 97ff68a690..45be6bc369 100644 --- a/services/storage-publiclink/pkg/command/server.go +++ b/services/storage-publiclink/pkg/command/server.go @@ -30,7 +30,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/storage-publiclink/pkg/config/config.go b/services/storage-publiclink/pkg/config/config.go index 4d258257aa..c0728bc0d0 100644 --- a/services/storage-publiclink/pkg/config/config.go +++ b/services/storage-publiclink/pkg/config/config.go @@ -9,7 +9,6 @@ import ( type Config struct { Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` Log *Log `yaml:"log"` Debug Debug `yaml:"debug"` diff --git a/services/storage-publiclink/pkg/config/defaults/defaultconfig.go b/services/storage-publiclink/pkg/config/defaults/defaultconfig.go index 5c149918a0..33f01baf1e 100644 --- a/services/storage-publiclink/pkg/config/defaults/defaultconfig.go +++ b/services/storage-publiclink/pkg/config/defaults/defaultconfig.go @@ -51,17 +51,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Reva == nil && cfg.Commons != nil { cfg.Reva = structs.CopyOrZeroValue(cfg.Commons.Reva) diff --git a/services/storage-publiclink/pkg/config/tracing.go b/services/storage-publiclink/pkg/config/tracing.go deleted file mode 100644 index df67f8b189..0000000000 --- a/services/storage-publiclink/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the tracing config. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;STORAGE_PUBLICLINK_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;STORAGE_PUBLICLINK_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;STORAGE_PUBLICLINK_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;STORAGE_PUBLICLINK_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." 
introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/storage-shares/pkg/command/server.go b/services/storage-shares/pkg/command/server.go index 7347252e57..58c90d6711 100644 --- a/services/storage-shares/pkg/command/server.go +++ b/services/storage-shares/pkg/command/server.go @@ -30,7 +30,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/storage-shares/pkg/config/config.go b/services/storage-shares/pkg/config/config.go index c5f5a0d6b4..531a3752eb 100644 --- a/services/storage-shares/pkg/config/config.go +++ b/services/storage-shares/pkg/config/config.go @@ -9,7 +9,6 @@ import ( type Config struct { Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` Log *Log `yaml:"log"` Debug Debug `yaml:"debug"` diff --git a/services/storage-shares/pkg/config/defaults/defaultconfig.go b/services/storage-shares/pkg/config/defaults/defaultconfig.go index 9ad9dee361..28e2edcac7 100644 --- a/services/storage-shares/pkg/config/defaults/defaultconfig.go +++ b/services/storage-shares/pkg/config/defaults/defaultconfig.go @@ -51,17 +51,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Reva == nil && cfg.Commons != nil { cfg.Reva = structs.CopyOrZeroValue(cfg.Commons.Reva) diff --git a/services/storage-shares/pkg/config/tracing.go b/services/storage-shares/pkg/config/tracing.go deleted file mode 100644 index b5c7a71d42..0000000000 --- a/services/storage-shares/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines configuration options for tracing. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;STORAGE_SHARES_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;STORAGE_SHARES_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;STORAGE_SHARES_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;STORAGE_SHARES_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." 
introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/storage-system/pkg/command/server.go b/services/storage-system/pkg/command/server.go index c96a376aa3..b3f0323c44 100644 --- a/services/storage-system/pkg/command/server.go +++ b/services/storage-system/pkg/command/server.go @@ -31,7 +31,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/storage-system/pkg/config/config.go b/services/storage-system/pkg/config/config.go index 79ddca21f2..a8404c1dc9 100644 --- a/services/storage-system/pkg/config/config.go +++ b/services/storage-system/pkg/config/config.go @@ -11,7 +11,6 @@ import ( type Config struct { Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` Log *Log `yaml:"log"` Debug Debug `yaml:"debug"` diff --git a/services/storage-system/pkg/config/defaults/defaultconfig.go b/services/storage-system/pkg/config/defaults/defaultconfig.go index 86e40a83ea..e1de99edc3 100644 --- a/services/storage-system/pkg/config/defaults/defaultconfig.go +++ b/services/storage-system/pkg/config/defaults/defaultconfig.go @@ -72,17 +72,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Reva == nil && cfg.Commons != nil { cfg.Reva = structs.CopyOrZeroValue(cfg.Commons.Reva) diff --git a/services/storage-system/pkg/config/tracing.go b/services/storage-system/pkg/config/tracing.go deleted file mode 100644 index a9e505d3ee..0000000000 --- a/services/storage-system/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing holds Tracing config -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;STORAGE_SYSTEM_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;STORAGE_SYSTEM_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;STORAGE_SYSTEM_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;STORAGE_SYSTEM_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." 
introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/storage-users/pkg/command/server.go b/services/storage-users/pkg/command/server.go index a374757ac9..95d62944d2 100644 --- a/services/storage-users/pkg/command/server.go +++ b/services/storage-users/pkg/command/server.go @@ -32,7 +32,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/storage-users/pkg/config/config.go b/services/storage-users/pkg/config/config.go index 11e6788821..bef03d66d9 100644 --- a/services/storage-users/pkg/config/config.go +++ b/services/storage-users/pkg/config/config.go @@ -11,7 +11,6 @@ import ( type Config struct { Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` Log *Log `yaml:"log"` Debug Debug `yaml:"debug"` diff --git a/services/storage-users/pkg/config/defaults/defaultconfig.go b/services/storage-users/pkg/config/defaults/defaultconfig.go index 40149b5c0c..1a94c8d439 100644 --- a/services/storage-users/pkg/config/defaults/defaultconfig.go +++ b/services/storage-users/pkg/config/defaults/defaultconfig.go @@ -195,17 +195,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Reva == nil && cfg.Commons != nil { cfg.Reva = structs.CopyOrZeroValue(cfg.Commons.Reva) diff --git a/services/storage-users/pkg/config/tracing.go b/services/storage-users/pkg/config/tracing.go deleted file mode 100644 index 31956ea7e8..0000000000 --- a/services/storage-users/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing configures the tracing -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;STORAGE_USERS_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;STORAGE_USERS_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;STORAGE_USERS_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;STORAGE_USERS_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." 
introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/thumbnails/pkg/command/server.go b/services/thumbnails/pkg/command/server.go index 491428a243..195f70866b 100644 --- a/services/thumbnails/pkg/command/server.go +++ b/services/thumbnails/pkg/command/server.go @@ -32,7 +32,7 @@ func Server(cfg *config.Config) *cli.Command { Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/thumbnails/pkg/config/config.go b/services/thumbnails/pkg/config/config.go index a64671dc90..cd2f407b99 100644 --- a/services/thumbnails/pkg/config/config.go +++ b/services/thumbnails/pkg/config/config.go @@ -14,9 +14,8 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` GRPC GRPCConfig `yaml:"grpc"` HTTP HTTP `yaml:"http"` diff --git a/services/thumbnails/pkg/config/defaults/defaultconfig.go b/services/thumbnails/pkg/config/defaults/defaultconfig.go index 37551d8d24..949823d546 100644 --- a/services/thumbnails/pkg/config/defaults/defaultconfig.go +++ b/services/thumbnails/pkg/config/defaults/defaultconfig.go @@ -75,17 +75,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.GRPCClientTLS == nil && cfg.Commons != nil { cfg.GRPCClientTLS = structs.CopyOrZeroValue(cfg.Commons.GRPCClientTLS) diff --git a/services/thumbnails/pkg/config/tracing.go b/services/thumbnails/pkg/config/tracing.go deleted file mode 100644 index 3d0d1bc7ae..0000000000 --- a/services/thumbnails/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;THUMBNAILS_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;THUMBNAILS_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;THUMBNAILS_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;THUMBNAILS_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." 
introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. -func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/userlog/pkg/command/server.go b/services/userlog/pkg/command/server.go index 056540ee3e..25815dbcce 100644 --- a/services/userlog/pkg/command/server.go +++ b/services/userlog/pkg/command/server.go @@ -58,7 +58,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - tracerProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + tracerProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/userlog/pkg/config/config.go b/services/userlog/pkg/config/config.go index c57853339b..307c5b6ea5 100644 --- a/services/userlog/pkg/config/config.go +++ b/services/userlog/pkg/config/config.go @@ -13,9 +13,8 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` HTTP HTTP `yaml:"http"` GRPCClientTLS *shared.GRPCClientTLS `yaml:"grpc_client_tls"` diff --git a/services/userlog/pkg/config/defaults/defaultconfig.go b/services/userlog/pkg/config/defaults/defaultconfig.go index fcdf6bd35a..72e4a9af4f 100644 --- a/services/userlog/pkg/config/defaults/defaultconfig.go +++ b/services/userlog/pkg/config/defaults/defaultconfig.go @@ -86,17 +86,6 @@ func EnsureDefaults(cfg *config.Config) { cfg.HTTP.TLS = cfg.Commons.HTTPServiceTLS } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } } // Sanitize sanitizes the config diff --git a/services/userlog/pkg/config/tracing.go b/services/userlog/pkg/config/tracing.go deleted file mode 100644 index 94b5fa5d03..0000000000 --- a/services/userlog/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;USERLOG_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;USERLOG_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;USERLOG_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;USERLOG_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. 
-func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/users/pkg/command/server.go b/services/users/pkg/command/server.go index 939b27c9db..50a549708d 100644 --- a/services/users/pkg/command/server.go +++ b/services/users/pkg/command/server.go @@ -31,7 +31,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/users/pkg/config/config.go b/services/users/pkg/config/config.go index 63b426cc84..0773ceb440 100644 --- a/services/users/pkg/config/config.go +++ b/services/users/pkg/config/config.go @@ -9,7 +9,6 @@ import ( type Config struct { Commons *shared.Commons `yaml:"-"` // don't use this directly as configuration for a service Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` Log *Log `yaml:"log"` Debug Debug `yaml:"debug"` diff --git a/services/users/pkg/config/defaults/defaultconfig.go b/services/users/pkg/config/defaults/defaultconfig.go index 39051f6e43..bed8a152b7 100644 --- a/services/users/pkg/config/defaults/defaultconfig.go +++ b/services/users/pkg/config/defaults/defaultconfig.go @@ -100,17 +100,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Reva == nil && cfg.Commons != nil { cfg.Reva = structs.CopyOrZeroValue(cfg.Commons.Reva) diff --git a/services/users/pkg/config/tracing.go b/services/users/pkg/config/tracing.go deleted file mode 100644 index ad19291c9b..0000000000 --- a/services/users/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the configuration options for tracing. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;USERS_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;USERS_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;USERS_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;USERS_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. 
-func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/web/pkg/command/server.go b/services/web/pkg/command/server.go index 6b0c4a6244..007d123d10 100644 --- a/services/web/pkg/command/server.go +++ b/services/web/pkg/command/server.go @@ -30,7 +30,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/web/pkg/config/config.go b/services/web/pkg/config/config.go index 4fe5217080..c304c6d3c6 100644 --- a/services/web/pkg/config/config.go +++ b/services/web/pkg/config/config.go @@ -12,9 +12,8 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` HTTP HTTP `yaml:"http"` diff --git a/services/web/pkg/config/defaults/defaultconfig.go b/services/web/pkg/config/defaults/defaultconfig.go index 68e41a60d5..6286414cee 100644 --- a/services/web/pkg/config/defaults/defaultconfig.go +++ b/services/web/pkg/config/defaults/defaultconfig.go @@ -130,17 +130,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.TokenManager == nil && cfg.Commons != nil && cfg.Commons.TokenManager != nil { cfg.TokenManager = &config.TokenManager{ diff --git a/services/web/pkg/config/tracing.go b/services/web/pkg/config/tracing.go deleted file mode 100644 index abad9af5a6..0000000000 --- a/services/web/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;WEB_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;WEB_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;WEB_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;WEB_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. 
-func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/webdav/pkg/command/server.go b/services/webdav/pkg/command/server.go index 2ed4233b54..750f933f82 100644 --- a/services/webdav/pkg/command/server.go +++ b/services/webdav/pkg/command/server.go @@ -30,7 +30,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/webdav/pkg/config/config.go b/services/webdav/pkg/config/config.go index 47ecde9dcb..2e3c803b6b 100644 --- a/services/webdav/pkg/config/config.go +++ b/services/webdav/pkg/config/config.go @@ -13,9 +13,8 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` GRPCClientTLS *shared.GRPCClientTLS `yaml:"grpc_client_tls"` GrpcClient client.Client `yaml:"-"` diff --git a/services/webdav/pkg/config/defaults/defaultconfig.go b/services/webdav/pkg/config/defaults/defaultconfig.go index 1fbfc4010d..38dec14008 100644 --- a/services/webdav/pkg/config/defaults/defaultconfig.go +++ b/services/webdav/pkg/config/defaults/defaultconfig.go @@ -58,17 +58,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.GRPCClientTLS == nil && cfg.Commons != nil { cfg.GRPCClientTLS = structs.CopyOrZeroValue(cfg.Commons.GRPCClientTLS) diff --git a/services/webdav/pkg/config/tracing.go b/services/webdav/pkg/config/tracing.go deleted file mode 100644 index ef54752e97..0000000000 --- a/services/webdav/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;WEBDAV_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;WEBDAV_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;WEBDAV_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;WEBDAV_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. 
-func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/services/webfinger/pkg/command/server.go b/services/webfinger/pkg/command/server.go index 5a9504e79e..2d9a0c723b 100644 --- a/services/webfinger/pkg/command/server.go +++ b/services/webfinger/pkg/command/server.go @@ -31,7 +31,7 @@ func Server(cfg *config.Config) *cli.Command { }, Action: func(c *cli.Context) error { logger := logging.Configure(cfg.Service.Name, cfg.Log) - traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name) + traceProvider, err := tracing.GetTraceProvider(c.Context, cfg.Commons.TracesExporter, cfg.Service.Name) if err != nil { return err } diff --git a/services/webfinger/pkg/config/config.go b/services/webfinger/pkg/config/config.go index 2c5d8d56b4..7055a97ee7 100644 --- a/services/webfinger/pkg/config/config.go +++ b/services/webfinger/pkg/config/config.go @@ -12,9 +12,8 @@ type Config struct { Service Service `yaml:"-"` - Tracing *Tracing `yaml:"tracing"` - Log *Log `yaml:"log"` - Debug Debug `yaml:"debug"` + Log *Log `yaml:"log"` + Debug Debug `yaml:"debug"` HTTP HTTP `yaml:"http"` diff --git a/services/webfinger/pkg/config/defaults/defaultconfig.go b/services/webfinger/pkg/config/defaults/defaultconfig.go index 35ffe22e72..3d698ce7ff 100644 --- a/services/webfinger/pkg/config/defaults/defaultconfig.go +++ b/services/webfinger/pkg/config/defaults/defaultconfig.go @@ -67,17 +67,6 @@ func EnsureDefaults(cfg *config.Config) { } else if cfg.Log == nil { cfg.Log = &config.Log{} } - // provide with defaults for shared tracing, since we need a valid destination address for "envdecode". - if cfg.Tracing == nil && cfg.Commons != nil && cfg.Commons.Tracing != nil { - cfg.Tracing = &config.Tracing{ - Enabled: cfg.Commons.Tracing.Enabled, - Type: cfg.Commons.Tracing.Type, - Endpoint: cfg.Commons.Tracing.Endpoint, - Collector: cfg.Commons.Tracing.Collector, - } - } else if cfg.Tracing == nil { - cfg.Tracing = &config.Tracing{} - } if cfg.Commons != nil { cfg.HTTP.TLS = cfg.Commons.HTTPServiceTLS diff --git a/services/webfinger/pkg/config/tracing.go b/services/webfinger/pkg/config/tracing.go deleted file mode 100644 index d73fbe0d88..0000000000 --- a/services/webfinger/pkg/config/tracing.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import "github.com/opencloud-eu/opencloud/pkg/tracing" - -// Tracing defines the available tracing configuration. -type Tracing struct { - Enabled bool `yaml:"enabled" env:"OC_TRACING_ENABLED;WEBFINGER_TRACING_ENABLED" desc:"Activates tracing." introductionVersion:"1.0.0"` - Type string `yaml:"type" env:"OC_TRACING_TYPE;WEBFINGER_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now." introductionVersion:"1.0.0"` - Endpoint string `yaml:"endpoint" env:"OC_TRACING_ENDPOINT;WEBFINGER_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent." introductionVersion:"1.0.0"` - Collector string `yaml:"collector" env:"OC_TRACING_COLLECTOR;WEBFINGER_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset." introductionVersion:"1.0.0"` -} - -// Convert Tracing to the tracing package's Config struct. 
-func (t Tracing) Convert() tracing.Config { - return tracing.Config{ - Enabled: t.Enabled, - Type: t.Type, - Endpoint: t.Endpoint, - Collector: t.Collector, - } -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md b/vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md deleted file mode 100644 index 439bf79a90..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md +++ /dev/null @@ -1,56 +0,0 @@ -# OpenTelemetry-Go Jaeger Exporter - -[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/jaeger.svg)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger) - -> **Deprecated:** This module is no longer supported. -> OpenTelemetry dropped support for Jaeger exporter in July 2023. -> Jaeger officially accepts and recommends using OTLP. -> Use [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp) -> or [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc) instead. - -[OpenTelemetry span exporter for Jaeger](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/sdk_exporters/jaeger.md) implementation. - -## Installation - -``` -go get -u go.opentelemetry.io/otel/exporters/jaeger -``` - -## Example - -See [../../example/jaeger](../../example/jaeger). - -## Configuration - -The exporter can be used to send spans to: - -- Jaeger agent using `jaeger.thrift` over compact thrift protocol via - [`WithAgentEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentEndpoint) option. -- Jaeger collector using `jaeger.thrift` over HTTP via - [`WithCollectorEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithCollectorEndpoint) option. - -### Environment Variables - -The following environment variables can be used -(instead of options objects) to override the default configuration. - -| Environment variable | Option | Default value | -| --------------------------------- | --------------------------------------------------------------------------------------------- | ----------------------------------- | -| `OTEL_EXPORTER_JAEGER_AGENT_HOST` | [`WithAgentHost`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentHost) | `localhost` | -| `OTEL_EXPORTER_JAEGER_AGENT_PORT` | [`WithAgentPort`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentPort) | `6831` | -| `OTEL_EXPORTER_JAEGER_ENDPOINT` | [`WithEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithEndpoint) | `http://localhost:14268/api/traces` | -| `OTEL_EXPORTER_JAEGER_USER` | [`WithUsername`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithUsername) | | -| `OTEL_EXPORTER_JAEGER_PASSWORD` | [`WithPassword`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithPassword) | | - -Configuration using options have precedence over the environment variables. - -## Contributing - -This exporter uses a vendored copy of the Apache Thrift library (v0.14.1) at a custom import path. -When re-generating Thrift code in the future, please adapt import paths as necessary. 
- -## References - -- [Jaeger](https://www.jaegertracing.io/) -- [OpenTelemetry to Jaeger Transformation](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/sdk_exporters/jaeger.md) -- [OpenTelemetry Environment Variable Specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/sdk-environment-variables.md#jaeger-exporter) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go deleted file mode 100644 index a050020bb4..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" - -import ( - "context" - "fmt" - "io" - "net" - "strings" - "time" - - "github.com/go-logr/logr" - - genAgent "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent" - gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" - "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" -) - -const ( - // udpPacketMaxLength is the max size of UDP packet we want to send, synced with jaeger-agent. - udpPacketMaxLength = 65000 - // emitBatchOverhead is the additional overhead bytes used for enveloping the datagram, - // synced with jaeger-agent https://github.com/jaegertracing/jaeger-client-go/blob/master/transport_udp.go#L37 - emitBatchOverhead = 70 -) - -// agentClientUDP is a UDP client to Jaeger agent that implements gen.Agent interface. -type agentClientUDP struct { - genAgent.Agent - io.Closer - - connUDP udpConn - client *genAgent.AgentClient - maxPacketSize int // max size of datagram in bytes - thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span - thriftProtocol thrift.TProtocol -} - -type udpConn interface { - Write([]byte) (int, error) - SetWriteBuffer(int) error - Close() error -} - -type agentClientUDPParams struct { - Host string - Port string - MaxPacketSize int - Logger logr.Logger - AttemptReconnecting bool - AttemptReconnectInterval time.Duration -} - -// newAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP. 
-func newAgentClientUDP(params agentClientUDPParams) (*agentClientUDP, error) { - hostPort := net.JoinHostPort(params.Host, params.Port) - // validate hostport - if _, _, err := net.SplitHostPort(hostPort); err != nil { - return nil, err - } - - if params.MaxPacketSize <= 0 || params.MaxPacketSize > udpPacketMaxLength { - params.MaxPacketSize = udpPacketMaxLength - } - - if params.AttemptReconnecting && params.AttemptReconnectInterval <= 0 { - params.AttemptReconnectInterval = time.Second * 30 - } - - thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize) - protocolFactory := thrift.NewTCompactProtocolFactoryConf(&thrift.TConfiguration{}) - thriftProtocol := protocolFactory.GetProtocol(thriftBuffer) - client := genAgent.NewAgentClientFactory(thriftBuffer, protocolFactory) - - var connUDP udpConn - var err error - - if params.AttemptReconnecting { - // host is hostname, setup resolver loop in case host record changes during operation - connUDP, err = newReconnectingUDPConn(hostPort, params.MaxPacketSize, params.AttemptReconnectInterval, net.ResolveUDPAddr, net.DialUDP, params.Logger) - if err != nil { - return nil, err - } - } else { - destAddr, err := net.ResolveUDPAddr("udp", hostPort) - if err != nil { - return nil, err - } - - connUDP, err = net.DialUDP(destAddr.Network(), nil, destAddr) - if err != nil { - return nil, err - } - } - - if err := connUDP.SetWriteBuffer(params.MaxPacketSize); err != nil { - return nil, err - } - - return &agentClientUDP{ - connUDP: connUDP, - client: client, - maxPacketSize: params.MaxPacketSize, - thriftBuffer: thriftBuffer, - thriftProtocol: thriftProtocol, - }, nil -} - -// EmitBatch buffers batch to fit into UDP packets and sends the data to the agent. -func (a *agentClientUDP) EmitBatch(ctx context.Context, batch *gen.Batch) error { - var errs []error - processSize, err := a.calcSizeOfSerializedThrift(ctx, batch.Process) - if err != nil { - // drop the batch if serialization of process fails. - return err - } - - maxPacketSize := a.maxPacketSize - if maxPacketSize > udpPacketMaxLength-emitBatchOverhead { - maxPacketSize = udpPacketMaxLength - emitBatchOverhead - } - totalSize := processSize - var spans []*gen.Span - for _, span := range batch.Spans { - spanSize, err := a.calcSizeOfSerializedThrift(ctx, span) - if err != nil { - errs = append(errs, fmt.Errorf("thrift serialization failed: %v", span)) - continue - } - if spanSize+processSize >= maxPacketSize { - // drop the span that exceeds the limit. - errs = append(errs, fmt.Errorf("span too large to send: %v", span)) - continue - } - if totalSize+spanSize >= maxPacketSize { - if err := a.flush(ctx, &gen.Batch{ - Process: batch.Process, - Spans: spans, - }); err != nil { - errs = append(errs, err) - } - spans = spans[:0] - totalSize = processSize - } - totalSize += spanSize - spans = append(spans, span) - } - - if len(spans) > 0 { - if err := a.flush(ctx, &gen.Batch{ - Process: batch.Process, - Spans: spans, - }); err != nil { - errs = append(errs, err) - } - } - - if len(errs) == 1 { - return errs[0] - } else if len(errs) > 1 { - joined := a.makeJoinedErrorString(errs) - return fmt.Errorf("multiple errors during transform: %s", joined) - } - return nil -} - -// makeJoinedErrorString join all the errors to one error message. -func (a *agentClientUDP) makeJoinedErrorString(errs []error) string { - var errMsgs []string - for _, err := range errs { - errMsgs = append(errMsgs, err.Error()) - } - return strings.Join(errMsgs, ", ") -} - -// flush will send the batch of spans to the agent. 
-func (a *agentClientUDP) flush(ctx context.Context, batch *gen.Batch) error { - a.thriftBuffer.Reset() - if err := a.client.EmitBatch(ctx, batch); err != nil { - return err - } - if a.thriftBuffer.Len() > a.maxPacketSize { - return fmt.Errorf("data does not fit within one UDP packet; size %d, max %d, spans %d", - a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans)) - } - _, err := a.connUDP.Write(a.thriftBuffer.Bytes()) - return err -} - -// calcSizeOfSerializedThrift calculate the serialized thrift packet size. -func (a *agentClientUDP) calcSizeOfSerializedThrift(ctx context.Context, thriftStruct thrift.TStruct) (int, error) { - a.thriftBuffer.Reset() - err := thriftStruct.Write(ctx, a.thriftProtocol) - return a.thriftBuffer.Len(), err -} - -// Close implements Close() of io.Closer and closes the underlying UDP connection. -func (a *agentClientUDP) Close() error { - return a.connUDP.Close() -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go deleted file mode 100644 index a735965411..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package jaeger contains an OpenTelemetry tracing exporter for Jaeger. -// -// Deprecated: This module is no longer supported. -// OpenTelemetry dropped support for Jaeger exporter in July 2023. -// Jaeger officially accepts and recommends using OTLP. -// Use [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp] -// or [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc] instead. -package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go deleted file mode 100644 index 460fb5e135..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" - -import ( - "os" -) - -// Environment variable names. -const ( - // Hostname for the Jaeger agent, part of address where exporter sends spans - // i.e. "localhost". - envAgentHost = "OTEL_EXPORTER_JAEGER_AGENT_HOST" - // Port for the Jaeger agent, part of address where exporter sends spans - // i.e. 6831. 
- envAgentPort = "OTEL_EXPORTER_JAEGER_AGENT_PORT" - // The HTTP endpoint for sending spans directly to a collector, - // i.e. http://jaeger-collector:14268/api/traces. - envEndpoint = "OTEL_EXPORTER_JAEGER_ENDPOINT" - // Username to send as part of "Basic" authentication to the collector endpoint. - envUser = "OTEL_EXPORTER_JAEGER_USER" - // Password to send as part of "Basic" authentication to the collector endpoint. - envPassword = "OTEL_EXPORTER_JAEGER_PASSWORD" -) - -// envOr returns an env variable's value if it is exists or the default if not. -func envOr(key, defaultValue string) string { - if v := os.Getenv(key); v != "" { - return v - } - return defaultValue -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go deleted file mode 100644 index 54cd3b0867..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package agent - -var GoUnusedProtection__ int; - diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go deleted file mode 100644 index 3b96e3222e..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package agent - -import ( - "bytes" - "context" - "fmt" - "time" - - "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" - "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore" - "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - -var _ = jaeger.GoUnusedProtection__ -var _ = zipkincore.GoUnusedProtection__ - -func init() { -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go deleted file mode 100644 index c7c8e9ca3e..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go +++ /dev/null @@ -1,412 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package agent - -import ( - "bytes" - "context" - "fmt" - "time" - - "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" - "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore" - "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - -var _ = jaeger.GoUnusedProtection__ -var _ = zipkincore.GoUnusedProtection__ - -type Agent interface { - // Parameters: - // - Spans - EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) - // Parameters: - // - Batch - EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) -} - -type AgentClient struct { - c thrift.TClient - meta thrift.ResponseMeta -} - -func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient { - return &AgentClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient { - return &AgentClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewAgentClient(c thrift.TClient) *AgentClient { - return &AgentClient{ - c: c, - } -} - -func (p *AgentClient) Client_() thrift.TClient { - return p.c -} - -func (p *AgentClient) LastResponseMeta_() thrift.ResponseMeta { - return p.meta -} - -func (p *AgentClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { - p.meta = meta -} - -// Parameters: -// - Spans -func (p *AgentClient) EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) { - var _args0 AgentEmitZipkinBatchArgs - _args0.Spans = spans - p.SetLastResponseMeta_(thrift.ResponseMeta{}) - if _, err := p.Client_().Call(ctx, "emitZipkinBatch", &_args0, nil); err != nil { - return err - } - return nil -} - -// Parameters: -// - Batch -func (p *AgentClient) EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) { - var _args1 AgentEmitBatchArgs - _args1.Batch = batch - p.SetLastResponseMeta_(thrift.ResponseMeta{}) - if _, err := p.Client_().Call(ctx, "emitBatch", &_args1, nil); err != nil { - return err - } - return nil -} - -type AgentProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler Agent -} - -func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewAgentProcessor(handler Agent) *AgentProcessor { - - self2 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self2.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler} - self2.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler} - return self2 -} - -func (p *AgentProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) - if err2 != nil { - return false, thrift.WrapTException(err2) - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(ctx, thrift.STRUCT) - iprot.ReadMessageEnd(ctx) - x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) - x3.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, x3 - -} - -type agentProcessorEmitZipkinBatch struct { - handler Agent -} - -func (p 
*agentProcessorEmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AgentEmitZipkinBatchArgs{} - var err2 error - if err2 = args.Read(ctx, iprot); err2 != nil { - iprot.ReadMessageEnd(ctx) - return false, thrift.WrapTException(err2) - } - iprot.ReadMessageEnd(ctx) - - tickerCancel := func() {} - _ = tickerCancel - - if err2 = p.handler.EmitZipkinBatch(ctx, args.Spans); err2 != nil { - tickerCancel() - return true, thrift.WrapTException(err2) - } - tickerCancel() - return true, nil -} - -type agentProcessorEmitBatch struct { - handler Agent -} - -func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AgentEmitBatchArgs{} - var err2 error - if err2 = args.Read(ctx, iprot); err2 != nil { - iprot.ReadMessageEnd(ctx) - return false, thrift.WrapTException(err2) - } - iprot.ReadMessageEnd(ctx) - - tickerCancel := func() {} - _ = tickerCancel - - if err2 = p.handler.EmitBatch(ctx, args.Batch); err2 != nil { - tickerCancel() - return true, thrift.WrapTException(err2) - } - tickerCancel() - return true, nil -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Spans -type AgentEmitZipkinBatchArgs struct { - Spans []*zipkincore.Span `thrift:"spans,1" db:"spans" json:"spans"` -} - -func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs { - return &AgentEmitZipkinBatchArgs{} -} - -func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span { - return p.Spans -} -func (p *AgentEmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AgentEmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*zipkincore.Span, 0, size) - p.Spans = tSlice - for i := 0; i < size; i++ { - _elem4 := &zipkincore.Span{} - if err := _elem4.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) - } - p.Spans = append(p.Spans, _elem4) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *AgentEmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "emitZipkinBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - 
return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AgentEmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Spans { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) - } - return err -} - -func (p *AgentEmitZipkinBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p) -} - -// Attributes: -// - Batch -type AgentEmitBatchArgs struct { - Batch *jaeger.Batch `thrift:"batch,1" db:"batch" json:"batch"` -} - -func NewAgentEmitBatchArgs() *AgentEmitBatchArgs { - return &AgentEmitBatchArgs{} -} - -var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch - -func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch { - if !p.IsSetBatch() { - return AgentEmitBatchArgs_Batch_DEFAULT - } - return p.Batch -} -func (p *AgentEmitBatchArgs) IsSetBatch() bool { - return p.Batch != nil -} - -func (p *AgentEmitBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - p.Batch = &jaeger.Batch{} - if err := p.Batch.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "emitBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - 
return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AgentEmitBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "batch", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) - } - if err := p.Batch.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) - } - return err -} - -func (p *AgentEmitBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go deleted file mode 100644 index fe45a9f9ad..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package jaeger - -var GoUnusedProtection__ int; - diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go deleted file mode 100644 index 10162857fb..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go +++ /dev/null @@ -1,22 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package jaeger - -import ( - "bytes" - "context" - "fmt" - "time" - - "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - -func init() { -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go deleted file mode 100644 index b1fe26c57d..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go +++ /dev/null @@ -1,3022 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package jaeger - -import ( - "bytes" - "context" - "database/sql/driver" - "errors" - "fmt" - "time" - - "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - -type TagType int64 - -const ( - TagType_STRING TagType = 0 - TagType_DOUBLE TagType = 1 - TagType_BOOL TagType = 2 - TagType_LONG TagType = 3 - TagType_BINARY TagType = 4 -) - -func (p TagType) String() string { - switch p { - case TagType_STRING: - return "STRING" - case TagType_DOUBLE: - return "DOUBLE" - case TagType_BOOL: - return "BOOL" - case TagType_LONG: - return "LONG" - case TagType_BINARY: - return "BINARY" - } - return "" -} - -func TagTypeFromString(s string) (TagType, error) { - switch s { - case "STRING": - return TagType_STRING, nil - case "DOUBLE": - return TagType_DOUBLE, nil - case "BOOL": - return TagType_BOOL, nil - case "LONG": - return TagType_LONG, nil - case "BINARY": - return TagType_BINARY, nil - } - return TagType(0), fmt.Errorf("not a valid TagType string") -} - -func TagTypePtr(v TagType) *TagType { return &v } - -func (p TagType) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *TagType) UnmarshalText(text []byte) error { - q, err := TagTypeFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -func (p *TagType) Scan(value interface{}) error { - v, ok := value.(int64) - if !ok { - return errors.New("Scan value is not int64") - } - *p = TagType(v) - return nil -} - -func (p *TagType) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } - return int64(*p), nil -} - -type SpanRefType int64 - -const ( - SpanRefType_CHILD_OF SpanRefType = 0 - SpanRefType_FOLLOWS_FROM SpanRefType = 1 -) - -func (p SpanRefType) String() string { - switch p { - case SpanRefType_CHILD_OF: - return "CHILD_OF" - case SpanRefType_FOLLOWS_FROM: - return "FOLLOWS_FROM" - } - return "" -} - -func SpanRefTypeFromString(s string) (SpanRefType, error) { - switch s { - case "CHILD_OF": - return SpanRefType_CHILD_OF, nil - case "FOLLOWS_FROM": - return SpanRefType_FOLLOWS_FROM, nil - } - return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string") -} - -func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v } - -func (p SpanRefType) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *SpanRefType) UnmarshalText(text []byte) error { - q, err := SpanRefTypeFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -func (p *SpanRefType) Scan(value interface{}) error { - v, ok := value.(int64) - if !ok { - return errors.New("Scan value is not int64") - } - *p = SpanRefType(v) - return nil -} - -func (p *SpanRefType) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } - return int64(*p), nil -} - -// Attributes: -// - Key -// - VType -// - VStr -// - VDouble -// - VBool -// - VLong -// - VBinary -type Tag struct { - Key string `thrift:"key,1,required" db:"key" json:"key"` - VType TagType `thrift:"vType,2,required" db:"vType" json:"vType"` - VStr *string `thrift:"vStr,3" db:"vStr" json:"vStr,omitempty"` - VDouble *float64 `thrift:"vDouble,4" db:"vDouble" json:"vDouble,omitempty"` - VBool *bool `thrift:"vBool,5" db:"vBool" json:"vBool,omitempty"` - VLong *int64 `thrift:"vLong,6" db:"vLong" json:"vLong,omitempty"` - VBinary []byte `thrift:"vBinary,7" db:"vBinary" json:"vBinary,omitempty"` -} - -func NewTag() *Tag { - return &Tag{} -} - -func (p *Tag) GetKey() string { - return p.Key -} - -func (p *Tag) GetVType() TagType { - return p.VType -} - -var Tag_VStr_DEFAULT string - -func (p *Tag) GetVStr() string { - if !p.IsSetVStr() { - 
return Tag_VStr_DEFAULT - } - return *p.VStr -} - -var Tag_VDouble_DEFAULT float64 - -func (p *Tag) GetVDouble() float64 { - if !p.IsSetVDouble() { - return Tag_VDouble_DEFAULT - } - return *p.VDouble -} - -var Tag_VBool_DEFAULT bool - -func (p *Tag) GetVBool() bool { - if !p.IsSetVBool() { - return Tag_VBool_DEFAULT - } - return *p.VBool -} - -var Tag_VLong_DEFAULT int64 - -func (p *Tag) GetVLong() int64 { - if !p.IsSetVLong() { - return Tag_VLong_DEFAULT - } - return *p.VLong -} - -var Tag_VBinary_DEFAULT []byte - -func (p *Tag) GetVBinary() []byte { - return p.VBinary -} -func (p *Tag) IsSetVStr() bool { - return p.VStr != nil -} - -func (p *Tag) IsSetVDouble() bool { - return p.VDouble != nil -} - -func (p *Tag) IsSetVBool() bool { - return p.VBool != nil -} - -func (p *Tag) IsSetVLong() bool { - return p.VLong != nil -} - -func (p *Tag) IsSetVBinary() bool { - return p.VBinary != nil -} - -func (p *Tag) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetKey bool = false - var issetVType bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetKey = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetVType = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.DOUBLE { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField5(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.I64 { - if err := p.ReadField6(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 7: - if fieldTypeId == thrift.STRING { - if err := p.ReadField7(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetKey { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set")) - } - if !issetVType { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set")) - } - return nil -} - -func (p *Tag) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != 
nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Key = v - } - return nil -} - -func (p *Tag) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - temp := TagType(v) - p.VType = temp - } - return nil -} - -func (p *Tag) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.VStr = &v - } - return nil -} - -func (p *Tag) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.VDouble = &v - } - return nil -} - -func (p *Tag) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.VBool = &v - } - return nil -} - -func (p *Tag) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 6: ", err) - } else { - p.VLong = &v - } - return nil -} - -func (p *Tag) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 7: ", err) - } else { - p.VBinary = v - } - return nil -} - -func (p *Tag) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Tag"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - if err := p.writeField3(ctx, oprot); err != nil { - return err - } - if err := p.writeField4(ctx, oprot); err != nil { - return err - } - if err := p.writeField5(ctx, oprot); err != nil { - return err - } - if err := p.writeField6(ctx, oprot); err != nil { - return err - } - if err := p.writeField7(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Tag) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) - } - if err := oprot.WriteString(ctx, string(p.Key)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) - } - return err -} - -func (p *Tag) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "vType", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err) - } - if err := oprot.WriteI32(ctx, int32(p.VType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err) - } - if err := 
oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err) - } - return err -} - -func (p *Tag) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetVStr() { - if err := oprot.WriteFieldBegin(ctx, "vStr", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err) - } - if err := oprot.WriteString(ctx, string(*p.VStr)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err) - } - } - return err -} - -func (p *Tag) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetVDouble() { - if err := oprot.WriteFieldBegin(ctx, "vDouble", thrift.DOUBLE, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err) - } - if err := oprot.WriteDouble(ctx, float64(*p.VDouble)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err) - } - } - return err -} - -func (p *Tag) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetVBool() { - if err := oprot.WriteFieldBegin(ctx, "vBool", thrift.BOOL, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err) - } - if err := oprot.WriteBool(ctx, bool(*p.VBool)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err) - } - } - return err -} - -func (p *Tag) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetVLong() { - if err := oprot.WriteFieldBegin(ctx, "vLong", thrift.I64, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(*p.VLong)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err) - } - } - return err -} - -func (p *Tag) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetVBinary() { - if err := oprot.WriteFieldBegin(ctx, "vBinary", thrift.STRING, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err) - } - if err := oprot.WriteBinary(ctx, p.VBinary); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err) - } - } - return err -} - -func (p *Tag) Equals(other *Tag) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Key != other.Key { - return false - } - if p.VType != other.VType { - return false - } - if p.VStr != other.VStr { - if p.VStr == nil || other.VStr == nil { - return false - } - if (*p.VStr) != (*other.VStr) { - return false - } - } - if p.VDouble != 
other.VDouble { - if p.VDouble == nil || other.VDouble == nil { - return false - } - if (*p.VDouble) != (*other.VDouble) { - return false - } - } - if p.VBool != other.VBool { - if p.VBool == nil || other.VBool == nil { - return false - } - if (*p.VBool) != (*other.VBool) { - return false - } - } - if p.VLong != other.VLong { - if p.VLong == nil || other.VLong == nil { - return false - } - if (*p.VLong) != (*other.VLong) { - return false - } - } - if bytes.Compare(p.VBinary, other.VBinary) != 0 { - return false - } - return true -} - -func (p *Tag) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Tag(%+v)", *p) -} - -// Attributes: -// - Timestamp -// - Fields -type Log struct { - Timestamp int64 `thrift:"timestamp,1,required" db:"timestamp" json:"timestamp"` - Fields []*Tag `thrift:"fields,2,required" db:"fields" json:"fields"` -} - -func NewLog() *Log { - return &Log{} -} - -func (p *Log) GetTimestamp() int64 { - return p.Timestamp -} - -func (p *Log) GetFields() []*Tag { - return p.Fields -} -func (p *Log) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTimestamp bool = false - var issetFields bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetTimestamp = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetFields = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTimestamp { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set")) - } - if !issetFields { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set")) - } - return nil -} - -func (p *Log) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Timestamp = v - } - return nil -} - -func (p *Log) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Tag, 0, size) - p.Fields = tSlice - for i := 0; i < size; i++ { - _elem0 := &Tag{} - if err := _elem0.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.Fields = append(p.Fields, _elem0) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Log) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := 
oprot.WriteStructBegin(ctx, "Log"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Log) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) - } - return err -} - -func (p *Log) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "fields", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Fields)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Fields { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err) - } - return err -} - -func (p *Log) Equals(other *Log) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Timestamp != other.Timestamp { - return false - } - if len(p.Fields) != len(other.Fields) { - return false - } - for i, _tgt := range p.Fields { - _src1 := other.Fields[i] - if !_tgt.Equals(_src1) { - return false - } - } - return true -} - -func (p *Log) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Log(%+v)", *p) -} - -// Attributes: -// - RefType -// - TraceIdLow -// - TraceIdHigh -// - SpanId -type SpanRef struct { - RefType SpanRefType `thrift:"refType,1,required" db:"refType" json:"refType"` - TraceIdLow int64 `thrift:"traceIdLow,2,required" db:"traceIdLow" json:"traceIdLow"` - TraceIdHigh int64 `thrift:"traceIdHigh,3,required" db:"traceIdHigh" json:"traceIdHigh"` - SpanId int64 `thrift:"spanId,4,required" db:"spanId" json:"spanId"` -} - -func NewSpanRef() *SpanRef { - return &SpanRef{} -} - -func (p *SpanRef) GetRefType() SpanRefType { - return p.RefType -} - -func (p *SpanRef) GetTraceIdLow() int64 { - return p.TraceIdLow -} - -func (p *SpanRef) GetTraceIdHigh() int64 { - return p.TraceIdHigh -} - -func (p *SpanRef) GetSpanId() int64 { - return p.SpanId -} -func (p *SpanRef) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetRefType bool = false - var issetTraceIdLow bool = false - var issetTraceIdHigh bool = false - var 
issetSpanId bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetRefType = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetTraceIdLow = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - issetTraceIdHigh = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - issetSpanId = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetRefType { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set")) - } - if !issetTraceIdLow { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")) - } - if !issetTraceIdHigh { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")) - } - if !issetSpanId { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set")) - } - return nil -} - -func (p *SpanRef) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - temp := SpanRefType(v) - p.RefType = temp - } - return nil -} - -func (p *SpanRef) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.TraceIdLow = v - } - return nil -} - -func (p *SpanRef) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.TraceIdHigh = v - } - return nil -} - -func (p *SpanRef) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.SpanId = v - } - return nil -} - -func (p *SpanRef) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "SpanRef"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - if err := p.writeField3(ctx, oprot); err != nil { - return err - } - if err := p.writeField4(ctx, oprot); err != nil { - return 
err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *SpanRef) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "refType", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err) - } - if err := oprot.WriteI32(ctx, int32(p.RefType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err) - } - return err -} - -func (p *SpanRef) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err) - } - return err -} - -func (p *SpanRef) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err) - } - return err -} - -func (p *SpanRef) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err) - } - return err -} - -func (p *SpanRef) Equals(other *SpanRef) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.RefType != other.RefType { - return false - } - if p.TraceIdLow != other.TraceIdLow { - return false - } - if p.TraceIdHigh != other.TraceIdHigh { - return false - } - if p.SpanId != other.SpanId { - return false - } - return true -} - -func (p *SpanRef) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SpanRef(%+v)", *p) -} - -// Attributes: -// - TraceIdLow -// - TraceIdHigh -// - SpanId -// - ParentSpanId -// - OperationName -// - References -// - Flags -// - StartTime -// - Duration -// - Tags -// - Logs -type Span struct { - TraceIdLow int64 `thrift:"traceIdLow,1,required" db:"traceIdLow" json:"traceIdLow"` - TraceIdHigh int64 `thrift:"traceIdHigh,2,required" db:"traceIdHigh" json:"traceIdHigh"` - SpanId int64 `thrift:"spanId,3,required" 
db:"spanId" json:"spanId"` - ParentSpanId int64 `thrift:"parentSpanId,4,required" db:"parentSpanId" json:"parentSpanId"` - OperationName string `thrift:"operationName,5,required" db:"operationName" json:"operationName"` - References []*SpanRef `thrift:"references,6" db:"references" json:"references,omitempty"` - Flags int32 `thrift:"flags,7,required" db:"flags" json:"flags"` - StartTime int64 `thrift:"startTime,8,required" db:"startTime" json:"startTime"` - Duration int64 `thrift:"duration,9,required" db:"duration" json:"duration"` - Tags []*Tag `thrift:"tags,10" db:"tags" json:"tags,omitempty"` - Logs []*Log `thrift:"logs,11" db:"logs" json:"logs,omitempty"` -} - -func NewSpan() *Span { - return &Span{} -} - -func (p *Span) GetTraceIdLow() int64 { - return p.TraceIdLow -} - -func (p *Span) GetTraceIdHigh() int64 { - return p.TraceIdHigh -} - -func (p *Span) GetSpanId() int64 { - return p.SpanId -} - -func (p *Span) GetParentSpanId() int64 { - return p.ParentSpanId -} - -func (p *Span) GetOperationName() string { - return p.OperationName -} - -var Span_References_DEFAULT []*SpanRef - -func (p *Span) GetReferences() []*SpanRef { - return p.References -} - -func (p *Span) GetFlags() int32 { - return p.Flags -} - -func (p *Span) GetStartTime() int64 { - return p.StartTime -} - -func (p *Span) GetDuration() int64 { - return p.Duration -} - -var Span_Tags_DEFAULT []*Tag - -func (p *Span) GetTags() []*Tag { - return p.Tags -} - -var Span_Logs_DEFAULT []*Log - -func (p *Span) GetLogs() []*Log { - return p.Logs -} -func (p *Span) IsSetReferences() bool { - return p.References != nil -} - -func (p *Span) IsSetTags() bool { - return p.Tags != nil -} - -func (p *Span) IsSetLogs() bool { - return p.Logs != nil -} - -func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTraceIdLow bool = false - var issetTraceIdHigh bool = false - var issetSpanId bool = false - var issetParentSpanId bool = false - var issetOperationName bool = false - var issetFlags bool = false - var issetStartTime bool = false - var issetDuration bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetTraceIdLow = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetTraceIdHigh = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - issetSpanId = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - issetParentSpanId = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.STRING { - if err := p.ReadField5(ctx, iprot); err != nil { - return err - } - issetOperationName = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { 
- return err - } - } - case 6: - if fieldTypeId == thrift.LIST { - if err := p.ReadField6(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 7: - if fieldTypeId == thrift.I32 { - if err := p.ReadField7(ctx, iprot); err != nil { - return err - } - issetFlags = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 8: - if fieldTypeId == thrift.I64 { - if err := p.ReadField8(ctx, iprot); err != nil { - return err - } - issetStartTime = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 9: - if fieldTypeId == thrift.I64 { - if err := p.ReadField9(ctx, iprot); err != nil { - return err - } - issetDuration = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 10: - if fieldTypeId == thrift.LIST { - if err := p.ReadField10(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 11: - if fieldTypeId == thrift.LIST { - if err := p.ReadField11(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTraceIdLow { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")) - } - if !issetTraceIdHigh { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")) - } - if !issetSpanId { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set")) - } - if !issetParentSpanId { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set")) - } - if !issetOperationName { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set")) - } - if !issetFlags { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set")) - } - if !issetStartTime { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set")) - } - if !issetDuration { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set")) - } - return nil -} - -func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.TraceIdLow = v - } - return nil -} - -func (p *Span) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.TraceIdHigh = v - } - return nil -} - -func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.SpanId = v - } - return nil -} - -func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) 
error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.ParentSpanId = v - } - return nil -} - -func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.OperationName = v - } - return nil -} - -func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*SpanRef, 0, size) - p.References = tSlice - for i := 0; i < size; i++ { - _elem2 := &SpanRef{} - if err := _elem2.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) - } - p.References = append(p.References, _elem2) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 7: ", err) - } else { - p.Flags = v - } - return nil -} - -func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 8: ", err) - } else { - p.StartTime = v - } - return nil -} - -func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 9: ", err) - } else { - p.Duration = v - } - return nil -} - -func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Tag, 0, size) - p.Tags = tSlice - for i := 0; i < size; i++ { - _elem3 := &Tag{} - if err := _elem3.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) - } - p.Tags = append(p.Tags, _elem3) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Log, 0, size) - p.Logs = tSlice - for i := 0; i < size; i++ { - _elem4 := &Log{} - if err := _elem4.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) - } - p.Logs = append(p.Logs, _elem4) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Span"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - if err := p.writeField3(ctx, oprot); err != nil { - return err - } - if err := p.writeField4(ctx, oprot); err != nil { - return err - } - if err := 
p.writeField5(ctx, oprot); err != nil { - return err - } - if err := p.writeField6(ctx, oprot); err != nil { - return err - } - if err := p.writeField7(ctx, oprot); err != nil { - return err - } - if err := p.writeField8(ctx, oprot); err != nil { - return err - } - if err := p.writeField9(ctx, oprot); err != nil { - return err - } - if err := p.writeField10(ctx, oprot); err != nil { - return err - } - if err := p.writeField11(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err) - } - return err -} - -func (p *Span) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err) - } - return err -} - -func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err) - } - return err -} - -func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "parentSpanId", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.ParentSpanId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err) - } - return err -} - -func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "operationName", thrift.STRING, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err) - } - if err := oprot.WriteString(ctx, string(p.OperationName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), 
err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err) - } - return err -} - -func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetReferences() { - if err := oprot.WriteFieldBegin(ctx, "references", thrift.LIST, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.References)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.References { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err) - } - } - return err -} - -func (p *Span) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "flags", thrift.I32, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err) - } - if err := oprot.WriteI32(ctx, int32(p.Flags)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err) - } - return err -} - -func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "startTime", thrift.I64, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.StartTime)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err) - } - return err -} - -func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.Duration)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err) - } - return err -} - -func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetTags() { - if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 10); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Tags { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := 
oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err) - } - } - return err -} - -func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetLogs() { - if err := oprot.WriteFieldBegin(ctx, "logs", thrift.LIST, 11); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Logs)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Logs { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err) - } - } - return err -} - -func (p *Span) Equals(other *Span) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.TraceIdLow != other.TraceIdLow { - return false - } - if p.TraceIdHigh != other.TraceIdHigh { - return false - } - if p.SpanId != other.SpanId { - return false - } - if p.ParentSpanId != other.ParentSpanId { - return false - } - if p.OperationName != other.OperationName { - return false - } - if len(p.References) != len(other.References) { - return false - } - for i, _tgt := range p.References { - _src5 := other.References[i] - if !_tgt.Equals(_src5) { - return false - } - } - if p.Flags != other.Flags { - return false - } - if p.StartTime != other.StartTime { - return false - } - if p.Duration != other.Duration { - return false - } - if len(p.Tags) != len(other.Tags) { - return false - } - for i, _tgt := range p.Tags { - _src6 := other.Tags[i] - if !_tgt.Equals(_src6) { - return false - } - } - if len(p.Logs) != len(other.Logs) { - return false - } - for i, _tgt := range p.Logs { - _src7 := other.Logs[i] - if !_tgt.Equals(_src7) { - return false - } - } - return true -} - -func (p *Span) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Span(%+v)", *p) -} - -// Attributes: -// - ServiceName -// - Tags -type Process struct { - ServiceName string `thrift:"serviceName,1,required" db:"serviceName" json:"serviceName"` - Tags []*Tag `thrift:"tags,2" db:"tags" json:"tags,omitempty"` -} - -func NewProcess() *Process { - return &Process{} -} - -func (p *Process) GetServiceName() string { - return p.ServiceName -} - -var Process_Tags_DEFAULT []*Tag - -func (p *Process) GetTags() []*Tag { - return p.Tags -} -func (p *Process) IsSetTags() bool { - return p.Tags != nil -} - -func (p *Process) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetServiceName bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetServiceName = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := 
p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetServiceName { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set")) - } - return nil -} - -func (p *Process) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.ServiceName = v - } - return nil -} - -func (p *Process) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Tag, 0, size) - p.Tags = tSlice - for i := 0; i < size; i++ { - _elem8 := &Tag{} - if err := _elem8.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem8), err) - } - p.Tags = append(p.Tags, _elem8) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Process) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Process"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Process) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) - } - if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) - } - return err -} - -func (p *Process) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetTags() { - if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Tags { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 
2:tags: ", p), err) - } - } - return err -} - -func (p *Process) Equals(other *Process) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.ServiceName != other.ServiceName { - return false - } - if len(p.Tags) != len(other.Tags) { - return false - } - for i, _tgt := range p.Tags { - _src9 := other.Tags[i] - if !_tgt.Equals(_src9) { - return false - } - } - return true -} - -func (p *Process) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Process(%+v)", *p) -} - -// Attributes: -// - FullQueueDroppedSpans -// - TooLargeDroppedSpans -// - FailedToEmitSpans -type ClientStats struct { - FullQueueDroppedSpans int64 `thrift:"fullQueueDroppedSpans,1,required" db:"fullQueueDroppedSpans" json:"fullQueueDroppedSpans"` - TooLargeDroppedSpans int64 `thrift:"tooLargeDroppedSpans,2,required" db:"tooLargeDroppedSpans" json:"tooLargeDroppedSpans"` - FailedToEmitSpans int64 `thrift:"failedToEmitSpans,3,required" db:"failedToEmitSpans" json:"failedToEmitSpans"` -} - -func NewClientStats() *ClientStats { - return &ClientStats{} -} - -func (p *ClientStats) GetFullQueueDroppedSpans() int64 { - return p.FullQueueDroppedSpans -} - -func (p *ClientStats) GetTooLargeDroppedSpans() int64 { - return p.TooLargeDroppedSpans -} - -func (p *ClientStats) GetFailedToEmitSpans() int64 { - return p.FailedToEmitSpans -} -func (p *ClientStats) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetFullQueueDroppedSpans bool = false - var issetTooLargeDroppedSpans bool = false - var issetFailedToEmitSpans bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetFullQueueDroppedSpans = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetTooLargeDroppedSpans = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - issetFailedToEmitSpans = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetFullQueueDroppedSpans { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FullQueueDroppedSpans is not set")) - } - if !issetTooLargeDroppedSpans { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TooLargeDroppedSpans is not set")) - } - if !issetFailedToEmitSpans { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FailedToEmitSpans is not set")) - } - return nil -} - -func (p *ClientStats) ReadField1(ctx context.Context, iprot 
thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.FullQueueDroppedSpans = v - } - return nil -} - -func (p *ClientStats) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.TooLargeDroppedSpans = v - } - return nil -} - -func (p *ClientStats) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.FailedToEmitSpans = v - } - return nil -} - -func (p *ClientStats) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "ClientStats"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - if err := p.writeField3(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ClientStats) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "fullQueueDroppedSpans", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fullQueueDroppedSpans: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.FullQueueDroppedSpans)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.fullQueueDroppedSpans (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fullQueueDroppedSpans: ", p), err) - } - return err -} - -func (p *ClientStats) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "tooLargeDroppedSpans", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tooLargeDroppedSpans: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.TooLargeDroppedSpans)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.tooLargeDroppedSpans (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tooLargeDroppedSpans: ", p), err) - } - return err -} - -func (p *ClientStats) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "failedToEmitSpans", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:failedToEmitSpans: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.FailedToEmitSpans)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.failedToEmitSpans (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:failedToEmitSpans: ", p), err) - } - return err -} - -func (p *ClientStats) Equals(other *ClientStats) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if 
p.FullQueueDroppedSpans != other.FullQueueDroppedSpans { - return false - } - if p.TooLargeDroppedSpans != other.TooLargeDroppedSpans { - return false - } - if p.FailedToEmitSpans != other.FailedToEmitSpans { - return false - } - return true -} - -func (p *ClientStats) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ClientStats(%+v)", *p) -} - -// Attributes: -// - Process -// - Spans -// - SeqNo -// - Stats -type Batch struct { - Process *Process `thrift:"process,1,required" db:"process" json:"process"` - Spans []*Span `thrift:"spans,2,required" db:"spans" json:"spans"` - SeqNo *int64 `thrift:"seqNo,3" db:"seqNo" json:"seqNo,omitempty"` - Stats *ClientStats `thrift:"stats,4" db:"stats" json:"stats,omitempty"` -} - -func NewBatch() *Batch { - return &Batch{} -} - -var Batch_Process_DEFAULT *Process - -func (p *Batch) GetProcess() *Process { - if !p.IsSetProcess() { - return Batch_Process_DEFAULT - } - return p.Process -} - -func (p *Batch) GetSpans() []*Span { - return p.Spans -} - -var Batch_SeqNo_DEFAULT int64 - -func (p *Batch) GetSeqNo() int64 { - if !p.IsSetSeqNo() { - return Batch_SeqNo_DEFAULT - } - return *p.SeqNo -} - -var Batch_Stats_DEFAULT *ClientStats - -func (p *Batch) GetStats() *ClientStats { - if !p.IsSetStats() { - return Batch_Stats_DEFAULT - } - return p.Stats -} -func (p *Batch) IsSetProcess() bool { - return p.Process != nil -} - -func (p *Batch) IsSetSeqNo() bool { - return p.SeqNo != nil -} - -func (p *Batch) IsSetStats() bool { - return p.Stats != nil -} - -func (p *Batch) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetProcess bool = false - var issetSpans bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetProcess = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetSpans = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetProcess { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set")) - } - if !issetSpans { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set")) - } - return nil -} - -func (p *Batch) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - 
p.Process = &Process{} - if err := p.Process.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err) - } - return nil -} - -func (p *Batch) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Span, 0, size) - p.Spans = tSlice - for i := 0; i < size; i++ { - _elem10 := &Span{} - if err := _elem10.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) - } - p.Spans = append(p.Spans, _elem10) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Batch) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.SeqNo = &v - } - return nil -} - -func (p *Batch) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - p.Stats = &ClientStats{} - if err := p.Stats.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Stats), err) - } - return nil -} - -func (p *Batch) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Batch"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - if err := p.writeField3(ctx, oprot); err != nil { - return err - } - if err := p.writeField4(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Batch) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "process", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err) - } - if err := p.Process.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err) - } - return err -} - -func (p *Batch) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Spans { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err) - } - return err -} - -func (p *Batch) writeField3(ctx 
context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetSeqNo() { - if err := oprot.WriteFieldBegin(ctx, "seqNo", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:seqNo: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(*p.SeqNo)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.seqNo (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:seqNo: ", p), err) - } - } - return err -} - -func (p *Batch) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetStats() { - if err := oprot.WriteFieldBegin(ctx, "stats", thrift.STRUCT, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stats: ", p), err) - } - if err := p.Stats.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Stats), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stats: ", p), err) - } - } - return err -} - -func (p *Batch) Equals(other *Batch) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if !p.Process.Equals(other.Process) { - return false - } - if len(p.Spans) != len(other.Spans) { - return false - } - for i, _tgt := range p.Spans { - _src11 := other.Spans[i] - if !_tgt.Equals(_src11) { - return false - } - } - if p.SeqNo != other.SeqNo { - if p.SeqNo == nil || other.SeqNo == nil { - return false - } - if (*p.SeqNo) != (*other.SeqNo) { - return false - } - } - if !p.Stats.Equals(other.Stats) { - return false - } - return true -} - -func (p *Batch) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Batch(%+v)", *p) -} - -// Attributes: -// - Ok -type BatchSubmitResponse struct { - Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"` -} - -func NewBatchSubmitResponse() *BatchSubmitResponse { - return &BatchSubmitResponse{} -} - -func (p *BatchSubmitResponse) GetOk() bool { - return p.Ok -} -func (p *BatchSubmitResponse) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetOk bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetOk = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetOk { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")) - } - return nil -} - -func (p *BatchSubmitResponse) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Ok = v - } - return nil -} - -func (p 
*BatchSubmitResponse) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "BatchSubmitResponse"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BatchSubmitResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) - } - if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) - } - return err -} - -func (p *BatchSubmitResponse) Equals(other *BatchSubmitResponse) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Ok != other.Ok { - return false - } - return true -} - -func (p *BatchSubmitResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BatchSubmitResponse(%+v)", *p) -} - -type Collector interface { - // Parameters: - // - Batches - SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) -} - -type CollectorClient struct { - c thrift.TClient - meta thrift.ResponseMeta -} - -func NewCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *CollectorClient { - return &CollectorClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -func NewCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *CollectorClient { - return &CollectorClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewCollectorClient(c thrift.TClient) *CollectorClient { - return &CollectorClient{ - c: c, - } -} - -func (p *CollectorClient) Client_() thrift.TClient { - return p.c -} - -func (p *CollectorClient) LastResponseMeta_() thrift.ResponseMeta { - return p.meta -} - -func (p *CollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { - p.meta = meta -} - -// Parameters: -// - Batches -func (p *CollectorClient) SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) { - var _args12 CollectorSubmitBatchesArgs - _args12.Batches = batches - var _result14 CollectorSubmitBatchesResult - var _meta13 thrift.ResponseMeta - _meta13, _err = p.Client_().Call(ctx, "submitBatches", &_args12, &_result14) - p.SetLastResponseMeta_(_meta13) - if _err != nil { - return - } - return _result14.GetSuccess(), nil -} - -type CollectorProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler Collector -} - -func (p *CollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *CollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *CollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return 
p.processorMap -} - -func NewCollectorProcessor(handler Collector) *CollectorProcessor { - - self15 := &CollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self15.processorMap["submitBatches"] = &collectorProcessorSubmitBatches{handler: handler} - return self15 -} - -func (p *CollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) - if err2 != nil { - return false, thrift.WrapTException(err2) - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(ctx, thrift.STRUCT) - iprot.ReadMessageEnd(ctx) - x16 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) - x16.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, x16 - -} - -type collectorProcessorSubmitBatches struct { - handler Collector -} - -func (p *collectorProcessorSubmitBatches) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := CollectorSubmitBatchesArgs{} - var err2 error - if err2 = args.Read(ctx, iprot); err2 != nil { - iprot.ReadMessageEnd(ctx) - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) - oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId) - x.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, thrift.WrapTException(err2) - } - iprot.ReadMessageEnd(ctx) - - tickerCancel := func() {} - // Start a goroutine to do server side connectivity check. - if thrift.ServerConnectivityCheckInterval > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithCancel(ctx) - defer cancel() - var tickerCtx context.Context - tickerCtx, tickerCancel = context.WithCancel(context.Background()) - defer tickerCancel() - go func(ctx context.Context, cancel context.CancelFunc) { - ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - if !iprot.Transport().IsOpen() { - cancel() - return - } - } - } - }(tickerCtx, cancel) - } - - result := CollectorSubmitBatchesResult{} - var retval []*BatchSubmitResponse - if retval, err2 = p.handler.SubmitBatches(ctx, args.Batches); err2 != nil { - tickerCancel() - if err2 == thrift.ErrAbandonRequest { - return false, thrift.WrapTException(err2) - } - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitBatches: "+err2.Error()) - oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId) - x.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return true, thrift.WrapTException(err2) - } else { - result.Success = retval - } - tickerCancel() - if err2 = oprot.WriteMessageBegin(ctx, "submitBatches", thrift.REPLY, seqId); err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = result.Write(ctx, oprot); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err != nil { - return - } - return true, err -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Batches -type CollectorSubmitBatchesArgs struct { - 
Batches []*Batch `thrift:"batches,1" db:"batches" json:"batches"` -} - -func NewCollectorSubmitBatchesArgs() *CollectorSubmitBatchesArgs { - return &CollectorSubmitBatchesArgs{} -} - -func (p *CollectorSubmitBatchesArgs) GetBatches() []*Batch { - return p.Batches -} -func (p *CollectorSubmitBatchesArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *CollectorSubmitBatchesArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Batch, 0, size) - p.Batches = tSlice - for i := 0; i < size; i++ { - _elem17 := &Batch{} - if err := _elem17.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem17), err) - } - p.Batches = append(p.Batches, _elem17) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *CollectorSubmitBatchesArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "submitBatches_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *CollectorSubmitBatchesArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "batches", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batches: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Batches)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Batches { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batches: ", p), err) - } - return err -} - -func (p *CollectorSubmitBatchesArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("CollectorSubmitBatchesArgs(%+v)", *p) -} - -// 
Attributes: -// - Success -type CollectorSubmitBatchesResult struct { - Success []*BatchSubmitResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewCollectorSubmitBatchesResult() *CollectorSubmitBatchesResult { - return &CollectorSubmitBatchesResult{} -} - -var CollectorSubmitBatchesResult_Success_DEFAULT []*BatchSubmitResponse - -func (p *CollectorSubmitBatchesResult) GetSuccess() []*BatchSubmitResponse { - return p.Success -} -func (p *CollectorSubmitBatchesResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *CollectorSubmitBatchesResult) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.LIST { - if err := p.ReadField0(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *CollectorSubmitBatchesResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*BatchSubmitResponse, 0, size) - p.Success = tSlice - for i := 0; i < size; i++ { - _elem18 := &BatchSubmitResponse{} - if err := _elem18.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem18), err) - } - p.Success = append(p.Success, _elem18) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *CollectorSubmitBatchesResult) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "submitBatches_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *CollectorSubmitBatchesResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Success { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing 
list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *CollectorSubmitBatchesResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("CollectorSubmitBatchesResult(%+v)", *p) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go deleted file mode 100644 index ebf43018fe..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package zipkincore - -var GoUnusedProtection__ int; - diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go deleted file mode 100644 index 043ecba962..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go +++ /dev/null @@ -1,39 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package zipkincore - -import ( - "bytes" - "context" - "fmt" - "time" - - "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - -const CLIENT_SEND = "cs" -const CLIENT_RECV = "cr" -const SERVER_SEND = "ss" -const SERVER_RECV = "sr" -const MESSAGE_SEND = "ms" -const MESSAGE_RECV = "mr" -const WIRE_SEND = "ws" -const WIRE_RECV = "wr" -const CLIENT_SEND_FRAGMENT = "csf" -const CLIENT_RECV_FRAGMENT = "crf" -const SERVER_SEND_FRAGMENT = "ssf" -const SERVER_RECV_FRAGMENT = "srf" -const LOCAL_COMPONENT = "lc" -const CLIENT_ADDR = "ca" -const SERVER_ADDR = "sa" -const MESSAGE_ADDR = "ma" - -func init() { -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go deleted file mode 100644 index 7f46810e0d..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go +++ /dev/null @@ -1,2067 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package zipkincore - -import ( - "bytes" - "context" - "database/sql/driver" - "errors" - "fmt" - "time" - - "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - -type AnnotationType int64 - -const ( - AnnotationType_BOOL AnnotationType = 0 - AnnotationType_BYTES AnnotationType = 1 - AnnotationType_I16 AnnotationType = 2 - AnnotationType_I32 AnnotationType = 3 - AnnotationType_I64 AnnotationType = 4 - AnnotationType_DOUBLE AnnotationType = 5 - AnnotationType_STRING AnnotationType = 6 -) - -func (p AnnotationType) String() string { - switch p { - case AnnotationType_BOOL: - return "BOOL" - case AnnotationType_BYTES: - return "BYTES" - case AnnotationType_I16: - return "I16" - case AnnotationType_I32: - return "I32" - case AnnotationType_I64: - return "I64" - case AnnotationType_DOUBLE: - return "DOUBLE" - case AnnotationType_STRING: - return "STRING" - } - return "" -} - -func AnnotationTypeFromString(s string) (AnnotationType, error) { - switch s { - case "BOOL": - return AnnotationType_BOOL, nil - case "BYTES": - return AnnotationType_BYTES, nil - case "I16": - return AnnotationType_I16, nil - case "I32": - return AnnotationType_I32, nil - case "I64": - return AnnotationType_I64, nil - case "DOUBLE": - return AnnotationType_DOUBLE, nil - case "STRING": - return AnnotationType_STRING, nil - } - return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string") -} - -func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v } - -func (p AnnotationType) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *AnnotationType) UnmarshalText(text []byte) error { - q, err := AnnotationTypeFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -func (p *AnnotationType) Scan(value interface{}) error { - v, ok := value.(int64) - if !ok { - return errors.New("Scan value is not int64") - } - *p = AnnotationType(v) - return nil -} - -func (p *AnnotationType) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } - return int64(*p), nil -} - -// Indicates the network context of a service recording an annotation with two -// exceptions. -// -// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR, -// the endpoint indicates the source or destination of an RPC. This exception -// allows zipkin to display network context of uninstrumented services, or -// clients such as web browsers. -// -// Attributes: -// - Ipv4: IPv4 host address packed into 4 bytes. -// -// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4 -// - Port: IPv4 port -// -// Note: this is to be treated as an unsigned integer, so watch for negatives. -// -// Conventionally, when the port isn't known, port = 0. -// - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web" -// -// Conventionally, when the service name isn't known, service_name = "unknown". -// - Ipv6: IPv6 host address packed into 16 bytes. 
Ex Inet6Address.getBytes() -type Endpoint struct { - Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"` - Port int16 `thrift:"port,2" db:"port" json:"port"` - ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"` - Ipv6 []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"` -} - -func NewEndpoint() *Endpoint { - return &Endpoint{} -} - -func (p *Endpoint) GetIpv4() int32 { - return p.Ipv4 -} - -func (p *Endpoint) GetPort() int16 { - return p.Port -} - -func (p *Endpoint) GetServiceName() string { - return p.ServiceName -} - -var Endpoint_Ipv6_DEFAULT []byte - -func (p *Endpoint) GetIpv6() []byte { - return p.Ipv6 -} -func (p *Endpoint) IsSetIpv6() bool { - return p.Ipv6 != nil -} - -func (p *Endpoint) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I16 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Endpoint) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Ipv4 = v - } - return nil -} - -func (p *Endpoint) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI16(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Port = v - } - return nil -} - -func (p *Endpoint) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.ServiceName = v - } - return nil -} - -func (p *Endpoint) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.Ipv6 = v - } - return nil -} - -func (p *Endpoint) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Endpoint"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - 
return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - if err := p.writeField3(ctx, oprot); err != nil { - return err - } - if err := p.writeField4(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Endpoint) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "ipv4", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) - } - if err := oprot.WriteI32(ctx, int32(p.Ipv4)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) - } - return err -} - -func (p *Endpoint) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "port", thrift.I16, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) - } - if err := oprot.WriteI16(ctx, int16(p.Port)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) - } - return err -} - -func (p *Endpoint) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "service_name", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) - } - if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) - } - return err -} - -func (p *Endpoint) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetIpv6() { - if err := oprot.WriteFieldBegin(ctx, "ipv6", thrift.STRING, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err) - } - if err := oprot.WriteBinary(ctx, p.Ipv6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err) - } - } - return err -} - -func (p *Endpoint) Equals(other *Endpoint) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Ipv4 != other.Ipv4 { - return false - } - if p.Port != other.Port { - return false - } - if p.ServiceName != other.ServiceName { - return false - } - if bytes.Compare(p.Ipv6, other.Ipv6) != 0 { - return false - } - return true -} - -func (p *Endpoint) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Endpoint(%+v)", *p) -} - -// An annotation is similar to a log statement. It includes a host field which -// allows these events to be attributed properly, and also aggregatable. -// -// Attributes: -// - Timestamp: Microseconds from epoch. 
-// -// This value should use the most precise value possible. For example, -// gettimeofday or syncing nanoTime against a tick of currentTimeMillis. -// - Value -// - Host: Always the host that recorded the event. By specifying the host you allow -// rollup of all events (such as client requests to a service) by IP address. -type Annotation struct { - Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"` - Value string `thrift:"value,2" db:"value" json:"value"` - Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"` -} - -func NewAnnotation() *Annotation { - return &Annotation{} -} - -func (p *Annotation) GetTimestamp() int64 { - return p.Timestamp -} - -func (p *Annotation) GetValue() string { - return p.Value -} - -var Annotation_Host_DEFAULT *Endpoint - -func (p *Annotation) GetHost() *Endpoint { - if !p.IsSetHost() { - return Annotation_Host_DEFAULT - } - return p.Host -} -func (p *Annotation) IsSetHost() bool { - return p.Host != nil -} - -func (p *Annotation) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Annotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Timestamp = v - } - return nil -} - -func (p *Annotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Value = v - } - return nil -} - -func (p *Annotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - p.Host = &Endpoint{} - if err := p.Host.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) - } - return nil -} - -func (p *Annotation) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Annotation"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - if err := p.writeField3(ctx, oprot); err != nil { - 
return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Annotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) - } - return err -} - -func (p *Annotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) - } - if err := oprot.WriteString(ctx, string(p.Value)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) - } - return err -} - -func (p *Annotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetHost() { - if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) - } - if err := p.Host.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) - } - } - return err -} - -func (p *Annotation) Equals(other *Annotation) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Timestamp != other.Timestamp { - return false - } - if p.Value != other.Value { - return false - } - if !p.Host.Equals(other.Host) { - return false - } - return true -} - -func (p *Annotation) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Annotation(%+v)", *p) -} - -// Binary annotations are tags applied to a Span to give it context. For -// example, a binary annotation of "http.uri" could the path to a resource in a -// RPC call. -// -// Binary annotations of type STRING are always queryable, though more a -// historical implementation detail than a structural concern. -// -// Binary annotations can repeat, and vary on the host. Similar to Annotation, -// the host indicates who logged the event. This allows you to tell the -// difference between the client and server side of the same key. For example, -// the key "http.uri" might be different on the client and server side due to -// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field, -// you can see the different points of view, which often help in debugging. -// -// Attributes: -// - Key -// - Value -// - AnnotationType -// - Host: The host that recorded tag, which allows you to differentiate between -// multiple tags with the same key. There are two exceptions to this. 
-// -// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or -// destination of an RPC. This exception allows zipkin to display network -// context of uninstrumented services, or clients such as web browsers. -type BinaryAnnotation struct { - Key string `thrift:"key,1" db:"key" json:"key"` - Value []byte `thrift:"value,2" db:"value" json:"value"` - AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"` - Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"` -} - -func NewBinaryAnnotation() *BinaryAnnotation { - return &BinaryAnnotation{} -} - -func (p *BinaryAnnotation) GetKey() string { - return p.Key -} - -func (p *BinaryAnnotation) GetValue() []byte { - return p.Value -} - -func (p *BinaryAnnotation) GetAnnotationType() AnnotationType { - return p.AnnotationType -} - -var BinaryAnnotation_Host_DEFAULT *Endpoint - -func (p *BinaryAnnotation) GetHost() *Endpoint { - if !p.IsSetHost() { - return BinaryAnnotation_Host_DEFAULT - } - return p.Host -} -func (p *BinaryAnnotation) IsSetHost() bool { - return p.Host != nil -} - -func (p *BinaryAnnotation) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *BinaryAnnotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Key = v - } - return nil -} - -func (p *BinaryAnnotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Value = v - } - return nil -} - -func (p *BinaryAnnotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - temp := AnnotationType(v) - p.AnnotationType = temp - } - return nil -} - -func (p *BinaryAnnotation) ReadField4(ctx context.Context, iprot 
thrift.TProtocol) error { - p.Host = &Endpoint{} - if err := p.Host.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) - } - return nil -} - -func (p *BinaryAnnotation) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "BinaryAnnotation"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - if err := p.writeField3(ctx, oprot); err != nil { - return err - } - if err := p.writeField4(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BinaryAnnotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) - } - if err := oprot.WriteString(ctx, string(p.Key)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) - } - return err -} - -func (p *BinaryAnnotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) - } - if err := oprot.WriteBinary(ctx, p.Value); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) - } - return err -} - -func (p *BinaryAnnotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "annotation_type", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) - } - if err := oprot.WriteI32(ctx, int32(p.AnnotationType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err) - } - return err -} - -func (p *BinaryAnnotation) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetHost() { - if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) - } - if err := p.Host.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) - } - } - return err -} - -func (p *BinaryAnnotation) Equals(other *BinaryAnnotation) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if 
p.Key != other.Key { - return false - } - if bytes.Compare(p.Value, other.Value) != 0 { - return false - } - if p.AnnotationType != other.AnnotationType { - return false - } - if !p.Host.Equals(other.Host) { - return false - } - return true -} - -func (p *BinaryAnnotation) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BinaryAnnotation(%+v)", *p) -} - -// A trace is a series of spans (often RPC calls) which form a latency tree. -// -// The root span is where trace_id = id and parent_id = Nil. The root span is -// usually the longest interval in the trace, starting with a SERVER_RECV -// annotation and ending with a SERVER_SEND. -// -// Attributes: -// - TraceID -// - Name: Span name in lowercase, rpc method for example -// -// Conventionally, when the span name isn't known, name = "unknown". -// - ID -// - ParentID -// - Annotations -// - BinaryAnnotations -// - Debug -// - Timestamp: Microseconds from epoch of the creation of this span. -// -// This value should be set directly by instrumentation, using the most -// precise value possible. For example, gettimeofday or syncing nanoTime -// against a tick of currentTimeMillis. -// -// For compatibility with instrumentation that precede this field, collectors -// or span stores can derive this via Annotation.timestamp. -// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp. -// -// This field is optional for compatibility with old data: first-party span -// stores are expected to support this at time of introduction. -// - Duration: Measurement of duration in microseconds, used to support queries. -// -// This value should be set directly, where possible. Doing so encourages -// precise measurement decoupled from problems of clocks, such as skew or NTP -// updates causing time to move backwards. -// -// For compatibility with instrumentation that precede this field, collectors -// or span stores can derive this by subtracting Annotation.timestamp. -// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp. -// -// If this field is persisted as unset, zipkin will continue to work, except -// duration query support will be implementation-specific. Similarly, setting -// this field non-atomically is implementation-specific. -// -// This field is i64 vs i32 to support spans longer than 35 minutes. -// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this -// means the trace uses 128 bit traceIds instead of 64 bit. 
-type Span struct { - TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"` - // unused field # 2 - Name string `thrift:"name,3" db:"name" json:"name"` - ID int64 `thrift:"id,4" db:"id" json:"id"` - ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"` - Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"` - // unused field # 7 - BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"` - Debug bool `thrift:"debug,9" db:"debug" json:"debug"` - Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"` - Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"` - TraceIDHigh *int64 `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"` -} - -func NewSpan() *Span { - return &Span{} -} - -func (p *Span) GetTraceID() int64 { - return p.TraceID -} - -func (p *Span) GetName() string { - return p.Name -} - -func (p *Span) GetID() int64 { - return p.ID -} - -var Span_ParentID_DEFAULT int64 - -func (p *Span) GetParentID() int64 { - if !p.IsSetParentID() { - return Span_ParentID_DEFAULT - } - return *p.ParentID -} - -func (p *Span) GetAnnotations() []*Annotation { - return p.Annotations -} - -func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation { - return p.BinaryAnnotations -} - -var Span_Debug_DEFAULT bool = false - -func (p *Span) GetDebug() bool { - return p.Debug -} - -var Span_Timestamp_DEFAULT int64 - -func (p *Span) GetTimestamp() int64 { - if !p.IsSetTimestamp() { - return Span_Timestamp_DEFAULT - } - return *p.Timestamp -} - -var Span_Duration_DEFAULT int64 - -func (p *Span) GetDuration() int64 { - if !p.IsSetDuration() { - return Span_Duration_DEFAULT - } - return *p.Duration -} - -var Span_TraceIDHigh_DEFAULT int64 - -func (p *Span) GetTraceIDHigh() int64 { - if !p.IsSetTraceIDHigh() { - return Span_TraceIDHigh_DEFAULT - } - return *p.TraceIDHigh -} -func (p *Span) IsSetParentID() bool { - return p.ParentID != nil -} - -func (p *Span) IsSetDebug() bool { - return p.Debug != Span_Debug_DEFAULT -} - -func (p *Span) IsSetTimestamp() bool { - return p.Timestamp != nil -} - -func (p *Span) IsSetDuration() bool { - return p.Duration != nil -} - -func (p *Span) IsSetTraceIDHigh() bool { - return p.TraceIDHigh != nil -} - -func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.I64 { - if err := p.ReadField5(ctx, iprot); err != nil { - return err - } - } else { - if err := 
iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.LIST { - if err := p.ReadField6(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 8: - if fieldTypeId == thrift.LIST { - if err := p.ReadField8(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 9: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField9(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 10: - if fieldTypeId == thrift.I64 { - if err := p.ReadField10(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 11: - if fieldTypeId == thrift.I64 { - if err := p.ReadField11(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 12: - if fieldTypeId == thrift.I64 { - if err := p.ReadField12(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.TraceID = v - } - return nil -} - -func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.Name = v - } - return nil -} - -func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.ID = v - } - return nil -} - -func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.ParentID = &v - } - return nil -} - -func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Annotation, 0, size) - p.Annotations = tSlice - for i := 0; i < size; i++ { - _elem0 := &Annotation{} - if err := _elem0.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.Annotations = append(p.Annotations, _elem0) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*BinaryAnnotation, 0, size) - p.BinaryAnnotations = tSlice - for i := 0; i < size; i++ { - _elem1 := &BinaryAnnotation{} - if err := _elem1.Read(ctx, 
iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) - } - p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 9: ", err) - } else { - p.Debug = v - } - return nil -} - -func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 10: ", err) - } else { - p.Timestamp = &v - } - return nil -} - -func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 11: ", err) - } else { - p.Duration = &v - } - return nil -} - -func (p *Span) ReadField12(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 12: ", err) - } else { - p.TraceIDHigh = &v - } - return nil -} - -func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Span"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField3(ctx, oprot); err != nil { - return err - } - if err := p.writeField4(ctx, oprot); err != nil { - return err - } - if err := p.writeField5(ctx, oprot); err != nil { - return err - } - if err := p.writeField6(ctx, oprot); err != nil { - return err - } - if err := p.writeField8(ctx, oprot); err != nil { - return err - } - if err := p.writeField9(ctx, oprot); err != nil { - return err - } - if err := p.writeField10(ctx, oprot); err != nil { - return err - } - if err := p.writeField11(ctx, oprot); err != nil { - return err - } - if err := p.writeField12(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "trace_id", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.TraceID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) - } - return err -} - -func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) - } - if err := oprot.WriteString(ctx, string(p.Name)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) - } - return err -} - -func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "id", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.ID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) - } - return err -} - -func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetParentID() { - if err := oprot.WriteFieldBegin(ctx, "parent_id", thrift.I64, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(*p.ParentID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) - } - } - return err -} - -func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "annotations", thrift.LIST, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Annotations)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Annotations { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) - } - return err -} - -func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "binary_annotations", thrift.LIST, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.BinaryAnnotations)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.BinaryAnnotations { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) - } - return err -} - -func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetDebug() { - if err := oprot.WriteFieldBegin(ctx, "debug", thrift.BOOL, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) - } - if err := oprot.WriteBool(ctx, bool(p.Debug)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - 
return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) - } - } - return err -} - -func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetTimestamp() { - if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 10); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(*p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) - } - } - return err -} - -func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetDuration() { - if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 11); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(*p.Duration)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) - } - } - return err -} - -func (p *Span) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetTraceIDHigh() { - if err := oprot.WriteFieldBegin(ctx, "trace_id_high", thrift.I64, 12); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(*p.TraceIDHigh)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err) - } - } - return err -} - -func (p *Span) Equals(other *Span) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.TraceID != other.TraceID { - return false - } - if p.Name != other.Name { - return false - } - if p.ID != other.ID { - return false - } - if p.ParentID != other.ParentID { - if p.ParentID == nil || other.ParentID == nil { - return false - } - if (*p.ParentID) != (*other.ParentID) { - return false - } - } - if len(p.Annotations) != len(other.Annotations) { - return false - } - for i, _tgt := range p.Annotations { - _src2 := other.Annotations[i] - if !_tgt.Equals(_src2) { - return false - } - } - if len(p.BinaryAnnotations) != len(other.BinaryAnnotations) { - return false - } - for i, _tgt := range p.BinaryAnnotations { - _src3 := other.BinaryAnnotations[i] - if !_tgt.Equals(_src3) { - return false - } - } - if p.Debug != other.Debug { - return false - } - if p.Timestamp != other.Timestamp { - if p.Timestamp == nil || other.Timestamp == nil { - return false - } - if (*p.Timestamp) != (*other.Timestamp) { - return false - } - } - if p.Duration != other.Duration { - if p.Duration == nil || other.Duration == nil { - return false - } - if (*p.Duration) != (*other.Duration) { - return false - } - } - if p.TraceIDHigh != other.TraceIDHigh { - if p.TraceIDHigh == nil || other.TraceIDHigh == nil { - return false - } - if (*p.TraceIDHigh) != (*other.TraceIDHigh) { - return false - } - } - return true -} - -func (p *Span) String() string { - if p == nil { - return "" - } - return 
fmt.Sprintf("Span(%+v)", *p) -} - -// Attributes: -// - Ok -type Response struct { - Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"` -} - -func NewResponse() *Response { - return &Response{} -} - -func (p *Response) GetOk() bool { - return p.Ok -} -func (p *Response) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetOk bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetOk = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetOk { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")) - } - return nil -} - -func (p *Response) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Ok = v - } - return nil -} - -func (p *Response) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Response) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) - } - if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) - } - return err -} - -func (p *Response) Equals(other *Response) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Ok != other.Ok { - return false - } - return true -} - -func (p *Response) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Response(%+v)", *p) -} - -type ZipkinCollector interface { - // Parameters: - // - Spans - SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) -} - -type ZipkinCollectorClient struct { - c thrift.TClient - meta thrift.ResponseMeta -} - -func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient { - return &ZipkinCollectorClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -func 
NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient { - return &ZipkinCollectorClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewZipkinCollectorClient(c thrift.TClient) *ZipkinCollectorClient { - return &ZipkinCollectorClient{ - c: c, - } -} - -func (p *ZipkinCollectorClient) Client_() thrift.TClient { - return p.c -} - -func (p *ZipkinCollectorClient) LastResponseMeta_() thrift.ResponseMeta { - return p.meta -} - -func (p *ZipkinCollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { - p.meta = meta -} - -// Parameters: -// - Spans -func (p *ZipkinCollectorClient) SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) { - var _args4 ZipkinCollectorSubmitZipkinBatchArgs - _args4.Spans = spans - var _result6 ZipkinCollectorSubmitZipkinBatchResult - var _meta5 thrift.ResponseMeta - _meta5, _err = p.Client_().Call(ctx, "submitZipkinBatch", &_args4, &_result6) - p.SetLastResponseMeta_(_meta5) - if _err != nil { - return - } - return _result6.GetSuccess(), nil -} - -type ZipkinCollectorProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler ZipkinCollector -} - -func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor { - - self7 := &ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self7.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler} - return self7 -} - -func (p *ZipkinCollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) - if err2 != nil { - return false, thrift.WrapTException(err2) - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(ctx, thrift.STRUCT) - iprot.ReadMessageEnd(ctx) - x8 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) - x8.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, x8 - -} - -type zipkinCollectorProcessorSubmitZipkinBatch struct { - handler ZipkinCollector -} - -func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := ZipkinCollectorSubmitZipkinBatchArgs{} - var err2 error - if err2 = args.Read(ctx, iprot); err2 != nil { - iprot.ReadMessageEnd(ctx) - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) - oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId) - x.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, thrift.WrapTException(err2) - } - iprot.ReadMessageEnd(ctx) - - tickerCancel := func() {} - // Start a goroutine to do server side connectivity check. 
- if thrift.ServerConnectivityCheckInterval > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithCancel(ctx) - defer cancel() - var tickerCtx context.Context - tickerCtx, tickerCancel = context.WithCancel(context.Background()) - defer tickerCancel() - go func(ctx context.Context, cancel context.CancelFunc) { - ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - if !iprot.Transport().IsOpen() { - cancel() - return - } - } - } - }(tickerCtx, cancel) - } - - result := ZipkinCollectorSubmitZipkinBatchResult{} - var retval []*Response - if retval, err2 = p.handler.SubmitZipkinBatch(ctx, args.Spans); err2 != nil { - tickerCancel() - if err2 == thrift.ErrAbandonRequest { - return false, thrift.WrapTException(err2) - } - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: "+err2.Error()) - oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId) - x.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return true, thrift.WrapTException(err2) - } else { - result.Success = retval - } - tickerCancel() - if err2 = oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.REPLY, seqId); err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = result.Write(ctx, oprot); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err != nil { - return - } - return true, err -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Spans -type ZipkinCollectorSubmitZipkinBatchArgs struct { - Spans []*Span `thrift:"spans,1" db:"spans" json:"spans"` -} - -func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs { - return &ZipkinCollectorSubmitZipkinBatchArgs{} -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span { - return p.Spans -} -func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Span, 0, size) - p.Spans = tSlice - for i := 0; i < size; i++ { - _elem9 := &Span{} - if err := _elem9.Read(ctx, iprot); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), err) - } - p.Spans = append(p.Spans, _elem9) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Spans { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) - } - return err -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p) -} - -// Attributes: -// - Success -type ZipkinCollectorSubmitZipkinBatchResult struct { - Success []*Response `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult { - return &ZipkinCollectorSubmitZipkinBatchResult{} -} - -var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response - -func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response { - return p.Success -} -func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.LIST { - if err := p.ReadField0(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p 
*ZipkinCollectorSubmitZipkinBatchResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Response, 0, size) - p.Success = tSlice - for i := 0; i < size; i++ { - _elem10 := &Response{} - if err := _elem10.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) - } - p.Success = append(p.Success, _elem10) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Success { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE deleted file mode 100644 index 2bc6fbbf65..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE +++ /dev/null @@ -1,306 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- --------------------------------------------------- -SOFTWARE DISTRIBUTED WITH THRIFT: - -The Apache Thrift software includes a number of subcomponents with -separate copyright notices and license terms. Your use of the source -code for the these subcomponents is subject to the terms and -conditions of the following licenses. - --------------------------------------------------- -Portions of the following files are licensed under the MIT License: - - lib/erl/src/Makefile.am - -Please see doc/otp-base-license.txt for the full terms of this license. - --------------------------------------------------- -For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: - -# Copyright (c) 2007 Thomas Porschberg -# -# Copying and distribution of this file, with or without -# modification, are permitted in any medium without royalty provided -# the copyright notice and this notice are preserved. - --------------------------------------------------- -For the lib/nodejs/lib/thrift/json_parse.js: - -/* - json_parse.js - 2015-05-02 - Public Domain. - NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. - -*/ -(By Douglas Crockford ) - --------------------------------------------------- -For lib/cpp/src/thrift/windows/SocketPair.cpp - -/* socketpair.c - * Copyright 2007 by Nathan C. Myers ; some rights reserved. - * This code is Free Software. It may be copied freely, in original or - * modified form, subject only to the restrictions that (1) the author is - * relieved from all responsibilities for any use for any purpose, and (2) - * this copyright notice must be retained, unchanged, in its entirety. If - * for any reason the author might be held responsible for any consequences - * of copying or use, license is withheld. - */ - - --------------------------------------------------- -For lib/py/compat/win32/stdint.h - -// ISO C9x compliant stdint.h for Microsoft Visual Studio -// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 -// -// Copyright (c) 2006-2008 Alexander Chemeris -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. The name of the author may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED -// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// -/////////////////////////////////////////////////////////////////////////////// - - --------------------------------------------------- -Codegen template in t_html_generator.h - -* Bootstrap v2.0.3 -* -* Copyright 2012 Twitter, Inc -* Licensed under the Apache License v2.0 -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Designed and built with all the love in the world @twitter by @mdo and @fat. - ---------------------------------------------------- -For t_cl_generator.cc - - * Copyright (c) 2008- Patrick Collison - * Copyright (c) 2006- Facebook - ---------------------------------------------------- diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE deleted file mode 100644 index 37824e7fb6..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Apache Thrift -Copyright (C) 2006 - 2019, The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go deleted file mode 100644 index 32d5b0147a..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "context" -) - -const ( - UNKNOWN_APPLICATION_EXCEPTION = 0 - UNKNOWN_METHOD = 1 - INVALID_MESSAGE_TYPE_EXCEPTION = 2 - WRONG_METHOD_NAME = 3 - BAD_SEQUENCE_ID = 4 - MISSING_RESULT = 5 - INTERNAL_ERROR = 6 - PROTOCOL_ERROR = 7 - INVALID_TRANSFORM = 8 - INVALID_PROTOCOL = 9 - UNSUPPORTED_CLIENT_TYPE = 10 -) - -var defaultApplicationExceptionMessage = map[int32]string{ - UNKNOWN_APPLICATION_EXCEPTION: "unknown application exception", - UNKNOWN_METHOD: "unknown method", - INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type", - WRONG_METHOD_NAME: "wrong method name", - BAD_SEQUENCE_ID: "bad sequence ID", - MISSING_RESULT: "missing result", - INTERNAL_ERROR: "unknown internal error", - PROTOCOL_ERROR: "unknown protocol error", - INVALID_TRANSFORM: "Invalid transform", - INVALID_PROTOCOL: "Invalid protocol", - UNSUPPORTED_CLIENT_TYPE: "Unsupported client type", -} - -// Application level Thrift exception -type TApplicationException interface { - TException - TypeId() int32 - Read(ctx context.Context, iprot TProtocol) error - Write(ctx context.Context, oprot TProtocol) error -} - -type tApplicationException struct { - message string - type_ int32 -} - -var _ TApplicationException = (*tApplicationException)(nil) - -func (tApplicationException) TExceptionType() TExceptionType { - return TExceptionTypeApplication -} - -func (e tApplicationException) Error() string { - if e.message != "" { - return e.message - } - return defaultApplicationExceptionMessage[e.type_] -} - -func NewTApplicationException(type_ int32, message string) TApplicationException { - return &tApplicationException{message, type_} -} - -func (p *tApplicationException) TypeId() int32 { - return p.type_ -} - -func (p *tApplicationException) Read(ctx context.Context, iprot TProtocol) error { - // TODO: this should really be generated by the compiler - _, err := iprot.ReadStructBegin(ctx) - if err != nil { - return err - } - - message := "" - type_ := int32(UNKNOWN_APPLICATION_EXCEPTION) - - for { - _, ttype, id, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return err - } - if ttype == STOP { - break - } - switch id { - case 1: - if ttype == STRING { - if message, err = iprot.ReadString(ctx); err != nil { - return err - } - } else { - if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { - return err - } - } - case 2: - if ttype == I32 { - if type_, err = iprot.ReadI32(ctx); err != nil { - return err - } - } else { - if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { - return err - } - } - default: - if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { - return err - } - } - if err = iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return err - } - - p.message = message - p.type_ = type_ - - return nil -} - -func (p *tApplicationException) Write(ctx context.Context, oprot TProtocol) (err error) { - err = oprot.WriteStructBegin(ctx, "TApplicationException") - if len(p.Error()) > 0 { - err = oprot.WriteFieldBegin(ctx, "message", STRING, 1) - if err != nil { - return - } - err = oprot.WriteString(ctx, p.Error()) - if err != nil { - return - } - err = oprot.WriteFieldEnd(ctx) - if err != nil { - return - } - } - err = oprot.WriteFieldBegin(ctx, "type", I32, 2) - if err != nil { - return - } - err = oprot.WriteI32(ctx, p.type_) - if err != nil { - return - } - err = oprot.WriteFieldEnd(ctx) - if err != nil { - return - } - err = oprot.WriteFieldStop(ctx) - if err != nil { - return - } - err = 
oprot.WriteStructEnd(ctx) - return -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go deleted file mode 100644 index 45c880d32f..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go +++ /dev/null @@ -1,555 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bytes" - "context" - "encoding/binary" - "errors" - "fmt" - "io" - "math" -) - -type TBinaryProtocol struct { - trans TRichTransport - origTransport TTransport - cfg *TConfiguration - buffer [64]byte -} - -type TBinaryProtocolFactory struct { - cfg *TConfiguration -} - -// Deprecated: Use NewTBinaryProtocolConf instead. -func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol { - return NewTBinaryProtocolConf(t, &TConfiguration{ - noPropagation: true, - }) -} - -// Deprecated: Use NewTBinaryProtocolConf instead. -func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol { - return NewTBinaryProtocolConf(t, &TConfiguration{ - TBinaryStrictRead: &strictRead, - TBinaryStrictWrite: &strictWrite, - - noPropagation: true, - }) -} - -func NewTBinaryProtocolConf(t TTransport, conf *TConfiguration) *TBinaryProtocol { - PropagateTConfiguration(t, conf) - p := &TBinaryProtocol{ - origTransport: t, - cfg: conf, - } - if et, ok := t.(TRichTransport); ok { - p.trans = et - } else { - p.trans = NewTRichTransport(t) - } - return p -} - -// Deprecated: Use NewTBinaryProtocolFactoryConf instead. -func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory { - return NewTBinaryProtocolFactoryConf(&TConfiguration{ - noPropagation: true, - }) -} - -// Deprecated: Use NewTBinaryProtocolFactoryConf instead. 
-func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory { - return NewTBinaryProtocolFactoryConf(&TConfiguration{ - TBinaryStrictRead: &strictRead, - TBinaryStrictWrite: &strictWrite, - - noPropagation: true, - }) -} - -func NewTBinaryProtocolFactoryConf(conf *TConfiguration) *TBinaryProtocolFactory { - return &TBinaryProtocolFactory{ - cfg: conf, - } -} - -func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol { - return NewTBinaryProtocolConf(t, p.cfg) -} - -func (p *TBinaryProtocolFactory) SetTConfiguration(conf *TConfiguration) { - p.cfg = conf -} - -/** - * Writing Methods - */ - -func (p *TBinaryProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { - if p.cfg.GetTBinaryStrictWrite() { - version := uint32(VERSION_1) | uint32(typeId) - e := p.WriteI32(ctx, int32(version)) - if e != nil { - return e - } - e = p.WriteString(ctx, name) - if e != nil { - return e - } - e = p.WriteI32(ctx, seqId) - return e - } else { - e := p.WriteString(ctx, name) - if e != nil { - return e - } - e = p.WriteByte(ctx, int8(typeId)) - if e != nil { - return e - } - e = p.WriteI32(ctx, seqId) - return e - } - return nil -} - -func (p *TBinaryProtocol) WriteMessageEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) WriteStructBegin(ctx context.Context, name string) error { - return nil -} - -func (p *TBinaryProtocol) WriteStructEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { - e := p.WriteByte(ctx, int8(typeId)) - if e != nil { - return e - } - e = p.WriteI16(ctx, id) - return e -} - -func (p *TBinaryProtocol) WriteFieldEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) WriteFieldStop(ctx context.Context) error { - e := p.WriteByte(ctx, STOP) - return e -} - -func (p *TBinaryProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { - e := p.WriteByte(ctx, int8(keyType)) - if e != nil { - return e - } - e = p.WriteByte(ctx, int8(valueType)) - if e != nil { - return e - } - e = p.WriteI32(ctx, int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteMapEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { - e := p.WriteByte(ctx, int8(elemType)) - if e != nil { - return e - } - e = p.WriteI32(ctx, int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteListEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { - e := p.WriteByte(ctx, int8(elemType)) - if e != nil { - return e - } - e = p.WriteI32(ctx, int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteSetEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) WriteBool(ctx context.Context, value bool) error { - if value { - return p.WriteByte(ctx, 1) - } - return p.WriteByte(ctx, 0) -} - -func (p *TBinaryProtocol) WriteByte(ctx context.Context, value int8) error { - e := p.trans.WriteByte(byte(value)) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI16(ctx context.Context, value int16) error { - v := p.buffer[0:2] - binary.BigEndian.PutUint16(v, uint16(value)) - _, e := p.trans.Write(v) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI32(ctx context.Context, value int32) error { - v := p.buffer[0:4] - 
binary.BigEndian.PutUint32(v, uint32(value)) - _, e := p.trans.Write(v) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI64(ctx context.Context, value int64) error { - v := p.buffer[0:8] - binary.BigEndian.PutUint64(v, uint64(value)) - _, err := p.trans.Write(v) - return NewTProtocolException(err) -} - -func (p *TBinaryProtocol) WriteDouble(ctx context.Context, value float64) error { - return p.WriteI64(ctx, int64(math.Float64bits(value))) -} - -func (p *TBinaryProtocol) WriteString(ctx context.Context, value string) error { - e := p.WriteI32(ctx, int32(len(value))) - if e != nil { - return e - } - _, err := p.trans.WriteString(value) - return NewTProtocolException(err) -} - -func (p *TBinaryProtocol) WriteBinary(ctx context.Context, value []byte) error { - e := p.WriteI32(ctx, int32(len(value))) - if e != nil { - return e - } - _, err := p.trans.Write(value) - return NewTProtocolException(err) -} - -/** - * Reading methods - */ - -func (p *TBinaryProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { - size, e := p.ReadI32(ctx) - if e != nil { - return "", typeId, 0, NewTProtocolException(e) - } - if size < 0 { - typeId = TMessageType(size & 0x0ff) - version := int64(int64(size) & VERSION_MASK) - if version != VERSION_1 { - return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin")) - } - name, e = p.ReadString(ctx) - if e != nil { - return name, typeId, seqId, NewTProtocolException(e) - } - seqId, e = p.ReadI32(ctx) - if e != nil { - return name, typeId, seqId, NewTProtocolException(e) - } - return name, typeId, seqId, nil - } - if p.cfg.GetTBinaryStrictRead() { - return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin")) - } - name, e2 := p.readStringBody(size) - if e2 != nil { - return name, typeId, seqId, e2 - } - b, e3 := p.ReadByte(ctx) - if e3 != nil { - return name, typeId, seqId, e3 - } - typeId = TMessageType(b) - seqId, e4 := p.ReadI32(ctx) - if e4 != nil { - return name, typeId, seqId, e4 - } - return name, typeId, seqId, nil -} - -func (p *TBinaryProtocol) ReadMessageEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { - return -} - -func (p *TBinaryProtocol) ReadStructEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, seqId int16, err error) { - t, err := p.ReadByte(ctx) - typeId = TType(t) - if err != nil { - return name, typeId, seqId, err - } - if t != STOP { - seqId, err = p.ReadI16(ctx) - } - return name, typeId, seqId, err -} - -func (p *TBinaryProtocol) ReadFieldEnd(ctx context.Context) error { - return nil -} - -var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length")) - -func (p *TBinaryProtocol) ReadMapBegin(ctx context.Context) (kType, vType TType, size int, err error) { - k, e := p.ReadByte(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - kType = TType(k) - v, e := p.ReadByte(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - vType = TType(v) - size32, e := p.ReadI32(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - return kType, vType, size, nil -} - -func (p *TBinaryProtocol) ReadMapEnd(ctx context.Context) error 
{ - return nil -} - -func (p *TBinaryProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { - b, e := p.ReadByte(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - elemType = TType(b) - size32, e := p.ReadI32(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - - return -} - -func (p *TBinaryProtocol) ReadListEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { - b, e := p.ReadByte(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - elemType = TType(b) - size32, e := p.ReadI32(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - return elemType, size, nil -} - -func (p *TBinaryProtocol) ReadSetEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) ReadBool(ctx context.Context) (bool, error) { - b, e := p.ReadByte(ctx) - v := true - if b != 1 { - v = false - } - return v, e -} - -func (p *TBinaryProtocol) ReadByte(ctx context.Context) (int8, error) { - v, err := p.trans.ReadByte() - return int8(v), err -} - -func (p *TBinaryProtocol) ReadI16(ctx context.Context) (value int16, err error) { - buf := p.buffer[0:2] - err = p.readAll(ctx, buf) - value = int16(binary.BigEndian.Uint16(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadI32(ctx context.Context) (value int32, err error) { - buf := p.buffer[0:4] - err = p.readAll(ctx, buf) - value = int32(binary.BigEndian.Uint32(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadI64(ctx context.Context) (value int64, err error) { - buf := p.buffer[0:8] - err = p.readAll(ctx, buf) - value = int64(binary.BigEndian.Uint64(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadDouble(ctx context.Context) (value float64, err error) { - buf := p.buffer[0:8] - err = p.readAll(ctx, buf) - value = math.Float64frombits(binary.BigEndian.Uint64(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadString(ctx context.Context) (value string, err error) { - size, e := p.ReadI32(ctx) - if e != nil { - return "", e - } - err = checkSizeForProtocol(size, p.cfg) - if err != nil { - return - } - if size < 0 { - err = invalidDataLength - return - } - if size == 0 { - return "", nil - } - if size < int32(len(p.buffer)) { - // Avoid allocation on small reads - buf := p.buffer[:size] - read, e := io.ReadFull(p.trans, buf) - return string(buf[:read]), NewTProtocolException(e) - } - - return p.readStringBody(size) -} - -func (p *TBinaryProtocol) ReadBinary(ctx context.Context) ([]byte, error) { - size, e := p.ReadI32(ctx) - if e != nil { - return nil, e - } - if err := checkSizeForProtocol(size, p.cfg); err != nil { - return nil, err - } - - buf, err := safeReadBytes(size, p.trans) - return buf, NewTProtocolException(err) -} - -func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) { - return NewTProtocolException(p.trans.Flush(ctx)) -} - -func (p *TBinaryProtocol) Skip(ctx context.Context, fieldType TType) (err error) { - return SkipDefaultDepth(ctx, p, fieldType) -} - -func (p *TBinaryProtocol) Transport() TTransport { - return p.origTransport -} - -func (p *TBinaryProtocol) readAll(ctx context.Context, buf []byte) (err error) { - var read int - _, deadlineSet := ctx.Deadline() - for { - read, err = io.ReadFull(p.trans, buf) - if deadlineSet && read == 0 && 
isTimeoutError(err) && ctx.Err() == nil { - // This is I/O timeout without anything read, - // and we still have time left, keep retrying. - continue - } - // For anything else, don't retry - break - } - return NewTProtocolException(err) -} - -func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) { - buf, err := safeReadBytes(size, p.trans) - return string(buf), NewTProtocolException(err) -} - -func (p *TBinaryProtocol) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(p.trans, conf) - PropagateTConfiguration(p.origTransport, conf) - p.cfg = conf -} - -var ( - _ TConfigurationSetter = (*TBinaryProtocolFactory)(nil) - _ TConfigurationSetter = (*TBinaryProtocol)(nil) -) - -// This function is shared between TBinaryProtocol and TCompactProtocol. -// -// It tries to read size bytes from trans, in a way that prevents large -// allocations when size is insanely large (mostly caused by malformed message). -func safeReadBytes(size int32, trans io.Reader) ([]byte, error) { - if size < 0 { - return nil, nil - } - - buf := new(bytes.Buffer) - _, err := io.CopyN(buf, trans, int64(size)) - return buf.Bytes(), err -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go deleted file mode 100644 index aa551b4ab3..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "bufio" - "context" -) - -type TBufferedTransportFactory struct { - size int -} - -type TBufferedTransport struct { - bufio.ReadWriter - tp TTransport -} - -func (p *TBufferedTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - return NewTBufferedTransport(trans, p.size), nil -} - -func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory { - return &TBufferedTransportFactory{size: bufferSize} -} - -func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport { - return &TBufferedTransport{ - ReadWriter: bufio.ReadWriter{ - Reader: bufio.NewReaderSize(trans, bufferSize), - Writer: bufio.NewWriterSize(trans, bufferSize), - }, - tp: trans, - } -} - -func (p *TBufferedTransport) IsOpen() bool { - return p.tp.IsOpen() -} - -func (p *TBufferedTransport) Open() (err error) { - return p.tp.Open() -} - -func (p *TBufferedTransport) Close() (err error) { - return p.tp.Close() -} - -func (p *TBufferedTransport) Read(b []byte) (int, error) { - n, err := p.ReadWriter.Read(b) - if err != nil { - p.ReadWriter.Reader.Reset(p.tp) - } - return n, err -} - -func (p *TBufferedTransport) Write(b []byte) (int, error) { - n, err := p.ReadWriter.Write(b) - if err != nil { - p.ReadWriter.Writer.Reset(p.tp) - } - return n, err -} - -func (p *TBufferedTransport) Flush(ctx context.Context) error { - if err := p.ReadWriter.Flush(); err != nil { - p.ReadWriter.Writer.Reset(p.tp) - return err - } - return p.tp.Flush(ctx) -} - -func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) { - return p.tp.RemainingBytes() -} - -// SetTConfiguration implements TConfigurationSetter for propagation. -func (p *TBufferedTransport) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(p.tp, conf) -} - -var _ TConfigurationSetter = (*TBufferedTransport)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go deleted file mode 100644 index ea2c01fdad..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go +++ /dev/null @@ -1,109 +0,0 @@ -package thrift - -import ( - "context" - "fmt" -) - -// ResponseMeta represents the metadata attached to the response. -type ResponseMeta struct { - // The headers in the response, if any. - // If the underlying transport/protocol is not THeader, this will always be nil. - Headers THeaderMap -} - -type TClient interface { - Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) -} - -type TStandardClient struct { - seqId int32 - iprot, oprot TProtocol -} - -// TStandardClient implements TClient, and uses the standard message format for Thrift. -// It is not safe for concurrent use. 
-func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient { - return &TStandardClient{ - iprot: inputProtocol, - oprot: outputProtocol, - } -} - -func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error { - // Set headers from context object on THeaderProtocol - if headerProt, ok := oprot.(*THeaderProtocol); ok { - headerProt.ClearWriteHeaders() - for _, key := range GetWriteHeaderList(ctx) { - if value, ok := GetHeader(ctx, key); ok { - headerProt.SetWriteHeader(key, value) - } - } - } - - if err := oprot.WriteMessageBegin(ctx, method, CALL, seqId); err != nil { - return err - } - if err := args.Write(ctx, oprot); err != nil { - return err - } - if err := oprot.WriteMessageEnd(ctx); err != nil { - return err - } - return oprot.Flush(ctx) -} - -func (p *TStandardClient) Recv(ctx context.Context, iprot TProtocol, seqId int32, method string, result TStruct) error { - rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin(ctx) - if err != nil { - return err - } - - if method != rMethod { - return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method)) - } else if seqId != rSeqId { - return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method)) - } else if rTypeId == EXCEPTION { - var exception tApplicationException - if err := exception.Read(ctx, iprot); err != nil { - return err - } - - if err := iprot.ReadMessageEnd(ctx); err != nil { - return err - } - - return &exception - } else if rTypeId != REPLY { - return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method)) - } - - if err := result.Read(ctx, iprot); err != nil { - return err - } - - return iprot.ReadMessageEnd(ctx) -} - -func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) { - p.seqId++ - seqId := p.seqId - - if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil { - return ResponseMeta{}, err - } - - // method is oneway - if result == nil { - return ResponseMeta{}, nil - } - - err := p.Recv(ctx, p.iprot, seqId, method, result) - var headers THeaderMap - if hp, ok := p.iprot.(*THeaderProtocol); ok { - headers = hp.transport.readHeaders - } - return ResponseMeta{ - Headers: headers, - }, err -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go deleted file mode 100644 index a49225dabf..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go +++ /dev/null @@ -1,865 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "encoding/binary" - "errors" - "fmt" - "io" - "math" -) - -const ( - COMPACT_PROTOCOL_ID = 0x082 - COMPACT_VERSION = 1 - COMPACT_VERSION_MASK = 0x1f - COMPACT_TYPE_MASK = 0x0E0 - COMPACT_TYPE_BITS = 0x07 - COMPACT_TYPE_SHIFT_AMOUNT = 5 -) - -type tCompactType byte - -const ( - COMPACT_BOOLEAN_TRUE = 0x01 - COMPACT_BOOLEAN_FALSE = 0x02 - COMPACT_BYTE = 0x03 - COMPACT_I16 = 0x04 - COMPACT_I32 = 0x05 - COMPACT_I64 = 0x06 - COMPACT_DOUBLE = 0x07 - COMPACT_BINARY = 0x08 - COMPACT_LIST = 0x09 - COMPACT_SET = 0x0A - COMPACT_MAP = 0x0B - COMPACT_STRUCT = 0x0C -) - -var ( - ttypeToCompactType map[TType]tCompactType -) - -func init() { - ttypeToCompactType = map[TType]tCompactType{ - STOP: STOP, - BOOL: COMPACT_BOOLEAN_TRUE, - BYTE: COMPACT_BYTE, - I16: COMPACT_I16, - I32: COMPACT_I32, - I64: COMPACT_I64, - DOUBLE: COMPACT_DOUBLE, - STRING: COMPACT_BINARY, - LIST: COMPACT_LIST, - SET: COMPACT_SET, - MAP: COMPACT_MAP, - STRUCT: COMPACT_STRUCT, - } -} - -type TCompactProtocolFactory struct { - cfg *TConfiguration -} - -// Deprecated: Use NewTCompactProtocolFactoryConf instead. -func NewTCompactProtocolFactory() *TCompactProtocolFactory { - return NewTCompactProtocolFactoryConf(&TConfiguration{ - noPropagation: true, - }) -} - -func NewTCompactProtocolFactoryConf(conf *TConfiguration) *TCompactProtocolFactory { - return &TCompactProtocolFactory{ - cfg: conf, - } -} - -func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return NewTCompactProtocolConf(trans, p.cfg) -} - -func (p *TCompactProtocolFactory) SetTConfiguration(conf *TConfiguration) { - p.cfg = conf -} - -type TCompactProtocol struct { - trans TRichTransport - origTransport TTransport - - cfg *TConfiguration - - // Used to keep track of the last field for the current and previous structs, - // so we can do the delta stuff. - lastField []int - lastFieldId int - - // If we encounter a boolean field begin, save the TField here so it can - // have the value incorporated. - booleanFieldName string - booleanFieldId int16 - booleanFieldPending bool - - // If we read a field header, and it's a boolean field, save the boolean - // value here so that readBool can use it. - boolValue bool - boolValueIsNotNull bool - buffer [64]byte -} - -// Deprecated: Use NewTCompactProtocolConf instead. -func NewTCompactProtocol(trans TTransport) *TCompactProtocol { - return NewTCompactProtocolConf(trans, &TConfiguration{ - noPropagation: true, - }) -} - -func NewTCompactProtocolConf(trans TTransport, conf *TConfiguration) *TCompactProtocol { - PropagateTConfiguration(trans, conf) - p := &TCompactProtocol{ - origTransport: trans, - cfg: conf, - } - if et, ok := trans.(TRichTransport); ok { - p.trans = et - } else { - p.trans = NewTRichTransport(trans) - } - - return p -} - -// -// Public Writing methods. -// - -// Write a message header to the wire. Compact Protocol messages contain the -// protocol version so we can migrate forwards in the future if need be. 
-func (p *TCompactProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { - err := p.writeByteDirect(COMPACT_PROTOCOL_ID) - if err != nil { - return NewTProtocolException(err) - } - err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK)) - if err != nil { - return NewTProtocolException(err) - } - _, err = p.writeVarint32(seqid) - if err != nil { - return NewTProtocolException(err) - } - e := p.WriteString(ctx, name) - return e - -} - -func (p *TCompactProtocol) WriteMessageEnd(ctx context.Context) error { return nil } - -// Write a struct begin. This doesn't actually put anything on the wire. We -// use it as an opportunity to put special placeholder markers on the field -// stack so we can get the field id deltas correct. -func (p *TCompactProtocol) WriteStructBegin(ctx context.Context, name string) error { - p.lastField = append(p.lastField, p.lastFieldId) - p.lastFieldId = 0 - return nil -} - -// Write a struct end. This doesn't actually put anything on the wire. We use -// this as an opportunity to pop the last field from the current struct off -// of the field stack. -func (p *TCompactProtocol) WriteStructEnd(ctx context.Context) error { - if len(p.lastField) <= 0 { - return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("WriteStructEnd called without matching WriteStructBegin call before")) - } - p.lastFieldId = p.lastField[len(p.lastField)-1] - p.lastField = p.lastField[:len(p.lastField)-1] - return nil -} - -func (p *TCompactProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { - if typeId == BOOL { - // we want to possibly include the value, so we'll wait. - p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true - return nil - } - _, err := p.writeFieldBeginInternal(ctx, name, typeId, id, 0xFF) - return NewTProtocolException(err) -} - -// The workhorse of writeFieldBegin. It has the option of doing a -// 'type override' of the type header. This is used specifically in the -// boolean field case. -func (p *TCompactProtocol) writeFieldBeginInternal(ctx context.Context, name string, typeId TType, id int16, typeOverride byte) (int, error) { - // short lastField = lastField_.pop(); - - // if there's a type override, use that. 
- var typeToWrite byte - if typeOverride == 0xFF { - typeToWrite = byte(p.getCompactType(typeId)) - } else { - typeToWrite = typeOverride - } - // check if we can use delta encoding for the field id - fieldId := int(id) - written := 0 - if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 { - // write them together - err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite) - if err != nil { - return 0, err - } - } else { - // write them separate - err := p.writeByteDirect(typeToWrite) - if err != nil { - return 0, err - } - err = p.WriteI16(ctx, id) - written = 1 + 2 - if err != nil { - return 0, err - } - } - - p.lastFieldId = fieldId - return written, nil -} - -func (p *TCompactProtocol) WriteFieldEnd(ctx context.Context) error { return nil } - -func (p *TCompactProtocol) WriteFieldStop(ctx context.Context) error { - err := p.writeByteDirect(STOP) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { - if size == 0 { - err := p.writeByteDirect(0) - return NewTProtocolException(err) - } - _, err := p.writeVarint32(int32(size)) - if err != nil { - return NewTProtocolException(err) - } - err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType))) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteMapEnd(ctx context.Context) error { return nil } - -// Write a list header. -func (p *TCompactProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { - _, err := p.writeCollectionBegin(elemType, size) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteListEnd(ctx context.Context) error { return nil } - -// Write a set header. -func (p *TCompactProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { - _, err := p.writeCollectionBegin(elemType, size) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteSetEnd(ctx context.Context) error { return nil } - -func (p *TCompactProtocol) WriteBool(ctx context.Context, value bool) error { - v := byte(COMPACT_BOOLEAN_FALSE) - if value { - v = byte(COMPACT_BOOLEAN_TRUE) - } - if p.booleanFieldPending { - // we haven't written the field header yet - _, err := p.writeFieldBeginInternal(ctx, p.booleanFieldName, BOOL, p.booleanFieldId, v) - p.booleanFieldPending = false - return NewTProtocolException(err) - } - // we're not part of a field, so just write the value. - err := p.writeByteDirect(v) - return NewTProtocolException(err) -} - -// Write a byte. Nothing to see here! -func (p *TCompactProtocol) WriteByte(ctx context.Context, value int8) error { - err := p.writeByteDirect(byte(value)) - return NewTProtocolException(err) -} - -// Write an I16 as a zigzag varint. -func (p *TCompactProtocol) WriteI16(ctx context.Context, value int16) error { - _, err := p.writeVarint32(p.int32ToZigzag(int32(value))) - return NewTProtocolException(err) -} - -// Write an i32 as a zigzag varint. -func (p *TCompactProtocol) WriteI32(ctx context.Context, value int32) error { - _, err := p.writeVarint32(p.int32ToZigzag(value)) - return NewTProtocolException(err) -} - -// Write an i64 as a zigzag varint. -func (p *TCompactProtocol) WriteI64(ctx context.Context, value int64) error { - _, err := p.writeVarint64(p.int64ToZigzag(value)) - return NewTProtocolException(err) -} - -// Write a double to the wire as 8 bytes. 
-func (p *TCompactProtocol) WriteDouble(ctx context.Context, value float64) error { - buf := p.buffer[0:8] - binary.LittleEndian.PutUint64(buf, math.Float64bits(value)) - _, err := p.trans.Write(buf) - return NewTProtocolException(err) -} - -// Write a string to the wire with a varint size preceding. -func (p *TCompactProtocol) WriteString(ctx context.Context, value string) error { - _, e := p.writeVarint32(int32(len(value))) - if e != nil { - return NewTProtocolException(e) - } - if len(value) > 0 { - } - _, e = p.trans.WriteString(value) - return e -} - -// Write a byte array, using a varint for the size. -func (p *TCompactProtocol) WriteBinary(ctx context.Context, bin []byte) error { - _, e := p.writeVarint32(int32(len(bin))) - if e != nil { - return NewTProtocolException(e) - } - if len(bin) > 0 { - _, e = p.trans.Write(bin) - return NewTProtocolException(e) - } - return nil -} - -// -// Reading methods. -// - -// Read a message header. -func (p *TCompactProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { - var protocolId byte - - _, deadlineSet := ctx.Deadline() - for { - protocolId, err = p.readByteDirect() - if deadlineSet && isTimeoutError(err) && ctx.Err() == nil { - // keep retrying I/O timeout errors since we still have - // time left - continue - } - // For anything else, don't retry - break - } - if err != nil { - return - } - - if protocolId != COMPACT_PROTOCOL_ID { - e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId) - return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e) - } - - versionAndType, err := p.readByteDirect() - if err != nil { - return - } - - version := versionAndType & COMPACT_VERSION_MASK - typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS) - if version != COMPACT_VERSION { - e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version) - err = NewTProtocolExceptionWithType(BAD_VERSION, e) - return - } - seqId, e := p.readVarint32() - if e != nil { - err = NewTProtocolException(e) - return - } - name, err = p.ReadString(ctx) - return -} - -func (p *TCompactProtocol) ReadMessageEnd(ctx context.Context) error { return nil } - -// Read a struct begin. There's nothing on the wire for this, but it is our -// opportunity to push a new struct begin marker onto the field stack. -func (p *TCompactProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { - p.lastField = append(p.lastField, p.lastFieldId) - p.lastFieldId = 0 - return -} - -// Doesn't actually consume any wire data, just removes the last field for -// this struct from the field stack. -func (p *TCompactProtocol) ReadStructEnd(ctx context.Context) error { - // consume the last field we read off the wire. - if len(p.lastField) <= 0 { - return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("ReadStructEnd called without matching ReadStructBegin call before")) - } - p.lastFieldId = p.lastField[len(p.lastField)-1] - p.lastField = p.lastField[:len(p.lastField)-1] - return nil -} - -// Read a field header off the wire. -func (p *TCompactProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) { - t, err := p.readByteDirect() - if err != nil { - return - } - - // if it's a stop, then we can return immediately, as the struct is over. - if (t & 0x0f) == STOP { - return "", STOP, 0, nil - } - - // mask off the 4 MSB of the type header. it could contain a field id delta. 
- modifier := int16((t & 0xf0) >> 4) - if modifier == 0 { - // not a delta. look ahead for the zigzag varint field id. - id, err = p.ReadI16(ctx) - if err != nil { - return - } - } else { - // has a delta. add the delta to the last read field id. - id = int16(p.lastFieldId) + modifier - } - typeId, e := p.getTType(tCompactType(t & 0x0f)) - if e != nil { - err = NewTProtocolException(e) - return - } - - // if this happens to be a boolean field, the value is encoded in the type - if p.isBoolType(t) { - // save the boolean value in a special instance variable. - p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE) - p.boolValueIsNotNull = true - } - - // push the new field onto the field stack so we can keep the deltas going. - p.lastFieldId = int(id) - return -} - -func (p *TCompactProtocol) ReadFieldEnd(ctx context.Context) error { return nil } - -// Read a map header off the wire. If the size is zero, skip reading the key -// and value type. This means that 0-length maps will yield TMaps without the -// "correct" types. -func (p *TCompactProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) { - size32, e := p.readVarint32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - - keyAndValueType := byte(STOP) - if size != 0 { - keyAndValueType, err = p.readByteDirect() - if err != nil { - return - } - } - keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4)) - valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf)) - return -} - -func (p *TCompactProtocol) ReadMapEnd(ctx context.Context) error { return nil } - -// Read a list header off the wire. If the list size is 0-14, the size will -// be packed into the element type header. If it's a longer list, the 4 MSB -// of the element type header will be 0xF, and a varint will follow with the -// true size. -func (p *TCompactProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { - size_and_type, err := p.readByteDirect() - if err != nil { - return - } - size = int((size_and_type >> 4) & 0x0f) - if size == 15 { - size2, e := p.readVarint32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size2 < 0 { - err = invalidDataLength - return - } - size = int(size2) - } - elemType, e := p.getTType(tCompactType(size_and_type)) - if e != nil { - err = NewTProtocolException(e) - return - } - return -} - -func (p *TCompactProtocol) ReadListEnd(ctx context.Context) error { return nil } - -// Read a set header off the wire. If the set size is 0-14, the size will -// be packed into the element type header. If it's a longer set, the 4 MSB -// of the element type header will be 0xF, and a varint will follow with the -// true size. -func (p *TCompactProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { - return p.ReadListBegin(ctx) -} - -func (p *TCompactProtocol) ReadSetEnd(ctx context.Context) error { return nil } - -// Read a boolean off the wire. If this is a boolean field, the value should -// already have been read during readFieldBegin, so we'll just consume the -// pre-stored value. Otherwise, read a byte. -func (p *TCompactProtocol) ReadBool(ctx context.Context) (value bool, err error) { - if p.boolValueIsNotNull { - p.boolValueIsNotNull = false - return p.boolValue, nil - } - v, err := p.readByteDirect() - return v == COMPACT_BOOLEAN_TRUE, err -} - -// Read a single byte off the wire. Nothing interesting here. 
-func (p *TCompactProtocol) ReadByte(ctx context.Context) (int8, error) { - v, err := p.readByteDirect() - if err != nil { - return 0, NewTProtocolException(err) - } - return int8(v), err -} - -// Read an i16 from the wire as a zigzag varint. -func (p *TCompactProtocol) ReadI16(ctx context.Context) (value int16, err error) { - v, err := p.ReadI32(ctx) - return int16(v), err -} - -// Read an i32 from the wire as a zigzag varint. -func (p *TCompactProtocol) ReadI32(ctx context.Context) (value int32, err error) { - v, e := p.readVarint32() - if e != nil { - return 0, NewTProtocolException(e) - } - value = p.zigzagToInt32(v) - return value, nil -} - -// Read an i64 from the wire as a zigzag varint. -func (p *TCompactProtocol) ReadI64(ctx context.Context) (value int64, err error) { - v, e := p.readVarint64() - if e != nil { - return 0, NewTProtocolException(e) - } - value = p.zigzagToInt64(v) - return value, nil -} - -// No magic here - just read a double off the wire. -func (p *TCompactProtocol) ReadDouble(ctx context.Context) (value float64, err error) { - longBits := p.buffer[0:8] - _, e := io.ReadFull(p.trans, longBits) - if e != nil { - return 0.0, NewTProtocolException(e) - } - return math.Float64frombits(p.bytesToUint64(longBits)), nil -} - -// Reads a []byte (via readBinary), and then UTF-8 decodes it. -func (p *TCompactProtocol) ReadString(ctx context.Context) (value string, err error) { - length, e := p.readVarint32() - if e != nil { - return "", NewTProtocolException(e) - } - err = checkSizeForProtocol(length, p.cfg) - if err != nil { - return - } - if length == 0 { - return "", nil - } - if length < int32(len(p.buffer)) { - // Avoid allocation on small reads - buf := p.buffer[:length] - read, e := io.ReadFull(p.trans, buf) - return string(buf[:read]), NewTProtocolException(e) - } - - buf, e := safeReadBytes(length, p.trans) - return string(buf), NewTProtocolException(e) -} - -// Read a []byte from the wire. -func (p *TCompactProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { - length, e := p.readVarint32() - if e != nil { - return nil, NewTProtocolException(e) - } - err = checkSizeForProtocol(length, p.cfg) - if err != nil { - return - } - if length == 0 { - return []byte{}, nil - } - - buf, e := safeReadBytes(length, p.trans) - return buf, NewTProtocolException(e) -} - -func (p *TCompactProtocol) Flush(ctx context.Context) (err error) { - return NewTProtocolException(p.trans.Flush(ctx)) -} - -func (p *TCompactProtocol) Skip(ctx context.Context, fieldType TType) (err error) { - return SkipDefaultDepth(ctx, p, fieldType) -} - -func (p *TCompactProtocol) Transport() TTransport { - return p.origTransport -} - -// -// Internal writing methods -// - -// Abstract method for writing the start of lists and sets. List and sets on -// the wire differ only by the type indicator. -func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) { - if size <= 14 { - return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType)))) - } - err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType))) - if err != nil { - return 0, err - } - m, err := p.writeVarint32(int32(size)) - return 1 + m, err -} - -// Write an i32 as a varint. Results in 1-5 bytes on the wire. -// TODO(pomack): make a permanent buffer like writeVarint64? 
-func (p *TCompactProtocol) writeVarint32(n int32) (int, error) { - i32buf := p.buffer[0:5] - idx := 0 - for { - if (n & ^0x7F) == 0 { - i32buf[idx] = byte(n) - idx++ - // p.writeByteDirect(byte(n)); - break - // return; - } else { - i32buf[idx] = byte((n & 0x7F) | 0x80) - idx++ - // p.writeByteDirect(byte(((n & 0x7F) | 0x80))); - u := uint32(n) - n = int32(u >> 7) - } - } - return p.trans.Write(i32buf[0:idx]) -} - -// Write an i64 as a varint. Results in 1-10 bytes on the wire. -func (p *TCompactProtocol) writeVarint64(n int64) (int, error) { - varint64out := p.buffer[0:10] - idx := 0 - for { - if (n & ^0x7F) == 0 { - varint64out[idx] = byte(n) - idx++ - break - } else { - varint64out[idx] = byte((n & 0x7F) | 0x80) - idx++ - u := uint64(n) - n = int64(u >> 7) - } - } - return p.trans.Write(varint64out[0:idx]) -} - -// Convert l into a zigzag long. This allows negative numbers to be -// represented compactly as a varint. -func (p *TCompactProtocol) int64ToZigzag(l int64) int64 { - return (l << 1) ^ (l >> 63) -} - -// Convert l into a zigzag long. This allows negative numbers to be -// represented compactly as a varint. -func (p *TCompactProtocol) int32ToZigzag(n int32) int32 { - return (n << 1) ^ (n >> 31) -} - -func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) { - binary.LittleEndian.PutUint64(buf, n) -} - -func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) { - binary.LittleEndian.PutUint64(buf, uint64(n)) -} - -// Writes a byte without any possibility of all that field header nonsense. -// Used internally by other writing methods that know they need to write a byte. -func (p *TCompactProtocol) writeByteDirect(b byte) error { - return p.trans.WriteByte(b) -} - -// Writes a byte without any possibility of all that field header nonsense. -func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) { - return 1, p.writeByteDirect(byte(n)) -} - -// -// Internal reading methods -// - -// Read an i32 from the wire as a varint. The MSB of each byte is set -// if there is another byte to follow. This can read up to 5 bytes. -func (p *TCompactProtocol) readVarint32() (int32, error) { - // if the wire contains the right stuff, this will just truncate the i64 we - // read and get us the right sign. - v, err := p.readVarint64() - return int32(v), err -} - -// Read an i64 from the wire as a proper varint. The MSB of each byte is set -// if there is another byte to follow. This can read up to 10 bytes. -func (p *TCompactProtocol) readVarint64() (int64, error) { - shift := uint(0) - result := int64(0) - for { - b, err := p.readByteDirect() - if err != nil { - return 0, err - } - result |= int64(b&0x7f) << shift - if (b & 0x80) != 0x80 { - break - } - shift += 7 - } - return result, nil -} - -// Read a byte, unlike ReadByte that reads Thrift-byte that is i8. -func (p *TCompactProtocol) readByteDirect() (byte, error) { - return p.trans.ReadByte() -} - -// -// encoding helpers -// - -// Convert from zigzag int to int. -func (p *TCompactProtocol) zigzagToInt32(n int32) int32 { - u := uint32(n) - return int32(u>>1) ^ -(n & 1) -} - -// Convert from zigzag long to long. -func (p *TCompactProtocol) zigzagToInt64(n int64) int64 { - u := uint64(n) - return int64(u>>1) ^ -(n & 1) -} - -// Note that it's important that the mask bytes are long literals, -// otherwise they'll default to ints, and when you shift an int left 56 bits, -// you just get a messed up int. 
-func (p *TCompactProtocol) bytesToInt64(b []byte) int64 { - return int64(binary.LittleEndian.Uint64(b)) -} - -// Note that it's important that the mask bytes are long literals, -// otherwise they'll default to ints, and when you shift an int left 56 bits, -// you just get a messed up int. -func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 { - return binary.LittleEndian.Uint64(b) -} - -// -// type testing and converting -// - -func (p *TCompactProtocol) isBoolType(b byte) bool { - return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE -} - -// Given a tCompactType constant, convert it to its corresponding -// TType value. -func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) { - switch byte(t) & 0x0f { - case STOP: - return STOP, nil - case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE: - return BOOL, nil - case COMPACT_BYTE: - return BYTE, nil - case COMPACT_I16: - return I16, nil - case COMPACT_I32: - return I32, nil - case COMPACT_I64: - return I64, nil - case COMPACT_DOUBLE: - return DOUBLE, nil - case COMPACT_BINARY: - return STRING, nil - case COMPACT_LIST: - return LIST, nil - case COMPACT_SET: - return SET, nil - case COMPACT_MAP: - return MAP, nil - case COMPACT_STRUCT: - return STRUCT, nil - } - return STOP, NewTProtocolException(fmt.Errorf("don't know what type: %v", t&0x0f)) -} - -// Given a TType value, find the appropriate TCompactProtocol.Types constant. -func (p *TCompactProtocol) getCompactType(t TType) tCompactType { - return ttypeToCompactType[t] -} - -func (p *TCompactProtocol) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(p.trans, conf) - PropagateTConfiguration(p.origTransport, conf) - p.cfg = conf -} - -var ( - _ TConfigurationSetter = (*TCompactProtocolFactory)(nil) - _ TConfigurationSetter = (*TCompactProtocol)(nil) -) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go deleted file mode 100644 index 454d9f3774..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go +++ /dev/null @@ -1,378 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "crypto/tls" - "fmt" - "time" -) - -// Default TConfiguration values. 
-const ( - DEFAULT_MAX_MESSAGE_SIZE = 100 * 1024 * 1024 - DEFAULT_MAX_FRAME_SIZE = 16384000 - - DEFAULT_TBINARY_STRICT_READ = false - DEFAULT_TBINARY_STRICT_WRITE = true - - DEFAULT_CONNECT_TIMEOUT = 0 - DEFAULT_SOCKET_TIMEOUT = 0 -) - -// TConfiguration defines some configurations shared between TTransport, -// TProtocol, TTransportFactory, TProtocolFactory, and other implementations. -// -// When constructing TConfiguration, you only need to specify the non-default -// fields. All zero values have sane default values. -// -// Not all configurations defined are applicable to all implementations. -// Implementations are free to ignore the configurations not applicable to them. -// -// All functions attached to this type are nil-safe. -// -// See [1] for spec. -// -// NOTE: When using TConfiguration, fill in all the configurations you want to -// set across the stack, not only the ones you want to set in the immediate -// TTransport/TProtocol. -// -// For example, say you want to migrate this old code into using TConfiguration: -// -// sccket := thrift.NewTSocketTimeout("host:port", time.Second) -// transFactory := thrift.NewTFramedTransportFactoryMaxLength( -// thrift.NewTTransportFactory(), -// 1024 * 1024 * 256, -// ) -// protoFactory := thrift.NewTBinaryProtocolFactory(true, true) -// -// This is the wrong way to do it because in the end the TConfiguration used by -// socket and transFactory will be overwritten by the one used by protoFactory -// because of TConfiguration propagation: -// -// // bad example, DO NOT USE -// sccket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{ -// ConnectTimeout: time.Second, -// SocketTimeout: time.Second, -// }) -// transFactory := thrift.NewTFramedTransportFactoryConf( -// thrift.NewTTransportFactory(), -// &thrift.TConfiguration{ -// MaxFrameSize: 1024 * 1024 * 256, -// }, -// ) -// protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{ -// TBinaryStrictRead: thrift.BoolPtr(true), -// TBinaryStrictWrite: thrift.BoolPtr(true), -// }) -// -// This is the correct way to do it: -// -// conf := &thrift.TConfiguration{ -// ConnectTimeout: time.Second, -// SocketTimeout: time.Second, -// -// MaxFrameSize: 1024 * 1024 * 256, -// -// TBinaryStrictRead: thrift.BoolPtr(true), -// TBinaryStrictWrite: thrift.BoolPtr(true), -// } -// sccket := thrift.NewTSocketConf("host:port", conf) -// transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf) -// protoFactory := thrift.NewTBinaryProtocolFactoryConf(conf) -// -// [1]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-tconfiguration.md -type TConfiguration struct { - // If <= 0, DEFAULT_MAX_MESSAGE_SIZE will be used instead. - MaxMessageSize int32 - - // If <= 0, DEFAULT_MAX_FRAME_SIZE will be used instead. - // - // Also if MaxMessageSize < MaxFrameSize, - // MaxMessageSize will be used instead. - MaxFrameSize int32 - - // Connect and socket timeouts to be used by TSocket and TSSLSocket. - // - // 0 means no timeout. - // - // If <0, DEFAULT_CONNECT_TIMEOUT and DEFAULT_SOCKET_TIMEOUT will be - // used. - ConnectTimeout time.Duration - SocketTimeout time.Duration - - // TLS config to be used by TSSLSocket. - TLSConfig *tls.Config - - // Strict read/write configurations for TBinaryProtocol. - // - // BoolPtr helper function is available to use literal values. - TBinaryStrictRead *bool - TBinaryStrictWrite *bool - - // The wrapped protocol id to be used in THeader transport/protocol. 
- // - // THeaderProtocolIDPtr and THeaderProtocolIDPtrMust helper functions - // are provided to help filling this value. - THeaderProtocolID *THeaderProtocolID - - // Used internally by deprecated constructors, to avoid overriding - // underlying TTransport/TProtocol's cfg by accidental propagations. - // - // For external users this is always false. - noPropagation bool -} - -// GetMaxMessageSize returns the max message size an implementation should -// follow. -// -// It's nil-safe. DEFAULT_MAX_MESSAGE_SIZE will be returned if tc is nil. -func (tc *TConfiguration) GetMaxMessageSize() int32 { - if tc == nil || tc.MaxMessageSize <= 0 { - return DEFAULT_MAX_MESSAGE_SIZE - } - return tc.MaxMessageSize -} - -// GetMaxFrameSize returns the max frame size an implementation should follow. -// -// It's nil-safe. DEFAULT_MAX_FRAME_SIZE will be returned if tc is nil. -// -// If the configured max message size is smaller than the configured max frame -// size, the smaller one will be returned instead. -func (tc *TConfiguration) GetMaxFrameSize() int32 { - if tc == nil { - return DEFAULT_MAX_FRAME_SIZE - } - maxFrameSize := tc.MaxFrameSize - if maxFrameSize <= 0 { - maxFrameSize = DEFAULT_MAX_FRAME_SIZE - } - if maxMessageSize := tc.GetMaxMessageSize(); maxMessageSize < maxFrameSize { - return maxMessageSize - } - return maxFrameSize -} - -// GetConnectTimeout returns the connect timeout should be used by TSocket and -// TSSLSocket. -// -// It's nil-safe. If tc is nil, DEFAULT_CONNECT_TIMEOUT will be returned instead. -func (tc *TConfiguration) GetConnectTimeout() time.Duration { - if tc == nil || tc.ConnectTimeout < 0 { - return DEFAULT_CONNECT_TIMEOUT - } - return tc.ConnectTimeout -} - -// GetSocketTimeout returns the socket timeout should be used by TSocket and -// TSSLSocket. -// -// It's nil-safe. If tc is nil, DEFAULT_SOCKET_TIMEOUT will be returned instead. -func (tc *TConfiguration) GetSocketTimeout() time.Duration { - if tc == nil || tc.SocketTimeout < 0 { - return DEFAULT_SOCKET_TIMEOUT - } - return tc.SocketTimeout -} - -// GetTLSConfig returns the tls config should be used by TSSLSocket. -// -// It's nil-safe. If tc is nil, nil will be returned instead. -func (tc *TConfiguration) GetTLSConfig() *tls.Config { - if tc == nil { - return nil - } - return tc.TLSConfig -} - -// GetTBinaryStrictRead returns the strict read configuration TBinaryProtocol -// should follow. -// -// It's nil-safe. DEFAULT_TBINARY_STRICT_READ will be returned if either tc or -// tc.TBinaryStrictRead is nil. -func (tc *TConfiguration) GetTBinaryStrictRead() bool { - if tc == nil || tc.TBinaryStrictRead == nil { - return DEFAULT_TBINARY_STRICT_READ - } - return *tc.TBinaryStrictRead -} - -// GetTBinaryStrictWrite returns the strict read configuration TBinaryProtocol -// should follow. -// -// It's nil-safe. DEFAULT_TBINARY_STRICT_WRITE will be returned if either tc or -// tc.TBinaryStrictWrite is nil. -func (tc *TConfiguration) GetTBinaryStrictWrite() bool { - if tc == nil || tc.TBinaryStrictWrite == nil { - return DEFAULT_TBINARY_STRICT_WRITE - } - return *tc.TBinaryStrictWrite -} - -// GetTHeaderProtocolID returns the THeaderProtocolID should be used by -// THeaderProtocol clients (for servers, they always use the same one as the -// client instead). -// -// It's nil-safe. If either tc or tc.THeaderProtocolID is nil, -// THeaderProtocolDefault will be returned instead. -// THeaderProtocolDefault will also be returned if configured value is invalid. 
-func (tc *TConfiguration) GetTHeaderProtocolID() THeaderProtocolID { - if tc == nil || tc.THeaderProtocolID == nil { - return THeaderProtocolDefault - } - protoID := *tc.THeaderProtocolID - if err := protoID.Validate(); err != nil { - return THeaderProtocolDefault - } - return protoID -} - -// THeaderProtocolIDPtr validates and returns the pointer to id. -// -// If id is not a valid THeaderProtocolID, a pointer to THeaderProtocolDefault -// and the validation error will be returned. -func THeaderProtocolIDPtr(id THeaderProtocolID) (*THeaderProtocolID, error) { - err := id.Validate() - if err != nil { - id = THeaderProtocolDefault - } - return &id, err -} - -// THeaderProtocolIDPtrMust validates and returns the pointer to id. -// -// It's similar to THeaderProtocolIDPtr, but it panics on validation errors -// instead of returning them. -func THeaderProtocolIDPtrMust(id THeaderProtocolID) *THeaderProtocolID { - ptr, err := THeaderProtocolIDPtr(id) - if err != nil { - panic(err) - } - return ptr -} - -// TConfigurationSetter is an optional interface TProtocol, TTransport, -// TProtocolFactory, TTransportFactory, and other implementations can implement. -// -// It's intended to be called during intializations. -// The behavior of calling SetTConfiguration on a TTransport/TProtocol in the -// middle of a message is undefined: -// It may or may not change the behavior of the current processing message, -// and it may even cause the current message to fail. -// -// Note for implementations: SetTConfiguration might be called multiple times -// with the same value in quick successions due to the implementation of the -// propagation. Implementations should make SetTConfiguration as simple as -// possible (usually just overwrite the stored configuration and propagate it to -// the wrapped TTransports/TProtocols). -type TConfigurationSetter interface { - SetTConfiguration(*TConfiguration) -} - -// PropagateTConfiguration propagates cfg to impl if impl implements -// TConfigurationSetter and cfg is non-nil, otherwise it does nothing. -// -// NOTE: nil cfg is not propagated. If you want to propagate a TConfiguration -// with everything being default value, use &TConfiguration{} explicitly instead. -func PropagateTConfiguration(impl interface{}, cfg *TConfiguration) { - if cfg == nil || cfg.noPropagation { - return - } - - if setter, ok := impl.(TConfigurationSetter); ok { - setter.SetTConfiguration(cfg) - } -} - -func checkSizeForProtocol(size int32, cfg *TConfiguration) error { - if size < 0 { - return NewTProtocolExceptionWithType( - NEGATIVE_SIZE, - fmt.Errorf("negative size: %d", size), - ) - } - if size > cfg.GetMaxMessageSize() { - return NewTProtocolExceptionWithType( - SIZE_LIMIT, - fmt.Errorf("size exceeded max allowed: %d", size), - ) - } - return nil -} - -type tTransportFactoryConf struct { - delegate TTransportFactory - cfg *TConfiguration -} - -func (f *tTransportFactoryConf) GetTransport(orig TTransport) (TTransport, error) { - trans, err := f.delegate.GetTransport(orig) - if err == nil { - PropagateTConfiguration(orig, f.cfg) - PropagateTConfiguration(trans, f.cfg) - } - return trans, err -} - -func (f *tTransportFactoryConf) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(f.delegate, f.cfg) - f.cfg = cfg -} - -// TTransportFactoryConf wraps a TTransportFactory to propagate -// TConfiguration on the factory's GetTransport calls. 
-func TTransportFactoryConf(delegate TTransportFactory, conf *TConfiguration) TTransportFactory { - return &tTransportFactoryConf{ - delegate: delegate, - cfg: conf, - } -} - -type tProtocolFactoryConf struct { - delegate TProtocolFactory - cfg *TConfiguration -} - -func (f *tProtocolFactoryConf) GetProtocol(trans TTransport) TProtocol { - proto := f.delegate.GetProtocol(trans) - PropagateTConfiguration(trans, f.cfg) - PropagateTConfiguration(proto, f.cfg) - return proto -} - -func (f *tProtocolFactoryConf) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(f.delegate, f.cfg) - f.cfg = cfg -} - -// TProtocolFactoryConf wraps a TProtocolFactory to propagate -// TConfiguration on the factory's GetProtocol calls. -func TProtocolFactoryConf(delegate TProtocolFactory, conf *TConfiguration) TProtocolFactory { - return &tProtocolFactoryConf{ - delegate: delegate, - cfg: conf, - } -} - -var ( - _ TConfigurationSetter = (*tTransportFactoryConf)(nil) - _ TConfigurationSetter = (*tProtocolFactoryConf)(nil) -) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go deleted file mode 100644 index d15c1bcf89..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import "context" - -var defaultCtx = context.Background() diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go deleted file mode 100644 index fdf9bfec15..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go +++ /dev/null @@ -1,447 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "fmt" -) - -type TDebugProtocol struct { - // Required. The actual TProtocol to do the read/write. - Delegate TProtocol - - // Optional. The logger and prefix to log all the args/return values - // from Delegate TProtocol calls. - // - // If Logger is nil, StdLogger using stdlib log package with os.Stderr - // will be used. If disable logging is desired, set Logger to NopLogger - // explicitly instead of leaving it as nil/unset. - Logger Logger - LogPrefix string - - // Optional. An TProtocol to duplicate everything read/written from Delegate. - // - // A typical use case of this is to use TSimpleJSONProtocol wrapping - // TMemoryBuffer in a middleware to json logging requests/responses. - // - // This feature is not available from TDebugProtocolFactory. In order to - // use it you have to construct TDebugProtocol directly, or set DuplicateTo - // field after getting a TDebugProtocol from the factory. - DuplicateTo TProtocol -} - -type TDebugProtocolFactory struct { - Underlying TProtocolFactory - LogPrefix string - Logger Logger -} - -// NewTDebugProtocolFactory creates a TDebugProtocolFactory. -// -// Deprecated: Please use NewTDebugProtocolFactoryWithLogger or the struct -// itself instead. This version will use the default logger from standard -// library. -func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory { - return &TDebugProtocolFactory{ - Underlying: underlying, - LogPrefix: logPrefix, - Logger: StdLogger(nil), - } -} - -// NewTDebugProtocolFactoryWithLogger creates a TDebugProtocolFactory. -func NewTDebugProtocolFactoryWithLogger(underlying TProtocolFactory, logPrefix string, logger Logger) *TDebugProtocolFactory { - return &TDebugProtocolFactory{ - Underlying: underlying, - LogPrefix: logPrefix, - Logger: logger, - } -} - -func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return &TDebugProtocol{ - Delegate: t.Underlying.GetProtocol(trans), - LogPrefix: t.LogPrefix, - Logger: fallbackLogger(t.Logger), - } -} - -func (tdp *TDebugProtocol) logf(format string, v ...interface{}) { - fallbackLogger(tdp.Logger)(fmt.Sprintf(format, v...)) -} - -func (tdp *TDebugProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { - err := tdp.Delegate.WriteMessageBegin(ctx, name, typeId, seqid) - tdp.logf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid) - } - return err -} -func (tdp *TDebugProtocol) WriteMessageEnd(ctx context.Context) error { - err := tdp.Delegate.WriteMessageEnd(ctx) - tdp.logf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMessageEnd(ctx) - } - return err -} -func (tdp *TDebugProtocol) WriteStructBegin(ctx context.Context, name string) error { - err := tdp.Delegate.WriteStructBegin(ctx, name) - tdp.logf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteStructBegin(ctx, name) - } - return err -} -func (tdp *TDebugProtocol) WriteStructEnd(ctx context.Context) error { - err := tdp.Delegate.WriteStructEnd(ctx) - tdp.logf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteStructEnd(ctx) - } - 
return err -} -func (tdp *TDebugProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { - err := tdp.Delegate.WriteFieldBegin(ctx, name, typeId, id) - tdp.logf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id) - } - return err -} -func (tdp *TDebugProtocol) WriteFieldEnd(ctx context.Context) error { - err := tdp.Delegate.WriteFieldEnd(ctx) - tdp.logf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteFieldEnd(ctx) - } - return err -} -func (tdp *TDebugProtocol) WriteFieldStop(ctx context.Context) error { - err := tdp.Delegate.WriteFieldStop(ctx) - tdp.logf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteFieldStop(ctx) - } - return err -} -func (tdp *TDebugProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { - err := tdp.Delegate.WriteMapBegin(ctx, keyType, valueType, size) - tdp.logf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size) - } - return err -} -func (tdp *TDebugProtocol) WriteMapEnd(ctx context.Context) error { - err := tdp.Delegate.WriteMapEnd(ctx) - tdp.logf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMapEnd(ctx) - } - return err -} -func (tdp *TDebugProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { - err := tdp.Delegate.WriteListBegin(ctx, elemType, size) - tdp.logf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteListBegin(ctx, elemType, size) - } - return err -} -func (tdp *TDebugProtocol) WriteListEnd(ctx context.Context) error { - err := tdp.Delegate.WriteListEnd(ctx) - tdp.logf("%sWriteListEnd() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteListEnd(ctx) - } - return err -} -func (tdp *TDebugProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { - err := tdp.Delegate.WriteSetBegin(ctx, elemType, size) - tdp.logf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size) - } - return err -} -func (tdp *TDebugProtocol) WriteSetEnd(ctx context.Context) error { - err := tdp.Delegate.WriteSetEnd(ctx) - tdp.logf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteSetEnd(ctx) - } - return err -} -func (tdp *TDebugProtocol) WriteBool(ctx context.Context, value bool) error { - err := tdp.Delegate.WriteBool(ctx, value) - tdp.logf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteBool(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteByte(ctx context.Context, value int8) error { - err := tdp.Delegate.WriteByte(ctx, value) - tdp.logf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteByte(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteI16(ctx context.Context, value int16) error { - err := tdp.Delegate.WriteI16(ctx, value) - tdp.logf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, 
err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteI16(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteI32(ctx context.Context, value int32) error { - err := tdp.Delegate.WriteI32(ctx, value) - tdp.logf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteI32(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteI64(ctx context.Context, value int64) error { - err := tdp.Delegate.WriteI64(ctx, value) - tdp.logf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteI64(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteDouble(ctx context.Context, value float64) error { - err := tdp.Delegate.WriteDouble(ctx, value) - tdp.logf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteDouble(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteString(ctx context.Context, value string) error { - err := tdp.Delegate.WriteString(ctx, value) - tdp.logf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteString(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteBinary(ctx context.Context, value []byte) error { - err := tdp.Delegate.WriteBinary(ctx, value) - tdp.logf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteBinary(ctx, value) - } - return err -} - -func (tdp *TDebugProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) { - name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin(ctx) - tdp.logf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid) - } - return -} -func (tdp *TDebugProtocol) ReadMessageEnd(ctx context.Context) (err error) { - err = tdp.Delegate.ReadMessageEnd(ctx) - tdp.logf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMessageEnd(ctx) - } - return -} -func (tdp *TDebugProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { - name, err = tdp.Delegate.ReadStructBegin(ctx) - tdp.logf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteStructBegin(ctx, name) - } - return -} -func (tdp *TDebugProtocol) ReadStructEnd(ctx context.Context) (err error) { - err = tdp.Delegate.ReadStructEnd(ctx) - tdp.logf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteStructEnd(ctx) - } - return -} -func (tdp *TDebugProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) { - name, typeId, id, err = tdp.Delegate.ReadFieldBegin(ctx) - tdp.logf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id) - } - return -} -func (tdp *TDebugProtocol) ReadFieldEnd(ctx context.Context) (err error) { - err = tdp.Delegate.ReadFieldEnd(ctx) - tdp.logf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteFieldEnd(ctx) - } - return -} -func (tdp *TDebugProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err 
error) { - keyType, valueType, size, err = tdp.Delegate.ReadMapBegin(ctx) - tdp.logf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size) - } - return -} -func (tdp *TDebugProtocol) ReadMapEnd(ctx context.Context) (err error) { - err = tdp.Delegate.ReadMapEnd(ctx) - tdp.logf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMapEnd(ctx) - } - return -} -func (tdp *TDebugProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { - elemType, size, err = tdp.Delegate.ReadListBegin(ctx) - tdp.logf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteListBegin(ctx, elemType, size) - } - return -} -func (tdp *TDebugProtocol) ReadListEnd(ctx context.Context) (err error) { - err = tdp.Delegate.ReadListEnd(ctx) - tdp.logf("%sReadListEnd() err=%#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteListEnd(ctx) - } - return -} -func (tdp *TDebugProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { - elemType, size, err = tdp.Delegate.ReadSetBegin(ctx) - tdp.logf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size) - } - return -} -func (tdp *TDebugProtocol) ReadSetEnd(ctx context.Context) (err error) { - err = tdp.Delegate.ReadSetEnd(ctx) - tdp.logf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteSetEnd(ctx) - } - return -} -func (tdp *TDebugProtocol) ReadBool(ctx context.Context) (value bool, err error) { - value, err = tdp.Delegate.ReadBool(ctx) - tdp.logf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteBool(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadByte(ctx context.Context) (value int8, err error) { - value, err = tdp.Delegate.ReadByte(ctx) - tdp.logf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteByte(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadI16(ctx context.Context) (value int16, err error) { - value, err = tdp.Delegate.ReadI16(ctx) - tdp.logf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteI16(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadI32(ctx context.Context) (value int32, err error) { - value, err = tdp.Delegate.ReadI32(ctx) - tdp.logf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteI32(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadI64(ctx context.Context) (value int64, err error) { - value, err = tdp.Delegate.ReadI64(ctx) - tdp.logf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteI64(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadDouble(ctx context.Context) (value float64, err error) { - value, err = tdp.Delegate.ReadDouble(ctx) - tdp.logf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteDouble(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadString(ctx 
context.Context) (value string, err error) { - value, err = tdp.Delegate.ReadString(ctx) - tdp.logf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteString(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { - value, err = tdp.Delegate.ReadBinary(ctx) - tdp.logf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteBinary(ctx, value) - } - return -} -func (tdp *TDebugProtocol) Skip(ctx context.Context, fieldType TType) (err error) { - err = tdp.Delegate.Skip(ctx, fieldType) - tdp.logf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.Skip(ctx, fieldType) - } - return -} -func (tdp *TDebugProtocol) Flush(ctx context.Context) (err error) { - err = tdp.Delegate.Flush(ctx) - tdp.logf("%sFlush() (err=%#v)", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.Flush(ctx) - } - return -} - -func (tdp *TDebugProtocol) Transport() TTransport { - return tdp.Delegate.Transport() -} - -// SetTConfiguration implements TConfigurationSetter for propagation. -func (tdp *TDebugProtocol) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(tdp.Delegate, conf) - PropagateTConfiguration(tdp.DuplicateTo, conf) -} - -var _ TConfigurationSetter = (*TDebugProtocol)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go deleted file mode 100644 index cefc7ecda5..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "context" - "sync" -) - -type TDeserializer struct { - Transport *TMemoryBuffer - Protocol TProtocol -} - -func NewTDeserializer() *TDeserializer { - transport := NewTMemoryBufferLen(1024) - protocol := NewTBinaryProtocolTransport(transport) - - return &TDeserializer{ - Transport: transport, - Protocol: protocol, - } -} - -func (t *TDeserializer) ReadString(ctx context.Context, msg TStruct, s string) (err error) { - t.Transport.Reset() - - err = nil - if _, err = t.Transport.Write([]byte(s)); err != nil { - return - } - if err = msg.Read(ctx, t.Protocol); err != nil { - return - } - return -} - -func (t *TDeserializer) Read(ctx context.Context, msg TStruct, b []byte) (err error) { - t.Transport.Reset() - - err = nil - if _, err = t.Transport.Write(b); err != nil { - return - } - if err = msg.Read(ctx, t.Protocol); err != nil { - return - } - return -} - -// TDeserializerPool is the thread-safe version of TDeserializer, -// it uses resource pool of TDeserializer under the hood. -// -// It must be initialized with either NewTDeserializerPool or -// NewTDeserializerPoolSizeFactory. -type TDeserializerPool struct { - pool sync.Pool -} - -// NewTDeserializerPool creates a new TDeserializerPool. -// -// NewTDeserializer can be used as the arg here. -func NewTDeserializerPool(f func() *TDeserializer) *TDeserializerPool { - return &TDeserializerPool{ - pool: sync.Pool{ - New: func() interface{} { - return f() - }, - }, - } -} - -// NewTDeserializerPoolSizeFactory creates a new TDeserializerPool with -// the given size and protocol factory. -// -// Note that the size is not the limit. The TMemoryBuffer underneath can grow -// larger than that. It just dictates the initial size. -func NewTDeserializerPoolSizeFactory(size int, factory TProtocolFactory) *TDeserializerPool { - return &TDeserializerPool{ - pool: sync.Pool{ - New: func() interface{} { - transport := NewTMemoryBufferLen(size) - protocol := factory.GetProtocol(transport) - - return &TDeserializer{ - Transport: transport, - Protocol: protocol, - } - }, - }, - } -} - -func (t *TDeserializerPool) ReadString(ctx context.Context, msg TStruct, s string) error { - d := t.pool.Get().(*TDeserializer) - defer t.pool.Put(d) - return d.ReadString(ctx, msg, s) -} - -func (t *TDeserializerPool) Read(ctx context.Context, msg TStruct, b []byte) error { - d := t.pool.Get().(*TDeserializer) - defer t.pool.Put(d) - return d.Read(ctx, msg, b) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go deleted file mode 100644 index 630b938f00..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" -) - -// Generic Thrift exception -type TException interface { - error - - TExceptionType() TExceptionType -} - -// Prepends additional information to an error without losing the Thrift exception interface -func PrependError(prepend string, err error) error { - msg := prepend + err.Error() - - var te TException - if errors.As(err, &te) { - switch te.TExceptionType() { - case TExceptionTypeTransport: - if t, ok := err.(TTransportException); ok { - return prependTTransportException(prepend, t) - } - case TExceptionTypeProtocol: - if t, ok := err.(TProtocolException); ok { - return prependTProtocolException(prepend, t) - } - case TExceptionTypeApplication: - var t TApplicationException - if errors.As(err, &t) { - return NewTApplicationException(t.TypeId(), msg) - } - } - - return wrappedTException{ - err: err, - msg: msg, - tExceptionType: te.TExceptionType(), - } - } - - return errors.New(msg) -} - -// TExceptionType is an enum type to categorize different "subclasses" of TExceptions. -type TExceptionType byte - -// TExceptionType values -const ( - TExceptionTypeUnknown TExceptionType = iota - TExceptionTypeCompiled // TExceptions defined in thrift files and generated by thrift compiler - TExceptionTypeApplication // TApplicationExceptions - TExceptionTypeProtocol // TProtocolExceptions - TExceptionTypeTransport // TTransportExceptions -) - -// WrapTException wraps an error into TException. -// -// If err is nil or already TException, it's returned as-is. -// Otherwise it will be wrapped into TException with TExceptionType() returning -// TExceptionTypeUnknown, and Unwrap() returning the original error. -func WrapTException(err error) TException { - if err == nil { - return nil - } - - if te, ok := err.(TException); ok { - return te - } - - return wrappedTException{ - err: err, - msg: err.Error(), - tExceptionType: TExceptionTypeUnknown, - } -} - -type wrappedTException struct { - err error - msg string - tExceptionType TExceptionType -} - -func (w wrappedTException) Error() string { - return w.msg -} - -func (w wrappedTException) TExceptionType() TExceptionType { - return w.tExceptionType -} - -func (w wrappedTException) Unwrap() error { - return w.err -} - -var _ TException = wrappedTException{} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go deleted file mode 100644 index f683e7f544..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bufio" - "bytes" - "context" - "encoding/binary" - "fmt" - "io" -) - -// Deprecated: Use DEFAULT_MAX_FRAME_SIZE instead. -const DEFAULT_MAX_LENGTH = 16384000 - -type TFramedTransport struct { - transport TTransport - - cfg *TConfiguration - - writeBuf bytes.Buffer - - reader *bufio.Reader - readBuf bytes.Buffer - - buffer [4]byte -} - -type tFramedTransportFactory struct { - factory TTransportFactory - cfg *TConfiguration -} - -// Deprecated: Use NewTFramedTransportFactoryConf instead. -func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory { - return NewTFramedTransportFactoryConf(factory, &TConfiguration{ - MaxFrameSize: DEFAULT_MAX_LENGTH, - - noPropagation: true, - }) -} - -// Deprecated: Use NewTFramedTransportFactoryConf instead. -func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory { - return NewTFramedTransportFactoryConf(factory, &TConfiguration{ - MaxFrameSize: int32(maxLength), - - noPropagation: true, - }) -} - -func NewTFramedTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory { - PropagateTConfiguration(factory, conf) - return &tFramedTransportFactory{ - factory: factory, - cfg: conf, - } -} - -func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) { - PropagateTConfiguration(base, p.cfg) - tt, err := p.factory.GetTransport(base) - if err != nil { - return nil, err - } - return NewTFramedTransportConf(tt, p.cfg), nil -} - -func (p *tFramedTransportFactory) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(p.factory, cfg) - p.cfg = cfg -} - -// Deprecated: Use NewTFramedTransportConf instead. -func NewTFramedTransport(transport TTransport) *TFramedTransport { - return NewTFramedTransportConf(transport, &TConfiguration{ - MaxFrameSize: DEFAULT_MAX_LENGTH, - - noPropagation: true, - }) -} - -// Deprecated: Use NewTFramedTransportConf instead. -func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport { - return NewTFramedTransportConf(transport, &TConfiguration{ - MaxFrameSize: int32(maxLength), - - noPropagation: true, - }) -} - -func NewTFramedTransportConf(transport TTransport, conf *TConfiguration) *TFramedTransport { - PropagateTConfiguration(transport, conf) - return &TFramedTransport{ - transport: transport, - reader: bufio.NewReader(transport), - cfg: conf, - } -} - -func (p *TFramedTransport) Open() error { - return p.transport.Open() -} - -func (p *TFramedTransport) IsOpen() bool { - return p.transport.IsOpen() -} - -func (p *TFramedTransport) Close() error { - return p.transport.Close() -} - -func (p *TFramedTransport) Read(buf []byte) (read int, err error) { - read, err = p.readBuf.Read(buf) - if err != io.EOF { - return - } - - // For bytes.Buffer.Read, EOF would only happen when read is zero, - // but still, do a sanity check, - // in case that behavior is changed in a future version of go stdlib. - // When that happens, just return nil error, - // and let the caller call Read again to read the next frame. 
- if read > 0 { - return read, nil - } - - // Reaching here means that the last Read finished the last frame, - // so we need to read the next frame into readBuf now. - if err = p.readFrame(); err != nil { - return read, err - } - newRead, err := p.Read(buf[read:]) - return read + newRead, err -} - -func (p *TFramedTransport) ReadByte() (c byte, err error) { - buf := p.buffer[:1] - _, err = p.Read(buf) - if err != nil { - return - } - c = buf[0] - return -} - -func (p *TFramedTransport) Write(buf []byte) (int, error) { - n, err := p.writeBuf.Write(buf) - return n, NewTTransportExceptionFromError(err) -} - -func (p *TFramedTransport) WriteByte(c byte) error { - return p.writeBuf.WriteByte(c) -} - -func (p *TFramedTransport) WriteString(s string) (n int, err error) { - return p.writeBuf.WriteString(s) -} - -func (p *TFramedTransport) Flush(ctx context.Context) error { - size := p.writeBuf.Len() - buf := p.buffer[:4] - binary.BigEndian.PutUint32(buf, uint32(size)) - _, err := p.transport.Write(buf) - if err != nil { - p.writeBuf.Reset() - return NewTTransportExceptionFromError(err) - } - if size > 0 { - if _, err := io.Copy(p.transport, &p.writeBuf); err != nil { - p.writeBuf.Reset() - return NewTTransportExceptionFromError(err) - } - } - err = p.transport.Flush(ctx) - return NewTTransportExceptionFromError(err) -} - -func (p *TFramedTransport) readFrame() error { - buf := p.buffer[:4] - if _, err := io.ReadFull(p.reader, buf); err != nil { - return err - } - size := binary.BigEndian.Uint32(buf) - if size < 0 || size > uint32(p.cfg.GetMaxFrameSize()) { - return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size)) - } - _, err := io.CopyN(&p.readBuf, p.reader, int64(size)) - return NewTTransportExceptionFromError(err) -} - -func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) { - return uint64(p.readBuf.Len()) -} - -// SetTConfiguration implements TConfigurationSetter. -func (p *TFramedTransport) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(p.transport, cfg) - p.cfg = cfg -} - -var ( - _ TConfigurationSetter = (*tFramedTransportFactory)(nil) - _ TConfigurationSetter = (*TFramedTransport)(nil) -) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go deleted file mode 100644 index ac9bd4882b..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "context" -) - -// See https://godoc.org/context#WithValue on why do we need the unexported typedefs. -type ( - headerKey string - headerKeyList int -) - -// Values for headerKeyList. -const ( - headerKeyListRead headerKeyList = iota - headerKeyListWrite -) - -// SetHeader sets a header in the context. -func SetHeader(ctx context.Context, key, value string) context.Context { - return context.WithValue( - ctx, - headerKey(key), - value, - ) -} - -// UnsetHeader unsets a previously set header in the context. -func UnsetHeader(ctx context.Context, key string) context.Context { - return context.WithValue( - ctx, - headerKey(key), - nil, - ) -} - -// GetHeader returns a value of the given header from the context. -func GetHeader(ctx context.Context, key string) (value string, ok bool) { - if v := ctx.Value(headerKey(key)); v != nil { - value, ok = v.(string) - } - return -} - -// SetReadHeaderList sets the key list of read THeaders in the context. -func SetReadHeaderList(ctx context.Context, keys []string) context.Context { - return context.WithValue( - ctx, - headerKeyListRead, - keys, - ) -} - -// GetReadHeaderList returns the key list of read THeaders from the context. -func GetReadHeaderList(ctx context.Context) []string { - if v := ctx.Value(headerKeyListRead); v != nil { - if value, ok := v.([]string); ok { - return value - } - } - return nil -} - -// SetWriteHeaderList sets the key list of THeaders to write in the context. -func SetWriteHeaderList(ctx context.Context, keys []string) context.Context { - return context.WithValue( - ctx, - headerKeyListWrite, - keys, - ) -} - -// GetWriteHeaderList returns the key list of THeaders to write from the context. -func GetWriteHeaderList(ctx context.Context) []string { - if v := ctx.Value(headerKeyListWrite); v != nil { - if value, ok := v.([]string); ok { - return value - } - } - return nil -} - -// AddReadTHeaderToContext adds the whole THeader headers into context. -func AddReadTHeaderToContext(ctx context.Context, headers THeaderMap) context.Context { - keys := make([]string, 0, len(headers)) - for key, value := range headers { - ctx = SetHeader(ctx, key, value) - keys = append(keys, key) - } - return SetReadHeaderList(ctx, keys) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go deleted file mode 100644 index 878041f8df..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go +++ /dev/null @@ -1,351 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "context" - "errors" -) - -// THeaderProtocol is a thrift protocol that implements THeader: -// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md -// -// It supports either binary or compact protocol as the wrapped protocol. -// -// Most of the THeader handlings are happening inside THeaderTransport. -type THeaderProtocol struct { - transport *THeaderTransport - - // Will be initialized on first read/write. - protocol TProtocol - - cfg *TConfiguration -} - -// Deprecated: Use NewTHeaderProtocolConf instead. -func NewTHeaderProtocol(trans TTransport) *THeaderProtocol { - return newTHeaderProtocolConf(trans, &TConfiguration{ - noPropagation: true, - }) -} - -// NewTHeaderProtocolConf creates a new THeaderProtocol from the underlying -// transport with given TConfiguration. -// -// The passed in transport will be wrapped with THeaderTransport. -// -// Note that THeaderTransport handles frame and zlib by itself, -// so the underlying transport should be a raw socket transports (TSocket or TSSLSocket), -// instead of rich transports like TZlibTransport or TFramedTransport. -func NewTHeaderProtocolConf(trans TTransport, conf *TConfiguration) *THeaderProtocol { - return newTHeaderProtocolConf(trans, conf) -} - -func newTHeaderProtocolConf(trans TTransport, cfg *TConfiguration) *THeaderProtocol { - t := NewTHeaderTransportConf(trans, cfg) - p, _ := t.cfg.GetTHeaderProtocolID().GetProtocol(t) - PropagateTConfiguration(p, cfg) - return &THeaderProtocol{ - transport: t, - protocol: p, - cfg: cfg, - } -} - -type tHeaderProtocolFactory struct { - cfg *TConfiguration -} - -func (f tHeaderProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return newTHeaderProtocolConf(trans, f.cfg) -} - -func (f *tHeaderProtocolFactory) SetTConfiguration(cfg *TConfiguration) { - f.cfg = cfg -} - -// Deprecated: Use NewTHeaderProtocolFactoryConf instead. -func NewTHeaderProtocolFactory() TProtocolFactory { - return NewTHeaderProtocolFactoryConf(&TConfiguration{ - noPropagation: true, - }) -} - -// NewTHeaderProtocolFactoryConf creates a factory for THeader with given -// TConfiguration. -func NewTHeaderProtocolFactoryConf(conf *TConfiguration) TProtocolFactory { - return tHeaderProtocolFactory{ - cfg: conf, - } -} - -// Transport returns the underlying transport. -// -// It's guaranteed to be of type *THeaderTransport. -func (p *THeaderProtocol) Transport() TTransport { - return p.transport -} - -// GetReadHeaders returns the THeaderMap read from transport. -func (p *THeaderProtocol) GetReadHeaders() THeaderMap { - return p.transport.GetReadHeaders() -} - -// SetWriteHeader sets a header for write. -func (p *THeaderProtocol) SetWriteHeader(key, value string) { - p.transport.SetWriteHeader(key, value) -} - -// ClearWriteHeaders clears all write headers previously set. -func (p *THeaderProtocol) ClearWriteHeaders() { - p.transport.ClearWriteHeaders() -} - -// AddTransform add a transform for writing. 
-func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error { - return p.transport.AddTransform(transform) -} - -func (p *THeaderProtocol) Flush(ctx context.Context) error { - return p.transport.Flush(ctx) -} - -func (p *THeaderProtocol) WriteMessageBegin(ctx context.Context, name string, typeID TMessageType, seqID int32) error { - newProto, err := p.transport.Protocol().GetProtocol(p.transport) - if err != nil { - return err - } - PropagateTConfiguration(newProto, p.cfg) - p.protocol = newProto - p.transport.SequenceID = seqID - return p.protocol.WriteMessageBegin(ctx, name, typeID, seqID) -} - -func (p *THeaderProtocol) WriteMessageEnd(ctx context.Context) error { - if err := p.protocol.WriteMessageEnd(ctx); err != nil { - return err - } - return p.transport.Flush(ctx) -} - -func (p *THeaderProtocol) WriteStructBegin(ctx context.Context, name string) error { - return p.protocol.WriteStructBegin(ctx, name) -} - -func (p *THeaderProtocol) WriteStructEnd(ctx context.Context) error { - return p.protocol.WriteStructEnd(ctx) -} - -func (p *THeaderProtocol) WriteFieldBegin(ctx context.Context, name string, typeID TType, id int16) error { - return p.protocol.WriteFieldBegin(ctx, name, typeID, id) -} - -func (p *THeaderProtocol) WriteFieldEnd(ctx context.Context) error { - return p.protocol.WriteFieldEnd(ctx) -} - -func (p *THeaderProtocol) WriteFieldStop(ctx context.Context) error { - return p.protocol.WriteFieldStop(ctx) -} - -func (p *THeaderProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { - return p.protocol.WriteMapBegin(ctx, keyType, valueType, size) -} - -func (p *THeaderProtocol) WriteMapEnd(ctx context.Context) error { - return p.protocol.WriteMapEnd(ctx) -} - -func (p *THeaderProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { - return p.protocol.WriteListBegin(ctx, elemType, size) -} - -func (p *THeaderProtocol) WriteListEnd(ctx context.Context) error { - return p.protocol.WriteListEnd(ctx) -} - -func (p *THeaderProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { - return p.protocol.WriteSetBegin(ctx, elemType, size) -} - -func (p *THeaderProtocol) WriteSetEnd(ctx context.Context) error { - return p.protocol.WriteSetEnd(ctx) -} - -func (p *THeaderProtocol) WriteBool(ctx context.Context, value bool) error { - return p.protocol.WriteBool(ctx, value) -} - -func (p *THeaderProtocol) WriteByte(ctx context.Context, value int8) error { - return p.protocol.WriteByte(ctx, value) -} - -func (p *THeaderProtocol) WriteI16(ctx context.Context, value int16) error { - return p.protocol.WriteI16(ctx, value) -} - -func (p *THeaderProtocol) WriteI32(ctx context.Context, value int32) error { - return p.protocol.WriteI32(ctx, value) -} - -func (p *THeaderProtocol) WriteI64(ctx context.Context, value int64) error { - return p.protocol.WriteI64(ctx, value) -} - -func (p *THeaderProtocol) WriteDouble(ctx context.Context, value float64) error { - return p.protocol.WriteDouble(ctx, value) -} - -func (p *THeaderProtocol) WriteString(ctx context.Context, value string) error { - return p.protocol.WriteString(ctx, value) -} - -func (p *THeaderProtocol) WriteBinary(ctx context.Context, value []byte) error { - return p.protocol.WriteBinary(ctx, value) -} - -// ReadFrame calls underlying THeaderTransport's ReadFrame function. 
-func (p *THeaderProtocol) ReadFrame(ctx context.Context) error { - return p.transport.ReadFrame(ctx) -} - -func (p *THeaderProtocol) ReadMessageBegin(ctx context.Context) (name string, typeID TMessageType, seqID int32, err error) { - if err = p.transport.ReadFrame(ctx); err != nil { - return - } - - var newProto TProtocol - newProto, err = p.transport.Protocol().GetProtocol(p.transport) - if err != nil { - var tAppExc TApplicationException - if !errors.As(err, &tAppExc) { - return - } - if e := p.protocol.WriteMessageBegin(ctx, "", EXCEPTION, seqID); e != nil { - return - } - if e := tAppExc.Write(ctx, p.protocol); e != nil { - return - } - if e := p.protocol.WriteMessageEnd(ctx); e != nil { - return - } - if e := p.transport.Flush(ctx); e != nil { - return - } - return - } - PropagateTConfiguration(newProto, p.cfg) - p.protocol = newProto - - return p.protocol.ReadMessageBegin(ctx) -} - -func (p *THeaderProtocol) ReadMessageEnd(ctx context.Context) error { - return p.protocol.ReadMessageEnd(ctx) -} - -func (p *THeaderProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { - return p.protocol.ReadStructBegin(ctx) -} - -func (p *THeaderProtocol) ReadStructEnd(ctx context.Context) error { - return p.protocol.ReadStructEnd(ctx) -} - -func (p *THeaderProtocol) ReadFieldBegin(ctx context.Context) (name string, typeID TType, id int16, err error) { - return p.protocol.ReadFieldBegin(ctx) -} - -func (p *THeaderProtocol) ReadFieldEnd(ctx context.Context) error { - return p.protocol.ReadFieldEnd(ctx) -} - -func (p *THeaderProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) { - return p.protocol.ReadMapBegin(ctx) -} - -func (p *THeaderProtocol) ReadMapEnd(ctx context.Context) error { - return p.protocol.ReadMapEnd(ctx) -} - -func (p *THeaderProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { - return p.protocol.ReadListBegin(ctx) -} - -func (p *THeaderProtocol) ReadListEnd(ctx context.Context) error { - return p.protocol.ReadListEnd(ctx) -} - -func (p *THeaderProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { - return p.protocol.ReadSetBegin(ctx) -} - -func (p *THeaderProtocol) ReadSetEnd(ctx context.Context) error { - return p.protocol.ReadSetEnd(ctx) -} - -func (p *THeaderProtocol) ReadBool(ctx context.Context) (value bool, err error) { - return p.protocol.ReadBool(ctx) -} - -func (p *THeaderProtocol) ReadByte(ctx context.Context) (value int8, err error) { - return p.protocol.ReadByte(ctx) -} - -func (p *THeaderProtocol) ReadI16(ctx context.Context) (value int16, err error) { - return p.protocol.ReadI16(ctx) -} - -func (p *THeaderProtocol) ReadI32(ctx context.Context) (value int32, err error) { - return p.protocol.ReadI32(ctx) -} - -func (p *THeaderProtocol) ReadI64(ctx context.Context) (value int64, err error) { - return p.protocol.ReadI64(ctx) -} - -func (p *THeaderProtocol) ReadDouble(ctx context.Context) (value float64, err error) { - return p.protocol.ReadDouble(ctx) -} - -func (p *THeaderProtocol) ReadString(ctx context.Context) (value string, err error) { - return p.protocol.ReadString(ctx) -} - -func (p *THeaderProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { - return p.protocol.ReadBinary(ctx) -} - -func (p *THeaderProtocol) Skip(ctx context.Context, fieldType TType) error { - return p.protocol.Skip(ctx, fieldType) -} - -// SetTConfiguration implements TConfigurationSetter. 
-func (p *THeaderProtocol) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(p.transport, cfg) - PropagateTConfiguration(p.protocol, cfg) - p.cfg = cfg -} - -var ( - _ TConfigurationSetter = (*tHeaderProtocolFactory)(nil) - _ TConfigurationSetter = (*THeaderProtocol)(nil) -) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go deleted file mode 100644 index 6a99535a45..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go +++ /dev/null @@ -1,809 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bufio" - "bytes" - "compress/zlib" - "context" - "encoding/binary" - "errors" - "fmt" - "io" -) - -// Size in bytes for 32-bit ints. -const size32 = 4 - -type headerMeta struct { - MagicFlags uint32 - SequenceID int32 - HeaderLength uint16 -} - -const headerMetaSize = 10 - -type clientType int - -const ( - clientUnknown clientType = iota - clientHeaders - clientFramedBinary - clientUnframedBinary - clientFramedCompact - clientUnframedCompact -) - -// Constants defined in THeader format: -// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md -const ( - THeaderHeaderMagic uint32 = 0x0fff0000 - THeaderHeaderMask uint32 = 0xffff0000 - THeaderFlagsMask uint32 = 0x0000ffff - THeaderMaxFrameSize uint32 = 0x3fffffff -) - -// THeaderMap is the type of the header map in THeader transport. -type THeaderMap map[string]string - -// THeaderProtocolID is the wrapped protocol id used in THeader. -type THeaderProtocolID int32 - -// Supported THeaderProtocolID values. -const ( - THeaderProtocolBinary THeaderProtocolID = 0x00 - THeaderProtocolCompact THeaderProtocolID = 0x02 - THeaderProtocolDefault = THeaderProtocolBinary -) - -// Declared globally to avoid repetitive allocations, not really used. -var globalMemoryBuffer = NewTMemoryBuffer() - -// Validate checks whether the THeaderProtocolID is a valid/supported one. -func (id THeaderProtocolID) Validate() error { - _, err := id.GetProtocol(globalMemoryBuffer) - return err -} - -// GetProtocol gets the corresponding TProtocol from the wrapped protocol id. 
-func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) { - switch id { - default: - return nil, NewTApplicationException( - INVALID_PROTOCOL, - fmt.Sprintf("THeader protocol id %d not supported", id), - ) - case THeaderProtocolBinary: - return NewTBinaryProtocolTransport(trans), nil - case THeaderProtocolCompact: - return NewTCompactProtocol(trans), nil - } -} - -// THeaderTransformID defines the numeric id of the transform used. -type THeaderTransformID int32 - -// THeaderTransformID values. -// -// Values not defined here are not currently supported, namely HMAC and Snappy. -const ( - TransformNone THeaderTransformID = iota // 0, no special handling - TransformZlib // 1, zlib -) - -var supportedTransformIDs = map[THeaderTransformID]bool{ - TransformNone: true, - TransformZlib: true, -} - -// TransformReader is an io.ReadCloser that handles transforms reading. -type TransformReader struct { - io.Reader - - closers []io.Closer -} - -var _ io.ReadCloser = (*TransformReader)(nil) - -// NewTransformReaderWithCapacity initializes a TransformReader with expected -// closers capacity. -// -// If you don't know the closers capacity beforehand, just use -// -// &TransformReader{Reader: baseReader} -// -// instead would be sufficient. -func NewTransformReaderWithCapacity(baseReader io.Reader, capacity int) *TransformReader { - return &TransformReader{ - Reader: baseReader, - closers: make([]io.Closer, 0, capacity), - } -} - -// Close calls the underlying closers in appropriate order, -// stops at and returns the first error encountered. -func (tr *TransformReader) Close() error { - // Call closers in reversed order - for i := len(tr.closers) - 1; i >= 0; i-- { - if err := tr.closers[i].Close(); err != nil { - return err - } - } - return nil -} - -// AddTransform adds a transform. -func (tr *TransformReader) AddTransform(id THeaderTransformID) error { - switch id { - default: - return NewTApplicationException( - INVALID_TRANSFORM, - fmt.Sprintf("THeaderTransformID %d not supported", id), - ) - case TransformNone: - // no-op - case TransformZlib: - readCloser, err := zlib.NewReader(tr.Reader) - if err != nil { - return err - } - tr.Reader = readCloser - tr.closers = append(tr.closers, readCloser) - } - return nil -} - -// TransformWriter is an io.WriteCloser that handles transforms writing. -type TransformWriter struct { - io.Writer - - closers []io.Closer -} - -var _ io.WriteCloser = (*TransformWriter)(nil) - -// NewTransformWriter creates a new TransformWriter with base writer and transforms. -func NewTransformWriter(baseWriter io.Writer, transforms []THeaderTransformID) (io.WriteCloser, error) { - writer := &TransformWriter{ - Writer: baseWriter, - closers: make([]io.Closer, 0, len(transforms)), - } - for _, id := range transforms { - if err := writer.AddTransform(id); err != nil { - return nil, err - } - } - return writer, nil -} - -// Close calls the underlying closers in appropriate order, -// stops at and returns the first error encountered. -func (tw *TransformWriter) Close() error { - // Call closers in reversed order - for i := len(tw.closers) - 1; i >= 0; i-- { - if err := tw.closers[i].Close(); err != nil { - return err - } - } - return nil -} - -// AddTransform adds a transform. 
-func (tw *TransformWriter) AddTransform(id THeaderTransformID) error { - switch id { - default: - return NewTApplicationException( - INVALID_TRANSFORM, - fmt.Sprintf("THeaderTransformID %d not supported", id), - ) - case TransformNone: - // no-op - case TransformZlib: - writeCloser := zlib.NewWriter(tw.Writer) - tw.Writer = writeCloser - tw.closers = append(tw.closers, writeCloser) - } - return nil -} - -// THeaderInfoType is the type id of the info headers. -type THeaderInfoType int32 - -// Supported THeaderInfoType values. -const ( - _ THeaderInfoType = iota // Skip 0 - InfoKeyValue // 1 - // Rest of the info types are not supported. -) - -// THeaderTransport is a Transport mode that implements THeader. -// -// Note that THeaderTransport handles frame and zlib by itself, -// so the underlying transport should be a raw socket transports (TSocket or TSSLSocket), -// instead of rich transports like TZlibTransport or TFramedTransport. -type THeaderTransport struct { - SequenceID int32 - Flags uint32 - - transport TTransport - - // THeaderMap for read and write - readHeaders THeaderMap - writeHeaders THeaderMap - - // Reading related variables. - reader *bufio.Reader - // When frame is detected, we read the frame fully into frameBuffer. - frameBuffer bytes.Buffer - // When it's non-nil, Read should read from frameReader instead of - // reader, and EOF error indicates end of frame instead of end of all - // transport. - frameReader io.ReadCloser - - // Writing related variables - writeBuffer bytes.Buffer - writeTransforms []THeaderTransformID - - clientType clientType - protocolID THeaderProtocolID - cfg *TConfiguration - - // buffer is used in the following scenarios to avoid repetitive - // allocations, while 4 is big enough for all those scenarios: - // - // * header padding (max size 4) - // * write the frame size (size 4) - buffer [4]byte -} - -var _ TTransport = (*THeaderTransport)(nil) - -// Deprecated: Use NewTHeaderTransportConf instead. -func NewTHeaderTransport(trans TTransport) *THeaderTransport { - return NewTHeaderTransportConf(trans, &TConfiguration{ - noPropagation: true, - }) -} - -// NewTHeaderTransportConf creates THeaderTransport from the -// underlying transport, with given TConfiguration attached. -// -// If trans is already a *THeaderTransport, it will be returned as is, -// but with TConfiguration overridden by the value passed in. -// -// The protocol ID in TConfiguration is only useful for client transports. -// For servers, -// the protocol ID will be overridden again to the one set by the client, -// to ensure that servers always speak the same dialect as the client. -func NewTHeaderTransportConf(trans TTransport, conf *TConfiguration) *THeaderTransport { - if ht, ok := trans.(*THeaderTransport); ok { - ht.SetTConfiguration(conf) - return ht - } - PropagateTConfiguration(trans, conf) - return &THeaderTransport{ - transport: trans, - reader: bufio.NewReader(trans), - writeHeaders: make(THeaderMap), - protocolID: conf.GetTHeaderProtocolID(), - cfg: conf, - } -} - -// Open calls the underlying transport's Open function. -func (t *THeaderTransport) Open() error { - return t.transport.Open() -} - -// IsOpen calls the underlying transport's IsOpen function. -func (t *THeaderTransport) IsOpen() bool { - return t.transport.IsOpen() -} - -// ReadFrame tries to read the frame header, guess the client type, and handle -// unframed clients. -func (t *THeaderTransport) ReadFrame(ctx context.Context) error { - if !t.needReadFrame() { - // No need to read frame, skipping. 
- return nil - } - - // Peek and handle the first 32 bits. - // They could either be the length field of a framed message, - // or the first bytes of an unframed message. - var buf []byte - var err error - // This is also usually the first read from a connection, - // so handle retries around socket timeouts. - _, deadlineSet := ctx.Deadline() - for { - buf, err = t.reader.Peek(size32) - if deadlineSet && isTimeoutError(err) && ctx.Err() == nil { - // This is I/O timeout and we still have time, - // continue trying - continue - } - // For anything else, do not retry - break - } - if err != nil { - return err - } - - frameSize := binary.BigEndian.Uint32(buf) - if frameSize&VERSION_MASK == VERSION_1 { - t.clientType = clientUnframedBinary - return nil - } - if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION { - t.clientType = clientUnframedCompact - return nil - } - - // At this point it should be a framed message, - // sanity check on frameSize then discard the peeked part. - if frameSize > THeaderMaxFrameSize || frameSize > uint32(t.cfg.GetMaxFrameSize()) { - return NewTProtocolExceptionWithType( - SIZE_LIMIT, - errors.New("frame too large"), - ) - } - t.reader.Discard(size32) - - // Read the frame fully into frameBuffer. - _, err = io.CopyN(&t.frameBuffer, t.reader, int64(frameSize)) - if err != nil { - return err - } - t.frameReader = io.NopCloser(&t.frameBuffer) - - // Peek and handle the next 32 bits. - buf = t.frameBuffer.Bytes()[:size32] - version := binary.BigEndian.Uint32(buf) - if version&THeaderHeaderMask == THeaderHeaderMagic { - t.clientType = clientHeaders - return t.parseHeaders(ctx, frameSize) - } - if version&VERSION_MASK == VERSION_1 { - t.clientType = clientFramedBinary - return nil - } - if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION { - t.clientType = clientFramedCompact - return nil - } - if err := t.endOfFrame(); err != nil { - return err - } - return NewTProtocolExceptionWithType( - NOT_IMPLEMENTED, - errors.New("unsupported client transport type"), - ) -} - -// endOfFrame does end of frame handling. -// -// It closes frameReader, and also resets frame related states. -func (t *THeaderTransport) endOfFrame() error { - defer func() { - t.frameBuffer.Reset() - t.frameReader = nil - }() - return t.frameReader.Close() -} - -func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) error { - if t.clientType != clientHeaders { - return nil - } - - var err error - var meta headerMeta - if err = binary.Read(&t.frameBuffer, binary.BigEndian, &meta); err != nil { - return err - } - frameSize -= headerMetaSize - t.Flags = meta.MagicFlags & THeaderFlagsMask - t.SequenceID = meta.SequenceID - headerLength := int64(meta.HeaderLength) * 4 - if int64(frameSize) < headerLength { - return NewTProtocolExceptionWithType( - SIZE_LIMIT, - errors.New("header size is larger than the whole frame"), - ) - } - headerBuf := NewTMemoryBuffer() - _, err = io.CopyN(headerBuf, &t.frameBuffer, headerLength) - if err != nil { - return err - } - hp := NewTCompactProtocol(headerBuf) - hp.SetTConfiguration(t.cfg) - - // At this point the header is already read into headerBuf, - // and t.frameBuffer starts from the actual payload. 
- protoID, err := hp.readVarint32() - if err != nil { - return err - } - t.protocolID = THeaderProtocolID(protoID) - - var transformCount int32 - transformCount, err = hp.readVarint32() - if err != nil { - return err - } - if transformCount > 0 { - reader := NewTransformReaderWithCapacity( - &t.frameBuffer, - int(transformCount), - ) - t.frameReader = reader - transformIDs := make([]THeaderTransformID, transformCount) - for i := 0; i < int(transformCount); i++ { - id, err := hp.readVarint32() - if err != nil { - return err - } - transformIDs[i] = THeaderTransformID(id) - } - // The transform IDs on the wire was added based on the order of - // writing, so on the reading side we need to reverse the order. - for i := transformCount - 1; i >= 0; i-- { - id := transformIDs[i] - if err := reader.AddTransform(id); err != nil { - return err - } - } - } - - // The info part does not use the transforms yet, so it's - // important to continue using headerBuf. - headers := make(THeaderMap) - for { - infoType, err := hp.readVarint32() - if errors.Is(err, io.EOF) { - break - } - if err != nil { - return err - } - if THeaderInfoType(infoType) == InfoKeyValue { - count, err := hp.readVarint32() - if err != nil { - return err - } - for i := 0; i < int(count); i++ { - key, err := hp.ReadString(ctx) - if err != nil { - return err - } - value, err := hp.ReadString(ctx) - if err != nil { - return err - } - headers[key] = value - } - } else { - // Skip reading info section on the first - // unsupported info type. - break - } - } - t.readHeaders = headers - - return nil -} - -func (t *THeaderTransport) needReadFrame() bool { - if t.clientType == clientUnknown { - // This is a new connection that's never read before. - return true - } - if t.isFramed() && t.frameReader == nil { - // We just finished the last frame. - return true - } - return false -} - -func (t *THeaderTransport) Read(p []byte) (read int, err error) { - // Here using context.Background instead of a context passed in is safe. - // First is that there's no way to pass context into this function. - // Then, 99% of the case when calling this Read frame is already read - // into frameReader. ReadFrame here is more of preventing bugs that - // didn't call ReadFrame before calling Read. - err = t.ReadFrame(context.Background()) - if err != nil { - return - } - if t.frameReader != nil { - read, err = t.frameReader.Read(p) - if err == nil && t.frameBuffer.Len() <= 0 { - // the last Read finished the frame, do endOfFrame - // handling here. - err = t.endOfFrame() - } else if err == io.EOF { - err = t.endOfFrame() - if err != nil { - return - } - if read == 0 { - // Try to read the next frame when we hit EOF - // (end of frame) immediately. - // When we got here, it means the last read - // finished the previous frame, but didn't - // do endOfFrame handling yet. - // We have to read the next frame here, - // as otherwise we would return 0 and nil, - // which is a case not handled well by most - // protocol implementations. - return t.Read(p) - } - } - return - } - return t.reader.Read(p) -} - -// Write writes data to the write buffer. -// -// You need to call Flush to actually write them to the transport. -func (t *THeaderTransport) Write(p []byte) (int, error) { - return t.writeBuffer.Write(p) -} - -// Flush writes the appropriate header and the write buffer to the underlying transport. 
-func (t *THeaderTransport) Flush(ctx context.Context) error { - if t.writeBuffer.Len() == 0 { - return nil - } - - defer t.writeBuffer.Reset() - - switch t.clientType { - default: - fallthrough - case clientUnknown: - t.clientType = clientHeaders - fallthrough - case clientHeaders: - headers := NewTMemoryBuffer() - hp := NewTCompactProtocol(headers) - hp.SetTConfiguration(t.cfg) - if _, err := hp.writeVarint32(int32(t.protocolID)); err != nil { - return NewTTransportExceptionFromError(err) - } - if _, err := hp.writeVarint32(int32(len(t.writeTransforms))); err != nil { - return NewTTransportExceptionFromError(err) - } - for _, transform := range t.writeTransforms { - if _, err := hp.writeVarint32(int32(transform)); err != nil { - return NewTTransportExceptionFromError(err) - } - } - if len(t.writeHeaders) > 0 { - if _, err := hp.writeVarint32(int32(InfoKeyValue)); err != nil { - return NewTTransportExceptionFromError(err) - } - if _, err := hp.writeVarint32(int32(len(t.writeHeaders))); err != nil { - return NewTTransportExceptionFromError(err) - } - for key, value := range t.writeHeaders { - if err := hp.WriteString(ctx, key); err != nil { - return NewTTransportExceptionFromError(err) - } - if err := hp.WriteString(ctx, value); err != nil { - return NewTTransportExceptionFromError(err) - } - } - } - padding := 4 - headers.Len()%4 - if padding < 4 { - buf := t.buffer[:padding] - for i := range buf { - buf[i] = 0 - } - if _, err := headers.Write(buf); err != nil { - return NewTTransportExceptionFromError(err) - } - } - - var payload bytes.Buffer - meta := headerMeta{ - MagicFlags: THeaderHeaderMagic + t.Flags&THeaderFlagsMask, - SequenceID: t.SequenceID, - HeaderLength: uint16(headers.Len() / 4), - } - if err := binary.Write(&payload, binary.BigEndian, meta); err != nil { - return NewTTransportExceptionFromError(err) - } - if _, err := io.Copy(&payload, headers); err != nil { - return NewTTransportExceptionFromError(err) - } - - writer, err := NewTransformWriter(&payload, t.writeTransforms) - if err != nil { - return NewTTransportExceptionFromError(err) - } - if _, err := io.Copy(writer, &t.writeBuffer); err != nil { - return NewTTransportExceptionFromError(err) - } - if err := writer.Close(); err != nil { - return NewTTransportExceptionFromError(err) - } - - // First write frame length - buf := t.buffer[:size32] - binary.BigEndian.PutUint32(buf, uint32(payload.Len())) - if _, err := t.transport.Write(buf); err != nil { - return NewTTransportExceptionFromError(err) - } - // Then write the payload - if _, err := io.Copy(t.transport, &payload); err != nil { - return NewTTransportExceptionFromError(err) - } - - case clientFramedBinary, clientFramedCompact: - buf := t.buffer[:size32] - binary.BigEndian.PutUint32(buf, uint32(t.writeBuffer.Len())) - if _, err := t.transport.Write(buf); err != nil { - return NewTTransportExceptionFromError(err) - } - fallthrough - case clientUnframedBinary, clientUnframedCompact: - if _, err := io.Copy(t.transport, &t.writeBuffer); err != nil { - return NewTTransportExceptionFromError(err) - } - } - - select { - default: - case <-ctx.Done(): - return NewTTransportExceptionFromError(ctx.Err()) - } - - return t.transport.Flush(ctx) -} - -// Close closes the transport, along with its underlying transport. -func (t *THeaderTransport) Close() error { - if err := t.Flush(context.Background()); err != nil { - return err - } - return t.transport.Close() -} - -// RemainingBytes calls underlying transport's RemainingBytes. 
-// -// Even in framed cases, because of all the possible compression transforms -// involved, the remaining frame size is likely to be different from the actual -// remaining readable bytes, so we don't bother to keep tracking the remaining -// frame size by ourselves and just use the underlying transport's -// RemainingBytes directly. -func (t *THeaderTransport) RemainingBytes() uint64 { - return t.transport.RemainingBytes() -} - -// GetReadHeaders returns the THeaderMap read from transport. -func (t *THeaderTransport) GetReadHeaders() THeaderMap { - return t.readHeaders -} - -// SetWriteHeader sets a header for write. -func (t *THeaderTransport) SetWriteHeader(key, value string) { - t.writeHeaders[key] = value -} - -// ClearWriteHeaders clears all write headers previously set. -func (t *THeaderTransport) ClearWriteHeaders() { - t.writeHeaders = make(THeaderMap) -} - -// AddTransform add a transform for writing. -func (t *THeaderTransport) AddTransform(transform THeaderTransformID) error { - if !supportedTransformIDs[transform] { - return NewTProtocolExceptionWithType( - NOT_IMPLEMENTED, - fmt.Errorf("THeaderTransformID %d not supported", transform), - ) - } - t.writeTransforms = append(t.writeTransforms, transform) - return nil -} - -// Protocol returns the wrapped protocol id used in this THeaderTransport. -func (t *THeaderTransport) Protocol() THeaderProtocolID { - switch t.clientType { - default: - return t.protocolID - case clientFramedBinary, clientUnframedBinary: - return THeaderProtocolBinary - case clientFramedCompact, clientUnframedCompact: - return THeaderProtocolCompact - } -} - -func (t *THeaderTransport) isFramed() bool { - switch t.clientType { - default: - return false - case clientHeaders, clientFramedBinary, clientFramedCompact: - return true - } -} - -// SetTConfiguration implements TConfigurationSetter. -func (t *THeaderTransport) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(t.transport, cfg) - t.cfg = cfg -} - -// THeaderTransportFactory is a TTransportFactory implementation to create -// THeaderTransport. -// -// It also implements TConfigurationSetter. -type THeaderTransportFactory struct { - // The underlying factory, could be nil. - Factory TTransportFactory - - cfg *TConfiguration -} - -// Deprecated: Use NewTHeaderTransportFactoryConf instead. -func NewTHeaderTransportFactory(factory TTransportFactory) TTransportFactory { - return NewTHeaderTransportFactoryConf(factory, &TConfiguration{ - noPropagation: true, - }) -} - -// NewTHeaderTransportFactoryConf creates a new *THeaderTransportFactory with -// the given *TConfiguration. -func NewTHeaderTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory { - return &THeaderTransportFactory{ - Factory: factory, - - cfg: conf, - } -} - -// GetTransport implements TTransportFactory. -func (f *THeaderTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - if f.Factory != nil { - t, err := f.Factory.GetTransport(trans) - if err != nil { - return nil, err - } - return NewTHeaderTransportConf(t, f.cfg), nil - } - return NewTHeaderTransportConf(trans, f.cfg), nil -} - -// SetTConfiguration implements TConfigurationSetter. 
-func (f *THeaderTransportFactory) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(f.Factory, f.cfg) - f.cfg = cfg -} - -var ( - _ TConfigurationSetter = (*THeaderTransportFactory)(nil) - _ TConfigurationSetter = (*THeaderTransport)(nil) -) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go deleted file mode 100644 index 9a2cc98cc7..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bytes" - "context" - "errors" - "io" - "net/http" - "net/url" - "strconv" -) - -// Default to using the shared http client. Library users are -// free to change this global client or specify one through -// THttpClientOptions. -var DefaultHttpClient *http.Client = http.DefaultClient - -type THttpClient struct { - client *http.Client - response *http.Response - url *url.URL - requestBuffer *bytes.Buffer - header http.Header - nsecConnectTimeout int64 - nsecReadTimeout int64 -} - -type THttpClientTransportFactory struct { - options THttpClientOptions - url string -} - -func (p *THttpClientTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - if trans != nil { - t, ok := trans.(*THttpClient) - if ok && t.url != nil { - return NewTHttpClientWithOptions(t.url.String(), p.options) - } - } - return NewTHttpClientWithOptions(p.url, p.options) -} - -type THttpClientOptions struct { - // If nil, DefaultHttpClient is used - Client *http.Client -} - -func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory { - return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) -} - -func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { - return &THttpClientTransportFactory{url: url, options: options} -} - -func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { - parsedURL, err := url.Parse(urlstr) - if err != nil { - return nil, err - } - buf := make([]byte, 0, 1024) - client := options.Client - if client == nil { - client = DefaultHttpClient - } - httpHeader := map[string][]string{"Content-Type": {"application/x-thrift"}} - return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil -} - -func NewTHttpClient(urlstr string) (TTransport, error) { - return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) -} - -// Set the HTTP Header for this specific Thrift Transport -// It is important 
that you first assert the TTransport as a THttpClient type -// like so: -// -// httpTrans := trans.(THttpClient) -// httpTrans.SetHeader("User-Agent","Thrift Client 1.0") -func (p *THttpClient) SetHeader(key string, value string) { - p.header.Add(key, value) -} - -// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport -// It is important that you first assert the TTransport as a THttpClient type -// like so: -// -// httpTrans := trans.(THttpClient) -// hdrValue := httpTrans.GetHeader("User-Agent") -func (p *THttpClient) GetHeader(key string) string { - return p.header.Get(key) -} - -// Deletes the HTTP Header given a Header Key for this specific Thrift Transport -// It is important that you first assert the TTransport as a THttpClient type -// like so: -// -// httpTrans := trans.(THttpClient) -// httpTrans.DelHeader("User-Agent") -func (p *THttpClient) DelHeader(key string) { - p.header.Del(key) -} - -func (p *THttpClient) Open() error { - // do nothing - return nil -} - -func (p *THttpClient) IsOpen() bool { - return p.response != nil || p.requestBuffer != nil -} - -func (p *THttpClient) closeResponse() error { - var err error - if p.response != nil && p.response.Body != nil { - // The docs specify that if keepalive is enabled and the response body is not - // read to completion the connection will never be returned to the pool and - // reused. Errors are being ignored here because if the connection is invalid - // and this fails for some reason, the Close() method will do any remaining - // cleanup. - io.Copy(io.Discard, p.response.Body) - - err = p.response.Body.Close() - } - - p.response = nil - return err -} - -func (p *THttpClient) Close() error { - if p.requestBuffer != nil { - p.requestBuffer.Reset() - p.requestBuffer = nil - } - return p.closeResponse() -} - -func (p *THttpClient) Read(buf []byte) (int, error) { - if p.response == nil { - return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.") - } - n, err := p.response.Body.Read(buf) - if n > 0 && (err == nil || errors.Is(err, io.EOF)) { - return n, nil - } - return n, NewTTransportExceptionFromError(err) -} - -func (p *THttpClient) ReadByte() (c byte, err error) { - if p.response == nil { - return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.") - } - return readByte(p.response.Body) -} - -func (p *THttpClient) Write(buf []byte) (int, error) { - if p.requestBuffer == nil { - return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") - } - return p.requestBuffer.Write(buf) -} - -func (p *THttpClient) WriteByte(c byte) error { - if p.requestBuffer == nil { - return NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") - } - return p.requestBuffer.WriteByte(c) -} - -func (p *THttpClient) WriteString(s string) (n int, err error) { - if p.requestBuffer == nil { - return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") - } - return p.requestBuffer.WriteString(s) -} - -func (p *THttpClient) Flush(ctx context.Context) error { - // Close any previous response body to avoid leaking connections. - p.closeResponse() - - // Give up the ownership of the current request buffer to http request, - // and create a new buffer for the next request. 
- buf := p.requestBuffer - p.requestBuffer = new(bytes.Buffer) - req, err := http.NewRequest("POST", p.url.String(), buf) - if err != nil { - return NewTTransportExceptionFromError(err) - } - req.Header = p.header - if ctx != nil { - req = req.WithContext(ctx) - } - response, err := p.client.Do(req) - if err != nil { - return NewTTransportExceptionFromError(err) - } - if response.StatusCode != http.StatusOK { - // Close the response to avoid leaking file descriptors. closeResponse does - // more than just call Close(), so temporarily assign it and reuse the logic. - p.response = response - p.closeResponse() - - // TODO(pomack) log bad response - return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode)) - } - p.response = response - return nil -} - -func (p *THttpClient) RemainingBytes() (num_bytes uint64) { - len := p.response.ContentLength - if len >= 0 { - return uint64(len) - } - - const maxSize = ^uint64(0) - return maxSize // the truth is, we just don't know unless framed is used -} - -// Deprecated: Use NewTHttpClientTransportFactory instead. -func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory { - return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) -} - -// Deprecated: Use NewTHttpClientTransportFactoryWithOptions instead. -func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { - return NewTHttpClientTransportFactoryWithOptions(url, options) -} - -// Deprecated: Use NewTHttpClientWithOptions instead. -func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { - return NewTHttpClientWithOptions(urlstr, options) -} - -// Deprecated: Use NewTHttpClient instead. -func NewTHttpPostClient(urlstr string) (TTransport, error) { - return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go deleted file mode 100644 index bc6922762a..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "compress/gzip" - "io" - "net/http" - "strings" - "sync" -) - -// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function -func NewThriftHandlerFunc(processor TProcessor, - inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) { - - return gz(func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Content-Type", "application/x-thrift") - - transport := NewStreamTransport(r.Body, w) - processor.Process(r.Context(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport)) - }) -} - -// gz transparently compresses the HTTP response if the client supports it. -func gz(handler http.HandlerFunc) http.HandlerFunc { - sp := &sync.Pool{ - New: func() interface{} { - return gzip.NewWriter(nil) - }, - } - - return func(w http.ResponseWriter, r *http.Request) { - if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { - handler(w, r) - return - } - w.Header().Set("Content-Encoding", "gzip") - gz := sp.Get().(*gzip.Writer) - gz.Reset(w) - defer func() { - _ = gz.Close() - sp.Put(gz) - }() - gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w} - handler(gzw, r) - } -} - -type gzipResponseWriter struct { - io.Writer - http.ResponseWriter -} - -func (w gzipResponseWriter) Write(b []byte) (int, error) { - return w.Writer.Write(b) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go deleted file mode 100644 index 1c477990fe..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "bufio" - "context" - "io" -) - -// StreamTransport is a Transport made of an io.Reader and/or an io.Writer -type StreamTransport struct { - io.Reader - io.Writer - isReadWriter bool - closed bool -} - -type StreamTransportFactory struct { - Reader io.Reader - Writer io.Writer - isReadWriter bool -} - -func (p *StreamTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - if trans != nil { - t, ok := trans.(*StreamTransport) - if ok { - if t.isReadWriter { - return NewStreamTransportRW(t.Reader.(io.ReadWriter)), nil - } - if t.Reader != nil && t.Writer != nil { - return NewStreamTransport(t.Reader, t.Writer), nil - } - if t.Reader != nil && t.Writer == nil { - return NewStreamTransportR(t.Reader), nil - } - if t.Reader == nil && t.Writer != nil { - return NewStreamTransportW(t.Writer), nil - } - return &StreamTransport{}, nil - } - } - if p.isReadWriter { - return NewStreamTransportRW(p.Reader.(io.ReadWriter)), nil - } - if p.Reader != nil && p.Writer != nil { - return NewStreamTransport(p.Reader, p.Writer), nil - } - if p.Reader != nil && p.Writer == nil { - return NewStreamTransportR(p.Reader), nil - } - if p.Reader == nil && p.Writer != nil { - return NewStreamTransportW(p.Writer), nil - } - return &StreamTransport{}, nil -} - -func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory { - return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter} -} - -func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport { - return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)} -} - -func NewStreamTransportR(r io.Reader) *StreamTransport { - return &StreamTransport{Reader: bufio.NewReader(r)} -} - -func NewStreamTransportW(w io.Writer) *StreamTransport { - return &StreamTransport{Writer: bufio.NewWriter(w)} -} - -func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport { - bufrw := bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw)) - return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true} -} - -func (p *StreamTransport) IsOpen() bool { - return !p.closed -} - -// implicitly opened on creation, can't be reopened once closed -func (p *StreamTransport) Open() error { - if !p.closed { - return NewTTransportException(ALREADY_OPEN, "StreamTransport already open.") - } else { - return NewTTransportException(NOT_OPEN, "cannot reopen StreamTransport.") - } -} - -// Closes both the input and output streams. -func (p *StreamTransport) Close() error { - if p.closed { - return NewTTransportException(NOT_OPEN, "StreamTransport already closed.") - } - p.closed = true - closedReader := false - if p.Reader != nil { - c, ok := p.Reader.(io.Closer) - if ok { - e := c.Close() - closedReader = true - if e != nil { - return e - } - } - p.Reader = nil - } - if p.Writer != nil && (!closedReader || !p.isReadWriter) { - c, ok := p.Writer.(io.Closer) - if ok { - e := c.Close() - if e != nil { - return e - } - } - p.Writer = nil - } - return nil -} - -// Flushes the underlying output stream if not null. 
-func (p *StreamTransport) Flush(ctx context.Context) error { - if p.Writer == nil { - return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream") - } - f, ok := p.Writer.(Flusher) - if ok { - err := f.Flush() - if err != nil { - return NewTTransportExceptionFromError(err) - } - } - return nil -} - -func (p *StreamTransport) Read(c []byte) (n int, err error) { - n, err = p.Reader.Read(c) - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) ReadByte() (c byte, err error) { - f, ok := p.Reader.(io.ByteReader) - if ok { - c, err = f.ReadByte() - } else { - c, err = readByte(p.Reader) - } - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) Write(c []byte) (n int, err error) { - n, err = p.Writer.Write(c) - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) WriteByte(c byte) (err error) { - f, ok := p.Writer.(io.ByteWriter) - if ok { - err = f.WriteByte(c) - } else { - err = writeByte(p.Writer, c) - } - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) WriteString(s string) (n int, err error) { - f, ok := p.Writer.(stringWriter) - if ok { - n, err = f.WriteString(s) - } else { - n, err = p.Writer.Write([]byte(s)) - } - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) RemainingBytes() (num_bytes uint64) { - const maxSize = ^uint64(0) - return maxSize // the truth is, we just don't know unless framed is used -} - -// SetTConfiguration implements TConfigurationSetter for propagation. -func (p *StreamTransport) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(p.Reader, conf) - PropagateTConfiguration(p.Writer, conf) -} - -var _ TConfigurationSetter = (*StreamTransport)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go deleted file mode 100644 index 8e59d16cfd..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go +++ /dev/null @@ -1,591 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "encoding/base64" - "fmt" -) - -const ( - THRIFT_JSON_PROTOCOL_VERSION = 1 -) - -// for references to _ParseContext see tsimplejson_protocol.go - -// JSON protocol implementation for thrift. 
-// Utilizes Simple JSON protocol -// -type TJSONProtocol struct { - *TSimpleJSONProtocol -} - -// Constructor -func NewTJSONProtocol(t TTransport) *TJSONProtocol { - v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)} - v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL) - v.dumpContext.push(_CONTEXT_IN_TOPLEVEL) - return v -} - -// Factory -type TJSONProtocolFactory struct{} - -func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return NewTJSONProtocol(trans) -} - -func NewTJSONProtocolFactory() *TJSONProtocolFactory { - return &TJSONProtocolFactory{} -} - -func (p *TJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { - p.resetContextStack() // THRIFT-3735 - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteI32(ctx, THRIFT_JSON_PROTOCOL_VERSION); e != nil { - return e - } - if e := p.WriteString(ctx, name); e != nil { - return e - } - if e := p.WriteByte(ctx, int8(typeId)); e != nil { - return e - } - if e := p.WriteI32(ctx, seqId); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) WriteMessageEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteStructBegin(ctx context.Context, name string) error { - if e := p.OutputObjectBegin(); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) WriteStructEnd(ctx context.Context) error { - return p.OutputObjectEnd() -} - -func (p *TJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { - if e := p.WriteI16(ctx, id); e != nil { - return e - } - if e := p.OutputObjectBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(typeId) - if e1 != nil { - return e1 - } - if e := p.WriteString(ctx, s); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) WriteFieldEnd(ctx context.Context) error { - return p.OutputObjectEnd() -} - -func (p *TJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil } - -func (p *TJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(keyType) - if e1 != nil { - return e1 - } - if e := p.WriteString(ctx, s); e != nil { - return e - } - s, e1 = p.TypeIdToString(valueType) - if e1 != nil { - return e1 - } - if e := p.WriteString(ctx, s); e != nil { - return e - } - if e := p.WriteI64(ctx, int64(size)); e != nil { - return e - } - return p.OutputObjectBegin() -} - -func (p *TJSONProtocol) WriteMapEnd(ctx context.Context) error { - if e := p.OutputObjectEnd(); e != nil { - return e - } - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TJSONProtocol) WriteListEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TJSONProtocol) WriteSetEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteBool(ctx context.Context, b bool) error { - if b { - return p.WriteI32(ctx, 1) - } - return p.WriteI32(ctx, 0) -} - -func (p *TJSONProtocol) WriteByte(ctx context.Context, b int8) error { - return p.WriteI32(ctx, int32(b)) -} - -func (p *TJSONProtocol) WriteI16(ctx context.Context, v int16) error { - return p.WriteI32(ctx, 
int32(v)) -} - -func (p *TJSONProtocol) WriteI32(ctx context.Context, v int32) error { - return p.OutputI64(int64(v)) -} - -func (p *TJSONProtocol) WriteI64(ctx context.Context, v int64) error { - return p.OutputI64(int64(v)) -} - -func (p *TJSONProtocol) WriteDouble(ctx context.Context, v float64) error { - return p.OutputF64(v) -} - -func (p *TJSONProtocol) WriteString(ctx context.Context, v string) error { - return p.OutputString(v) -} - -func (p *TJSONProtocol) WriteBinary(ctx context.Context, v []byte) error { - // JSON library only takes in a string, - // not an arbitrary byte array, to ensure bytes are transmitted - // efficiently we must convert this into a valid JSON string - // therefore we use base64 encoding to avoid excessive escaping/quoting - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - writer := base64.NewEncoder(base64.StdEncoding, p.writer) - if _, e := writer.Write(v); e != nil { - p.writer.Reset(p.trans) // THRIFT-3735 - return NewTProtocolException(e) - } - if e := writer.Close(); e != nil { - return NewTProtocolException(e) - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - return p.OutputPostValue() -} - -// Reading methods. -func (p *TJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { - p.resetContextStack() // THRIFT-3735 - if isNull, err := p.ParseListBegin(); isNull || err != nil { - return name, typeId, seqId, err - } - version, err := p.ReadI32(ctx) - if err != nil { - return name, typeId, seqId, err - } - if version != THRIFT_JSON_PROTOCOL_VERSION { - e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION) - return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e) - - } - if name, err = p.ReadString(ctx); err != nil { - return name, typeId, seqId, err - } - bTypeId, err := p.ReadByte(ctx) - typeId = TMessageType(bTypeId) - if err != nil { - return name, typeId, seqId, err - } - if seqId, err = p.ReadI32(ctx); err != nil { - return name, typeId, seqId, err - } - return name, typeId, seqId, nil -} - -func (p *TJSONProtocol) ReadMessageEnd(ctx context.Context) error { - err := p.ParseListEnd() - return err -} - -func (p *TJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { - _, err = p.ParseObjectStart() - return "", err -} - -func (p *TJSONProtocol) ReadStructEnd(ctx context.Context) error { - return p.ParseObjectEnd() -} - -func (p *TJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) { - b, _ := p.reader.Peek(1) - if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] { - return "", STOP, -1, nil - } - fieldId, err := p.ReadI16(ctx) - if err != nil { - return "", STOP, fieldId, err - } - if _, err = p.ParseObjectStart(); err != nil { - return "", STOP, fieldId, err - } - sType, err := p.ReadString(ctx) - if err != nil { - return "", STOP, fieldId, err - } - fType, err := p.StringToTypeId(sType) - return "", fType, fieldId, err -} - -func (p *TJSONProtocol) ReadFieldEnd(ctx context.Context) error { - return p.ParseObjectEnd() -} - -func (p *TJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, VOID, 0, e - } - - // read keyType - sKeyType, e := p.ReadString(ctx) - if e != nil { - return keyType, 
valueType, size, e - } - keyType, e = p.StringToTypeId(sKeyType) - if e != nil { - return keyType, valueType, size, e - } - - // read valueType - sValueType, e := p.ReadString(ctx) - if e != nil { - return keyType, valueType, size, e - } - valueType, e = p.StringToTypeId(sValueType) - if e != nil { - return keyType, valueType, size, e - } - - // read size - iSize, e := p.ReadI64(ctx) - if e != nil { - return keyType, valueType, size, e - } - size = int(iSize) - - _, e = p.ParseObjectStart() - return keyType, valueType, size, e -} - -func (p *TJSONProtocol) ReadMapEnd(ctx context.Context) error { - e := p.ParseObjectEnd() - if e != nil { - return e - } - return p.ParseListEnd() -} - -func (p *TJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *TJSONProtocol) ReadListEnd(ctx context.Context) error { - return p.ParseListEnd() -} - -func (p *TJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *TJSONProtocol) ReadSetEnd(ctx context.Context) error { - return p.ParseListEnd() -} - -func (p *TJSONProtocol) ReadBool(ctx context.Context) (bool, error) { - value, err := p.ReadI32(ctx) - return (value != 0), err -} - -func (p *TJSONProtocol) ReadByte(ctx context.Context) (int8, error) { - v, err := p.ReadI64(ctx) - return int8(v), err -} - -func (p *TJSONProtocol) ReadI16(ctx context.Context) (int16, error) { - v, err := p.ReadI64(ctx) - return int16(v), err -} - -func (p *TJSONProtocol) ReadI32(ctx context.Context) (int32, error) { - v, err := p.ReadI64(ctx) - return int32(v), err -} - -func (p *TJSONProtocol) ReadI64(ctx context.Context) (int64, error) { - v, _, err := p.ParseI64() - return v, err -} - -func (p *TJSONProtocol) ReadDouble(ctx context.Context) (float64, error) { - v, _, err := p.ParseF64() - return v, err -} - -func (p *TJSONProtocol) ReadString(ctx context.Context) (string, error) { - var v string - if err := p.ParsePreValue(); err != nil { - return v, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseStringBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return v, p.ParsePostValue() -} - -func (p *TJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) { - var v []byte - if err := p.ParsePreValue(); err != nil { - return nil, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseBase64EncodedBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a 
JSON string, found unquoted data started with %s", string(f)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - - return v, p.ParsePostValue() -} - -func (p *TJSONProtocol) Flush(ctx context.Context) (err error) { - err = p.writer.Flush() - if err == nil { - err = p.trans.Flush(ctx) - } - return NewTProtocolException(err) -} - -func (p *TJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) { - return SkipDefaultDepth(ctx, p, fieldType) -} - -func (p *TJSONProtocol) Transport() TTransport { - return p.trans -} - -func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(elemType) - if e1 != nil { - return e1 - } - if e := p.OutputString(s); e != nil { - return e - } - if e := p.OutputI64(int64(size)); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, 0, e - } - // We don't really use the ctx in ReadString implementation, - // so this is safe for now. - // We might want to add context to ParseElemListBegin if we start to use - // ctx in ReadString implementation in the future. - sElemType, err := p.ReadString(context.Background()) - if err != nil { - return VOID, size, err - } - elemType, err = p.StringToTypeId(sElemType) - if err != nil { - return elemType, size, err - } - nSize, _, err2 := p.ParseI64() - size = int(nSize) - return elemType, size, err2 -} - -func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, 0, e - } - // We don't really use the ctx in ReadString implementation, - // so this is safe for now. - // We might want to add context to ParseElemListBegin if we start to use - // ctx in ReadString implementation in the future. 
- sElemType, err := p.ReadString(context.Background()) - if err != nil { - return VOID, size, err - } - elemType, err = p.StringToTypeId(sElemType) - if err != nil { - return elemType, size, err - } - nSize, _, err2 := p.ParseI64() - size = int(nSize) - return elemType, size, err2 -} - -func (p *TJSONProtocol) writeElemListBegin(elemType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(elemType) - if e1 != nil { - return e1 - } - if e := p.OutputString(s); e != nil { - return e - } - if e := p.OutputI64(int64(size)); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) { - switch byte(fieldType) { - case BOOL: - return "tf", nil - case BYTE: - return "i8", nil - case I16: - return "i16", nil - case I32: - return "i32", nil - case I64: - return "i64", nil - case DOUBLE: - return "dbl", nil - case STRING: - return "str", nil - case STRUCT: - return "rec", nil - case MAP: - return "map", nil - case SET: - return "set", nil - case LIST: - return "lst", nil - } - - e := fmt.Errorf("Unknown fieldType: %d", int(fieldType)) - return "", NewTProtocolExceptionWithType(INVALID_DATA, e) -} - -func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) { - switch fieldType { - case "tf": - return TType(BOOL), nil - case "i8": - return TType(BYTE), nil - case "i16": - return TType(I16), nil - case "i32": - return TType(I32), nil - case "i64": - return TType(I64), nil - case "dbl": - return TType(DOUBLE), nil - case "str": - return TType(STRING), nil - case "rec": - return TType(STRUCT), nil - case "map": - return TType(MAP), nil - case "set": - return TType(SET), nil - case "lst": - return TType(LIST), nil - } - - e := fmt.Errorf("Unknown type identifier: %s", fieldType) - return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e) -} - -var _ TConfigurationSetter = (*TJSONProtocol)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go deleted file mode 100644 index c42aac998b..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "log" - "os" - "testing" -) - -// Logger is a simple wrapper of a logging function. -// -// In reality the users might actually use different logging libraries, and they -// are not always compatible with each other. -// -// Logger is meant to be a simple common ground that it's easy to wrap whatever -// logging library they use into. 
-// -// See https://issues.apache.org/jira/browse/THRIFT-4985 for the design -// discussion behind it. -type Logger func(msg string) - -// NopLogger is a Logger implementation that does nothing. -func NopLogger(msg string) {} - -// StdLogger wraps stdlib log package into a Logger. -// -// If logger passed in is nil, it will fallback to use stderr and default flags. -func StdLogger(logger *log.Logger) Logger { - if logger == nil { - logger = log.New(os.Stderr, "", log.LstdFlags) - } - return func(msg string) { - logger.Print(msg) - } -} - -// TestLogger is a Logger implementation can be used in test codes. -// -// It fails the test when being called. -func TestLogger(tb testing.TB) Logger { - return func(msg string) { - tb.Errorf("logger called with msg: %q", msg) - } -} - -func fallbackLogger(logger Logger) Logger { - if logger == nil { - return StdLogger(nil) - } - return logger -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go deleted file mode 100644 index 5936d27303..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bytes" - "context" -) - -// Memory buffer-based implementation of the TTransport interface. 
-type TMemoryBuffer struct { - *bytes.Buffer - size int -} - -type TMemoryBufferTransportFactory struct { - size int -} - -func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - if trans != nil { - t, ok := trans.(*TMemoryBuffer) - if ok && t.size > 0 { - return NewTMemoryBufferLen(t.size), nil - } - } - return NewTMemoryBufferLen(p.size), nil -} - -func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory { - return &TMemoryBufferTransportFactory{size: size} -} - -func NewTMemoryBuffer() *TMemoryBuffer { - return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0} -} - -func NewTMemoryBufferLen(size int) *TMemoryBuffer { - buf := make([]byte, 0, size) - return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size} -} - -func (p *TMemoryBuffer) IsOpen() bool { - return true -} - -func (p *TMemoryBuffer) Open() error { - return nil -} - -func (p *TMemoryBuffer) Close() error { - p.Buffer.Reset() - return nil -} - -// Flushing a memory buffer is a no-op -func (p *TMemoryBuffer) Flush(ctx context.Context) error { - return nil -} - -func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) { - return uint64(p.Buffer.Len()) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go deleted file mode 100644 index 25ab2e98a2..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Message type constants in the Thrift protocol. -type TMessageType int32 - -const ( - INVALID_TMESSAGE_TYPE TMessageType = 0 - CALL TMessageType = 1 - REPLY TMessageType = 2 - EXCEPTION TMessageType = 3 - ONEWAY TMessageType = 4 -) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go deleted file mode 100644 index 8a788df02b..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import "context" - -// ProcessorMiddleware is a function that can be passed to WrapProcessor to wrap the -// TProcessorFunctions for that TProcessor. -// -// Middlewares are passed in the name of the function as set in the processor -// map of the TProcessor. -type ProcessorMiddleware func(name string, next TProcessorFunction) TProcessorFunction - -// WrapProcessor takes an existing TProcessor and wraps each of its inner -// TProcessorFunctions with the middlewares passed in and returns it. -// -// Middlewares will be called in the order that they are defined: -// -// 1. Middlewares[0] -// 2. Middlewares[1] -// ... -// N. Middlewares[n] -func WrapProcessor(processor TProcessor, middlewares ...ProcessorMiddleware) TProcessor { - for name, processorFunc := range processor.ProcessorMap() { - wrapped := processorFunc - // Add middlewares in reverse so the first in the list is the outermost. - for i := len(middlewares) - 1; i >= 0; i-- { - wrapped = middlewares[i](name, wrapped) - } - processor.AddToProcessorMap(name, wrapped) - } - return processor -} - -// WrappedTProcessorFunction is a convenience struct that implements the -// TProcessorFunction interface that can be used when implementing custom -// Middleware. -type WrappedTProcessorFunction struct { - // Wrapped is called by WrappedTProcessorFunction.Process and should be a - // "wrapped" call to a base TProcessorFunc.Process call. - Wrapped func(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) -} - -// Process implements the TProcessorFunction interface using p.Wrapped. -func (p WrappedTProcessorFunction) Process(ctx context.Context, seqID int32, in, out TProtocol) (bool, TException) { - return p.Wrapped(ctx, seqID, in, out) -} - -// verify that WrappedTProcessorFunction implements TProcessorFunction -var ( - _ TProcessorFunction = WrappedTProcessorFunction{} - _ TProcessorFunction = (*WrappedTProcessorFunction)(nil) -) - -// ClientMiddleware can be passed to WrapClient in order to wrap TClient calls -// with custom middleware. -type ClientMiddleware func(TClient) TClient - -// WrappedTClient is a convenience struct that implements the TClient interface -// using inner Wrapped function. -// -// This is provided to aid in developing ClientMiddleware. -type WrappedTClient struct { - Wrapped func(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) -} - -// Call implements the TClient interface by calling and returning c.Wrapped. -func (c WrappedTClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) { - return c.Wrapped(ctx, method, args, result) -} - -// verify that WrappedTClient implements TClient -var ( - _ TClient = WrappedTClient{} - _ TClient = (*WrappedTClient)(nil) -) - -// WrapClient wraps the given TClient in the given middlewares. -// -// Middlewares will be called in the order that they are defined: -// -// 1. Middlewares[0] -// 2. Middlewares[1] -// ... -// N. 
Middlewares[n] -func WrapClient(client TClient, middlewares ...ClientMiddleware) TClient { - // Add middlewares in reverse so the first in the list is the outermost. - for i := len(middlewares) - 1; i >= 0; i-- { - client = middlewares[i](client) - } - return client -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go deleted file mode 100644 index d542b23a99..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "fmt" - "strings" -) - -/* -TMultiplexedProtocol is a protocol-independent concrete decorator -that allows a Thrift client to communicate with a multiplexing Thrift server, -by prepending the service name to the function name during function calls. - -NOTE: THIS IS NOT USED BY SERVERS. On the server, use TMultiplexedProcessor to handle request -from a multiplexing client. - -This example uses a single socket transport to invoke two services: - -socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT) -transport := thrift.NewTFramedTransport(socket) -protocol := thrift.NewTBinaryProtocolTransport(transport) - -mp := thrift.NewTMultiplexedProtocol(protocol, "Calculator") -service := Calculator.NewCalculatorClient(mp) - -mp2 := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport") -service2 := WeatherReport.NewWeatherReportClient(mp2) - -err := transport.Open() -if err != nil { - t.Fatal("Unable to open client socket", err) -} - -fmt.Println(service.Add(2,2)) -fmt.Println(service2.GetTemperature()) -*/ - -type TMultiplexedProtocol struct { - TProtocol - serviceName string -} - -const MULTIPLEXED_SEPARATOR = ":" - -func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol { - return &TMultiplexedProtocol{ - TProtocol: protocol, - serviceName: serviceName, - } -} - -func (t *TMultiplexedProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { - if typeId == CALL || typeId == ONEWAY { - return t.TProtocol.WriteMessageBegin(ctx, t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid) - } else { - return t.TProtocol.WriteMessageBegin(ctx, name, typeId, seqid) - } -} - -/* -TMultiplexedProcessor is a TProcessor allowing -a single TServer to provide multiple services. 
- -To do so, you instantiate the processor and then register additional -processors with it, as shown in the following example: - -var processor = thrift.NewTMultiplexedProcessor() - -firstProcessor := -processor.RegisterProcessor("FirstService", firstProcessor) - -processor.registerProcessor( - "Calculator", - Calculator.NewCalculatorProcessor(&CalculatorHandler{}), -) - -processor.registerProcessor( - "WeatherReport", - WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}), -) - -serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT) -if err != nil { - t.Fatal("Unable to create server socket", err) -} -server := thrift.NewTSimpleServer2(processor, serverTransport) -server.Serve(); -*/ - -type TMultiplexedProcessor struct { - serviceProcessorMap map[string]TProcessor - DefaultProcessor TProcessor -} - -func NewTMultiplexedProcessor() *TMultiplexedProcessor { - return &TMultiplexedProcessor{ - serviceProcessorMap: make(map[string]TProcessor), - } -} - -// ProcessorMap returns a mapping of "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}" -// to TProcessorFunction for any registered processors. If there is also a -// DefaultProcessor, the keys for the methods on that processor will simply be -// "{FunctionName}". If the TMultiplexedProcessor has both a DefaultProcessor and -// other registered processors, then the keys will be a mix of both formats. -// -// The implementation differs with other TProcessors in that the map returned is -// a new map, while most TProcessors just return their internal mapping directly. -// This means that edits to the map returned by this implementation of ProcessorMap -// will not affect the underlying mapping within the TMultiplexedProcessor. -func (t *TMultiplexedProcessor) ProcessorMap() map[string]TProcessorFunction { - processorFuncMap := make(map[string]TProcessorFunction) - for name, processor := range t.serviceProcessorMap { - for method, processorFunc := range processor.ProcessorMap() { - processorFuncName := name + MULTIPLEXED_SEPARATOR + method - processorFuncMap[processorFuncName] = processorFunc - } - } - if t.DefaultProcessor != nil { - for method, processorFunc := range t.DefaultProcessor.ProcessorMap() { - processorFuncMap[method] = processorFunc - } - } - return processorFuncMap -} - -// AddToProcessorMap updates the underlying TProcessor ProccessorMaps depending on -// the format of "name". -// -// If "name" is in the format "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}", -// then it sets the given TProcessorFunction on the inner TProcessor with the -// ProcessorName component using the FunctionName component. -// -// If "name" is just in the format "{FunctionName}", that is to say there is no -// MULTIPLEXED_SEPARATOR, and the TMultiplexedProcessor has a DefaultProcessor -// configured, then it will set the given TProcessorFunction on the DefaultProcessor -// using the given name. -// -// If there is not a TProcessor available for the given name, then this function -// does nothing. This can happen when there is no TProcessor registered for -// the given ProcessorName or if all that is given is the FunctionName and there -// is no DefaultProcessor set. 
-func (t *TMultiplexedProcessor) AddToProcessorMap(name string, processorFunc TProcessorFunction) { - processorName, funcName, found := strings.Cut(name, MULTIPLEXED_SEPARATOR) - if !found { - if t.DefaultProcessor != nil { - t.DefaultProcessor.AddToProcessorMap(processorName, processorFunc) - } - return - } - if processor, ok := t.serviceProcessorMap[processorName]; ok { - processor.AddToProcessorMap(funcName, processorFunc) - } - -} - -// verify that TMultiplexedProcessor implements TProcessor -var _ TProcessor = (*TMultiplexedProcessor)(nil) - -func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) { - t.DefaultProcessor = processor -} - -func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) { - if t.serviceProcessorMap == nil { - t.serviceProcessorMap = make(map[string]TProcessor) - } - t.serviceProcessorMap[name] = processor -} - -func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { - name, typeId, seqid, err := in.ReadMessageBegin(ctx) - if err != nil { - return false, NewTProtocolException(err) - } - if typeId != CALL && typeId != ONEWAY { - return false, NewTProtocolException(fmt.Errorf("Unexpected message type %v", typeId)) - } - // extract the service name - processorName, funcName, found := strings.Cut(name, MULTIPLEXED_SEPARATOR) - if !found { - if t.DefaultProcessor != nil { - smb := NewStoredMessageProtocol(in, name, typeId, seqid) - return t.DefaultProcessor.Process(ctx, smb, out) - } - return false, NewTProtocolException(fmt.Errorf( - "Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", - name, - )) - } - actualProcessor, ok := t.serviceProcessorMap[processorName] - if !ok { - return false, NewTProtocolException(fmt.Errorf( - "Service name not found: %s. Did you forget to call registerProcessor()?", - processorName, - )) - } - smb := NewStoredMessageProtocol(in, funcName, typeId, seqid) - return actualProcessor.Process(ctx, smb, out) -} - -// Protocol that use stored message for ReadMessageBegin -type storedMessageProtocol struct { - TProtocol - name string - typeId TMessageType - seqid int32 -} - -func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol { - return &storedMessageProtocol{protocol, name, typeId, seqid} -} - -func (s *storedMessageProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) { - return s.name, s.typeId, s.seqid, nil -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go deleted file mode 100644 index e4512d204c..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "math" - "strconv" -) - -type Numeric interface { - Int64() int64 - Int32() int32 - Int16() int16 - Byte() byte - Int() int - Float64() float64 - Float32() float32 - String() string - isNull() bool -} - -type numeric struct { - iValue int64 - dValue float64 - sValue string - isNil bool -} - -var ( - INFINITY Numeric - NEGATIVE_INFINITY Numeric - NAN Numeric - ZERO Numeric - NUMERIC_NULL Numeric -) - -func NewNumericFromDouble(dValue float64) Numeric { - if math.IsInf(dValue, 1) { - return INFINITY - } - if math.IsInf(dValue, -1) { - return NEGATIVE_INFINITY - } - if math.IsNaN(dValue) { - return NAN - } - iValue := int64(dValue) - sValue := strconv.FormatFloat(dValue, 'g', 10, 64) - isNil := false - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromI64(iValue int64) Numeric { - dValue := float64(iValue) - sValue := strconv.FormatInt(iValue, 10) - isNil := false - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromI32(iValue int32) Numeric { - dValue := float64(iValue) - sValue := strconv.FormatInt(int64(iValue), 10) - isNil := false - return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromString(sValue string) Numeric { - if sValue == INFINITY.String() { - return INFINITY - } - if sValue == NEGATIVE_INFINITY.String() { - return NEGATIVE_INFINITY - } - if sValue == NAN.String() { - return NAN - } - iValue, _ := strconv.ParseInt(sValue, 10, 64) - dValue, _ := strconv.ParseFloat(sValue, 64) - isNil := len(sValue) == 0 - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromJSONString(sValue string, isNull bool) Numeric { - if isNull { - return NewNullNumeric() - } - if sValue == JSON_INFINITY { - return INFINITY - } - if sValue == JSON_NEGATIVE_INFINITY { - return NEGATIVE_INFINITY - } - if sValue == JSON_NAN { - return NAN - } - iValue, _ := strconv.ParseInt(sValue, 10, 64) - dValue, _ := strconv.ParseFloat(sValue, 64) - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull} -} - -func NewNullNumeric() Numeric { - return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true} -} - -func (p *numeric) Int64() int64 { - return p.iValue -} - -func (p *numeric) Int32() int32 { - return int32(p.iValue) -} - -func (p *numeric) Int16() int16 { - return int16(p.iValue) -} - -func (p *numeric) Byte() byte { - return byte(p.iValue) -} - -func (p *numeric) Int() int { - return int(p.iValue) -} - -func (p *numeric) Float64() float64 { - return p.dValue -} - -func (p *numeric) Float32() float32 { - return float32(p.dValue) -} - -func (p *numeric) String() string { - return p.sValue -} - -func (p *numeric) isNull() bool { - return p.isNil -} - -func init() { - INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false} - NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false} - NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false} - 
ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false} - NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true} -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go deleted file mode 100644 index fb564ea819..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -/////////////////////////////////////////////////////////////////////////////// -// This file is home to helpers that convert from various base types to -// respective pointer types. This is necessary because Go does not permit -// references to constants, nor can a pointer type to base type be allocated -// and initialized in a single expression. -// -// E.g., this is not allowed: -// -// var ip *int = &5 -// -// But this *is* allowed: -// -// func IntPtr(i int) *int { return &i } -// var ip *int = IntPtr(5) -// -// Since pointers to base types are commonplace as [optional] fields in -// exported thrift structs, we factor such helpers here. -/////////////////////////////////////////////////////////////////////////////// - -func Float32Ptr(v float32) *float32 { return &v } -func Float64Ptr(v float64) *float64 { return &v } -func IntPtr(v int) *int { return &v } -func Int8Ptr(v int8) *int8 { return &v } -func Int16Ptr(v int16) *int16 { return &v } -func Int32Ptr(v int32) *int32 { return &v } -func Int64Ptr(v int64) *int64 { return &v } -func StringPtr(v string) *string { return &v } -func Uint32Ptr(v uint32) *uint32 { return &v } -func Uint64Ptr(v uint64) *uint64 { return &v } -func BoolPtr(v bool) *bool { return &v } -func ByteSlicePtr(v []byte) *[]byte { return &v } diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go deleted file mode 100644 index 245a3ccfc9..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import "context" - -// A processor is a generic object which operates upon an input stream and -// writes to some output stream. -type TProcessor interface { - Process(ctx context.Context, in, out TProtocol) (bool, TException) - - // ProcessorMap returns a map of thrift method names to TProcessorFunctions. - ProcessorMap() map[string]TProcessorFunction - - // AddToProcessorMap adds the given TProcessorFunction to the internal - // processor map at the given key. - // - // If one is already set at the given key, it will be replaced with the new - // TProcessorFunction. - AddToProcessorMap(string, TProcessorFunction) -} - -type TProcessorFunction interface { - Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) -} - -// The default processor factory just returns a singleton -// instance. -type TProcessorFactory interface { - GetProcessor(trans TTransport) TProcessor -} - -type tProcessorFactory struct { - processor TProcessor -} - -func NewTProcessorFactory(p TProcessor) TProcessorFactory { - return &tProcessorFactory{processor: p} -} - -func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor { - return p.processor -} - -/** - * The default processor factory just returns a singleton - * instance. - */ -type TProcessorFunctionFactory interface { - GetProcessorFunction(trans TTransport) TProcessorFunction -} - -type tProcessorFunctionFactory struct { - processor TProcessorFunction -} - -func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory { - return &tProcessorFunctionFactory{processor: p} -} - -func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction { - return p.processor -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go deleted file mode 100644 index 0a69bd4162..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "context" - "errors" - "fmt" -) - -const ( - VERSION_MASK = 0xffff0000 - VERSION_1 = 0x80010000 -) - -type TProtocol interface { - WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error - WriteMessageEnd(ctx context.Context) error - WriteStructBegin(ctx context.Context, name string) error - WriteStructEnd(ctx context.Context) error - WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error - WriteFieldEnd(ctx context.Context) error - WriteFieldStop(ctx context.Context) error - WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error - WriteMapEnd(ctx context.Context) error - WriteListBegin(ctx context.Context, elemType TType, size int) error - WriteListEnd(ctx context.Context) error - WriteSetBegin(ctx context.Context, elemType TType, size int) error - WriteSetEnd(ctx context.Context) error - WriteBool(ctx context.Context, value bool) error - WriteByte(ctx context.Context, value int8) error - WriteI16(ctx context.Context, value int16) error - WriteI32(ctx context.Context, value int32) error - WriteI64(ctx context.Context, value int64) error - WriteDouble(ctx context.Context, value float64) error - WriteString(ctx context.Context, value string) error - WriteBinary(ctx context.Context, value []byte) error - - ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) - ReadMessageEnd(ctx context.Context) error - ReadStructBegin(ctx context.Context) (name string, err error) - ReadStructEnd(ctx context.Context) error - ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) - ReadFieldEnd(ctx context.Context) error - ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) - ReadMapEnd(ctx context.Context) error - ReadListBegin(ctx context.Context) (elemType TType, size int, err error) - ReadListEnd(ctx context.Context) error - ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) - ReadSetEnd(ctx context.Context) error - ReadBool(ctx context.Context) (value bool, err error) - ReadByte(ctx context.Context) (value int8, err error) - ReadI16(ctx context.Context) (value int16, err error) - ReadI32(ctx context.Context) (value int32, err error) - ReadI64(ctx context.Context) (value int64, err error) - ReadDouble(ctx context.Context) (value float64, err error) - ReadString(ctx context.Context) (value string, err error) - ReadBinary(ctx context.Context) (value []byte, err error) - - Skip(ctx context.Context, fieldType TType) (err error) - Flush(ctx context.Context) (err error) - - Transport() TTransport -} - -// The maximum recursive depth the skip() function will traverse -const DEFAULT_RECURSION_DEPTH = 64 - -// Skips over the next data element from the provided input TProtocol object. -func SkipDefaultDepth(ctx context.Context, prot TProtocol, typeId TType) (err error) { - return Skip(ctx, prot, typeId, DEFAULT_RECURSION_DEPTH) -} - -// Skips over the next data element from the provided input TProtocol object. 
-func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (err error) { - - if maxDepth <= 0 { - return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded")) - } - - switch fieldType { - case BOOL: - _, err = self.ReadBool(ctx) - return - case BYTE: - _, err = self.ReadByte(ctx) - return - case I16: - _, err = self.ReadI16(ctx) - return - case I32: - _, err = self.ReadI32(ctx) - return - case I64: - _, err = self.ReadI64(ctx) - return - case DOUBLE: - _, err = self.ReadDouble(ctx) - return - case STRING: - _, err = self.ReadString(ctx) - return - case STRUCT: - if _, err = self.ReadStructBegin(ctx); err != nil { - return err - } - for { - _, typeId, _, _ := self.ReadFieldBegin(ctx) - if typeId == STOP { - break - } - err := Skip(ctx, self, typeId, maxDepth-1) - if err != nil { - return err - } - self.ReadFieldEnd(ctx) - } - return self.ReadStructEnd(ctx) - case MAP: - keyType, valueType, size, err := self.ReadMapBegin(ctx) - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(ctx, self, keyType, maxDepth-1) - if err != nil { - return err - } - self.Skip(ctx, valueType) - } - return self.ReadMapEnd(ctx) - case SET: - elemType, size, err := self.ReadSetBegin(ctx) - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(ctx, self, elemType, maxDepth-1) - if err != nil { - return err - } - } - return self.ReadSetEnd(ctx) - case LIST: - elemType, size, err := self.ReadListBegin(ctx) - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(ctx, self, elemType, maxDepth-1) - if err != nil { - return err - } - } - return self.ReadListEnd(ctx) - default: - return NewTProtocolExceptionWithType(INVALID_DATA, errors.New(fmt.Sprintf("Unknown data type %d", fieldType))) - } - return nil -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go deleted file mode 100644 index 9dcf4bfd94..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "encoding/base64" - "errors" -) - -// Thrift Protocol exception -type TProtocolException interface { - TException - TypeId() int -} - -const ( - UNKNOWN_PROTOCOL_EXCEPTION = 0 - INVALID_DATA = 1 - NEGATIVE_SIZE = 2 - SIZE_LIMIT = 3 - BAD_VERSION = 4 - NOT_IMPLEMENTED = 5 - DEPTH_LIMIT = 6 -) - -type tProtocolException struct { - typeId int - err error - msg string -} - -var _ TProtocolException = (*tProtocolException)(nil) - -func (tProtocolException) TExceptionType() TExceptionType { - return TExceptionTypeProtocol -} - -func (p *tProtocolException) TypeId() int { - return p.typeId -} - -func (p *tProtocolException) String() string { - return p.msg -} - -func (p *tProtocolException) Error() string { - return p.msg -} - -func (p *tProtocolException) Unwrap() error { - return p.err -} - -func NewTProtocolException(err error) TProtocolException { - if err == nil { - return nil - } - - if e, ok := err.(TProtocolException); ok { - return e - } - - if errors.As(err, new(base64.CorruptInputError)) { - return NewTProtocolExceptionWithType(INVALID_DATA, err) - } - - return NewTProtocolExceptionWithType(UNKNOWN_PROTOCOL_EXCEPTION, err) -} - -func NewTProtocolExceptionWithType(errType int, err error) TProtocolException { - if err == nil { - return nil - } - return &tProtocolException{ - typeId: errType, - err: err, - msg: err.Error(), - } -} - -func prependTProtocolException(prepend string, err TProtocolException) TProtocolException { - return &tProtocolException{ - typeId: err.TypeId(), - err: err, - msg: prepend + err.Error(), - } -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go deleted file mode 100644 index c40f796d88..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Factory interface for constructing protocol instances. -type TProtocolFactory interface { - GetProtocol(trans TTransport) TProtocol -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go deleted file mode 100644 index d884c6ac6c..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" -) - -// See https://godoc.org/context#WithValue on why do we need the unexported typedefs. -type responseHelperKey struct{} - -// TResponseHelper defines a object with a set of helper functions that can be -// retrieved from the context object passed into server handler functions. -// -// Use GetResponseHelper to retrieve the injected TResponseHelper implementation -// from the context object. -// -// The zero value of TResponseHelper is valid with all helper functions being -// no-op. -type TResponseHelper struct { - // THeader related functions - *THeaderResponseHelper -} - -// THeaderResponseHelper defines THeader related TResponseHelper functions. -// -// The zero value of *THeaderResponseHelper is valid with all helper functions -// being no-op. -type THeaderResponseHelper struct { - proto *THeaderProtocol -} - -// NewTHeaderResponseHelper creates a new THeaderResponseHelper from the -// underlying TProtocol. -func NewTHeaderResponseHelper(proto TProtocol) *THeaderResponseHelper { - if hp, ok := proto.(*THeaderProtocol); ok { - return &THeaderResponseHelper{ - proto: hp, - } - } - return nil -} - -// SetHeader sets a response header. -// -// It's no-op if the underlying protocol/transport does not support THeader. -func (h *THeaderResponseHelper) SetHeader(key, value string) { - if h != nil && h.proto != nil { - h.proto.SetWriteHeader(key, value) - } -} - -// ClearHeaders clears all the response headers previously set. -// -// It's no-op if the underlying protocol/transport does not support THeader. -func (h *THeaderResponseHelper) ClearHeaders() { - if h != nil && h.proto != nil { - h.proto.ClearWriteHeaders() - } -} - -// GetResponseHelper retrieves the TResponseHelper implementation injected into -// the context object. -// -// If no helper was found in the context object, a nop helper with ok == false -// will be returned. -func GetResponseHelper(ctx context.Context) (helper TResponseHelper, ok bool) { - if v := ctx.Value(responseHelperKey{}); v != nil { - helper, ok = v.(TResponseHelper) - } - return -} - -// SetResponseHelper injects TResponseHelper into the context object. 
-func SetResponseHelper(ctx context.Context, helper TResponseHelper) context.Context { - return context.WithValue(ctx, responseHelperKey{}, helper) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go deleted file mode 100644 index 83fdf29f5c..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" - "io" -) - -type RichTransport struct { - TTransport -} - -// Wraps Transport to provide TRichTransport interface -func NewTRichTransport(trans TTransport) *RichTransport { - return &RichTransport{trans} -} - -func (r *RichTransport) ReadByte() (c byte, err error) { - return readByte(r.TTransport) -} - -func (r *RichTransport) WriteByte(c byte) error { - return writeByte(r.TTransport, c) -} - -func (r *RichTransport) WriteString(s string) (n int, err error) { - return r.Write([]byte(s)) -} - -func (r *RichTransport) RemainingBytes() (num_bytes uint64) { - return r.TTransport.RemainingBytes() -} - -func readByte(r io.Reader) (c byte, err error) { - v := [1]byte{0} - n, err := r.Read(v[0:1]) - if n > 0 && (err == nil || errors.Is(err, io.EOF)) { - return v[0], nil - } - if n > 0 && err != nil { - return v[0], err - } - if err != nil { - return 0, err - } - return v[0], nil -} - -func writeByte(w io.Writer, c byte) error { - v := [1]byte{c} - _, err := w.Write(v[0:1]) - return err -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go deleted file mode 100644 index c44979094c..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "sync" -) - -type TSerializer struct { - Transport *TMemoryBuffer - Protocol TProtocol -} - -type TStruct interface { - Write(ctx context.Context, p TProtocol) error - Read(ctx context.Context, p TProtocol) error -} - -func NewTSerializer() *TSerializer { - transport := NewTMemoryBufferLen(1024) - protocol := NewTBinaryProtocolTransport(transport) - - return &TSerializer{ - Transport: transport, - Protocol: protocol, - } -} - -func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) { - t.Transport.Reset() - - if err = msg.Write(ctx, t.Protocol); err != nil { - return - } - - if err = t.Protocol.Flush(ctx); err != nil { - return - } - if err = t.Transport.Flush(ctx); err != nil { - return - } - - return t.Transport.String(), nil -} - -func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) { - t.Transport.Reset() - - if err = msg.Write(ctx, t.Protocol); err != nil { - return - } - - if err = t.Protocol.Flush(ctx); err != nil { - return - } - - if err = t.Transport.Flush(ctx); err != nil { - return - } - - b = append(b, t.Transport.Bytes()...) - return -} - -// TSerializerPool is the thread-safe version of TSerializer, it uses resource -// pool of TSerializer under the hood. -// -// It must be initialized with either NewTSerializerPool or -// NewTSerializerPoolSizeFactory. -type TSerializerPool struct { - pool sync.Pool -} - -// NewTSerializerPool creates a new TSerializerPool. -// -// NewTSerializer can be used as the arg here. -func NewTSerializerPool(f func() *TSerializer) *TSerializerPool { - return &TSerializerPool{ - pool: sync.Pool{ - New: func() interface{} { - return f() - }, - }, - } -} - -// NewTSerializerPoolSizeFactory creates a new TSerializerPool with the given -// size and protocol factory. -// -// Note that the size is not the limit. The TMemoryBuffer underneath can grow -// larger than that. It just dictates the initial size. 
-func NewTSerializerPoolSizeFactory(size int, factory TProtocolFactory) *TSerializerPool { - return &TSerializerPool{ - pool: sync.Pool{ - New: func() interface{} { - transport := NewTMemoryBufferLen(size) - protocol := factory.GetProtocol(transport) - - return &TSerializer{ - Transport: transport, - Protocol: protocol, - } - }, - }, - } -} - -func (t *TSerializerPool) WriteString(ctx context.Context, msg TStruct) (string, error) { - s := t.pool.Get().(*TSerializer) - defer t.pool.Put(s) - return s.WriteString(ctx, msg) -} - -func (t *TSerializerPool) Write(ctx context.Context, msg TStruct) ([]byte, error) { - s := t.pool.Get().(*TSerializer) - defer t.pool.Put(s) - return s.Write(ctx, msg) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go deleted file mode 100644 index f813fa3532..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -type TServer interface { - ProcessorFactory() TProcessorFactory - ServerTransport() TServerTransport - InputTransportFactory() TTransportFactory - OutputTransportFactory() TTransportFactory - InputProtocolFactory() TProtocolFactory - OutputProtocolFactory() TProtocolFactory - - // Starts the server - Serve() error - // Stops the server. This is optional on a per-implementation basis. Not - // all servers are required to be cleanly stoppable. - Stop() error -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go deleted file mode 100644 index 7dd24ae364..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "net" - "sync" - "time" -) - -type TServerSocket struct { - listener net.Listener - addr net.Addr - clientTimeout time.Duration - - // Protects the interrupted value to make it thread safe. - mu sync.RWMutex - interrupted bool -} - -func NewTServerSocket(listenAddr string) (*TServerSocket, error) { - return NewTServerSocketTimeout(listenAddr, 0) -} - -func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*TServerSocket, error) { - addr, err := net.ResolveTCPAddr("tcp", listenAddr) - if err != nil { - return nil, err - } - return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil -} - -// Creates a TServerSocket from a net.Addr -func NewTServerSocketFromAddrTimeout(addr net.Addr, clientTimeout time.Duration) *TServerSocket { - return &TServerSocket{addr: addr, clientTimeout: clientTimeout} -} - -func (p *TServerSocket) Listen() error { - p.mu.Lock() - defer p.mu.Unlock() - if p.IsListening() { - return nil - } - l, err := net.Listen(p.addr.Network(), p.addr.String()) - if err != nil { - return err - } - p.listener = l - return nil -} - -func (p *TServerSocket) Accept() (TTransport, error) { - p.mu.RLock() - interrupted := p.interrupted - p.mu.RUnlock() - - if interrupted { - return nil, errTransportInterrupted - } - - p.mu.Lock() - listener := p.listener - p.mu.Unlock() - if listener == nil { - return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") - } - - conn, err := listener.Accept() - if err != nil { - return nil, NewTTransportExceptionFromError(err) - } - return NewTSocketFromConnTimeout(conn, p.clientTimeout), nil -} - -// Checks whether the socket is listening. -func (p *TServerSocket) IsListening() bool { - return p.listener != nil -} - -// Connects the socket, creating a new socket object if necessary. -func (p *TServerSocket) Open() error { - p.mu.Lock() - defer p.mu.Unlock() - if p.IsListening() { - return NewTTransportException(ALREADY_OPEN, "Server socket already open") - } - if l, err := net.Listen(p.addr.Network(), p.addr.String()); err != nil { - return err - } else { - p.listener = l - } - return nil -} - -func (p *TServerSocket) Addr() net.Addr { - if p.listener != nil { - return p.listener.Addr() - } - return p.addr -} - -func (p *TServerSocket) Close() error { - var err error - p.mu.Lock() - if p.IsListening() { - err = p.listener.Close() - p.listener = nil - } - p.mu.Unlock() - return err -} - -func (p *TServerSocket) Interrupt() error { - p.mu.Lock() - p.interrupted = true - p.mu.Unlock() - p.Close() - - return nil -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go deleted file mode 100644 index 51c40b64a1..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Server transport. Object which provides client transports. -type TServerTransport interface { - Listen() error - Accept() (TTransport, error) - Close() error - - // Optional method implementation. This signals to the server transport - // that it should break out of any accept() or listen() that it is currently - // blocked on. This method, if implemented, MUST be thread safe, as it may - // be called from a different thread context than the other TServerTransport - // methods. - Interrupt() error -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go deleted file mode 100644 index d1a8154532..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go +++ /dev/null @@ -1,1373 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "bufio" - "bytes" - "context" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "strconv" -) - -type _ParseContext int - -const ( - _CONTEXT_INVALID _ParseContext = iota - _CONTEXT_IN_TOPLEVEL // 1 - _CONTEXT_IN_LIST_FIRST // 2 - _CONTEXT_IN_LIST // 3 - _CONTEXT_IN_OBJECT_FIRST // 4 - _CONTEXT_IN_OBJECT_NEXT_KEY // 5 - _CONTEXT_IN_OBJECT_NEXT_VALUE // 6 -) - -func (p _ParseContext) String() string { - switch p { - case _CONTEXT_IN_TOPLEVEL: - return "TOPLEVEL" - case _CONTEXT_IN_LIST_FIRST: - return "LIST-FIRST" - case _CONTEXT_IN_LIST: - return "LIST" - case _CONTEXT_IN_OBJECT_FIRST: - return "OBJECT-FIRST" - case _CONTEXT_IN_OBJECT_NEXT_KEY: - return "OBJECT-NEXT-KEY" - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - return "OBJECT-NEXT-VALUE" - } - return "UNKNOWN-PARSE-CONTEXT" -} - -type jsonContextStack []_ParseContext - -func (s *jsonContextStack) push(v _ParseContext) { - *s = append(*s, v) -} - -func (s jsonContextStack) peek() (v _ParseContext, ok bool) { - l := len(s) - if l <= 0 { - return - } - return s[l-1], true -} - -func (s *jsonContextStack) pop() (v _ParseContext, ok bool) { - l := len(*s) - if l <= 0 { - return - } - v = (*s)[l-1] - *s = (*s)[0 : l-1] - return v, true -} - -var errEmptyJSONContextStack = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Unexpected empty json protocol context stack")) - -// Simple JSON protocol implementation for thrift. -// -// This protocol produces/consumes a simple output format -// suitable for parsing by scripting languages. It should not be -// confused with the full-featured TJSONProtocol. -// -type TSimpleJSONProtocol struct { - trans TTransport - - parseContextStack jsonContextStack - dumpContext jsonContextStack - - writer *bufio.Writer - reader *bufio.Reader -} - -// Constructor -func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol { - v := &TSimpleJSONProtocol{trans: t, - writer: bufio.NewWriter(t), - reader: bufio.NewReader(t), - } - v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL) - v.dumpContext.push(_CONTEXT_IN_TOPLEVEL) - return v -} - -// Factory -type TSimpleJSONProtocolFactory struct{} - -func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return NewTSimpleJSONProtocol(trans) -} - -func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory { - return &TSimpleJSONProtocolFactory{} -} - -var ( - JSON_COMMA []byte - JSON_COLON []byte - JSON_LBRACE []byte - JSON_RBRACE []byte - JSON_LBRACKET []byte - JSON_RBRACKET []byte - JSON_QUOTE byte - JSON_QUOTE_BYTES []byte - JSON_NULL []byte - JSON_TRUE []byte - JSON_FALSE []byte - JSON_INFINITY string - JSON_NEGATIVE_INFINITY string - JSON_NAN string - JSON_INFINITY_BYTES []byte - JSON_NEGATIVE_INFINITY_BYTES []byte - JSON_NAN_BYTES []byte - json_nonbase_map_elem_bytes []byte -) - -func init() { - JSON_COMMA = []byte{','} - JSON_COLON = []byte{':'} - JSON_LBRACE = []byte{'{'} - JSON_RBRACE = []byte{'}'} - JSON_LBRACKET = []byte{'['} - JSON_RBRACKET = []byte{']'} - JSON_QUOTE = '"' - JSON_QUOTE_BYTES = []byte{'"'} - JSON_NULL = []byte{'n', 'u', 'l', 'l'} - JSON_TRUE = []byte{'t', 'r', 'u', 'e'} - JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'} - JSON_INFINITY = "Infinity" - JSON_NEGATIVE_INFINITY = "-Infinity" - JSON_NAN = "NaN" - JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} - JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} - JSON_NAN_BYTES = []byte{'N', 'a', 'N'} - json_nonbase_map_elem_bytes = []byte{']', ',', '['} -} - 
-func jsonQuote(s string) string { - b, _ := json.Marshal(s) - s1 := string(b) - return s1 -} - -func jsonUnquote(s string) (string, bool) { - s1 := new(string) - err := json.Unmarshal([]byte(s), s1) - return *s1, err == nil -} - -func mismatch(expected, actual string) error { - return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual) -} - -func (p *TSimpleJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { - p.resetContextStack() // THRIFT-3735 - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteString(ctx, name); e != nil { - return e - } - if e := p.WriteByte(ctx, int8(typeId)); e != nil { - return e - } - if e := p.WriteI32(ctx, seqId); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) WriteMessageEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) WriteStructBegin(ctx context.Context, name string) error { - if e := p.OutputObjectBegin(); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) WriteStructEnd(ctx context.Context) error { - return p.OutputObjectEnd() -} - -func (p *TSimpleJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { - if e := p.WriteString(ctx, name); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) WriteFieldEnd(ctx context.Context) error { - return nil -} - -func (p *TSimpleJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil } - -func (p *TSimpleJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteByte(ctx, int8(keyType)); e != nil { - return e - } - if e := p.WriteByte(ctx, int8(valueType)); e != nil { - return e - } - return p.WriteI32(ctx, int32(size)) -} - -func (p *TSimpleJSONProtocol) WriteMapEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TSimpleJSONProtocol) WriteListEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TSimpleJSONProtocol) WriteSetEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) WriteBool(ctx context.Context, b bool) error { - return p.OutputBool(b) -} - -func (p *TSimpleJSONProtocol) WriteByte(ctx context.Context, b int8) error { - return p.WriteI32(ctx, int32(b)) -} - -func (p *TSimpleJSONProtocol) WriteI16(ctx context.Context, v int16) error { - return p.WriteI32(ctx, int32(v)) -} - -func (p *TSimpleJSONProtocol) WriteI32(ctx context.Context, v int32) error { - return p.OutputI64(int64(v)) -} - -func (p *TSimpleJSONProtocol) WriteI64(ctx context.Context, v int64) error { - return p.OutputI64(int64(v)) -} - -func (p *TSimpleJSONProtocol) WriteDouble(ctx context.Context, v float64) error { - return p.OutputF64(v) -} - -func (p *TSimpleJSONProtocol) WriteString(ctx context.Context, v string) error { - return p.OutputString(v) -} - -func (p *TSimpleJSONProtocol) WriteBinary(ctx context.Context, v []byte) error { - // JSON library only takes in a string, - // not an arbitrary byte array, to ensure bytes are transmitted - // efficiently we must 
convert this into a valid JSON string - // therefore we use base64 encoding to avoid excessive escaping/quoting - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - writer := base64.NewEncoder(base64.StdEncoding, p.writer) - if _, e := writer.Write(v); e != nil { - p.writer.Reset(p.trans) // THRIFT-3735 - return NewTProtocolException(e) - } - if e := writer.Close(); e != nil { - return NewTProtocolException(e) - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - return p.OutputPostValue() -} - -// Reading methods. -func (p *TSimpleJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { - p.resetContextStack() // THRIFT-3735 - if isNull, err := p.ParseListBegin(); isNull || err != nil { - return name, typeId, seqId, err - } - if name, err = p.ReadString(ctx); err != nil { - return name, typeId, seqId, err - } - bTypeId, err := p.ReadByte(ctx) - typeId = TMessageType(bTypeId) - if err != nil { - return name, typeId, seqId, err - } - if seqId, err = p.ReadI32(ctx); err != nil { - return name, typeId, seqId, err - } - return name, typeId, seqId, nil -} - -func (p *TSimpleJSONProtocol) ReadMessageEnd(ctx context.Context) error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { - _, err = p.ParseObjectStart() - return "", err -} - -func (p *TSimpleJSONProtocol) ReadStructEnd(ctx context.Context) error { - return p.ParseObjectEnd() -} - -func (p *TSimpleJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) { - if err := p.ParsePreValue(); err != nil { - return "", STOP, 0, err - } - b, _ := p.reader.Peek(1) - if len(b) > 0 { - switch b[0] { - case JSON_RBRACE[0]: - return "", STOP, 0, nil - case JSON_QUOTE: - p.reader.ReadByte() - name, err := p.ParseStringBody() - // simplejson is not meant to be read back into thrift - // - see http://wiki.apache.org/thrift/ThriftUsageJava - // - use JSON instead - if err != nil { - return name, STOP, 0, err - } - return name, STOP, -1, p.ParsePostValue() - } - e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b)) - return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return "", STOP, 0, NewTProtocolException(io.EOF) -} - -func (p *TSimpleJSONProtocol) ReadFieldEnd(ctx context.Context) error { - return nil -} - -func (p *TSimpleJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, VOID, 0, e - } - - // read keyType - bKeyType, e := p.ReadByte(ctx) - keyType = TType(bKeyType) - if e != nil { - return keyType, valueType, size, e - } - - // read valueType - bValueType, e := p.ReadByte(ctx) - valueType = TType(bValueType) - if e != nil { - return keyType, valueType, size, e - } - - // read size - iSize, err := p.ReadI64(ctx) - size = int(iSize) - return keyType, valueType, size, err -} - -func (p *TSimpleJSONProtocol) ReadMapEnd(ctx context.Context) error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *TSimpleJSONProtocol) ReadListEnd(ctx context.Context) error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e 
error) { - return p.ParseElemListBegin() -} - -func (p *TSimpleJSONProtocol) ReadSetEnd(ctx context.Context) error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadBool(ctx context.Context) (bool, error) { - var value bool - - if err := p.ParsePreValue(); err != nil { - return value, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 { - switch f[0] { - case JSON_TRUE[0]: - b := make([]byte, len(JSON_TRUE)) - _, err := p.reader.Read(b) - if err != nil { - return false, NewTProtocolException(err) - } - if string(b) == string(JSON_TRUE) { - value = true - } else { - e := fmt.Errorf("Expected \"true\" but found: %s", string(b)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - break - case JSON_FALSE[0]: - b := make([]byte, len(JSON_FALSE)) - _, err := p.reader.Read(b) - if err != nil { - return false, NewTProtocolException(err) - } - if string(b) == string(JSON_FALSE) { - value = false - } else { - e := fmt.Errorf("Expected \"false\" but found: %s", string(b)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - break - case JSON_NULL[0]: - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return false, NewTProtocolException(err) - } - if string(b) == string(JSON_NULL) { - value = false - } else { - e := fmt.Errorf("Expected \"null\" but found: %s", string(b)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - default: - e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - return value, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ReadByte(ctx context.Context) (int8, error) { - v, err := p.ReadI64(ctx) - return int8(v), err -} - -func (p *TSimpleJSONProtocol) ReadI16(ctx context.Context) (int16, error) { - v, err := p.ReadI64(ctx) - return int16(v), err -} - -func (p *TSimpleJSONProtocol) ReadI32(ctx context.Context) (int32, error) { - v, err := p.ReadI64(ctx) - return int32(v), err -} - -func (p *TSimpleJSONProtocol) ReadI64(ctx context.Context) (int64, error) { - v, _, err := p.ParseI64() - return v, err -} - -func (p *TSimpleJSONProtocol) ReadDouble(ctx context.Context) (float64, error) { - v, _, err := p.ParseF64() - return v, err -} - -func (p *TSimpleJSONProtocol) ReadString(ctx context.Context) (string, error) { - var v string - if err := p.ParsePreValue(); err != nil { - return v, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseStringBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return v, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) { - var v []byte - if err := p.ParsePreValue(); err != nil { - return nil, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseBase64EncodedBody() - v = value - if err != nil { - return v, err - } - } else 
if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - - return v, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) Flush(ctx context.Context) (err error) { - return NewTProtocolException(p.writer.Flush()) -} - -func (p *TSimpleJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) { - return SkipDefaultDepth(ctx, p, fieldType) -} - -func (p *TSimpleJSONProtocol) Transport() TTransport { - return p.trans -} - -func (p *TSimpleJSONProtocol) OutputPreValue() error { - cxt, ok := p.dumpContext.peek() - if !ok { - return errEmptyJSONContextStack - } - switch cxt { - case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY: - if _, e := p.write(JSON_COMMA); e != nil { - return NewTProtocolException(e) - } - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - if _, e := p.write(JSON_COLON); e != nil { - return NewTProtocolException(e) - } - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputPostValue() error { - cxt, ok := p.dumpContext.peek() - if !ok { - return errEmptyJSONContextStack - } - switch cxt { - case _CONTEXT_IN_LIST_FIRST: - p.dumpContext.pop() - p.dumpContext.push(_CONTEXT_IN_LIST) - case _CONTEXT_IN_OBJECT_FIRST: - p.dumpContext.pop() - p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) - case _CONTEXT_IN_OBJECT_NEXT_KEY: - p.dumpContext.pop() - p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - p.dumpContext.pop() - p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_KEY) - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputBool(value bool) error { - if e := p.OutputPreValue(); e != nil { - return e - } - var v string - if value { - v = string(JSON_TRUE) - } else { - v = string(JSON_FALSE) - } - cxt, ok := p.dumpContext.peek() - if !ok { - return errEmptyJSONContextStack - } - switch cxt { - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - v = jsonQuote(v) - } - if e := p.OutputStringData(v); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputNull() error { - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_NULL); e != nil { - return NewTProtocolException(e) - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputF64(value float64) error { - if e := p.OutputPreValue(); e != nil { - return e - } - var v string - if math.IsNaN(value) { - v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE) - } else if math.IsInf(value, 1) { - v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE) - } else if math.IsInf(value, -1) { - v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE) - } else { - cxt, ok := p.dumpContext.peek() - if !ok { - return errEmptyJSONContextStack - } - v = strconv.FormatFloat(value, 'g', -1, 64) - switch cxt { - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - v = string(JSON_QUOTE) + v + string(JSON_QUOTE) - } - } - if e := p.OutputStringData(v); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputI64(value int64) error { - if e := p.OutputPreValue(); e != nil { - return e - } - 
cxt, ok := p.dumpContext.peek() - if !ok { - return errEmptyJSONContextStack - } - v := strconv.FormatInt(value, 10) - switch cxt { - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - v = jsonQuote(v) - } - if e := p.OutputStringData(v); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputString(s string) error { - if e := p.OutputPreValue(); e != nil { - return e - } - if e := p.OutputStringData(jsonQuote(s)); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputStringData(s string) error { - _, e := p.write([]byte(s)) - return NewTProtocolException(e) -} - -func (p *TSimpleJSONProtocol) OutputObjectBegin() error { - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_LBRACE); e != nil { - return NewTProtocolException(e) - } - p.dumpContext.push(_CONTEXT_IN_OBJECT_FIRST) - return nil -} - -func (p *TSimpleJSONProtocol) OutputObjectEnd() error { - if _, e := p.write(JSON_RBRACE); e != nil { - return NewTProtocolException(e) - } - _, ok := p.dumpContext.pop() - if !ok { - return errEmptyJSONContextStack - } - if e := p.OutputPostValue(); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputListBegin() error { - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_LBRACKET); e != nil { - return NewTProtocolException(e) - } - p.dumpContext.push(_CONTEXT_IN_LIST_FIRST) - return nil -} - -func (p *TSimpleJSONProtocol) OutputListEnd() error { - if _, e := p.write(JSON_RBRACKET); e != nil { - return NewTProtocolException(e) - } - _, ok := p.dumpContext.pop() - if !ok { - return errEmptyJSONContextStack - } - if e := p.OutputPostValue(); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.OutputI64(int64(elemType)); e != nil { - return e - } - if e := p.OutputI64(int64(size)); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) ParsePreValue() error { - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - cxt, ok := p.parseContextStack.peek() - if !ok { - return errEmptyJSONContextStack - } - b, _ := p.reader.Peek(1) - switch cxt { - case _CONTEXT_IN_LIST: - if len(b) > 0 { - switch b[0] { - case JSON_RBRACKET[0]: - return nil - case JSON_COMMA[0]: - p.reader.ReadByte() - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - return nil - default: - e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b)) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - case _CONTEXT_IN_OBJECT_NEXT_KEY: - if len(b) > 0 { - switch b[0] { - case JSON_RBRACE[0]: - return nil - case JSON_COMMA[0]: - p.reader.ReadByte() - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - return nil - default: - e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b)) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - if len(b) > 0 { - switch b[0] { - case JSON_COLON[0]: - p.reader.ReadByte() - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - return nil - default: - e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b)) - return NewTProtocolExceptionWithType(INVALID_DATA, 
e) - } - } - } - return nil -} - -func (p *TSimpleJSONProtocol) ParsePostValue() error { - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - cxt, ok := p.parseContextStack.peek() - if !ok { - return errEmptyJSONContextStack - } - switch cxt { - case _CONTEXT_IN_LIST_FIRST: - p.parseContextStack.pop() - p.parseContextStack.push(_CONTEXT_IN_LIST) - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - p.parseContextStack.pop() - p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - p.parseContextStack.pop() - p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_KEY) - } - return nil -} - -func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error { - for { - b, _ := p.reader.Peek(1) - if len(b) < 1 { - return nil - } - switch b[0] { - case ' ', '\r', '\n', '\t': - p.reader.ReadByte() - continue - default: - break - } - break - } - return nil -} - -func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) { - line, err := p.reader.ReadString(JSON_QUOTE) - if err != nil { - return "", NewTProtocolException(err) - } - l := len(line) - // count number of escapes to see if we need to keep going - i := 1 - for ; i < l; i++ { - if line[l-i-1] != '\\' { - break - } - } - if i&0x01 == 1 { - v, ok := jsonUnquote(string(JSON_QUOTE) + line) - if !ok { - return "", NewTProtocolException(err) - } - return v, nil - } - s, err := p.ParseQuotedStringBody() - if err != nil { - return "", NewTProtocolException(err) - } - str := string(JSON_QUOTE) + line + s - v, ok := jsonUnquote(str) - if !ok { - e := fmt.Errorf("Unable to parse as JSON string %s", str) - return "", NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return v, nil -} - -func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) { - line, err := p.reader.ReadString(JSON_QUOTE) - if err != nil { - return "", NewTProtocolException(err) - } - l := len(line) - // count number of escapes to see if we need to keep going - i := 1 - for ; i < l; i++ { - if line[l-i-1] != '\\' { - break - } - } - if i&0x01 == 1 { - return line, nil - } - s, err := p.ParseQuotedStringBody() - if err != nil { - return "", NewTProtocolException(err) - } - v := line + s - return v, nil -} - -func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) { - line, err := p.reader.ReadBytes(JSON_QUOTE) - if err != nil { - return line, NewTProtocolException(err) - } - line2 := line[0 : len(line)-1] - l := len(line2) - if (l % 4) != 0 { - pad := 4 - (l % 4) - fill := [...]byte{'=', '=', '='} - line2 = append(line2, fill[:pad]...) 
- l = len(line2) - } - output := make([]byte, base64.StdEncoding.DecodedLen(l)) - n, err := base64.StdEncoding.Decode(output, line2) - return output[0:n], NewTProtocolException(err) -} - -func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) { - if err := p.ParsePreValue(); err != nil { - return 0, false, err - } - var value int64 - var isnull bool - if p.safePeekContains(JSON_NULL) { - p.reader.Read(make([]byte, len(JSON_NULL))) - isnull = true - } else { - num, err := p.readNumeric() - isnull = (num == nil) - if !isnull { - value = num.Int64() - } - if err != nil { - return value, isnull, err - } - } - return value, isnull, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) { - if err := p.ParsePreValue(); err != nil { - return 0, false, err - } - var value float64 - var isnull bool - if p.safePeekContains(JSON_NULL) { - p.reader.Read(make([]byte, len(JSON_NULL))) - isnull = true - } else { - num, err := p.readNumeric() - isnull = (num == nil) - if !isnull { - value = num.Float64() - } - if err != nil { - return value, isnull, err - } - } - return value, isnull, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) { - if err := p.ParsePreValue(); err != nil { - return false, err - } - var b []byte - b, err := p.reader.Peek(1) - if err != nil { - return false, err - } - if len(b) > 0 && b[0] == JSON_LBRACE[0] { - p.reader.ReadByte() - p.parseContextStack.push(_CONTEXT_IN_OBJECT_FIRST) - return false, nil - } else if p.safePeekContains(JSON_NULL) { - return true, nil - } - e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b)) - return false, NewTProtocolExceptionWithType(INVALID_DATA, e) -} - -func (p *TSimpleJSONProtocol) ParseObjectEnd() error { - if isNull, err := p.readIfNull(); isNull || err != nil { - return err - } - cxt, _ := p.parseContextStack.peek() - if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) { - e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - line, err := p.reader.ReadString(JSON_RBRACE[0]) - if err != nil { - return NewTProtocolException(err) - } - for _, char := range line { - switch char { - default: - e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - case ' ', '\n', '\r', '\t', '}': - break - } - } - p.parseContextStack.pop() - return p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) { - if e := p.ParsePreValue(); e != nil { - return false, e - } - var b []byte - b, err = p.reader.Peek(1) - if err != nil { - return false, err - } - if len(b) >= 1 && b[0] == JSON_LBRACKET[0] { - p.parseContextStack.push(_CONTEXT_IN_LIST_FIRST) - p.reader.ReadByte() - isNull = false - } else if p.safePeekContains(JSON_NULL) { - isNull = true - } else { - err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b) - } - return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err) -} - -func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, 0, e - } - bElemType, _, err := p.ParseI64() - elemType = TType(bElemType) - if err != nil { - return elemType, size, err - } - nSize, _, err2 := p.ParseI64() - size = int(nSize) - return elemType, size, err2 -} - -func (p *TSimpleJSONProtocol) ParseListEnd() error { - if isNull, err := 
p.readIfNull(); isNull || err != nil { - return err - } - cxt, _ := p.parseContextStack.peek() - if cxt != _CONTEXT_IN_LIST { - e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - line, err := p.reader.ReadString(JSON_RBRACKET[0]) - if err != nil { - return NewTProtocolException(err) - } - for _, char := range line { - switch char { - default: - e := fmt.Errorf("Expecting end of list \"]\", but found: \"%v\"", line) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]): - break - } - } - p.parseContextStack.pop() - if cxt, ok := p.parseContextStack.peek(); !ok { - return errEmptyJSONContextStack - } else if cxt == _CONTEXT_IN_TOPLEVEL { - return nil - } - return p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) readSingleValue() (interface{}, TType, error) { - e := p.readNonSignificantWhitespace() - if e != nil { - return nil, VOID, NewTProtocolException(e) - } - b, e := p.reader.Peek(1) - if len(b) > 0 { - c := b[0] - switch c { - case JSON_NULL[0]: - buf := make([]byte, len(JSON_NULL)) - _, e := p.reader.Read(buf) - if e != nil { - return nil, VOID, NewTProtocolException(e) - } - if string(JSON_NULL) != string(buf) { - e = mismatch(string(JSON_NULL), string(buf)) - return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return nil, VOID, nil - case JSON_QUOTE: - p.reader.ReadByte() - v, e := p.ParseStringBody() - if e != nil { - return v, UTF8, NewTProtocolException(e) - } - if v == JSON_INFINITY { - return INFINITY, DOUBLE, nil - } else if v == JSON_NEGATIVE_INFINITY { - return NEGATIVE_INFINITY, DOUBLE, nil - } else if v == JSON_NAN { - return NAN, DOUBLE, nil - } - return v, UTF8, nil - case JSON_TRUE[0]: - buf := make([]byte, len(JSON_TRUE)) - _, e := p.reader.Read(buf) - if e != nil { - return true, BOOL, NewTProtocolException(e) - } - if string(JSON_TRUE) != string(buf) { - e := mismatch(string(JSON_TRUE), string(buf)) - return true, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return true, BOOL, nil - case JSON_FALSE[0]: - buf := make([]byte, len(JSON_FALSE)) - _, e := p.reader.Read(buf) - if e != nil { - return false, BOOL, NewTProtocolException(e) - } - if string(JSON_FALSE) != string(buf) { - e := mismatch(string(JSON_FALSE), string(buf)) - return false, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return false, BOOL, nil - case JSON_LBRACKET[0]: - _, e := p.reader.ReadByte() - return make([]interface{}, 0), LIST, NewTProtocolException(e) - case JSON_LBRACE[0]: - _, e := p.reader.ReadByte() - return make(map[string]interface{}), STRUCT, NewTProtocolException(e) - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]: - // assume numeric - v, e := p.readNumeric() - return v, DOUBLE, e - default: - e := fmt.Errorf("Expected element in list but found '%s' while parsing JSON.", string(c)) - return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - e = fmt.Errorf("Cannot read a single element while parsing JSON.") - return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) - -} - -func (p *TSimpleJSONProtocol) readIfNull() (bool, error) { - cont := true - for cont { - b, _ := p.reader.Peek(1) - if len(b) < 1 { - return false, nil - } - switch b[0] { - default: - return false, nil - case JSON_NULL[0]: - cont = false - break - case ' ', '\n', '\r', '\t': - p.reader.ReadByte() - break - } - } - if 
p.safePeekContains(JSON_NULL) { - p.reader.Read(make([]byte, len(JSON_NULL))) - return true, nil - } - return false, nil -} - -func (p *TSimpleJSONProtocol) readQuoteIfNext() { - b, _ := p.reader.Peek(1) - if len(b) > 0 && b[0] == JSON_QUOTE { - p.reader.ReadByte() - } -} - -func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) { - isNull, err := p.readIfNull() - if isNull || err != nil { - return NUMERIC_NULL, err - } - hasDecimalPoint := false - nextCanBeSign := true - hasE := false - MAX_LEN := 40 - buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN)) - continueFor := true - inQuotes := false - for continueFor { - c, err := p.reader.ReadByte() - if err != nil { - if err == io.EOF { - break - } - return NUMERIC_NULL, NewTProtocolException(err) - } - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - buf.WriteByte(c) - nextCanBeSign = false - case '.': - if hasDecimalPoint { - e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String()) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if hasE { - e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String()) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - buf.WriteByte(c) - hasDecimalPoint, nextCanBeSign = true, false - case 'e', 'E': - if hasE { - e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - buf.WriteByte(c) - hasE, nextCanBeSign = true, true - case '-', '+': - if !nextCanBeSign { - e := fmt.Errorf("Negative sign within number") - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - buf.WriteByte(c) - nextCanBeSign = false - case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]: - p.reader.UnreadByte() - continueFor = false - case JSON_NAN[0]: - if buf.Len() == 0 { - buffer := make([]byte, len(JSON_NAN)) - buffer[0] = c - _, e := p.reader.Read(buffer[1:]) - if e != nil { - return NUMERIC_NULL, NewTProtocolException(e) - } - if JSON_NAN != string(buffer) { - e := mismatch(JSON_NAN, string(buffer)) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if inQuotes { - p.readQuoteIfNext() - } - return NAN, nil - } else { - e := fmt.Errorf("Unable to parse number starting with character '%c'", c) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - case JSON_INFINITY[0]: - if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') { - buffer := make([]byte, len(JSON_INFINITY)) - buffer[0] = c - _, e := p.reader.Read(buffer[1:]) - if e != nil { - return NUMERIC_NULL, NewTProtocolException(e) - } - if JSON_INFINITY != string(buffer) { - e := mismatch(JSON_INFINITY, string(buffer)) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if inQuotes { - p.readQuoteIfNext() - } - return INFINITY, nil - } else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] { - buffer := make([]byte, len(JSON_NEGATIVE_INFINITY)) - buffer[0] = JSON_NEGATIVE_INFINITY[0] - buffer[1] = c - _, e := p.reader.Read(buffer[2:]) - if e != nil { - return NUMERIC_NULL, NewTProtocolException(e) - } - if JSON_NEGATIVE_INFINITY != string(buffer) { - e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer)) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if inQuotes { - p.readQuoteIfNext() - } - return NEGATIVE_INFINITY, nil - } else { - e := 
fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String()) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - case JSON_QUOTE: - if !inQuotes { - inQuotes = true - } else { - break - } - default: - e := fmt.Errorf("Unable to parse number starting with character '%c'", c) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - if buf.Len() == 0 { - e := fmt.Errorf("Unable to parse number from empty string ''") - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return NewNumericFromJSONString(buf.String(), false), nil -} - -// Safely peeks into the buffer, reading only what is necessary -func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool { - for i := 0; i < len(b); i++ { - a, _ := p.reader.Peek(i + 1) - if len(a) < (i+1) || a[i] != b[i] { - return false - } - } - return true -} - -// Reset the context stack to its initial state. -func (p *TSimpleJSONProtocol) resetContextStack() { - p.parseContextStack = jsonContextStack{_CONTEXT_IN_TOPLEVEL} - p.dumpContext = jsonContextStack{_CONTEXT_IN_TOPLEVEL} -} - -func (p *TSimpleJSONProtocol) write(b []byte) (int, error) { - n, err := p.writer.Write(b) - if err != nil { - p.writer.Reset(p.trans) // THRIFT-3735 - } - return n, err -} - -// SetTConfiguration implements TConfigurationSetter for propagation. -func (p *TSimpleJSONProtocol) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(p.trans, conf) -} - -var _ TConfigurationSetter = (*TSimpleJSONProtocol)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go deleted file mode 100644 index 563cbfc694..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go +++ /dev/null @@ -1,332 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" - "fmt" - "io" - "sync" - "sync/atomic" - "time" -) - -// ErrAbandonRequest is a special error server handler implementations can -// return to indicate that the request has been abandoned. -// -// TSimpleServer will check for this error, and close the client connection -// instead of writing the response/error back to the client. -// -// It shall only be used when the server handler implementation know that the -// client already abandoned the request (by checking that the passed in context -// is already canceled, for example). 
-var ErrAbandonRequest = errors.New("request abandoned") - -// ServerConnectivityCheckInterval defines the ticker interval used by -// connectivity check in thrift compiled TProcessorFunc implementations. -// -// It's defined as a variable instead of constant, so that thrift server -// implementations can change its value to control the behavior. -// -// If it's changed to <=0, the feature will be disabled. -var ServerConnectivityCheckInterval = time.Millisecond * 5 - -/* - * This is not a typical TSimpleServer as it is not blocked after accept a socket. - * It is more like a TThreadedServer that can handle different connections in different goroutines. - * This will work if golang user implements a conn-pool like thing in client side. - */ -type TSimpleServer struct { - closed int32 - wg sync.WaitGroup - mu sync.Mutex - - processorFactory TProcessorFactory - serverTransport TServerTransport - inputTransportFactory TTransportFactory - outputTransportFactory TTransportFactory - inputProtocolFactory TProtocolFactory - outputProtocolFactory TProtocolFactory - - // Headers to auto forward in THeaderProtocol - forwardHeaders []string - - logger Logger -} - -func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer { - return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport) -} - -func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { - return NewTSimpleServerFactory4(NewTProcessorFactory(processor), - serverTransport, - transportFactory, - protocolFactory, - ) -} - -func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { - return NewTSimpleServerFactory6(NewTProcessorFactory(processor), - serverTransport, - inputTransportFactory, - outputTransportFactory, - inputProtocolFactory, - outputProtocolFactory, - ) -} - -func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer { - return NewTSimpleServerFactory6(processorFactory, - serverTransport, - NewTTransportFactory(), - NewTTransportFactory(), - NewTBinaryProtocolFactoryDefault(), - NewTBinaryProtocolFactoryDefault(), - ) -} - -func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { - return NewTSimpleServerFactory6(processorFactory, - serverTransport, - transportFactory, - transportFactory, - protocolFactory, - protocolFactory, - ) -} - -func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { - return &TSimpleServer{ - processorFactory: processorFactory, - serverTransport: serverTransport, - inputTransportFactory: inputTransportFactory, - outputTransportFactory: outputTransportFactory, - inputProtocolFactory: inputProtocolFactory, - outputProtocolFactory: outputProtocolFactory, - } -} - -func (p *TSimpleServer) ProcessorFactory() TProcessorFactory { - return p.processorFactory -} - -func (p *TSimpleServer) ServerTransport() TServerTransport { - return p.serverTransport -} - 
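The deleted simple_server.go above documents the ErrAbandonRequest contract: a handler may return it when it knows the client has already gone away, and TSimpleServer then closes the connection instead of writing a response. A minimal sketch of that usage, assuming the upstream Apache Thrift module and a hypothetical Echo handler (neither the handler nor the service shape comes from this patch):

// Hypothetical handler illustrating the ErrAbandonRequest contract described
// in the deleted simple_server.go above. Names are assumptions for
// illustration only.
package handler

import (
	"context"

	"github.com/apache/thrift/lib/go/thrift" // upstream of the vendored copy removed here
)

type EchoHandler struct{}

func (EchoHandler) Echo(ctx context.Context, msg string) (string, error) {
	select {
	case <-ctx.Done():
		// The client abandoned the request; returning ErrAbandonRequest lets
		// the server drop the connection rather than serialize a reply that
		// nobody will read.
		return "", thrift.ErrAbandonRequest
	default:
		return msg, nil
	}
}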
-func (p *TSimpleServer) InputTransportFactory() TTransportFactory { - return p.inputTransportFactory -} - -func (p *TSimpleServer) OutputTransportFactory() TTransportFactory { - return p.outputTransportFactory -} - -func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory { - return p.inputProtocolFactory -} - -func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory { - return p.outputProtocolFactory -} - -func (p *TSimpleServer) Listen() error { - return p.serverTransport.Listen() -} - -// SetForwardHeaders sets the list of header keys that will be auto forwarded -// while using THeaderProtocol. -// -// "forward" means that when the server is also a client to other upstream -// thrift servers, the context object user gets in the processor functions will -// have both read and write headers set, with write headers being forwarded. -// Users can always override the write headers by calling SetWriteHeaderList -// before calling thrift client functions. -func (p *TSimpleServer) SetForwardHeaders(headers []string) { - size := len(headers) - if size == 0 { - p.forwardHeaders = nil - return - } - - keys := make([]string, size) - copy(keys, headers) - p.forwardHeaders = keys -} - -// SetLogger sets the logger used by this TSimpleServer. -// -// If no logger was set before Serve is called, a default logger using standard -// log library will be used. -func (p *TSimpleServer) SetLogger(logger Logger) { - p.logger = logger -} - -func (p *TSimpleServer) innerAccept() (int32, error) { - client, err := p.serverTransport.Accept() - p.mu.Lock() - defer p.mu.Unlock() - closed := atomic.LoadInt32(&p.closed) - if closed != 0 { - return closed, nil - } - if err != nil { - return 0, err - } - if client != nil { - p.wg.Add(1) - go func() { - defer p.wg.Done() - if err := p.processRequests(client); err != nil { - p.logger(fmt.Sprintf("error processing request: %v", err)) - } - }() - } - return 0, nil -} - -func (p *TSimpleServer) AcceptLoop() error { - for { - closed, err := p.innerAccept() - if err != nil { - return err - } - if closed != 0 { - return nil - } - } -} - -func (p *TSimpleServer) Serve() error { - p.logger = fallbackLogger(p.logger) - - err := p.Listen() - if err != nil { - return err - } - p.AcceptLoop() - return nil -} - -func (p *TSimpleServer) Stop() error { - p.mu.Lock() - defer p.mu.Unlock() - if atomic.LoadInt32(&p.closed) != 0 { - return nil - } - atomic.StoreInt32(&p.closed, 1) - p.serverTransport.Interrupt() - p.wg.Wait() - return nil -} - -// If err is actually EOF, return nil, otherwise return err as-is. -func treatEOFErrorsAsNil(err error) error { - if err == nil { - return nil - } - if errors.Is(err, io.EOF) { - return nil - } - var te TTransportException - if errors.As(err, &te) && te.TypeId() == END_OF_FILE { - return nil - } - return err -} - -func (p *TSimpleServer) processRequests(client TTransport) (err error) { - defer func() { - err = treatEOFErrorsAsNil(err) - }() - - processor := p.processorFactory.GetProcessor(client) - inputTransport, err := p.inputTransportFactory.GetTransport(client) - if err != nil { - return err - } - inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport) - var outputTransport TTransport - var outputProtocol TProtocol - - // for THeaderProtocol, we must use the same protocol instance for - // input and output so that the response is in the same dialect that - // the server detected the request was in. 
- headerProtocol, ok := inputProtocol.(*THeaderProtocol) - if ok { - outputProtocol = inputProtocol - } else { - oTrans, err := p.outputTransportFactory.GetTransport(client) - if err != nil { - return err - } - outputTransport = oTrans - outputProtocol = p.outputProtocolFactory.GetProtocol(outputTransport) - } - - if inputTransport != nil { - defer inputTransport.Close() - } - if outputTransport != nil { - defer outputTransport.Close() - } - for { - if atomic.LoadInt32(&p.closed) != 0 { - return nil - } - - ctx := SetResponseHelper( - defaultCtx, - TResponseHelper{ - THeaderResponseHelper: NewTHeaderResponseHelper(outputProtocol), - }, - ) - if headerProtocol != nil { - // We need to call ReadFrame here, otherwise we won't - // get any headers on the AddReadTHeaderToContext call. - // - // ReadFrame is safe to be called multiple times so it - // won't break when it's called again later when we - // actually start to read the message. - if err := headerProtocol.ReadFrame(ctx); err != nil { - return err - } - ctx = AddReadTHeaderToContext(ctx, headerProtocol.GetReadHeaders()) - ctx = SetWriteHeaderList(ctx, p.forwardHeaders) - } - - ok, err := processor.Process(ctx, inputProtocol, outputProtocol) - if errors.Is(err, ErrAbandonRequest) { - return client.Close() - } - if errors.As(err, new(TTransportException)) && err != nil { - return err - } - var tae TApplicationException - if errors.As(err, &tae) && tae.TypeId() == UNKNOWN_METHOD { - continue - } - if !ok { - break - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go deleted file mode 100644 index e911bf1668..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go +++ /dev/null @@ -1,238 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "net" - "time" -) - -type TSocket struct { - conn *socketConn - addr net.Addr - cfg *TConfiguration - - connectTimeout time.Duration - socketTimeout time.Duration -} - -// Deprecated: Use NewTSocketConf instead. -func NewTSocket(hostPort string) (*TSocket, error) { - return NewTSocketConf(hostPort, &TConfiguration{ - noPropagation: true, - }) -} - -// NewTSocketConf creates a net.Conn-backed TTransport, given a host and port. 
-// -// Example: -// -// trans, err := thrift.NewTSocketConf("localhost:9090", &TConfiguration{ -// ConnectTimeout: time.Second, // Use 0 for no timeout -// SocketTimeout: time.Second, // Use 0 for no timeout -// }) -func NewTSocketConf(hostPort string, conf *TConfiguration) (*TSocket, error) { - addr, err := net.ResolveTCPAddr("tcp", hostPort) - if err != nil { - return nil, err - } - return NewTSocketFromAddrConf(addr, conf), nil -} - -// Deprecated: Use NewTSocketConf instead. -func NewTSocketTimeout(hostPort string, connTimeout time.Duration, soTimeout time.Duration) (*TSocket, error) { - return NewTSocketConf(hostPort, &TConfiguration{ - ConnectTimeout: connTimeout, - SocketTimeout: soTimeout, - - noPropagation: true, - }) -} - -// NewTSocketFromAddrConf creates a TSocket from a net.Addr -func NewTSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSocket { - return &TSocket{ - addr: addr, - cfg: conf, - } -} - -// Deprecated: Use NewTSocketFromAddrConf instead. -func NewTSocketFromAddrTimeout(addr net.Addr, connTimeout time.Duration, soTimeout time.Duration) *TSocket { - return NewTSocketFromAddrConf(addr, &TConfiguration{ - ConnectTimeout: connTimeout, - SocketTimeout: soTimeout, - - noPropagation: true, - }) -} - -// NewTSocketFromConnConf creates a TSocket from an existing net.Conn. -func NewTSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSocket { - return &TSocket{ - conn: wrapSocketConn(conn), - addr: conn.RemoteAddr(), - cfg: conf, - } -} - -// Deprecated: Use NewTSocketFromConnConf instead. -func NewTSocketFromConnTimeout(conn net.Conn, socketTimeout time.Duration) *TSocket { - return NewTSocketFromConnConf(conn, &TConfiguration{ - SocketTimeout: socketTimeout, - - noPropagation: true, - }) -} - -// SetTConfiguration implements TConfigurationSetter. -// -// It can be used to set connect and socket timeouts. -func (p *TSocket) SetTConfiguration(conf *TConfiguration) { - p.cfg = conf -} - -// Sets the connect timeout -func (p *TSocket) SetConnTimeout(timeout time.Duration) error { - if p.cfg == nil { - p.cfg = &TConfiguration{ - noPropagation: true, - } - } - p.cfg.ConnectTimeout = timeout - return nil -} - -// Sets the socket timeout -func (p *TSocket) SetSocketTimeout(timeout time.Duration) error { - if p.cfg == nil { - p.cfg = &TConfiguration{ - noPropagation: true, - } - } - p.cfg.SocketTimeout = timeout - return nil -} - -func (p *TSocket) pushDeadline(read, write bool) { - var t time.Time - if timeout := p.cfg.GetSocketTimeout(); timeout > 0 { - t = time.Now().Add(time.Duration(timeout)) - } - if read && write { - p.conn.SetDeadline(t) - } else if read { - p.conn.SetReadDeadline(t) - } else if write { - p.conn.SetWriteDeadline(t) - } -} - -// Connects the socket, creating a new socket object if necessary. 
-func (p *TSocket) Open() error { - if p.conn.isValid() { - return NewTTransportException(ALREADY_OPEN, "Socket already connected.") - } - if p.addr == nil { - return NewTTransportException(NOT_OPEN, "Cannot open nil address.") - } - if len(p.addr.Network()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") - } - if len(p.addr.String()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad address.") - } - var err error - if p.conn, err = createSocketConnFromReturn(net.DialTimeout( - p.addr.Network(), - p.addr.String(), - p.cfg.GetConnectTimeout(), - )); err != nil { - return NewTTransportException(NOT_OPEN, err.Error()) - } - return nil -} - -// Retrieve the underlying net.Conn -func (p *TSocket) Conn() net.Conn { - return p.conn -} - -// Returns true if the connection is open -func (p *TSocket) IsOpen() bool { - return p.conn.IsOpen() -} - -// Closes the socket. -func (p *TSocket) Close() error { - // Close the socket - if p.conn != nil { - err := p.conn.Close() - if err != nil { - return err - } - p.conn = nil - } - return nil -} - -//Returns the remote address of the socket. -func (p *TSocket) Addr() net.Addr { - return p.addr -} - -func (p *TSocket) Read(buf []byte) (int, error) { - if !p.conn.isValid() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(true, false) - // NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between - // p.pushDeadline and p.conn.Read could cause the deadline set inside - // p.pushDeadline being reset, thus need to be avoided. - n, err := p.conn.Read(buf) - return n, NewTTransportExceptionFromError(err) -} - -func (p *TSocket) Write(buf []byte) (int, error) { - if !p.conn.isValid() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(false, true) - return p.conn.Write(buf) -} - -func (p *TSocket) Flush(ctx context.Context) error { - return nil -} - -func (p *TSocket) Interrupt() error { - if !p.conn.isValid() { - return nil - } - return p.conn.Close() -} - -func (p *TSocket) RemainingBytes() (num_bytes uint64) { - const maxSize = ^uint64(0) - return maxSize // the truth is, we just don't know unless framed is used -} - -var _ TConfigurationSetter = (*TSocket)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go deleted file mode 100644 index c1cc30c6cc..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "net" -) - -// socketConn is a wrapped net.Conn that tries to do connectivity check. -type socketConn struct { - net.Conn - - buffer [1]byte -} - -var _ net.Conn = (*socketConn)(nil) - -// createSocketConnFromReturn is a language sugar to help create socketConn from -// return values of functions like net.Dial, tls.Dial, net.Listener.Accept, etc. -func createSocketConnFromReturn(conn net.Conn, err error) (*socketConn, error) { - if err != nil { - return nil, err - } - return &socketConn{ - Conn: conn, - }, nil -} - -// wrapSocketConn wraps an existing net.Conn into *socketConn. -func wrapSocketConn(conn net.Conn) *socketConn { - // In case conn is already wrapped, - // return it as-is and avoid double wrapping. - if sc, ok := conn.(*socketConn); ok { - return sc - } - - return &socketConn{ - Conn: conn, - } -} - -// isValid checks whether there's a valid connection. -// -// It's nil safe, and returns false if sc itself is nil, or if the underlying -// connection is nil. -// -// It's the same as the previous implementation of TSocket.IsOpen and -// TSSLSocket.IsOpen before we added connectivity check. -func (sc *socketConn) isValid() bool { - return sc != nil && sc.Conn != nil -} - -// IsOpen checks whether the connection is open. -// -// It's nil safe, and returns false if sc itself is nil, or if the underlying -// connection is nil. -// -// Otherwise, it tries to do a connectivity check and returns the result. -// -// It also has the side effect of resetting the previously set read deadline on -// the socket. As a result, it shouldn't be called between setting read deadline -// and doing actual read. -func (sc *socketConn) IsOpen() bool { - if !sc.isValid() { - return false - } - return sc.checkConn() == nil -} - -// Read implements io.Reader. -// -// On Windows, it behaves the same as the underlying net.Conn.Read. -// -// On non-Windows, it treats len(p) == 0 as a connectivity check instead of -// readability check, which means instead of blocking until there's something to -// read (readability check), or always return (0, nil) (the default behavior of -// go's stdlib implementation on non-Windows), it never blocks, and will return -// an error if the connection is lost. -func (sc *socketConn) Read(p []byte) (n int, err error) { - if len(p) == 0 { - return 0, sc.read0() - } - - return sc.Conn.Read(p) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go deleted file mode 100644 index f5fab3ab65..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go +++ /dev/null @@ -1,83 +0,0 @@ -// +build !windows - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" - "io" - "syscall" - "time" -) - -// We rely on this variable to be the zero time, -// but define it as global variable to avoid repetitive allocations. -// Please DO NOT mutate this variable in any way. -var zeroTime time.Time - -func (sc *socketConn) read0() error { - return sc.checkConn() -} - -func (sc *socketConn) checkConn() error { - syscallConn, ok := sc.Conn.(syscall.Conn) - if !ok { - // No way to check, return nil - return nil - } - - // The reading about to be done here is non-blocking so we don't really - // need a read deadline. We just need to clear the previously set read - // deadline, if any. - sc.Conn.SetReadDeadline(zeroTime) - - rc, err := syscallConn.SyscallConn() - if err != nil { - return err - } - - var n int - - if readErr := rc.Read(func(fd uintptr) bool { - n, _, err = syscall.Recvfrom(int(fd), sc.buffer[:], syscall.MSG_PEEK|syscall.MSG_DONTWAIT) - return true - }); readErr != nil { - return readErr - } - - if n > 0 { - // We got something, which means we are good - return nil - } - - if errors.Is(err, syscall.EAGAIN) || errors.Is(err, syscall.EWOULDBLOCK) { - // This means the connection is still open but we don't have - // anything to read right now. - return nil - } - - if err != nil { - return err - } - - // At this point, it means the other side already closed the connection. - return io.EOF -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go deleted file mode 100644 index 679838c3b6..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build windows - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -func (sc *socketConn) read0() error { - // On windows, we fallback to the default behavior of reading 0 bytes. - var p []byte - _, err := sc.Conn.Read(p) - return err -} - -func (sc *socketConn) checkConn() error { - // On windows, we always return nil for this check. 
- return nil -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go deleted file mode 100644 index 907afca326..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "crypto/tls" - "net" - "time" -) - -type TSSLServerSocket struct { - listener net.Listener - addr net.Addr - clientTimeout time.Duration - interrupted bool - cfg *tls.Config -} - -func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) { - return NewTSSLServerSocketTimeout(listenAddr, cfg, 0) -} - -func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) { - if cfg.MinVersion == 0 { - cfg.MinVersion = tls.VersionTLS10 - } - addr, err := net.ResolveTCPAddr("tcp", listenAddr) - if err != nil { - return nil, err - } - return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil -} - -func (p *TSSLServerSocket) Listen() error { - if p.IsListening() { - return nil - } - l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg) - if err != nil { - return err - } - p.listener = l - return nil -} - -func (p *TSSLServerSocket) Accept() (TTransport, error) { - if p.interrupted { - return nil, errTransportInterrupted - } - if p.listener == nil { - return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") - } - conn, err := p.listener.Accept() - if err != nil { - return nil, NewTTransportExceptionFromError(err) - } - return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil -} - -// Checks whether the socket is listening. -func (p *TSSLServerSocket) IsListening() bool { - return p.listener != nil -} - -// Connects the socket, creating a new socket object if necessary. 
-func (p *TSSLServerSocket) Open() error { - if p.IsListening() { - return NewTTransportException(ALREADY_OPEN, "Server socket already open") - } - if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil { - return err - } else { - p.listener = l - } - return nil -} - -func (p *TSSLServerSocket) Addr() net.Addr { - return p.addr -} - -func (p *TSSLServerSocket) Close() error { - defer func() { - p.listener = nil - }() - if p.IsListening() { - return p.listener.Close() - } - return nil -} - -func (p *TSSLServerSocket) Interrupt() error { - p.interrupted = true - return nil -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go deleted file mode 100644 index 6359a74ceb..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "crypto/tls" - "net" - "time" -) - -type TSSLSocket struct { - conn *socketConn - // hostPort contains host:port (e.g. "asdf.com:12345"). The field is - // only valid if addr is nil. - hostPort string - // addr is nil when hostPort is not "", and is only used when the - // TSSLSocket is constructed from a net.Addr. - addr net.Addr - - cfg *TConfiguration -} - -// NewTSSLSocketConf creates a net.Conn-backed TTransport, given a host and port. -// -// Example: -// -// trans, err := thrift.NewTSSLSocketConf("localhost:9090", nil, &TConfiguration{ -// ConnectTimeout: time.Second, // Use 0 for no timeout -// SocketTimeout: time.Second, // Use 0 for no timeout -// }) -func NewTSSLSocketConf(hostPort string, conf *TConfiguration) (*TSSLSocket, error) { - if cfg := conf.GetTLSConfig(); cfg != nil && cfg.MinVersion == 0 { - cfg.MinVersion = tls.VersionTLS10 - } - return &TSSLSocket{ - hostPort: hostPort, - cfg: conf, - }, nil -} - -// Deprecated: Use NewTSSLSocketConf instead. -func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) { - return NewTSSLSocketConf(hostPort, &TConfiguration{ - TLSConfig: cfg, - - noPropagation: true, - }) -} - -// Deprecated: Use NewTSSLSocketConf instead. -func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) (*TSSLSocket, error) { - return NewTSSLSocketConf(hostPort, &TConfiguration{ - ConnectTimeout: connectTimeout, - SocketTimeout: socketTimeout, - TLSConfig: cfg, - - noPropagation: true, - }) -} - -// NewTSSLSocketFromAddrConf creates a TSSLSocket from a net.Addr. 
-func NewTSSLSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSSLSocket { - return &TSSLSocket{ - addr: addr, - cfg: conf, - } -} - -// Deprecated: Use NewTSSLSocketFromAddrConf instead. -func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) *TSSLSocket { - return NewTSSLSocketFromAddrConf(addr, &TConfiguration{ - ConnectTimeout: connectTimeout, - SocketTimeout: socketTimeout, - TLSConfig: cfg, - - noPropagation: true, - }) -} - -// NewTSSLSocketFromConnConf creates a TSSLSocket from an existing net.Conn. -func NewTSSLSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSSLSocket { - return &TSSLSocket{ - conn: wrapSocketConn(conn), - addr: conn.RemoteAddr(), - cfg: conf, - } -} - -// Deprecated: Use NewTSSLSocketFromConnConf instead. -func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, socketTimeout time.Duration) *TSSLSocket { - return NewTSSLSocketFromConnConf(conn, &TConfiguration{ - SocketTimeout: socketTimeout, - TLSConfig: cfg, - - noPropagation: true, - }) -} - -// SetTConfiguration implements TConfigurationSetter. -// -// It can be used to change connect and socket timeouts. -func (p *TSSLSocket) SetTConfiguration(conf *TConfiguration) { - p.cfg = conf -} - -// Sets the connect timeout -func (p *TSSLSocket) SetConnTimeout(timeout time.Duration) error { - if p.cfg == nil { - p.cfg = &TConfiguration{} - } - p.cfg.ConnectTimeout = timeout - return nil -} - -// Sets the socket timeout -func (p *TSSLSocket) SetSocketTimeout(timeout time.Duration) error { - if p.cfg == nil { - p.cfg = &TConfiguration{} - } - p.cfg.SocketTimeout = timeout - return nil -} - -func (p *TSSLSocket) pushDeadline(read, write bool) { - var t time.Time - if timeout := p.cfg.GetSocketTimeout(); timeout > 0 { - t = time.Now().Add(time.Duration(timeout)) - } - if read && write { - p.conn.SetDeadline(t) - } else if read { - p.conn.SetReadDeadline(t) - } else if write { - p.conn.SetWriteDeadline(t) - } -} - -// Connects the socket, creating a new socket object if necessary. -func (p *TSSLSocket) Open() error { - var err error - // If we have a hostname, we need to pass the hostname to tls.Dial for - // certificate hostname checks. - if p.hostPort != "" { - if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer( - &net.Dialer{ - Timeout: p.cfg.GetConnectTimeout(), - }, - "tcp", - p.hostPort, - p.cfg.GetTLSConfig(), - )); err != nil { - return NewTTransportException(NOT_OPEN, err.Error()) - } - } else { - if p.conn.isValid() { - return NewTTransportException(ALREADY_OPEN, "Socket already connected.") - } - if p.addr == nil { - return NewTTransportException(NOT_OPEN, "Cannot open nil address.") - } - if len(p.addr.Network()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") - } - if len(p.addr.String()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad address.") - } - if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer( - &net.Dialer{ - Timeout: p.cfg.GetConnectTimeout(), - }, - p.addr.Network(), - p.addr.String(), - p.cfg.GetTLSConfig(), - )); err != nil { - return NewTTransportException(NOT_OPEN, err.Error()) - } - } - return nil -} - -// Retrieve the underlying net.Conn -func (p *TSSLSocket) Conn() net.Conn { - return p.conn -} - -// Returns true if the connection is open -func (p *TSSLSocket) IsOpen() bool { - return p.conn.IsOpen() -} - -// Closes the socket. 
-func (p *TSSLSocket) Close() error { - // Close the socket - if p.conn != nil { - err := p.conn.Close() - if err != nil { - return err - } - p.conn = nil - } - return nil -} - -func (p *TSSLSocket) Read(buf []byte) (int, error) { - if !p.conn.isValid() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(true, false) - // NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between - // p.pushDeadline and p.conn.Read could cause the deadline set inside - // p.pushDeadline being reset, thus need to be avoided. - n, err := p.conn.Read(buf) - return n, NewTTransportExceptionFromError(err) -} - -func (p *TSSLSocket) Write(buf []byte) (int, error) { - if !p.conn.isValid() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(false, true) - return p.conn.Write(buf) -} - -func (p *TSSLSocket) Flush(ctx context.Context) error { - return nil -} - -func (p *TSSLSocket) Interrupt() error { - if !p.conn.isValid() { - return nil - } - return p.conn.Close() -} - -func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) { - const maxSize = ^uint64(0) - return maxSize // the truth is, we just don't know unless framed is used -} - -var _ TConfigurationSetter = (*TSSLSocket)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go deleted file mode 100644 index d68d0b3179..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "errors" - "io" -) - -var errTransportInterrupted = errors.New("Transport Interrupted") - -type Flusher interface { - Flush() (err error) -} - -type ContextFlusher interface { - Flush(ctx context.Context) (err error) -} - -type ReadSizeProvider interface { - RemainingBytes() (num_bytes uint64) -} - -// Encapsulates the I/O layer -type TTransport interface { - io.ReadWriteCloser - ContextFlusher - ReadSizeProvider - - // Opens the transport for communication - Open() error - - // Returns true if the transport is open - IsOpen() bool -} - -type stringWriter interface { - WriteString(s string) (n int, err error) -} - -// This is "enhanced" transport with extra capabilities. You need to use one of these -// to construct protocol. -// Notably, TSocket does not implement this interface, and it is always a mistake to use -// TSocket directly in protocol. 
-type TRichTransport interface { - io.ReadWriter - io.ByteReader - io.ByteWriter - stringWriter - ContextFlusher - ReadSizeProvider -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go deleted file mode 100644 index 0a3f07646d..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" - "io" -) - -type timeoutable interface { - Timeout() bool -} - -// Thrift Transport exception -type TTransportException interface { - TException - TypeId() int - Err() error -} - -const ( - UNKNOWN_TRANSPORT_EXCEPTION = 0 - NOT_OPEN = 1 - ALREADY_OPEN = 2 - TIMED_OUT = 3 - END_OF_FILE = 4 -) - -type tTransportException struct { - typeId int - err error - msg string -} - -var _ TTransportException = (*tTransportException)(nil) - -func (tTransportException) TExceptionType() TExceptionType { - return TExceptionTypeTransport -} - -func (p *tTransportException) TypeId() int { - return p.typeId -} - -func (p *tTransportException) Error() string { - return p.msg -} - -func (p *tTransportException) Err() error { - return p.err -} - -func (p *tTransportException) Unwrap() error { - return p.err -} - -func (p *tTransportException) Timeout() bool { - return p.typeId == TIMED_OUT -} - -func NewTTransportException(t int, e string) TTransportException { - return &tTransportException{ - typeId: t, - err: errors.New(e), - msg: e, - } -} - -func NewTTransportExceptionFromError(e error) TTransportException { - if e == nil { - return nil - } - - if t, ok := e.(TTransportException); ok { - return t - } - - te := &tTransportException{ - typeId: UNKNOWN_TRANSPORT_EXCEPTION, - err: e, - msg: e.Error(), - } - - if isTimeoutError(e) { - te.typeId = TIMED_OUT - return te - } - - if errors.Is(e, io.EOF) { - te.typeId = END_OF_FILE - return te - } - - return te -} - -func prependTTransportException(prepend string, e TTransportException) TTransportException { - return &tTransportException{ - typeId: e.TypeId(), - err: e, - msg: prepend + e.Error(), - } -} - -// isTimeoutError returns true when err is an error caused by timeout. -// -// Note that this also includes TTransportException wrapped timeout errors. 
-func isTimeoutError(err error) bool { - var t timeoutable - if errors.As(err, &t) { - return t.Timeout() - } - return false -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go deleted file mode 100644 index c805807940..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Factory class used to create wrapped instance of Transports. -// This is used primarily in servers, which get Transports from -// a ServerTransport and then may want to mutate them (i.e. create -// a BufferedTransport from the underlying base transport) -type TTransportFactory interface { - GetTransport(trans TTransport) (TTransport, error) -} - -type tTransportFactory struct{} - -// Return a wrapped instance of the base Transport. -func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - return trans, nil -} - -func NewTTransportFactory() TTransportFactory { - return &tTransportFactory{} -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go deleted file mode 100644 index b24f1b05c4..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -// Type constants in the Thrift protocol -type TType byte - -const ( - STOP = 0 - VOID = 1 - BOOL = 2 - BYTE = 3 - I08 = 3 - DOUBLE = 4 - I16 = 6 - I32 = 8 - I64 = 10 - STRING = 11 - UTF7 = 11 - STRUCT = 12 - MAP = 13 - SET = 14 - LIST = 15 - UTF8 = 16 - UTF16 = 17 - //BINARY = 18 wrong and unused -) - -var typeNames = map[int]string{ - STOP: "STOP", - VOID: "VOID", - BOOL: "BOOL", - BYTE: "BYTE", - DOUBLE: "DOUBLE", - I16: "I16", - I32: "I32", - I64: "I64", - STRING: "STRING", - STRUCT: "STRUCT", - MAP: "MAP", - SET: "SET", - LIST: "LIST", - UTF8: "UTF8", - UTF16: "UTF16", -} - -func (p TType) String() string { - if s, ok := typeNames[int(p)]; ok { - return s - } - return "Unknown" -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go deleted file mode 100644 index 259943a627..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go +++ /dev/null @@ -1,137 +0,0 @@ -/* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, -* software distributed under the License is distributed on an -* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -* KIND, either express or implied. See the License for the -* specific language governing permissions and limitations -* under the License. - */ - -package thrift - -import ( - "compress/zlib" - "context" - "io" -) - -// TZlibTransportFactory is a factory for TZlibTransport instances -type TZlibTransportFactory struct { - level int - factory TTransportFactory -} - -// TZlibTransport is a TTransport implementation that makes use of zlib compression. 
-type TZlibTransport struct { - reader io.ReadCloser - transport TTransport - writer *zlib.Writer -} - -// GetTransport constructs a new instance of NewTZlibTransport -func (p *TZlibTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - if p.factory != nil { - // wrap other factory - var err error - trans, err = p.factory.GetTransport(trans) - if err != nil { - return nil, err - } - } - return NewTZlibTransport(trans, p.level) -} - -// NewTZlibTransportFactory constructs a new instance of NewTZlibTransportFactory -func NewTZlibTransportFactory(level int) *TZlibTransportFactory { - return &TZlibTransportFactory{level: level, factory: nil} -} - -// NewTZlibTransportFactory constructs a new instance of TZlibTransportFactory -// as a wrapper over existing transport factory -func NewTZlibTransportFactoryWithFactory(level int, factory TTransportFactory) *TZlibTransportFactory { - return &TZlibTransportFactory{level: level, factory: factory} -} - -// NewTZlibTransport constructs a new instance of TZlibTransport -func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) { - w, err := zlib.NewWriterLevel(trans, level) - if err != nil { - return nil, err - } - - return &TZlibTransport{ - writer: w, - transport: trans, - }, nil -} - -// Close closes the reader and writer (flushing any unwritten data) and closes -// the underlying transport. -func (z *TZlibTransport) Close() error { - if z.reader != nil { - if err := z.reader.Close(); err != nil { - return err - } - } - if err := z.writer.Close(); err != nil { - return err - } - return z.transport.Close() -} - -// Flush flushes the writer and its underlying transport. -func (z *TZlibTransport) Flush(ctx context.Context) error { - if err := z.writer.Flush(); err != nil { - return err - } - return z.transport.Flush(ctx) -} - -// IsOpen returns true if the transport is open -func (z *TZlibTransport) IsOpen() bool { - return z.transport.IsOpen() -} - -// Open opens the transport for communication -func (z *TZlibTransport) Open() error { - return z.transport.Open() -} - -func (z *TZlibTransport) Read(p []byte) (int, error) { - if z.reader == nil { - r, err := zlib.NewReader(z.transport) - if err != nil { - return 0, NewTTransportExceptionFromError(err) - } - z.reader = r - } - - return z.reader.Read(p) -} - -// RemainingBytes returns the size in bytes of the data that is still to be -// read. -func (z *TZlibTransport) RemainingBytes() uint64 { - return z.transport.RemainingBytes() -} - -func (z *TZlibTransport) Write(p []byte) (int, error) { - return z.writer.Write(p) -} - -// SetTConfiguration implements TConfigurationSetter for propagation. -func (z *TZlibTransport) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(z.transport, conf) -} - -var _ TConfigurationSetter = (*TZlibTransport)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go deleted file mode 100644 index 9b4a54afc6..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go +++ /dev/null @@ -1,360 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" - -import ( - "context" - "encoding/binary" - "encoding/json" - "fmt" - "sync" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" - "go.opentelemetry.io/otel/sdk/resource" - sdktrace "go.opentelemetry.io/otel/sdk/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.21.0" - "go.opentelemetry.io/otel/trace" -) - -const ( - keyInstrumentationLibraryName = "otel.library.name" - keyInstrumentationLibraryVersion = "otel.library.version" - keyError = "error" - keySpanKind = "span.kind" - keyStatusCode = "otel.status_code" - keyStatusMessage = "otel.status_description" - keyDroppedAttributeCount = "otel.event.dropped_attributes_count" - keyEventName = "event" -) - -// New returns an OTel Exporter implementation that exports the collected -// spans to Jaeger. -func New(endpointOption EndpointOption) (*Exporter, error) { - uploader, err := endpointOption.newBatchUploader() - if err != nil { - return nil, err - } - - // Fetch default service.name from default resource for backup - var defaultServiceName string - defaultResource := resource.Default() - if value, exists := defaultResource.Set().Value(semconv.ServiceNameKey); exists { - defaultServiceName = value.AsString() - } - if defaultServiceName == "" { - return nil, fmt.Errorf("failed to get service name from default resource") - } - - stopCh := make(chan struct{}) - e := &Exporter{ - uploader: uploader, - stopCh: stopCh, - defaultServiceName: defaultServiceName, - } - return e, nil -} - -// Exporter exports OpenTelemetry spans to a Jaeger agent or collector. -type Exporter struct { - uploader batchUploader - stopOnce sync.Once - stopCh chan struct{} - defaultServiceName string -} - -var _ sdktrace.SpanExporter = (*Exporter)(nil) - -// ExportSpans transforms and exports OpenTelemetry spans to Jaeger. -func (e *Exporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { - // Return fast if context is already canceled or Exporter shutdown. - select { - case <-ctx.Done(): - return ctx.Err() - case <-e.stopCh: - return nil - default: - } - - // Cancel export if Exporter is shutdown. - var cancel context.CancelFunc - ctx, cancel = context.WithCancel(ctx) - defer cancel() - go func(ctx context.Context, cancel context.CancelFunc) { - select { - case <-ctx.Done(): - case <-e.stopCh: - cancel() - } - }(ctx, cancel) - - for _, batch := range jaegerBatchList(spans, e.defaultServiceName) { - if err := e.uploader.upload(ctx, batch); err != nil { - return err - } - } - - return nil -} - -// Shutdown stops the Exporter. This will close all connections and release -// all resources held by the Exporter. -func (e *Exporter) Shutdown(ctx context.Context) error { - // Stop any active and subsequent exports. - e.stopOnce.Do(func() { close(e.stopCh) }) - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - return e.uploader.shutdown(ctx) -} - -// MarshalLog is the marshaling function used by the logging system to represent this exporter. 
-func (e *Exporter) MarshalLog() interface{} { - return struct { - Type string - }{ - Type: "jaeger", - } -} - -func spanToThrift(ss sdktrace.ReadOnlySpan) *gen.Span { - attr := ss.Attributes() - tags := make([]*gen.Tag, 0, len(attr)) - for _, kv := range attr { - tag := keyValueToTag(kv) - if tag != nil { - tags = append(tags, tag) - } - } - - if is := ss.InstrumentationScope(); is.Name != "" { - tags = append(tags, getStringTag(keyInstrumentationLibraryName, is.Name)) - if is.Version != "" { - tags = append(tags, getStringTag(keyInstrumentationLibraryVersion, is.Version)) - } - } - - if ss.SpanKind() != trace.SpanKindInternal { - tags = append(tags, - getStringTag(keySpanKind, ss.SpanKind().String()), - ) - } - - if ss.Status().Code != codes.Unset { - switch ss.Status().Code { - case codes.Ok: - tags = append(tags, getStringTag(keyStatusCode, "OK")) - case codes.Error: - tags = append(tags, getBoolTag(keyError, true)) - tags = append(tags, getStringTag(keyStatusCode, "ERROR")) - } - if ss.Status().Description != "" { - tags = append(tags, getStringTag(keyStatusMessage, ss.Status().Description)) - } - } - - var logs []*gen.Log - for _, a := range ss.Events() { - nTags := len(a.Attributes) - if a.Name != "" { - nTags++ - } - if a.DroppedAttributeCount != 0 { - nTags++ - } - fields := make([]*gen.Tag, 0, nTags) - if a.Name != "" { - // If an event contains an attribute with the same key, it needs - // to be given precedence and overwrite this. - fields = append(fields, getStringTag(keyEventName, a.Name)) - } - for _, kv := range a.Attributes { - tag := keyValueToTag(kv) - if tag != nil { - fields = append(fields, tag) - } - } - if a.DroppedAttributeCount != 0 { - fields = append(fields, getInt64Tag(keyDroppedAttributeCount, int64(a.DroppedAttributeCount))) - } - logs = append(logs, &gen.Log{ - Timestamp: a.Time.UnixNano() / 1000, - Fields: fields, - }) - } - - var refs []*gen.SpanRef - for _, link := range ss.Links() { - tid := link.SpanContext.TraceID() - sid := link.SpanContext.SpanID() - refs = append(refs, &gen.SpanRef{ - TraceIdHigh: int64(binary.BigEndian.Uint64(tid[0:8])), - TraceIdLow: int64(binary.BigEndian.Uint64(tid[8:16])), - SpanId: int64(binary.BigEndian.Uint64(sid[:])), - RefType: gen.SpanRefType_FOLLOWS_FROM, - }) - } - - tid := ss.SpanContext().TraceID() - sid := ss.SpanContext().SpanID() - psid := ss.Parent().SpanID() - return &gen.Span{ - TraceIdHigh: int64(binary.BigEndian.Uint64(tid[0:8])), - TraceIdLow: int64(binary.BigEndian.Uint64(tid[8:16])), - SpanId: int64(binary.BigEndian.Uint64(sid[:])), - ParentSpanId: int64(binary.BigEndian.Uint64(psid[:])), - OperationName: ss.Name(), // TODO: if span kind is added then add prefix "Sent"/"Recv" - Flags: int32(ss.SpanContext().TraceFlags()), - StartTime: ss.StartTime().UnixNano() / 1000, - Duration: ss.EndTime().Sub(ss.StartTime()).Nanoseconds() / 1000, - Tags: tags, - Logs: logs, - References: refs, - } -} - -func keyValueToTag(keyValue attribute.KeyValue) *gen.Tag { - var tag *gen.Tag - switch keyValue.Value.Type() { - case attribute.STRING: - s := keyValue.Value.AsString() - tag = &gen.Tag{ - Key: string(keyValue.Key), - VStr: &s, - VType: gen.TagType_STRING, - } - case attribute.BOOL: - b := keyValue.Value.AsBool() - tag = &gen.Tag{ - Key: string(keyValue.Key), - VBool: &b, - VType: gen.TagType_BOOL, - } - case attribute.INT64: - i := keyValue.Value.AsInt64() - tag = &gen.Tag{ - Key: string(keyValue.Key), - VLong: &i, - VType: gen.TagType_LONG, - } - case attribute.FLOAT64: - f := keyValue.Value.AsFloat64() - tag = 
&gen.Tag{ - Key: string(keyValue.Key), - VDouble: &f, - VType: gen.TagType_DOUBLE, - } - case attribute.BOOLSLICE, - attribute.INT64SLICE, - attribute.FLOAT64SLICE, - attribute.STRINGSLICE: - data, _ := json.Marshal(keyValue.Value.AsInterface()) - a := (string)(data) - tag = &gen.Tag{ - Key: string(keyValue.Key), - VStr: &a, - VType: gen.TagType_STRING, - } - } - return tag -} - -func getInt64Tag(k string, i int64) *gen.Tag { - return &gen.Tag{ - Key: k, - VLong: &i, - VType: gen.TagType_LONG, - } -} - -func getStringTag(k, s string) *gen.Tag { - return &gen.Tag{ - Key: k, - VStr: &s, - VType: gen.TagType_STRING, - } -} - -func getBoolTag(k string, b bool) *gen.Tag { - return &gen.Tag{ - Key: k, - VBool: &b, - VType: gen.TagType_BOOL, - } -} - -// jaegerBatchList transforms a slice of spans into a slice of jaeger Batch. -func jaegerBatchList(ssl []sdktrace.ReadOnlySpan, defaultServiceName string) []*gen.Batch { - if len(ssl) == 0 { - return nil - } - - batchDict := make(map[attribute.Distinct]*gen.Batch) - - for _, ss := range ssl { - if ss == nil { - continue - } - - resourceKey := ss.Resource().Equivalent() - batch, bOK := batchDict[resourceKey] - if !bOK { - batch = &gen.Batch{ - Process: process(ss.Resource(), defaultServiceName), - Spans: []*gen.Span{}, - } - } - batch.Spans = append(batch.Spans, spanToThrift(ss)) - batchDict[resourceKey] = batch - } - - // Transform the categorized map into a slice - batchList := make([]*gen.Batch, 0, len(batchDict)) - for _, batch := range batchDict { - batchList = append(batchList, batch) - } - return batchList -} - -// process transforms an OTel Resource into a jaeger Process. -func process(res *resource.Resource, defaultServiceName string) *gen.Process { - var process gen.Process - - var serviceName attribute.KeyValue - if res != nil { - for iter := res.Iter(); iter.Next(); { - if iter.Attribute().Key == semconv.ServiceNameKey { - serviceName = iter.Attribute() - // Don't convert service.name into tag. - continue - } - if tag := keyValueToTag(iter.Attribute()); tag != nil { - process.Tags = append(process.Tags, tag) - } - } - } - - // If no service.name is contained in a Span's Resource, - // that field MUST be populated from the default Resource. - if serviceName.Value.AsString() == "" { - serviceName = semconv.ServiceName(defaultServiceName) - } - process.ServiceName = serviceName.Value.AsString() - - return &process -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go deleted file mode 100644 index 88055c8a30..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" - -import ( - "fmt" - "net" - "sync" - "sync/atomic" - "time" - - "github.com/go-logr/logr" -) - -// reconnectingUDPConn is an implementation of udpConn that resolves hostPort every resolveTimeout, if the resolved address is -// different than the current conn then the new address is dialed and the conn is swapped. -type reconnectingUDPConn struct { - // `sync/atomic` expects the first word in an allocated struct to be 64-bit - // aligned on both ARM and x86-32. See https://goo.gl/zW7dgq for more details. - bufferBytes int64 - hostPort string - resolveFunc resolveFunc - dialFunc dialFunc - logger logr.Logger - - connMtx sync.RWMutex - conn *net.UDPConn - destAddr *net.UDPAddr - closeChan chan struct{} -} - -type resolveFunc func(network string, hostPort string) (*net.UDPAddr, error) -type dialFunc func(network string, laddr, raddr *net.UDPAddr) (*net.UDPConn, error) - -// newReconnectingUDPConn returns a new udpConn that resolves hostPort every resolveTimeout, if the resolved address is -// different than the current conn then the new address is dialed and the conn is swapped. -func newReconnectingUDPConn(hostPort string, bufferBytes int, resolveTimeout time.Duration, resolveFunc resolveFunc, dialFunc dialFunc, logger logr.Logger) (*reconnectingUDPConn, error) { - conn := &reconnectingUDPConn{ - hostPort: hostPort, - resolveFunc: resolveFunc, - dialFunc: dialFunc, - logger: logger, - closeChan: make(chan struct{}), - bufferBytes: int64(bufferBytes), - } - - if err := conn.attemptResolveAndDial(); err != nil { - conn.logf("failed resolving destination address on connection startup, with err: %q. retrying in %s", err.Error(), resolveTimeout) - } - - go conn.reconnectLoop(resolveTimeout) - - return conn, nil -} - -func (c *reconnectingUDPConn) logf(format string, args ...interface{}) { - if c.logger != emptyLogger { - c.logger.Info(format, args...) 
- } -} - -func (c *reconnectingUDPConn) reconnectLoop(resolveTimeout time.Duration) { - ticker := time.NewTicker(resolveTimeout) - defer ticker.Stop() - - for { - select { - case <-c.closeChan: - return - case <-ticker.C: - if err := c.attemptResolveAndDial(); err != nil { - c.logf("%s", err.Error()) - } - } - } -} - -func (c *reconnectingUDPConn) attemptResolveAndDial() error { - newAddr, err := c.resolveFunc("udp", c.hostPort) - if err != nil { - return fmt.Errorf("failed to resolve new addr for host %q, with err: %w", c.hostPort, err) - } - - c.connMtx.RLock() - curAddr := c.destAddr - c.connMtx.RUnlock() - - // dont attempt dial if an addr was successfully dialed previously and, resolved addr is the same as current conn - if curAddr != nil && newAddr.String() == curAddr.String() { - return nil - } - - if err := c.attemptDialNewAddr(newAddr); err != nil { - return fmt.Errorf("failed to dial newly resolved addr '%s', with err: %w", newAddr, err) - } - - return nil -} - -func (c *reconnectingUDPConn) attemptDialNewAddr(newAddr *net.UDPAddr) error { - connUDP, err := c.dialFunc(newAddr.Network(), nil, newAddr) - if err != nil { - return err - } - - if bufferBytes := int(atomic.LoadInt64(&c.bufferBytes)); bufferBytes != 0 { - if err = connUDP.SetWriteBuffer(bufferBytes); err != nil { - return err - } - } - - c.connMtx.Lock() - c.destAddr = newAddr - // store prev to close later - prevConn := c.conn - c.conn = connUDP - c.connMtx.Unlock() - - if prevConn != nil { - return prevConn.Close() - } - - return nil -} - -// Write calls net.udpConn.Write, if it fails an attempt is made to connect to a new addr, if that succeeds the write is retried before returning. -func (c *reconnectingUDPConn) Write(b []byte) (int, error) { - var bytesWritten int - var err error - - c.connMtx.RLock() - conn := c.conn - c.connMtx.RUnlock() - - if conn == nil { - // if connection is not initialized indicate this with err in order to hook into retry logic - err = fmt.Errorf("UDP connection not yet initialized, an address has not been resolved") - } else { - bytesWritten, err = conn.Write(b) - } - - if err == nil { - return bytesWritten, nil - } - - // attempt to resolve and dial new address in case that's the problem, if resolve and dial succeeds, try write again - if reconnErr := c.attemptResolveAndDial(); reconnErr == nil { - c.connMtx.RLock() - conn := c.conn - c.connMtx.RUnlock() - - return conn.Write(b) - } - - // return original error if reconn fails - return bytesWritten, err -} - -// Close stops the reconnectLoop, then closes the connection via net.udpConn 's implementation. -func (c *reconnectingUDPConn) Close() error { - close(c.closeChan) - - // acquire rw lock before closing conn to ensure calls to Write drain - c.connMtx.Lock() - defer c.connMtx.Unlock() - - if c.conn != nil { - return c.conn.Close() - } - - return nil -} - -// SetWriteBuffer defers to the net.udpConn SetWriteBuffer implementation wrapped with a RLock. if no conn is currently held -// and SetWriteBuffer is called store bufferBytes to be set for new conns. 
-func (c *reconnectingUDPConn) SetWriteBuffer(bytes int) error { - var err error - - c.connMtx.RLock() - conn := c.conn - c.connMtx.RUnlock() - - if conn != nil { - err = c.conn.SetWriteBuffer(bytes) - } - - if err == nil { - atomic.StoreInt64(&c.bufferBytes, int64(bytes)) - } - - return err -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go deleted file mode 100644 index f65e3a6782..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go +++ /dev/null @@ -1,339 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" - -import ( - "bytes" - "context" - "fmt" - "io" - "log" - "net/http" - "time" - - "github.com/go-logr/logr" - "github.com/go-logr/stdr" - - gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" - "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" -) - -// batchUploader send a batch of spans to Jaeger. -type batchUploader interface { - upload(context.Context, *gen.Batch) error - shutdown(context.Context) error -} - -// EndpointOption configures a Jaeger endpoint. -type EndpointOption interface { - newBatchUploader() (batchUploader, error) -} - -type endpointOptionFunc func() (batchUploader, error) - -func (fn endpointOptionFunc) newBatchUploader() (batchUploader, error) { - return fn() -} - -// WithAgentEndpoint configures the Jaeger exporter to send spans to a Jaeger agent -// over compact thrift protocol. This will use the following environment variables for -// configuration if no explicit option is provided: -// -// - OTEL_EXPORTER_JAEGER_AGENT_HOST is used for the agent address host -// - OTEL_EXPORTER_JAEGER_AGENT_PORT is used for the agent address port -// -// The passed options will take precedence over any environment variables and default values -// will be used if neither are provided. -func WithAgentEndpoint(options ...AgentEndpointOption) EndpointOption { - return endpointOptionFunc(func() (batchUploader, error) { - cfg := agentEndpointConfig{ - agentClientUDPParams{ - AttemptReconnecting: true, - Host: envOr(envAgentHost, "localhost"), - Port: envOr(envAgentPort, "6831"), - }, - } - for _, opt := range options { - cfg = opt.apply(cfg) - } - - client, err := newAgentClientUDP(cfg.agentClientUDPParams) - if err != nil { - return nil, err - } - - return &agentUploader{client: client}, nil - }) -} - -// AgentEndpointOption configures a Jaeger agent endpoint. -type AgentEndpointOption interface { - apply(agentEndpointConfig) agentEndpointConfig -} - -type agentEndpointConfig struct { - agentClientUDPParams -} - -type agentEndpointOptionFunc func(agentEndpointConfig) agentEndpointConfig - -func (fn agentEndpointOptionFunc) apply(cfg agentEndpointConfig) agentEndpointConfig { - return fn(cfg) -} - -// WithAgentHost sets a host to be used in the agent client endpoint. 
-// This option overrides any value set for the -// OTEL_EXPORTER_JAEGER_AGENT_HOST environment variable. -// If this option is not passed and the env var is not set, "localhost" will be used by default. -func WithAgentHost(host string) AgentEndpointOption { - return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig { - o.Host = host - return o - }) -} - -// WithAgentPort sets a port to be used in the agent client endpoint. -// This option overrides any value set for the -// OTEL_EXPORTER_JAEGER_AGENT_PORT environment variable. -// If this option is not passed and the env var is not set, "6831" will be used by default. -func WithAgentPort(port string) AgentEndpointOption { - return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig { - o.Port = port - return o - }) -} - -var emptyLogger = logr.Logger{} - -// WithLogger sets a logger to be used by agent client. -// WithLogger and WithLogr will overwrite each other. -func WithLogger(logger *log.Logger) AgentEndpointOption { - return WithLogr(stdr.New(logger)) -} - -// WithLogr sets a logr.Logger to be used by agent client. -// WithLogr and WithLogger will overwrite each other. -func WithLogr(logger logr.Logger) AgentEndpointOption { - return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig { - o.Logger = logger - return o - }) -} - -// WithDisableAttemptReconnecting sets option to disable reconnecting udp client. -func WithDisableAttemptReconnecting() AgentEndpointOption { - return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig { - o.AttemptReconnecting = false - return o - }) -} - -// WithAttemptReconnectingInterval sets the interval between attempts to re resolve agent endpoint. -func WithAttemptReconnectingInterval(interval time.Duration) AgentEndpointOption { - return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig { - o.AttemptReconnectInterval = interval - return o - }) -} - -// WithMaxPacketSize sets the maximum UDP packet size for transport to the Jaeger agent. -func WithMaxPacketSize(size int) AgentEndpointOption { - return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig { - o.MaxPacketSize = size - return o - }) -} - -// WithCollectorEndpoint defines the full URL to the Jaeger HTTP Thrift collector. This will -// use the following environment variables for configuration if no explicit option is provided: -// -// - OTEL_EXPORTER_JAEGER_ENDPOINT is the HTTP endpoint for sending spans directly to a collector. -// - OTEL_EXPORTER_JAEGER_USER is the username to be sent as authentication to the collector endpoint. -// - OTEL_EXPORTER_JAEGER_PASSWORD is the password to be sent as authentication to the collector endpoint. -// -// The passed options will take precedence over any environment variables. -// If neither values are provided for the endpoint, the default value of "http://localhost:14268/api/traces" will be used. -// If neither values are provided for the username or the password, they will not be set since there is no default. 
-func WithCollectorEndpoint(options ...CollectorEndpointOption) EndpointOption { - return endpointOptionFunc(func() (batchUploader, error) { - cfg := collectorEndpointConfig{ - endpoint: envOr(envEndpoint, "http://localhost:14268/api/traces"), - username: envOr(envUser, ""), - password: envOr(envPassword, ""), - httpClient: http.DefaultClient, - } - - for _, opt := range options { - cfg = opt.apply(cfg) - } - - return &collectorUploader{ - endpoint: cfg.endpoint, - username: cfg.username, - password: cfg.password, - httpClient: cfg.httpClient, - }, nil - }) -} - -// CollectorEndpointOption configures a Jaeger collector endpoint. -type CollectorEndpointOption interface { - apply(collectorEndpointConfig) collectorEndpointConfig -} - -type collectorEndpointConfig struct { - // endpoint for sending spans directly to a collector. - endpoint string - - // username to be used for authentication with the collector endpoint. - username string - - // password to be used for authentication with the collector endpoint. - password string - - // httpClient to be used to make requests to the collector endpoint. - httpClient *http.Client -} - -type collectorEndpointOptionFunc func(collectorEndpointConfig) collectorEndpointConfig - -func (fn collectorEndpointOptionFunc) apply(cfg collectorEndpointConfig) collectorEndpointConfig { - return fn(cfg) -} - -// WithEndpoint is the URL for the Jaeger collector that spans are sent to. -// This option overrides any value set for the -// OTEL_EXPORTER_JAEGER_ENDPOINT environment variable. -// If this option is not passed and the environment variable is not set, -// "http://localhost:14268/api/traces" will be used by default. -func WithEndpoint(endpoint string) CollectorEndpointOption { - return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig { - o.endpoint = endpoint - return o - }) -} - -// WithUsername sets the username to be used in the authorization header sent for all requests to the collector. -// This option overrides any value set for the -// OTEL_EXPORTER_JAEGER_USER environment variable. -// If this option is not passed and the environment variable is not set, no username will be set. -func WithUsername(username string) CollectorEndpointOption { - return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig { - o.username = username - return o - }) -} - -// WithPassword sets the password to be used in the authorization header sent for all requests to the collector. -// This option overrides any value set for the -// OTEL_EXPORTER_JAEGER_PASSWORD environment variable. -// If this option is not passed and the environment variable is not set, no password will be set. -func WithPassword(password string) CollectorEndpointOption { - return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig { - o.password = password - return o - }) -} - -// WithHTTPClient sets the http client to be used to make request to the collector endpoint. -func WithHTTPClient(client *http.Client) CollectorEndpointOption { - return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig { - o.httpClient = client - return o - }) -} - -// agentUploader implements batchUploader interface sending batches to -// Jaeger through the UDP agent. 
-type agentUploader struct { - client *agentClientUDP -} - -var _ batchUploader = (*agentUploader)(nil) - -func (a *agentUploader) shutdown(ctx context.Context) error { - done := make(chan error, 1) - go func() { - done <- a.client.Close() - }() - - select { - case <-ctx.Done(): - // Prioritize not blocking the calling thread and just leak the - // spawned goroutine to close the client. - return ctx.Err() - case err := <-done: - return err - } -} - -func (a *agentUploader) upload(ctx context.Context, batch *gen.Batch) error { - return a.client.EmitBatch(ctx, batch) -} - -// collectorUploader implements batchUploader interface sending batches to -// Jaeger through the collector http endpoint. -type collectorUploader struct { - endpoint string - username string - password string - httpClient *http.Client -} - -var _ batchUploader = (*collectorUploader)(nil) - -func (c *collectorUploader) shutdown(ctx context.Context) error { - // The Exporter will cancel any active exports and will prevent all - // subsequent exports, so nothing to do here. - return nil -} - -func (c *collectorUploader) upload(ctx context.Context, batch *gen.Batch) error { - body, err := serialize(batch) - if err != nil { - return err - } - req, err := http.NewRequestWithContext(ctx, "POST", c.endpoint, body) - if err != nil { - return err - } - if c.username != "" && c.password != "" { - req.SetBasicAuth(c.username, c.password) - } - req.Header.Set("Content-Type", "application/x-thrift") - - resp, err := c.httpClient.Do(req) - if err != nil { - return err - } - - _, _ = io.Copy(io.Discard, resp.Body) - if err = resp.Body.Close(); err != nil { - return err - } - - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - return fmt.Errorf("failed to upload traces; HTTP status code: %d", resp.StatusCode) - } - return nil -} - -func serialize(obj thrift.TStruct) (*bytes.Buffer, error) { - buf := thrift.NewTMemoryBuffer() - if err := obj.Write(context.Background(), thrift.NewTBinaryProtocolConf(buf, &thrift.TConfiguration{})); err != nil { - return nil, err - } - return buf.Buffer, nil -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/LICENSE similarity index 88% rename from vendor/go.opentelemetry.io/otel/exporters/jaeger/LICENSE rename to vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/LICENSE index 261eeb9e9f..f1aee0f110 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/LICENSE +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/README.md b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/README.md new file mode 100644 index 0000000000..f84dee7ee4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/README.md @@ -0,0 +1,3 @@ +# STDOUT Trace Exporter + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/stdout/stdouttrace)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/stdout/stdouttrace) diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/config.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/config.go new file mode 100644 index 0000000000..0ba3424e29 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/config.go @@ -0,0 +1,85 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package stdouttrace // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" + +import ( + "io" + "os" +) + +var ( + defaultWriter = os.Stdout + defaultPrettyPrint = false + defaultTimestamps = true +) + +// config contains options for the STDOUT exporter. +type config struct { + // Writer is the destination. If not set, os.Stdout is used. + Writer io.Writer + + // PrettyPrint will encode the output into readable JSON. Default is + // false. + PrettyPrint bool + + // Timestamps specifies if timestamps should be printed. Default is + // true. + Timestamps bool +} + +// newConfig creates a validated Config configured with options. +func newConfig(options ...Option) config { + cfg := config{ + Writer: defaultWriter, + PrettyPrint: defaultPrettyPrint, + Timestamps: defaultTimestamps, + } + for _, opt := range options { + cfg = opt.apply(cfg) + } + return cfg +} + +// Option sets the value of an option for a Config. +type Option interface { + apply(config) config +} + +// WithWriter sets the export stream destination. +func WithWriter(w io.Writer) Option { + return writerOption{w} +} + +type writerOption struct { + W io.Writer +} + +func (o writerOption) apply(cfg config) config { + cfg.Writer = o.W + return cfg +} + +// WithPrettyPrint prettifies the emitted output. +func WithPrettyPrint() Option { + return prettyPrintOption(true) +} + +type prettyPrintOption bool + +func (o prettyPrintOption) apply(cfg config) config { + cfg.PrettyPrint = bool(o) + return cfg +} + +// WithoutTimestamps sets the export stream to not include timestamps. 
+func WithoutTimestamps() Option { + return timestampsOption(false) +} + +type timestampsOption bool + +func (o timestampsOption) apply(cfg config) config { + cfg.Timestamps = bool(o) + return cfg +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/doc.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/doc.go new file mode 100644 index 0000000000..648bc0749f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package stdouttrace contains an OpenTelemetry exporter for tracing +// telemetry to be written to an output destination as JSON. +// +// See [go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x] for information about +// the experimental features. +package stdouttrace // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter/counter.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter/counter.go new file mode 100644 index 0000000000..8c780afb02 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter/counter.go @@ -0,0 +1,31 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/counter/counter.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package counter provides a simple counter for generating unique IDs. +// +// This package is used to generate unique IDs while allowing testing packages +// to reset the counter. +package counter // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter" + +import "sync/atomic" + +// exporterN is a global 0-based count of the number of exporters created. +var exporterN atomic.Int64 + +// NextExporterID returns the next unique ID for an exporter. +func NextExporterID() int64 { + const inc = 1 + return exporterN.Add(inc) - inc +} + +// SetExporterID sets the exporter ID counter to v and returns the previous +// value. +// +// This function is useful for testing purposes, allowing you to reset the +// counter. It should not be used in production code. +func SetExporterID(v int64) int64 { + return exporterN.Swap(v) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/README.md b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/README.md new file mode 100644 index 0000000000..6b7d1aec87 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/README.md @@ -0,0 +1,36 @@ +# Experimental Features + +The `stdouttrace` exporter contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the `stdouttrace` exporter prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These features may change in backwards incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Self-Observability](#self-observability) + +### Self-Observability + +The `stdouttrace` exporter provides a self-observability feature that allows you to monitor the SDK itself. + +To opt-in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`. 
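The opt-in step described just above can be hard to picture from the diff alone, so here is a minimal, illustrative sketch. It is not part of the vendored README or of this patch; it only assumes the `stdouttrace.New`/`WithPrettyPrint` options added in this change together with the standard SDK `TracerProvider` APIs, and uses an explicit `os.Setenv` call as a stand-in for setting the variable in the process environment.

```go
package main

import (
	"context"
	"log"
	"os"

	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Opt in to the experimental self-observability metrics. The flag is
	// read when the exporter is constructed, so it must be set before New.
	os.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "true")

	// Create the stdout trace exporter added by this patch.
	exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint())
	if err != nil {
		log.Fatal(err)
	}

	// Wire the exporter into a TracerProvider as usual.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(context.Background()) }()
}
```

Because the flag is evaluated inside `New`, setting `OTEL_GO_X_SELF_OBSERVABILITY` after the exporter has been constructed has no effect.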
+ +When enabled, the SDK will create the following metrics using the global `MeterProvider`: + +- `otel.sdk.exporter.span.inflight` +- `otel.sdk.exporter.span.exported` +- `otel.sdk.exporter.operation.duration` + +Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics. + +[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/x.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/x.go new file mode 100644 index 0000000000..55bb98a965 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/x.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/exporters/stdout/stdouttrace]. +package x // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x" + +import ( + "os" + "strings" +) + +// SelfObservability is an experimental feature flag that determines if SDK +// self-observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false +}) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. 
+ vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled reports whether the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go new file mode 100644 index 0000000000..d61324d2ee --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go @@ -0,0 +1,255 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package stdouttrace // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "sync" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/sdk/trace/tracetest" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +// otelComponentType is a name identifying the type of the OpenTelemetry +// component. It is not a standardized OTel component type, so it uses the +// Go package prefixed type name to ensure uniqueness and identity. +const otelComponentType = "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter" + +var zeroTime time.Time + +var _ trace.SpanExporter = &Exporter{} + +// New creates an Exporter with the passed options. +func New(options ...Option) (*Exporter, error) { + cfg := newConfig(options...) + + enc := json.NewEncoder(cfg.Writer) + if cfg.PrettyPrint { + enc.SetIndent("", "\t") + } + + exporter := &Exporter{ + encoder: enc, + timestamps: cfg.Timestamps, + } + + if !x.SelfObservability.Enabled() { + return exporter, nil + } + + exporter.selfObservabilityEnabled = true + exporter.selfObservabilityAttrs = []attribute.KeyValue{ + semconv.OTelComponentName(fmt.Sprintf("%s/%d", otelComponentType, counter.NextExporterID())), + semconv.OTelComponentTypeKey.String(otelComponentType), + } + s := attribute.NewSet(exporter.selfObservabilityAttrs...) + exporter.selfObservabilitySetOpt = metric.WithAttributeSet(s) + + mp := otel.GetMeterProvider() + m := mp.Meter( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace", + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + var err, e error + if exporter.spanInflightMetric, e = otelconv.NewSDKExporterSpanInflight(m); e != nil { + e = fmt.Errorf("failed to create span inflight metric: %w", e) + err = errors.Join(err, e) + } + if exporter.spanExportedMetric, e = otelconv.NewSDKExporterSpanExported(m); e != nil { + e = fmt.Errorf("failed to create span exported metric: %w", e) + err = errors.Join(err, e) + } + if exporter.operationDurationMetric, e = otelconv.NewSDKExporterOperationDuration(m); e != nil { + e = fmt.Errorf("failed to create operation duration metric: %w", e) + err = errors.Join(err, e) + } + + return exporter, err +} + +// Exporter is an implementation of trace.SpanSyncer that writes spans to stdout. 
+type Exporter struct { + encoder *json.Encoder + encoderMu sync.Mutex + timestamps bool + + stoppedMu sync.RWMutex + stopped bool + + selfObservabilityEnabled bool + selfObservabilityAttrs []attribute.KeyValue // selfObservability common attributes + selfObservabilitySetOpt metric.MeasurementOption + spanInflightMetric otelconv.SDKExporterSpanInflight + spanExportedMetric otelconv.SDKExporterSpanExported + operationDurationMetric otelconv.SDKExporterOperationDuration +} + +var ( + measureAttrsPool = sync.Pool{ + New: func() any { + // "component.name" + "component.type" + "error.type" + const n = 1 + 1 + 1 + s := make([]attribute.KeyValue, 0, n) + // Return a pointer to a slice instead of a slice itself + // to avoid allocations on every call. + return &s + }, + } + + addOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.AddOption, 0, n) + return &o + }, + } + + recordOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.RecordOption, 0, n) + return &o + }, + } +) + +// ExportSpans writes spans in json format to stdout. +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) (err error) { + var success int64 + if e.selfObservabilityEnabled { + count := int64(len(spans)) + + addOpt := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *addOpt = (*addOpt)[:0] + addOptPool.Put(addOpt) + }() + + *addOpt = append(*addOpt, e.selfObservabilitySetOpt) + + e.spanInflightMetric.Inst().Add(ctx, count, *addOpt...) + defer func(starting time.Time) { + e.spanInflightMetric.Inst().Add(ctx, -count, *addOpt...) + + // Record the success and duration of the operation. + // + // Do not exclude 0 values, as they are valid and indicate no spans + // were exported which is meaningful for certain aggregations. + e.spanExportedMetric.Inst().Add(ctx, success, *addOpt...) + + mOpt := e.selfObservabilitySetOpt + if err != nil { + // additional attributes for self-observability, + // only spanExportedMetric and operationDurationMetric are supported. + attrs := measureAttrsPool.Get().(*[]attribute.KeyValue) + defer func() { + *attrs = (*attrs)[:0] // reset the slice for reuse + measureAttrsPool.Put(attrs) + }() + *attrs = append(*attrs, e.selfObservabilityAttrs...) + *attrs = append(*attrs, semconv.ErrorType(err)) + + // Do not inefficiently make a copy of attrs by using + // WithAttributes instead of WithAttributeSet. + set := attribute.NewSet(*attrs...) + mOpt = metric.WithAttributeSet(set) + + // Reset addOpt with new attribute set. 
+ *addOpt = append((*addOpt)[:0], mOpt) + + e.spanExportedMetric.Inst().Add( + ctx, + count-success, + *addOpt..., + ) + } + + recordOpt := recordOptPool.Get().(*[]metric.RecordOption) + defer func() { + *recordOpt = (*recordOpt)[:0] + recordOptPool.Put(recordOpt) + }() + + *recordOpt = append(*recordOpt, mOpt) + e.operationDurationMetric.Inst().Record( + ctx, + time.Since(starting).Seconds(), + *recordOpt..., + ) + }(time.Now()) + } + + if err := ctx.Err(); err != nil { + return err + } + e.stoppedMu.RLock() + stopped := e.stopped + e.stoppedMu.RUnlock() + if stopped { + return nil + } + + if len(spans) == 0 { + return nil + } + + stubs := tracetest.SpanStubsFromReadOnlySpans(spans) + + e.encoderMu.Lock() + defer e.encoderMu.Unlock() + for i := range stubs { + stub := &stubs[i] + // Remove timestamps + if !e.timestamps { + stub.StartTime = zeroTime + stub.EndTime = zeroTime + for j := range stub.Events { + ev := &stub.Events[j] + ev.Time = zeroTime + } + } + + // Encode span stubs, one by one + if e := e.encoder.Encode(stub); e != nil { + err = errors.Join(err, fmt.Errorf("failed to encode span %d: %w", i, e)) + continue + } + success++ + } + return err +} + +// Shutdown is called to stop the exporter, it performs no action. +func (e *Exporter) Shutdown(context.Context) error { + e.stoppedMu.Lock() + e.stopped = true + e.stoppedMu.Unlock() + + return nil +} + +// MarshalLog is the marshaling function used by the logging system to represent this Exporter. +func (e *Exporter) MarshalLog() any { + return struct { + Type string + WithTimestamps bool + }{ + Type: "stdout", + WithTimestamps: e.timestamps, + } +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/README.md new file mode 100644 index 0000000000..0678d6564f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/README.md @@ -0,0 +1,3 @@ +# SDK Trace test + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/trace/tracetest)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/trace/tracetest) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go new file mode 100644 index 0000000000..e12fa67e63 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go @@ -0,0 +1,74 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package tracetest is a testing helper package for the SDK. User can +// configure no-op or in-memory exporters to verify different SDK behaviors or +// custom instrumentation. +package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest" + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/sdk/trace" +) + +var _ trace.SpanExporter = (*NoopExporter)(nil) + +// NewNoopExporter returns a new no-op exporter. +func NewNoopExporter() *NoopExporter { + return new(NoopExporter) +} + +// NoopExporter is an exporter that drops all received spans and performs no +// action. +type NoopExporter struct{} + +// ExportSpans handles export of spans by dropping them. +func (*NoopExporter) ExportSpans(context.Context, []trace.ReadOnlySpan) error { return nil } + +// Shutdown stops the exporter by doing nothing. +func (*NoopExporter) Shutdown(context.Context) error { return nil } + +var _ trace.SpanExporter = (*InMemoryExporter)(nil) + +// NewInMemoryExporter returns a new InMemoryExporter. 
+func NewInMemoryExporter() *InMemoryExporter { + return new(InMemoryExporter) +} + +// InMemoryExporter is an exporter that stores all received spans in-memory. +type InMemoryExporter struct { + mu sync.Mutex + ss SpanStubs +} + +// ExportSpans handles export of spans by storing them in memory. +func (imsb *InMemoryExporter) ExportSpans(_ context.Context, spans []trace.ReadOnlySpan) error { + imsb.mu.Lock() + defer imsb.mu.Unlock() + imsb.ss = append(imsb.ss, SpanStubsFromReadOnlySpans(spans)...) + return nil +} + +// Shutdown stops the exporter by clearing spans held in memory. +func (imsb *InMemoryExporter) Shutdown(context.Context) error { + imsb.Reset() + return nil +} + +// Reset the current in-memory storage. +func (imsb *InMemoryExporter) Reset() { + imsb.mu.Lock() + defer imsb.mu.Unlock() + imsb.ss = nil +} + +// GetSpans returns the current in-memory stored spans. +func (imsb *InMemoryExporter) GetSpans() SpanStubs { + imsb.mu.Lock() + defer imsb.mu.Unlock() + ret := make(SpanStubs, len(imsb.ss)) + copy(ret, imsb.ss) + return ret +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go new file mode 100644 index 0000000000..ca63038f34 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest" + +import ( + "context" + "sync" + + sdktrace "go.opentelemetry.io/otel/sdk/trace" +) + +// SpanRecorder records started and ended spans. +type SpanRecorder struct { + startedMu sync.RWMutex + started []sdktrace.ReadWriteSpan + + endedMu sync.RWMutex + ended []sdktrace.ReadOnlySpan +} + +var _ sdktrace.SpanProcessor = (*SpanRecorder)(nil) + +// NewSpanRecorder returns a new initialized SpanRecorder. +func NewSpanRecorder() *SpanRecorder { + return new(SpanRecorder) +} + +// OnStart records started spans. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) OnStart(_ context.Context, s sdktrace.ReadWriteSpan) { + sr.startedMu.Lock() + defer sr.startedMu.Unlock() + sr.started = append(sr.started, s) +} + +// OnEnd records completed spans. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) OnEnd(s sdktrace.ReadOnlySpan) { + sr.endedMu.Lock() + defer sr.endedMu.Unlock() + sr.ended = append(sr.ended, s) +} + +// Shutdown does nothing. +// +// This method is safe to be called concurrently. +func (*SpanRecorder) Shutdown(context.Context) error { + return nil +} + +// ForceFlush does nothing. +// +// This method is safe to be called concurrently. +func (*SpanRecorder) ForceFlush(context.Context) error { + return nil +} + +// Started returns a copy of all started spans that have been recorded. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) Started() []sdktrace.ReadWriteSpan { + sr.startedMu.RLock() + defer sr.startedMu.RUnlock() + dst := make([]sdktrace.ReadWriteSpan, len(sr.started)) + copy(dst, sr.started) + return dst +} + +// Reset clears the recorded spans. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) Reset() { + sr.startedMu.Lock() + sr.endedMu.Lock() + defer sr.startedMu.Unlock() + defer sr.endedMu.Unlock() + + sr.started = nil + sr.ended = nil +} + +// Ended returns a copy of all ended spans that have been recorded. +// +// This method is safe to be called concurrently. 
+func (sr *SpanRecorder) Ended() []sdktrace.ReadOnlySpan { + sr.endedMu.RLock() + defer sr.endedMu.RUnlock() + dst := make([]sdktrace.ReadOnlySpan, len(sr.ended)) + copy(dst, sr.ended) + return dst +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go new file mode 100644 index 0000000000..12b384b088 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go @@ -0,0 +1,166 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/resource" + tracesdk "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" +) + +// SpanStubs is a slice of SpanStub use for testing an SDK. +type SpanStubs []SpanStub + +// SpanStubsFromReadOnlySpans returns SpanStubs populated from ro. +func SpanStubsFromReadOnlySpans(ro []tracesdk.ReadOnlySpan) SpanStubs { + if len(ro) == 0 { + return nil + } + + s := make(SpanStubs, 0, len(ro)) + for _, r := range ro { + s = append(s, SpanStubFromReadOnlySpan(r)) + } + + return s +} + +// Snapshots returns s as a slice of ReadOnlySpans. +func (s SpanStubs) Snapshots() []tracesdk.ReadOnlySpan { + if len(s) == 0 { + return nil + } + + ro := make([]tracesdk.ReadOnlySpan, len(s)) + for i := range s { + ro[i] = s[i].Snapshot() + } + return ro +} + +// SpanStub is a stand-in for a Span. +type SpanStub struct { + Name string + SpanContext trace.SpanContext + Parent trace.SpanContext + SpanKind trace.SpanKind + StartTime time.Time + EndTime time.Time + Attributes []attribute.KeyValue + Events []tracesdk.Event + Links []tracesdk.Link + Status tracesdk.Status + DroppedAttributes int + DroppedEvents int + DroppedLinks int + ChildSpanCount int + Resource *resource.Resource + InstrumentationScope instrumentation.Scope + + // Deprecated: use InstrumentationScope instead. + InstrumentationLibrary instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility +} + +// SpanStubFromReadOnlySpan returns a SpanStub populated from ro. +func SpanStubFromReadOnlySpan(ro tracesdk.ReadOnlySpan) SpanStub { + if ro == nil { + return SpanStub{} + } + + return SpanStub{ + Name: ro.Name(), + SpanContext: ro.SpanContext(), + Parent: ro.Parent(), + SpanKind: ro.SpanKind(), + StartTime: ro.StartTime(), + EndTime: ro.EndTime(), + Attributes: ro.Attributes(), + Events: ro.Events(), + Links: ro.Links(), + Status: ro.Status(), + DroppedAttributes: ro.DroppedAttributes(), + DroppedEvents: ro.DroppedEvents(), + DroppedLinks: ro.DroppedLinks(), + ChildSpanCount: ro.ChildSpanCount(), + Resource: ro.Resource(), + InstrumentationScope: ro.InstrumentationScope(), + InstrumentationLibrary: ro.InstrumentationScope(), + } +} + +// Snapshot returns a read-only copy of the SpanStub. 
+func (s SpanStub) Snapshot() tracesdk.ReadOnlySpan { + scopeOrLibrary := s.InstrumentationScope + if scopeOrLibrary.Name == "" && scopeOrLibrary.Version == "" && scopeOrLibrary.SchemaURL == "" { + scopeOrLibrary = s.InstrumentationLibrary + } + + return spanSnapshot{ + name: s.Name, + spanContext: s.SpanContext, + parent: s.Parent, + spanKind: s.SpanKind, + startTime: s.StartTime, + endTime: s.EndTime, + attributes: s.Attributes, + events: s.Events, + links: s.Links, + status: s.Status, + droppedAttributes: s.DroppedAttributes, + droppedEvents: s.DroppedEvents, + droppedLinks: s.DroppedLinks, + childSpanCount: s.ChildSpanCount, + resource: s.Resource, + instrumentationScope: scopeOrLibrary, + } +} + +type spanSnapshot struct { + // Embed the interface to implement the private method. + tracesdk.ReadOnlySpan + + name string + spanContext trace.SpanContext + parent trace.SpanContext + spanKind trace.SpanKind + startTime time.Time + endTime time.Time + attributes []attribute.KeyValue + events []tracesdk.Event + links []tracesdk.Link + status tracesdk.Status + droppedAttributes int + droppedEvents int + droppedLinks int + childSpanCount int + resource *resource.Resource + instrumentationScope instrumentation.Scope +} + +func (s spanSnapshot) Name() string { return s.name } +func (s spanSnapshot) SpanContext() trace.SpanContext { return s.spanContext } +func (s spanSnapshot) Parent() trace.SpanContext { return s.parent } +func (s spanSnapshot) SpanKind() trace.SpanKind { return s.spanKind } +func (s spanSnapshot) StartTime() time.Time { return s.startTime } +func (s spanSnapshot) EndTime() time.Time { return s.endTime } +func (s spanSnapshot) Attributes() []attribute.KeyValue { return s.attributes } +func (s spanSnapshot) Links() []tracesdk.Link { return s.links } +func (s spanSnapshot) Events() []tracesdk.Event { return s.events } +func (s spanSnapshot) Status() tracesdk.Status { return s.status } +func (s spanSnapshot) DroppedAttributes() int { return s.droppedAttributes } +func (s spanSnapshot) DroppedLinks() int { return s.droppedLinks } +func (s spanSnapshot) DroppedEvents() int { return s.droppedEvents } +func (s spanSnapshot) ChildSpanCount() int { return s.childSpanCount } +func (s spanSnapshot) Resource() *resource.Resource { return s.resource } +func (s spanSnapshot) InstrumentationScope() instrumentation.Scope { + return s.instrumentationScope +} + +func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility + return s.instrumentationScope +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go deleted file mode 100644 index 58b5eddef6..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package internal provides common semconv functionality. -package internal // import "go.opentelemetry.io/otel/semconv/internal" - -import ( - "fmt" - "net" - "net/http" - "strconv" - "strings" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace" -) - -// SemanticConventions are the semantic convention values defined for a -// version of the OpenTelemetry specification. 
-type SemanticConventions struct { - EnduserIDKey attribute.Key - HTTPClientIPKey attribute.Key - HTTPFlavorKey attribute.Key - HTTPHostKey attribute.Key - HTTPMethodKey attribute.Key - HTTPRequestContentLengthKey attribute.Key - HTTPRouteKey attribute.Key - HTTPSchemeHTTP attribute.KeyValue - HTTPSchemeHTTPS attribute.KeyValue - HTTPServerNameKey attribute.Key - HTTPStatusCodeKey attribute.Key - HTTPTargetKey attribute.Key - HTTPURLKey attribute.Key - HTTPUserAgentKey attribute.Key - NetHostIPKey attribute.Key - NetHostNameKey attribute.Key - NetHostPortKey attribute.Key - NetPeerIPKey attribute.Key - NetPeerNameKey attribute.Key - NetPeerPortKey attribute.Key - NetTransportIP attribute.KeyValue - NetTransportOther attribute.KeyValue - NetTransportTCP attribute.KeyValue - NetTransportUDP attribute.KeyValue - NetTransportUnix attribute.KeyValue -} - -// NetAttributesFromHTTPRequest generates attributes of the net -// namespace as specified by the OpenTelemetry specification for a -// span. The network parameter is a string that net.Dial function -// from standard library can understand. -func (sc *SemanticConventions) NetAttributesFromHTTPRequest( - network string, - request *http.Request, -) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - - switch network { - case "tcp", "tcp4", "tcp6": - attrs = append(attrs, sc.NetTransportTCP) - case "udp", "udp4", "udp6": - attrs = append(attrs, sc.NetTransportUDP) - case "ip", "ip4", "ip6": - attrs = append(attrs, sc.NetTransportIP) - case "unix", "unixgram", "unixpacket": - attrs = append(attrs, sc.NetTransportUnix) - default: - attrs = append(attrs, sc.NetTransportOther) - } - - peerIP, peerName, peerPort := hostIPNamePort(request.RemoteAddr) - if peerIP != "" { - attrs = append(attrs, sc.NetPeerIPKey.String(peerIP)) - } - if peerName != "" { - attrs = append(attrs, sc.NetPeerNameKey.String(peerName)) - } - if peerPort != 0 { - attrs = append(attrs, sc.NetPeerPortKey.Int(peerPort)) - } - - hostIP, hostName, hostPort := "", "", 0 - for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} { - hostIP, hostName, hostPort = hostIPNamePort(someHost) - if hostIP != "" || hostName != "" || hostPort != 0 { - break - } - } - if hostIP != "" { - attrs = append(attrs, sc.NetHostIPKey.String(hostIP)) - } - if hostName != "" { - attrs = append(attrs, sc.NetHostNameKey.String(hostName)) - } - if hostPort != 0 { - attrs = append(attrs, sc.NetHostPortKey.Int(hostPort)) - } - - return attrs -} - -// hostIPNamePort extracts the IP address, name and (optional) port from hostWithPort. -// It handles both IPv4 and IPv6 addresses. If the host portion is not recognized -// as a valid IPv4 or IPv6 address, the `ip` result will be empty and the -// host portion will instead be returned in `name`. -func hostIPNamePort(hostWithPort string) (ip, name string, port int) { - var ( - hostPart, portPart string - parsedPort uint64 - err error - ) - if hostPart, portPart, err = net.SplitHostPort(hostWithPort); err != nil { - hostPart, portPart = hostWithPort, "" - } - if parsedIP := net.ParseIP(hostPart); parsedIP != nil { - ip = parsedIP.String() - } else { - name = hostPart - } - if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { - port = int(parsedPort) // nolint: gosec // Bit size of 16 checked above. - } - return -} - -// EndUserAttributesFromHTTPRequest generates attributes of the -// enduser namespace as specified by the OpenTelemetry specification -// for a span. 
-func (sc *SemanticConventions) EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - if username, _, ok := request.BasicAuth(); ok { - return []attribute.KeyValue{sc.EnduserIDKey.String(username)} - } - return nil -} - -// HTTPClientAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the client side. -func (sc *SemanticConventions) HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - - // remove any username/password info that may be in the URL - // before adding it to the attributes - userinfo := request.URL.User - request.URL.User = nil - - attrs = append(attrs, sc.HTTPURLKey.String(request.URL.String())) - - // restore any username/password info that was removed - request.URL.User = userinfo - - return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) -} - -func (sc *SemanticConventions) httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - if ua := request.UserAgent(); ua != "" { - attrs = append(attrs, sc.HTTPUserAgentKey.String(ua)) - } - if request.ContentLength > 0 { - attrs = append(attrs, sc.HTTPRequestContentLengthKey.Int64(request.ContentLength)) - } - - return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) -} - -func (sc *SemanticConventions) httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality - attrs := []attribute.KeyValue{} - - if request.TLS != nil { - attrs = append(attrs, sc.HTTPSchemeHTTPS) - } else { - attrs = append(attrs, sc.HTTPSchemeHTTP) - } - - if request.Host != "" { - attrs = append(attrs, sc.HTTPHostKey.String(request.Host)) - } else if request.URL != nil && request.URL.Host != "" { - attrs = append(attrs, sc.HTTPHostKey.String(request.URL.Host)) - } - - flavor := "" - switch request.ProtoMajor { - case 1: - flavor = fmt.Sprintf("1.%d", request.ProtoMinor) - case 2: - flavor = "2" - } - if flavor != "" { - attrs = append(attrs, sc.HTTPFlavorKey.String(flavor)) - } - - if request.Method != "" { - attrs = append(attrs, sc.HTTPMethodKey.String(request.Method)) - } else { - attrs = append(attrs, sc.HTTPMethodKey.String(http.MethodGet)) - } - - return attrs -} - -// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes -// to be used with server-side HTTP metrics. -func (sc *SemanticConventions) HTTPServerMetricAttributesFromHTTPRequest( - serverName string, - request *http.Request, -) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - if serverName != "" { - attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) - } - return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) -} - -// HTTPServerAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the server side. Currently, only basic authentication is -// supported. 
-func (sc *SemanticConventions) HTTPServerAttributesFromHTTPRequest( - serverName, route string, - request *http.Request, -) []attribute.KeyValue { - attrs := []attribute.KeyValue{ - sc.HTTPTargetKey.String(request.RequestURI), - } - - if serverName != "" { - attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) - } - if route != "" { - attrs = append(attrs, sc.HTTPRouteKey.String(route)) - } - if values := request.Header["X-Forwarded-For"]; len(values) > 0 { - addr := values[0] - if i := strings.Index(addr, ","); i > 0 { - addr = addr[:i] - } - attrs = append(attrs, sc.HTTPClientIPKey.String(addr)) - } - - return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) -} - -// HTTPAttributesFromHTTPStatusCode generates attributes of the http -// namespace as specified by the OpenTelemetry specification for a -// span. -func (sc *SemanticConventions) HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { - attrs := []attribute.KeyValue{ - sc.HTTPStatusCodeKey.Int(code), - } - return attrs -} - -type codeRange struct { - fromInclusive int - toInclusive int -} - -func (r codeRange) contains(code int) bool { - return r.fromInclusive <= code && code <= r.toInclusive -} - -var validRangesPerCategory = map[int][]codeRange{ - 1: { - {http.StatusContinue, http.StatusEarlyHints}, - }, - 2: { - {http.StatusOK, http.StatusAlreadyReported}, - {http.StatusIMUsed, http.StatusIMUsed}, - }, - 3: { - {http.StatusMultipleChoices, http.StatusUseProxy}, - {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, - }, - 4: { - {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… - {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, - {http.StatusPreconditionRequired, http.StatusTooManyRequests}, - {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, - {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, - }, - 5: { - {http.StatusInternalServerError, http.StatusLoopDetected}, - {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, - }, -} - -// SpanStatusFromHTTPStatusCode generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { - spanCode, valid := validateHTTPStatusCode(code) - if !valid { - return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) - } - return spanCode, "" -} - -// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -// Exclude 4xx for SERVER to set the appropriate status. -func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { - spanCode, valid := validateHTTPStatusCode(code) - if !valid { - return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) - } - category := code / 100 - if spanKind == trace.SpanKindServer && category == 4 { - return codes.Unset, "" - } - return spanCode, "" -} - -// validateHTTPStatusCode validates the HTTP status code and returns -// corresponding span status code. If the `code` is not a valid HTTP status -// code, returns span status Error and false. 
-func validateHTTPStatusCode(code int) (codes.Code, bool) { - category := code / 100 - ranges, ok := validRangesPerCategory[category] - if !ok { - return codes.Error, false - } - ok = false - for _, crange := range ranges { - ok = crange.contains(code) - if ok { - break - } - } - if !ok { - return codes.Error, false - } - if category > 0 && category < 4 { - return codes.Unset, true - } - return codes.Error, true -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/README.md deleted file mode 100644 index bc60aa6039..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.21.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.21.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.21.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go deleted file mode 100644 index a9a15a1dab..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go +++ /dev/null @@ -1,1866 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" - -import "go.opentelemetry.io/otel/attribute" - -// These attributes may be used to describe the client in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API does not expose a -// clear notion of client and server). This also covers UDP network -// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3) -// and DNS. -const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. It represents the client address - unix domain - // socket name, IPv4 or IPv6 address. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/tmp/my.sock', '10.1.2.80' - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.address` SHOULD represent client address behind - // any intermediaries (e.g. proxies) if it's available. - ClientAddressKey = attribute.Key("client.address") - - // ClientPortKey is the attribute Key conforming to the "client.port" - // semantic conventions. It represents the client port number - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.port` SHOULD represent client port behind any - // intermediaries (e.g. proxies) if it's available. - ClientPortKey = attribute.Key("client.port") - - // ClientSocketAddressKey is the attribute Key conforming to the - // "client.socket.address" semantic conventions. It represents the - // immediate client peer address - unix domain socket name, IPv4 or IPv6 - // address. - // - // Type: string - // RequirementLevel: Recommended (If different than `client.address`.) 
- // Stability: stable - // Examples: '/tmp/my.sock', '127.0.0.1' - ClientSocketAddressKey = attribute.Key("client.socket.address") - - // ClientSocketPortKey is the attribute Key conforming to the - // "client.socket.port" semantic conventions. It represents the immediate - // client peer port number - // - // Type: int - // RequirementLevel: Recommended (If different than `client.port`.) - // Stability: stable - // Examples: 35555 - ClientSocketPortKey = attribute.Key("client.socket.port") -) - -// ClientAddress returns an attribute KeyValue conforming to the -// "client.address" semantic conventions. It represents the client address - -// unix domain socket name, IPv4 or IPv6 address. -func ClientAddress(val string) attribute.KeyValue { - return ClientAddressKey.String(val) -} - -// ClientPort returns an attribute KeyValue conforming to the "client.port" -// semantic conventions. It represents the client port number -func ClientPort(val int) attribute.KeyValue { - return ClientPortKey.Int(val) -} - -// ClientSocketAddress returns an attribute KeyValue conforming to the -// "client.socket.address" semantic conventions. It represents the immediate -// client peer address - unix domain socket name, IPv4 or IPv6 address. -func ClientSocketAddress(val string) attribute.KeyValue { - return ClientSocketAddressKey.String(val) -} - -// ClientSocketPort returns an attribute KeyValue conforming to the -// "client.socket.port" semantic conventions. It represents the immediate -// client peer port number -func ClientSocketPort(val int) attribute.KeyValue { - return ClientSocketPortKey.Int(val) -} - -// Describes deprecated HTTP attributes. -const ( - // HTTPMethodKey is the attribute Key conforming to the "http.method" - // semantic conventions. It represents the deprecated, use - // `http.request.method` instead. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'GET', 'POST', 'HEAD' - HTTPMethodKey = attribute.Key("http.method") - - // HTTPStatusCodeKey is the attribute Key conforming to the - // "http.status_code" semantic conventions. It represents the deprecated, - // use `http.response.status_code` instead. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 200 - HTTPStatusCodeKey = attribute.Key("http.status_code") - - // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" - // semantic conventions. It represents the deprecated, use `url.scheme` - // instead. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'http', 'https' - HTTPSchemeKey = attribute.Key("http.scheme") - - // HTTPURLKey is the attribute Key conforming to the "http.url" semantic - // conventions. It represents the deprecated, use `url.full` instead. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' - HTTPURLKey = attribute.Key("http.url") - - // HTTPTargetKey is the attribute Key conforming to the "http.target" - // semantic conventions. It represents the deprecated, use `url.path` and - // `url.query` instead. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/search?q=OpenTelemetry#SemConv' - HTTPTargetKey = attribute.Key("http.target") - - // HTTPRequestContentLengthKey is the attribute Key conforming to the - // "http.request_content_length" semantic conventions. 
It represents the - // deprecated, use `http.request.body.size` instead. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 3495 - HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") - - // HTTPResponseContentLengthKey is the attribute Key conforming to the - // "http.response_content_length" semantic conventions. It represents the - // deprecated, use `http.response.body.size` instead. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 3495 - HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") -) - -// HTTPMethod returns an attribute KeyValue conforming to the "http.method" -// semantic conventions. It represents the deprecated, use -// `http.request.method` instead. -func HTTPMethod(val string) attribute.KeyValue { - return HTTPMethodKey.String(val) -} - -// HTTPStatusCode returns an attribute KeyValue conforming to the -// "http.status_code" semantic conventions. It represents the deprecated, use -// `http.response.status_code` instead. -func HTTPStatusCode(val int) attribute.KeyValue { - return HTTPStatusCodeKey.Int(val) -} - -// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" -// semantic conventions. It represents the deprecated, use `url.scheme` -// instead. -func HTTPScheme(val string) attribute.KeyValue { - return HTTPSchemeKey.String(val) -} - -// HTTPURL returns an attribute KeyValue conforming to the "http.url" -// semantic conventions. It represents the deprecated, use `url.full` instead. -func HTTPURL(val string) attribute.KeyValue { - return HTTPURLKey.String(val) -} - -// HTTPTarget returns an attribute KeyValue conforming to the "http.target" -// semantic conventions. It represents the deprecated, use `url.path` and -// `url.query` instead. -func HTTPTarget(val string) attribute.KeyValue { - return HTTPTargetKey.String(val) -} - -// HTTPRequestContentLength returns an attribute KeyValue conforming to the -// "http.request_content_length" semantic conventions. It represents the -// deprecated, use `http.request.body.size` instead. -func HTTPRequestContentLength(val int) attribute.KeyValue { - return HTTPRequestContentLengthKey.Int(val) -} - -// HTTPResponseContentLength returns an attribute KeyValue conforming to the -// "http.response_content_length" semantic conventions. It represents the -// deprecated, use `http.response.body.size` instead. -func HTTPResponseContentLength(val int) attribute.KeyValue { - return HTTPResponseContentLengthKey.Int(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetSockPeerNameKey is the attribute Key conforming to the - // "net.sock.peer.name" semantic conventions. It represents the deprecated, - // use `server.socket.domain` on client spans. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/var/my.sock' - NetSockPeerNameKey = attribute.Key("net.sock.peer.name") - - // NetSockPeerAddrKey is the attribute Key conforming to the - // "net.sock.peer.addr" semantic conventions. It represents the deprecated, - // use `server.socket.address` on client spans and `client.socket.address` - // on server spans. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '192.168.0.1' - NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") - - // NetSockPeerPortKey is the attribute Key conforming to the - // "net.sock.peer.port" semantic conventions. 
It represents the deprecated, - // use `server.socket.port` on client spans and `client.socket.port` on - // server spans. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 65531 - NetSockPeerPortKey = attribute.Key("net.sock.peer.port") - - // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" - // semantic conventions. It represents the deprecated, use `server.address` - // on client spans and `client.address` on server spans. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'example.com' - NetPeerNameKey = attribute.Key("net.peer.name") - - // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" - // semantic conventions. It represents the deprecated, use `server.port` on - // client spans and `client.port` on server spans. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - NetPeerPortKey = attribute.Key("net.peer.port") - - // NetHostNameKey is the attribute Key conforming to the "net.host.name" - // semantic conventions. It represents the deprecated, use - // `server.address`. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'example.com' - NetHostNameKey = attribute.Key("net.host.name") - - // NetHostPortKey is the attribute Key conforming to the "net.host.port" - // semantic conventions. It represents the deprecated, use `server.port`. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - NetHostPortKey = attribute.Key("net.host.port") - - // NetSockHostAddrKey is the attribute Key conforming to the - // "net.sock.host.addr" semantic conventions. It represents the deprecated, - // use `server.socket.address`. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/var/my.sock' - NetSockHostAddrKey = attribute.Key("net.sock.host.addr") - - // NetSockHostPortKey is the attribute Key conforming to the - // "net.sock.host.port" semantic conventions. It represents the deprecated, - // use `server.socket.port`. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - NetSockHostPortKey = attribute.Key("net.sock.host.port") - - // NetTransportKey is the attribute Key conforming to the "net.transport" - // semantic conventions. It represents the deprecated, use - // `network.transport`. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: deprecated - NetTransportKey = attribute.Key("net.transport") - - // NetProtocolNameKey is the attribute Key conforming to the - // "net.protocol.name" semantic conventions. It represents the deprecated, - // use `network.protocol.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'amqp', 'http', 'mqtt' - NetProtocolNameKey = attribute.Key("net.protocol.name") - - // NetProtocolVersionKey is the attribute Key conforming to the - // "net.protocol.version" semantic conventions. It represents the - // deprecated, use `network.protocol.version`. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '3.1.1' - NetProtocolVersionKey = attribute.Key("net.protocol.version") - - // NetSockFamilyKey is the attribute Key conforming to the - // "net.sock.family" semantic conventions. It represents the deprecated, - // use `network.transport` and `network.type`. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: deprecated - NetSockFamilyKey = attribute.Key("net.sock.family") -) - -var ( - // ip_tcp - NetTransportTCP = NetTransportKey.String("ip_tcp") - // ip_udp - NetTransportUDP = NetTransportKey.String("ip_udp") - // Named or anonymous pipe - NetTransportPipe = NetTransportKey.String("pipe") - // In-process communication - NetTransportInProc = NetTransportKey.String("inproc") - // Something else (non IP-based) - NetTransportOther = NetTransportKey.String("other") -) - -var ( - // IPv4 address - NetSockFamilyInet = NetSockFamilyKey.String("inet") - // IPv6 address - NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") - // Unix domain socket path - NetSockFamilyUnix = NetSockFamilyKey.String("unix") -) - -// NetSockPeerName returns an attribute KeyValue conforming to the -// "net.sock.peer.name" semantic conventions. It represents the deprecated, use -// `server.socket.domain` on client spans. -func NetSockPeerName(val string) attribute.KeyValue { - return NetSockPeerNameKey.String(val) -} - -// NetSockPeerAddr returns an attribute KeyValue conforming to the -// "net.sock.peer.addr" semantic conventions. It represents the deprecated, use -// `server.socket.address` on client spans and `client.socket.address` on -// server spans. -func NetSockPeerAddr(val string) attribute.KeyValue { - return NetSockPeerAddrKey.String(val) -} - -// NetSockPeerPort returns an attribute KeyValue conforming to the -// "net.sock.peer.port" semantic conventions. It represents the deprecated, use -// `server.socket.port` on client spans and `client.socket.port` on server -// spans. -func NetSockPeerPort(val int) attribute.KeyValue { - return NetSockPeerPortKey.Int(val) -} - -// NetPeerName returns an attribute KeyValue conforming to the -// "net.peer.name" semantic conventions. It represents the deprecated, use -// `server.address` on client spans and `client.address` on server spans. -func NetPeerName(val string) attribute.KeyValue { - return NetPeerNameKey.String(val) -} - -// NetPeerPort returns an attribute KeyValue conforming to the -// "net.peer.port" semantic conventions. It represents the deprecated, use -// `server.port` on client spans and `client.port` on server spans. -func NetPeerPort(val int) attribute.KeyValue { - return NetPeerPortKey.Int(val) -} - -// NetHostName returns an attribute KeyValue conforming to the -// "net.host.name" semantic conventions. It represents the deprecated, use -// `server.address`. -func NetHostName(val string) attribute.KeyValue { - return NetHostNameKey.String(val) -} - -// NetHostPort returns an attribute KeyValue conforming to the -// "net.host.port" semantic conventions. It represents the deprecated, use -// `server.port`. -func NetHostPort(val int) attribute.KeyValue { - return NetHostPortKey.Int(val) -} - -// NetSockHostAddr returns an attribute KeyValue conforming to the -// "net.sock.host.addr" semantic conventions. It represents the deprecated, use -// `server.socket.address`. -func NetSockHostAddr(val string) attribute.KeyValue { - return NetSockHostAddrKey.String(val) -} - -// NetSockHostPort returns an attribute KeyValue conforming to the -// "net.sock.host.port" semantic conventions. It represents the deprecated, use -// `server.socket.port`. -func NetSockHostPort(val int) attribute.KeyValue { - return NetSockHostPortKey.Int(val) -} - -// NetProtocolName returns an attribute KeyValue conforming to the -// "net.protocol.name" semantic conventions. 
It represents the deprecated, use -// `network.protocol.name`. -func NetProtocolName(val string) attribute.KeyValue { - return NetProtocolNameKey.String(val) -} - -// NetProtocolVersion returns an attribute KeyValue conforming to the -// "net.protocol.version" semantic conventions. It represents the deprecated, -// use `network.protocol.version`. -func NetProtocolVersion(val string) attribute.KeyValue { - return NetProtocolVersionKey.String(val) -} - -// These attributes may be used to describe the receiver of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API does not expose a clear notion -// of client and server. -const ( - // DestinationDomainKey is the attribute Key conforming to the - // "destination.domain" semantic conventions. It represents the domain name - // of the destination system. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'foo.example.com' - // Note: This value may be a host name, a fully qualified domain name, or - // another host naming format. - DestinationDomainKey = attribute.Key("destination.domain") - - // DestinationAddressKey is the attribute Key conforming to the - // "destination.address" semantic conventions. It represents the peer - // address, for example IP address or UNIX socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.5.3.2' - DestinationAddressKey = attribute.Key("destination.address") - - // DestinationPortKey is the attribute Key conforming to the - // "destination.port" semantic conventions. It represents the peer port - // number - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3389, 2888 - DestinationPortKey = attribute.Key("destination.port") -) - -// DestinationDomain returns an attribute KeyValue conforming to the -// "destination.domain" semantic conventions. It represents the domain name of -// the destination system. -func DestinationDomain(val string) attribute.KeyValue { - return DestinationDomainKey.String(val) -} - -// DestinationAddress returns an attribute KeyValue conforming to the -// "destination.address" semantic conventions. It represents the peer address, -// for example IP address or UNIX socket name. -func DestinationAddress(val string) attribute.KeyValue { - return DestinationAddressKey.String(val) -} - -// DestinationPort returns an attribute KeyValue conforming to the -// "destination.port" semantic conventions. It represents the peer port number -func DestinationPort(val int) attribute.KeyValue { - return DestinationPortKey.Int(val) -} - -// Describes HTTP attributes. -const ( - // HTTPRequestMethodKey is the attribute Key conforming to the - // "http.request.method" semantic conventions. It represents the hTTP - // request method. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Examples: 'GET', 'POST', 'HEAD' - // Note: HTTP request method value SHOULD be "known" to the - // instrumentation. 
- // By default, this convention defines "known" methods as the ones listed - // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) - // and the PATCH method defined in - // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). - // - // If the HTTP request method is not known to instrumentation, it MUST set - // the `http.request.method` attribute to `_OTHER` and, except if reporting - // a metric, MUST - // set the exact method received in the request line as value of the - // `http.request.method_original` attribute. - // - // If the HTTP instrumentation could end up converting valid HTTP request - // methods to `_OTHER`, then it MUST provide a way to override - // the list of known HTTP methods. If this override is done via environment - // variable, then the environment variable MUST be named - // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated - // list of case-sensitive known HTTP methods - // (this list MUST be a full override of the default known method, it is - // not a list of known methods in addition to the defaults). - // - // HTTP method names are case-sensitive and `http.request.method` attribute - // value MUST match a known HTTP method name exactly. - // Instrumentations for specific web frameworks that consider HTTP methods - // to be case insensitive, SHOULD populate a canonical equivalent. - // Tracing instrumentations that do so, MUST also set - // `http.request.method_original` to the original value. - HTTPRequestMethodKey = attribute.Key("http.request.method") - - // HTTPResponseStatusCodeKey is the attribute Key conforming to the - // "http.response.status_code" semantic conventions. It represents the - // [HTTP response status - // code](https://tools.ietf.org/html/rfc7231#section-6). - // - // Type: int - // RequirementLevel: ConditionallyRequired (If and only if one was - // received/sent.) - // Stability: stable - // Examples: 200 - HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") -) - -var ( - // CONNECT method - HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") - // DELETE method - HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") - // GET method - HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") - // HEAD method - HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") - // OPTIONS method - HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") - // PATCH method - HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") - // POST method - HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") - // PUT method - HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") - // TRACE method - HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") - // Any HTTP method that the instrumentation has no prior knowledge of - HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") -) - -// HTTPResponseStatusCode returns an attribute KeyValue conforming to the -// "http.response.status_code" semantic conventions. It represents the [HTTP -// response status code](https://tools.ietf.org/html/rfc7231#section-6). -func HTTPResponseStatusCode(val int) attribute.KeyValue { - return HTTPResponseStatusCodeKey.Int(val) -} - -// HTTP Server attributes -const ( - // HTTPRouteKey is the attribute Key conforming to the "http.route" - // semantic conventions. It represents the matched route (path template in - // the format used by the respective server framework). 
See note below - // - // Type: string - // RequirementLevel: ConditionallyRequired (If and only if it's available) - // Stability: stable - // Examples: '/users/:userID?', '{controller}/{action}/{id?}' - // Note: MUST NOT be populated when this is not supported by the HTTP - // server framework as the route attribute should have low-cardinality and - // the URI path can NOT substitute it. - // SHOULD include the [application - // root](/docs/http/http-spans.md#http-server-definitions) if there is one. - HTTPRouteKey = attribute.Key("http.route") -) - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route (path template in the -// format used by the respective server framework). See note below -func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// Attributes for Events represented using Log Records. -const ( - // EventNameKey is the attribute Key conforming to the "event.name" - // semantic conventions. It represents the name identifies the event. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'click', 'exception' - EventNameKey = attribute.Key("event.name") - - // EventDomainKey is the attribute Key conforming to the "event.domain" - // semantic conventions. It represents the domain identifies the business - // context for the events. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Note: Events across different domains may have same `event.name`, yet be - // unrelated events. - EventDomainKey = attribute.Key("event.domain") -) - -var ( - // Events from browser apps - EventDomainBrowser = EventDomainKey.String("browser") - // Events from mobile apps - EventDomainDevice = EventDomainKey.String("device") - // Events from Kubernetes - EventDomainK8S = EventDomainKey.String("k8s") -) - -// EventName returns an attribute KeyValue conforming to the "event.name" -// semantic conventions. It represents the name identifies the event. -func EventName(val string) attribute.KeyValue { - return EventNameKey.String(val) -} - -// The attributes described in this section are rather generic. They may be -// used in any Log Record they apply to. -const ( - // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" - // semantic conventions. It represents a unique identifier for the Log - // Record. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' - // Note: If an id is provided, other log records with the same id will be - // considered duplicates and can be removed safely. This means, that two - // distinguishable log records MUST have different values. - // The id MAY be an [Universally Unique Lexicographically Sortable - // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers - // (e.g. UUID) may be used as needed. - LogRecordUIDKey = attribute.Key("log.record.uid") -) - -// LogRecordUID returns an attribute KeyValue conforming to the -// "log.record.uid" semantic conventions. It represents a unique identifier for -// the Log Record. -func LogRecordUID(val string) attribute.KeyValue { - return LogRecordUIDKey.String(val) -} - -// Describes Log attributes -const ( - // LogIostreamKey is the attribute Key conforming to the "log.iostream" - // semantic conventions. It represents the stream associated with the log. - // See below for a list of well-known values. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - LogIostreamKey = attribute.Key("log.iostream") -) - -var ( - // Logs from stdout stream - LogIostreamStdout = LogIostreamKey.String("stdout") - // Events from stderr stream - LogIostreamStderr = LogIostreamKey.String("stderr") -) - -// A file to which log was emitted. -const ( - // LogFileNameKey is the attribute Key conforming to the "log.file.name" - // semantic conventions. It represents the basename of the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'audit.log' - LogFileNameKey = attribute.Key("log.file.name") - - // LogFilePathKey is the attribute Key conforming to the "log.file.path" - // semantic conventions. It represents the full path to the file. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/var/log/mysql/audit.log' - LogFilePathKey = attribute.Key("log.file.path") - - // LogFileNameResolvedKey is the attribute Key conforming to the - // "log.file.name_resolved" semantic conventions. It represents the - // basename of the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'uuid.log' - LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") - - // LogFilePathResolvedKey is the attribute Key conforming to the - // "log.file.path_resolved" semantic conventions. It represents the full - // path to the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/var/lib/docker/uuid.log' - LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") -) - -// LogFileName returns an attribute KeyValue conforming to the -// "log.file.name" semantic conventions. It represents the basename of the -// file. -func LogFileName(val string) attribute.KeyValue { - return LogFileNameKey.String(val) -} - -// LogFilePath returns an attribute KeyValue conforming to the -// "log.file.path" semantic conventions. It represents the full path to the -// file. -func LogFilePath(val string) attribute.KeyValue { - return LogFilePathKey.String(val) -} - -// LogFileNameResolved returns an attribute KeyValue conforming to the -// "log.file.name_resolved" semantic conventions. It represents the basename of -// the file, with symlinks resolved. -func LogFileNameResolved(val string) attribute.KeyValue { - return LogFileNameResolvedKey.String(val) -} - -// LogFilePathResolved returns an attribute KeyValue conforming to the -// "log.file.path_resolved" semantic conventions. It represents the full path -// to the file, with symlinks resolved. -func LogFilePathResolved(val string) attribute.KeyValue { - return LogFilePathResolvedKey.String(val) -} - -// Describes JVM memory metric attributes. -const ( - // TypeKey is the attribute Key conforming to the "type" semantic - // conventions. It represents the type of memory. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'heap', 'non_heap' - TypeKey = attribute.Key("type") - - // PoolKey is the attribute Key conforming to the "pool" semantic - // conventions. It represents the name of the memory pool. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' - // Note: Pool names are generally obtained via - // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). - PoolKey = attribute.Key("pool") -) - -var ( - // Heap memory - TypeHeap = TypeKey.String("heap") - // Non-heap memory - TypeNonHeap = TypeKey.String("non_heap") -) - -// Pool returns an attribute KeyValue conforming to the "pool" semantic -// conventions. It represents the name of the memory pool. -func Pool(val string) attribute.KeyValue { - return PoolKey.String(val) -} - -// These attributes may be used to describe the server in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API does not expose a -// clear notion of client and server). This also covers UDP network -// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3) -// and DNS. -const ( - // ServerAddressKey is the attribute Key conforming to the "server.address" - // semantic conventions. It represents the logical server hostname, matches - // server FQDN if available, and IP or socket address if FQDN is not known. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'example.com' - ServerAddressKey = attribute.Key("server.address") - - // ServerPortKey is the attribute Key conforming to the "server.port" - // semantic conventions. It represents the logical server port number - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 80, 8080, 443 - ServerPortKey = attribute.Key("server.port") - - // ServerSocketDomainKey is the attribute Key conforming to the - // "server.socket.domain" semantic conventions. It represents the domain - // name of an immediate peer. - // - // Type: string - // RequirementLevel: Recommended (If different than `server.address`.) - // Stability: stable - // Examples: 'proxy.example.com' - // Note: Typically observed from the client side, and represents a proxy or - // other intermediary domain name. - ServerSocketDomainKey = attribute.Key("server.socket.domain") - - // ServerSocketAddressKey is the attribute Key conforming to the - // "server.socket.address" semantic conventions. It represents the physical - // server IP address or Unix socket address. If set from the client, should - // simply use the socket's peer address, and not attempt to find any actual - // server IP (i.e., if set from client, this may represent some proxy - // server instead of the logical server). - // - // Type: string - // RequirementLevel: Recommended (If different than `server.address`.) - // Stability: stable - // Examples: '10.5.3.2' - ServerSocketAddressKey = attribute.Key("server.socket.address") - - // ServerSocketPortKey is the attribute Key conforming to the - // "server.socket.port" semantic conventions. It represents the physical - // server port. - // - // Type: int - // RequirementLevel: Recommended (If different than `server.port`.) 
- // Stability: stable - // Examples: 16456 - ServerSocketPortKey = attribute.Key("server.socket.port") -) - -// ServerAddress returns an attribute KeyValue conforming to the -// "server.address" semantic conventions. It represents the logical server -// hostname, matches server FQDN if available, and IP or socket address if FQDN -// is not known. -func ServerAddress(val string) attribute.KeyValue { - return ServerAddressKey.String(val) -} - -// ServerPort returns an attribute KeyValue conforming to the "server.port" -// semantic conventions. It represents the logical server port number -func ServerPort(val int) attribute.KeyValue { - return ServerPortKey.Int(val) -} - -// ServerSocketDomain returns an attribute KeyValue conforming to the -// "server.socket.domain" semantic conventions. It represents the domain name -// of an immediate peer. -func ServerSocketDomain(val string) attribute.KeyValue { - return ServerSocketDomainKey.String(val) -} - -// ServerSocketAddress returns an attribute KeyValue conforming to the -// "server.socket.address" semantic conventions. It represents the physical -// server IP address or Unix socket address. If set from the client, should -// simply use the socket's peer address, and not attempt to find any actual -// server IP (i.e., if set from client, this may represent some proxy server -// instead of the logical server). -func ServerSocketAddress(val string) attribute.KeyValue { - return ServerSocketAddressKey.String(val) -} - -// ServerSocketPort returns an attribute KeyValue conforming to the -// "server.socket.port" semantic conventions. It represents the physical server -// port. -func ServerSocketPort(val int) attribute.KeyValue { - return ServerSocketPortKey.Int(val) -} - -// These attributes may be used to describe the sender of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API does not expose a clear notion -// of client and server. -const ( - // SourceDomainKey is the attribute Key conforming to the "source.domain" - // semantic conventions. It represents the domain name of the source - // system. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'foo.example.com' - // Note: This value may be a host name, a fully qualified domain name, or - // another host naming format. - SourceDomainKey = attribute.Key("source.domain") - - // SourceAddressKey is the attribute Key conforming to the "source.address" - // semantic conventions. It represents the source address, for example IP - // address or Unix socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.5.3.2' - SourceAddressKey = attribute.Key("source.address") - - // SourcePortKey is the attribute Key conforming to the "source.port" - // semantic conventions. It represents the source port number - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3389, 2888 - SourcePortKey = attribute.Key("source.port") -) - -// SourceDomain returns an attribute KeyValue conforming to the -// "source.domain" semantic conventions. It represents the domain name of the -// source system. 
-func SourceDomain(val string) attribute.KeyValue { - return SourceDomainKey.String(val) -} - -// SourceAddress returns an attribute KeyValue conforming to the -// "source.address" semantic conventions. It represents the source address, for -// example IP address or Unix socket name. -func SourceAddress(val string) attribute.KeyValue { - return SourceAddressKey.String(val) -} - -// SourcePort returns an attribute KeyValue conforming to the "source.port" -// semantic conventions. It represents the source port number -func SourcePort(val int) attribute.KeyValue { - return SourcePortKey.Int(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetworkTransportKey is the attribute Key conforming to the - // "network.transport" semantic conventions. It represents the [OSI - // Transport Layer](https://osi-model.com/transport-layer/) or - // [Inter-process Communication - // method](https://en.wikipedia.org/wiki/Inter-process_communication). The - // value SHOULD be normalized to lowercase. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'tcp', 'udp' - NetworkTransportKey = attribute.Key("network.transport") - - // NetworkTypeKey is the attribute Key conforming to the "network.type" - // semantic conventions. It represents the [OSI Network - // Layer](https://osi-model.com/network-layer/) or non-OSI equivalent. The - // value SHOULD be normalized to lowercase. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ipv4', 'ipv6' - NetworkTypeKey = attribute.Key("network.type") - - // NetworkProtocolNameKey is the attribute Key conforming to the - // "network.protocol.name" semantic conventions. It represents the [OSI - // Application Layer](https://osi-model.com/application-layer/) or non-OSI - // equivalent. The value SHOULD be normalized to lowercase. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'amqp', 'http', 'mqtt' - NetworkProtocolNameKey = attribute.Key("network.protocol.name") - - // NetworkProtocolVersionKey is the attribute Key conforming to the - // "network.protocol.version" semantic conventions. It represents the - // version of the application layer protocol used. See note below. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '3.1.1' - // Note: `network.protocol.version` refers to the version of the protocol - // used and might be different from the protocol client's version. If the - // HTTP client used has a version of `0.27.2`, but sends HTTP version - // `1.1`, this attribute should be set to `1.1`. - NetworkProtocolVersionKey = attribute.Key("network.protocol.version") -) - -var ( - // TCP - NetworkTransportTCP = NetworkTransportKey.String("tcp") - // UDP - NetworkTransportUDP = NetworkTransportKey.String("udp") - // Named or anonymous pipe. See note below - NetworkTransportPipe = NetworkTransportKey.String("pipe") - // Unix domain socket - NetworkTransportUnix = NetworkTransportKey.String("unix") -) - -var ( - // IPv4 - NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") - // IPv6 - NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") -) - -// NetworkProtocolName returns an attribute KeyValue conforming to the -// "network.protocol.name" semantic conventions. It represents the [OSI -// Application Layer](https://osi-model.com/application-layer/) or non-OSI -// equivalent. The value SHOULD be normalized to lowercase. 
-func NetworkProtocolName(val string) attribute.KeyValue { - return NetworkProtocolNameKey.String(val) -} - -// NetworkProtocolVersion returns an attribute KeyValue conforming to the -// "network.protocol.version" semantic conventions. It represents the version -// of the application layer protocol used. See note below. -func NetworkProtocolVersion(val string) attribute.KeyValue { - return NetworkProtocolVersionKey.String(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetworkConnectionTypeKey is the attribute Key conforming to the - // "network.connection.type" semantic conventions. It represents the - // internet connection type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'wifi' - NetworkConnectionTypeKey = attribute.Key("network.connection.type") - - // NetworkConnectionSubtypeKey is the attribute Key conforming to the - // "network.connection.subtype" semantic conventions. It represents the - // this describes more details regarding the connection.type. It may be the - // type of cell technology connection, but it could be used for describing - // details about a wifi connection. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'LTE' - NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") - - // NetworkCarrierNameKey is the attribute Key conforming to the - // "network.carrier.name" semantic conventions. It represents the name of - // the mobile carrier. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'sprint' - NetworkCarrierNameKey = attribute.Key("network.carrier.name") - - // NetworkCarrierMccKey is the attribute Key conforming to the - // "network.carrier.mcc" semantic conventions. It represents the mobile - // carrier country code. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '310' - NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") - - // NetworkCarrierMncKey is the attribute Key conforming to the - // "network.carrier.mnc" semantic conventions. It represents the mobile - // carrier network code. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '001' - NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") - - // NetworkCarrierIccKey is the attribute Key conforming to the - // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 - // alpha-2 2-character country code associated with the mobile carrier - // network. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'DE' - NetworkCarrierIccKey = attribute.Key("network.carrier.icc") -) - -var ( - // wifi - NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") - // wired - NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") - // cell - NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") - // unavailable - NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") - // unknown - NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") -) - -var ( - // GPRS - NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") - // EDGE - NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") - // UMTS - NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") - // CDMA - NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") - // HSPA - NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") - // IDEN - NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") - // EVDO Rev. B - NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") - // LTE - NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") - // EHRPD - NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") - // GSM - NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") -) - -// NetworkCarrierName returns an attribute KeyValue conforming to the -// "network.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetworkCarrierName(val string) attribute.KeyValue { - return NetworkCarrierNameKey.String(val) -} - -// NetworkCarrierMcc returns an attribute KeyValue conforming to the -// "network.carrier.mcc" semantic conventions. It represents the mobile carrier -// country code. -func NetworkCarrierMcc(val string) attribute.KeyValue { - return NetworkCarrierMccKey.String(val) -} - -// NetworkCarrierMnc returns an attribute KeyValue conforming to the -// "network.carrier.mnc" semantic conventions. It represents the mobile carrier -// network code. -func NetworkCarrierMnc(val string) attribute.KeyValue { - return NetworkCarrierMncKey.String(val) -} - -// NetworkCarrierIcc returns an attribute KeyValue conforming to the -// "network.carrier.icc" semantic conventions. 
It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetworkCarrierIcc(val string) attribute.KeyValue { - return NetworkCarrierIccKey.String(val) -} - -// Semantic conventions for HTTP client and server Spans. -const ( - // HTTPRequestMethodOriginalKey is the attribute Key conforming to the - // "http.request.method_original" semantic conventions. It represents the - // original HTTP method sent by the client in the request line. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If and only if it's different - // than `http.request.method`.) - // Stability: stable - // Examples: 'GeT', 'ACL', 'foo' - HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") - - // HTTPRequestBodySizeKey is the attribute Key conforming to the - // "http.request.body.size" semantic conventions. It represents the size of - // the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3495 - HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") - - // HTTPResponseBodySizeKey is the attribute Key conforming to the - // "http.response.body.size" semantic conventions. It represents the size - // of the response payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3495 - HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") -) - -// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the -// "http.request.method_original" semantic conventions. It represents the -// original HTTP method sent by the client in the request line. -func HTTPRequestMethodOriginal(val string) attribute.KeyValue { - return HTTPRequestMethodOriginalKey.String(val) -} - -// HTTPRequestBodySize returns an attribute KeyValue conforming to the -// "http.request.body.size" semantic conventions. It represents the size of the -// request payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPRequestBodySize(val int) attribute.KeyValue { - return HTTPRequestBodySizeKey.Int(val) -} - -// HTTPResponseBodySize returns an attribute KeyValue conforming to the -// "http.response.body.size" semantic conventions. It represents the size of -// the response payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. 
-func HTTPResponseBodySize(val int) attribute.KeyValue { - return HTTPResponseBodySizeKey.Int(val) -} - -// Semantic convention describing per-message attributes populated on messaging -// spans or links. -const ( - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used - // by the messaging system as an identifier for the message, represented as - // a string. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents - // the [conversation ID](#conversations) identifying the conversation to - // which the message belongs, represented as a string. Sometimes called - // "Correlation ID". - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MyConversationID' - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to - // the "messaging.message.payload_size_bytes" semantic conventions. It - // represents the (uncompressed) size of the message payload in bytes. Also - // use this attribute if it is unknown whether the compressed or - // uncompressed payload size is reported. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2738 - MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") - - // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key - // conforming to the "messaging.message.payload_compressed_size_bytes" - // semantic conventions. It represents the compressed size of the message - // payload in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2048 - MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") -) - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by -// the messaging system as an identifier for the message, represented as a -// string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. It -// represents the [conversation ID](#conversations) identifying the -// conversation to which the message belongs, represented as a string. -// Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming -// to the "messaging.message.payload_size_bytes" semantic conventions. It -// represents the (uncompressed) size of the message payload in bytes. Also use -// this attribute if it is unknown whether the compressed or uncompressed -// payload size is reported. 
-func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { - return MessagingMessagePayloadSizeBytesKey.Int(val) -} - -// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue -// conforming to the "messaging.message.payload_compressed_size_bytes" semantic -// conventions. It represents the compressed size of the message payload in -// bytes. -func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { - return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) -} - -// Semantic convention for attributes that describe messaging destination on -// broker -const ( - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the - // message destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MyQueue', 'MyTopic' - // Note: Destination name SHOULD uniquely identify a specific queue, topic - // or other entity within the broker. If - // the broker does not have such notion, the destination name SHOULD - // uniquely identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the - // low cardinality representation of the messaging destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/customers/{customerID}' - // Note: Destination names could be constructed from templates. An example - // would be a destination name involving a user name or product id. - // Although the destination name in this case is of high cardinality, the - // underlying template is of low cardinality and can be effectively used - // for grouping and aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might - // not exist anymore after messages are processed. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions. It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") -) - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to -// the "messaging.destination.template" semantic conventions. 
It represents the -// low cardinality representation of the messaging destination name -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to -// the "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to -// the "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be -// unnamed or have auto-generated name). -func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// Attributes for RabbitMQ -const ( - // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key - // conforming to the "messaging.rabbitmq.destination.routing_key" semantic - // conventions. It represents the rabbitMQ message routing key. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If not empty.) - // Stability: stable - // Examples: 'myKey' - MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") -) - -// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the rabbitMQ message routing key. -func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitmqDestinationRoutingKeyKey.String(val) -} - -// Attributes for Apache Kafka -const ( - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the - // message keys in Kafka are used for grouping alike messages to ensure - // they're processed on the same partition. They differ from - // `messaging.message.id` in that they're not unique. If the key is `null`, - // the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to - // be supplied for the attribute. If the key has no unambiguous, canonical - // string form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the - // "messaging.kafka.consumer.group" semantic conventions. It represents the - // name of the Kafka Consumer Group that is handling the message. Only - // applies to consumers, not producers. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") - - // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to - // the "messaging.kafka.destination.partition" semantic conventions. It - // represents the partition the message is sent to. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2 - MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") - - // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the - // "messaging.kafka.message.offset" semantic conventions. It represents the - // offset of a record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 42 - MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents - // a boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: ConditionallyRequired (If value is `true`. When - // missing, the value is assumed to be `false`.) - // Stability: stable - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") -) - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the -// message keys in Kafka are used for grouping alike messages to ensure they're -// processed on the same partition. They differ from `messaging.message.id` in -// that they're not unique. If the key is `null`, the attribute MUST NOT be -// set. -func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to -// the "messaging.kafka.consumer.group" semantic conventions. It represents the -// name of the Kafka Consumer Group that is handling the message. Only applies -// to consumers, not producers. -func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { - return MessagingKafkaConsumerGroupKey.String(val) -} - -// MessagingKafkaDestinationPartition returns an attribute KeyValue -// conforming to the "messaging.kafka.destination.partition" semantic -// conventions. It represents the partition the message is sent to. -func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { - return MessagingKafkaDestinationPartitionKey.Int(val) -} - -// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to -// the "messaging.kafka.message.offset" semantic conventions. It represents the -// offset of a record in the corresponding Kafka partition. -func MessagingKafkaMessageOffset(val int) attribute.KeyValue { - return MessagingKafkaMessageOffsetKey.Int(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming -// to the "messaging.kafka.message.tombstone" semantic conventions. It -// represents a boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// Attributes for Apache RocketMQ -const ( - // MessagingRocketmqNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources, resources in different namespaces are - // individual. 
- // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") - - // MessagingRocketmqClientGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.client_group" semantic conventions. It represents - // the name of the RocketMQ producer/consumer group that is handling the - // message. The client type is identified by the SpanKind. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myConsumerGroup' - MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") - - // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delivery_timestamp" - // semantic conventions. It represents the timestamp in milliseconds that - // the delay message is expected to be delivered to consumer. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If the message type is delay - // and delay time level is not specified.) - // Stability: stable - // Examples: 1665987217045 - MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delay_time_level" semantic - // conventions. It represents the delay time level for delay message, which - // determines the message delay time. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If the message type is delay - // and delivery timestamp is not specified.) - // Stability: stable - // Examples: 3 - MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents - // the it is essential for FIFO message. Messages that belong to the same - // message group are always processed one by one within the same consumer - // group. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) - // Stability: stable - // Examples: 'myMessageGroup' - MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents - // the type of message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketmqMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of message besides topic. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents - // the key(s) of message, another way to mark message besides message id. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to - // the "messaging.rocketmq.consumption_model" semantic conventions. It - // represents the model of message consumption. This only applies to - // consumer spans. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to -// the "messaging.rocketmq.namespace" semantic conventions. It represents the -// namespace of RocketMQ resources, resources in different namespaces are -// individual. -func MessagingRocketmqNamespace(val string) attribute.KeyValue { - return MessagingRocketmqNamespaceKey.String(val) -} - -// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.client_group" semantic conventions. It represents -// the name of the RocketMQ producer/consumer group that is handling the -// message. The client type is identified by the SpanKind. -func MessagingRocketmqClientGroup(val string) attribute.KeyValue { - return MessagingRocketmqClientGroupKey.String(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to consumer. -func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.group" semantic conventions. It represents -// the it is essential for FIFO message. Messages that belong to the same -// message group are always processed one by one within the same consumer -// group. 
-func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of message besides topic. -func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.keys" semantic conventions. It represents -// the key(s) of message, another way to mark message besides message id. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// Attributes describing URL. -const ( - // URLSchemeKey is the attribute Key conforming to the "url.scheme" - // semantic conventions. It represents the [URI - // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component - // identifying the used protocol. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https', 'ftp', 'telnet' - URLSchemeKey = attribute.Key("url.scheme") - - // URLFullKey is the attribute Key conforming to the "url.full" semantic - // conventions. It represents the absolute URL describing a network - // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', - // '//localhost' - // Note: For network calls, URL usually has - // `scheme://host[:port][path][?query][#fragment]` format, where the - // fragment is not transmitted over HTTP, but if it is known, it should be - // included nevertheless. - // `url.full` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case username and - // password should be redacted and attribute's value should be - // `https://REDACTED:REDACTED@www.example.com/`. - // `url.full` SHOULD capture the absolute URL when it is available (or can - // be reconstructed) and SHOULD NOT be validated or modified except for - // sanitizing purposes. - URLFullKey = attribute.Key("url.full") - - // URLPathKey is the attribute Key conforming to the "url.path" semantic - // conventions. It represents the [URI - // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/search' - // Note: When missing, the value is assumed to be `/` - URLPathKey = attribute.Key("url.path") - - // URLQueryKey is the attribute Key conforming to the "url.query" semantic - // conventions. It represents the [URI - // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'q=OpenTelemetry' - // Note: Sensitive content provided in query string SHOULD be scrubbed when - // instrumentations can identify it. - URLQueryKey = attribute.Key("url.query") - - // URLFragmentKey is the attribute Key conforming to the "url.fragment" - // semantic conventions. 
It represents the [URI - // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'SemConv' - URLFragmentKey = attribute.Key("url.fragment") -) - -// URLScheme returns an attribute KeyValue conforming to the "url.scheme" -// semantic conventions. It represents the [URI -// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component -// identifying the used protocol. -func URLScheme(val string) attribute.KeyValue { - return URLSchemeKey.String(val) -} - -// URLFull returns an attribute KeyValue conforming to the "url.full" -// semantic conventions. It represents the absolute URL describing a network -// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) -func URLFull(val string) attribute.KeyValue { - return URLFullKey.String(val) -} - -// URLPath returns an attribute KeyValue conforming to the "url.path" -// semantic conventions. It represents the [URI -// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component -func URLPath(val string) attribute.KeyValue { - return URLPathKey.String(val) -} - -// URLQuery returns an attribute KeyValue conforming to the "url.query" -// semantic conventions. It represents the [URI -// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component -func URLQuery(val string) attribute.KeyValue { - return URLQueryKey.String(val) -} - -// URLFragment returns an attribute KeyValue conforming to the -// "url.fragment" semantic conventions. It represents the [URI -// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component -func URLFragment(val string) attribute.KeyValue { - return URLFragmentKey.String(val) -} - -// Describes user-agent attributes. -const ( - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of - // the [HTTP - // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) - // header sent by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' - UserAgentOriginalKey = attribute.Key("user_agent.original") -) - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP -// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) -// header sent by the client. -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go deleted file mode 100644 index 461331a555..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.21.0 -// version of the OpenTelemetry semantic conventions. 
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go deleted file mode 100644 index c09d9317e2..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" - -import "go.opentelemetry.io/otel/attribute" - -// This semantic convention defines the attributes used to represent a feature -// flag evaluation as an event. -const ( - // FeatureFlagKeyKey is the attribute Key conforming to the - // "feature_flag.key" semantic conventions. It represents the unique - // identifier of the feature flag. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'logo-color' - FeatureFlagKeyKey = attribute.Key("feature_flag.key") - - // FeatureFlagProviderNameKey is the attribute Key conforming to the - // "feature_flag.provider_name" semantic conventions. It represents the - // name of the service provider that performs the flag evaluation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'Flag Manager' - FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") - - // FeatureFlagVariantKey is the attribute Key conforming to the - // "feature_flag.variant" semantic conventions. It represents the sHOULD be - // a semantic identifier for a value. If one is unavailable, a stringified - // version of the value can be used. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'red', 'true', 'on' - // Note: A semantic identifier, commonly referred to as a variant, provides - // a means - // for referring to a value without including the value itself. This can - // provide additional context for understanding the meaning behind a value. - // For example, the variant `red` maybe be used for the value `#c05543`. - // - // A stringified version of the value can be used in situations where a - // semantic identifier is unavailable. String representation of the value - // should be determined by the implementer. - FeatureFlagVariantKey = attribute.Key("feature_flag.variant") -) - -// FeatureFlagKey returns an attribute KeyValue conforming to the -// "feature_flag.key" semantic conventions. It represents the unique identifier -// of the feature flag. -func FeatureFlagKey(val string) attribute.KeyValue { - return FeatureFlagKeyKey.String(val) -} - -// FeatureFlagProviderName returns an attribute KeyValue conforming to the -// "feature_flag.provider_name" semantic conventions. It represents the name of -// the service provider that performs the flag evaluation. -func FeatureFlagProviderName(val string) attribute.KeyValue { - return FeatureFlagProviderNameKey.String(val) -} - -// FeatureFlagVariant returns an attribute KeyValue conforming to the -// "feature_flag.variant" semantic conventions. It represents the sHOULD be a -// semantic identifier for a value. If one is unavailable, a stringified -// version of the value can be used. -func FeatureFlagVariant(val string) attribute.KeyValue { - return FeatureFlagVariantKey.String(val) -} - -// RPC received/sent message. 
-const ( - // MessageTypeKey is the attribute Key conforming to the "message.type" - // semantic conventions. It represents the whether this is a received or - // sent message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessageTypeKey = attribute.Key("message.type") - - // MessageIDKey is the attribute Key conforming to the "message.id" - // semantic conventions. It represents the mUST be calculated as two - // different counters starting from `1` one for sent messages and one for - // received message. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Note: This way we guarantee that the values will be consistent between - // different implementations. - MessageIDKey = attribute.Key("message.id") - - // MessageCompressedSizeKey is the attribute Key conforming to the - // "message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - MessageCompressedSizeKey = attribute.Key("message.compressed_size") - - // MessageUncompressedSizeKey is the attribute Key conforming to the - // "message.uncompressed_size" semantic conventions. It represents the - // uncompressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") -) - -var ( - // sent - MessageTypeSent = MessageTypeKey.String("SENT") - // received - MessageTypeReceived = MessageTypeKey.String("RECEIVED") -) - -// MessageID returns an attribute KeyValue conforming to the "message.id" -// semantic conventions. It represents the mUST be calculated as two different -// counters starting from `1` one for sent messages and one for received -// message. -func MessageID(val int) attribute.KeyValue { - return MessageIDKey.Int(val) -} - -// MessageCompressedSize returns an attribute KeyValue conforming to the -// "message.compressed_size" semantic conventions. It represents the compressed -// size of the message in bytes. -func MessageCompressedSize(val int) attribute.KeyValue { - return MessageCompressedSizeKey.Int(val) -} - -// MessageUncompressedSize returns an attribute KeyValue conforming to the -// "message.uncompressed_size" semantic conventions. It represents the -// uncompressed size of the message in bytes. -func MessageUncompressedSize(val int) attribute.KeyValue { - return MessageUncompressedSizeKey.Int(val) -} - -// The attributes used to report a single exception associated with a span. -const ( - // ExceptionEscapedKey is the attribute Key conforming to the - // "exception.escaped" semantic conventions. It represents the sHOULD be - // set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Note: An exception is considered to have escaped (or left) the scope of - // a span, - // if that span is ended while the exception is still logically "in - // flight". - // This may be actually "in flight" in some languages (e.g. if the - // exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most - // languages. - // - // It is usually not possible to determine at the point where an exception - // is thrown - // whether it will escape the scope of a span. 
- // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending - // the span, - // as done in the [example above](#recording-an-exception). - // - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") -) - -// ExceptionEscaped returns an attribute KeyValue conforming to the -// "exception.escaped" semantic conventions. It represents the sHOULD be set to -// true if the exception event is recorded at a point where it is known that -// the exception is escaping the scope of the span. -func ExceptionEscaped(val bool) attribute.KeyValue { - return ExceptionEscapedKey.Bool(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go deleted file mode 100644 index 5184ee339a..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. - ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go deleted file mode 100644 index f7aaa50b9e..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go +++ /dev/null @@ -1,2299 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" - -import "go.opentelemetry.io/otel/attribute" - -// The web browser in which the application represented by the resource is -// running. The `browser.*` attributes MUST be used only for resources that -// represent applications running in a web browser (regardless of whether -// running on a mobile or desktop device). -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. 
- // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the - // browser is running on a mobile device - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. It represents the preferred - // language of the user using the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") -) - -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// A cloud environment (e.g. GCP, Azure, AWS) -const ( - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" - // semantic conventions. It represents the geographical region the resource - // is running. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for - // example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure - // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), - // [Google Cloud regions](https://cloud.google.com/about/locations), or - // [Tencent Cloud - // regions](https://www.tencentcloud.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the - // "cloud.resource_id" semantic conventions. It represents the cloud - // provider-specific native identifier of the monitored cloud resource - // (e.g. an - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // on AWS, a [fully qualified resource - // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) - // on Azure, a [full resource - // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) - // on GCP) - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', - // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', - // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud - // provider. - // The following well-known definitions MUST be used if you set this - // attribute and they apply: - // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. - CloudResourceIDKey = attribute.Key("cloud.resource_id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the - // resource is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. 
- CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. - CloudPlatformKey = attribute.Key("cloud.platform") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - CloudProviderHeroku = CloudProviderKey.String("heroku") - // IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Bare Metal Solution (BMS) - CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") - // Red Hat OpenShift on IBM Cloud - 
CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") - // Tencent Cloud Cloud Virtual Machine (CVM) - CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the -// "cloud.region" semantic conventions. It represents the geographical region -// the resource is running. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) -// on AWS, a [fully qualified resource -// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) -// on Azure, a [full resource -// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) -// on GCP) -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// Resources used by AWS Elastic Container Service (ECS). -const ( - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an - // [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the task - // definition family this task definition is a member of. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for this task definition. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS -// task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). -func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the task -// definition family this task definition is a member of. -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// this task definition. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Resources used by AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Resources specific to Amazon Web Services. -const ( - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. It represents the name(s) - // of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") -) - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). 
-func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// Resource used by Google Cloud Run. -const ( - // GCPCloudRunJobExecutionKey is the attribute Key conforming to the - // "gcp.cloud_run.job.execution" semantic conventions. It represents the - // name of the Cloud Run - // [execution](https://cloud.google.com/run/docs/managing/job-executions) - // being run for the Job, as set by the - // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'job-name-xxxx', 'sample-job-mdw84' - GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") - - // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the - // "gcp.cloud_run.job.task_index" semantic conventions. It represents the - // index for a task within an execution as provided by the - // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 0, 1 - GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") -) - -// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.execution" semantic conventions. It represents the name -// of the Cloud Run -// [execution](https://cloud.google.com/run/docs/managing/job-executions) being -// run for the Job, as set by the -// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobExecution(val string) attribute.KeyValue { - return GCPCloudRunJobExecutionKey.String(val) -} - -// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index -// for a task within an execution as provided by the -// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { - return GCPCloudRunJobTaskIndexKey.Int(val) -} - -// Resources used by Google Compute Engine (GCE). -const ( - // GCPGceInstanceNameKey is the attribute Key conforming to the - // "gcp.gce.instance.name" semantic conventions. It represents the instance - // name of a GCE instance. This is the value provided by `host.name`, the - // visible name of the instance in the Cloud Console UI, and the prefix for - // the default hostname of the instance as defined by the [default internal - // DNS - // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'instance-1', 'my-vm-name' - GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") - - // GCPGceInstanceHostnameKey is the attribute Key conforming to the - // "gcp.gce.instance.hostname" semantic conventions. It represents the - // hostname of a GCE instance. This is the full value of the default or - // [custom - // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'my-host1234.example.com', - // 'sample-vm.us-west1-b.c.my-project.internal' - GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") -) - -// GCPGceInstanceName returns an attribute KeyValue conforming to the -// "gcp.gce.instance.name" semantic conventions. It represents the instance -// name of a GCE instance. This is the value provided by `host.name`, the -// visible name of the instance in the Cloud Console UI, and the prefix for the -// default hostname of the instance as defined by the [default internal DNS -// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). -func GCPGceInstanceName(val string) attribute.KeyValue { - return GCPGceInstanceNameKey.String(val) -} - -// GCPGceInstanceHostname returns an attribute KeyValue conforming to the -// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname -// of a GCE instance. This is the full value of the default or [custom -// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). -func GCPGceInstanceHostname(val string) attribute.KeyValue { - return GCPGceInstanceHostnameKey.String(val) -} - -// Heroku dyno metadata -const ( - // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the - // "heroku.release.creation_timestamp" semantic conventions. It represents - // the time and date the release was created - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2022-10-23T18:00:42Z' - HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") - - // HerokuReleaseCommitKey is the attribute Key conforming to the - // "heroku.release.commit" semantic conventions. It represents the commit - // hash for the current release - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' - HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") - - // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" - // semantic conventions. It represents the unique identifier for the - // application - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' - HerokuAppIDKey = attribute.Key("heroku.app.id") -) - -// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming -// to the "heroku.release.creation_timestamp" semantic conventions. It -// represents the time and date the release was created -func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { - return HerokuReleaseCreationTimestampKey.String(val) -} - -// HerokuReleaseCommit returns an attribute KeyValue conforming to the -// "heroku.release.commit" semantic conventions. 
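A similarly hedged sketch for the Cloud Run job helpers above: in practice the two values come from the CLOUD_RUN_EXECUTION and CLOUD_RUN_TASK_INDEX environment variables referenced in the comments. Package and function names are illustrative, not part of this patch.

package gcpattrs

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // assumed version
)

// cloudRunJobResource records which Cloud Run job execution and task this
// process belongs to.
func cloudRunJobResource(execution string, taskIndex int) *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.GCPCloudRunJobExecution(execution),
		semconv.GCPCloudRunJobTaskIndex(taskIndex),
	)
}

The GCE helpers defined above (GCPGceInstanceName, GCPGceInstanceHostname) are used the same way for Compute Engine hosts.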
It represents the commit hash -// for the current release -func HerokuReleaseCommit(val string) attribute.KeyValue { - return HerokuReleaseCommitKey.String(val) -} - -// HerokuAppID returns an attribute KeyValue conforming to the -// "heroku.app.id" semantic conventions. It represents the unique identifier -// for the application -func HerokuAppID(val string) attribute.KeyValue { - return HerokuAppIDKey.String(val) -} - -// A container instance. -const ( - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. It represents the container ID. Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of - // the image the container was built on. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageTagKey is the attribute Key conforming to the - // "container.image.tag" semantic conventions. It represents the container - // image tag. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '0.1' - ContainerImageTagKey = attribute.Key("container.image.tag") - - // ContainerImageIDKey is the attribute Key conforming to the - // "container.image.id" semantic conventions. It represents the runtime - // specific image identifier. Usually a hash algorithm followed by a UUID. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' - // Note: Docker defines a sha256 of the image id; `container.image.id` - // corresponds to the `Image` field from the Docker container inspect - // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) - // endpoint. - // K8S defines a link to the container registry repository with digest - // `"imageID": "registry.azurecr.io - // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. - // OCI defines a digest of manifest. - ContainerImageIDKey = attribute.Key("container.image.id") - - // ContainerCommandKey is the attribute Key conforming to the - // "container.command" semantic conventions. It represents the command used - // to run the container (i.e. the command name). 
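The Heroku dyno helpers above follow the same pattern; a short sketch, again with placeholder values lifted from the comments and illustrative names (the real values would come from Heroku's dyno metadata):

package herokuattrs

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // assumed version
)

// dynoResource describes the Heroku app and release this process runs for.
func dynoResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.HerokuAppID("2daa2797-e42b-4624-9322-ec3f968df4da"),
		semconv.HerokuReleaseCommit("e6134959463efd8966b20e75b913cafe3f5ec"),
		semconv.HerokuReleaseCreationTimestamp("2022-10-23T18:00:42Z"),
	)
}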
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'otelcontribcol' - // Note: If using embedded credentials or sensitive data, it is recommended - // to remove them to prevent potential leakage. - ContainerCommandKey = attribute.Key("container.command") - - // ContainerCommandLineKey is the attribute Key conforming to the - // "container.command_line" semantic conventions. It represents the full - // command run by the container as a single string representing the full - // command. [2] - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'otelcontribcol --config config.yaml' - ContainerCommandLineKey = attribute.Key("container.command_line") - - // ContainerCommandArgsKey is the attribute Key conforming to the - // "container.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) run by the - // container. [2] - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'otelcontribcol, --config, config.yaml' - ContainerCommandArgsKey = attribute.Key("container.command_args") -) - -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the -// "container.id" semantic conventions. It represents the container ID. Usually -// a UUID, as for example used to [identify Docker -// containers](https://docs.docker.com/engine/reference/run/#container-identification). -// The UUID might be abbreviated. -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageTag returns an attribute KeyValue conforming to the -// "container.image.tag" semantic conventions. It represents the container -// image tag. -func ContainerImageTag(val string) attribute.KeyValue { - return ContainerImageTagKey.String(val) -} - -// ContainerImageID returns an attribute KeyValue conforming to the -// "container.image.id" semantic conventions. It represents the runtime -// specific image identifier. Usually a hash algorithm followed by a UUID. -func ContainerImageID(val string) attribute.KeyValue { - return ContainerImageIDKey.String(val) -} - -// ContainerCommand returns an attribute KeyValue conforming to the -// "container.command" semantic conventions. It represents the command used to -// run the container (i.e. the command name). -func ContainerCommand(val string) attribute.KeyValue { - return ContainerCommandKey.String(val) -} - -// ContainerCommandLine returns an attribute KeyValue conforming to the -// "container.command_line" semantic conventions. 
It represents the full -// command run by the container as a single string representing the full -// command. [2] -func ContainerCommandLine(val string) attribute.KeyValue { - return ContainerCommandLineKey.String(val) -} - -// ContainerCommandArgs returns an attribute KeyValue conforming to the -// "container.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) run by the -// container. [2] -func ContainerCommandArgs(val ...string) attribute.KeyValue { - return ContainerCommandArgsKey.StringSlice(val) -} - -// The software deployment. -const ( - // DeploymentEnvironmentKey is the attribute Key conforming to the - // "deployment.environment" semantic conventions. It represents the name of - // the [deployment - // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'staging', 'production' - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// DeploymentEnvironment returns an attribute KeyValue conforming to the -// "deployment.environment" semantic conventions. It represents the name of the -// [deployment -// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka -// deployment tier). -func DeploymentEnvironment(val string) attribute.KeyValue { - return DeploymentEnvironmentKey.String(val) -} - -// The device on which the process represented by this resource is running. -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values - // outlined below. This value is not an advertising identifier and MUST NOT - // be used as such. On iOS (Swift or Objective-C), this value MUST be equal - // to the [vendor - // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). - // On Android (Java or Kotlin), this value MUST be equal to the Firebase - // Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on - // best practices and exact implementation details. Caution should be taken - // when storing personal data or anything which can identify a user. GDPR - // and data protection laws may apply, ensure you do your own due - // diligence. - DeviceIDKey = attribute.Key("device.id") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine readable version - // of the model identifier rather than the market or consumer-friendly name - // of the device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the - // "device.model.name" semantic conventions. 
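A sketch of the container.* helpers together with deployment.environment, assuming the same sdk/resource and semconv packages as above; every value is a placeholder from the doc comments, and the package/function names are illustrative.

package containerattrs

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // assumed version
)

// containerResource labels telemetry with the container identity and the
// deployment tier it runs in.
func containerResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ContainerName("opentelemetry-autoconf"),
		semconv.ContainerID("a3bf90e006b2"),
		semconv.ContainerRuntime("docker"),
		semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
		semconv.ContainerImageTag("0.1"),
		semconv.DeploymentEnvironment("production"),
	)
}

The sdk/resource package also ships WithContainer()/WithContainerID() detector options that fill the container ID automatically, so setting it by hand is usually only needed when the detector cannot run.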
It represents the marketing - // name for the device model - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human readable version of - // the device model rather than a machine readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of - // the device manufacturer - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" -// semantic conventions. It represents a unique identifier representing the -// device -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name -// for the device model -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// A serverless instance. -const ( - // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic - // conventions. It represents the name of the single function that this - // runtime instance executes. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'my-function', 'myazurefunctionapp/some-function-name' - // Note: This is the name of the function as configured/deployed on the - // FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function`](/docs/general/general-attributes.md#source-code-attributes) - // span attributes). - // - // For some cloud providers, the above definition is ambiguous. The - // following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud - // providers/products: - // - // * **Azure:** The full name `/`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `cloud.resource_id` attribute). - FaaSNameKey = attribute.Key("faas.name") - - // FaaSVersionKey is the attribute Key conforming to the "faas.version" - // semantic conventions. 
It represents the immutable version of the - // function being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '26', 'pinkfroid-00002' - // Note: Depending on the cloud provider and platform, use: - // - // * **AWS Lambda:** The [function - // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) - // (an integer represented as a decimal string). - // * **Google Cloud Run (Services):** The - // [revision](https://cloud.google.com/run/docs/managing/revisions) - // (i.e., the function name plus the revision suffix). - // * **Google Cloud Functions:** The value of the - // [`K_REVISION` environment - // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). - // * **Azure Functions:** Not applicable. Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") - - // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" - // semantic conventions. It represents the execution environment ID as a - // string, that will be potentially reused for other invocations to the - // same function/function version. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' - // Note: * **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - - // FaaSMaxMemoryKey is the attribute Key conforming to the - // "faas.max_memory" semantic conventions. It represents the amount of - // memory available to the serverless function converted to Bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 134217728 - // Note: It's recommended to set this attribute since e.g. too little - // memory can easily stop a Java AWS Lambda function from working - // correctly. On AWS Lambda, the environment variable - // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must - // be multiplied by 1,048,576). - FaaSMaxMemoryKey = attribute.Key("faas.max_memory") -) - -// FaaSName returns an attribute KeyValue conforming to the "faas.name" -// semantic conventions. It represents the name of the single function that -// this runtime instance executes. -func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) -} - -// FaaSVersion returns an attribute KeyValue conforming to the -// "faas.version" semantic conventions. It represents the immutable version of -// the function being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) -} - -// FaaSInstance returns an attribute KeyValue conforming to the -// "faas.instance" semantic conventions. It represents the execution -// environment ID as a string, that will be potentially reused for other -// invocations to the same function/function version. -func FaaSInstance(val string) attribute.KeyValue { - return FaaSInstanceKey.String(val) -} - -// FaaSMaxMemory returns an attribute KeyValue conforming to the -// "faas.max_memory" semantic conventions. It represents the amount of memory -// available to the serverless function converted to Bytes. -func FaaSMaxMemory(val int) attribute.KeyValue { - return FaaSMaxMemoryKey.Int(val) -} - -// A host is defined as a computing instance. For example, physical servers, -// virtual machines, switches or disk array. -const ( - // HostIDKey is the attribute Key conforming to the "host.id" semantic - // conventions. 
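For the serverless attributes above, a hedged sketch of an AWS-Lambda-style resource; the values are the placeholder examples from the comments, and the memory figure is AWS_LAMBDA_FUNCTION_MEMORY_SIZE converted to bytes as the faas.max_memory note describes.

package faasattrs

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // assumed version
)

// lambdaResource identifies the function, its version, and the execution
// environment instance.
func lambdaResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.FaaSName("my-function"),
		semconv.FaaSVersion("26"),
		semconv.FaaSInstance("2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de"),
		semconv.FaaSMaxMemory(134217728), // 128 MiB * 1,048,576 bytes
	)
}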
It represents the unique host ID. For Cloud, this must be - // the instance_id assigned by the cloud provider. For non-containerized - // systems, this should be the `machine-id`. See the table below for the - // sources to use to determine the `machine-id` based on operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'fdbf79e8af94cb7f9e8df36789187052' - HostIDKey = attribute.Key("host.id") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified - // hostname, or another name specified by the user. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") - - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is - // running on. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - HostArchKey = attribute.Key("host.arch") - - // HostImageNameKey is the attribute Key conforming to the - // "host.image.name" semantic conventions. It represents the name of the VM - // image or OS install the host was instantiated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - - // HostImageIDKey is the attribute Key conforming to the "host.image.id" - // semantic conventions. It represents the vM image ID or host OS image ID. - // For Cloud, this value is from the provider. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - - // HostImageVersionKey is the attribute Key conforming to the - // "host.image.version" semantic conventions. It represents the version - // string of the VM image or host OS as defined in [Version - // Attributes](README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// HostID returns an attribute KeyValue conforming to the "host.id" semantic -// conventions. It represents the unique host ID. For Cloud, this must be the -// instance_id assigned by the cloud provider. For non-containerized systems, -// this should be the `machine-id`. 
See the table below for the sources to use -// to determine the `machine-id` based on operating system. -func HostID(val string) attribute.KeyValue { - return HostIDKey.String(val) -} - -// HostName returns an attribute KeyValue conforming to the "host.name" -// semantic conventions. It represents the name of the host. On Unix systems, -// it may contain what the hostname command returns, or the fully qualified -// hostname, or another name specified by the user. -func HostName(val string) attribute.KeyValue { - return HostNameKey.String(val) -} - -// HostType returns an attribute KeyValue conforming to the "host.type" -// semantic conventions. It represents the type of host. For Cloud, this must -// be the machine type. -func HostType(val string) attribute.KeyValue { - return HostTypeKey.String(val) -} - -// HostImageName returns an attribute KeyValue conforming to the -// "host.image.name" semantic conventions. It represents the name of the VM -// image or OS install the host was instantiated from. -func HostImageName(val string) attribute.KeyValue { - return HostImageNameKey.String(val) -} - -// HostImageID returns an attribute KeyValue conforming to the -// "host.image.id" semantic conventions. It represents the vM image ID or host -// OS image ID. For Cloud, this value is from the provider. -func HostImageID(val string) attribute.KeyValue { - return HostImageIDKey.String(val) -} - -// HostImageVersion returns an attribute KeyValue conforming to the -// "host.image.version" semantic conventions. It represents the version string -// of the VM image or host OS as defined in [Version -// Attributes](README.md#version-attributes). -func HostImageVersion(val string) attribute.KeyValue { - return HostImageVersionKey.String(val) -} - -// A Kubernetes Cluster. -const ( - // K8SClusterNameKey is the attribute Key conforming to the - // "k8s.cluster.name" semantic conventions. It represents the name of the - // cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") - - // K8SClusterUIDKey is the attribute Key conforming to the - // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for - // the cluster, set to the UID of the `kube-system` namespace. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' - // Note: K8S does not have support for obtaining a cluster ID. If this is - // ever - // added, we will recommend collecting the `k8s.cluster.uid` through the - // official APIs. In the meantime, we are able to use the `uid` of the - // `kube-system` namespace as a proxy for cluster ID. Read on for the - // rationale. - // - // Every object created in a K8S cluster is assigned a distinct UID. The - // `kube-system` namespace is used by Kubernetes itself and will exist - // for the lifetime of the cluster. Using the `uid` of the `kube-system` - // namespace is a reasonable proxy for the K8S ClusterID as it will only - // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are - // UUIDs as standardized by - // [ISO/IEC 9834-8 and ITU-T - // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). - // Which states: - // - // > If generated according to one of the mechanisms defined in Rec. 
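The host.* attributes above are rarely set by hand; the SDK's built-in detectors populate them. A minimal sketch, assuming only go.opentelemetry.io/otel/sdk/resource (function and package names are illustrative):

package hostattrs

import (
	"context"

	"go.opentelemetry.io/otel/sdk/resource"
)

// hostResource lets the SDK detectors fill host.name, host.id and the os.*
// attributes defined in this file, instead of calling the helpers directly.
func hostResource(ctx context.Context) (*resource.Resource, error) {
	return resource.New(ctx,
		resource.WithHost(),   // host.name
		resource.WithHostID(), // host.id (machine-id or cloud instance id)
		resource.WithOS(),     // os.type, os.description
	)
}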
- // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be - // different from all other UUIDs generated before 3603 A.D., or is - // extremely likely to be different (depending on the mechanism chosen). - // - // Therefore, UIDs between clusters should be extremely unlikely to - // conflict. - K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// K8SClusterUID returns an attribute KeyValue conforming to the -// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the -// cluster, set to the UID of the `kube-system` namespace. -func K8SClusterUID(val string) attribute.KeyValue { - return K8SClusterUIDKey.String(val) -} - -// A Kubernetes Node object. -const ( - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" - // semantic conventions. It represents the UID of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") -) - -// K8SNodeName returns an attribute KeyValue conforming to the -// "k8s.node.name" semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// A Kubernetes Namespace. -const ( - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") -) - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// A Kubernetes Pod object. -const ( - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" - // semantic conventions. It represents the UID of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" - // semantic conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") -) - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. 
-func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. -func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// A container in a -// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). -const ( - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, must be unique within a Pod. Container - // runtime usually uses different globally unique name (`container.name`). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the - // number of times the container was restarted. This attribute can be used - // to identify a particular container (running or stopped) within a - // container spec. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 0, 2 - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") -) - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, must be unique within a Pod. Container -// runtime usually uses different globally unique name (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify -// a particular container (running or stopped) within a container spec. -func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// A Kubernetes ReplicaSet object. -const ( - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of - // the ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") -) - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. -func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// A Kubernetes Deployment object. 
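A sketch tying the Kubernetes attributes above together for a single pod. In practice the values are usually injected through the downward API or via OTEL_RESOURCE_ATTRIBUTES (picked up by resource.WithFromEnv()); the names here are illustrative.

package k8sattrs

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // assumed version
)

// podResource identifies the cluster, namespace and pod a workload runs in.
func podResource(clusterName, namespace, podName, podUID string) *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.K8SClusterName(clusterName),
		semconv.K8SNamespaceName(namespace),
		semconv.K8SPodName(podName),
		semconv.K8SPodUID(podUID),
	)
}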
-const ( - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of - // the Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") -) - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// A Kubernetes StatefulSet object. -const ( - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of - // the StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") -) - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// A Kubernetes DaemonSet object. -const ( - // K8SDaemonSetUIDKey is the attribute Key conforming to the - // "k8s.daemonset.uid" semantic conventions. It represents the UID of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") -) - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. 
-func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// A Kubernetes Job object. -const ( - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" - // semantic conventions. It represents the UID of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" - // semantic conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") -) - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// A Kubernetes CronJob object. -const ( - // K8SCronJobUIDKey is the attribute Key conforming to the - // "k8s.cronjob.uid" semantic conventions. It represents the UID of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SCronJobNameKey is the attribute Key conforming to the - // "k8s.cronjob.name" semantic conventions. It represents the name of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") -) - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the -// CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// The operating system (OS) on which the process represented by this resource -// is running. -const ( - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - OSTypeKey = attribute.Key("os.type") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to - // be parsed) OS version information, like e.g. reported by `ver` or - // `lsb_release -a` commands. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 - // LTS' - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - - // OSVersionKey is the attribute Key conforming to the "os.version" - // semantic conventions. It represents the version string of the operating - // system as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// OSDescription returns an attribute KeyValue conforming to the -// "os.description" semantic conventions. It represents the human readable (not -// intended to be parsed) OS version information, like e.g. reported by `ver` -// or `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating -// system as defined in [Version -// Attributes](/docs/resource/README.md#version-attributes). -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// An operating system process. -const ( - // ProcessPIDKey is the attribute Key conforming to the "process.pid" - // semantic conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent - // Process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name - // of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. 
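Where the WithOS() detector shown earlier is not wanted, the os.* helpers and OSType enum members above can be set explicitly. A sketch with placeholder values from the comments; names are illustrative.

package osattrs

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // assumed version
)

// osResource describes the operating system explicitly.
func osResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.OSTypeLinux, // enum member, not a helper function
		semconv.OSName("Ubuntu"),
		semconv.OSVersion("18.04.1"),
		semconv.OSDescription("Ubuntu 18.04.1 LTS"),
	)
}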
On Windows, can be set to the base name - // of `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) - // Stability: stable - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full - // path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) - // Stability: stable - // Examples: '/usr/bin/cmd/otelcol' - ProcessExecutablePathKey = attribute.Key("process.executable.path") - - // ProcessCommandKey is the attribute Key conforming to the - // "process.command" semantic conventions. It represents the command used - // to launch the process (i.e. the command name). On Linux based systems, - // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can - // be set to the first parameter extracted from `GetCommandLineW`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) - // Stability: stable - // Examples: 'cmd/otelcol' - ProcessCommandKey = attribute.Key("process.command") - - // ProcessCommandLineKey is the attribute Key conforming to the - // "process.command_line" semantic conventions. It represents the full - // command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. - // Do not set this if you have to assemble it just for monitoring; use - // `process.command_args` instead. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) - // Stability: stable - // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' - ProcessCommandLineKey = attribute.Key("process.command_line") - - // ProcessCommandArgsKey is the attribute Key conforming to the - // "process.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) as received - // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited - // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, - // this would be the full argv vector passed to `main`. - // - // Type: string[] - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) - // Stability: stable - // Examples: 'cmd/otecol', '--config=config.yaml' - ProcessCommandArgsKey = attribute.Key("process.command_args") - - // ProcessOwnerKey is the attribute Key conforming to the "process.owner" - // semantic conventions. It represents the username of the user that owns - // the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'root' - ProcessOwnerKey = attribute.Key("process.owner") -) - -// ProcessPID returns an attribute KeyValue conforming to the "process.pid" -// semantic conventions. It represents the process identifier (PID). 
-func ProcessPID(val int) attribute.KeyValue { - return ProcessPIDKey.Int(val) -} - -// ProcessParentPID returns an attribute KeyValue conforming to the -// "process.parent_pid" semantic conventions. It represents the parent Process -// identifier (PID). -func ProcessParentPID(val int) attribute.KeyValue { - return ProcessParentPIDKey.Int(val) -} - -// ProcessExecutableName returns an attribute KeyValue conforming to the -// "process.executable.name" semantic conventions. It represents the name of -// the process executable. On Linux based systems, can be set to the `Name` in -// `proc/[pid]/status`. On Windows, can be set to the base name of -// `GetProcessImageFileNameW`. -func ProcessExecutableName(val string) attribute.KeyValue { - return ProcessExecutableNameKey.String(val) -} - -// ProcessExecutablePath returns an attribute KeyValue conforming to the -// "process.executable.path" semantic conventions. It represents the full path -// to the process executable. On Linux based systems, can be set to the target -// of `proc/[pid]/exe`. On Windows, can be set to the result of -// `GetProcessImageFileNameW`. -func ProcessExecutablePath(val string) attribute.KeyValue { - return ProcessExecutablePathKey.String(val) -} - -// ProcessCommand returns an attribute KeyValue conforming to the -// "process.command" semantic conventions. It represents the command used to -// launch the process (i.e. the command name). On Linux based systems, can be -// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to -// the first parameter extracted from `GetCommandLineW`. -func ProcessCommand(val string) attribute.KeyValue { - return ProcessCommandKey.String(val) -} - -// ProcessCommandLine returns an attribute KeyValue conforming to the -// "process.command_line" semantic conventions. It represents the full command -// used to launch the process as a single string representing the full command. -// On Windows, can be set to the result of `GetCommandLineW`. Do not set this -// if you have to assemble it just for monitoring; use `process.command_args` -// instead. -func ProcessCommandLine(val string) attribute.KeyValue { - return ProcessCommandLineKey.String(val) -} - -// ProcessCommandArgs returns an attribute KeyValue conforming to the -// "process.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) as received by -// the process. On Linux-based systems (and some other Unixoid systems -// supporting procfs), can be set according to the list of null-delimited -// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, -// this would be the full argv vector passed to `main`. -func ProcessCommandArgs(val ...string) attribute.KeyValue { - return ProcessCommandArgsKey.StringSlice(val) -} - -// ProcessOwner returns an attribute KeyValue conforming to the -// "process.owner" semantic conventions. It represents the username of the user -// that owns the process. -func ProcessOwner(val string) attribute.KeyValue { - return ProcessOwnerKey.String(val) -} - -// The single (language) runtime instance which is monitored. -const ( - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of - // the runtime of this process. For compiled native binaries, this SHOULD - // be the name of the compiler. 
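As with the host attributes, the process.* keys above are normally filled by the SDK's detector rather than by calling each helper. A minimal sketch, assuming go.opentelemetry.io/otel/sdk/resource:

package processattrs

import (
	"context"

	"go.opentelemetry.io/otel/sdk/resource"
)

// processResource populates the process.* attributes (pid, executable name
// and path, command args, owner) via the built-in detector.
func processResource(ctx context.Context) (*resource.Resource, error) {
	return resource.New(ctx,
		resource.WithProcess(),
	)
}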
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the - // version of the runtime of this process, as returned by the runtime - // without modification. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") -) - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. For compiled native binaries, this SHOULD be the -// name of the compiler. -func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without -// modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// A service instance. -const ( - // ServiceNameKey is the attribute Key conforming to the "service.name" - // semantic conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled - // services. If the value was not specified, SDKs MUST fallback to - // `unknown_service:` concatenated with - // [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, - // the value MUST be set to `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. The format is not defined by these - // conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2.0.0', 'a01dbef8a' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. 
-func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. The format is not defined by these -// conventions. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// A service instance. -const ( - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). Zero-length namespace string is assumed equal to - // unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID - // of the service instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'my-k8s-pod-deployment-1', - // '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be - // globally unique). The ID helps to distinguish instances of the same - // service that exist at the same time (e.g. instances of a horizontally - // scaled service). It is preferable for the ID to be persistent and stay - // the same for the lifetime of the service instance, however it is - // acceptable that the ID is ephemeral and changes during important - // lifetime events for the service (e.g. service restarts). If the service - // has no inherent unique ID that can be used as the value of this - // attribute it is recommended to generate a random Version 1 or Version 4 - // RFC 4122 UUID (services aiming for reproducible UUIDs may also use - // Version 5, see RFC 4122 for more recommendations). - ServiceInstanceIDKey = attribute.Key("service.instance.id") -) - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of -// the service instance. -func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. 
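Illustrative sketch, not part of the vendored file: the service.* helpers above are what a tracer provider's resource is typically built from. The name, version, namespace and instance ID below are the example values from the convention, not real configuration.

package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)

// serviceResource builds a resource carrying the service.* attributes
// defined above, pinned to the v1.21.0 schema URL.
func serviceResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("shoppingcart"),
		semconv.ServiceVersion("2.0.0"),
		semconv.ServiceNamespace("Shop"),
		semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
	)
}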
- // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'opentelemetry' - // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute - // to `opentelemetry`. - // If another SDK, like a fork or a vendor-provided implementation, is - // used, this SDK MUST set the - // `telemetry.sdk.name` attribute to the fully-qualified class or module - // name of this SDK's main entry point - // or another suitable identifier depending on the language. - // The identifier `opentelemetry` is reserved and MUST NOT be used in this - // case. - // All custom identifiers SHOULD be stable across different versions of an - // implementation. - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the - // language of the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // rust - TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") -) - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version -// string of the telemetry SDK. -func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetryAutoVersionKey is the attribute Key conforming to the - // "telemetry.auto.version" semantic conventions. It represents the version - // string of the auto instrumentation agent, if used. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.2.3' - TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") -) - -// TelemetryAutoVersion returns an attribute KeyValue conforming to the -// "telemetry.auto.version" semantic conventions. 
It represents the version -// string of the auto instrumentation agent, if used. -func TelemetryAutoVersion(val string) attribute.KeyValue { - return TelemetryAutoVersionKey.String(val) -} - -// Resource describing the packaged software running the application code. Web -// engines are typically executed using process.runtime. -const ( - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") - - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") -) - -// WebEngineName returns an attribute KeyValue conforming to the -// "webengine.name" semantic conventions. It represents the name of the web -// engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the -// web engine. -func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition -// information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's -// concepts. -const ( - // OTelScopeNameKey is the attribute Key conforming to the - // "otel.scope.name" semantic conventions. It represents the name of the - // instrumentation scope - (`InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'io.opentelemetry.contrib.mongodb' - OTelScopeNameKey = attribute.Key("otel.scope.name") - - // OTelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of - // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.0.0' - OTelScopeVersionKey = attribute.Key("otel.scope.version") -) - -// OTelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). 
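Usage sketch (not part of the vendored file): the otel.scope.* attributes correspond to the instrumentation scope fixed when a tracer is obtained; non-OTLP exporters render the scope through these keys. The scope name and version below are the example values from the convention.

package example

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

// scopedTracer obtains a tracer whose instrumentation scope
// (otel.scope.name / otel.scope.version for non-OTLP exporters)
// is "io.opentelemetry.contrib.mongodb" at version "1.0.0".
func scopedTracer() trace.Tracer {
	return otel.Tracer("io.opentelemetry.contrib.mongodb",
		trace.WithInstrumentationVersion("1.0.0"),
	)
}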
-func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) -} - -// OTelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OTelScopeVersion(val string) attribute.KeyValue { - return OTelScopeVersionKey.String(val) -} - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry -// Scope's concepts. -const ( - // OTelLibraryNameKey is the attribute Key conforming to the - // "otel.library.name" semantic conventions. It represents the deprecated, - // use the `otel.scope.name` attribute. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'io.opentelemetry.contrib.mongodb' - OTelLibraryNameKey = attribute.Key("otel.library.name") - - // OTelLibraryVersionKey is the attribute Key conforming to the - // "otel.library.version" semantic conventions. It represents the - // deprecated, use the `otel.scope.version` attribute. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '1.0.0' - OTelLibraryVersionKey = attribute.Key("otel.library.version") -) - -// OTelLibraryName returns an attribute KeyValue conforming to the -// "otel.library.name" semantic conventions. It represents the deprecated, use -// the `otel.scope.name` attribute. -func OTelLibraryName(val string) attribute.KeyValue { - return OTelLibraryNameKey.String(val) -} - -// OTelLibraryVersion returns an attribute KeyValue conforming to the -// "otel.library.version" semantic conventions. It represents the deprecated, -// use the `otel.scope.version` attribute. -func OTelLibraryVersion(val string) attribute.KeyValue { - return OTelLibraryVersionKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go deleted file mode 100644 index be07217d8a..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. Semconv packages starting from v1.4.0 must declare -// non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.21.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go deleted file mode 100644 index 55698cc447..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go +++ /dev/null @@ -1,2484 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" - -import "go.opentelemetry.io/otel/attribute" - -// The shared attributes used to report a single exception associated with a -// span or log. -const ( - // ExceptionTypeKey is the attribute Key conforming to the "exception.type" - // semantic conventions. It represents the type of the exception (its - // fully-qualified class name, if applicable). The dynamic type of the - // exception should be preferred over the static type in languages that - // support it. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'java.net.ConnectException', 'OSError' - ExceptionTypeKey = attribute.Key("exception.type") - - // ExceptionMessageKey is the attribute Key conforming to the - // "exception.message" semantic conventions. It represents the exception - // message. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Division by zero', "Can't convert 'int' object to str - // implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - - // ExceptionStacktraceKey is the attribute Key conforming to the - // "exception.stacktrace" semantic conventions. It represents a stacktrace - // as a string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") -) - -// ExceptionType returns an attribute KeyValue conforming to the -// "exception.type" semantic conventions. It represents the type of the -// exception (its fully-qualified class name, if applicable). The dynamic type -// of the exception should be preferred over the static type in languages that -// support it. -func ExceptionType(val string) attribute.KeyValue { - return ExceptionTypeKey.String(val) -} - -// ExceptionMessage returns an attribute KeyValue conforming to the -// "exception.message" semantic conventions. It represents the exception -// message. -func ExceptionMessage(val string) attribute.KeyValue { - return ExceptionMessageKey.String(val) -} - -// ExceptionStacktrace returns an attribute KeyValue conforming to the -// "exception.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func ExceptionStacktrace(val string) attribute.KeyValue { - return ExceptionStacktraceKey.String(val) -} - -// Span attributes used by AWS Lambda (in addition to general `faas` -// attributes). -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `cloud.resource_id` if an alias is - // involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` applicable). 
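A sketch, not part of the vendored file, of how the exception.* attributes defined earlier in this file usually end up on a span: the SDK's RecordError emits an "exception" event carrying exception.type and exception.message, and WithStackTrace adds exception.stacktrace.

package example

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

func recordFailure(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "operation")
	defer span.End()

	err := errors.New("division by zero")
	// RecordError adds an "exception" span event with exception.type and
	// exception.message; WithStackTrace(true) adds exception.stacktrace.
	span.RecordError(err, trace.WithStackTrace(true))
	span.SetStatus(codes.Error, err.Error())
}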
-func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// Attributes for CloudEvents. CloudEvents is a specification on how to define -// event data in a standard way. These attributes can be attached to spans when -// performing operations with CloudEvents, regardless of the protocol being -// used. -const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the - // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) - // uniquely identifies the event. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the - // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) - // identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'https://github.com/cloudevents', - // '/cloudevents/spec/pull/123', 'my-service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents - // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) - // which the event uses. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the - // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) - // contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'com.github.pull_request.opened', - // 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. It represents the - // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) - // of the event in the context of the event producer (identified by - // source). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the -// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) -// uniquely identifies the event. -func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. 
It represents the -// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) -// identifies the context in which an event happened. -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to -// the "cloudevents.event_spec_version" semantic conventions. It represents the -// [version of the CloudEvents -// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) -// which the event uses. -func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the -// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) -// contains a value describing the type of event related to the originating -// occurrence. -func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the -// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) -// of the event in the context of the event producer (identified by source). -func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// Semantic conventions for the OpenTracing Shim -const ( - // OpentracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the - // parent-child Reference type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Note: The causal relationship between a child Span and a parent Span. - OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span does not depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// The attributes used to perform database client calls. -const ( - // DBSystemKey is the attribute Key conforming to the "db.system" semantic - // conventions. It represents an identifier for the database management - // system (DBMS) product being used. See below for a list of well-known - // identifiers. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - DBSystemKey = attribute.Key("db.system") - - // DBConnectionStringKey is the attribute Key conforming to the - // "db.connection_string" semantic conventions. It represents the - // connection string used to connect to the database. It is recommended to - // remove embedded credentials. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' - DBConnectionStringKey = attribute.Key("db.connection_string") - - // DBUserKey is the attribute Key conforming to the "db.user" semantic - // conventions. It represents the username for accessing the database. 
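Sketch only, not part of the vendored file: attaching the cloudevents.* attributes above to a producer span. The ID, source, spec version and type are the example values from the convention.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)

// annotateEventSpan marks a span with the CloudEvents attributes
// defined above, using the convention's example values.
func annotateEventSpan(ctx context.Context) {
	_, span := otel.Tracer("example/events").Start(ctx, "publish event")
	defer span.End()

	span.SetAttributes(
		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
		semconv.CloudeventsEventSource("my-service"),
		semconv.CloudeventsEventSpecVersion("1.0"),
		semconv.CloudeventsEventType("com.example.object.deleted.v2"),
	)
}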
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'readonly_user', 'reporting_user' - DBUserKey = attribute.Key("db.user") - - // DBJDBCDriverClassnameKey is the attribute Key conforming to the - // "db.jdbc.driver_classname" semantic conventions. It represents the - // fully-qualified class name of the [Java Database Connectivity - // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) - // driver used to connect. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'org.postgresql.Driver', - // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' - DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") - - // DBNameKey is the attribute Key conforming to the "db.name" semantic - // conventions. It represents the this attribute is used to report the name - // of the database being accessed. For commands that switch the database, - // this should be set to the target database (even if the command fails). - // - // Type: string - // RequirementLevel: ConditionallyRequired (If applicable.) - // Stability: stable - // Examples: 'customers', 'main' - // Note: In some SQL databases, the database name to be used is called - // "schema name". In case there are multiple layers that could be - // considered for database name (e.g. Oracle instance name and schema - // name), the database name to be used is the more specific layer (e.g. - // Oracle schema name). - DBNameKey = attribute.Key("db.name") - - // DBStatementKey is the attribute Key conforming to the "db.statement" - // semantic conventions. It represents the database statement being - // executed. - // - // Type: string - // RequirementLevel: Recommended (Should be collected by default only if - // there is sanitization that excludes sensitive information.) - // Stability: stable - // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' - DBStatementKey = attribute.Key("db.statement") - - // DBOperationKey is the attribute Key conforming to the "db.operation" - // semantic conventions. It represents the name of the operation being - // executed, e.g. the [MongoDB command - // name](https://docs.mongodb.com/manual/reference/command/#database-operations) - // such as `findAndModify`, or the SQL keyword. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If `db.statement` is not - // applicable.) - // Stability: stable - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: When setting this to an SQL keyword, it is not recommended to - // attempt any client-side parsing of `db.statement` just to get this - // property, but it should be set if the operation name is provided by the - // library being instrumented. If the SQL statement has an ambiguous - // operation, or performs more than one operation, this value may be - // omitted. - DBOperationKey = attribute.Key("db.operation") -) - -var ( - // Some other SQL database. Fallback only. 
See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // Microsoft SQL Server Compact - DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") - // OpenSearch - DBSystemOpensearch = DBSystemKey.String("opensearch") - // ClickHouse - DBSystemClickhouse = DBSystemKey.String("clickhouse") - // Cloud Spanner - DBSystemSpanner = DBSystemKey.String("spanner") - // Trino - DBSystemTrino = DBSystemKey.String("trino") -) - -// DBConnectionString returns an 
attribute KeyValue conforming to the -// "db.connection_string" semantic conventions. It represents the connection -// string used to connect to the database. It is recommended to remove embedded -// credentials. -func DBConnectionString(val string) attribute.KeyValue { - return DBConnectionStringKey.String(val) -} - -// DBUser returns an attribute KeyValue conforming to the "db.user" semantic -// conventions. It represents the username for accessing the database. -func DBUser(val string) attribute.KeyValue { - return DBUserKey.String(val) -} - -// DBJDBCDriverClassname returns an attribute KeyValue conforming to the -// "db.jdbc.driver_classname" semantic conventions. It represents the -// fully-qualified class name of the [Java Database Connectivity -// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver -// used to connect. -func DBJDBCDriverClassname(val string) attribute.KeyValue { - return DBJDBCDriverClassnameKey.String(val) -} - -// DBName returns an attribute KeyValue conforming to the "db.name" semantic -// conventions. It represents the this attribute is used to report the name of -// the database being accessed. For commands that switch the database, this -// should be set to the target database (even if the command fails). -func DBName(val string) attribute.KeyValue { - return DBNameKey.String(val) -} - -// DBStatement returns an attribute KeyValue conforming to the -// "db.statement" semantic conventions. It represents the database statement -// being executed. -func DBStatement(val string) attribute.KeyValue { - return DBStatementKey.String(val) -} - -// DBOperation returns an attribute KeyValue conforming to the -// "db.operation" semantic conventions. It represents the name of the operation -// being executed, e.g. the [MongoDB command -// name](https://docs.mongodb.com/manual/reference/command/#database-operations) -// such as `findAndModify`, or the SQL keyword. -func DBOperation(val string) attribute.KeyValue { - return DBOperationKey.String(val) -} - -// Connection-level attributes for Microsoft SQL Server -const ( - // DBMSSQLInstanceNameKey is the attribute Key conforming to the - // "db.mssql.instance_name" semantic conventions. It represents the - // Microsoft SQL Server [instance - // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) - // connecting to. This name is used to determine the port of a named - // instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MSSQLSERVER' - // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer - // required (but still recommended if non-standard). - DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") -) - -// DBMSSQLInstanceName returns an attribute KeyValue conforming to the -// "db.mssql.instance_name" semantic conventions. It represents the Microsoft -// SQL Server [instance -// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) -// connecting to. This name is used to determine the port of a named instance. -func DBMSSQLInstanceName(val string) attribute.KeyValue { - return DBMSSQLInstanceNameKey.String(val) -} - -// Call-level attributes for Cassandra -const ( - // DBCassandraPageSizeKey is the attribute Key conforming to the - // "db.cassandra.page_size" semantic conventions. It represents the fetch - // size used for paging, i.e. how many rows will be returned at once. 
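Sketch only (not part of the vendored file): the db.* helpers above on a client span. The database, operation and statement values are hypothetical placeholders drawn from the convention's examples.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
	"go.opentelemetry.io/otel/trace"
)

// dbClientSpan starts a client span annotated with the db.* attributes
// defined above.
func dbClientSpan(ctx context.Context) (context.Context, trace.Span) {
	return otel.Tracer("example/db").Start(ctx, "SELECT customers",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.DBSystemPostgreSQL,    // db.system enum member from the list above
			semconv.DBName("customers"),   // db.name
			semconv.DBOperation("SELECT"), // db.operation
			semconv.DBStatement("SELECT * FROM wuser_table"), // db.statement, pre-sanitized
		),
	)
}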
- // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - - // DBCassandraConsistencyLevelKey is the attribute Key conforming to the - // "db.cassandra.consistency_level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - - // DBCassandraTableKey is the attribute Key conforming to the - // "db.cassandra.table" semantic conventions. It represents the name of the - // primary table that the operation is acting upon, including the keyspace - // name (if applicable). - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'mytable' - // Note: This mirrors the db.sql.table attribute but references cassandra - // rather than sql. It is not recommended to attempt any client-side - // parsing of `db.statement` just to get this property, but it should be - // set if it is provided by the library being instrumented. If the - // operation is acting upon an anonymous table, or more than one table, - // this value MUST NOT be set. - DBCassandraTableKey = attribute.Key("db.cassandra.table") - - // DBCassandraIdempotenceKey is the attribute Key conforming to the - // "db.cassandra.idempotence" semantic conventions. It represents the - // whether or not the query is idempotent. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - - // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming - // to the "db.cassandra.speculative_execution_count" semantic conventions. - // It represents the number of times a query was speculatively executed. - // Not set or `0` if the query was not executed speculatively. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") - - // DBCassandraCoordinatorIDKey is the attribute Key conforming to the - // "db.cassandra.coordinator.id" semantic conventions. It represents the ID - // of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - - // DBCassandraCoordinatorDCKey is the attribute Key conforming to the - // "db.cassandra.coordinator.dc" semantic conventions. It represents the - // data center of the coordinating node for a query. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -// DBCassandraPageSize returns an attribute KeyValue conforming to the -// "db.cassandra.page_size" semantic conventions. It represents the fetch size -// used for paging, i.e. how many rows will be returned at once. -func DBCassandraPageSize(val int) attribute.KeyValue { - return DBCassandraPageSizeKey.Int(val) -} - -// DBCassandraTable returns an attribute KeyValue conforming to the -// "db.cassandra.table" semantic conventions. It represents the name of the -// primary table that the operation is acting upon, including the keyspace name -// (if applicable). -func DBCassandraTable(val string) attribute.KeyValue { - return DBCassandraTableKey.String(val) -} - -// DBCassandraIdempotence returns an attribute KeyValue conforming to the -// "db.cassandra.idempotence" semantic conventions. It represents the whether -// or not the query is idempotent. -func DBCassandraIdempotence(val bool) attribute.KeyValue { - return DBCassandraIdempotenceKey.Bool(val) -} - -// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue -// conforming to the "db.cassandra.speculative_execution_count" semantic -// conventions. It represents the number of times a query was speculatively -// executed. Not set or `0` if the query was not executed speculatively. -func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return DBCassandraSpeculativeExecutionCountKey.Int(val) -} - -// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of -// the coordinating node for a query. -func DBCassandraCoordinatorID(val string) attribute.KeyValue { - return DBCassandraCoordinatorIDKey.String(val) -} - -// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.dc" semantic conventions. It represents the data -// center of the coordinating node for a query. -func DBCassandraCoordinatorDC(val string) attribute.KeyValue { - return DBCassandraCoordinatorDCKey.String(val) -} - -// Call-level attributes for Redis -const ( - // DBRedisDBIndexKey is the attribute Key conforming to the - // "db.redis.database_index" semantic conventions. 
It represents the index - // of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To - // be used instead of the generic `db.name` attribute. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If other than the default - // database (`0`).) - // Stability: stable - // Examples: 0, 1, 15 - DBRedisDBIndexKey = attribute.Key("db.redis.database_index") -) - -// DBRedisDBIndex returns an attribute KeyValue conforming to the -// "db.redis.database_index" semantic conventions. It represents the index of -// the database being accessed as used in the [`SELECT` -// command](https://redis.io/commands/select), provided as an integer. To be -// used instead of the generic `db.name` attribute. -func DBRedisDBIndex(val int) attribute.KeyValue { - return DBRedisDBIndexKey.Int(val) -} - -// Call-level attributes for MongoDB -const ( - // DBMongoDBCollectionKey is the attribute Key conforming to the - // "db.mongodb.collection" semantic conventions. It represents the - // collection being accessed within the database stated in `db.name`. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") -) - -// DBMongoDBCollection returns an attribute KeyValue conforming to the -// "db.mongodb.collection" semantic conventions. It represents the collection -// being accessed within the database stated in `db.name`. -func DBMongoDBCollection(val string) attribute.KeyValue { - return DBMongoDBCollectionKey.String(val) -} - -// Call-level attributes for SQL databases -const ( - // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" - // semantic conventions. It represents the name of the primary table that - // the operation is acting upon, including the database name (if - // applicable). - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'public.users', 'customers' - // Note: It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting - // upon an anonymous table, or more than one table, this value MUST NOT be - // set. - DBSQLTableKey = attribute.Key("db.sql.table") -) - -// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" -// semantic conventions. It represents the name of the primary table that the -// operation is acting upon, including the database name (if applicable). -func DBSQLTable(val string) attribute.KeyValue { - return DBSQLTableKey.String(val) -} - -// Call-level attributes for Cosmos DB. -const ( - // DBCosmosDBClientIDKey is the attribute Key conforming to the - // "db.cosmosdb.client_id" semantic conventions. It represents the unique - // Cosmos client instance id. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' - DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") - - // DBCosmosDBOperationTypeKey is the attribute Key conforming to the - // "db.cosmosdb.operation_type" semantic conventions. It represents the - // cosmosDB Operation Type. 
- // - // Type: Enum - // RequirementLevel: ConditionallyRequired (when performing one of the - // operations in this list) - // Stability: stable - DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") - - // DBCosmosDBConnectionModeKey is the attribute Key conforming to the - // "db.cosmosdb.connection_mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as - // default)) - // Stability: stable - DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") - - // DBCosmosDBContainerKey is the attribute Key conforming to the - // "db.cosmosdb.container" semantic conventions. It represents the cosmos - // DB container name. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if available) - // Stability: stable - // Examples: 'anystring' - DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") - - // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the - // "db.cosmosdb.request_content_length" semantic conventions. It represents - // the request payload size in bytes - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") - - // DBCosmosDBStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos - // DB status code. - // - // Type: int - // RequirementLevel: ConditionallyRequired (if response was received) - // Stability: stable - // Examples: 200, 201 - DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") - - // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.sub_status_code" semantic conventions. It represents the - // cosmos DB sub status code. - // - // Type: int - // RequirementLevel: ConditionallyRequired (when response was received and - // contained sub-code.) - // Stability: stable - // Examples: 1000, 1002 - DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") - - // DBCosmosDBRequestChargeKey is the attribute Key conforming to the - // "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU - // consumed for that operation - // - // Type: double - // RequirementLevel: ConditionallyRequired (when available) - // Stability: stable - // Examples: 46.18, 1.0 - DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") -) - -var ( - // invalid - DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") - // create - DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") - // patch - DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") - // read - DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") - // read_feed - DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") - // delete - DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") - // replace - DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") - // execute - DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") - // query - DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") - // head - DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") - // head_feed - DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") - // upsert - DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") - // batch - DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") - // query_plan - DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") - // execute_javascript - DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") -) - -var ( - // Gateway (HTTP) connections mode - DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") - // Direct connection - DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") -) - -// DBCosmosDBClientID returns an attribute KeyValue conforming to the -// "db.cosmosdb.client_id" semantic conventions. It represents the unique -// Cosmos client instance id. -func DBCosmosDBClientID(val string) attribute.KeyValue { - return DBCosmosDBClientIDKey.String(val) -} - -// DBCosmosDBContainer returns an attribute KeyValue conforming to the -// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB -// container name. -func DBCosmosDBContainer(val string) attribute.KeyValue { - return DBCosmosDBContainerKey.String(val) -} - -// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming -// to the "db.cosmosdb.request_content_length" semantic conventions. It -// represents the request payload size in bytes -func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { - return DBCosmosDBRequestContentLengthKey.Int(val) -} - -// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB -// status code. -func DBCosmosDBStatusCode(val int) attribute.KeyValue { - return DBCosmosDBStatusCodeKey.Int(val) -} - -// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos -// DB sub status code. -func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { - return DBCosmosDBSubStatusCodeKey.Int(val) -} - -// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the -// "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU -// consumed for that operation -func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { - return DBCosmosDBRequestChargeKey.Float64(val) -} - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's -// concepts. -const ( - // OTelStatusCodeKey is the attribute Key conforming to the - // "otel.status_code" semantic conventions. It represents the name of the - // code, either "OK" or "ERROR". MUST NOT be set if the status code is - // UNSET. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the - // description of the Status if it has a value, otherwise not set. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'resource not found' - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -var ( - // The operation has been validated by an Application developer or Operator to have completed successfully - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the -// description of the Status if it has a value, otherwise not set. -func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -} - -// This semantic convention describes an instance of a function that runs -// without provisioning or managing of servers (also known as serverless -// functions or Function as a Service (FaaS)) with spans. -const ( - // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" - // semantic conventions. It represents the type of the trigger which caused - // this function invocation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Note: For the server/consumer span on the incoming side, - // `faas.trigger` MUST be set. - // - // Clients invoking FaaS instances usually cannot set `faas.trigger`, - // since they would typically need to look in the payload to determine - // the event type. If clients set it, it should be the same as the - // trigger that corresponding incoming would have (i.e., this has - // nothing to do with the underlying transport used to make the API - // call to invoke the lambda, which is often HTTP). - FaaSTriggerKey = attribute.Key("faas.trigger") - - // FaaSInvocationIDKey is the attribute Key conforming to the - // "faas.invocation_id" semantic conventions. It represents the invocation - // ID of the current function invocation. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSInvocationIDKey = attribute.Key("faas.invocation_id") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// FaaSInvocationID returns an attribute KeyValue conforming to the -// "faas.invocation_id" semantic conventions. It represents the invocation ID -// of the current function invocation. -func FaaSInvocationID(val string) attribute.KeyValue { - return FaaSInvocationIDKey.String(val) -} - -// Semantic Convention for FaaS triggered as a response to some data source -// operation such as a database or filesystem read/write. -const ( - // FaaSDocumentCollectionKey is the attribute Key conforming to the - // "faas.document.collection" semantic conventions. It represents the name - // of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in - // Cosmos DB to the database name. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - - // FaaSDocumentOperationKey is the attribute Key conforming to the - // "faas.document.operation" semantic conventions. It represents the - // describes the type of the operation that was performed on the data. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - - // FaaSDocumentTimeKey is the attribute Key conforming to the - // "faas.document.time" semantic conventions. It represents a string - // containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") - - // FaaSDocumentNameKey is the attribute Key conforming to the - // "faas.document.name" semantic conventions. It represents the document - // name/table subjected to the operation. For example, in Cloud Storage or - // S3 is the name of the file, and in Cosmos DB the table name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. 
It represents the name of -// the source on which the triggering operation was performed. For example, in -// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the -// database name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 -// is the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// Semantic Convention for FaaS scheduled to be executed regularly. -const ( - // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic - // conventions. It represents a string containing the function invocation - // time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") - - // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic - // conventions. It represents a string containing the schedule period as - // [Cron - // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") -) - -// FaaSTime returns an attribute KeyValue conforming to the "faas.time" -// semantic conventions. It represents a string containing the function -// invocation time in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSTime(val string) attribute.KeyValue { - return FaaSTimeKey.String(val) -} - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" -// semantic conventions. It represents a string containing the schedule period -// as [Cron -// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// Contains additional attributes for incoming FaaS spans. -const ( - // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" - // semantic conventions. It represents a boolean that is true if the - // serverless function is executed for the first time (aka cold-start). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - FaaSColdstartKey = attribute.Key("faas.coldstart") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the -// "faas.coldstart" semantic conventions. It represents a boolean that is true -// if the serverless function is executed for the first time (aka cold-start). 
-func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// Contains additional attributes for outgoing FaaS spans. -const ( - // FaaSInvokedNameKey is the attribute Key conforming to the - // "faas.invoked_name" semantic conventions. It represents the name of the - // invoked function. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the - // invoked function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud - // region of the invoked function. - // - // Type: string - // RequirementLevel: ConditionallyRequired (For some cloud providers, like - // AWS or GCP, the region in which a function is hosted is essential to - // uniquely identify the function and also part of its endpoint. Since it's - // part of the endpoint being called, the region is always known to - // clients. In these cases, `faas.invoked_region` MUST be set accordingly. - // If the region is unknown to the client or not required for identifying - // the invoked function, setting `faas.invoked_region` is optional.) - // Stability: stable - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the - // invoked function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. -func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. It represents the cloud region -// of the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - -// Operations that access some remote service. -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" - // semantic conventions. It represents the - // [`service.name`](/docs/resource/README.md#service) of the remote - // service. SHOULD be equal to the actual `service.name` resource attribute - // of the remote service if any. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the -// "peer.service" semantic conventions. It represents the -// [`service.name`](/docs/resource/README.md#service) of the remote service. -// SHOULD be equal to the actual `service.name` resource attribute of the -// remote service if any. -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// These attributes may be used for any operation with an authenticated and/or -// authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. -func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserRole returns an attribute KeyValue conforming to the -// "enduser.role" semantic conventions. It represents the actual/assumed role -// the client is making the request under extracted from token or application -// security context. -func EnduserRole(val string) attribute.KeyValue { - return EnduserRoleKey.String(val) -} - -// EnduserScope returns an attribute KeyValue conforming to the -// "enduser.scope" semantic conventions. It represents the scopes or granted -// authorities the client currently possesses extracted from token or -// application security context. The value would come from the scope associated -// with an [OAuth 2.0 Access -// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute -// value in a [SAML 2.0 -// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). 
-func EnduserScope(val string) attribute.KeyValue { - return EnduserScopeKey.String(val) -} - -// These attributes may be used for any operation to store information about a -// thread that started a span. -const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed - // to OS thread ID). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" - // semantic conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" -// semantic conventions. It represents the current "managed" thread ID (as -// opposed to OS thread ID). -func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// These attributes allow to report this unit of code and therefore to provide -// more context about the span. -const ( - // CodeFunctionKey is the attribute Key conforming to the "code.function" - // semantic conventions. It represents the method or function name, or - // equivalent (usually rightmost part of the code unit's name). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - - // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" - // semantic conventions. It represents the "namespace" within which - // `code.function` is defined. Usually the qualified class or module name, - // such that `code.namespace` + some separator + `code.function` form a - // unique identifier for the code unit. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - - // CodeFilepathKey is the attribute Key conforming to the "code.filepath" - // semantic conventions. It represents the source code file name that - // identifies the code unit as uniquely as possible (preferably an absolute - // file path). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - - // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" - // semantic conventions. It represents the line number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") - - // CodeColumnKey is the attribute Key conforming to the "code.column" - // semantic conventions. It represents the column number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") -) - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). -func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// Semantic Convention for HTTP Client -const ( - // HTTPResendCountKey is the attribute Key conforming to the - // "http.resend_count" semantic conventions. It represents the ordinal - // number of request resending attempt (for any reason, including - // redirects). - // - // Type: int - // RequirementLevel: Recommended (if and only if request was retried.) - // Stability: stable - // Examples: 3 - // Note: The resend count SHOULD be updated each time an HTTP request gets - // resent by the client, regardless of what was the cause of the resending - // (e.g. redirection, authorization failure, 503 Server Unavailable, - // network issues, or any other). - HTTPResendCountKey = attribute.Key("http.resend_count") -) - -// HTTPResendCount returns an attribute KeyValue conforming to the -// "http.resend_count" semantic conventions. It represents the ordinal number -// of request resending attempt (for any reason, including redirects). -func HTTPResendCount(val int) attribute.KeyValue { - return HTTPResendCountKey.Int(val) -} - -// The `aws` conventions apply to operations using the AWS SDK. They map -// request or response parameters in AWS SDK API calls to attributes on a Span. -// The conventions have been collected over time based on feedback from AWS -// users of tracing and will continue to evolve as new interesting conventions -// are found. -// Some descriptions are also provided for populating general OpenTelemetry -// semantic conventions based on these APIs. -const ( - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. 
It represents the AWS request ID as returned in - // the response headers `x-amz-request-id` or `x-amz-requestid`. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' - AWSRequestIDKey = attribute.Key("aws.request_id") -) - -// AWSRequestID returns an attribute KeyValue conforming to the -// "aws.request_id" semantic conventions. It represents the AWS request ID as -// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// Attributes that exist for multiple DynamoDB request types. -const ( - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
- // - // Type: double - // RequirementLevel: Optional - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") -) - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. 
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// DynamoDB.CreateTable -const ( - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It - // represents the JSON-serialized value of each item of the - // `GlobalSecondaryIndexes` request field - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `LocalSecondaryIndexes` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "IndexARN": "string", "IndexName": "string", - // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") -) - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_indexes" semantic -// conventions. It represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming -// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `LocalSecondaryIndexes` request field. -func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// DynamoDB.ListTables -const ( - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents - // the value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the the - // number of items in the `TableNames` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") -) - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming -// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It -// represents the value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the the -// number of items in the `TableNames` response parameter. 
-func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// DynamoDB.Query -const ( - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") -) - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// DynamoDB.Scan -const ( - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") - - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") -) - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value -// of the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. 
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// DynamoDB.UpdateTable -const ( - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to - // the "aws.dynamodb.attribute_definitions" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `AttributeDefinitions` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key - // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic - // conventions. It represents the JSON-serialized value of each item in the - // the `GlobalSecondaryIndexUpdates` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") -) - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming -// to the "aws.dynamodb.attribute_definitions" semantic conventions. It -// represents the JSON-serialized value of each item in the -// `AttributeDefinitions` request field. -func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. It represents the JSON-serialized value of each item in the the -// `GlobalSecondaryIndexUpdates` request field. -func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// Attributes that exist for S3 request types. -const ( - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request - // refers to. Corresponds to the `--bucket` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'some-bucket-name' - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. - // This applies to almost all S3 operations except `list-buckets`. - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'someFile.yml' - // Note: The `key` attribute is applicable to all object-related S3 - // operations, i.e. 
that require the object key as a mandatory parameter. - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // - - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - // - - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - // - - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - // - - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - // - - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source - // object (in the form `bucket`/`key`) for the copy operation. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'someFile.yml' - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3UploadIDKey is the attribute Key conforming to the - // "aws.s3.upload_id" semantic conventions. It represents the upload ID - // that identifies the multipart upload. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' - // Note: The `upload_id` attribute applies to S3 multipart-upload - // operations and corresponds to the `--upload-id` parameter - // of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // multipart operations. 
- // This applies in particular to the following operations: - // - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' - // Note: The `delete` attribute is only applicable to the - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number - // of the part being uploaded in a multipart-upload operation. This is a - // positive integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // and - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - // operations. - // The `part_number` attribute corresponds to the `--part-number` parameter - // of the - // [upload-part operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") -) - -// AWSS3Bucket returns an attribute KeyValue conforming to the -// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the -// request refers to. Corresponds to the `--bucket` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" -// semantic conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object -// (in the form `bucket`/`key`) for the copy operation. 
-func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the -// "aws.s3.delete" semantic conventions. It represents the delete request -// container that specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// Semantic conventions to apply when instrumenting the GraphQL implementation. -// They map GraphQL operations to attributes on a Span. -const ( - // GraphqlOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of - // the operation being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'findBookByID' - GraphqlOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphqlOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of - // the operation being executed. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'query', 'mutation', 'subscription' - GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") - - // GraphqlDocumentKey is the attribute Key conforming to the - // "graphql.document" semantic conventions. It represents the GraphQL - // document being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'query findBookByID { bookByID(id: ?) { name } }' - // Note: The value may be sanitized to exclude sensitive information. - GraphqlDocumentKey = attribute.Key("graphql.document") -) - -var ( - // GraphQL query - GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") - // GraphQL mutation - GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") - // GraphQL subscription - GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") -) - -// GraphqlOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. -func GraphqlOperationName(val string) attribute.KeyValue { - return GraphqlOperationNameKey.String(val) -} - -// GraphqlDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. It represents the GraphQL document -// being executed. -func GraphqlDocument(val string) attribute.KeyValue { - return GraphqlDocumentKey.String(val) -} - -// General attributes used in messaging systems. -const ( - // MessagingSystemKey is the attribute Key conforming to the - // "messaging.system" semantic conventions. It represents a string - // identifying the messaging system. 
- // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' - MessagingSystemKey = attribute.Key("messaging.system") - - // MessagingOperationKey is the attribute Key conforming to the - // "messaging.operation" semantic conventions. It represents a string - // identifying the kind of messaging operation as defined in the [Operation - // names](#operation-names) section above. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Note: If a custom value is used, it MUST be of low cardinality. - MessagingOperationKey = attribute.Key("messaging.operation") - - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the - // batching operation. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If the span describes an - // operation on a batch of messages.) - // Stability: stable - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. When a messaging client - // library supports both batch and single-message API for the same - // operation, instrumentations SHOULD use `messaging.batch.message_count` - // for batching APIs and SHOULD NOT use it for single-message APIs. - MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") - - // MessagingClientIDKey is the attribute Key conforming to the - // "messaging.client_id" semantic conventions. It represents a unique - // identifier for the client that consumes or produces a message. - // - // Type: string - // RequirementLevel: Recommended (If a client id is available) - // Stability: stable - // Examples: 'client-5', 'myhost@8742@s8083jm' - MessagingClientIDKey = attribute.Key("messaging.client_id") -) - -var ( - // publish - MessagingOperationPublish = MessagingOperationKey.String("publish") - // receive - MessagingOperationReceive = MessagingOperationKey.String("receive") - // process - MessagingOperationProcess = MessagingOperationKey.String("process") -) - -// MessagingSystem returns an attribute KeyValue conforming to the -// "messaging.system" semantic conventions. It represents a string identifying -// the messaging system. -func MessagingSystem(val string) attribute.KeyValue { - return MessagingSystemKey.String(val) -} - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to -// the "messaging.batch.message_count" semantic conventions. It represents the -// number of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// MessagingClientID returns an attribute KeyValue conforming to the -// "messaging.client_id" semantic conventions. It represents a unique -// identifier for the client that consumes or produces a message. -func MessagingClientID(val string) attribute.KeyValue { - return MessagingClientIDKey.String(val) -} - -// Semantic conventions for remote procedure calls. -const ( - // RPCSystemKey is the attribute Key conforming to the "rpc.system" - // semantic conventions. It represents a string identifying the remoting - // system. See below for a list of well-known identifiers. 
- // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - RPCSystemKey = attribute.Key("rpc.system") - - // RPCServiceKey is the attribute Key conforming to the "rpc.service" - // semantic conventions. It represents the full (logical) name of the - // service being called, including its package name, if applicable. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'myservice.EchoService' - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing - // class. The `code.namespace` attribute may be used to store the latter - // (despite the attribute name, it may include a class name; e.g., class - // with method actually executing the call on the server side, RPC client - // stub class on the client side). - RPCServiceKey = attribute.Key("rpc.service") - - // RPCMethodKey is the attribute Key conforming to the "rpc.method" - // semantic conventions. It represents the name of the (logical) method - // being called, must be equal to the $method part in the span name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'exampleMethod' - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function` attribute may be used to store the - // latter (e.g., method actually executing the call on the server side, RPC - // client stub method on the client side). - RPCMethodKey = attribute.Key("rpc.method") -) - -var ( - // gRPC - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") - // Connect RPC - RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") -) - -// RPCService returns an attribute KeyValue conforming to the "rpc.service" -// semantic conventions. It represents the full (logical) name of the service -// being called, including its package name, if applicable. -func RPCService(val string) attribute.KeyValue { - return RPCServiceKey.String(val) -} - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. It represents the name of the (logical) method being -// called, must be equal to the $method part in the span name. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// Tech-specific attributes for gRPC. -const ( - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. It represents the [numeric - // status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of - // the gRPC request. 
- // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). -const ( - // RPCJsonrpcVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // does not specify this, the value can be omitted. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If other than the default - // version (`1.0`)) - // Stability: stable - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCJsonrpcRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be - // cast to string for simplicity. Use empty string in case of `null` value. - // Omit entirely if this is a notification. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the - // `error.code` property of response if it is an error response. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If response is not successful.) - // Stability: stable - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") -) - -// RPCJsonrpcVersion returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.version" semantic conventions. It represents the protocol -// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 -// does not specify this, the value can be omitted. -func RPCJsonrpcVersion(val string) attribute.KeyValue { - return RPCJsonrpcVersionKey.String(val) -} - -// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` -// property of request or response. Since protocol allows id to be int, string, -// `null` or missing (for notifications), value is expected to be cast to -// string for simplicity. Use empty string in case of `null` value. Omit -// entirely if this is a notification. -func RPCJsonrpcRequestID(val string) attribute.KeyValue { - return RPCJsonrpcRequestIDKey.String(val) -} - -// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_code" semantic conventions. It represents the -// `error.code` property of response if it is an error response. -func RPCJsonrpcErrorCode(val int) attribute.KeyValue { - return RPCJsonrpcErrorCodeKey.Int(val) -} - -// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_message" semantic conventions. It represents the -// `error.message` property of response if it is an error response. -func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { - return RPCJsonrpcErrorMessageKey.String(val) -} - -// Tech-specific attributes for Connect RPC. -const ( - // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.connect_rpc.error_code" semantic conventions. It represents the - // [error codes](https://connect.build/docs/protocol/#error-codes) of the - // Connect request. Error codes are always string values. - // - // Type: Enum - // RequirementLevel: ConditionallyRequired (If response is not successful - // and if error code available.) 
- // Stability: stable - RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") -) - -var ( - // cancelled - RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") - // unknown - RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") - // invalid_argument - RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") - // deadline_exceeded - RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") - // not_found - RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") - // already_exists - RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") - // permission_denied - RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") - // resource_exhausted - RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") - // failed_precondition - RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") - // aborted - RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") - // out_of_range - RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") - // unimplemented - RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") - // internal - RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") - // unavailable - RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") - // data_loss - RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") - // unauthenticated - RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/README.md deleted file mode 100644 index cfbc9055b3..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.4.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.4.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.4.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/doc.go deleted file mode 100644 index d83a66b9b4..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the conventions -// as of the v1.4.0 version of the OpenTelemetry specification. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/exception.go deleted file mode 100644 index 71a2ece3d3..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/exception.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. 
- ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/http.go deleted file mode 100644 index f0c023cafb..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/http.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" - -import ( - "net/http" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/semconv/internal" - "go.opentelemetry.io/otel/trace" -) - -// HTTP scheme attributes. -var ( - HTTPSchemeHTTP = HTTPSchemeKey.String("http") - HTTPSchemeHTTPS = HTTPSchemeKey.String("https") -) - -var sc = &internal.SemanticConventions{ - EnduserIDKey: EnduserIDKey, - HTTPClientIPKey: HTTPClientIPKey, - HTTPFlavorKey: HTTPFlavorKey, - HTTPHostKey: HTTPHostKey, - HTTPMethodKey: HTTPMethodKey, - HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, - HTTPRouteKey: HTTPRouteKey, - HTTPSchemeHTTP: HTTPSchemeHTTP, - HTTPSchemeHTTPS: HTTPSchemeHTTPS, - HTTPServerNameKey: HTTPServerNameKey, - HTTPStatusCodeKey: HTTPStatusCodeKey, - HTTPTargetKey: HTTPTargetKey, - HTTPURLKey: HTTPURLKey, - HTTPUserAgentKey: HTTPUserAgentKey, - NetHostIPKey: NetHostIPKey, - NetHostNameKey: NetHostNameKey, - NetHostPortKey: NetHostPortKey, - NetPeerIPKey: NetPeerIPKey, - NetPeerNameKey: NetPeerNameKey, - NetPeerPortKey: NetPeerPortKey, - NetTransportIP: NetTransportIP, - NetTransportOther: NetTransportOther, - NetTransportTCP: NetTransportTCP, - NetTransportUDP: NetTransportUDP, - NetTransportUnix: NetTransportUnix, -} - -// NetAttributesFromHTTPRequest generates attributes of the net -// namespace as specified by the OpenTelemetry specification for a -// span. The network parameter is a string that net.Dial function -// from standard library can understand. -func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { - return sc.NetAttributesFromHTTPRequest(network, request) -} - -// EndUserAttributesFromHTTPRequest generates attributes of the -// enduser namespace as specified by the OpenTelemetry specification -// for a span. -func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - return sc.EndUserAttributesFromHTTPRequest(request) -} - -// HTTPClientAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the client side. -func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - return sc.HTTPClientAttributesFromHTTPRequest(request) -} - -// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes -// to be used with server-side HTTP metrics. -func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { - return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) -} - -// HTTPServerAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the server side. Currently, only basic authentication is -// supported. 
-func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { - return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) -} - -// HTTPAttributesFromHTTPStatusCode generates attributes of the http -// namespace as specified by the OpenTelemetry specification for a -// span. -func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { - return sc.HTTPAttributesFromHTTPStatusCode(code) -} - -// SpanStatusFromHTTPStatusCode generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { - return internal.SpanStatusFromHTTPStatusCode(code) -} - -// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -// Exclude 4xx for SERVER to set the appropriate status. -func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { - return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/resource.go deleted file mode 100644 index 66c340c121..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/resource.go +++ /dev/null @@ -1,895 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" - -import "go.opentelemetry.io/otel/attribute" - -// A cloud environment (e.g. GCP, Azure, AWS) -const ( - // Name of the cloud provider. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: 'gcp' - CloudProviderKey = attribute.Key("cloud.provider") - // The cloud account ID the resource is assigned to. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - // The geographical region the resource is running. Refer to your provider's docs - // to see the available regions, for example [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure regions](https://azure.microsoft.com/en-us/global- - // infrastructure/geographies/), or [Google Cloud - // regions](https://cloud.google.com/about/locations). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'us-central1', 'us-east-1' - CloudRegionKey = attribute.Key("cloud.region") - // Cloud regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the resource - // is running. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Google Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - // The cloud platform in use. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: 'aws_ec2', 'azure_vm', 'gcp_compute_engine' - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. 
- CloudPlatformKey = attribute.Key("cloud.platform") -) - -var ( - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") -) - -var ( - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") -) - -// Resources used by AWS Elastic Container Service (ECS). -const ( - // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. - // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us- - // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo - // perguide/clusters.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l - // aunch_types.html) for an ECS task. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: 'ec2', 'fargate' - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates - // t/developerguide/task_definitions.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us- - // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - // The task definition family this task definition is a member of. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - // The revision for this task definition. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// Resources used by AWS Elastic Kubernetes Service (EKS). -const ( - // The ARN of an EKS cluster. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// Resources specific to Amazon Web Services. -const ( - // The name(s) of the AWS log group(s) an application is writing to. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like multi-container - // applications, where a single application has sidecar containers, and each write - // to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - // The Amazon Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- - // access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - // The name(s) of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") - // The ARN(s) of the AWS log stream(s). - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- - // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- - // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain - // several log streams, so these ARNs necessarily identify both a log group and a - // log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") -) - -// A container instance. -const ( - // Container name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - // Container ID. Usually a UUID, as for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container- - // identification). The UUID might be abbreviated. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - // The container runtime managing this container. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") - // Name of the image the container was built on. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - // Container image tag. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '0.1' - ContainerImageTagKey = attribute.Key("container.image.tag") -) - -// The software deployment. -const ( - // Name of the [deployment - // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'staging', 'production' - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// The device on which the process represented by this resource is running. -const ( - // A unique identifier representing the device - // - // Type: string - // Required: No - // Stability: stable - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values outlined - // below. This value is not an advertising identifier and MUST NOT be used as - // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id - // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden - // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the - // Firebase Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on best - // practices and exact implementation details. Caution should be taken when - // storing personal data or anything which can identify a user. GDPR and data - // protection laws may apply, ensure you do your own due diligence. - DeviceIDKey = attribute.Key("device.id") - // The model identifier for the device - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine readable version of the - // model identifier rather than the market or consumer-friendly name of the - // device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - // The marketing name for the device model - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human readable version of the - // device model rather than a machine readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") -) - -// A serverless instance. -const ( - // The name of the function being executed. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'my-function' - FaaSNameKey = attribute.Key("faas.name") - // The unique ID of the function being executed. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' - // Note: For example, in AWS Lambda this field corresponds to the - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- - // namespaces.html) value, in GCP to the URI of the resource, and in Azure to the - // [FunctionDirectory](https://github.com/Azure/azure-functions- - // host/wiki/Retrieving-information-about-the-currently-running-function) field. - FaaSIDKey = attribute.Key("faas.id") - // The version string of the function being executed as defined in [Version - // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '2.0.0' - FaaSVersionKey = attribute.Key("faas.version") - // The execution environment ID as a string. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'my-function:instance-0001' - FaaSInstanceKey = attribute.Key("faas.instance") - // The amount of memory available to the serverless function in MiB. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 128 - // Note: It's recommended to set this attribute since e.g. too little memory can - // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, - // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this - // information. - FaaSMaxMemoryKey = attribute.Key("faas.max_memory") -) - -// A host is defined as a general computing instance. -const ( - // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud - // provider. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-test' - HostIDKey = attribute.Key("host.id") - // Name of the host. On Unix systems, it may contain what the hostname command - // returns, or the fully qualified hostname, or another name specified by the - // user. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - // Type of host. For Cloud, this must be the machine type. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") - // The CPU architecture the host system is running on. - // - // Type: Enum - // Required: No - // Stability: stable - HostArchKey = attribute.Key("host.arch") - // Name of the VM image or OS install the host was instantiated from. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - // VM image ID. For Cloud, this value is from the provider. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - // The version string of the VM image as defined in [Version - // Attributes](README.md#version-attributes). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// A Kubernetes Cluster. -const ( - // The name of the cluster. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") -) - -// A Kubernetes Node object. -const ( - // The name of the Node. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - // The UID of the Node. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") -) - -// A Kubernetes Namespace. -const ( - // The name of the namespace that the pod is running in. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") -) - -// A Kubernetes Pod object. -const ( - // The UID of the Pod. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - // The name of the Pod. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") -) - -// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). -const ( - // The name of the Container in a Pod template. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") -) - -// A Kubernetes ReplicaSet object. -const ( - // The UID of the ReplicaSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicasetUIDKey = attribute.Key("k8s.replicaset.uid") - // The name of the ReplicaSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SReplicasetNameKey = attribute.Key("k8s.replicaset.name") -) - -// A Kubernetes Deployment object. -const ( - // The UID of the Deployment. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - // The name of the Deployment. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") -) - -// A Kubernetes StatefulSet object. -const ( - // The UID of the StatefulSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulsetUIDKey = attribute.Key("k8s.statefulset.uid") - // The name of the StatefulSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SStatefulsetNameKey = attribute.Key("k8s.statefulset.name") -) - -// A Kubernetes DaemonSet object. -const ( - // The UID of the DaemonSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonsetUIDKey = attribute.Key("k8s.daemonset.uid") - // The name of the DaemonSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SDaemonsetNameKey = attribute.Key("k8s.daemonset.name") -) - -// A Kubernetes Job object. -const ( - // The UID of the Job. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - // The name of the Job. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") -) - -// A Kubernetes CronJob object. -const ( - // The UID of the CronJob. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - // The name of the CronJob. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") -) - -// The operating system (OS) on which the process represented by this resource is running. -const ( - // The operating system type. - // - // Type: Enum - // Required: Always - // Stability: stable - OSTypeKey = attribute.Key("os.type") - // Human readable (not intended to be parsed) OS version information, like e.g. - // reported by `ver` or `lsb_release -a` commands. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' - OSDescriptionKey = attribute.Key("os.description") - // Human readable operating system name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - // The version string of the operating system as defined in [Version - // Attributes](../../resource/semantic_conventions/README.md#version-attributes). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// An operating system process. -const ( - // Process identifier (PID). - // - // Type: int - // Required: No - // Stability: stable - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - // The name of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of - // `GetProcessImageFileNameW`. - // - // Type: string - // Required: See below - // Stability: stable - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - // The full path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // Required: See below - // Stability: stable - // Examples: '/usr/bin/cmd/otelcol' - ProcessExecutablePathKey = attribute.Key("process.executable.path") - // The command used to launch the process (i.e. the command name). On Linux based - // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, - // can be set to the first parameter extracted from `GetCommandLineW`. 
- // - // Type: string - // Required: See below - // Stability: stable - // Examples: 'cmd/otelcol' - ProcessCommandKey = attribute.Key("process.command") - // The full command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not - // set this if you have to assemble it just for monitoring; use - // `process.command_args` instead. - // - // Type: string - // Required: See below - // Stability: stable - // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' - ProcessCommandLineKey = attribute.Key("process.command_line") - // All the command arguments (including the command/executable itself) as received - // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited strings - // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be - // the full argv vector passed to `main`. - // - // Type: string[] - // Required: See below - // Stability: stable - // Examples: 'cmd/otecol', '--config=config.yaml' - ProcessCommandArgsKey = attribute.Key("process.command_args") - // The username of the user that owns the process. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'root' - ProcessOwnerKey = attribute.Key("process.owner") -) - -// The single (language) runtime instance which is monitored. -const ( - // The name of the runtime of this process. For compiled native binaries, this - // SHOULD be the name of the compiler. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - // The version of the runtime of this process, as returned by the runtime without - // modification. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - // An additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") -) - -// A service instance. -const ( - // Logical name of the service. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled services. If - // the value was not specified, SDKs MUST fallback to `unknown_service:` - // concatenated with [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, the - // value MUST be set to `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - // A namespace for `service.name`. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group of - // services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` is - // expected to be unique for all services that have no explicit namespace defined - // (so the empty/unspecified namespace is simply one more valid namespace). 
Zero- - // length namespace string is assumed equal to unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - // The string ID of the service instance. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be globally - // unique). The ID helps to distinguish instances of the same service that exist - // at the same time (e.g. instances of a horizontally scaled service). It is - // preferable for the ID to be persistent and stay the same for the lifetime of - // the service instance, however it is acceptable that the ID is ephemeral and - // changes during important lifetime events for the service (e.g. service - // restarts). If the service has no inherent unique ID that can be used as the - // value of this attribute it is recommended to generate a random Version 1 or - // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use - // Version 5, see RFC 4122 for more recommendations). - ServiceInstanceIDKey = attribute.Key("service.instance.id") - // The version string of the service API or implementation. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '2.0.0' - ServiceVersionKey = attribute.Key("service.version") -) - -// The telemetry SDK used to capture data recorded by the instrumentation libraries. -const ( - // The name of the telemetry SDK as defined above. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - // The language of the telemetry SDK. - // - // Type: Enum - // Required: No - // Stability: stable - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - // The version string of the telemetry SDK. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") - // The version string of the auto instrumentation agent, if used. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '1.2.3' - TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") -) - -// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. -const ( - // The name of the web engine. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - // The version of the web engine. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") - // Additional description of the web engine (e.g. detailed version and edition - // information). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/schema.go deleted file mode 100644 index b9457bc0b8..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/schema.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. Semconv packages starting from v1.4.0 must declare -// non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.4.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/trace.go deleted file mode 100644 index 006482a307..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/trace.go +++ /dev/null @@ -1,1367 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" - -import "go.opentelemetry.io/otel/attribute" - -// This document defines the attributes used to perform database client calls. -const ( - // An identifier for the database management system (DBMS) product being used. See - // below for a list of well-known identifiers. - // - // Type: Enum - // Required: Always - // Stability: stable - DBSystemKey = attribute.Key("db.system") - // The connection string used to connect to the database. It is recommended to - // remove embedded credentials. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' - DBConnectionStringKey = attribute.Key("db.connection_string") - // Username for accessing the database. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'readonly_user', 'reporting_user' - DBUserKey = attribute.Key("db.user") - // The fully-qualified class name of the [Java Database Connectivity - // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver - // used to connect. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'org.postgresql.Driver', - // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' - DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") - // If no [tech-specific attribute](#call-level-attributes-for-specific- - // technologies) is defined, this attribute is used to report the name of the - // database being accessed. For commands that switch the database, this should be - // set to the target database (even if the command fails). - // - // Type: string - // Required: Required, if applicable and no more-specific attribute is defined. - // Stability: stable - // Examples: 'customers', 'main' - // Note: In some SQL databases, the database name to be used is called "schema - // name". 
- DBNameKey = attribute.Key("db.name") - // The database statement being executed. - // - // Type: string - // Required: Required if applicable and not explicitly disabled via - // instrumentation configuration. - // Stability: stable - // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' - // Note: The value may be sanitized to exclude sensitive information. - DBStatementKey = attribute.Key("db.statement") - // The name of the operation being executed, e.g. the [MongoDB command - // name](https://docs.mongodb.com/manual/reference/command/#database-operations) - // such as `findAndModify`, or the SQL keyword. - // - // Type: string - // Required: Required, if `db.statement` is not applicable. - // Stability: stable - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: When setting this to an SQL keyword, it is not recommended to attempt any - // client-side parsing of `db.statement` just to get this property, but it should - // be set if the operation name is provided by the library being instrumented. If - // the SQL statement has an ambiguous operation, or performs more than one - // operation, this value may be omitted. - DBOperationKey = attribute.Key("db.operation") -) - -var ( - // Some other SQL database. Fallback only. See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - 
DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") -) - -// Connection-level attributes for Microsoft SQL Server -const ( - // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- - // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) - // connecting to. This name is used to determine the port of a named instance. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'MSSQLSERVER' - // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer - // required (but still recommended if non-standard). - DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") -) - -// Call-level attributes for Cassandra -const ( - // The name of the keyspace being accessed. To be used instead of the generic - // `db.name` attribute. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'mykeyspace' - DBCassandraKeyspaceKey = attribute.Key("db.cassandra.keyspace") - // The fetch size used for paging, i.e. how many rows will be returned at once. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - // The consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra- - // oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // Required: No - // Stability: stable - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - // The name of the primary table that the operation is acting upon, including the - // schema name (if applicable). - // - // Type: string - // Required: Recommended if available. - // Stability: stable - // Examples: 'mytable' - // Note: This mirrors the db.sql.table attribute but references cassandra rather - // than sql. It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting upon an - // anonymous table, or more than one table, this value MUST NOT be set. - DBCassandraTableKey = attribute.Key("db.cassandra.table") - // Whether or not the query is idempotent. - // - // Type: boolean - // Required: No - // Stability: stable - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - // The number of times a query was speculatively executed. Not set or `0` if the - // query was not executed speculatively. 
- // - // Type: int - // Required: No - // Stability: stable - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") - // The ID of the coordinating node for a query. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - // The data center of the coordinating node for a query. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -// Call-level attributes for Apache HBase -const ( - // The [HBase namespace](https://hbase.apache.org/book.html#_namespace) being - // accessed. To be used instead of the generic `db.name` attribute. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'default' - DBHBaseNamespaceKey = attribute.Key("db.hbase.namespace") -) - -// Call-level attributes for Redis -const ( - // The index of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To be used - // instead of the generic `db.name` attribute. - // - // Type: int - // Required: Required, if other than the default database (`0`). - // Stability: stable - // Examples: 0, 1, 15 - DBRedisDBIndexKey = attribute.Key("db.redis.database_index") -) - -// Call-level attributes for MongoDB -const ( - // The collection being accessed within the database stated in `db.name`. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") -) - -// Call-level attrbiutes for SQL databases -const ( - // The name of the primary table that the operation is acting upon, including the - // schema name (if applicable). - // - // Type: string - // Required: Recommended if available. - // Stability: stable - // Examples: 'public.users', 'customers' - // Note: It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting upon an - // anonymous table, or more than one table, this value MUST NOT be set. 
- DBSQLTableKey = attribute.Key("db.sql.table") -) - -// This document defines the attributes used to report a single exception associated with a span. -const ( - // The type of the exception (its fully-qualified class name, if applicable). The - // dynamic type of the exception should be preferred over the static type in - // languages that support it. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'java.net.ConnectException', 'OSError' - ExceptionTypeKey = attribute.Key("exception.type") - // The exception message. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - // A stacktrace as a string in the natural representation for the language - // runtime. The representation is to be determined and documented by each language - // SIG. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") - // SHOULD be set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. - // - // Type: boolean - // Required: No - // Stability: stable - // Note: An exception is considered to have escaped (or left) the scope of a span, - // if that span is ended while the exception is still logically "in flight". - // This may be actually "in flight" in some languages (e.g. if the exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most languages. - - // It is usually not possible to determine at the point where an exception is - // thrown - // whether it will escape the scope of a span. - // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending the span, - // as done in the [example above](#exception-end-example). - - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") -) - -// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. -const ( - // Type of the trigger on which the function is executed. - // - // Type: Enum - // Required: On FaaS instances, faas.trigger MUST be set on incoming invocations. - // Clients invoking FaaS instances MUST set `faas.trigger` on outgoing - // invocations, if it is known to the client. This is, for example, not the case, - // when the transport layer is abstracted in a FaaS client framework without - // access to its configuration. - // Stability: stable - FaaSTriggerKey = attribute.Key("faas.trigger") - // The execution ID of the current function execution. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSExecutionKey = attribute.Key("faas.execution") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. -const ( - // The name of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos - // DB to the database name. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - // Describes the type of the operation that was performed on the data. - // - // Type: Enum - // Required: Always - // Stability: stable - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - // A string containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed - // in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // Required: Always - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") - // The document name/table subjected to the operation. For example, in Cloud - // Storage or S3 is the name of the file, and in Cosmos DB the table name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// Semantic Convention for FaaS scheduled to be executed regularly. -const ( - // A string containing the function invocation time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed - // in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // Required: Always - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") - // A string containing the schedule period as [Cron Expression](https://docs.oracl - // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") -) - -// Contains additional attributes for incoming FaaS spans. -const ( - // A boolean that is true if the serverless function is executed for the first - // time (aka cold-start). 
- // - // Type: boolean - // Required: No - // Stability: stable - FaaSColdstartKey = attribute.Key("faas.coldstart") -) - -// Contains additional attributes for outgoing FaaS spans. -const ( - // The name of the invoked function. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked - // function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - // The cloud provider of the invoked function. - // - // Type: Enum - // Required: Always - // Stability: stable - // Examples: 'aws' - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked - // function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - // The cloud region of the invoked function. - // - // Type: string - // Required: For some cloud providers, like AWS or GCP, the region in which a - // function is hosted is essential to uniquely identify the function and also part - // of its endpoint. Since it's part of the endpoint being called, the region is - // always known to clients. In these cases, `faas.invoked_region` MUST be set - // accordingly. If the region is unknown to the client or not required for - // identifying the invoked function, setting `faas.invoked_region` is optional. - // Stability: stable - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked - // function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") -) - -var ( - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") -) - -// These attributes may be used for any network related operation. -const ( - // Transport protocol used. See note below. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: 'ip_tcp' - NetTransportKey = attribute.Key("net.transport") - // Remote address of the peer (dotted decimal for IPv4 or - // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) - // - // Type: string - // Required: No - // Stability: stable - // Examples: '127.0.0.1' - NetPeerIPKey = attribute.Key("net.peer.ip") - // Remote port number. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 80, 8080, 443 - NetPeerPortKey = attribute.Key("net.peer.port") - // Remote hostname or similar, see note below. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'example.com' - NetPeerNameKey = attribute.Key("net.peer.name") - // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '192.168.0.1' - NetHostIPKey = attribute.Key("net.host.ip") - // Like `net.peer.port` but for the host port. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 35555 - NetHostPortKey = attribute.Key("net.host.port") - // Local hostname or similar, see note below. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'localhost' - NetHostNameKey = attribute.Key("net.host.name") -) - -var ( - // ip_tcp - NetTransportTCP = NetTransportKey.String("ip_tcp") - // ip_udp - NetTransportUDP = NetTransportKey.String("ip_udp") - // Another IP-based protocol - NetTransportIP = NetTransportKey.String("ip") - // Unix Domain socket. 
See below - NetTransportUnix = NetTransportKey.String("unix") - // Named or anonymous pipe. See note below - NetTransportPipe = NetTransportKey.String("pipe") - // In-process communication - NetTransportInProc = NetTransportKey.String("inproc") - // Something else (non IP-based) - NetTransportOther = NetTransportKey.String("other") -) - -// Operations that access some remote service. -const ( - // The [`service.name`](../../resource/semantic_conventions/README.md#service) of - // the remote service. SHOULD be equal to the actual `service.name` resource - // attribute of the remote service if any. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// These attributes may be used for any operation with an authenticated and/or authorized enduser. -const ( - // Username or client_id extracted from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the - // inbound request from outside the system. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - // Actual/assumed role the client is making the request under extracted from token - // or application security context. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - // Scopes or granted authorities the client currently possesses extracted from - // token or application security context. The value would come from the scope - // associated with an [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value - // in a [SAML 2.0 Assertion](http://docs.oasis- - // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// These attributes may be used for any operation to store information about a thread that started a span. -const ( - // Current "managed" thread ID (as opposed to OS thread ID). - // - // Type: int - // Required: No - // Stability: stable - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - // Current thread name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// These attributes allow to report this unit of code and therefore to provide more context about the span. -const ( - // The method or function name, or equivalent (usually rightmost part of the code - // unit's name). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - // The "namespace" within which `code.function` is defined. Usually the qualified - // class or module name, such that `code.namespace` + some separator + - // `code.function` form a unique identifier for the code unit. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - // The source code file name that identifies the code unit as uniquely as possible - // (preferably an absolute file path). 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - // The line number in `code.filepath` best representing the operation. It SHOULD - // point within the code unit named in `code.function`. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") -) - -// This document defines semantic conventions for HTTP client and server Spans. -const ( - // HTTP request method. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'GET', 'POST', 'HEAD' - HTTPMethodKey = attribute.Key("http.method") - // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. - // Usually the fragment is not transmitted over HTTP, but if it is known, it - // should be included nevertheless. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' - // Note: `http.url` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case the attribute's - // value should be `https://www.example.com/`. - HTTPURLKey = attribute.Key("http.url") - // The full request target as passed in a HTTP request line or equivalent. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '/path/12314/?q=ddds#123' - HTTPTargetKey = attribute.Key("http.target") - // The value of the [HTTP host - // header](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is - // empty or not present, this attribute should be the same. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'www.example.org' - HTTPHostKey = attribute.Key("http.host") - // The URI scheme identifying the used protocol. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'http', 'https' - HTTPSchemeKey = attribute.Key("http.scheme") - // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). - // - // Type: int - // Required: If and only if one was received/sent. - // Stability: stable - // Examples: 200 - HTTPStatusCodeKey = attribute.Key("http.status_code") - // Kind of HTTP protocol used. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: '1.0' - // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` - // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. - HTTPFlavorKey = attribute.Key("http.flavor") - // Value of the [HTTP User- - // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the - // client. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' - HTTPUserAgentKey = attribute.Key("http.user_agent") - // The size of the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as the - // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For - // requests using transport encoding, this should be the compressed size. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 3495 - HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") - // The size of the uncompressed request payload body after transport decoding. Not - // set if transport encoding not used. 
- // - // Type: int - // Required: No - // Stability: stable - // Examples: 5493 - HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") - // The size of the response payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as the - // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For - // requests using transport encoding, this should be the compressed size. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 3495 - HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") - // The size of the uncompressed response payload body after transport decoding. - // Not set if transport encoding not used. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 5493 - HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") -) - -var ( - // HTTP 1.0 - HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") - // HTTP 1.1 - HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") - // HTTP 2 - HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") - // SPDY protocol - HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") - // QUIC protocol - HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") -) - -// Semantic Convention for HTTP Server -const ( - // The primary server name of the matched virtual host. This should be obtained - // via configuration. If no such configuration can be obtained, this attribute - // MUST NOT be set ( `net.host.name` should be used instead). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'example.com' - // Note: `http.url` is usually not readily available on the server side but would - // have to be assembled in a cumbersome and sometimes lossy process from other - // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus - // preferred to supply the raw data that is available. - HTTPServerNameKey = attribute.Key("http.server_name") - // The matched route (path template). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '/users/:userID?' - HTTPRouteKey = attribute.Key("http.route") - // The IP address of the original client behind all proxies, if known (e.g. from - // [X-Forwarded-For](https://developer.mozilla.org/en- - // US/docs/Web/HTTP/Headers/X-Forwarded-For)). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '83.164.160.102' - // Note: This is not necessarily the same as `net.peer.ip`, which would identify - // the network-level peer, which may be a proxy. - HTTPClientIPKey = attribute.Key("http.client_ip") -) - -// Attributes that exist for multiple DynamoDB request types. -const ( - // The keys in the `RequestItems` object field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - // The JSON-serialized value of each item in the `ConsumedCapacity` response - // field. 
- // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { - // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, - // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": - // "string", "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - // The JSON-serialized value of the `ItemCollectionMetrics` response field. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, - // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : - // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": - // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. - // - // Type: double - // Required: No - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. - // - // Type: double - // Required: No - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - // The value of the `ConsistentRead` request parameter. - // - // Type: boolean - // Required: No - // Stability: stable - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - // The value of the `ProjectionExpression` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, - // ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - // The value of the `Limit` request parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - // The value of the `AttributesToGet` request parameter. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - // The value of the `IndexName` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - // The value of the `Select` request parameter. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") -) - -// DynamoDB.CreateTable -const ( - // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request - // field - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", - // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], - // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": - // number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request - // field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": - // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", - // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], - // "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") -) - -// DynamoDB.ListTables -const ( - // The value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - // The number of items in the `TableNames` response parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") -) - -// DynamoDB.Query -const ( - // The value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // Required: No - // Stability: stable - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") -) - -// DynamoDB.Scan -const ( - // The value of the `Segment` request parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - // The value of the `TotalSegments` request parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") - // The value of the `Count` response parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - // The value of the `ScannedCount` response parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") -) - -// DynamoDB.UpdateTable -const ( - // The JSON-serialized value of each item in the `AttributeDefinitions` request - // field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - // The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates` - // request field. 
- // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") -) - -// This document defines the attributes used in messaging systems. -const ( - // A string identifying the messaging system. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'kafka', 'rabbitmq', 'activemq', 'AmazonSQS' - MessagingSystemKey = attribute.Key("messaging.system") - // The message destination name. This might be equal to the span name but is - // required nevertheless. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'MyQueue', 'MyTopic' - MessagingDestinationKey = attribute.Key("messaging.destination") - // The kind of message destination - // - // Type: Enum - // Required: Required only if the message destination is either a `queue` or - // `topic`. - // Stability: stable - MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") - // A boolean that is true if the message destination is temporary. - // - // Type: boolean - // Required: If missing, it is assumed to be false. - // Stability: stable - MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") - // The name of the transport protocol. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'AMQP', 'MQTT' - MessagingProtocolKey = attribute.Key("messaging.protocol") - // The version of the transport protocol. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '0.9.1' - MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") - // Connection string. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'tibjmsnaming://localhost:7222', - // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' - MessagingURLKey = attribute.Key("messaging.url") - // A value used by the messaging system as an identifier for the message, - // represented as a string. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message_id") - // The [conversation ID](#conversations) identifying the conversation to which the - // message belongs, represented as a string. Sometimes called "Correlation ID". - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'MyConversationID' - MessagingConversationIDKey = attribute.Key("messaging.conversation_id") - // The (uncompressed) size of the message payload in bytes. Also use this - // attribute if it is unknown whether the compressed or uncompressed payload size - // is reported. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 2738 - MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") - // The compressed size of the message payload in bytes. 
- // - // Type: int - // Required: No - // Stability: stable - // Examples: 2048 - MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") -) - -var ( - // A message sent to a queue - MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") - // A message sent to a topic - MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") -) - -// Semantic convention for a consumer of messages received from a messaging system -const ( - // A string identifying the kind of message consumption as defined in the - // [Operation names](#operation-names) section above. If the operation is "send", - // this attribute MUST NOT be set, since the operation can be inferred from the - // span kind in that case. - // - // Type: Enum - // Required: No - // Stability: stable - MessagingOperationKey = attribute.Key("messaging.operation") -) - -var ( - // receive - MessagingOperationReceive = MessagingOperationKey.String("receive") - // process - MessagingOperationProcess = MessagingOperationKey.String("process") -) - -// Attributes for RabbitMQ -const ( - // RabbitMQ message routing key. - // - // Type: string - // Required: Unless it is empty. - // Stability: stable - // Examples: 'myKey' - MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") -) - -// Attributes for Apache Kafka -const ( - // Message keys in Kafka are used for grouping alike messages to ensure they're - // processed on the same partition. They differ from `messaging.message_id` in - // that they're not unique. If the key is `null`, the attribute MUST NOT be set. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to be - // supplied for the attribute. If the key has no unambiguous, canonical string - // form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") - // Name of the Kafka Consumer Group that is handling the message. Only applies to - // consumers, not producers. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") - // Client ID for the Consumer or Producer that is handling the message. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'client-5' - MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") - // Partition the message is sent to. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 2 - MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") - // A boolean that is true if the message is a tombstone. - // - // Type: boolean - // Required: If missing, it is assumed to be false. - // Stability: stable - MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") -) - -// This document defines semantic conventions for remote procedure calls. -const ( - // A string identifying the remoting system. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'grpc', 'java_rmi', 'wcf' - RPCSystemKey = attribute.Key("rpc.system") - // The full name of the service being called, including its package name, if - // applicable. 
- // - // Type: string - // Required: No, but recommended - // Stability: stable - // Examples: 'myservice.EchoService' - RPCServiceKey = attribute.Key("rpc.service") - // The name of the method being called, must be equal to the $method part in the - // span name. - // - // Type: string - // Required: No, but recommended - // Stability: stable - // Examples: 'exampleMethod' - RPCMethodKey = attribute.Key("rpc.method") -) - -// Tech-specific attributes for gRPC. -const ( - // The [numeric status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC - // request. - // - // Type: Enum - // Required: Always - // Stability: stable - // Examples: 0, 1, 16 - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). -const ( - // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC - // 1.0 does not specify this, the value can be omitted. - // - // Type: string - // Required: If missing, it is assumed to be "1.0". - // Stability: stable - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - // `method` property from request. Unlike `rpc.method`, this may not relate to the - // actual method being called. Useful for client-side traces since client does not - // know what will be called on the server. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'users.create', 'get_users' - RPCJsonrpcMethodKey = attribute.Key("rpc.jsonrpc.method") - // `id` property of request or response. Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be cast to - // string for simplicity. Use empty string in case of `null` value. Omit entirely - // if this is a notification. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - // `error.code` property of response if it is an error response. - // - // Type: int - // Required: If missing, response is assumed to be successful. 
- // Stability: stable
- // Examples: -32700, 100
- RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
- // `error.message` property of response if it is an error response.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Parse error', 'User already exists'
- RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
-)
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 8330f2794f..5daf426d1c 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -2302,23 +2302,13 @@ go.opentelemetry.io/otel/codes
 go.opentelemetry.io/otel/internal/baggage
 go.opentelemetry.io/otel/internal/global
 go.opentelemetry.io/otel/propagation
-go.opentelemetry.io/otel/semconv/internal
 go.opentelemetry.io/otel/semconv/internal/v4
 go.opentelemetry.io/otel/semconv/v1.20.0
 go.opentelemetry.io/otel/semconv/v1.20.0/httpconv
-go.opentelemetry.io/otel/semconv/v1.21.0
 go.opentelemetry.io/otel/semconv/v1.37.0
 go.opentelemetry.io/otel/semconv/v1.37.0/httpconv
 go.opentelemetry.io/otel/semconv/v1.37.0/otelconv
 go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv
-go.opentelemetry.io/otel/semconv/v1.4.0
-# go.opentelemetry.io/otel/exporters/jaeger v1.17.0
-## explicit; go 1.19
-go.opentelemetry.io/otel/exporters/jaeger
-go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent
-go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger
-go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore
-go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift
 # go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/exporters/otlp/otlptrace
@@ -2330,6 +2320,11 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
+# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0
+## explicit; go 1.23.0
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x
 # go.opentelemetry.io/otel/metric v1.38.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/metric
@@ -2344,6 +2339,7 @@ go.opentelemetry.io/otel/sdk/internal/x
 go.opentelemetry.io/otel/sdk/resource
 go.opentelemetry.io/otel/sdk/trace
 go.opentelemetry.io/otel/sdk/trace/internal/x
+go.opentelemetry.io/otel/sdk/trace/tracetest
 # go.opentelemetry.io/otel/trace v1.38.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/trace
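The hunks above drop the Jaeger exporter packages from the vendor tree and vendor the OTLP gRPC and stdout trace exporters instead. A minimal sketch of how these two newly vendored exporters are commonly wired into an SDK tracer provider follows; the endpoint, insecure transport, and tracer name are illustrative assumptions, not values taken from this patch.

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// OTLP gRPC exporter; "localhost:4317" and the insecure transport are
	// placeholder assumptions for a locally running collector.
	otlpExp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("localhost:4317"),
		otlptracegrpc.WithInsecure(),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Stdout exporter from the newly vendored stdouttrace package,
	// handy for inspecting spans without a collector.
	stdoutExp, err := stdouttrace.New(stdouttrace.WithPrettyPrint())
	if err != nil {
		log.Fatal(err)
	}

	// Register both exporters on one tracer provider via batch span processors.
	tp := sdktrace.NewTracerProvider(
		sdktrace.WithBatcher(otlpExp),
		sdktrace.WithBatcher(stdoutExp),
	)
	defer func() { _ = tp.Shutdown(ctx) }()
	otel.SetTracerProvider(tp)

	// Emit a single demo span so both exporters have something to flush.
	_, span := tp.Tracer("tracing-demo").Start(ctx, "demo-span")
	span.End()
}

With both batch processors registered, spans reach an OTLP collector over gRPC and are also printed locally, which matches the exporter package set added to vendor/modules.txt above.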