vendor: otel v0.56.0 / v1.31.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Author: Sebastiaan van Stijn
Date: 2025-01-14 17:22:34 +01:00
parent 15e4848a0e
commit 2f42b32722
GPG Key ID: 76698F39D527CE8C
106 changed files with 2423 additions and 10778 deletions

View File

@@ -41,13 +41,13 @@ require (
 	github.com/theupdateframework/notary v0.7.1-0.20210315103452-bf96a202a09a
 	github.com/tonistiigi/go-rosetta v0.0.0-20220804170347-3f4430f2d346
 	github.com/xeipuuv/gojsonschema v1.2.0
-	go.opentelemetry.io/otel v1.28.0
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0
-	go.opentelemetry.io/otel/metric v1.28.0
-	go.opentelemetry.io/otel/sdk v1.28.0
-	go.opentelemetry.io/otel/sdk/metric v1.28.0
-	go.opentelemetry.io/otel/trace v1.28.0
+	go.opentelemetry.io/otel v1.31.0
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0
+	go.opentelemetry.io/otel/metric v1.31.0
+	go.opentelemetry.io/otel/sdk v1.31.0
+	go.opentelemetry.io/otel/sdk/metric v1.31.0
+	go.opentelemetry.io/otel/trace v1.31.0
 	golang.org/x/sync v0.10.0
 	golang.org/x/sys v0.28.0
 	golang.org/x/term v0.27.0
@@ -74,7 +74,7 @@ require (
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/gorilla/mux v1.8.1 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/klauspost/compress v1.17.11 // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
@@ -91,9 +91,9 @@ require (
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	go.etcd.io/etcd/raft/v3 v3.5.16 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
 	golang.org/x/crypto v0.31.0 // indirect
 	golang.org/x/net v0.33.0 // indirect

View File

@@ -116,8 +116,8 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
 github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
 github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
 github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
 github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -234,8 +234,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -285,26 +285,26 @@ github.com/zmap/zlint/v3 v3.1.0 h1:WjVytZo79m/L1+/Mlphl09WBob6YTGljN5IGWZFpAv0=
 github.com/zmap/zlint/v3 v3.1.0/go.mod h1:L7t8s3sEKkb0A2BxGy1IWrxt1ZATa1R4QfJZaQOD3zU=
 go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk=
 go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
-go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
-go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 h1:U2guen0GhqH8o/G2un8f/aG/y++OuW6MyCo6hT9prXk=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0/go.mod h1:yeGZANgEcpdx/WK0IvvRFC+2oLiMS2u4L/0Rj2M2Qr0=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
-go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
-go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
-go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
-go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
-go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08=
-go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg=
-go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
-go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM=
+go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
+go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 h1:FZ6ei8GFW7kyPYdxJaV2rgI6M+4tvZzhYsQ2wgyVC08=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0/go.mod h1:MdEu/mC6j3D+tTEfvI15b5Ci2Fn7NneJ71YMoiS3tpI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4=
+go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
+go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
+go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
+go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
+go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
 go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
 go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=

View File

@@ -73,7 +73,7 @@ go_test(
         "@org_golang_google_genproto_googleapis_api//httpbody",
         "@org_golang_google_genproto_googleapis_rpc//errdetails",
         "@org_golang_google_genproto_googleapis_rpc//status",
-        "@org_golang_google_grpc//:go_default_library",
+        "@org_golang_google_grpc//:grpc",
         "@org_golang_google_grpc//codes",
         "@org_golang_google_grpc//health/grpc_health_v1",
         "@org_golang_google_grpc//metadata",

View File

@@ -49,6 +49,7 @@ var malformedHTTPHeaders = map[string]struct{}{
 type (
     rpcMethodKey struct{}
     httpPathPatternKey struct{}
+    httpPatternKey struct{}
     AnnotateContextOption func(ctx context.Context) context.Context
 )
@@ -404,3 +405,13 @@ func HTTPPathPattern(ctx context.Context) (string, bool) {
 func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context {
     return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern)
 }
+// HTTPPattern returns the HTTP path pattern struct relating to the HTTP handler, if one exists.
+func HTTPPattern(ctx context.Context) (Pattern, bool) {
+    v, ok := ctx.Value(httpPatternKey{}).(Pattern)
+    return v, ok
+}
+func withHTTPPattern(ctx context.Context, httpPattern Pattern) context.Context {
+    return context.WithValue(ctx, httpPatternKey{}, httpPattern)
+}
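
The new HTTPPattern accessor pairs with the ServeMux middleware support added elsewhere in this bump. A minimal sketch (not part of the vendored change; the middleware and its logging are illustrative assumptions) of how a consumer might read the matched pattern from the request context:

package gatewayexample

import (
    "log"
    "net/http"

    "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

// patternLogger is a hypothetical runtime.Middleware that logs the HTTP path
// pattern the mux matched for the current request.
func patternLogger(next runtime.HandlerFunc) runtime.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
        if pat, ok := runtime.HTTPPattern(r.Context()); ok {
            log.Printf("matched pattern: %s", pat.String())
        }
        next(w, r, pathParams)
    }
}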

View File

@@ -93,6 +93,7 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R
 func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
     // return Internal when Marshal failed
     const fallback = `{"code": 13, "message": "failed to marshal error message"}`
+    const fallbackRewriter = `{"code": 13, "message": "failed to rewrite error message"}`
     var customStatus *HTTPStatusError
     if errors.As(err, &customStatus) {
@@ -100,19 +101,28 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
     }
     s := status.Convert(err)
-    pb := s.Proto()
     w.Header().Del("Trailer")
     w.Header().Del("Transfer-Encoding")
-    contentType := marshaler.ContentType(pb)
+    respRw, err := mux.forwardResponseRewriter(ctx, s.Proto())
+    if err != nil {
+        grpclog.Errorf("Failed to rewrite error message %q: %v", s, err)
+        w.WriteHeader(http.StatusInternalServerError)
+        if _, err := io.WriteString(w, fallbackRewriter); err != nil {
+            grpclog.Errorf("Failed to write response: %v", err)
+        }
+        return
+    }
+    contentType := marshaler.ContentType(respRw)
     w.Header().Set("Content-Type", contentType)
     if s.Code() == codes.Unauthenticated {
         w.Header().Set("WWW-Authenticate", s.Message())
     }
-    buf, merr := marshaler.Marshal(pb)
+    buf, merr := marshaler.Marshal(respRw)
     if merr != nil {
         grpclog.Errorf("Failed to marshal error message %q: %v", s, merr)
         w.WriteHeader(http.StatusInternalServerError)

View File

@@ -3,6 +3,7 @@ package runtime
 import (
     "context"
     "errors"
+    "fmt"
     "io"
     "net/http"
     "net/textproto"
@@ -55,20 +56,27 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
         return
     }
+    respRw, err := mux.forwardResponseRewriter(ctx, resp)
+    if err != nil {
+        grpclog.Errorf("Rewrite error: %v", err)
+        handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
+        return
+    }
     if !wroteHeader {
-        w.Header().Set("Content-Type", marshaler.ContentType(resp))
+        w.Header().Set("Content-Type", marshaler.ContentType(respRw))
     }
     var buf []byte
-    httpBody, isHTTPBody := resp.(*httpbody.HttpBody)
+    httpBody, isHTTPBody := respRw.(*httpbody.HttpBody)
     switch {
-    case resp == nil:
+    case respRw == nil:
         buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response")))
     case isHTTPBody:
         buf = httpBody.GetData()
     default:
-        result := map[string]interface{}{"result": resp}
-        if rb, ok := resp.(responseBody); ok {
+        result := map[string]interface{}{"result": respRw}
+        if rb, ok := respRw.(responseBody); ok {
             result["result"] = rb.XXX_ResponseBody()
         }
@@ -164,12 +172,17 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
         HTTPError(ctx, mux, marshaler, w, req, err)
         return
     }
+    respRw, err := mux.forwardResponseRewriter(ctx, resp)
+    if err != nil {
+        grpclog.Errorf("Rewrite error: %v", err)
+        HTTPError(ctx, mux, marshaler, w, req, err)
+        return
+    }
     var buf []byte
-    var err error
-    if rb, ok := resp.(responseBody); ok {
+    if rb, ok := respRw.(responseBody); ok {
         buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
     } else {
-        buf, err = marshaler.Marshal(resp)
+        buf, err = marshaler.Marshal(respRw)
     }
     if err != nil {
         grpclog.Errorf("Marshal error: %v", err)
@@ -201,8 +214,7 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re
     }
     for _, opt := range opts {
         if err := opt(ctx, w, resp); err != nil {
-            grpclog.Errorf("Error handling ForwardResponseOptions: %v", err)
-            return err
+            return fmt.Errorf("error handling ForwardResponseOptions: %w", err)
         }
     }
     return nil

View File

@@ -48,12 +48,19 @@ var encodedPathSplitter = regexp.MustCompile("(/|%2F)")
 // A HandlerFunc handles a specific pair of path pattern and HTTP method.
 type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
+// A Middleware handler wraps another HandlerFunc to do some pre- and/or post-processing of the request. This is used as an alternative to gRPC interceptors when using the direct-to-implementation
+// registration methods. It is generally recommended to use gRPC client or server interceptors instead
+// where possible.
+type Middleware func(HandlerFunc) HandlerFunc
 // ServeMux is a request multiplexer for grpc-gateway.
 // It matches http requests to patterns and invokes the corresponding handler.
 type ServeMux struct {
     // handlers maps HTTP method to a list of handlers.
     handlers map[string][]handler
+    middlewares []Middleware
     forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
+    forwardResponseRewriter ForwardResponseRewriter
     marshalers marshalerRegistry
     incomingHeaderMatcher HeaderMatcherFunc
     outgoingHeaderMatcher HeaderMatcherFunc
@@ -69,6 +76,24 @@ type ServeMux struct {
 // ServeMuxOption is an option that can be given to a ServeMux on construction.
 type ServeMuxOption func(*ServeMux)
+// ForwardResponseRewriter is the signature of a function that is capable of rewriting messages
+// before they are forwarded in a unary, stream, or error response.
+type ForwardResponseRewriter func(ctx context.Context, response proto.Message) (any, error)
+// WithForwardResponseRewriter returns a ServeMuxOption that allows for implementers to insert logic
+// that can rewrite the final response before it is forwarded.
+//
+// The response rewriter function is called during unary message forwarding, stream message
+// forwarding and when errors are being forwarded.
+//
+// NOTE: Using this option will likely make what is generated by `protoc-gen-openapiv2` incorrect.
+// Since this option involves making runtime changes to the response shape or type.
+func WithForwardResponseRewriter(fwdResponseRewriter ForwardResponseRewriter) ServeMuxOption {
+    return func(sm *ServeMux) {
+        sm.forwardResponseRewriter = fwdResponseRewriter
+    }
+}
 // WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
 //
 // forwardResponseOption is an option that will be called on the relevant context.Context,
@@ -89,6 +114,15 @@ func WithUnescapingMode(mode UnescapingMode) ServeMuxOption {
     }
 }
+// WithMiddlewares sets server middleware for all handlers. This is useful as an alternative to gRPC
+// interceptors when using the direct-to-implementation registration methods and cannot rely
+// on gRPC interceptors. It's recommended to use gRPC interceptors instead if possible.
+func WithMiddlewares(middlewares ...Middleware) ServeMuxOption {
+    return func(serveMux *ServeMux) {
+        serveMux.middlewares = append(serveMux.middlewares, middlewares...)
+    }
+}
 // SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters.
 // Configuring this will mean the generated OpenAPI output is no longer correct, and it should be
 // done with careful consideration.
@@ -279,6 +313,7 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux {
     serveMux := &ServeMux{
         handlers: make(map[string][]handler),
         forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
+        forwardResponseRewriter: func(ctx context.Context, response proto.Message) (any, error) { return response, nil },
         marshalers: makeMarshalerMIMERegistry(),
         errorHandler: DefaultHTTPErrorHandler,
         streamErrorHandler: DefaultStreamErrorHandler,
@@ -305,6 +340,9 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux {
 // Handle associates "h" to the pair of HTTP method and path pattern.
 func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
+    if len(s.middlewares) > 0 {
+        h = chainMiddlewares(s.middlewares)(h)
+    }
     s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...)
 }
@@ -405,7 +443,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
             }
             continue
         }
-        h.h(w, r, pathParams)
+        s.handleHandler(h, w, r, pathParams)
         return
     }
@@ -458,7 +496,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
             s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
             return
         }
-        h.h(w, r, pathParams)
+        s.handleHandler(h, w, r, pathParams)
         return
     }
     _, outboundMarshaler := MarshalerForRequest(s, r)
@@ -484,3 +522,16 @@ type handler struct {
     pat Pattern
     h HandlerFunc
 }
+func (s *ServeMux) handleHandler(h handler, w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
+    h.h(w, r.WithContext(withHTTPPattern(r.Context(), h.pat)), pathParams)
+}
+func chainMiddlewares(mws []Middleware) Middleware {
+    return func(next HandlerFunc) HandlerFunc {
+        for i := len(mws); i > 0; i-- {
+            next = mws[i-1](next)
+        }
+        return next
+    }
+}
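
Taken together, the new mux options are wired up at construction time. A hedged sketch of combining WithMiddlewares and WithForwardResponseRewriter (the logging middleware and the response envelope are illustrative assumptions, not part of the vendored code):

package gatewayexample

import (
    "context"
    "log"
    "net/http"

    "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
    "google.golang.org/protobuf/proto"
)

// newMux builds a ServeMux that logs every matched request and wraps each
// forwarded response in a simple envelope before it is marshaled.
func newMux() *runtime.ServeMux {
    logging := func(next runtime.HandlerFunc) runtime.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
            log.Printf("%s %s", r.Method, r.URL.Path)
            next(w, r, pathParams)
        }
    }
    return runtime.NewServeMux(
        runtime.WithMiddlewares(logging),
        runtime.WithForwardResponseRewriter(func(ctx context.Context, resp proto.Message) (any, error) {
            // The rewriter may return any value; here the message is wrapped
            // in a plain map so the marshaler emits {"data": ...}.
            return map[string]any{"data": resp}, nil
        }),
    )
}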

View File

@@ -18,20 +18,6 @@ const (
     WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded)
 )
-// Server HTTP metrics.
-const (
-    serverRequestSize  = "http.server.request.size"  // Incoming request bytes total
-    serverResponseSize = "http.server.response.size" // Incoming response bytes total
-    serverDuration     = "http.server.duration"      // Incoming end to end duration, milliseconds
-)
-// Client HTTP metrics.
-const (
-    clientRequestSize  = "http.client.request.size"  // Outgoing request bytes total
-    clientResponseSize = "http.client.response.size" // Outgoing response bytes total
-    clientDuration     = "http.client.duration"      // Outgoing end to end duration, milliseconds
-)
 // Filter is a predicate used to determine whether a given http.request should
 // be traced. A Filter must return true if the request should be traced.
 type Filter func(*http.Request) bool

View File

@@ -8,6 +8,8 @@ import (
     "net/http"
     "net/http/httptrace"
+    "go.opentelemetry.io/otel/attribute"
     "go.opentelemetry.io/otel"
     "go.opentelemetry.io/otel/metric"
     "go.opentelemetry.io/otel/propagation"
@@ -35,6 +37,7 @@ type config struct {
     TracerProvider trace.TracerProvider
     MeterProvider  metric.MeterProvider
+    MetricAttributesFn func(*http.Request) []attribute.KeyValue
 }
 // Option interface used for setting optional config properties.
@@ -194,3 +197,11 @@ func WithServerName(server string) Option {
         c.ServerName = server
     })
 }
+// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue.
+// These attributes will be included in metrics for every request.
+func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option {
+    return optionFunc(func(c *config) {
+        c.MetricAttributesFn = metricAttributesFn
+    })
+}
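
The new WithMetricAttributesFn option is passed to the public otelhttp constructors like any other Option. A minimal, hedged sketch of tagging outgoing-request metrics with an extra attribute (the attribute key is an assumption for illustration; exactly which metric paths honor the option depends on the otelhttp version):

package otelhttpexample

import (
    "net/http"

    "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
    "go.opentelemetry.io/otel/attribute"
)

// newClient returns an *http.Client whose instrumented transport adds a
// per-request attribute, derived from the request, to the emitted metrics.
func newClient() *http.Client {
    transport := otelhttp.NewTransport(http.DefaultTransport,
        otelhttp.WithMetricAttributesFn(func(r *http.Request) []attribute.KeyValue {
            return []attribute.KeyValue{attribute.String("peer.service", r.URL.Host)}
        }),
    )
    return &http.Client{Transport: transport}
}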

View File

@@ -9,11 +9,9 @@ import (
     "github.com/felixge/httpsnoop"
+    "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
     "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
-    "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
     "go.opentelemetry.io/otel"
-    "go.opentelemetry.io/otel/attribute"
-    "go.opentelemetry.io/otel/metric"
     "go.opentelemetry.io/otel/propagation"
     "go.opentelemetry.io/otel/trace"
 )
@@ -24,7 +22,6 @@ type middleware struct {
     server string
     tracer trace.Tracer
-    meter metric.Meter
     propagators propagation.TextMapPropagator
     spanStartOptions []trace.SpanStartOption
     readEvent bool
@@ -34,10 +31,7 @@ type middleware struct {
     publicEndpoint bool
     publicEndpointFn func(*http.Request) bool
-    traceSemconv semconv.HTTPServer
-    requestBytesCounter metric.Int64Counter
-    responseBytesCounter metric.Int64Counter
-    serverLatencyMeasure metric.Float64Histogram
+    semconv semconv.HTTPServer
 }
 func defaultHandlerFormatter(operation string, _ *http.Request) string {
@@ -56,8 +50,6 @@ func NewHandler(handler http.Handler, operation string, opts ...Option) http.Han
 func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler {
     h := middleware{
         operation: operation,
-        traceSemconv: semconv.NewHTTPServer(),
     }
     defaultOpts := []Option{
@@ -67,7 +59,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han
     c := newConfig(append(defaultOpts, opts...)...)
     h.configure(c)
-    h.createMeasures()
     return func(next http.Handler) http.Handler {
         return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -78,7 +69,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han
 func (h *middleware) configure(c *config) {
     h.tracer = c.Tracer
-    h.meter = c.Meter
     h.propagators = c.Propagators
     h.spanStartOptions = c.SpanStartOptions
     h.readEvent = c.ReadEvent
@@ -88,36 +78,7 @@ func (h *middleware) configure(c *config) {
     h.publicEndpoint = c.PublicEndpoint
     h.publicEndpointFn = c.PublicEndpointFn
     h.server = c.ServerName
-}
-func handleErr(err error) {
-    if err != nil {
-        otel.Handle(err)
-    }
-}
-func (h *middleware) createMeasures() {
-    var err error
-    h.requestBytesCounter, err = h.meter.Int64Counter(
-        serverRequestSize,
-        metric.WithUnit("By"),
-        metric.WithDescription("Measures the size of HTTP request messages."),
-    )
-    handleErr(err)
-    h.responseBytesCounter, err = h.meter.Int64Counter(
-        serverResponseSize,
-        metric.WithUnit("By"),
-        metric.WithDescription("Measures the size of HTTP response messages."),
-    )
-    handleErr(err)
-    h.serverLatencyMeasure, err = h.meter.Float64Histogram(
-        serverDuration,
-        metric.WithUnit("ms"),
-        metric.WithDescription("Measures the duration of inbound HTTP requests."),
-    )
-    handleErr(err)
+    h.semconv = semconv.NewHTTPServer(c.Meter)
 }
 // serveHTTP sets up tracing and calls the given next http.Handler with the span
@@ -134,7 +95,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
     ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
     opts := []trace.SpanStartOption{
-        trace.WithAttributes(h.traceSemconv.RequestTraceAttrs(h.server, r)...),
+        trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...),
     }
     opts = append(opts, h.spanStartOptions...)
@@ -166,14 +127,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
         }
     }
-    var bw bodyWrapper
     // if request body is nil or NoBody, we don't want to mutate the body as it
     // will affect the identity of it in an unforeseeable way because we assert
     // ReadCloser fulfills a certain interface and it is indeed nil or NoBody.
+    bw := request.NewBodyWrapper(r.Body, readRecordFunc)
     if r.Body != nil && r.Body != http.NoBody {
-        bw.ReadCloser = r.Body
-        bw.record = readRecordFunc
-        r.Body = &bw
+        r.Body = bw
     }
     writeRecordFunc := func(int64) {}
@@ -183,13 +142,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
         }
     }
-    rww := &respWriterWrapper{
-        ResponseWriter: w,
-        record: writeRecordFunc,
-        ctx: ctx,
-        props: h.propagators,
-        statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
-    }
+    rww := request.NewRespWriterWrapper(w, writeRecordFunc)
     // Wrap w to use our ResponseWriter methods while also exposing
     // other interfaces that w may implement (http.CloseNotifier,
@@ -217,35 +170,39 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
     next.ServeHTTP(w, r.WithContext(ctx))
-    span.SetStatus(semconv.ServerStatus(rww.statusCode))
-    span.SetAttributes(h.traceSemconv.ResponseTraceAttrs(semconv.ResponseTelemetry{
-        StatusCode: rww.statusCode,
-        ReadBytes:  bw.read.Load(),
-        ReadError:  bw.err,
-        WriteBytes: rww.written,
-        WriteError: rww.err,
+    statusCode := rww.StatusCode()
+    bytesWritten := rww.BytesWritten()
+    span.SetStatus(h.semconv.Status(statusCode))
+    span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{
+        StatusCode: statusCode,
+        ReadBytes:  bw.BytesRead(),
+        ReadError:  bw.Error(),
+        WriteBytes: bytesWritten,
+        WriteError: rww.Error(),
     })...)
-    // Add metrics
-    attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...)
-    if rww.statusCode > 0 {
-        attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode))
-    }
-    o := metric.WithAttributeSet(attribute.NewSet(attributes...))
-    addOpts := []metric.AddOption{o} // Allocate vararg slice once.
-    h.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...)
-    h.responseBytesCounter.Add(ctx, rww.written, addOpts...)
     // Use floating point division here for higher precision (instead of Millisecond method).
     elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
-    h.serverLatencyMeasure.Record(ctx, elapsedTime, o)
+    h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{
+        ServerName:   h.server,
+        ResponseSize: bytesWritten,
+        MetricAttributes: semconv.MetricAttributes{
+            Req:                  r,
+            StatusCode:           statusCode,
+            AdditionalAttributes: labeler.Get(),
+        },
+        MetricData: semconv.MetricData{
+            RequestSize: bw.BytesRead(),
+            ElapsedTime: elapsedTime,
+        },
+    })
 }
 // WithRouteTag annotates spans and metrics with the provided route name
 // with HTTP route attribute.
 func WithRouteTag(route string, h http.Handler) http.Handler {
-    attr := semconv.NewHTTPServer().Route(route)
+    attr := semconv.NewHTTPServer(nil).Route(route)
     return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
         span := trace.SpanFromContext(r.Context())
         span.SetAttributes(attr)

View File

@ -0,0 +1,75 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
import (
"io"
"sync"
)
var _ io.ReadCloser = &BodyWrapper{}
// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
// of bytes read and the last error.
type BodyWrapper struct {
io.ReadCloser
OnRead func(n int64) // must not be nil
mu sync.Mutex
read int64
err error
}
// NewBodyWrapper creates a new BodyWrapper.
//
// The onRead attribute is a callback that will be called every time the data
// is read, with the number of bytes being read.
func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper {
return &BodyWrapper{
ReadCloser: body,
OnRead: onRead,
}
}
// Read reads the data from the io.ReadCloser, and stores the number of bytes
// read and the error.
func (w *BodyWrapper) Read(b []byte) (int, error) {
n, err := w.ReadCloser.Read(b)
n1 := int64(n)
w.updateReadData(n1, err)
w.OnRead(n1)
return n, err
}
func (w *BodyWrapper) updateReadData(n int64, err error) {
w.mu.Lock()
defer w.mu.Unlock()
w.read += n
if err != nil {
w.err = err
}
}
// Closes closes the io.ReadCloser.
func (w *BodyWrapper) Close() error {
return w.ReadCloser.Close()
}
// BytesRead returns the number of bytes read up to this point.
func (w *BodyWrapper) BytesRead() int64 {
w.mu.Lock()
defer w.mu.Unlock()
return w.read
}
// Error returns the last error.
func (w *BodyWrapper) Error() error {
w.mu.Lock()
defer w.mu.Unlock()
return w.err
}

View File

@ -0,0 +1,119 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
import (
"net/http"
"sync"
)
var _ http.ResponseWriter = &RespWriterWrapper{}
// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of
// bytes written, the last error, and to catch the first written statusCode.
// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc)
// that may be useful when using it in real life situations.
type RespWriterWrapper struct {
http.ResponseWriter
OnWrite func(n int64) // must not be nil
mu sync.RWMutex
written int64
statusCode int
err error
wroteHeader bool
}
// NewRespWriterWrapper creates a new RespWriterWrapper.
//
// The onWrite attribute is a callback that will be called every time the data
// is written, with the number of bytes that were written.
func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper {
return &RespWriterWrapper{
ResponseWriter: w,
OnWrite: onWrite,
statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
}
}
// Write writes the bytes array into the [ResponseWriter], and tracks the
// number of bytes written and last error.
func (w *RespWriterWrapper) Write(p []byte) (int, error) {
w.mu.Lock()
defer w.mu.Unlock()
if !w.wroteHeader {
w.writeHeader(http.StatusOK)
}
n, err := w.ResponseWriter.Write(p)
n1 := int64(n)
w.OnWrite(n1)
w.written += n1
w.err = err
return n, err
}
// WriteHeader persists initial statusCode for span attribution.
// All calls to WriteHeader will be propagated to the underlying ResponseWriter
// and will persist the statusCode from the first call.
// Blocking consecutive calls to WriteHeader alters expected behavior and will
// remove warning logs from net/http where developers will notice incorrect handler implementations.
func (w *RespWriterWrapper) WriteHeader(statusCode int) {
w.mu.Lock()
defer w.mu.Unlock()
w.writeHeader(statusCode)
}
// writeHeader persists the status code for span attribution, and propagates
// the call to the underlying ResponseWriter.
// It does not acquire a lock, and therefore assumes that is being handled by a
// parent method.
func (w *RespWriterWrapper) writeHeader(statusCode int) {
if !w.wroteHeader {
w.wroteHeader = true
w.statusCode = statusCode
}
w.ResponseWriter.WriteHeader(statusCode)
}
// Flush implements [http.Flusher].
func (w *RespWriterWrapper) Flush() {
w.mu.Lock()
defer w.mu.Unlock()
if !w.wroteHeader {
w.writeHeader(http.StatusOK)
}
if f, ok := w.ResponseWriter.(http.Flusher); ok {
f.Flush()
}
}
// BytesWritten returns the number of bytes written.
func (w *RespWriterWrapper) BytesWritten() int64 {
w.mu.RLock()
defer w.mu.RUnlock()
return w.written
}
// BytesWritten returns the HTTP status code that was sent.
func (w *RespWriterWrapper) StatusCode() int {
w.mu.RLock()
defer w.mu.RUnlock()
return w.statusCode
}
// Error returns the last error.
func (w *RespWriterWrapper) Error() error {
w.mu.RLock()
defer w.mu.RUnlock()
return w.err
}

View File

@@ -4,6 +4,7 @@
 package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
 import (
+    "context"
     "fmt"
     "net/http"
     "os"
@@ -11,6 +12,7 @@ import (
     "go.opentelemetry.io/otel/attribute"
     "go.opentelemetry.io/otel/codes"
+    "go.opentelemetry.io/otel/metric"
 )
 type ResponseTelemetry struct {
@@ -23,6 +25,11 @@ type ResponseTelemetry struct {
 type HTTPServer struct {
     duplicate bool
+    // Old metrics
+    requestBytesCounter  metric.Int64Counter
+    responseBytesCounter metric.Int64Counter
+    serverLatencyMeasure metric.Float64Histogram
 }
 // RequestTraceAttrs returns trace attributes for an HTTP request received by a
@@ -63,15 +70,10 @@ func (s HTTPServer) Route(route string) attribute.KeyValue {
     return oldHTTPServer{}.Route(route)
 }
-func NewHTTPServer() HTTPServer {
-    env := strings.ToLower(os.Getenv("OTEL_HTTP_CLIENT_COMPATIBILITY_MODE"))
-    return HTTPServer{duplicate: env == "http/dup"}
-}
-// ServerStatus returns a span status code and message for an HTTP status code
+// Status returns a span status code and message for an HTTP status code
 // value returned by a server. Status codes in the 400-499 range are not
 // returned as errors.
-func ServerStatus(code int) (codes.Code, string) {
+func (s HTTPServer) Status(code int) (codes.Code, string) {
     if code < 100 || code >= 600 {
         return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
     }
@@ -80,3 +82,146 @@ func ServerStatus(code int) (codes.Code, string) {
     }
     return codes.Unset, ""
 }
type ServerMetricData struct {
ServerName string
ResponseSize int64
MetricData
MetricAttributes
}
type MetricAttributes struct {
Req *http.Request
StatusCode int
AdditionalAttributes []attribute.KeyValue
}
type MetricData struct {
RequestSize int64
ElapsedTime float64
}
func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) {
if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil {
// This will happen if an HTTPServer{} is used insted of NewHTTPServer.
return
}
attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
o := metric.WithAttributeSet(attribute.NewSet(attributes...))
addOpts := []metric.AddOption{o}
s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...)
s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...)
s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o)
// TODO: Duplicate Metrics
}
func NewHTTPServer(meter metric.Meter) HTTPServer {
env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
duplicate := env == "http/dup"
server := HTTPServer{
duplicate: duplicate,
}
server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter)
return server
}
type HTTPClient struct {
duplicate bool
// old metrics
requestBytesCounter metric.Int64Counter
responseBytesCounter metric.Int64Counter
latencyMeasure metric.Float64Histogram
}
func NewHTTPClient(meter metric.Meter) HTTPClient {
env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
client := HTTPClient{
duplicate: env == "http/dup",
}
client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = oldHTTPClient{}.createMeasures(meter)
return client
}
// RequestTraceAttrs returns attributes for an HTTP request made by a client.
func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
if c.duplicate {
return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...)
}
return oldHTTPClient{}.RequestTraceAttrs(req)
}
// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client.
func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
if c.duplicate {
return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...)
}
return oldHTTPClient{}.ResponseTraceAttrs(resp)
}
func (c HTTPClient) Status(code int) (codes.Code, string) {
if code < 100 || code >= 600 {
return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
}
if code >= 400 {
return codes.Error, ""
}
return codes.Unset, ""
}
func (c HTTPClient) ErrorType(err error) attribute.KeyValue {
if c.duplicate {
return newHTTPClient{}.ErrorType(err)
}
return attribute.KeyValue{}
}
type MetricOpts struct {
measurement metric.MeasurementOption
addOptions metric.AddOption
}
func (o MetricOpts) MeasurementOption() metric.MeasurementOption {
return o.measurement
}
func (o MetricOpts) AddOptions() metric.AddOption {
return o.addOptions
}
func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts {
attributes := oldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes)
// TODO: Duplicate Metrics
set := metric.WithAttributeSet(attribute.NewSet(attributes...))
return MetricOpts{
measurement: set,
addOptions: set,
}
}
func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) {
if s.requestBytesCounter == nil || s.latencyMeasure == nil {
// This will happen if an HTTPClient{} is used insted of NewHTTPClient().
return
}
s.requestBytesCounter.Add(ctx, md.RequestSize, opts.AddOptions())
s.latencyMeasure.Record(ctx, md.ElapsedTime, opts.MeasurementOption())
// TODO: Duplicate Metrics
}
func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts metric.AddOption) {
if s.responseBytesCounter == nil {
// This will happen if an HTTPClient{} is used insted of NewHTTPClient().
return
}
s.responseBytesCounter.Add(ctx, responseData, opts)
// TODO: Duplicate Metrics
}

View File

@@ -4,11 +4,14 @@
 package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
 import (
+    "fmt"
     "net/http"
+    "reflect"
+    "strconv"
     "strings"
     "go.opentelemetry.io/otel/attribute"
-    semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0"
+    semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
 )
 type newHTTPServer struct{}
@@ -195,3 +198,151 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke
 func (n newHTTPServer) Route(route string) attribute.KeyValue {
     return semconvNew.HTTPRoute(route)
 }
type newHTTPClient struct{}
// RequestTraceAttrs returns trace attributes for an HTTP request made by a client.
func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
/*
below attributes are returned:
- http.request.method
- http.request.method.original
- url.full
- server.address
- server.port
- network.protocol.name
- network.protocol.version
*/
numOfAttributes := 3 // URL, server address, proto, and method.
var urlHost string
if req.URL != nil {
urlHost = req.URL.Host
}
var requestHost string
var requestPort int
for _, hostport := range []string{urlHost, req.Header.Get("Host")} {
requestHost, requestPort = splitHostPort(hostport)
if requestHost != "" || requestPort > 0 {
break
}
}
eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
if eligiblePort > 0 {
numOfAttributes++
}
useragent := req.UserAgent()
if useragent != "" {
numOfAttributes++
}
protoName, protoVersion := netProtocol(req.Proto)
if protoName != "" && protoName != "http" {
numOfAttributes++
}
if protoVersion != "" {
numOfAttributes++
}
method, originalMethod := n.method(req.Method)
if originalMethod != (attribute.KeyValue{}) {
numOfAttributes++
}
attrs := make([]attribute.KeyValue, 0, numOfAttributes)
attrs = append(attrs, method)
if originalMethod != (attribute.KeyValue{}) {
attrs = append(attrs, originalMethod)
}
var u string
if req.URL != nil {
// Remove any username/password info that may be in the URL.
userinfo := req.URL.User
req.URL.User = nil
u = req.URL.String()
// Restore any username/password info that was removed.
req.URL.User = userinfo
}
attrs = append(attrs, semconvNew.URLFull(u))
attrs = append(attrs, semconvNew.ServerAddress(requestHost))
if eligiblePort > 0 {
attrs = append(attrs, semconvNew.ServerPort(eligiblePort))
}
if protoName != "" && protoName != "http" {
attrs = append(attrs, semconvNew.NetworkProtocolName(protoName))
}
if protoVersion != "" {
attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion))
}
return attrs
}
// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client.
func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
/*
below attributes are returned:
- http.response.status_code
- error.type
*/
var count int
if resp.StatusCode > 0 {
count++
}
if isErrorStatusCode(resp.StatusCode) {
count++
}
attrs := make([]attribute.KeyValue, 0, count)
if resp.StatusCode > 0 {
attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode))
}
if isErrorStatusCode(resp.StatusCode) {
errorType := strconv.Itoa(resp.StatusCode)
attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType))
}
return attrs
}
func (n newHTTPClient) ErrorType(err error) attribute.KeyValue {
t := reflect.TypeOf(err)
var value string
if t.PkgPath() == "" && t.Name() == "" {
// Likely a builtin type.
value = t.String()
} else {
value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
}
if value == "" {
return semconvNew.ErrorTypeOther
}
return semconvNew.ErrorTypeKey.String(value)
}
func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) {
if method == "" {
return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
}
if attr, ok := methodLookup[method]; ok {
return attr, attribute.KeyValue{}
}
orig := semconvNew.HTTPRequestMethodOriginal(method)
if attr, ok := methodLookup[strings.ToUpper(method)]; ok {
return attr, orig
}
return semconvNew.HTTPRequestMethodGet, orig
}
func isErrorStatusCode(code int) bool {
return code >= 400 || code < 100
}

View File

@@ -9,8 +9,9 @@ import (
     "strconv"
     "strings"
+    "go.opentelemetry.io/otel"
     "go.opentelemetry.io/otel/attribute"
-    semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0"
+    semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
 )
 // splitHostPort splits a network address hostport of the form "host",
@@ -49,7 +50,7 @@ func splitHostPort(hostport string) (host string, port int) {
     if err != nil {
         return
     }
-    return host, int(p)
+    return host, int(p) // nolint: gosec // Byte size checked 16 above.
 }
 func requiredHTTPPort(https bool, port int) int { // nolint:revive
@@ -89,3 +90,9 @@ var methodLookup = map[string]attribute.KeyValue{
     http.MethodPut:   semconvNew.HTTPRequestMethodPut,
     http.MethodTrace: semconvNew.HTTPRequestMethodTrace,
 }
+func handleErr(err error) {
+    if err != nil {
+        otel.Handle(err)
+    }
+}

View File

@@ -7,9 +7,13 @@ import (
     "errors"
     "io"
     "net/http"
+    "slices"
+    "strings"
     "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
     "go.opentelemetry.io/otel/attribute"
+    "go.opentelemetry.io/otel/metric"
+    "go.opentelemetry.io/otel/metric/noop"
     semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
 )
@@ -72,3 +76,199 @@ func (o oldHTTPServer) Route(route string) attribute.KeyValue {
 func HTTPStatusCode(status int) attribute.KeyValue {
     return semconv.HTTPStatusCode(status)
 }
// Server HTTP metrics.
const (
serverRequestSize = "http.server.request.size" // Incoming request bytes total
serverResponseSize = "http.server.response.size" // Incoming response bytes total
serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds
)
func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
if meter == nil {
return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
}
var err error
requestBytesCounter, err := meter.Int64Counter(
serverRequestSize,
metric.WithUnit("By"),
metric.WithDescription("Measures the size of HTTP request messages."),
)
handleErr(err)
responseBytesCounter, err := meter.Int64Counter(
serverResponseSize,
metric.WithUnit("By"),
metric.WithDescription("Measures the size of HTTP response messages."),
)
handleErr(err)
serverLatencyMeasure, err := meter.Float64Histogram(
serverDuration,
metric.WithUnit("ms"),
metric.WithDescription("Measures the duration of inbound HTTP requests."),
)
handleErr(err)
return requestBytesCounter, responseBytesCounter, serverLatencyMeasure
}
func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
n := len(additionalAttributes) + 3
var host string
var p int
if server == "" {
host, p = splitHostPort(req.Host)
} else {
// Prioritize the primary server name.
host, p = splitHostPort(server)
if p < 0 {
_, p = splitHostPort(req.Host)
}
}
hostPort := requiredHTTPPort(req.TLS != nil, p)
if hostPort > 0 {
n++
}
protoName, protoVersion := netProtocol(req.Proto)
if protoName != "" {
n++
}
if protoVersion != "" {
n++
}
if statusCode > 0 {
n++
}
attributes := slices.Grow(additionalAttributes, n)
attributes = append(attributes,
standardizeHTTPMethodMetric(req.Method),
o.scheme(req.TLS != nil),
semconv.NetHostName(host))
if hostPort > 0 {
attributes = append(attributes, semconv.NetHostPort(hostPort))
}
if protoName != "" {
attributes = append(attributes, semconv.NetProtocolName(protoName))
}
if protoVersion != "" {
attributes = append(attributes, semconv.NetProtocolVersion(protoVersion))
}
if statusCode > 0 {
attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
}
return attributes
}
func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
if https {
return semconv.HTTPSchemeHTTPS
}
return semconv.HTTPSchemeHTTP
}
type oldHTTPClient struct{}
func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
return semconvutil.HTTPClientRequest(req)
}
func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
return semconvutil.HTTPClientResponse(resp)
}
func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
/* The following semantic conventions are returned if present:
http.method string
http.status_code int
net.peer.name string
net.peer.port int
*/
n := 2 // method, peer name.
var h string
if req.URL != nil {
h = req.URL.Host
}
var requestHost string
var requestPort int
for _, hostport := range []string{h, req.Header.Get("Host")} {
requestHost, requestPort = splitHostPort(hostport)
if requestHost != "" || requestPort > 0 {
break
}
}
port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
if port > 0 {
n++
}
if statusCode > 0 {
n++
}
attributes := slices.Grow(additionalAttributes, n)
attributes = append(attributes,
standardizeHTTPMethodMetric(req.Method),
semconv.NetPeerName(requestHost),
)
if port > 0 {
attributes = append(attributes, semconv.NetPeerPort(port))
}
if statusCode > 0 {
attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
}
return attributes
}
// Client HTTP metrics.
const (
clientRequestSize = "http.client.request.size" // Outgoing request bytes total
clientResponseSize = "http.client.response.size" // Incoming response bytes total
clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds
)
func (o oldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
if meter == nil {
return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
}
requestBytesCounter, err := meter.Int64Counter(
clientRequestSize,
metric.WithUnit("By"),
metric.WithDescription("Measures the size of HTTP request messages."),
)
handleErr(err)
responseBytesCounter, err := meter.Int64Counter(
clientResponseSize,
metric.WithUnit("By"),
metric.WithDescription("Measures the size of HTTP response messages."),
)
handleErr(err)
latencyMeasure, err := meter.Float64Histogram(
clientDuration,
metric.WithUnit("ms"),
metric.WithDescription("Measures the duration of outbound HTTP requests."),
)
handleErr(err)
return requestBytesCounter, responseBytesCounter, latencyMeasure
}
func standardizeHTTPMethodMetric(method string) attribute.KeyValue {
method = strings.ToUpper(method)
switch method {
case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
default:
method = "_OTHER"
}
return semconv.HTTPMethod(method)
}
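
Nonstandard methods are collapsed into `_OTHER` so the `http.method` metric attribute stays low-cardinality. A small, self-contained sketch of the same normalization rule (the helper name below is illustrative; the vendored function itself is unexported):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// normalizeMethod mirrors the rule above: only the nine standard HTTP
// methods are reported verbatim; anything else becomes "_OTHER".
func normalizeMethod(method string) string {
	method = strings.ToUpper(method)
	switch method {
	case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead,
		http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
		return method
	default:
		return "_OTHER"
	}
}

func main() {
	fmt.Println(normalizeMethod("get"))      // GET
	fmt.Println(normalizeMethod("PROPFIND")) // _OTHER
}
```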

View File

@ -195,7 +195,7 @@ func splitHostPort(hostport string) (host string, port int) {
if err != nil { if err != nil {
return return
} }
return host, int(p) return host, int(p) // nolint: gosec // Bitsize checked to be 16 above.
} }
func netProtocol(proto string) (name string, version string) { func netProtocol(proto string) (name string, version string) {

View File

@ -11,13 +11,13 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
"go.opentelemetry.io/otel" "go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/propagation"
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
) )
@ -27,16 +27,14 @@ type Transport struct {
rt http.RoundTripper rt http.RoundTripper
tracer trace.Tracer tracer trace.Tracer
meter metric.Meter
propagators propagation.TextMapPropagator propagators propagation.TextMapPropagator
spanStartOptions []trace.SpanStartOption spanStartOptions []trace.SpanStartOption
filters []Filter filters []Filter
spanNameFormatter func(string, *http.Request) string spanNameFormatter func(string, *http.Request) string
clientTrace func(context.Context) *httptrace.ClientTrace clientTrace func(context.Context) *httptrace.ClientTrace
metricAttributesFn func(*http.Request) []attribute.KeyValue
requestBytesCounter metric.Int64Counter semconv semconv.HTTPClient
responseBytesCounter metric.Int64Counter
latencyMeasure metric.Float64Histogram
} }
var _ http.RoundTripper = &Transport{} var _ http.RoundTripper = &Transport{}
@ -63,43 +61,19 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
c := newConfig(append(defaultOpts, opts...)...) c := newConfig(append(defaultOpts, opts...)...)
t.applyConfig(c) t.applyConfig(c)
t.createMeasures()
return &t return &t
} }
func (t *Transport) applyConfig(c *config) { func (t *Transport) applyConfig(c *config) {
t.tracer = c.Tracer t.tracer = c.Tracer
t.meter = c.Meter
t.propagators = c.Propagators t.propagators = c.Propagators
t.spanStartOptions = c.SpanStartOptions t.spanStartOptions = c.SpanStartOptions
t.filters = c.Filters t.filters = c.Filters
t.spanNameFormatter = c.SpanNameFormatter t.spanNameFormatter = c.SpanNameFormatter
t.clientTrace = c.ClientTrace t.clientTrace = c.ClientTrace
} t.semconv = semconv.NewHTTPClient(c.Meter)
t.metricAttributesFn = c.MetricAttributesFn
func (t *Transport) createMeasures() {
var err error
t.requestBytesCounter, err = t.meter.Int64Counter(
clientRequestSize,
metric.WithUnit("By"),
metric.WithDescription("Measures the size of HTTP request messages."),
)
handleErr(err)
t.responseBytesCounter, err = t.meter.Int64Counter(
clientResponseSize,
metric.WithUnit("By"),
metric.WithDescription("Measures the size of HTTP response messages."),
)
handleErr(err)
t.latencyMeasure, err = t.meter.Float64Histogram(
clientDuration,
metric.WithUnit("ms"),
metric.WithDescription("Measures the duration of outbound HTTP requests."),
)
handleErr(err)
} }
func defaultTransportFormatter(_ string, r *http.Request) string { func defaultTransportFormatter(_ string, r *http.Request) string {
@ -143,54 +117,68 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request.
// use a body wrapper to determine the request size
var bw bodyWrapper
// if request body is nil or NoBody, we don't want to mutate the body as it // if request body is nil or NoBody, we don't want to mutate the body as it
// will affect the identity of it in an unforeseeable way because we assert // will affect the identity of it in an unforeseeable way because we assert
// ReadCloser fulfills a certain interface and it is indeed nil or NoBody. // ReadCloser fulfills a certain interface and it is indeed nil or NoBody.
bw := request.NewBodyWrapper(r.Body, func(int64) {})
if r.Body != nil && r.Body != http.NoBody { if r.Body != nil && r.Body != http.NoBody {
bw.ReadCloser = r.Body r.Body = bw
// noop to prevent nil panic. not using this record fun yet.
bw.record = func(int64) {}
r.Body = &bw
} }
span.SetAttributes(semconvutil.HTTPClientRequest(r)...) span.SetAttributes(t.semconv.RequestTraceAttrs(r)...)
t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
res, err := t.rt.RoundTrip(r) res, err := t.rt.RoundTrip(r)
if err != nil { if err != nil {
// Set the error type attribute if the error is one of the predefined
// error types; otherwise, record it as an exception.
if errType := t.semconv.ErrorType(err); errType.Valid() {
span.SetAttributes(errType)
} else {
span.RecordError(err) span.RecordError(err)
}
span.SetStatus(codes.Error, err.Error()) span.SetStatus(codes.Error, err.Error())
span.End() span.End()
return res, err return res, err
} }
// metrics // metrics
metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...) metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{
if res.StatusCode > 0 { Req: r,
metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) StatusCode: res.StatusCode,
} AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...),
o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) })
addOpts := []metric.AddOption{o} // Allocate vararg slice once.
t.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...)
// For handling response bytes we leverage a callback when the client reads the http response // For handling response bytes we leverage a callback when the client reads the http response
readRecordFunc := func(n int64) { readRecordFunc := func(n int64) {
t.responseBytesCounter.Add(ctx, n, addOpts...) t.semconv.RecordResponseSize(ctx, n, metricOpts.AddOptions())
} }
// traces // traces
span.SetAttributes(semconvutil.HTTPClientResponse(res)...) span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...)
span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode)) span.SetStatus(t.semconv.Status(res.StatusCode))
res.Body = newWrappedBody(span, readRecordFunc, res.Body) res.Body = newWrappedBody(span, readRecordFunc, res.Body)
// Use floating point division here for higher precision (instead of Millisecond method). // Use floating point division here for higher precision (instead of Millisecond method).
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
t.latencyMeasure.Record(ctx, elapsedTime, o) t.semconv.RecordMetrics(ctx, semconv.MetricData{
RequestSize: bw.BytesRead(),
ElapsedTime: elapsedTime,
}, metricOpts)
return res, err return res, nil
}
func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue {
var attributeForRequest []attribute.KeyValue
if t.metricAttributesFn != nil {
attributeForRequest = t.metricAttributesFn(r)
}
return attributeForRequest
} }
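
The Transport now forwards caller-supplied metric attributes through this hook. A hedged sketch of wiring per-request attributes from application code; it assumes the internal `MetricAttributesFn` config field shown above is exposed through an option named `WithMetricAttributesFn`, so verify the exact option name against the otelhttp version in use:

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	client := &http.Client{
		Transport: otelhttp.NewTransport(http.DefaultTransport,
			// Assumed option name; the diff only shows the internal config field.
			otelhttp.WithMetricAttributesFn(func(r *http.Request) []attribute.KeyValue {
				return []attribute.KeyValue{
					attribute.String("tenant", r.Header.Get("X-Tenant")),
				}
			}),
		),
	}

	resp, err := client.Get("https://example.com/")
	if err == nil {
		_ = resp.Body.Close()
	}
}
```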
// newWrappedBody returns a new and appropriately scoped *wrappedBody as an // newWrappedBody returns a new and appropriately scoped *wrappedBody as an

View File

@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http
// Version is the current release version of the otelhttp instrumentation. // Version is the current release version of the otelhttp instrumentation.
func Version() string { func Version() string {
return "0.53.0" return "0.56.0"
// This string is updated by the pre_release.sh script during release // This string is updated by the pre_release.sh script during release
} }

View File

@ -1,99 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
import (
"context"
"io"
"net/http"
"sync/atomic"
"go.opentelemetry.io/otel/propagation"
)
var _ io.ReadCloser = &bodyWrapper{}
// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
// of bytes read and the last error.
type bodyWrapper struct {
io.ReadCloser
record func(n int64) // must not be nil
read atomic.Int64
err error
}
func (w *bodyWrapper) Read(b []byte) (int, error) {
n, err := w.ReadCloser.Read(b)
n1 := int64(n)
w.read.Add(n1)
w.err = err
w.record(n1)
return n, err
}
func (w *bodyWrapper) Close() error {
return w.ReadCloser.Close()
}
var _ http.ResponseWriter = &respWriterWrapper{}
// respWriterWrapper wraps a http.ResponseWriter in order to track the number of
// bytes written, the last error, and to catch the first written statusCode.
// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc)
// that may be useful when using it in real life situations.
type respWriterWrapper struct {
http.ResponseWriter
record func(n int64) // must not be nil
// used to inject the header
ctx context.Context
props propagation.TextMapPropagator
written int64
statusCode int
err error
wroteHeader bool
}
func (w *respWriterWrapper) Header() http.Header {
return w.ResponseWriter.Header()
}
func (w *respWriterWrapper) Write(p []byte) (int, error) {
if !w.wroteHeader {
w.WriteHeader(http.StatusOK)
}
n, err := w.ResponseWriter.Write(p)
n1 := int64(n)
w.record(n1)
w.written += n1
w.err = err
return n, err
}
// WriteHeader persists initial statusCode for span attribution.
// All calls to WriteHeader will be propagated to the underlying ResponseWriter
// and will persist the statusCode from the first call.
// Blocking consecutive calls to WriteHeader alters expected behavior and will
// remove warning logs from net/http where developers will notice incorrect handler implementations.
func (w *respWriterWrapper) WriteHeader(statusCode int) {
if !w.wroteHeader {
w.wroteHeader = true
w.statusCode = statusCode
}
w.ResponseWriter.WriteHeader(statusCode)
}
func (w *respWriterWrapper) Flush() {
if !w.wroteHeader {
w.WriteHeader(http.StatusOK)
}
if f, ok := w.ResponseWriter.(http.Flusher); ok {
f.Flush()
}
}

View File

@ -9,6 +9,8 @@ linters:
disable-all: true disable-all: true
# Specifically enable linters we want to use. # Specifically enable linters we want to use.
enable: enable:
- asasalint
- bodyclose
- depguard - depguard
- errcheck - errcheck
- errorlint - errorlint
@ -23,6 +25,7 @@ linters:
- revive - revive
- staticcheck - staticcheck
- tenv - tenv
- testifylint
- typecheck - typecheck
- unconvert - unconvert
- unused - unused
@ -62,12 +65,12 @@ issues:
- path: _test\.go - path: _test\.go
linters: linters:
- gosec - gosec
# Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
# as we commonly use it in tests and examples. # as we commonly use it in tests and examples.
- text: "G404:" - text: "G404:"
linters: linters:
- gosec - gosec
# Igonoring gosec G402: TLS MinVersion too low # Ignoring gosec G402: TLS MinVersion too low
# as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
- text: "G402: TLS MinVersion too low." - text: "G402: TLS MinVersion too low."
linters: linters:
@ -300,3 +303,9 @@ linters-settings:
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value
- name: waitgroup-by-value - name: waitgroup-by-value
disabled: false disabled: false
testifylint:
enable-all: true
disable:
- float-compare
- go-require
- require-error

View File

@ -8,6 +8,112 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased] ## [Unreleased]
<!-- Released section -->
<!-- Don't change this section unless doing release -->
## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11
### Added
- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862)
- Add `WithExportBufferSize` option to log batch processor. (#5877)
### Changed
- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off` (#5778)
- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791)
- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791)
- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847)
- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864)
- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858)
- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874)
### Deprecated
- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854)
### Fixed
- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819)
- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to its resource and scope. (#5803)
- Fix timer channel drain to avoid hanging on Go 1.23. (#5868)
- Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827)
- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827)
## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09
### Added
- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739)
- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773)
- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773)
- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755)
### Fixed
- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754)
- Fix panic on instruments creation when setting meter provider. (#5758)
- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780)
### Removed
- Drop support for [Go 1.21]. (#5736, #5740, #5800)
## [1.29.0/0.51.0/0.5.0] 2024-08-23
This release is the last to support [Go 1.21].
The next release will require at least [Go 1.22].
### Added
- Add MacOS ARM64 platform to the compatibility testing suite. (#5577)
- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627)
- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`.
This new module contains an OTLP exporter that transmits log telemetry using gRPC.
This module is unstable and breaking changes may be introduced.
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629)
- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651)
- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651)
- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665)
- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`.
This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not.
It replaces the existing `Enabled` method that is removed from the `Processor` interface itself.
It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692)
- Support [Go 1.23]. (#5720)
### Changed
- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132)
- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636)
- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665)
- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666)
- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666)
- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method.
See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692)
- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
### Fixed
- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584)
- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541)
- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612)
- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612)
- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612)
- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612)
- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612)
- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612)
- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612)
- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641)
- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650)
- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705)
- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705)
### Removed
- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
## [1.28.0/0.50.0/0.4.0] 2024-07-02 ## [1.28.0/0.50.0/0.4.0] 2024-07-02
### Added ### Added
@ -49,6 +155,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- Fix stale timestamps reported by the last-value aggregation. (#5517) - Fix stale timestamps reported by the last-value aggregation. (#5517)
- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521) - Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521)
- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549) - Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549)
- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528)
## [1.27.0/0.49.0/0.3.0] 2024-05-21 ## [1.27.0/0.49.0/0.3.0] 2024-05-21
@ -175,7 +282,7 @@ The next release will require at least [Go 1.21].
This module includes OpenTelemetry Go's implementation of the Logs Bridge API. This module includes OpenTelemetry Go's implementation of the Logs Bridge API.
This module is in an alpha state, it is subject to breaking changes. This module is in an alpha state, it is subject to breaking changes.
See our [versioning policy](./VERSIONING.md) for more info. (#4961) See our [versioning policy](./VERSIONING.md) for more info. (#4961)
- ARM64 platform to the compatibility testing suite. (#4994) - Add ARM64 platform to the compatibility testing suite. (#4994)
### Fixed ### Fixed
@ -1836,7 +1943,7 @@ with major version 0.
- Setting error status while recording error with Span from oteltest package. (#1729) - Setting error status while recording error with Span from oteltest package. (#1729)
- The concept of a remote and local Span stored in a context is unified to just the current Span. - The concept of a remote and local Span stored in a context is unified to just the current Span.
Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span. Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span.
If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731) If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. - The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749) This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
@ -2410,7 +2517,7 @@ This release migrates the default OpenTelemetry SDK into its own Go module, deco
- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) - Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) - Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913) - Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
- Update otel-colector example to use the v0.5.0 collector. (#915) - Update otel-collector example to use the v0.5.0 collector. (#915)
- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) - The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) - The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. - The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
@ -3003,7 +3110,10 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files. - CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project. - CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD [Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...HEAD
[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0
[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0
[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0
[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 [1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0
[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 [1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0
[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 [1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0
@ -3086,6 +3196,9 @@ It contains api and sdk for trace and meter.
[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
<!-- Released section ended -->
[Go 1.23]: https://go.dev/doc/go1.23
[Go 1.22]: https://go.dev/doc/go1.22 [Go 1.22]: https://go.dev/doc/go1.22
[Go 1.21]: https://go.dev/doc/go1.21 [Go 1.21]: https://go.dev/doc/go1.21
[Go 1.20]: https://go.dev/doc/go1.20 [Go 1.20]: https://go.dev/doc/go1.20

View File

@ -5,13 +5,13 @@
##################################################### #####################################################
# #
# Learn about membership in OpenTelemetry community: # Learn about membership in OpenTelemetry community:
# https://github.com/open-telemetry/community/blob/main/community-membership.md # https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md
# #
# #
# Learn about CODEOWNERS file format: # Learn about CODEOWNERS file format:
# https://help.github.com/en/articles/about-code-owners # https://help.github.com/en/articles/about-code-owners
# #
* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu * @MrAlias @XSAM @dashpole @pellared @dmathieu
CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu

View File

@ -578,7 +578,10 @@ See also:
The tests should never leak goroutines. The tests should never leak goroutines.
Use the term `ConcurrentSafe` in the test name when it aims to verify the Use the term `ConcurrentSafe` in the test name when it aims to verify the
absence of race conditions. absence of race conditions. The top-level tests with this term will be run
many times in the `test-concurrent-safe` CI job to increase the chance of
catching concurrency issues. This does not apply to subtests when this term
is not in their root name.
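
A minimal sketch of a test that follows this naming convention and would be picked up by the `test-concurrent-safe` job; the atomic counter is a stand-in for whatever component is under test:

```go
package example

import (
	"sync"
	"sync/atomic"
	"testing"
)

// TestConcurrentSafeAdd matches -run=ConcurrentSafe, so the
// test-concurrent-safe job repeats it with -race -count=100.
func TestConcurrentSafeAdd(t *testing.T) {
	var n atomic.Int64 // stand-in for the component under test

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			n.Add(1)
		}()
	}
	wg.Wait()

	if got := n.Load(); got != 10 {
		t.Errorf("got %d, want 10", got)
	}
}
```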
### Internal packages ### Internal packages
@ -628,11 +631,8 @@ should be canceled.
### Approvers ### Approvers
- [Chester Cheung](https://github.com/hanyuancheung), Tencent
### Maintainers ### Maintainers
- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
- [Damien Mathieu](https://github.com/dmathieu), Elastic - [Damien Mathieu](https://github.com/dmathieu), Elastic
- [David Ashpole](https://github.com/dashpole), Google - [David Ashpole](https://github.com/dashpole), Google
- [Robert Pająk](https://github.com/pellared), Splunk - [Robert Pająk](https://github.com/pellared), Splunk
@ -641,16 +641,18 @@ should be canceled.
### Emeritus ### Emeritus
- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb - [Aaron Clawson](https://github.com/MadVikingGod), LightStep
- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
- [Chester Cheung](https://github.com/hanyuancheung), Tencent
- [Evan Torrie](https://github.com/evantorrie), Yahoo
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep - [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
- [Josh MacDonald](https://github.com/jmacd), LightStep - [Josh MacDonald](https://github.com/jmacd), LightStep
- [Anthony Mirabella](https://github.com/Aneurysm9), AWS - [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
- [Evan Torrie](https://github.com/evantorrie), Yahoo
### Become an Approver or a Maintainer ### Become an Approver or a Maintainer
See the [community membership document in OpenTelemetry community See the [community membership document in OpenTelemetry community
repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md).
[Approver]: #approvers [Approver]: #approvers
[Maintainer]: #maintainers [Maintainer]: #maintainers

View File

@ -54,9 +54,6 @@ $(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer
PORTO = $(TOOLS)/porto PORTO = $(TOOLS)/porto
$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto
GOJQ = $(TOOLS)/gojq
$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq
GOTMPL = $(TOOLS)/gotmpl GOTMPL = $(TOOLS)/gotmpl
$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl
@ -67,7 +64,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck
$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
.PHONY: tools .PHONY: tools
tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
# Virtualized python tools via docker # Virtualized python tools via docker
@ -145,12 +142,14 @@ build-tests/%:
# Tests # Tests
TEST_TARGETS := test-default test-bench test-short test-verbose test-race TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe
.PHONY: $(TEST_TARGETS) test .PHONY: $(TEST_TARGETS) test
test-default test-race: ARGS=-race test-default test-race: ARGS=-race
test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
test-short: ARGS=-short test-short: ARGS=-short
test-verbose: ARGS=-v -race test-verbose: ARGS=-v -race
test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race
test-concurrent-safe: TIMEOUT=120
$(TEST_TARGETS): test $(TEST_TARGETS): test
test: $(OTEL_GO_MOD_DIRS:%=test/%) test: $(OTEL_GO_MOD_DIRS:%=test/%)
test/%: DIR=$* test/%: DIR=$*
@ -178,17 +177,14 @@ test-coverage: $(GOCOVMERGE)
done; \ done; \
$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
# Adding a directory will include all benchmarks in that directory if a filter is not specified.
BENCHMARK_TARGETS := sdk/trace
.PHONY: benchmark .PHONY: benchmark
benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%)
BENCHMARK_FILTER = .
# You can override the filter for a particular directory by adding a rule here.
benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample
benchmark/%: benchmark/%:
@echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \
&& cd $* \ && cd $* \
$(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) && $(GO) list ./... \
| grep -v third_party \
| xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=.
.PHONY: golangci-lint golangci-lint-fix .PHONY: golangci-lint golangci-lint-fix
golangci-lint-fix: ARGS=--fix golangci-lint-fix: ARGS=--fix

View File

@ -48,19 +48,21 @@ stop ensuring compatibility with these versions in the following manner:
Currently, this project supports the following environments. Currently, this project supports the following environments.
| OS | Go Version | Architecture | | OS | Go Version | Architecture |
|---------|------------|--------------| |----------|------------|--------------|
| Ubuntu | 1.23 | amd64 |
| Ubuntu | 1.22 | amd64 | | Ubuntu | 1.22 | amd64 |
| Ubuntu | 1.21 | amd64 | | Ubuntu | 1.23 | 386 |
| Ubuntu | 1.22 | 386 | | Ubuntu | 1.22 | 386 |
| Ubuntu | 1.21 | 386 | | Linux | 1.23 | arm64 |
| Linux | 1.22 | arm64 | | Linux | 1.22 | arm64 |
| Linux | 1.21 | arm64 | | macOS 13 | 1.23 | amd64 |
| MacOS | 1.22 | amd64 | | macOS 13 | 1.22 | amd64 |
| MacOS | 1.21 | amd64 | | macOS | 1.23 | arm64 |
| macOS | 1.22 | arm64 |
| Windows | 1.23 | amd64 |
| Windows | 1.22 | amd64 | | Windows | 1.22 | amd64 |
| Windows | 1.21 | amd64 | | Windows | 1.23 | 386 |
| Windows | 1.22 | 386 | | Windows | 1.22 | 386 |
| Windows | 1.21 | 386 |
While this project should work for other systems, no compatibility guarantees While this project should work for other systems, no compatibility guarantees
are made for those systems currently. are made for those systems currently.
@ -87,8 +89,8 @@ If you need to extend the telemetry an instrumentation library provides or want
to build your own instrumentation for your application directly you will need to build your own instrumentation for your application directly you will need
to use the to use the
[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) [Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
package. The included [examples](./example/) are a good way to see some package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples)
practical uses of this process. are a good way to see some practical uses of this process.
### Export ### Export

View File

@ -69,6 +69,7 @@ Update go.mod for submodules to depend on the new release which will happen in t
``` ```
- Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`). - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
- Make sure the new section is under the comment for released section, like `<!-- Released section -->`, so it is protected from being overwritten in the future.
- Update all the appropriate links at the bottom. - Update all the appropriate links at the bottom.
4. Push the changes to upstream and create a Pull Request on GitHub. 4. Push the changes to upstream and create a Pull Request on GitHub.
@ -110,17 +111,6 @@ It is critical you make sure the version you push upstream is correct.
Finally create a Release for the new `<new tag>` on GitHub. Finally create a Release for the new `<new tag>` on GitHub.
The release body should include all the release notes from the Changelog for this release. The release body should include all the release notes from the Changelog for this release.
## Verify Examples
After releasing verify that examples build outside of the repository.
```
./verify_examples.sh
```
The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them.
This ensures they build with the published release, not the local copy.
## Post-Release ## Post-Release
### Contrib Repository ### Contrib Repository

View File

@ -347,45 +347,25 @@ func computeDistinct(kvs []KeyValue) Distinct {
func computeDistinctFixed(kvs []KeyValue) interface{} { func computeDistinctFixed(kvs []KeyValue) interface{} {
switch len(kvs) { switch len(kvs) {
case 1: case 1:
ptr := new([1]KeyValue) return [1]KeyValue(kvs)
copy((*ptr)[:], kvs)
return *ptr
case 2: case 2:
ptr := new([2]KeyValue) return [2]KeyValue(kvs)
copy((*ptr)[:], kvs)
return *ptr
case 3: case 3:
ptr := new([3]KeyValue) return [3]KeyValue(kvs)
copy((*ptr)[:], kvs)
return *ptr
case 4: case 4:
ptr := new([4]KeyValue) return [4]KeyValue(kvs)
copy((*ptr)[:], kvs)
return *ptr
case 5: case 5:
ptr := new([5]KeyValue) return [5]KeyValue(kvs)
copy((*ptr)[:], kvs)
return *ptr
case 6: case 6:
ptr := new([6]KeyValue) return [6]KeyValue(kvs)
copy((*ptr)[:], kvs)
return *ptr
case 7: case 7:
ptr := new([7]KeyValue) return [7]KeyValue(kvs)
copy((*ptr)[:], kvs)
return *ptr
case 8: case 8:
ptr := new([8]KeyValue) return [8]KeyValue(kvs)
copy((*ptr)[:], kvs)
return *ptr
case 9: case 9:
ptr := new([9]KeyValue) return [9]KeyValue(kvs)
copy((*ptr)[:], kvs)
return *ptr
case 10: case 10:
ptr := new([10]KeyValue) return [10]KeyValue(kvs)
copy((*ptr)[:], kvs)
return *ptr
default: default:
return nil return nil
} }
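
The rewrite relies on Go 1.20+ slice-to-array conversion, which copies the first N elements into a new array value and panics if the slice is shorter than N. A short illustration of that behavior:

```go
package main

import "fmt"

func main() {
	kvs := []string{"a", "b", "c"}

	arr := [3]string(kvs) // copies the slice's elements into a new array value
	kvs[0] = "changed"    // does not affect arr; the array is an independent copy

	fmt.Println(arr) // [a b c]

	// Converting a slice shorter than the array length panics at runtime:
	// _ = [4]string(kvs) // panic: cannot convert slice with length 3 to array of length 4
}
```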

View File

@ -44,9 +44,15 @@ type Property struct {
// NewKeyProperty returns a new Property for key. // NewKeyProperty returns a new Property for key.
// //
// The passed key must be a valid, non-empty UTF-8 string.
// If key is invalid, an error will be returned. // If key is invalid, an error will be returned.
// However, the specific Propagators that are used to transmit baggage entries across
// component boundaries may impose their own restrictions on Property key.
// For example, the W3C Baggage specification restricts the Property keys to strings that
// satisfy the token definition from RFC7230, Section 3.2.6.
// For maximum compatibility, alphanumeric values are strongly recommended to be used as Property keys.
func NewKeyProperty(key string) (Property, error) { func NewKeyProperty(key string) (Property, error) {
if !validateKey(key) { if !validateBaggageName(key) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
} }
@ -62,6 +68,10 @@ func NewKeyProperty(key string) (Property, error) {
// Notice: Consider using [NewKeyValuePropertyRaw] instead // Notice: Consider using [NewKeyValuePropertyRaw] instead
// that does not require percent-encoding of the value. // that does not require percent-encoding of the value.
func NewKeyValueProperty(key, value string) (Property, error) { func NewKeyValueProperty(key, value string) (Property, error) {
if !validateKey(key) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
if !validateValue(value) { if !validateValue(value) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
} }
@ -74,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) {
// NewKeyValuePropertyRaw returns a new Property for key with value. // NewKeyValuePropertyRaw returns a new Property for key with value.
// //
// The passed key must be compliant with W3C Baggage specification. // The passed key must be a valid, non-empty UTF-8 string.
// The passed value must be a valid UTF-8 string.
// However, the specific Propagators that are used to transmit baggage entries across
// component boundaries may impose their own restrictions on Property key.
// For example, the W3C Baggage specification restricts the Property keys to strings that
// satisfy the token definition from RFC7230, Section 3.2.6.
// For maximum compatibility, alphanumeric values are strongly recommended to be used as Property keys.
func NewKeyValuePropertyRaw(key, value string) (Property, error) { func NewKeyValuePropertyRaw(key, value string) (Property, error) {
if !validateKey(key) { if !validateBaggageName(key) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
} }
if !validateBaggageValue(value) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
p := Property{ p := Property{
key: key, key: key,
@ -115,12 +134,15 @@ func (p Property) validate() error {
return fmt.Errorf("invalid property: %w", err) return fmt.Errorf("invalid property: %w", err)
} }
if !validateKey(p.key) { if !validateBaggageName(p.key) {
return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
} }
if !p.hasValue && p.value != "" { if !p.hasValue && p.value != "" {
return errFunc(errors.New("inconsistent value")) return errFunc(errors.New("inconsistent value"))
} }
if p.hasValue && !validateBaggageValue(p.value) {
return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
}
return nil return nil
} }
@ -138,7 +160,15 @@ func (p Property) Value() (string, bool) {
// String encodes Property into a header string compliant with the W3C Baggage // String encodes Property into a header string compliant with the W3C Baggage
// specification. // specification.
// It returns an empty string if the key does not conform to the W3C Baggage
// specification. This can happen for a UTF-8 key, as it may contain
// characters that the specification does not allow.
func (p Property) String() string { func (p Property) String() string {
// W3C Baggage specification does not allow percent-encoded keys.
if !validateKey(p.key) {
return ""
}
if p.hasValue { if p.hasValue {
return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value)) return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
} }
@ -203,9 +233,14 @@ func (p properties) validate() error {
// String encodes properties into a header string compliant with the W3C Baggage // String encodes properties into a header string compliant with the W3C Baggage
// specification. // specification.
func (p properties) String() string { func (p properties) String() string {
props := make([]string, len(p)) props := make([]string, 0, len(p))
for i, prop := range p { for _, prop := range p {
props[i] = prop.String() s := prop.String()
// Ignore empty properties.
if s != "" {
props = append(props, s)
}
} }
return strings.Join(props, propertyDelimiter) return strings.Join(props, propertyDelimiter)
} }
@ -230,6 +265,10 @@ type Member struct {
// Notice: Consider using [NewMemberRaw] instead // Notice: Consider using [NewMemberRaw] instead
// that does not require percent-encoding of the value. // that does not require percent-encoding of the value.
func NewMember(key, value string, props ...Property) (Member, error) { func NewMember(key, value string, props ...Property) (Member, error) {
if !validateKey(key) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
if !validateValue(value) { if !validateValue(value) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
} }
@ -242,7 +281,13 @@ func NewMember(key, value string, props ...Property) (Member, error) {
// NewMemberRaw returns a new Member from the passed arguments. // NewMemberRaw returns a new Member from the passed arguments.
// //
// The passed key must be compliant with W3C Baggage specification. // The passed key must be a valid, non-empty UTF-8 string.
// The passed value must be a valid UTF-8 string.
// However, the specific Propagators that are used to transmit baggage entries across
// component boundaries may impose their own restrictions on baggage key.
// For example, the W3C Baggage specification restricts the baggage keys to strings that
// satisfy the token definition from RFC7230, Section 3.2.6.
// For maximum compatibility, alphanumeric values are strongly recommended to be used as baggage keys.
func NewMemberRaw(key, value string, props ...Property) (Member, error) { func NewMemberRaw(key, value string, props ...Property) (Member, error) {
m := Member{ m := Member{
key: key, key: key,
@ -294,19 +339,45 @@ func parseMember(member string) (Member, error) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
} }
val := strings.TrimSpace(v) rawVal := strings.TrimSpace(v)
if !validateValue(val) { if !validateValue(rawVal) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
} }
// Decode a percent-encoded value. // Decode a percent-encoded value.
value, err := url.PathUnescape(val) unescapeVal, err := url.PathUnescape(rawVal)
if err != nil { if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err)
} }
value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
return Member{key: key, value: value, properties: props, hasData: true}, nil return Member{key: key, value: value, properties: props, hasData: true}, nil
} }
// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'.
func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string {
if utf8.ValidString(unescapeVal) {
return unescapeVal
}
// W3C baggage spec:
// https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69
var b strings.Builder
b.Grow(cap)
for i := 0; i < len(unescapeVal); {
r, size := utf8.DecodeRuneInString(unescapeVal[i:])
if r == utf8.RuneError && size == 1 {
// Invalid UTF-8 sequence found, replace it with '�'
_, _ = b.WriteString("�")
} else {
_, _ = b.WriteRune(r)
}
i += size
}
return b.String()
}
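
A standalone sketch of the same sanitization technique: invalid byte sequences decode as `utf8.RuneError` with size 1 and are replaced with U+FFFD, while valid runes are copied through unchanged:

```go
package main

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

// sanitize walks the string rune by rune and substitutes U+FFFD for each
// invalid byte sequence, mirroring the helper above.
func sanitize(s string) string {
	if utf8.ValidString(s) {
		return s
	}
	var b strings.Builder
	b.Grow(len(s))
	for i := 0; i < len(s); {
		r, size := utf8.DecodeRuneInString(s[i:])
		if r == utf8.RuneError && size == 1 {
			b.WriteRune(utf8.RuneError) // '\uFFFD'
		} else {
			b.WriteRune(r)
		}
		i += size
	}
	return b.String()
}

func main() {
	fmt.Println(sanitize("ok\xffvalue")) // ok�value
}
```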
// validate ensures m conforms to the W3C Baggage specification. // validate ensures m conforms to the W3C Baggage specification.
// A key must be an ASCII string, returning an error otherwise. // A key must be an ASCII string, returning an error otherwise.
func (m Member) validate() error { func (m Member) validate() error {
@ -314,9 +385,12 @@ func (m Member) validate() error {
return fmt.Errorf("%w: %q", errInvalidMember, m) return fmt.Errorf("%w: %q", errInvalidMember, m)
} }
if !validateKey(m.key) { if !validateBaggageName(m.key) {
return fmt.Errorf("%w: %q", errInvalidKey, m.key) return fmt.Errorf("%w: %q", errInvalidKey, m.key)
} }
if !validateBaggageValue(m.value) {
return fmt.Errorf("%w: %q", errInvalidValue, m.value)
}
return m.properties.validate() return m.properties.validate()
} }
@ -331,10 +405,15 @@ func (m Member) Properties() []Property { return m.properties.Copy() }
// String encodes Member into a header string compliant with the W3C Baggage // String encodes Member into a header string compliant with the W3C Baggage
// specification. // specification.
// It returns an empty string if the key does not conform to the W3C Baggage
// specification. This can happen for a UTF-8 key, as it may contain
// characters that the specification does not allow.
func (m Member) String() string { func (m Member) String() string {
// A key is just an ASCII string. A value is restricted to be // W3C Baggage specification does not allow percent-encoded keys.
// US-ASCII characters excluding CTLs, whitespace, if !validateKey(m.key) {
// DQUOTE, comma, semicolon, and backslash. return ""
}
s := m.key + keyValueDelimiter + valueEscape(m.value) s := m.key + keyValueDelimiter + valueEscape(m.value)
if len(m.properties) > 0 { if len(m.properties) > 0 {
s += propertyDelimiter + m.properties.String() s += propertyDelimiter + m.properties.String()
@ -448,7 +527,7 @@ func (b Baggage) Member(key string) Member {
} }
// Members returns all the baggage list-members. // Members returns all the baggage list-members.
// The order of the returned list-members does not have significance. // The order of the returned list-members is not significant.
// //
// The returned members are not validated, as we assume the validation happened // The returned members are not validated, as we assume the validation happened
// when they were added to the Baggage. // when they were added to the Baggage.
@ -469,8 +548,8 @@ func (b Baggage) Members() []Member {
return members return members
} }
// SetMember returns a copy the Baggage with the member included. If the // SetMember returns a copy of the Baggage with the member included. If the
// baggage contains a Member with the same key the existing Member is // baggage contains a Member with the same key, the existing Member is
// replaced. // replaced.
// //
// If member is invalid according to the W3C Baggage specification, an error // If member is invalid according to the W3C Baggage specification, an error
@ -528,14 +607,22 @@ func (b Baggage) Len() int {
// String encodes Baggage into a header string compliant with the W3C Baggage // String encodes Baggage into a header string compliant with the W3C Baggage
// specification. // specification.
// It ignores members whose key does not conform to the W3C Baggage
// specification. This can happen for a UTF-8 key, as it may contain
// characters that the specification does not allow.
func (b Baggage) String() string { func (b Baggage) String() string {
members := make([]string, 0, len(b.list)) members := make([]string, 0, len(b.list))
for k, v := range b.list { for k, v := range b.list {
members = append(members, Member{ s := Member{
key: k, key: k,
value: v.Value, value: v.Value,
properties: fromInternalProperties(v.Properties), properties: fromInternalProperties(v.Properties),
}.String()) }.String()
// Ignored empty members.
if s != "" {
members = append(members, s)
}
} }
return strings.Join(members, listDelimiter) return strings.Join(members, listDelimiter)
} }
@ -607,10 +694,12 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
} }
// Decode a percent-encoded value. // Decode a percent-encoded value.
value, err := url.PathUnescape(s[valueStart:valueEnd]) rawVal := s[valueStart:valueEnd]
unescapeVal, err := url.PathUnescape(rawVal)
if err != nil { if err != nil {
return return
} }
value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
ok = true ok = true
p.key = s[keyStart:keyEnd] p.key = s[keyStart:keyEnd]
@ -720,6 +809,24 @@ var safeKeyCharset = [utf8.RuneSelf]bool{
'~': true, '~': true,
} }
// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name.
// Baggage name is a valid, non-empty UTF-8 string.
func validateBaggageName(s string) bool {
if len(s) == 0 {
return false
}
return utf8.ValidString(s)
}
// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value.
// Baggage value is a valid UTF-8 string.
// Empty string is also a valid UTF-8 string.
func validateBaggageValue(s string) bool {
return utf8.ValidString(s)
}
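
With names now validated as UTF-8 rather than as W3C tokens, a member whose key is not a token can still be stored in a Baggage but is dropped when encoding the W3C header. A small sketch using the public API (behavior as of this version; the key below is illustrative):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// A UTF-8 key is accepted by NewMemberRaw...
	m, err := baggage.NewMemberRaw("user-näme", "alice")
	if err != nil {
		panic(err)
	}
	b, err := baggage.New(m)
	if err != nil {
		panic(err)
	}

	// ...but it is not a valid W3C token, so it is omitted from the
	// header encoding produced by String.
	fmt.Printf("members: %d, header: %q\n", b.Len(), b.String()) // members: 1, header: ""
}
```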
// validateKey checks if the string is a valid W3C Baggage key.
func validateKey(s string) bool { func validateKey(s string) bool {
if len(s) == 0 { if len(s) == 0 {
return false return false
@ -738,6 +845,7 @@ func validateKeyChar(c int32) bool {
return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c]
} }
// validateValue checks if the string is a valid W3C Baggage value.
func validateValue(s string) bool { func validateValue(s string) bool {
for _, c := range s { for _, c := range s {
if !validateValueChar(c) { if !validateValueChar(c) {

View File

@ -83,7 +83,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
return fmt.Errorf("invalid code: %q", ci) return fmt.Errorf("invalid code: %q", ci)
} }
*c = Code(ci) *c = Code(ci) // nolint: gosec // Bit size of 32 check above.
return nil return nil
} }
return fmt.Errorf("invalid code: %q", string(b)) return fmt.Errorf("invalid code: %q", string(b))

View File

@ -17,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace.
To read more about metrics, see go.opentelemetry.io/otel/metric. To read more about metrics, see go.opentelemetry.io/otel/metric.
To read more about logs, see go.opentelemetry.io/otel/log.
To read more about propagation, see go.opentelemetry.io/otel/propagation and To read more about propagation, see go.opentelemetry.io/otel/propagation and
go.opentelemetry.io/otel/baggage. go.opentelemetry.io/otel/baggage.
*/ */

View File

@ -66,8 +66,9 @@ func WithInsecure() Option {
// //
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
// environment variable is set, and this option is not passed, that variable // environment variable is set, and this option is not passed, that variable
// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT // value will be used. If both environment variables are set,
// will take precedence. // OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment
// variable is set, and this option is passed, this option will take precedence.
// //
// If both this option and WithEndpointURL are used, the last used option will // If both this option and WithEndpointURL are used, the last used option will
// take precedence. // take precedence.
@ -84,8 +85,9 @@ func WithEndpoint(endpoint string) Option {
// //
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
// environment variable is set, and this option is not passed, that variable // environment variable is set, and this option is not passed, that variable
// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT // value will be used. If both environment variables are set,
// will take precedence. // OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment
// variable is set, and this option is passed, this option will take precedence.
// //
// If both this option and WithEndpoint are used, the last used option will // If both this option and WithEndpoint are used, the last used option will
// take precedence. // take precedence.
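A minimal sketch of how the clarified precedence plays out for the metrics exporter; the endpoint value is hypothetical, and an explicitly passed option overrides whatever OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT is set to.

package main

import (
    "context"
    "log"

    "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
)

func main() {
    ctx := context.Background()

    // With no endpoint option, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (or, failing
    // that, OTEL_EXPORTER_OTLP_ENDPOINT) is used. Passing WithEndpoint here
    // overrides both, per the clarified comment above.
    exp, err := otlpmetricgrpc.New(ctx,
        otlpmetricgrpc.WithEndpoint("collector.internal:4317"), // hypothetical target
        otlpmetricgrpc.WithInsecure(),
    )
    if err != nil {
        log.Fatal(err)
    }
    defer func() { _ = exp.Shutdown(ctx) }()
}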


@ -12,9 +12,8 @@ The environment variables described below can be used for configuration.
OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4317") - OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4317") -
target to which the exporter sends telemetry. target to which the exporter sends telemetry.
The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
The value must contain a host. The value must contain a scheme ("http" or "https") and host.
The value may additionally a port, a scheme, and a path. The value may additionally contain a port, and a path.
The value accepts "http" and "https" scheme.
The value should not contain a query string or fragment. The value should not contain a query string or fragment.
OTEL_EXPORTER_OTLP_METRICS_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT. OTEL_EXPORTER_OTLP_METRICS_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options. The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.


@ -15,6 +15,7 @@ import (
"strconv" "strconv"
"strings" "strings"
"time" "time"
"unicode"
"go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/internal/global"
) )
@ -163,12 +164,16 @@ func stringToHeader(value string) map[string]string {
global.Error(errors.New("missing '="), "parse headers", "input", header) global.Error(errors.New("missing '="), "parse headers", "input", header)
continue continue
} }
name, err := url.PathUnescape(n)
if err != nil { trimmedName := strings.TrimSpace(n)
global.Error(err, "escape header key", "key", n)
// Validate the key.
if !isValidHeaderKey(trimmedName) {
global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName)
continue continue
} }
trimmedName := strings.TrimSpace(name)
// Only decode the value.
value, err := url.PathUnescape(v) value, err := url.PathUnescape(v)
if err != nil { if err != nil {
global.Error(err, "escape header value", "value", v) global.Error(err, "escape header value", "value", v)
@ -189,3 +194,22 @@ func createCertPool(certBytes []byte) (*x509.CertPool, error) {
} }
return cp, nil return cp, nil
} }
func isValidHeaderKey(key string) bool {
if key == "" {
return false
}
for _, c := range key {
if !isTokenChar(c) {
return false
}
}
return true
}
func isTokenChar(c rune) bool {
return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
unicode.IsDigit(c) ||
c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
}
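A condensed, standalone sketch of the new header handling: keys are validated as RFC 7230 tokens and are no longer URL-unescaped, only values are. The helper names mirror the vendored code, but the parsing loop here is illustrative only.

package main

import (
    "fmt"
    "net/url"
    "strings"
    "unicode"
)

// isTokenChar reports whether c is a valid RFC 7230 header-field token character.
func isTokenChar(c rune) bool {
    return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
        unicode.IsDigit(c) ||
        strings.ContainsRune("!#$%&'*+-.^_`|~", c))
}

// isValidHeaderKey reports whether key is a non-empty token.
func isValidHeaderKey(key string) bool {
    if key == "" {
        return false
    }
    for _, c := range key {
        if !isTokenChar(c) {
            return false
        }
    }
    return true
}

func main() {
    for _, h := range []string{"api-key=secret%20token", "bad key=x", "=missing-name"} {
        n, v, _ := strings.Cut(h, "=")
        name := strings.TrimSpace(n)
        if !isValidHeaderKey(name) {
            fmt.Printf("dropped header %q\n", h) // key is rejected as-is, not unescaped
            continue
        }
        value, err := url.PathUnescape(v) // only the value is decoded
        if err != nil {
            fmt.Printf("dropped value %q\n", v)
            continue
        }
        fmt.Printf("%s: %s\n", name, value)
    }
}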


@ -139,7 +139,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
if cfg.ServiceConfig != "" { if cfg.ServiceConfig != "" {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
} }
// Priroritize GRPCCredentials over Insecure (passing both is an error). // Prioritize GRPCCredentials over Insecure (passing both is an error).
if cfg.Metrics.GRPCCredentials != nil { if cfg.Metrics.GRPCCredentials != nil {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials)) cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
} else if cfg.Metrics.Insecure { } else if cfg.Metrics.Insecure {


@ -14,7 +14,7 @@ import (
) )
// ReadTLSConfigFromFile reads a PEM certificate file and creates // ReadTLSConfigFromFile reads a PEM certificate file and creates
// a tls.Config that will use this certifate to verify a server certificate. // a tls.Config that will use this certificate to verify a server certificate.
func ReadTLSConfigFromFile(path string) (*tls.Config, error) { func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
b, err := os.ReadFile(path) b, err := os.ReadFile(path)
if err != nil { if err != nil {


@ -279,10 +279,7 @@ func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
// timeUnixNano on the zero Time returns 0. // timeUnixNano on the zero Time returns 0.
// The result does not depend on the location associated with t. // The result does not depend on the location associated with t.
func timeUnixNano(t time.Time) uint64 { func timeUnixNano(t time.Time) uint64 {
if t.IsZero() { return uint64(max(0, t.UnixNano())) // nolint:gosec // Overflow checked.
return 0
}
return uint64(t.UnixNano())
} }
// Exemplars returns a slice of OTLP Exemplars generated from exemplars. // Exemplars returns a slice of OTLP Exemplars generated from exemplars.
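The special case for the zero time can go because clamping covers it: time.Time{}.UnixNano() is negative, so max(0, …) already yields 0. A standalone sketch of the same conversion (Go 1.21+ for the built-in max):

package main

import (
    "fmt"
    "time"
)

// timeUnixNano mirrors the new conversion: negative nanosecond values (the
// zero time, or anything before 1970) clamp to 0 instead of wrapping around.
func timeUnixNano(t time.Time) uint64 {
    return uint64(max(0, t.UnixNano()))
}

func main() {
    fmt.Println(timeUnixNano(time.Time{}))                                 // 0
    fmt.Println(timeUnixNano(time.Unix(0, 42)))                            // 42
    fmt.Println(timeUnixNano(time.Date(1960, 1, 1, 0, 0, 0, 0, time.UTC))) // 0, clamped
}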


@ -5,5 +5,5 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme
// Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use. // Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use.
func Version() string { func Version() string {
return "1.28.0" return "1.31.0"
} }


@ -4,6 +4,8 @@
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
import ( import (
"math"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/instrumentation"
@ -95,16 +97,16 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
SpanId: sid[:], SpanId: sid[:],
TraceState: sd.SpanContext().TraceState().String(), TraceState: sd.SpanContext().TraceState().String(),
Status: status(sd.Status().Code, sd.Status().Description), Status: status(sd.Status().Code, sd.Status().Description),
StartTimeUnixNano: uint64(sd.StartTime().UnixNano()), StartTimeUnixNano: uint64(max(0, sd.StartTime().UnixNano())), // nolint:gosec // Overflow checked.
EndTimeUnixNano: uint64(sd.EndTime().UnixNano()), EndTimeUnixNano: uint64(max(0, sd.EndTime().UnixNano())), // nolint:gosec // Overflow checked.
Links: links(sd.Links()), Links: links(sd.Links()),
Kind: spanKind(sd.SpanKind()), Kind: spanKind(sd.SpanKind()),
Name: sd.Name(), Name: sd.Name(),
Attributes: KeyValues(sd.Attributes()), Attributes: KeyValues(sd.Attributes()),
Events: spanEvents(sd.Events()), Events: spanEvents(sd.Events()),
DroppedAttributesCount: uint32(sd.DroppedAttributes()), DroppedAttributesCount: clampUint32(sd.DroppedAttributes()),
DroppedEventsCount: uint32(sd.DroppedEvents()), DroppedEventsCount: clampUint32(sd.DroppedEvents()),
DroppedLinksCount: uint32(sd.DroppedLinks()), DroppedLinksCount: clampUint32(sd.DroppedLinks()),
} }
if psid := sd.Parent().SpanID(); psid.IsValid() { if psid := sd.Parent().SpanID(); psid.IsValid() {
@ -115,6 +117,16 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
return s return s
} }
func clampUint32(v int) uint32 {
if v < 0 {
return 0
}
if int64(v) > math.MaxUint32 {
return math.MaxUint32
}
return uint32(v) // nolint: gosec // Overflow/Underflow checked.
}
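clampUint32 replaces the plain uint32(...) casts so negative dropped counts and counts above math.MaxUint32 saturate instead of wrapping. A standalone sketch of the same conversion:

package main

import (
    "fmt"
    "math"
)

// clampUint32 mirrors the saturating conversion used for the dropped counts.
func clampUint32(v int) uint32 {
    if v < 0 {
        return 0
    }
    if int64(v) > math.MaxUint32 {
        return math.MaxUint32
    }
    return uint32(v)
}

func main() {
    fmt.Println(clampUint32(-1))          // 0
    fmt.Println(clampUint32(7))           // 7
    fmt.Println(clampUint32(math.MaxInt)) // 4294967295 on 64-bit platforms (saturated)
}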
// status transform a span code and message into an OTLP span status. // status transform a span code and message into an OTLP span status.
func status(status codes.Code, message string) *tracepb.Status { func status(status codes.Code, message string) *tracepb.Status {
var c tracepb.Status_StatusCode var c tracepb.Status_StatusCode
@ -153,7 +165,7 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link {
TraceId: tid[:], TraceId: tid[:],
SpanId: sid[:], SpanId: sid[:],
Attributes: KeyValues(otLink.Attributes), Attributes: KeyValues(otLink.Attributes),
DroppedAttributesCount: uint32(otLink.DroppedAttributeCount), DroppedAttributesCount: clampUint32(otLink.DroppedAttributeCount),
Flags: flags, Flags: flags,
}) })
} }
@ -166,7 +178,7 @@ func buildSpanFlags(sc trace.SpanContext) uint32 {
flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK
} }
return uint32(flags) return uint32(flags) // nolint:gosec // Flags is a bitmask and can't be negative
} }
// spanEvents transforms span Events to an OTLP span events. // spanEvents transforms span Events to an OTLP span events.
@ -180,9 +192,9 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {
for i := 0; i < len(es); i++ { for i := 0; i < len(es); i++ {
events[i] = &tracepb.Span_Event{ events[i] = &tracepb.Span_Event{
Name: es[i].Name, Name: es[i].Name,
TimeUnixNano: uint64(es[i].Time.UnixNano()), TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked.
Attributes: KeyValues(es[i].Attributes), Attributes: KeyValues(es[i].Attributes),
DroppedAttributesCount: uint32(es[i].DroppedAttributeCount), DroppedAttributesCount: clampUint32(es[i].DroppedAttributeCount),
} }
} }
return events return events


@ -12,9 +12,8 @@ The environment variables described below can be used for configuration.
OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") - OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") -
target to which the exporter sends telemetry. target to which the exporter sends telemetry.
The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
The value must contain a host. The value must contain a scheme ("http" or "https") and host.
The value may additionally a port, a scheme, and a path. The value may additionally contain a port, and a path.
The value accepts "http" and "https" scheme.
The value should not contain a query string or fragment. The value should not contain a query string or fragment.
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT. OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options. The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.


@ -15,6 +15,7 @@ import (
"strconv" "strconv"
"strings" "strings"
"time" "time"
"unicode"
"go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/internal/global"
) )
@ -163,12 +164,16 @@ func stringToHeader(value string) map[string]string {
global.Error(errors.New("missing '="), "parse headers", "input", header) global.Error(errors.New("missing '="), "parse headers", "input", header)
continue continue
} }
name, err := url.PathUnescape(n)
if err != nil { trimmedName := strings.TrimSpace(n)
global.Error(err, "escape header key", "key", n)
// Validate the key.
if !isValidHeaderKey(trimmedName) {
global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName)
continue continue
} }
trimmedName := strings.TrimSpace(name)
// Only decode the value.
value, err := url.PathUnescape(v) value, err := url.PathUnescape(v)
if err != nil { if err != nil {
global.Error(err, "escape header value", "value", v) global.Error(err, "escape header value", "value", v)
@ -189,3 +194,22 @@ func createCertPool(certBytes []byte) (*x509.CertPool, error) {
} }
return cp, nil return cp, nil
} }
func isValidHeaderKey(key string) bool {
if key == "" {
return false
}
for _, c := range key {
if !isTokenChar(c) {
return false
}
}
return true
}
func isTokenChar(c rune) bool {
return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
unicode.IsDigit(c) ||
c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
}


@ -125,7 +125,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
if cfg.ServiceConfig != "" { if cfg.ServiceConfig != "" {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
} }
// Priroritize GRPCCredentials over Insecure (passing both is an error). // Prioritize GRPCCredentials over Insecure (passing both is an error).
if cfg.Traces.GRPCCredentials != nil { if cfg.Traces.GRPCCredentials != nil {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials)) cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
} else if cfg.Traces.Insecure { } else if cfg.Traces.Insecure {


@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
// Version is the current release version of the OpenTelemetry OTLP trace exporter in use. // Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
func Version() string { func Version() string {
return "1.28.0" return "1.31.0"
} }


@ -5,8 +5,8 @@ package global // import "go.opentelemetry.io/otel/internal/global"
import ( import (
"container/list" "container/list"
"reflect"
"sync" "sync"
"sync/atomic"
"go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/embedded" "go.opentelemetry.io/otel/metric/embedded"
@ -76,7 +76,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me
return val return val
} }
t := &meter{name: name, opts: opts} t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)}
p.meters[key] = t p.meters[key] = t
return t return t
} }
@ -92,17 +92,29 @@ type meter struct {
opts []metric.MeterOption opts []metric.MeterOption
mtx sync.Mutex mtx sync.Mutex
instruments []delegatedInstrument instruments map[instID]delegatedInstrument
registry list.List registry list.List
delegate atomic.Value // metric.Meter delegate metric.Meter
} }
type delegatedInstrument interface { type delegatedInstrument interface {
setDelegate(metric.Meter) setDelegate(metric.Meter)
} }
// instID are the identifying properties of an instrument.
type instID struct {
// name is the name of the stream.
name string
// description is the description of the stream.
description string
// kind defines the functional group of the instrument.
kind reflect.Type
// unit is the unit of the stream.
unit string
}
// setDelegate configures m to delegate all Meter functionality to Meters // setDelegate configures m to delegate all Meter functionality to Meters
// created by provider. // created by provider.
// //
@ -110,12 +122,12 @@ type delegatedInstrument interface {
// //
// It is guaranteed by the caller that this happens only once. // It is guaranteed by the caller that this happens only once.
func (m *meter) setDelegate(provider metric.MeterProvider) { func (m *meter) setDelegate(provider metric.MeterProvider) {
meter := provider.Meter(m.name, m.opts...)
m.delegate.Store(meter)
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
meter := provider.Meter(m.name, m.opts...)
m.delegate = meter
for _, inst := range m.instruments { for _, inst := range m.instruments {
inst.setDelegate(meter) inst.setDelegate(meter)
} }
@ -133,169 +145,337 @@ func (m *meter) setDelegate(provider metric.MeterProvider) {
} }
func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Int64Counter(name, options...)
}
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
if m.delegate != nil {
return m.delegate.Int64Counter(name, options...)
}
cfg := metric.NewInt64CounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*siCounter)(nil)),
description: cfg.Description(),
unit: cfg.Unit(),
}
if f, ok := m.instruments[id]; ok {
return f.(metric.Int64Counter), nil
}
i := &siCounter{name: name, opts: options} i := &siCounter{name: name, opts: options}
m.instruments = append(m.instruments, i) m.instruments[id] = i
return i, nil return i, nil
} }
func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Int64UpDownCounter(name, options...)
}
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
if m.delegate != nil {
return m.delegate.Int64UpDownCounter(name, options...)
}
cfg := metric.NewInt64UpDownCounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*siUpDownCounter)(nil)),
description: cfg.Description(),
unit: cfg.Unit(),
}
if f, ok := m.instruments[id]; ok {
return f.(metric.Int64UpDownCounter), nil
}
i := &siUpDownCounter{name: name, opts: options} i := &siUpDownCounter{name: name, opts: options}
m.instruments = append(m.instruments, i) m.instruments[id] = i
return i, nil return i, nil
} }
func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Int64Histogram(name, options...)
}
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
if m.delegate != nil {
return m.delegate.Int64Histogram(name, options...)
}
cfg := metric.NewInt64HistogramConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*siHistogram)(nil)),
description: cfg.Description(),
unit: cfg.Unit(),
}
if f, ok := m.instruments[id]; ok {
return f.(metric.Int64Histogram), nil
}
i := &siHistogram{name: name, opts: options} i := &siHistogram{name: name, opts: options}
m.instruments = append(m.instruments, i) m.instruments[id] = i
return i, nil return i, nil
} }
func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Int64Gauge(name, options...)
}
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
if m.delegate != nil {
return m.delegate.Int64Gauge(name, options...)
}
cfg := metric.NewInt64GaugeConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*siGauge)(nil)),
description: cfg.Description(),
unit: cfg.Unit(),
}
if f, ok := m.instruments[id]; ok {
return f.(metric.Int64Gauge), nil
}
i := &siGauge{name: name, opts: options} i := &siGauge{name: name, opts: options}
m.instruments = append(m.instruments, i) m.instruments[id] = i
return i, nil return i, nil
} }
func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Int64ObservableCounter(name, options...)
}
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
if m.delegate != nil {
return m.delegate.Int64ObservableCounter(name, options...)
}
cfg := metric.NewInt64ObservableCounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*aiCounter)(nil)),
description: cfg.Description(),
unit: cfg.Unit(),
}
if f, ok := m.instruments[id]; ok {
return f.(metric.Int64ObservableCounter), nil
}
i := &aiCounter{name: name, opts: options} i := &aiCounter{name: name, opts: options}
m.instruments = append(m.instruments, i) m.instruments[id] = i
return i, nil return i, nil
} }
func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Int64ObservableUpDownCounter(name, options...)
}
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
if m.delegate != nil {
return m.delegate.Int64ObservableUpDownCounter(name, options...)
}
cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*aiUpDownCounter)(nil)),
description: cfg.Description(),
unit: cfg.Unit(),
}
if f, ok := m.instruments[id]; ok {
return f.(metric.Int64ObservableUpDownCounter), nil
}
i := &aiUpDownCounter{name: name, opts: options} i := &aiUpDownCounter{name: name, opts: options}
m.instruments = append(m.instruments, i) m.instruments[id] = i
return i, nil return i, nil
} }
func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Int64ObservableGauge(name, options...)
}
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
if m.delegate != nil {
return m.delegate.Int64ObservableGauge(name, options...)
}
cfg := metric.NewInt64ObservableGaugeConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*aiGauge)(nil)),
description: cfg.Description(),
unit: cfg.Unit(),
}
if f, ok := m.instruments[id]; ok {
return f.(metric.Int64ObservableGauge), nil
}
i := &aiGauge{name: name, opts: options} i := &aiGauge{name: name, opts: options}
m.instruments = append(m.instruments, i) m.instruments[id] = i
return i, nil return i, nil
} }
func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Float64Counter(name, options...)
}
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
if m.delegate != nil {
return m.delegate.Float64Counter(name, options...)
}
cfg := metric.NewFloat64CounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*sfCounter)(nil)),
description: cfg.Description(),
unit: cfg.Unit(),
}
if f, ok := m.instruments[id]; ok {
return f.(metric.Float64Counter), nil
}
i := &sfCounter{name: name, opts: options} i := &sfCounter{name: name, opts: options}
m.instruments = append(m.instruments, i) m.instruments[id] = i
return i, nil return i, nil
} }
func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Float64UpDownCounter(name, options...)
}
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
if m.delegate != nil {
return m.delegate.Float64UpDownCounter(name, options...)
}
cfg := metric.NewFloat64UpDownCounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*sfUpDownCounter)(nil)),
description: cfg.Description(),
unit: cfg.Unit(),
}
if f, ok := m.instruments[id]; ok {
return f.(metric.Float64UpDownCounter), nil
}
i := &sfUpDownCounter{name: name, opts: options} i := &sfUpDownCounter{name: name, opts: options}
m.instruments = append(m.instruments, i) m.instruments[id] = i
return i, nil return i, nil
} }
func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Float64Histogram(name, options...)
}
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
if m.delegate != nil {
return m.delegate.Float64Histogram(name, options...)
}
cfg := metric.NewFloat64HistogramConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*sfHistogram)(nil)),
description: cfg.Description(),
unit: cfg.Unit(),
}
if f, ok := m.instruments[id]; ok {
return f.(metric.Float64Histogram), nil
}
i := &sfHistogram{name: name, opts: options} i := &sfHistogram{name: name, opts: options}
m.instruments = append(m.instruments, i) m.instruments[id] = i
return i, nil return i, nil
} }
func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Float64Gauge(name, options...)
}
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
if m.delegate != nil {
return m.delegate.Float64Gauge(name, options...)
}
cfg := metric.NewFloat64GaugeConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*sfGauge)(nil)),
description: cfg.Description(),
unit: cfg.Unit(),
}
if f, ok := m.instruments[id]; ok {
return f.(metric.Float64Gauge), nil
}
i := &sfGauge{name: name, opts: options} i := &sfGauge{name: name, opts: options}
m.instruments = append(m.instruments, i) m.instruments[id] = i
return i, nil return i, nil
} }
func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Float64ObservableCounter(name, options...)
}
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
if m.delegate != nil {
return m.delegate.Float64ObservableCounter(name, options...)
}
cfg := metric.NewFloat64ObservableCounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*afCounter)(nil)),
description: cfg.Description(),
unit: cfg.Unit(),
}
if f, ok := m.instruments[id]; ok {
return f.(metric.Float64ObservableCounter), nil
}
i := &afCounter{name: name, opts: options} i := &afCounter{name: name, opts: options}
m.instruments = append(m.instruments, i) m.instruments[id] = i
return i, nil return i, nil
} }
func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Float64ObservableUpDownCounter(name, options...)
}
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
if m.delegate != nil {
return m.delegate.Float64ObservableUpDownCounter(name, options...)
}
cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*afUpDownCounter)(nil)),
description: cfg.Description(),
unit: cfg.Unit(),
}
if f, ok := m.instruments[id]; ok {
return f.(metric.Float64ObservableUpDownCounter), nil
}
i := &afUpDownCounter{name: name, opts: options} i := &afUpDownCounter{name: name, opts: options}
m.instruments = append(m.instruments, i) m.instruments[id] = i
return i, nil return i, nil
} }
func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Float64ObservableGauge(name, options...)
}
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
if m.delegate != nil {
return m.delegate.Float64ObservableGauge(name, options...)
}
cfg := metric.NewFloat64ObservableGaugeConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*afGauge)(nil)),
description: cfg.Description(),
unit: cfg.Unit(),
}
if f, ok := m.instruments[id]; ok {
return f.(metric.Float64ObservableGauge), nil
}
i := &afGauge{name: name, opts: options} i := &afGauge{name: name, opts: options}
m.instruments = append(m.instruments, i) m.instruments[id] = i
return i, nil return i, nil
} }
// RegisterCallback captures the function that will be called during Collect. // RegisterCallback captures the function that will be called during Collect.
func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
insts = unwrapInstruments(insts)
return del.RegisterCallback(f, insts...)
}
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
if m.delegate != nil {
insts = unwrapInstruments(insts)
return m.delegate.RegisterCallback(f, insts...)
}
reg := &registration{instruments: insts, function: f} reg := &registration{instruments: insts, function: f}
e := m.registry.PushBack(reg) e := m.registry.PushBack(reg)
reg.unreg = func() error { reg.unreg = func() error {
@ -349,6 +529,7 @@ func (c *registration) setDelegate(m metric.Meter) {
reg, err := m.RegisterCallback(c.function, insts...) reg, err := m.RegisterCallback(c.function, insts...)
if err != nil { if err != nil {
GetErrorHandler().Handle(err) GetErrorHandler().Handle(err)
return
} }
c.unreg = reg.Unregister c.unreg = reg.Unregister
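The practical effect of keying instruments by name, kind, description, and unit: repeatedly creating the same instrument on the not-yet-delegated global meter returns the same object instead of appending to an unbounded slice. A small sketch, assuming no SDK MeterProvider has been installed yet:

package main

import (
    "fmt"

    "go.opentelemetry.io/otel"
)

func main() {
    // Before an SDK is installed, otel.Meter returns the global delegating
    // meter shown above.
    m := otel.Meter("example")

    a, _ := m.Int64Counter("requests")
    b, _ := m.Int64Counter("requests")

    // With the map-based lookup, both calls yield the same delegating
    // instrument (same name, kind, description, and unit).
    fmt.Println(a == b) // true
}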


@ -20,11 +20,13 @@ func RawToBool(r uint64) bool {
} }
func Int64ToRaw(i int64) uint64 { func Int64ToRaw(i int64) uint64 {
return uint64(i) // Assumes original was a valid int64 (overflow not checked).
return uint64(i) // nolint: gosec
} }
func RawToInt64(r uint64) int64 { func RawToInt64(r uint64) int64 {
return int64(r) // Assumes original was a valid int64 (overflow not checked).
return int64(r) // nolint: gosec
} }
func Float64ToRaw(f float64) uint64 { func Float64ToRaw(f float64) uint64 {
@ -36,9 +38,11 @@ func RawToFloat64(r uint64) float64 {
} }
func RawPtrToFloat64Ptr(r *uint64) *float64 { func RawPtrToFloat64Ptr(r *uint64) *float64 {
return (*float64)(unsafe.Pointer(r)) // Assumes original was a valid *float64 (overflow not checked).
return (*float64)(unsafe.Pointer(r)) // nolint: gosec
} }
func RawPtrToInt64Ptr(r *uint64) *int64 { func RawPtrToInt64Ptr(r *uint64) *int64 {
return (*int64)(unsafe.Pointer(r)) // Assumes original was a valid *int64 (overflow not checked).
return (*int64)(unsafe.Pointer(r)) // nolint: gosec
} }
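These helpers only reinterpret bits; the new comments and nolint directives document that the conversion assumes the uint64 originally came from an int64. Under that assumption the round trip is lossless, as this small sketch shows:

package main

import "fmt"

func main() {
    // Int64ToRaw / RawToInt64 reinterpret the same 64 bits; two's complement
    // makes the int64 -> uint64 -> int64 round trip exact.
    i := int64(-42)
    raw := uint64(i)
    fmt.Println(int64(raw) == i) // true
}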


@ -213,7 +213,7 @@ type Float64Observer interface {
} }
// Float64Callback is a function registered with a Meter that makes // Float64Callback is a function registered with a Meter that makes
// observations for a Float64Observerable instrument it is registered with. // observations for a Float64Observable instrument it is registered with.
// Calls to the Float64Observer record measurement values for the // Calls to the Float64Observer record measurement values for the
// Float64Observable. // Float64Observable.
// //


@ -212,7 +212,7 @@ type Int64Observer interface {
} }
// Int64Callback is a function registered with a Meter that makes observations // Int64Callback is a function registered with a Meter that makes observations
// for an Int64Observerable instrument it is registered with. Calls to the // for an Int64Observable instrument it is registered with. Calls to the
// Int64Observer record measurement values for the Int64Observable. // Int64Observer record measurement values for the Int64Observable.
// //
// The function needs to complete in a finite amount of time and the deadline // The function needs to complete in a finite amount of time and the deadline


@ -351,7 +351,7 @@ func WithAttributeSet(attributes attribute.Set) MeasurementOption {
// //
// cp := make([]attribute.KeyValue, len(attributes)) // cp := make([]attribute.KeyValue, len(attributes))
// copy(cp, attributes) // copy(cp, attributes)
// WithAttributes(attribute.NewSet(cp...)) // WithAttributeSet(attribute.NewSet(cp...))
// //
// [attribute.NewSet] may modify the passed attributes so this will make a copy // [attribute.NewSet] may modify the passed attributes so this will make a copy
// of attributes before creating a set in order to ensure this function is // of attributes before creating a set in order to ensure this function is

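The corrected example in the comment refers to WithAttributeSet rather than WithAttributes. For reference, a hedged sketch of using a pre-computed attribute set with a counter (instrument and attribute names are illustrative):

package main

import (
    "context"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/metric"
)

func main() {
    ctx := context.Background()
    counter, _ := otel.Meter("example").Int64Counter("requests")

    // Build the set once and reuse it; WithAttributeSet avoids re-sorting the
    // attributes on every measurement.
    set := attribute.NewSet(attribute.String("route", "/healthz"))
    counter.Add(ctx, 1, metric.WithAttributeSet(set))
}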

@ -52,6 +52,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more // See the Instrument Name section of the package documentation for more
// information. // information.
Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
// Int64UpDownCounter returns a new Int64UpDownCounter instrument // Int64UpDownCounter returns a new Int64UpDownCounter instrument
// identified by name and configured with options. The instrument is used // identified by name and configured with options. The instrument is used
// to synchronously record int64 measurements during a computational // to synchronously record int64 measurements during a computational
@ -61,6 +62,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more // See the Instrument Name section of the package documentation for more
// information. // information.
Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
// Int64Histogram returns a new Int64Histogram instrument identified by // Int64Histogram returns a new Int64Histogram instrument identified by
// name and configured with options. The instrument is used to // name and configured with options. The instrument is used to
// synchronously record the distribution of int64 measurements during a // synchronously record the distribution of int64 measurements during a
@ -70,6 +72,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more // See the Instrument Name section of the package documentation for more
// information. // information.
Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
// Int64Gauge returns a new Int64Gauge instrument identified by name and // Int64Gauge returns a new Int64Gauge instrument identified by name and
// configured with options. The instrument is used to synchronously record // configured with options. The instrument is used to synchronously record
// instantaneous int64 measurements during a computational operation. // instantaneous int64 measurements during a computational operation.
@ -78,6 +81,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more // See the Instrument Name section of the package documentation for more
// information. // information.
Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error)
// Int64ObservableCounter returns a new Int64ObservableCounter identified // Int64ObservableCounter returns a new Int64ObservableCounter identified
// by name and configured with options. The instrument is used to // by name and configured with options. The instrument is used to
// asynchronously record increasing int64 measurements once per a // asynchronously record increasing int64 measurements once per a
@ -92,6 +96,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more // See the Instrument Name section of the package documentation for more
// information. // information.
Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
// Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
// instrument identified by name and configured with options. The // instrument identified by name and configured with options. The
// instrument is used to asynchronously record int64 measurements once per // instrument is used to asynchronously record int64 measurements once per
@ -106,6 +111,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more // See the Instrument Name section of the package documentation for more
// information. // information.
Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
// Int64ObservableGauge returns a new Int64ObservableGauge instrument // Int64ObservableGauge returns a new Int64ObservableGauge instrument
// identified by name and configured with options. The instrument is used // identified by name and configured with options. The instrument is used
// to asynchronously record instantaneous int64 measurements once per a // to asynchronously record instantaneous int64 measurements once per a
@ -130,6 +136,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more // See the Instrument Name section of the package documentation for more
// information. // information.
Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
// Float64UpDownCounter returns a new Float64UpDownCounter instrument // Float64UpDownCounter returns a new Float64UpDownCounter instrument
// identified by name and configured with options. The instrument is used // identified by name and configured with options. The instrument is used
// to synchronously record float64 measurements during a computational // to synchronously record float64 measurements during a computational
@ -139,6 +146,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more // See the Instrument Name section of the package documentation for more
// information. // information.
Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
// Float64Histogram returns a new Float64Histogram instrument identified by // Float64Histogram returns a new Float64Histogram instrument identified by
// name and configured with options. The instrument is used to // name and configured with options. The instrument is used to
// synchronously record the distribution of float64 measurements during a // synchronously record the distribution of float64 measurements during a
@ -148,6 +156,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more // See the Instrument Name section of the package documentation for more
// information. // information.
Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
// Float64Gauge returns a new Float64Gauge instrument identified by name and // Float64Gauge returns a new Float64Gauge instrument identified by name and
// configured with options. The instrument is used to synchronously record // configured with options. The instrument is used to synchronously record
// instantaneous float64 measurements during a computational operation. // instantaneous float64 measurements during a computational operation.
@ -156,6 +165,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more // See the Instrument Name section of the package documentation for more
// information. // information.
Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error)
// Float64ObservableCounter returns a new Float64ObservableCounter // Float64ObservableCounter returns a new Float64ObservableCounter
// instrument identified by name and configured with options. The // instrument identified by name and configured with options. The
// instrument is used to asynchronously record increasing float64 // instrument is used to asynchronously record increasing float64
@ -170,6 +180,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more // See the Instrument Name section of the package documentation for more
// information. // information.
Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
// Float64ObservableUpDownCounter returns a new // Float64ObservableUpDownCounter returns a new
// Float64ObservableUpDownCounter instrument identified by name and // Float64ObservableUpDownCounter instrument identified by name and
// configured with options. The instrument is used to asynchronously record // configured with options. The instrument is used to asynchronously record
@ -184,6 +195,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more // See the Instrument Name section of the package documentation for more
// information. // information.
Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
// Float64ObservableGauge returns a new Float64ObservableGauge instrument // Float64ObservableGauge returns a new Float64ObservableGauge instrument
// identified by name and configured with options. The instrument is used // identified by name and configured with options. The instrument is used
// to asynchronously record instantaneous float64 measurements once per a // to asynchronously record instantaneous float64 measurements once per a
@ -242,6 +254,7 @@ type Observer interface {
// ObserveFloat64 records the float64 value for obsrv. // ObserveFloat64 records the float64 value for obsrv.
ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
// ObserveInt64 records the int64 value for obsrv. // ObserveInt64 records the int64 value for obsrv.
ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
} }


@ -19,6 +19,14 @@
"matchManagers": ["gomod"], "matchManagers": ["gomod"],
"matchDepTypes": ["indirect"], "matchDepTypes": ["indirect"],
"enabled": false "enabled": false
},
{
"matchPackageNames": ["google.golang.org/genproto/googleapis/**"],
"groupName": "googleapis"
},
{
"matchPackageNames": ["golang.org/x/**"],
"groupName": "golang.org/x"
} }
] ]
} }


@ -4,5 +4,6 @@
package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
// Library represents the instrumentation library. // Library represents the instrumentation library.
// Deprecated: please use Scope instead. //
// Deprecated: use [Scope] instead.
type Library = Scope type Library = Scope


@ -8,6 +8,7 @@ import (
"fmt" "fmt"
"sync" "sync"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/sdk/resource" "go.opentelemetry.io/otel/sdk/resource"
) )
@ -103,7 +104,11 @@ func (o optionFunc) apply(conf config) config {
// go.opentelemetry.io/otel/sdk/resource package will be used. // go.opentelemetry.io/otel/sdk/resource package will be used.
func WithResource(res *resource.Resource) Option { func WithResource(res *resource.Resource) Option {
return optionFunc(func(conf config) config { return optionFunc(func(conf config) config {
conf.res = res var err error
conf.res, err = resource.Merge(resource.Environment(), res)
if err != nil {
otel.Handle(err)
}
return conf return conf
}) })
} }
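With this change, a resource passed via WithResource is merged with resource.Environment(), so attributes from OTEL_RESOURCE_ATTRIBUTES and OTEL_SERVICE_NAME are no longer silently dropped. A standalone sketch of the same merge (the service name is hypothetical):

package main

import (
    "fmt"

    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/sdk/resource"
)

func main() {
    // The updated WithResource does this merge internally: the explicitly
    // passed resource (second argument) wins on conflicts, but environment
    // attributes are preserved.
    res, err := resource.Merge(
        resource.Environment(),
        resource.NewSchemaless(attribute.String("service.name", "checkout")),
    )
    if err != nil {
        fmt.Println("merge:", err)
    }
    fmt.Println(res)
}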


@ -31,6 +31,14 @@
// is being run on. That way when multiple instances of the code are collected // is being run on. That way when multiple instances of the code are collected
// at a single endpoint their origin is decipherable. // at a single endpoint their origin is decipherable.
// //
// To avoid leaking memory, the SDK returns the same instrument for calls to
// create new instruments with the same Name, Unit, and Description.
// Importantly, callbacks provided using metric.WithFloat64Callback or
// metric.WithInt64Callback will only apply for the first instrument created
// with a given Name, Unit, and Description. Instead, use
// Meter.RegisterCallback and Registration.Unregister to add and remove
// callbacks without leaking memory.
//
// See [go.opentelemetry.io/otel/metric] for more information about // See [go.opentelemetry.io/otel/metric] for more information about
// the metric API. // the metric API.
// //
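Following the guidance in the new doc comment, callbacks are better registered through Meter.RegisterCallback so they can be removed again; a hedged sketch (instrument name and observed value are illustrative):

package main

import (
    "context"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/metric"
)

func main() {
    meter := otel.Meter("example")
    gauge, _ := meter.Int64ObservableGauge("queue.depth")

    // Register the callback separately rather than via WithInt64Callback so it
    // can be unregistered; per the note above, callback options on a duplicate
    // instrument creation only apply to the first instrument created.
    reg, _ := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
        o.ObserveInt64(gauge, 42)
        return nil
    }, gauge)
    defer func() { _ = reg.Unregister() }()
}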


@ -8,8 +8,8 @@ import (
"runtime" "runtime"
"slices" "slices"
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar" "go.opentelemetry.io/otel/sdk/metric/exemplar"
"go.opentelemetry.io/otel/sdk/metric/internal/x" "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
) )
// reservoirFunc returns the appropriately configured exemplar reservoir // reservoirFunc returns the appropriately configured exemplar reservoir
@ -19,10 +19,7 @@ import (
// Note: This will only return non-nil values when the experimental exemplar // Note: This will only return non-nil values when the experimental exemplar
// feature is enabled and the OTEL_METRICS_EXEMPLAR_FILTER environment variable // feature is enabled and the OTEL_METRICS_EXEMPLAR_FILTER environment variable
// is not set to always_off. // is not set to always_off.
func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.FilteredReservoir[N] { func reservoirFunc[N int64 | float64](agg Aggregation) func() aggregate.FilteredExemplarReservoir[N] {
if !x.Exemplars.Enabled() {
return nil
}
// https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar
const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER" const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER"
@ -32,11 +29,11 @@ func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.FilteredR
case "always_on": case "always_on":
filter = exemplar.AlwaysOnFilter filter = exemplar.AlwaysOnFilter
case "always_off": case "always_off":
return exemplar.Drop return aggregate.DropReservoir
case "trace_based": case "trace_based":
fallthrough fallthrough
default: default:
filter = exemplar.SampledFilter filter = exemplar.TraceBasedFilter
} }
// https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults
@ -45,9 +42,9 @@ func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.FilteredR
a, ok := agg.(AggregationExplicitBucketHistogram) a, ok := agg.(AggregationExplicitBucketHistogram)
if ok && len(a.Boundaries) > 0 { if ok && len(a.Boundaries) > 0 {
cp := slices.Clone(a.Boundaries) cp := slices.Clone(a.Boundaries)
return func() exemplar.FilteredReservoir[N] { return func() aggregate.FilteredExemplarReservoir[N] {
bounds := cp bounds := cp
return exemplar.NewFilteredReservoir[N](filter, exemplar.Histogram(bounds)) return aggregate.NewFilteredExemplarReservoir[N](filter, exemplar.NewHistogramReservoir(bounds))
} }
} }
@ -75,7 +72,7 @@ func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.FilteredR
} }
} }
return func() exemplar.FilteredReservoir[N] { return func() aggregate.FilteredExemplarReservoir[N] {
return exemplar.NewFilteredReservoir[N](filter, exemplar.FixedSize(n)) return aggregate.NewFilteredExemplarReservoir[N](filter, exemplar.NewFixedSizeReservoir(n))
} }
} }


@ -0,0 +1,3 @@
# Metric SDK Exemplars
[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/metric/exemplar)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric/exemplar)


@ -3,4 +3,4 @@
// Package exemplar provides an implementation of the OpenTelemetry exemplar // Package exemplar provides an implementation of the OpenTelemetry exemplar
// reservoir to be used in metric collection pipelines. // reservoir to be used in metric collection pipelines.
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"


@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
import ( import (
"time" "time"


@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
import ( import (
"context" "context"
@ -16,10 +16,10 @@ import (
// Reservoir in making a sampling decision. // Reservoir in making a sampling decision.
type Filter func(context.Context) bool type Filter func(context.Context) bool
// SampledFilter is a [Filter] that will only offer measurements // TraceBasedFilter is a [Filter] that will only offer measurements
// if the passed context associated with the measurement contains a sampled // if the passed context associated with the measurement contains a sampled
// [go.opentelemetry.io/otel/trace.SpanContext]. // [go.opentelemetry.io/otel/trace.SpanContext].
func SampledFilter(ctx context.Context) bool { func TraceBasedFilter(ctx context.Context) bool {
return trace.SpanContextFromContext(ctx).IsSampled() return trace.SpanContextFromContext(ctx).IsSampled()
} }


@ -1,31 +1,62 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
import ( import (
"context" "context"
"math" "math"
"math/rand" "math/rand"
"sync"
"time" "time"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
) )
var ( // NewFixedSizeReservoir returns a [FixedSizeReservoir] that samples at most
// k exemplars. If there are k or less measurements made, the Reservoir will
// sample each one. If there are more than k, the Reservoir will then randomly
// sample all additional measurement with a decreasing probability.
func NewFixedSizeReservoir(k int) *FixedSizeReservoir {
return newFixedSizeReservoir(newStorage(k))
}
var _ Reservoir = &FixedSizeReservoir{}
// FixedSizeReservoir is a [Reservoir] that samples at most k exemplars. If
// there are k or less measurements made, the Reservoir will sample each one.
// If there are more than k, the Reservoir will then randomly sample all
// additional measurement with a decreasing probability.
type FixedSizeReservoir struct {
*storage
// count is the number of measurement seen.
count int64
// next is the next count that will store a measurement at a random index
// once the reservoir has been filled.
next int64
// w is the largest random number in a distribution that is used to compute
// the next next.
w float64
// rng is used to make sampling decisions. // rng is used to make sampling decisions.
// //
// Do not use crypto/rand. There is no reason for the decrease in performance // Do not use crypto/rand. There is no reason for the decrease in performance
// given this is not a security sensitive decision. // given this is not a security sensitive decision.
rng = rand.New(rand.NewSource(time.Now().UnixNano())) rng *rand.Rand
// Ensure concurrent safe accecess to rng and its underlying source. }
rngMu sync.Mutex
)
// random returns, as a float64, a uniform pseudo-random number in the open func newFixedSizeReservoir(s *storage) *FixedSizeReservoir {
// interval (0.0,1.0). r := &FixedSizeReservoir{
func random() float64 { storage: s,
rng: rand.New(rand.NewSource(time.Now().UnixNano())),
}
r.reset()
return r
}
// randomFloat64 returns, as a float64, a uniform pseudo-random number in the
// open interval (0.0,1.0).
func (r *FixedSizeReservoir) randomFloat64() float64 {
// TODO: This does not return a uniform number. rng.Float64 returns a // TODO: This does not return a uniform number. rng.Float64 returns a
// uniformly random int in [0,2^53) that is divided by 2^53. Meaning it // uniformly random int in [0,2^53) that is divided by 2^53. Meaning it
// returns multiples of 2^-53, and not all floating point numbers between 0 // returns multiples of 2^-53, and not all floating point numbers between 0
@ -43,40 +74,25 @@ func random() float64 {
// //
// There are likely many other methods to explore here as well. // There are likely many other methods to explore here as well.
rngMu.Lock() f := r.rng.Float64()
defer rngMu.Unlock()
f := rng.Float64()
for f == 0 { for f == 0 {
f = rng.Float64() f = r.rng.Float64()
} }
return f return f
} }
// FixedSize returns a [Reservoir] that samples at most k exemplars. If there // Offer accepts the parameters associated with a measurement. The
// are k or less measurements made, the Reservoir will sample each one. If // parameters will be stored as an exemplar if the Reservoir decides to
// there are more than k, the Reservoir will then randomly sample all // sample the measurement.
// additional measurement with a decreasing probability. //
func FixedSize(k int) Reservoir { // The passed ctx needs to contain any baggage or span that were active
r := &randRes{storage: newStorage(k)} // when the measurement was made. This information may be used by the
r.reset() // Reservoir in making a sampling decision.
return r //
} // The time t is the time when the measurement was made. The v and a
// parameters are the value and dropped (filtered) attributes of the
type randRes struct { // measurement respectively.
*storage func (r *FixedSizeReservoir) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) {
// count is the number of measurement seen.
count int64
// next is the next count that will store a measurement at a random index
// once the reservoir has been filled.
next int64
// w is the largest random number in a distribution that is used to compute
// the next next.
w float64
}
func (r *randRes) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) {
// The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December // The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December
// 1994). "Reservoir-Sampling Algorithms of Time Complexity // 1994). "Reservoir-Sampling Algorithms of Time Complexity
// O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4): // O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4):
@ -123,7 +139,7 @@ func (r *randRes) Offer(ctx context.Context, t time.Time, n Value, a []attribute
} else { } else {
if r.count == r.next { if r.count == r.next {
// Overwrite a random existing measurement with the one offered. // Overwrite a random existing measurement with the one offered.
idx := int(rng.Int63n(int64(cap(r.store)))) idx := int(r.rng.Int63n(int64(cap(r.store))))
r.store[idx] = newMeasurement(ctx, t, n, a) r.store[idx] = newMeasurement(ctx, t, n, a)
r.advance() r.advance()
} }
@ -132,7 +148,7 @@ func (r *randRes) Offer(ctx context.Context, t time.Time, n Value, a []attribute
} }
// reset resets r to the initial state. // reset resets r to the initial state.
func (r *randRes) reset() { func (r *FixedSizeReservoir) reset() {
// This resets the number of exemplars known. // This resets the number of exemplars known.
r.count = 0 r.count = 0
// Random index inserts should only happen after the storage is full. // Random index inserts should only happen after the storage is full.
@ -147,14 +163,14 @@ func (r *randRes) reset() {
// This maps the uniform random number in (0,1) to a geometric distribution // This maps the uniform random number in (0,1) to a geometric distribution
// over the same interval. The mean of the distribution is inversely // over the same interval. The mean of the distribution is inversely
// proportional to the storage capacity. // proportional to the storage capacity.
r.w = math.Exp(math.Log(random()) / float64(cap(r.store))) r.w = math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store)))
r.advance() r.advance()
} }
// advance updates the count at which the offered measurement will overwrite an // advance updates the count at which the offered measurement will overwrite an
// existing exemplar. // existing exemplar.
func (r *randRes) advance() { func (r *FixedSizeReservoir) advance() {
// Calculate the next value in the random number series. // Calculate the next value in the random number series.
// //
// The current value of r.w is based on the max of a distribution of random // The current value of r.w is based on the max of a distribution of random
@ -167,7 +183,7 @@ func (r *randRes) advance() {
// therefore the next r.w will be based on the same distribution (i.e. // therefore the next r.w will be based on the same distribution (i.e.
// `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by // `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by
// computing the next random number `u` and take r.w as `w * u^(1/k)`. // computing the next random number `u` and take r.w as `w * u^(1/k)`.
r.w *= math.Exp(math.Log(random()) / float64(cap(r.store))) r.w *= math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store)))
// Use the new random number in the series to calculate the count of the // Use the new random number in the series to calculate the count of the
// next measurement that will be stored. // next measurement that will be stored.
// //
@ -178,10 +194,13 @@ func (r *randRes) advance() {
// //
// Important to note, the new r.next will always be at least 1 more than // Important to note, the new r.next will always be at least 1 more than
// the last r.next. // the last r.next.
r.next += int64(math.Log(random())/math.Log(1-r.w)) + 1 r.next += int64(math.Log(r.randomFloat64())/math.Log(1-r.w)) + 1
} }
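As context for the hunk above: the reservoir implements Li's "Algorithm L", where w tracks the running maximum of a k-sample distribution and next is advanced by a geometrically distributed skip. Below is a minimal standalone sketch of the same idea; it samples ints instead of exemplar measurements, assumes k >= 1, and every identifier is illustrative rather than part of the vendored code.

package main

import (
    "fmt"
    "math"
    "math/rand"
)

// Illustrative sketch of Algorithm L; not the vendored FixedSizeReservoir.

// openFloat64 returns a pseudo-random number in the open interval (0,1),
// mirroring randomFloat64 above by rejecting 0.
func openFloat64(rng *rand.Rand) float64 {
    f := rng.Float64()
    for f == 0 {
        f = rng.Float64()
    }
    return f
}

// skip returns how many stream items to pass over before the next
// replacement; it is always at least 1.
func skip(rng *rand.Rand, w float64) int {
    return int(math.Log(openFloat64(rng))/math.Log(1-w)) + 1
}

// algorithmL keeps a uniform random sample of size k from a stream: once the
// reservoir is full it jumps ahead a geometrically distributed number of
// items before overwriting a random slot, so the number of random draws
// grows with k*log(n/k) rather than n.
func algorithmL(stream []int, k int, rng *rand.Rand) []int {
    res := make([]int, 0, k)
    w := math.Exp(math.Log(openFloat64(rng)) / float64(k))
    next := k - 1 + skip(rng, w)
    for i, v := range stream {
        if len(res) < k {
            res = append(res, v) // fill phase: keep everything
            continue
        }
        if i == next {
            res[rng.Intn(k)] = v // overwrite a random existing sample
            w *= math.Exp(math.Log(openFloat64(rng)) / float64(k))
            next += skip(rng, w)
        }
    }
    return res
}

func main() {
    rng := rand.New(rand.NewSource(1))
    stream := make([]int, 1000)
    for i := range stream {
        stream[i] = i
    }
    fmt.Println(algorithmL(stream, 4, rng))
}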
func (r *randRes) Collect(dest *[]Exemplar) { // Collect returns all the held exemplars.
//
// The Reservoir state is preserved after this call.
func (r *FixedSizeReservoir) Collect(dest *[]Exemplar) {
r.storage.Collect(dest) r.storage.Collect(dest)
// Call reset here even though it will reset r.count and restart the random // Call reset here even though it will reset r.count and restart the random
// number series. This will persist any old exemplars as long as no new // number series. This will persist any old exemplars as long as no new

View File

@ -0,0 +1,62 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
import (
"context"
"slices"
"sort"
"time"
"go.opentelemetry.io/otel/attribute"
)
// NewHistogramReservoir returns a [HistogramReservoir] that samples the last
// measurement that falls within a histogram bucket. The histogram bucket
// upper-boundaries are defined by bounds.
//
// The passed bounds will be sorted by this function.
func NewHistogramReservoir(bounds []float64) *HistogramReservoir {
slices.Sort(bounds)
return &HistogramReservoir{
bounds: bounds,
storage: newStorage(len(bounds) + 1),
}
}
var _ Reservoir = &HistogramReservoir{}
// HistogramReservoir is a [Reservoir] that samples the last measurement that
// falls within a histogram bucket. The histogram bucket upper-boundaries are
// defined by bounds.
type HistogramReservoir struct {
*storage
// bounds are bucket bounds in ascending order.
bounds []float64
}
// Offer accepts the parameters associated with a measurement. The
// parameters will be stored as an exemplar if the Reservoir decides to
// sample the measurement.
//
// The passed ctx needs to contain any baggage or span that were active
// when the measurement was made. This information may be used by the
// Reservoir in making a sampling decision.
//
// The time t is the time when the measurement was made. The v and a
// parameters are the value and dropped (filtered) attributes of the
// measurement respectively.
func (r *HistogramReservoir) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) {
var x float64
switch v.Type() {
case Int64ValueType:
x = float64(v.Int64())
case Float64ValueType:
x = v.Float64()
default:
panic("unknown value type")
}
r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a)
}
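The Offer implementation above relies on sort.SearchFloat64s returning the index of the first boundary greater than or equal to the measured value, which is why the storage is sized len(bounds)+1. A tiny illustrative sketch of that mapping; the boundary values are made up.

package main

import (
    "fmt"
    "sort"
)

func main() {
    // Upper boundaries of the buckets, sorted ascending (made-up values).
    bounds := []float64{0, 5, 10, 25, 50}

    // sort.SearchFloat64s returns the smallest index i with bounds[i] >= x,
    // or len(bounds) when x exceeds every boundary, so a reservoir sized
    // len(bounds)+1 has one slot per bucket, including the overflow bucket.
    for _, x := range []float64{-1, 3, 10, 99} {
        fmt.Println(x, "-> slot", sort.SearchFloat64s(bounds, x))
    }
    // -1 -> slot 0, 3 -> slot 1, 10 -> slot 2, 99 -> slot 5
}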

View File

@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
import ( import (
"context" "context"

View File

@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
import ( import (
"context" "context"
@ -35,7 +35,7 @@ func (r *storage) Collect(dest *[]Exemplar) {
continue continue
} }
m.Exemplar(&(*dest)[n]) m.exemplar(&(*dest)[n])
n++ n++
} }
*dest = (*dest)[:n] *dest = (*dest)[:n]
@ -66,8 +66,8 @@ func newMeasurement(ctx context.Context, ts time.Time, v Value, droppedAttr []at
} }
} }
// Exemplar returns m as an [Exemplar]. // exemplar returns m as an [Exemplar].
func (m measurement) Exemplar(dest *Exemplar) { func (m measurement) exemplar(dest *Exemplar) {
dest.FilteredAttributes = m.FilteredAttributes dest.FilteredAttributes = m.FilteredAttributes
dest.Time = m.Time dest.Time = m.Time
dest.Value = m.Value dest.Value = m.Value

View File

@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
import "math" import "math"
@ -28,7 +28,8 @@ type Value struct {
func NewValue[N int64 | float64](value N) Value { func NewValue[N int64 | float64](value N) Value {
switch v := any(value).(type) { switch v := any(value).(type) {
case int64: case int64:
return Value{t: Int64ValueType, val: uint64(v)} // This can be later converted back to int64 (overflow not checked).
return Value{t: Int64ValueType, val: uint64(v)} // nolint:gosec
case float64: case float64:
return Value{t: Float64ValueType, val: math.Float64bits(v)} return Value{t: Float64ValueType, val: math.Float64bits(v)}
} }
@ -42,7 +43,8 @@ func (v Value) Type() ValueType { return v.t }
// Int64ValueType, 0 is returned. // Int64ValueType, 0 is returned.
func (v Value) Int64() int64 { func (v Value) Int64() int64 {
if v.t == Int64ValueType { if v.t == Int64ValueType {
return int64(v.val) // Assumes the correct int64 was stored in v.val based on type.
return int64(v.val) // nolint: gosec
} }
return 0 return 0
} }
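Both nolint annotations above hinge on the int64 and float64 payloads round-tripping losslessly through the uint64 field. A small illustrative check of that assumption:

package main

import (
    "fmt"
    "math"
)

func main() {
    // A float64 round-trips losslessly through its IEEE-754 bit pattern.
    f := -12.75
    fmt.Println(math.Float64frombits(math.Float64bits(f)) == f) // true

    // An int64 round-trips through uint64 as well: the conversion keeps the
    // two's-complement bit pattern, which is why the code above silences
    // gosec instead of checking for overflow.
    i := int64(-42)
    fmt.Println(int64(uint64(i)) == i) // true
}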

View File

@ -234,8 +234,8 @@ func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Se
} }
} }
// observablID is a comparable unique identifier of an observable. // observableID is a comparable unique identifier of an observable.
type observablID[N int64 | float64] struct { type observableID[N int64 | float64] struct {
name string name string
description string description string
kind InstrumentKind kind InstrumentKind
@ -287,7 +287,7 @@ func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string) int
type observable[N int64 | float64] struct { type observable[N int64 | float64] struct {
metric.Observable metric.Observable
observablID[N] observableID[N]
meter *meter meter *meter
measures measures[N] measures measures[N]
@ -296,7 +296,7 @@ type observable[N int64 | float64] struct {
func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string) *observable[N] { func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string) *observable[N] {
return &observable[N]{ return &observable[N]{
observablID: observablID[N]{ observableID: observableID[N]{
name: name, name: name,
description: desc, description: desc,
kind: kind, kind: kind,

View File

@ -8,7 +8,6 @@ import (
"time" "time"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/metric/metricdata"
) )
@ -38,8 +37,8 @@ type Builder[N int64 | float64] struct {
// create new exemplar reservoirs for a new seen attribute set. // create new exemplar reservoirs for a new seen attribute set.
// //
// If this is not provided a default factory function that returns an // If this is not provided a default factory function that returns an
// exemplar.Drop reservoir will be used. // DropReservoir reservoir will be used.
ReservoirFunc func() exemplar.FilteredReservoir[N] ReservoirFunc func() FilteredExemplarReservoir[N]
// AggregationLimit is the cardinality limit of measurement attributes. Any // AggregationLimit is the cardinality limit of measurement attributes. Any
// measurement for new attributes once the limit has been reached will be // measurement for new attributes once the limit has been reached will be
// aggregated into a single aggregate for the "otel.metric.overflow" // aggregated into a single aggregate for the "otel.metric.overflow"
@ -50,12 +49,12 @@ type Builder[N int64 | float64] struct {
AggregationLimit int AggregationLimit int
} }
func (b Builder[N]) resFunc() func() exemplar.FilteredReservoir[N] { func (b Builder[N]) resFunc() func() FilteredExemplarReservoir[N] {
if b.ReservoirFunc != nil { if b.ReservoirFunc != nil {
return b.ReservoirFunc return b.ReservoirFunc
} }
return exemplar.Drop return DropReservoir
} }
type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue)

View File

@ -1,16 +1,17 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
import ( import (
"context" "context"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/exemplar"
) )
// Drop returns a [FilteredReservoir] that drops all measurements it is offered. // DropReservoir returns a [FilteredReservoir] that drops all measurements it is offered.
func Drop[N int64 | float64]() FilteredReservoir[N] { return &dropRes[N]{} } func DropReservoir[N int64 | float64]() FilteredExemplarReservoir[N] { return &dropRes[N]{} }
type dropRes[N int64 | float64] struct{} type dropRes[N int64 | float64] struct{}
@ -18,6 +19,6 @@ type dropRes[N int64 | float64] struct{}
func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {} func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {}
// Collect resets dest. No exemplars will ever be returned. // Collect resets dest. No exemplars will ever be returned.
func (r *dropRes[N]) Collect(dest *[]Exemplar) { func (r *dropRes[N]) Collect(dest *[]exemplar.Exemplar) {
*dest = (*dest)[:0] *dest = (*dest)[:0]
} }

View File

@ -6,7 +6,7 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggreg
import ( import (
"sync" "sync"
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar" "go.opentelemetry.io/otel/sdk/metric/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/metric/metricdata"
) )

View File

@ -12,7 +12,6 @@ import (
"go.opentelemetry.io/otel" "go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/metric/metricdata"
) )
@ -31,7 +30,7 @@ const (
// expoHistogramDataPoint is a single data point in an exponential histogram. // expoHistogramDataPoint is a single data point in an exponential histogram.
type expoHistogramDataPoint[N int64 | float64] struct { type expoHistogramDataPoint[N int64 | float64] struct {
attrs attribute.Set attrs attribute.Set
res exemplar.FilteredReservoir[N] res FilteredExemplarReservoir[N]
count uint64 count uint64
min N min N
@ -42,14 +41,14 @@ type expoHistogramDataPoint[N int64 | float64] struct {
noMinMax bool noMinMax bool
noSum bool noSum bool
scale int scale int32
posBuckets expoBuckets posBuckets expoBuckets
negBuckets expoBuckets negBuckets expoBuckets
zeroCount uint64 zeroCount uint64
} }
func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize, maxScale int, noMinMax, noSum bool) *expoHistogramDataPoint[N] { func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize int, maxScale int32, noMinMax, noSum bool) *expoHistogramDataPoint[N] {
f := math.MaxFloat64 f := math.MaxFloat64
max := N(f) // if N is int64, max will overflow to -9223372036854775808 max := N(f) // if N is int64, max will overflow to -9223372036854775808
min := N(-f) min := N(-f)
@ -119,11 +118,13 @@ func (p *expoHistogramDataPoint[N]) record(v N) {
} }
// getBin returns the bin v should be recorded into. // getBin returns the bin v should be recorded into.
func (p *expoHistogramDataPoint[N]) getBin(v float64) int { func (p *expoHistogramDataPoint[N]) getBin(v float64) int32 {
frac, exp := math.Frexp(v) frac, expInt := math.Frexp(v)
// 11-bit exponential.
exp := int32(expInt) // nolint: gosec
if p.scale <= 0 { if p.scale <= 0 {
// Because of the choice of fraction is always 1 power of two higher than we want. // Because of the choice of fraction is always 1 power of two higher than we want.
correction := 1 var correction int32 = 1
if frac == .5 { if frac == .5 {
// If v is an exact power of two the frac will be .5 and the exp // If v is an exact power of two the frac will be .5 and the exp
// will be one higher than we want. // will be one higher than we want.
@ -131,7 +132,7 @@ func (p *expoHistogramDataPoint[N]) getBin(v float64) int {
} }
return (exp - correction) >> (-p.scale) return (exp - correction) >> (-p.scale)
} }
return exp<<p.scale + int(math.Log(frac)*scaleFactors[p.scale]) - 1 return exp<<p.scale + int32(math.Log(frac)*scaleFactors[p.scale]) - 1
} }
// scaleFactors are constants used in calculating the logarithm index. They are // scaleFactors are constants used in calculating the logarithm index. They are
@ -162,20 +163,20 @@ var scaleFactors = [21]float64{
// scaleChange returns the magnitude of the scale change needed to fit bin in // scaleChange returns the magnitude of the scale change needed to fit bin in
// the bucket. If no scale change is needed 0 is returned. // the bucket. If no scale change is needed 0 is returned.
func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin, length int) int { func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin int32, length int) int32 {
if length == 0 { if length == 0 {
// No need to rescale if there are no buckets. // No need to rescale if there are no buckets.
return 0 return 0
} }
low := startBin low := int(startBin)
high := bin high := int(bin)
if startBin >= bin { if startBin >= bin {
low = bin low = int(bin)
high = startBin + length - 1 high = int(startBin) + length - 1
} }
count := 0 var count int32
for high-low >= p.maxSize { for high-low >= p.maxSize {
low = low >> 1 low = low >> 1
high = high >> 1 high = high >> 1
@ -189,39 +190,39 @@ func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin, length int) int {
// expoBuckets is a set of buckets in an exponential histogram. // expoBuckets is a set of buckets in an exponential histogram.
type expoBuckets struct { type expoBuckets struct {
startBin int startBin int32
counts []uint64 counts []uint64
} }
// record increments the count for the given bin, and expands the buckets if needed. // record increments the count for the given bin, and expands the buckets if needed.
// Size changes must be done before calling this function. // Size changes must be done before calling this function.
func (b *expoBuckets) record(bin int) { func (b *expoBuckets) record(bin int32) {
if len(b.counts) == 0 { if len(b.counts) == 0 {
b.counts = []uint64{1} b.counts = []uint64{1}
b.startBin = bin b.startBin = bin
return return
} }
endBin := b.startBin + len(b.counts) - 1 endBin := int(b.startBin) + len(b.counts) - 1
// if the new bin is inside the current range // if the new bin is inside the current range
if bin >= b.startBin && bin <= endBin { if bin >= b.startBin && int(bin) <= endBin {
b.counts[bin-b.startBin]++ b.counts[bin-b.startBin]++
return return
} }
// if the new bin is before the current start add spaces to the counts // if the new bin is before the current start add spaces to the counts
if bin < b.startBin { if bin < b.startBin {
origLen := len(b.counts) origLen := len(b.counts)
newLength := endBin - bin + 1 newLength := endBin - int(bin) + 1
shift := b.startBin - bin shift := b.startBin - bin
if newLength > cap(b.counts) { if newLength > cap(b.counts) {
b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...) b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...)
} }
copy(b.counts[shift:origLen+shift], b.counts[:]) copy(b.counts[shift:origLen+int(shift)], b.counts[:])
b.counts = b.counts[:newLength] b.counts = b.counts[:newLength]
for i := 1; i < shift; i++ { for i := 1; i < int(shift); i++ {
b.counts[i] = 0 b.counts[i] = 0
} }
b.startBin = bin b.startBin = bin
@ -229,17 +230,17 @@ func (b *expoBuckets) record(bin int) {
return return
} }
// if the new is after the end add spaces to the end // if the new is after the end add spaces to the end
if bin > endBin { if int(bin) > endBin {
if bin-b.startBin < cap(b.counts) { if int(bin-b.startBin) < cap(b.counts) {
b.counts = b.counts[:bin-b.startBin+1] b.counts = b.counts[:bin-b.startBin+1]
for i := endBin + 1 - b.startBin; i < len(b.counts); i++ { for i := endBin + 1 - int(b.startBin); i < len(b.counts); i++ {
b.counts[i] = 0 b.counts[i] = 0
} }
b.counts[bin-b.startBin] = 1 b.counts[bin-b.startBin] = 1
return return
} }
end := make([]uint64, bin-b.startBin-len(b.counts)+1) end := make([]uint64, int(bin-b.startBin)-len(b.counts)+1)
b.counts = append(b.counts, end...) b.counts = append(b.counts, end...)
b.counts[bin-b.startBin] = 1 b.counts[bin-b.startBin] = 1
} }
@ -247,7 +248,7 @@ func (b *expoBuckets) record(bin int) {
// downscale shrinks a bucket by a factor of 2*s. It will sum counts into the // downscale shrinks a bucket by a factor of 2*s. It will sum counts into the
// correct lower resolution bucket. // correct lower resolution bucket.
func (b *expoBuckets) downscale(delta int) { func (b *expoBuckets) downscale(delta int32) {
// Example // Example
// delta = 2 // delta = 2
// Original offset: -6 // Original offset: -6
@ -262,19 +263,19 @@ func (b *expoBuckets) downscale(delta int) {
return return
} }
steps := 1 << delta steps := int32(1) << delta
offset := b.startBin % steps offset := b.startBin % steps
offset = (offset + steps) % steps // to make offset positive offset = (offset + steps) % steps // to make offset positive
for i := 1; i < len(b.counts); i++ { for i := 1; i < len(b.counts); i++ {
idx := i + offset idx := i + int(offset)
if idx%steps == 0 { if idx%int(steps) == 0 {
b.counts[idx/steps] = b.counts[i] b.counts[idx/int(steps)] = b.counts[i]
continue continue
} }
b.counts[idx/steps] += b.counts[i] b.counts[idx/int(steps)] += b.counts[i]
} }
lastIdx := (len(b.counts) - 1 + offset) / steps lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps)
b.counts = b.counts[:lastIdx+1] b.counts = b.counts[:lastIdx+1]
b.startBin = b.startBin >> delta b.startBin = b.startBin >> delta
} }
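Conceptually, downscale merges every 2^delta adjacent bins into one, halving the resolution delta times. A small illustrative sketch of that merge using a map keyed by absolute bin index; the vendored code achieves the same result in place on a dense counts slice.

package main

import "fmt"

// downscale merges exponential-histogram bins so that 2^delta adjacent bins
// collapse into one. The map representation is purely for clarity.
func downscale(bins map[int32]uint64, delta int32) map[int32]uint64 {
    out := make(map[int32]uint64, len(bins))
    for bin, count := range bins {
        out[bin>>delta] += count // arithmetic shift floors toward -inf
    }
    return out
}

func main() {
    in := map[int32]uint64{-6: 3, -5: 1, -4: 1, -3: 1, -2: 1, -1: 1, 0: 1}
    fmt.Println(downscale(in, 2)) // map[-2:4 -1:4 0:1]
}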
@ -282,12 +283,12 @@ func (b *expoBuckets) downscale(delta int) {
// newExponentialHistogram returns an Aggregator that summarizes a set of // newExponentialHistogram returns an Aggregator that summarizes a set of
// measurements as an exponential histogram. Each histogram is scoped by attributes // measurements as an exponential histogram. Each histogram is scoped by attributes
// and the aggregation cycle the measurements were made in. // and the aggregation cycle the measurements were made in.
func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *expoHistogram[N] { func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() FilteredExemplarReservoir[N]) *expoHistogram[N] {
return &expoHistogram[N]{ return &expoHistogram[N]{
noSum: noSum, noSum: noSum,
noMinMax: noMinMax, noMinMax: noMinMax,
maxSize: int(maxSize), maxSize: int(maxSize),
maxScale: int(maxScale), maxScale: maxScale,
newRes: r, newRes: r,
limit: newLimiter[*expoHistogramDataPoint[N]](limit), limit: newLimiter[*expoHistogramDataPoint[N]](limit),
@ -303,9 +304,9 @@ type expoHistogram[N int64 | float64] struct {
noSum bool noSum bool
noMinMax bool noMinMax bool
maxSize int maxSize int
maxScale int maxScale int32
newRes func() exemplar.FilteredReservoir[N] newRes func() FilteredExemplarReservoir[N]
limit limiter[*expoHistogramDataPoint[N]] limit limiter[*expoHistogramDataPoint[N]]
values map[attribute.Distinct]*expoHistogramDataPoint[N] values map[attribute.Distinct]*expoHistogramDataPoint[N]
valuesMu sync.Mutex valuesMu sync.Mutex
@ -354,15 +355,15 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
hDPts[i].StartTime = e.start hDPts[i].StartTime = e.start
hDPts[i].Time = t hDPts[i].Time = t
hDPts[i].Count = val.count hDPts[i].Count = val.count
hDPts[i].Scale = int32(val.scale) hDPts[i].Scale = val.scale
hDPts[i].ZeroCount = val.zeroCount hDPts[i].ZeroCount = val.zeroCount
hDPts[i].ZeroThreshold = 0.0 hDPts[i].ZeroThreshold = 0.0
hDPts[i].PositiveBucket.Offset = int32(val.posBuckets.startBin) hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin
hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts)) hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts))
copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
hDPts[i].NegativeBucket.Offset = int32(val.negBuckets.startBin) hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin
hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts)) hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts))
copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
@ -407,15 +408,15 @@ func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int {
hDPts[i].StartTime = e.start hDPts[i].StartTime = e.start
hDPts[i].Time = t hDPts[i].Time = t
hDPts[i].Count = val.count hDPts[i].Count = val.count
hDPts[i].Scale = int32(val.scale) hDPts[i].Scale = val.scale
hDPts[i].ZeroCount = val.zeroCount hDPts[i].ZeroCount = val.zeroCount
hDPts[i].ZeroThreshold = 0.0 hDPts[i].ZeroThreshold = 0.0
hDPts[i].PositiveBucket.Offset = int32(val.posBuckets.startBin) hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin
hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts)) hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts))
copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
hDPts[i].NegativeBucket.Offset = int32(val.negBuckets.startBin) hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin
hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts)) hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts))
copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)

View File

@ -0,0 +1,50 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
import (
"context"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/exemplar"
)
// FilteredExemplarReservoir wraps a [exemplar.Reservoir] with a filter.
type FilteredExemplarReservoir[N int64 | float64] interface {
// Offer accepts the parameters associated with a measurement. The
// parameters will be stored as an exemplar if the filter decides to
// sample the measurement.
//
// The passed ctx needs to contain any baggage or span that were active
// when the measurement was made. This information may be used by the
// Reservoir in making a sampling decision.
Offer(ctx context.Context, val N, attr []attribute.KeyValue)
// Collect returns all the held exemplars in the reservoir.
Collect(dest *[]exemplar.Exemplar)
}
// filteredExemplarReservoir handles the pre-sampled exemplar of measurements made.
type filteredExemplarReservoir[N int64 | float64] struct {
filter exemplar.Filter
reservoir exemplar.Reservoir
}
// NewFilteredExemplarReservoir creates a [FilteredExemplarReservoir] which only offers values
// that are allowed by the filter.
func NewFilteredExemplarReservoir[N int64 | float64](f exemplar.Filter, r exemplar.Reservoir) FilteredExemplarReservoir[N] {
return &filteredExemplarReservoir[N]{
filter: f,
reservoir: r,
}
}
func (f *filteredExemplarReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) {
if f.filter(ctx) {
// only record the current time if we are sampling this measurement.
f.reservoir.Offer(ctx, time.Now(), exemplar.NewValue(val), attr)
}
}
func (f *filteredExemplarReservoir[N]) Collect(dest *[]exemplar.Exemplar) { f.reservoir.Collect(dest) }
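The wrapper above is the whole pattern: a filter inspects the measurement context and only then forwards the value, with a timestamp, to the wrapped reservoir. A self-contained sketch of that shape with purely local, made-up types (it deliberately does not use the otel exemplar API):

package main

import (
    "context"
    "fmt"
    "time"
)

// filter decides, per measurement context, whether a value is offered on.
type filter func(context.Context) bool

// reservoir is a stand-in for anything that stores sampled measurements.
type reservoir interface {
    Offer(ctx context.Context, t time.Time, v float64)
}

// filtered mirrors filteredExemplarReservoir: gate first, then offer.
type filtered struct {
    filter    filter
    reservoir reservoir
}

func (f filtered) Offer(ctx context.Context, v float64) {
    if f.filter(ctx) {
        // The timestamp is only taken when the measurement is sampled.
        f.reservoir.Offer(ctx, time.Now(), v)
    }
}

// printRes is a toy reservoir that just prints what it is offered.
type printRes struct{}

func (printRes) Offer(_ context.Context, t time.Time, v float64) {
    fmt.Println("sampled", v, "at", t.Format(time.RFC3339))
}

type ctxKey struct{}

func main() {
    sampledOnly := func(ctx context.Context) bool {
        ok, _ := ctx.Value(ctxKey{}).(bool)
        return ok
    }
    r := filtered{filter: sampledOnly, reservoir: printRes{}}
    r.Offer(context.Background(), 1.5)                                    // dropped
    r.Offer(context.WithValue(context.Background(), ctxKey{}, true), 2.5) // sampled
}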

View File

@ -11,13 +11,12 @@ import (
"time" "time"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/metric/metricdata"
) )
type buckets[N int64 | float64] struct { type buckets[N int64 | float64] struct {
attrs attribute.Set attrs attribute.Set
res exemplar.FilteredReservoir[N] res FilteredExemplarReservoir[N]
counts []uint64 counts []uint64
count uint64 count uint64
@ -48,13 +47,13 @@ type histValues[N int64 | float64] struct {
noSum bool noSum bool
bounds []float64 bounds []float64
newRes func() exemplar.FilteredReservoir[N] newRes func() FilteredExemplarReservoir[N]
limit limiter[*buckets[N]] limit limiter[*buckets[N]]
values map[attribute.Distinct]*buckets[N] values map[attribute.Distinct]*buckets[N]
valuesMu sync.Mutex valuesMu sync.Mutex
} }
func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histValues[N] { func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() FilteredExemplarReservoir[N]) *histValues[N] {
// The responsibility of keeping all buckets correctly associated with the // The responsibility of keeping all buckets correctly associated with the
// passed boundaries is ultimately this type's responsibility. Make a copy // passed boundaries is ultimately this type's responsibility. Make a copy
// here so we can always guarantee this. Or, in the case of failure, have // here so we can always guarantee this. Or, in the case of failure, have
@ -109,7 +108,7 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute
// newHistogram returns an Aggregator that summarizes a set of measurements as // newHistogram returns an Aggregator that summarizes a set of measurements as
// an histogram. // an histogram.
func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histogram[N] { func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() FilteredExemplarReservoir[N]) *histogram[N] {
return &histogram[N]{ return &histogram[N]{
histValues: newHistValues[N](boundaries, noSum, limit, r), histValues: newHistValues[N](boundaries, noSum, limit, r),
noMinMax: noMinMax, noMinMax: noMinMax,

View File

@ -9,7 +9,6 @@ import (
"time" "time"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/metric/metricdata"
) )
@ -17,10 +16,10 @@ import (
type datapoint[N int64 | float64] struct { type datapoint[N int64 | float64] struct {
attrs attribute.Set attrs attribute.Set
value N value N
res exemplar.FilteredReservoir[N] res FilteredExemplarReservoir[N]
} }
func newLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *lastValue[N] { func newLastValue[N int64 | float64](limit int, r func() FilteredExemplarReservoir[N]) *lastValue[N] {
return &lastValue[N]{ return &lastValue[N]{
newRes: r, newRes: r,
limit: newLimiter[datapoint[N]](limit), limit: newLimiter[datapoint[N]](limit),
@ -33,7 +32,7 @@ func newLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReserv
type lastValue[N int64 | float64] struct { type lastValue[N int64 | float64] struct {
sync.Mutex sync.Mutex
newRes func() exemplar.FilteredReservoir[N] newRes func() FilteredExemplarReservoir[N]
limit limiter[datapoint[N]] limit limiter[datapoint[N]]
values map[attribute.Distinct]datapoint[N] values map[attribute.Distinct]datapoint[N]
start time.Time start time.Time
@ -115,7 +114,7 @@ func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) in
// newPrecomputedLastValue returns an aggregator that summarizes a set of // newPrecomputedLastValue returns an aggregator that summarizes a set of
// observations as the last one made. // observations as the last one made.
func newPrecomputedLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *precomputedLastValue[N] { func newPrecomputedLastValue[N int64 | float64](limit int, r func() FilteredExemplarReservoir[N]) *precomputedLastValue[N] {
return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)} return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)}
} }

View File

@ -9,25 +9,24 @@ import (
"time" "time"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/metric/metricdata"
) )
type sumValue[N int64 | float64] struct { type sumValue[N int64 | float64] struct {
n N n N
res exemplar.FilteredReservoir[N] res FilteredExemplarReservoir[N]
attrs attribute.Set attrs attribute.Set
} }
// valueMap is the storage for sums. // valueMap is the storage for sums.
type valueMap[N int64 | float64] struct { type valueMap[N int64 | float64] struct {
sync.Mutex sync.Mutex
newRes func() exemplar.FilteredReservoir[N] newRes func() FilteredExemplarReservoir[N]
limit limiter[sumValue[N]] limit limiter[sumValue[N]]
values map[attribute.Distinct]sumValue[N] values map[attribute.Distinct]sumValue[N]
} }
func newValueMap[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *valueMap[N] { func newValueMap[N int64 | float64](limit int, r func() FilteredExemplarReservoir[N]) *valueMap[N] {
return &valueMap[N]{ return &valueMap[N]{
newRes: r, newRes: r,
limit: newLimiter[sumValue[N]](limit), limit: newLimiter[sumValue[N]](limit),
@ -55,7 +54,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S
// newSum returns an aggregator that summarizes a set of measurements as their // newSum returns an aggregator that summarizes a set of measurements as their
// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle // arithmetic sum. Each sum is scoped by attributes and the aggregation cycle
// the measurements were made in. // the measurements were made in.
func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *sum[N] { func newSum[N int64 | float64](monotonic bool, limit int, r func() FilteredExemplarReservoir[N]) *sum[N] {
return &sum[N]{ return &sum[N]{
valueMap: newValueMap[N](limit, r), valueMap: newValueMap[N](limit, r),
monotonic: monotonic, monotonic: monotonic,
@ -142,9 +141,9 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
} }
// newPrecomputedSum returns an aggregator that summarizes a set of // newPrecomputedSum returns an aggregator that summarizes a set of
// observatrions as their arithmetic sum. Each sum is scoped by attributes and // observations as their arithmetic sum. Each sum is scoped by attributes and
// the aggregation cycle the measurements were made in. // the aggregation cycle the measurements were made in.
func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *precomputedSum[N] { func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() FilteredExemplarReservoir[N]) *precomputedSum[N] {
return &precomputedSum[N]{ return &precomputedSum[N]{
valueMap: newValueMap[N](limit, r), valueMap: newValueMap[N](limit, r),
monotonic: monotonic, monotonic: monotonic,
@ -152,7 +151,7 @@ func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() ex
} }
} }
// precomputedSum summarizes a set of observatrions as their arithmetic sum. // precomputedSum summarizes a set of observations as their arithmetic sum.
type precomputedSum[N int64 | float64] struct { type precomputedSum[N int64 | float64] struct {
*valueMap[N] *valueMap[N]

View File

@ -1,49 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
import (
"context"
"time"
"go.opentelemetry.io/otel/attribute"
)
// FilteredReservoir wraps a [Reservoir] with a filter.
type FilteredReservoir[N int64 | float64] interface {
// Offer accepts the parameters associated with a measurement. The
// parameters will be stored as an exemplar if the filter decides to
// sample the measurement.
//
// The passed ctx needs to contain any baggage or span that were active
// when the measurement was made. This information may be used by the
// Reservoir in making a sampling decision.
Offer(ctx context.Context, val N, attr []attribute.KeyValue)
// Collect returns all the held exemplars in the reservoir.
Collect(dest *[]Exemplar)
}
// filteredReservoir handles the pre-sampled exemplar of measurements made.
type filteredReservoir[N int64 | float64] struct {
filter Filter
reservoir Reservoir
}
// NewFilteredReservoir creates a [FilteredReservoir] which only offers values
// that are allowed by the filter.
func NewFilteredReservoir[N int64 | float64](f Filter, r Reservoir) FilteredReservoir[N] {
return &filteredReservoir[N]{
filter: f,
reservoir: r,
}
}
func (f *filteredReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) {
if f.filter(ctx) {
// only record the current time if we are sampling this measurment.
f.reservoir.Offer(ctx, time.Now(), NewValue(val), attr)
}
}
func (f *filteredReservoir[N]) Collect(dest *[]Exemplar) { f.reservoir.Collect(dest) }

View File

@ -1,46 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
import (
"context"
"slices"
"sort"
"time"
"go.opentelemetry.io/otel/attribute"
)
// Histogram returns a [Reservoir] that samples the last measurement that falls
// within a histogram bucket. The histogram bucket upper-boundaries are define
// by bounds.
//
// The passed bounds will be sorted by this function.
func Histogram(bounds []float64) Reservoir {
slices.Sort(bounds)
return &histRes{
bounds: bounds,
storage: newStorage(len(bounds) + 1),
}
}
type histRes struct {
*storage
// bounds are bucket bounds in ascending order.
bounds []float64
}
func (r *histRes) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) {
var x float64
switch v.Type() {
case Int64ValueType:
x = float64(v.Int64())
case Float64ValueType:
x = v.Float64()
default:
panic("unknown value type")
}
r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a)
}

View File

@ -10,39 +10,23 @@ package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x"
import ( import (
"os" "os"
"strconv" "strconv"
"strings"
) )
var ( // CardinalityLimit is an experimental feature flag that defines if
// Exemplars is an experimental feature flag that defines if exemplars // cardinality limits should be applied to the recorded metric data-points.
// should be recorded for metric data-points. //
// // To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment
// To enable this feature set the OTEL_GO_X_EXEMPLAR environment variable // variable to the integer limit value you want to use.
// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" //
// will also enable this). // Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0
Exemplars = newFeature("EXEMPLAR", func(v string) (string, bool) { // will disable the cardinality limits.
if strings.ToLower(v) == "true" { var CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) {
return v, true
}
return "", false
})
// CardinalityLimit is an experimental feature flag that defines if
// cardinality limits should be applied to the recorded metric data-points.
//
// To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment
// variable to the integer limit value you want to use.
//
// Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0
// will disable the cardinality limits.
CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) {
n, err := strconv.Atoi(v) n, err := strconv.Atoi(v)
if err != nil { if err != nil {
return 0, false return 0, false
} }
return n, true return n, true
}) })
)
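The surviving CardinalityLimit flag is read from the environment exactly as its doc comment describes. A minimal illustrative re-implementation of that lookup, using only the documented variable name and nothing else from the internal x package:

package main

import (
    "fmt"
    "os"
    "strconv"
)

// cardinalityLimit mirrors the flag parsing above: read the experimental
// environment variable and treat missing or non-integer values as disabled.
// Everything except the variable name is a local illustration.
func cardinalityLimit() (int, bool) {
    v := os.Getenv("OTEL_GO_X_CARDINALITY_LIMIT")
    if v == "" {
        return 0, false
    }
    n, err := strconv.Atoi(v)
    if err != nil {
        return 0, false
    }
    return n, true
}

func main() {
    os.Setenv("OTEL_GO_X_CARDINALITY_LIMIT", "2000")
    if n, ok := cardinalityLimit(); ok {
        fmt.Println("cardinality limit:", n) // cardinality limit: 2000
    }
}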
// Feature is an experimental feature control flag. It provides a uniform way // Feature is an experimental feature control flag. It provides a uniform way
// to interact with these feature flags and parse their values. // to interact with these feature flags and parse their values.

View File

@ -185,6 +185,11 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser
// configured with options. The instrument is used to asynchronously record // configured with options. The instrument is used to asynchronously record
// int64 measurements once per a measurement collection cycle. Only the // int64 measurements once per a measurement collection cycle. Only the
// measurements recorded during the collection cycle are exported. // measurements recorded during the collection cycle are exported.
//
// If Int64ObservableUpDownCounter is invoked repeatedly with the same Name,
// Description, and Unit, only the first set of callbacks provided are used.
// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
// if instrumentation can be created multiple times with different callbacks.
func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
id := Instrument{ id := Instrument{
@ -201,6 +206,11 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6
// configured with options. The instrument is used to asynchronously record // configured with options. The instrument is used to asynchronously record
// instantaneous int64 measurements once per a measurement collection cycle. // instantaneous int64 measurements once per a measurement collection cycle.
// Only the measurements recorded during the collection cycle are exported. // Only the measurements recorded during the collection cycle are exported.
//
// If Int64ObservableGauge is invoked repeatedly with the same Name,
// Description, and Unit, only the first set of callbacks provided are used.
// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
// if instrumentation can be created multiple times with different callbacks.
func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
cfg := metric.NewInt64ObservableGaugeConfig(options...) cfg := metric.NewInt64ObservableGaugeConfig(options...)
id := Instrument{ id := Instrument{
@ -334,6 +344,11 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O
// and configured with options. The instrument is used to asynchronously record // and configured with options. The instrument is used to asynchronously record
// float64 measurements once per a measurement collection cycle. Only the // float64 measurements once per a measurement collection cycle. Only the
// measurements recorded during the collection cycle are exported. // measurements recorded during the collection cycle are exported.
//
// If Float64ObservableUpDownCounter is invoked repeatedly with the same Name,
// Description, and Unit, only the first set of callbacks provided are used.
// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
// if instrumentation can be created multiple times with different callbacks.
func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
id := Instrument{ id := Instrument{
@ -350,6 +365,11 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl
// configured with options. The instrument is used to asynchronously record // configured with options. The instrument is used to asynchronously record
// instantaneous float64 measurements once per a measurement collection cycle. // instantaneous float64 measurements once per a measurement collection cycle.
// Only the measurements recorded during the collection cycle are exported. // Only the measurements recorded during the collection cycle are exported.
//
// If Float64ObservableGauge is invoked repeatedly with the same Name,
// Description, and Unit, only the first set of callbacks provided are used.
// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
// if instrumentation can be created multiple times with different callbacks.
func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
cfg := metric.NewFloat64ObservableGaugeConfig(options...) cfg := metric.NewFloat64ObservableGaugeConfig(options...)
id := Instrument{ id := Instrument{
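The doc comments added above all point to the same workaround for repeatedly created observable instruments: create the instrument without attaching callbacks and manage the callback lifecycle through RegisterCallback and Registration.Unregister. A short sketch against the public metric API; the instrument name and reported value are made up.

package main

import (
    "context"
    "log"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/metric"
)

func main() {
    // Create the observable once, with no callback of its own.
    meter := otel.Meter("example")
    queueSize, err := meter.Int64ObservableUpDownCounter("queue.size")
    if err != nil {
        log.Fatal(err)
    }

    // Attach the callback separately so it can be detached later.
    reg, err := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
        o.ObserveInt64(queueSize, 42) // illustrative value
        return nil
    }, queueSize)
    if err != nil {
        log.Fatal(err)
    }
    defer func() { _ = reg.Unregister() }() // detach when no longer needed
}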
@ -439,7 +459,7 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable)
} }
continue continue
} }
reg.registerInt64(o.observablID) reg.registerInt64(o.observableID)
case float64Observable: case float64Observable:
if err := o.registerable(m); err != nil { if err := o.registerable(m); err != nil {
if !errors.Is(err, errEmptyAgg) { if !errors.Is(err, errEmptyAgg) {
@ -447,7 +467,7 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable)
} }
continue continue
} }
reg.registerFloat64(o.observablID) reg.registerFloat64(o.observableID)
default: default:
// Instrument external to the SDK. // Instrument external to the SDK.
return nil, fmt.Errorf("invalid observable: from different implementation") return nil, fmt.Errorf("invalid observable: from different implementation")
@ -468,14 +488,14 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable)
type observer struct { type observer struct {
embedded.Observer embedded.Observer
float64 map[observablID[float64]]struct{} float64 map[observableID[float64]]struct{}
int64 map[observablID[int64]]struct{} int64 map[observableID[int64]]struct{}
} }
func newObserver() observer { func newObserver() observer {
return observer{ return observer{
float64: make(map[observablID[float64]]struct{}), float64: make(map[observableID[float64]]struct{}),
int64: make(map[observablID[int64]]struct{}), int64: make(map[observableID[int64]]struct{}),
} }
} }
@ -483,11 +503,11 @@ func (r observer) len() int {
return len(r.float64) + len(r.int64) return len(r.float64) + len(r.int64)
} }
func (r observer) registerFloat64(id observablID[float64]) { func (r observer) registerFloat64(id observableID[float64]) {
r.float64[id] = struct{}{} r.float64[id] = struct{}{}
} }
func (r observer) registerInt64(id observablID[int64]) { func (r observer) registerInt64(id observableID[int64]) {
r.int64[id] = struct{}{} r.int64[id] = struct{}{}
} }
@ -516,7 +536,7 @@ func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ...
return return
} }
if _, registered := r.float64[oImpl.observablID]; !registered { if _, registered := r.float64[oImpl.observableID]; !registered {
if !oImpl.dropAggregation { if !oImpl.dropAggregation {
global.Error(errUnregObserver, "failed to record", global.Error(errUnregObserver, "failed to record",
"name", oImpl.name, "name", oImpl.name,
@ -551,7 +571,7 @@ func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric
return return
} }
if _, registered := r.int64[oImpl.observablID]; !registered { if _, registered := r.int64[oImpl.observableID]; !registered {
if !oImpl.dropAggregation { if !oImpl.dropAggregation {
global.Error(errUnregObserver, "failed to record", global.Error(errUnregObserver, "failed to record",
"name", oImpl.name, "name", oImpl.name,

View File

@ -34,7 +34,7 @@ var errNonPositiveDuration = fmt.Errorf("non-positive duration")
// start of bi-directional control flow. // start of bi-directional control flow.
// //
// Typically, push-based exporters that are periodic will // Typically, push-based exporters that are periodic will
// implement PeroidicExporter themselves and construct a // implement PeriodicExporter themselves and construct a
// PeriodicReader to satisfy this interface. // PeriodicReader to satisfy this interface.
// //
// Pull-based exporters will typically implement Register // Pull-based exporters will typically implement Register

View File

@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
// version is the current release version of the metric SDK in use. // version is the current release version of the metric SDK in use.
func version() string { func version() string {
return "1.28.0" return "1.31.0"
} }

View File

@ -10,17 +10,16 @@ import (
"golang.org/x/sys/windows/registry" "golang.org/x/sys/windows/registry"
) )
// implements hostIDReader // implements hostIDReader.
type hostIDReaderWindows struct{} type hostIDReaderWindows struct{}
// read reads MachineGuid from the windows registry key: // read reads MachineGuid from the Windows registry key:
// SOFTWARE\Microsoft\Cryptography // SOFTWARE\Microsoft\Cryptography.
func (*hostIDReaderWindows) read() (string, error) { func (*hostIDReaderWindows) read() (string, error) {
k, err := registry.OpenKey( k, err := registry.OpenKey(
registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`, registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`,
registry.QUERY_VALUE|registry.WOW64_64KEY, registry.QUERY_VALUE|registry.WOW64_64KEY,
) )
if err != nil { if err != nil {
return "", err return "", err
} }

View File

@ -17,7 +17,6 @@ import (
func platformOSDescription() (string, error) { func platformOSDescription() (string, error) {
k, err := registry.OpenKey( k, err := registry.OpenKey(
registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
if err != nil { if err != nil {
return "", err return "", err
} }

View File

@ -316,7 +316,11 @@ func (bsp *batchSpanProcessor) processQueue() {
bsp.batchMutex.Unlock() bsp.batchMutex.Unlock()
if shouldExport { if shouldExport {
if !bsp.timer.Stop() { if !bsp.timer.Stop() {
<-bsp.timer.C // Handle both GODEBUG=asynctimerchan=[0|1] properly.
select {
case <-bsp.timer.C:
default:
}
} }
if err := bsp.exportSpans(ctx); err != nil { if err := bsp.exportSpans(ctx); err != nil {
otel.Handle(err) otel.Handle(err)
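The select/default drain above is the portable way to stop a timer now that Go 1.23's asynchronous timer channels (GODEBUG=asynctimerchan) mean a fired timer's value is not guaranteed to be buffered in the channel, so an unconditional receive can block. A standalone sketch of the pattern:

package main

import (
    "fmt"
    "time"
)

func main() {
    timer := time.NewTimer(10 * time.Millisecond)
    time.Sleep(20 * time.Millisecond) // let the timer fire

    // Stop reports false when the timer already fired. Draining with a
    // select/default consumes a buffered value if one is present and
    // returns immediately if not, which is safe under both the old and
    // the new timer-channel behavior.
    if !timer.Stop() {
        select {
        case <-timer.C:
        default:
        }
    }
    fmt.Println("timer stopped and drained")
}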

View File

@ -15,14 +15,15 @@ type evictedQueue[T any] struct {
queue []T queue []T
capacity int capacity int
droppedCount int droppedCount int
logDropped func() logDroppedMsg string
logDroppedOnce sync.Once
} }
func newEvictedQueueEvent(capacity int) evictedQueue[Event] { func newEvictedQueueEvent(capacity int) evictedQueue[Event] {
// Do not pre-allocate queue, do this lazily. // Do not pre-allocate queue, do this lazily.
return evictedQueue[Event]{ return evictedQueue[Event]{
capacity: capacity, capacity: capacity,
logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Event") }), logDroppedMsg: "limit reached: dropping trace trace.Event",
} }
} }
@ -30,7 +31,7 @@ func newEvictedQueueLink(capacity int) evictedQueue[Link] {
// Do not pre-allocate queue, do this lazily. // Do not pre-allocate queue, do this lazily.
return evictedQueue[Link]{ return evictedQueue[Link]{
capacity: capacity, capacity: capacity,
logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Link") }), logDroppedMsg: "limit reached: dropping trace trace.Link",
} }
} }
@ -53,6 +54,10 @@ func (eq *evictedQueue[T]) add(value T) {
eq.queue = append(eq.queue, value) eq.queue = append(eq.queue, value)
} }
func (eq *evictedQueue[T]) logDropped() {
eq.logDroppedOnce.Do(func() { global.Warn(eq.logDroppedMsg) })
}
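Replacing sync.OnceFunc with an explicit sync.Once plus message keeps the behavior the same: the warning for a given queue is emitted at most once, no matter how many items are dropped. A tiny illustrative sketch of that once-only logging pattern:

package main

import (
    "fmt"
    "sync"
)

// droppedLogger warns the first time an item is dropped and stays silent
// afterwards; types and message here are illustrative.
type droppedLogger struct {
    once sync.Once
    msg  string
}

func (d *droppedLogger) log() {
    d.once.Do(func() { fmt.Println("warn:", d.msg) })
}

func main() {
    d := &droppedLogger{msg: "limit reached: dropping trace trace.Event"}
    for i := 0; i < 3; i++ {
        d.log() // prints only once
    }
}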
// copy returns a copy of the evictedQueue. // copy returns a copy of the evictedQueue.
func (eq *evictedQueue[T]) copy() []T { func (eq *evictedQueue[T]) copy() []T {
return slices.Clone(eq.queue) return slices.Clone(eq.queue)

View File

@ -99,7 +99,7 @@ func (s snapshot) InstrumentationScope() instrumentation.Scope {
// InstrumentationLibrary returns information about the instrumentation // InstrumentationLibrary returns information about the instrumentation
// library that created the span. // library that created the span.
func (s snapshot) InstrumentationLibrary() instrumentation.Library { func (s snapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility
return s.instrumentationScope return s.instrumentationScope
} }

View File

@ -62,7 +62,7 @@ type ReadOnlySpan interface {
// InstrumentationLibrary returns information about the instrumentation // InstrumentationLibrary returns information about the instrumentation
// library that created the span. // library that created the span.
// Deprecated: please use InstrumentationScope instead. // Deprecated: please use InstrumentationScope instead.
InstrumentationLibrary() instrumentation.Library InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be defined for backwards compatibility
// Resource returns information about the entity that produced the span. // Resource returns information about the entity that produced the span.
Resource() *resource.Resource Resource() *resource.Resource
// DroppedAttributes returns the number of attributes dropped by the span // DroppedAttributes returns the number of attributes dropped by the span
@ -174,6 +174,17 @@ func (s *recordingSpan) IsRecording() bool {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
return s.isRecording()
}
// isRecording returns if this span is being recorded. If this span has ended
// this will return false.
//
// This method assumes s.mu.Lock is held by the caller.
func (s *recordingSpan) isRecording() bool {
if s == nil {
return false
}
return s.endTime.IsZero() return s.endTime.IsZero()
} }
@ -182,11 +193,15 @@ func (s *recordingSpan) IsRecording() bool {
// included in the set status when the code is for an error. If this span is // included in the set status when the code is for an error. If this span is
// not being recorded than this method does nothing. // not being recorded than this method does nothing.
func (s *recordingSpan) SetStatus(code codes.Code, description string) { func (s *recordingSpan) SetStatus(code codes.Code, description string) {
if !s.IsRecording() { if s == nil {
return return
} }
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
if !s.isRecording() {
return
}
if s.status.Code > code { if s.status.Code > code {
return return
} }
@ -210,12 +225,15 @@ func (s *recordingSpan) SetStatus(code codes.Code, description string) {
// attributes the span is configured to have, the last added attributes will // attributes the span is configured to have, the last added attributes will
// be dropped. // be dropped.
func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
if !s.IsRecording() { if s == nil || len(attributes) == 0 {
return return
} }
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
if !s.isRecording() {
return
}
limit := s.tracer.provider.spanLimits.AttributeCountLimit limit := s.tracer.provider.spanLimits.AttributeCountLimit
if limit == 0 { if limit == 0 {
@ -233,7 +251,7 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
// Otherwise, add without deduplication. When attributes are read they // Otherwise, add without deduplication. When attributes are read they
// will be deduplicated, optimizing the operation. // will be deduplicated, optimizing the operation.
s.attributes = slices.Grow(s.attributes, len(s.attributes)+len(attributes)) s.attributes = slices.Grow(s.attributes, len(attributes))
for _, a := range attributes { for _, a := range attributes {
if !a.Valid() { if !a.Valid() {
// Drop all invalid attributes. // Drop all invalid attributes.
@ -280,13 +298,17 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
// Do not set a capacity when creating this map. Benchmark testing has // Do not set a capacity when creating this map. Benchmark testing has
// shown this to only add unused memory allocations in general use. // shown this to only add unused memory allocations in general use.
exists := make(map[attribute.Key]int) exists := make(map[attribute.Key]int, len(s.attributes))
s.dedupeAttrsFromRecord(&exists) s.dedupeAttrsFromRecord(exists)
// Now that s.attributes is deduplicated, adding unique attributes up to // Now that s.attributes is deduplicated, adding unique attributes up to
// the capacity of s will not over allocate s.attributes. // the capacity of s will not over allocate s.attributes.
sum := len(attrs) + len(s.attributes)
s.attributes = slices.Grow(s.attributes, min(sum, limit)) // max size = limit
maxCap := min(len(attrs)+len(s.attributes), limit)
if cap(s.attributes) < maxCap {
s.attributes = slices.Grow(s.attributes, maxCap-cap(s.attributes))
}
for _, a := range attrs { for _, a := range attrs {
if !a.Valid() { if !a.Valid() {
// Drop all invalid attributes. // Drop all invalid attributes.
@ -296,6 +318,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
if idx, ok := exists[a.Key]; ok { if idx, ok := exists[a.Key]; ok {
// Perform all updates before dropping, even when at capacity. // Perform all updates before dropping, even when at capacity.
a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
s.attributes[idx] = a s.attributes[idx] = a
continue continue
} }
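The two attribute-capacity hunks above lean on the semantics of slices.Grow from the standard library: its second argument is a number of additional elements to reserve room for, not a target capacity, which is why the new code grows only by maxCap-cap(s.attributes) and only when the current capacity falls short of the limit-bounded maximum. A standalone sketch of just that stdlib behaviour, with illustrative values rather than the SDK's:

package main

import (
	"fmt"
	"slices"
)

func main() {
	attrs := make([]string, 2, 2)

	// slices.Grow(s, n) reserves capacity for at least n more elements
	// beyond len(s); it changes neither the length nor the contents.
	attrs = slices.Grow(attrs, 5)
	fmt.Println(len(attrs), cap(attrs) >= 2+5) // 2 true

	// Appends that stay within the reserved capacity do not reallocate.
	attrs = append(attrs, "a", "b", "c", "d", "e")
	fmt.Println(len(attrs)) // 7
}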
@ -386,9 +409,10 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
// the span's duration in case some operation below takes a while. // the span's duration in case some operation below takes a while.
et := monotonicEndTime(s.startTime) et := monotonicEndTime(s.startTime)
// Do relative expensive check now that we have an end time and see if we // Lock the span now that we have an end time and see if we need to do any more processing.
// need to do any more processing. s.mu.Lock()
if !s.IsRecording() { if !s.isRecording() {
s.mu.Unlock()
return return
} }
@ -413,10 +437,11 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
} }
if s.executionTracerTaskEnd != nil { if s.executionTracerTaskEnd != nil {
s.mu.Unlock()
s.executionTracerTaskEnd() s.executionTracerTaskEnd()
s.mu.Lock()
} }
s.mu.Lock()
// Setting endTime to non-zero marks the span as ended and not recording. // Setting endTime to non-zero marks the span as ended and not recording.
if config.Timestamp().IsZero() { if config.Timestamp().IsZero() {
s.endTime = et s.endTime = et
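In the End hunk above, s.mu is now held from the recording check onward, but it is released around executionTracerTaskEnd and re-acquired afterwards, so external callback code never runs while the span's mutex is held. A generic sketch of that unlock-around-callback shape, with hypothetical names and not the SDK implementation:

package main

import (
	"fmt"
	"sync"
	"time"
)

// task is a stand-in type; onEnd plays the role of the execution tracer
// callback that must not run while the mutex is held.
type task struct {
	mu      sync.Mutex
	endTime time.Time
	onEnd   func()
}

func (t *task) End() {
	t.mu.Lock()
	if !t.endTime.IsZero() { // already ended; nothing to do
		t.mu.Unlock()
		return
	}

	if t.onEnd != nil {
		// Drop the lock while running external code so the callback can
		// call back into methods that take t.mu without deadlocking.
		t.mu.Unlock()
		t.onEnd()
		t.mu.Lock()
	}

	t.endTime = time.Now() // marks the task as ended
	t.mu.Unlock()
}

func main() {
	t := &task{onEnd: func() { fmt.Println("callback ran outside the lock") }}
	t.End()
	fmt.Println(!t.endTime.IsZero()) // true
}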
@ -450,7 +475,13 @@ func monotonicEndTime(start time.Time) time.Time {
// does not change the Span status. If this span is not being recorded or err is nil // does not change the Span status. If this span is not being recorded or err is nil
// then this method does nothing. // then this method does nothing.
func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
if s == nil || err == nil || !s.IsRecording() { if s == nil || err == nil {
return
}
s.mu.Lock()
defer s.mu.Unlock()
if !s.isRecording() {
return return
} }
@ -486,14 +517,23 @@ func recordStackTrace() string {
} }
// AddEvent adds an event with the provided name and options. If this span is // AddEvent adds an event with the provided name and options. If this span is
// not being recorded than this method does nothing. // not being recorded then this method does nothing.
func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) { func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) {
if !s.IsRecording() { if s == nil {
return
}
s.mu.Lock()
defer s.mu.Unlock()
if !s.isRecording() {
return return
} }
s.addEvent(name, o...) s.addEvent(name, o...)
} }
// addEvent adds an event with the provided name and options.
//
// This method assumes s.mu.Lock is held by the caller.
func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) { func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
c := trace.NewEventConfig(o...) c := trace.NewEventConfig(o...)
e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()} e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()}
@ -510,20 +550,21 @@ func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
e.Attributes = e.Attributes[:limit] e.Attributes = e.Attributes[:limit]
} }
s.mu.Lock()
s.events.add(e) s.events.add(e)
s.mu.Unlock()
} }
// SetName sets the name of this span. If this span is not being recorded then // SetName sets the name of this span. If this span is not being recorded then
// this method does nothing. // this method does nothing.
func (s *recordingSpan) SetName(name string) { func (s *recordingSpan) SetName(name string) {
if !s.IsRecording() { if s == nil {
return return
} }
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
if !s.isRecording() {
return
}
s.name = name s.name = name
} }
@ -579,23 +620,23 @@ func (s *recordingSpan) Attributes() []attribute.KeyValue {
func (s *recordingSpan) dedupeAttrs() { func (s *recordingSpan) dedupeAttrs() {
// Do not set a capacity when creating this map. Benchmark testing has // Do not set a capacity when creating this map. Benchmark testing has
// shown this to only add unused memory allocations in general use. // shown this to only add unused memory allocations in general use.
exists := make(map[attribute.Key]int) exists := make(map[attribute.Key]int, len(s.attributes))
s.dedupeAttrsFromRecord(&exists) s.dedupeAttrsFromRecord(exists)
} }
// dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity // dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity
// using record as the record of unique attribute keys to their index. // using record as the record of unique attribute keys to their index.
// //
// This method assumes s.mu.Lock is held by the caller. // This method assumes s.mu.Lock is held by the caller.
func (s *recordingSpan) dedupeAttrsFromRecord(record *map[attribute.Key]int) { func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) {
// Use the fact that slices share the same backing array. // Use the fact that slices share the same backing array.
unique := s.attributes[:0] unique := s.attributes[:0]
for _, a := range s.attributes { for _, a := range s.attributes {
if idx, ok := (*record)[a.Key]; ok { if idx, ok := record[a.Key]; ok {
unique[idx] = a unique[idx] = a
} else { } else {
unique = append(unique, a) unique = append(unique, a)
(*record)[a.Key] = len(unique) - 1 record[a.Key] = len(unique) - 1
} }
} }
// s.attributes have element types of attribute.KeyValue. These types are // s.attributes have element types of attribute.KeyValue. These types are
@ -642,7 +683,7 @@ func (s *recordingSpan) InstrumentationScope() instrumentation.Scope {
// InstrumentationLibrary returns the instrumentation.Library associated with // InstrumentationLibrary returns the instrumentation.Library associated with
// the Tracer that created this span. // the Tracer that created this span.
func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
return s.tracer.instrumentationScope return s.tracer.instrumentationScope
@ -657,7 +698,7 @@ func (s *recordingSpan) Resource() *resource.Resource {
} }
func (s *recordingSpan) AddLink(link trace.Link) { func (s *recordingSpan) AddLink(link trace.Link) {
if !s.IsRecording() { if s == nil {
return return
} }
if !link.SpanContext.IsValid() && len(link.Attributes) == 0 && if !link.SpanContext.IsValid() && len(link.Attributes) == 0 &&
@ -665,6 +706,12 @@ func (s *recordingSpan) AddLink(link trace.Link) {
return return
} }
s.mu.Lock()
defer s.mu.Unlock()
if !s.isRecording() {
return
}
l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes} l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes}
// Discard attributes over limit. // Discard attributes over limit.
@ -678,9 +725,7 @@ func (s *recordingSpan) AddLink(link trace.Link) {
l.Attributes = l.Attributes[:limit] l.Attributes = l.Attributes[:limit]
} }
s.mu.Lock()
s.links.add(l) s.links.add(l)
s.mu.Unlock()
} }
// DroppedAttributes returns the number of attributes dropped by the span // DroppedAttributes returns the number of attributes dropped by the span
@ -755,12 +800,16 @@ func (s *recordingSpan) snapshot() ReadOnlySpan {
} }
func (s *recordingSpan) addChild() { func (s *recordingSpan) addChild() {
if !s.IsRecording() { if s == nil {
return return
} }
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock()
if !s.isRecording() {
return
}
s.childSpanCount++ s.childSpanCount++
s.mu.Unlock()
} }
func (*recordingSpan) private() {} func (*recordingSpan) private() {}
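A smaller change threaded through the span.go diff above: dedupeAttrsFromRecord now takes map[attribute.Key]int instead of *map[attribute.Key]int. Go map values are handles to a shared underlying table, so a callee can insert into a map it receives by value and the caller still observes the writes; the pointer indirection added nothing. A tiny generic demonstration, not SDK code:

package main

import "fmt"

// record inserts keys into the map it receives by value. No *map is needed:
// a map value refers to the same underlying hash table as the caller's.
func record(seen map[string]int, keys ...string) {
	for _, k := range keys {
		if _, ok := seen[k]; !ok {
			seen[k] = len(seen)
		}
	}
}

func main() {
	seen := make(map[string]int)
	record(seen, "http.method", "http.route", "http.method")
	fmt.Println(len(seen), seen["http.route"]) // 2 1
}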

View File

@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk"
// Version is the current release version of the OpenTelemetry SDK in use. // Version is the current release version of the OpenTelemetry SDK in use.
func Version() string { func Version() string {
return "1.28.0" return "1.31.0"
} }

View File

@ -1,3 +0,0 @@
# Semconv v1.24.0
[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.24.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.24.0)

File diff suppressed because it is too large Load Diff

View File

@ -1,9 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package semconv implements OpenTelemetry semantic conventions.
//
// OpenTelemetry semantic conventions are agreed standardized naming
// patterns for OpenTelemetry things. This package represents the v1.24.0
// version of the OpenTelemetry semantic conventions.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"

View File

@ -1,200 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated from semantic convention specification. DO NOT EDIT.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
import "go.opentelemetry.io/otel/attribute"
// This event represents an occurrence of a lifecycle transition on the iOS
// platform.
const (
// IosStateKey is the attribute Key conforming to the "ios.state" semantic
// conventions. It represents the this attribute represents the state the
// application has transitioned into at the occurrence of the event.
//
// Type: Enum
// RequirementLevel: Required
// Stability: experimental
// Note: The iOS lifecycle states are defined in the [UIApplicationDelegate
// documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902),
// and from which the `OS terminology` column values are derived.
IosStateKey = attribute.Key("ios.state")
)
var (
// The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive`
IosStateActive = IosStateKey.String("active")
// The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive`
IosStateInactive = IosStateKey.String("inactive")
// The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground`
IosStateBackground = IosStateKey.String("background")
// The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground`
IosStateForeground = IosStateKey.String("foreground")
// The app is about to terminate. Associated with UIKit notification `applicationWillTerminate`
IosStateTerminate = IosStateKey.String("terminate")
)
// This event represents an occurrence of a lifecycle transition on the Android
// platform.
const (
// AndroidStateKey is the attribute Key conforming to the "android.state"
// semantic conventions. It represents the this attribute represents the
// state the application has transitioned into at the occurrence of the
// event.
//
// Type: Enum
// RequirementLevel: Required
// Stability: experimental
// Note: The Android lifecycle states are defined in [Activity lifecycle
// callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
// and from which the `OS identifiers` are derived.
AndroidStateKey = attribute.Key("android.state")
)
var (
// Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
AndroidStateCreated = AndroidStateKey.String("created")
// Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
AndroidStateBackground = AndroidStateKey.String("background")
// Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
AndroidStateForeground = AndroidStateKey.String("foreground")
)
// This semantic convention defines the attributes used to represent a feature
// flag evaluation as an event.
const (
// FeatureFlagKeyKey is the attribute Key conforming to the
// "feature_flag.key" semantic conventions. It represents the unique
// identifier of the feature flag.
//
// Type: string
// RequirementLevel: Required
// Stability: experimental
// Examples: 'logo-color'
FeatureFlagKeyKey = attribute.Key("feature_flag.key")
// FeatureFlagProviderNameKey is the attribute Key conforming to the
// "feature_flag.provider_name" semantic conventions. It represents the
// name of the service provider that performs the flag evaluation.
//
// Type: string
// RequirementLevel: Recommended
// Stability: experimental
// Examples: 'Flag Manager'
FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
// FeatureFlagVariantKey is the attribute Key conforming to the
// "feature_flag.variant" semantic conventions. It represents the sHOULD be
// a semantic identifier for a value. If one is unavailable, a stringified
// version of the value can be used.
//
// Type: string
// RequirementLevel: Recommended
// Stability: experimental
// Examples: 'red', 'true', 'on'
// Note: A semantic identifier, commonly referred to as a variant, provides
// a means
// for referring to a value without including the value itself. This can
// provide additional context for understanding the meaning behind a value.
// For example, the variant `red` maybe be used for the value `#c05543`.
//
// A stringified version of the value can be used in situations where a
// semantic identifier is unavailable. String representation of the value
// should be determined by the implementer.
FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
)
// FeatureFlagKey returns an attribute KeyValue conforming to the
// "feature_flag.key" semantic conventions. It represents the unique identifier
// of the feature flag.
func FeatureFlagKey(val string) attribute.KeyValue {
return FeatureFlagKeyKey.String(val)
}
// FeatureFlagProviderName returns an attribute KeyValue conforming to the
// "feature_flag.provider_name" semantic conventions. It represents the name of
// the service provider that performs the flag evaluation.
func FeatureFlagProviderName(val string) attribute.KeyValue {
return FeatureFlagProviderNameKey.String(val)
}
// FeatureFlagVariant returns an attribute KeyValue conforming to the
// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
// semantic identifier for a value. If one is unavailable, a stringified
// version of the value can be used.
func FeatureFlagVariant(val string) attribute.KeyValue {
return FeatureFlagVariantKey.String(val)
}
// RPC received/sent message.
const (
// MessageCompressedSizeKey is the attribute Key conforming to the
// "message.compressed_size" semantic conventions. It represents the
// compressed size of the message in bytes.
//
// Type: int
// RequirementLevel: Optional
// Stability: experimental
MessageCompressedSizeKey = attribute.Key("message.compressed_size")
// MessageIDKey is the attribute Key conforming to the "message.id"
// semantic conventions. It represents the mUST be calculated as two
// different counters starting from `1` one for sent messages and one for
// received message.
//
// Type: int
// RequirementLevel: Optional
// Stability: experimental
// Note: This way we guarantee that the values will be consistent between
// different implementations.
MessageIDKey = attribute.Key("message.id")
// MessageTypeKey is the attribute Key conforming to the "message.type"
// semantic conventions. It represents the whether this is a received or
// sent message.
//
// Type: Enum
// RequirementLevel: Optional
// Stability: experimental
MessageTypeKey = attribute.Key("message.type")
// MessageUncompressedSizeKey is the attribute Key conforming to the
// "message.uncompressed_size" semantic conventions. It represents the
// uncompressed size of the message in bytes.
//
// Type: int
// RequirementLevel: Optional
// Stability: experimental
MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
)
var (
// sent
MessageTypeSent = MessageTypeKey.String("SENT")
// received
MessageTypeReceived = MessageTypeKey.String("RECEIVED")
)
// MessageCompressedSize returns an attribute KeyValue conforming to the
// "message.compressed_size" semantic conventions. It represents the compressed
// size of the message in bytes.
func MessageCompressedSize(val int) attribute.KeyValue {
return MessageCompressedSizeKey.Int(val)
}
// MessageID returns an attribute KeyValue conforming to the "message.id"
// semantic conventions. It represents the mUST be calculated as two different
// counters starting from `1` one for sent messages and one for received
// message.
func MessageID(val int) attribute.KeyValue {
return MessageIDKey.Int(val)
}
// MessageUncompressedSize returns an attribute KeyValue conforming to the
// "message.uncompressed_size" semantic conventions. It represents the
// uncompressed size of the message in bytes.
func MessageUncompressedSize(val int) attribute.KeyValue {
return MessageUncompressedSizeKey.Int(val)
}
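The removed constructors above are thin wrappers that pair a fixed semantic-convention key with a caller-supplied value. If anything still depends on the dropped semconv v1.24.0 package, equivalent attributes can be built directly with the attribute package; a sketch using key strings taken from the removed file:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Equivalent of the removed FeatureFlagKey("logo-color") constructor: a
	// string attribute under the "feature_flag.key" convention key.
	kv := attribute.Key("feature_flag.key").String("logo-color")

	// attribute.String is shorthand for the same construction.
	kv2 := attribute.String("feature_flag.variant", "red")

	fmt.Println(kv.Key, kv.Value.AsString())   // feature_flag.key logo-color
	fmt.Println(kv2.Key, kv2.Value.AsString()) // feature_flag.variant red
}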

View File

@ -1,9 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
const (
// ExceptionEventName is the name of the Span event representing an exception.
ExceptionEventName = "exception"
)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,9 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
// SchemaURL is the schema URL that matches the version of the semantic conventions
// that this package defines. Semconv packages starting from v1.4.0 must declare
// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
const SchemaURL = "https://opentelemetry.io/schemas/1.24.0"

File diff suppressed because it is too large Load Diff

View File

@ -22,7 +22,7 @@ func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Cont
return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) return ContextWithSpan(parent, nonRecordingSpan{sc: sc})
} }
// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly // ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly
// as a remote SpanContext and as the current Span. The Span implementation // as a remote SpanContext and as the current Span. The Span implementation
// that wraps rsc is non-recording and performs no operations other than to // that wraps rsc is non-recording and performs no operations other than to
// return rsc as the SpanContext from the SpanContext method. // return rsc as the SpanContext from the SpanContext method.

View File

@ -96,7 +96,7 @@ can embed the API interface directly.
This option is not recommended. It will lead to publishing packages that This option is not recommended. It will lead to publishing packages that
contain runtime panics when users update to newer versions of contain runtime panics when users update to newer versions of
[go.opentelemetry.io/otel/trace], which may be done with a trasitive [go.opentelemetry.io/otel/trace], which may be done with a transitive
dependency. dependency.
Finally, an author can embed another implementation in theirs. The embedded Finally, an author can embed another implementation in theirs. The embedded

59
vendor/go.opentelemetry.io/otel/trace/provider.go generated vendored Normal file
View File

@ -0,0 +1,59 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/trace"
import "go.opentelemetry.io/otel/trace/embedded"
// TracerProvider provides Tracers that are used by instrumentation code to
// trace computational workflows.
//
// A TracerProvider is the collection destination of all Spans from Tracers it
// provides, it represents a unique telemetry collection pipeline. How that
// pipeline is defined, meaning how those Spans are collected, processed, and
// where they are exported, depends on its implementation. Instrumentation
// authors do not need to define this implementation, rather just use the
// provided Tracers to instrument code.
//
// Commonly, instrumentation code will accept a TracerProvider implementation
// at runtime from its users or it can simply use the globally registered one
// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type TracerProvider interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.TracerProvider
// Tracer returns a unique Tracer scoped to be used by instrumentation code
// to trace computational workflows. The scope and identity of that
// instrumentation code is uniquely defined by the name and options passed.
//
// The passed name needs to uniquely identify instrumentation code.
// Therefore, it is recommended that name is the Go package name of the
// library providing instrumentation (note: not the code being
// instrumented). Instrumentation libraries can have multiple versions,
// therefore, the WithInstrumentationVersion option should be used to
// distinguish these different codebases. Additionally, instrumentation
// libraries may sometimes use traces to communicate different domains of
// workflow data (i.e. using spans to communicate workflow events only). If
// this is the case, the WithScopeAttributes option should be used to
// uniquely identify Tracers that handle the different domains of workflow
// data.
//
// If the same name and options are passed multiple times, the same Tracer
// will be returned (it is up to the implementation if this will be the
// same underlying instance of that Tracer or not). It is not necessary to
// call this multiple times with the same name and options to get an
// up-to-date Tracer. All implementations will ensure any TracerProvider
// configuration changes are propagated to all provided Tracers.
//
// If name is empty, then an implementation defined default name will be
// used instead.
//
// This method is safe to call concurrently.
Tracer(name string, options ...TracerOption) Tracer
}
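The documentation above describes how instrumentation obtains a named Tracer from whatever TracerProvider it is handed. A short usage sketch against the public API; the instrumentation name and version are illustrative only:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// With no SDK installed the globally registered provider is a no-op,
	// so this is safe to run but records nothing.
	tp := otel.GetTracerProvider()

	tracer := tp.Tracer(
		"example.com/myapp/instrumentation", // names the instrumenting package
		trace.WithInstrumentationVersion("0.1.0"),
	)

	_, span := tracer.Start(context.Background(), "startup")
	span.End()
}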

177
vendor/go.opentelemetry.io/otel/trace/span.go generated vendored Normal file
View File

@ -0,0 +1,177 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/trace"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace/embedded"
)
// Span is the individual component of a trace. It represents a single named
// and timed operation of a workflow that is traced. A Tracer is used to
// create a Span and it is then up to the operation the Span represents to
// properly end the Span when the operation itself ends.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Span interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Span
// End completes the Span. The Span is considered complete and ready to be
// delivered through the rest of the telemetry pipeline after this method
// is called. Therefore, updates to the Span are not allowed after this
// method has been called.
End(options ...SpanEndOption)
// AddEvent adds an event with the provided name and options.
AddEvent(name string, options ...EventOption)
// AddLink adds a link.
// Adding links at span creation using WithLinks is preferred to calling AddLink
// later, for contexts that are available during span creation, because head
// sampling decisions can only consider information present during span creation.
AddLink(link Link)
// IsRecording returns the recording state of the Span. It will return
// true if the Span is active and events can be recorded.
IsRecording() bool
// RecordError will record err as an exception span event for this span. An
// additional call to SetStatus is required if the Status of the Span should
// be set to Error, as this method does not change the Span status. If this
// span is not being recorded or err is nil then this method does nothing.
RecordError(err error, options ...EventOption)
// SpanContext returns the SpanContext of the Span. The returned SpanContext
// is usable even after the End method has been called for the Span.
SpanContext() SpanContext
// SetStatus sets the status of the Span in the form of a code and a
// description, provided the status hasn't already been set to a higher
// value before (OK > Error > Unset). The description is only included in a
// status when the code is for an error.
SetStatus(code codes.Code, description string)
// SetName sets the Span name.
SetName(name string)
// SetAttributes sets kv as attributes of the Span. If a key from kv
// already exists for an attribute of the Span it will be overwritten with
// the value contained in kv.
SetAttributes(kv ...attribute.KeyValue)
// TracerProvider returns a TracerProvider that can be used to generate
// additional Spans on the same telemetry pipeline as the current Span.
TracerProvider() TracerProvider
}
// Link is the relationship between two Spans. The relationship can be within
// the same Trace or across different Traces.
//
// For example, a Link is used in the following situations:
//
// 1. Batch Processing: A batch of operations may contain operations
// associated with one or more traces/spans. Since there can only be one
// parent SpanContext, a Link is used to keep reference to the
// SpanContext of all operations in the batch.
// 2. Public Endpoint: A SpanContext for an incoming client request on a
// public endpoint should be considered untrusted. In such a case, a new
// trace with its own identity and sampling decision needs to be created,
// but this new trace needs to be related to the original trace in some
// form. A Link is used to keep reference to the original SpanContext and
// track the relationship.
type Link struct {
// SpanContext of the linked Span.
SpanContext SpanContext
// Attributes describe the aspects of the link.
Attributes []attribute.KeyValue
}
// LinkFromContext returns a link encapsulating the SpanContext in the provided
// ctx.
func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
return Link{
SpanContext: SpanContextFromContext(ctx),
Attributes: attrs,
}
}
// SpanKind is the role a Span plays in a Trace.
type SpanKind int
// As a convenience, these match the proto definition, see
// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
//
// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
// to coerce a span kind to a valid value.
const (
// SpanKindUnspecified is an unspecified SpanKind and is not a valid
// SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
// if it is received.
SpanKindUnspecified SpanKind = 0
// SpanKindInternal is a SpanKind for a Span that represents an internal
// operation within an application.
SpanKindInternal SpanKind = 1
// SpanKindServer is a SpanKind for a Span that represents the operation
// of handling a request from a client.
SpanKindServer SpanKind = 2
// SpanKindClient is a SpanKind for a Span that represents the operation
// of client making a request to a server.
SpanKindClient SpanKind = 3
// SpanKindProducer is a SpanKind for a Span that represents the operation
// of a producer sending a message to a message broker. Unlike
// SpanKindClient and SpanKindServer, there is often no direct
// relationship between this kind of Span and a SpanKindConsumer kind. A
// SpanKindProducer Span will end once the message is accepted by the
// message broker which might not overlap with the processing of that
// message.
SpanKindProducer SpanKind = 4
// SpanKindConsumer is a SpanKind for a Span that represents the operation
// of a consumer receiving a message from a message broker. Like
// SpanKindProducer Spans, there is often no direct relationship between
// this Span and the Span that produced the message.
SpanKindConsumer SpanKind = 5
)
// ValidateSpanKind returns a valid span kind value. This will coerce
// invalid values into the default value, SpanKindInternal.
func ValidateSpanKind(spanKind SpanKind) SpanKind {
switch spanKind {
case SpanKindInternal,
SpanKindServer,
SpanKindClient,
SpanKindProducer,
SpanKindConsumer:
// valid
return spanKind
default:
return SpanKindInternal
}
}
// String returns the specified name of the SpanKind in lower-case.
func (sk SpanKind) String() string {
switch sk {
case SpanKindInternal:
return "internal"
case SpanKindServer:
return "server"
case SpanKindClient:
return "client"
case SpanKindProducer:
return "producer"
case SpanKindConsumer:
return "consumer"
default:
return "unspecified"
}
}
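The Span interface above is what instrumented code interacts with once a span exists. A small sketch exercising its main methods through the no-op implementation behind the global API; the instrumentation name, attribute key, and failing work function are placeholders:

package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

func work() error { return errors.New("boom") } // placeholder failure

// handle traces one unit of work; itemCtx stands in for a batch item whose
// span we only want to link to, not parent from.
func handle(ctx, itemCtx context.Context) error {
	tracer := otel.Tracer("example.com/handler")
	ctx, span := tracer.Start(ctx, "handle")
	defer span.End()

	span.SetAttributes(attribute.String("peer.service", "example"))

	// Links known up front are better passed via WithLinks at Start; AddLink
	// covers span contexts only discovered after the span exists.
	span.AddLink(trace.LinkFromContext(itemCtx))

	if err := work(); err != nil {
		span.RecordError(err)
		span.SetStatus(codes.Error, "handle failed")
		return err
	}
	span.SetStatus(codes.Ok, "")
	return nil
}

func main() {
	_ = handle(context.Background(), context.Background())
}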

View File

@ -5,13 +5,8 @@ package trace // import "go.opentelemetry.io/otel/trace"
import ( import (
"bytes" "bytes"
"context"
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace/embedded"
) )
const ( const (
@ -326,247 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) {
Remote: sc.remote, Remote: sc.remote,
}) })
} }
// Span is the individual component of a trace. It represents a single named
// and timed operation of a workflow that is traced. A Tracer is used to
// create a Span and it is then up to the operation the Span represents to
// properly end the Span when the operation itself ends.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Span interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Span
// End completes the Span. The Span is considered complete and ready to be
// delivered through the rest of the telemetry pipeline after this method
// is called. Therefore, updates to the Span are not allowed after this
// method has been called.
End(options ...SpanEndOption)
// AddEvent adds an event with the provided name and options.
AddEvent(name string, options ...EventOption)
// AddLink adds a link.
// Adding links at span creation using WithLinks is preferred to calling AddLink
// later, for contexts that are available during span creation, because head
// sampling decisions can only consider information present during span creation.
AddLink(link Link)
// IsRecording returns the recording state of the Span. It will return
// true if the Span is active and events can be recorded.
IsRecording() bool
// RecordError will record err as an exception span event for this span. An
// additional call to SetStatus is required if the Status of the Span should
// be set to Error, as this method does not change the Span status. If this
// span is not being recorded or err is nil then this method does nothing.
RecordError(err error, options ...EventOption)
// SpanContext returns the SpanContext of the Span. The returned SpanContext
// is usable even after the End method has been called for the Span.
SpanContext() SpanContext
// SetStatus sets the status of the Span in the form of a code and a
// description, provided the status hasn't already been set to a higher
// value before (OK > Error > Unset). The description is only included in a
// status when the code is for an error.
SetStatus(code codes.Code, description string)
// SetName sets the Span name.
SetName(name string)
// SetAttributes sets kv as attributes of the Span. If a key from kv
// already exists for an attribute of the Span it will be overwritten with
// the value contained in kv.
SetAttributes(kv ...attribute.KeyValue)
// TracerProvider returns a TracerProvider that can be used to generate
// additional Spans on the same telemetry pipeline as the current Span.
TracerProvider() TracerProvider
}
// Link is the relationship between two Spans. The relationship can be within
// the same Trace or across different Traces.
//
// For example, a Link is used in the following situations:
//
// 1. Batch Processing: A batch of operations may contain operations
// associated with one or more traces/spans. Since there can only be one
// parent SpanContext, a Link is used to keep reference to the
// SpanContext of all operations in the batch.
// 2. Public Endpoint: A SpanContext for an incoming client request on a
// public endpoint should be considered untrusted. In such a case, a new
// trace with its own identity and sampling decision needs to be created,
// but this new trace needs to be related to the original trace in some
// form. A Link is used to keep reference to the original SpanContext and
// track the relationship.
type Link struct {
// SpanContext of the linked Span.
SpanContext SpanContext
// Attributes describe the aspects of the link.
Attributes []attribute.KeyValue
}
// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx.
func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
return Link{
SpanContext: SpanContextFromContext(ctx),
Attributes: attrs,
}
}
// SpanKind is the role a Span plays in a Trace.
type SpanKind int
// As a convenience, these match the proto definition, see
// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
//
// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
// to coerce a span kind to a valid value.
const (
// SpanKindUnspecified is an unspecified SpanKind and is not a valid
// SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
// if it is received.
SpanKindUnspecified SpanKind = 0
// SpanKindInternal is a SpanKind for a Span that represents an internal
// operation within an application.
SpanKindInternal SpanKind = 1
// SpanKindServer is a SpanKind for a Span that represents the operation
// of handling a request from a client.
SpanKindServer SpanKind = 2
// SpanKindClient is a SpanKind for a Span that represents the operation
// of client making a request to a server.
SpanKindClient SpanKind = 3
// SpanKindProducer is a SpanKind for a Span that represents the operation
// of a producer sending a message to a message broker. Unlike
// SpanKindClient and SpanKindServer, there is often no direct
// relationship between this kind of Span and a SpanKindConsumer kind. A
// SpanKindProducer Span will end once the message is accepted by the
// message broker which might not overlap with the processing of that
// message.
SpanKindProducer SpanKind = 4
// SpanKindConsumer is a SpanKind for a Span that represents the operation
// of a consumer receiving a message from a message broker. Like
// SpanKindProducer Spans, there is often no direct relationship between
// this Span and the Span that produced the message.
SpanKindConsumer SpanKind = 5
)
// ValidateSpanKind returns a valid span kind value. This will coerce
// invalid values into the default value, SpanKindInternal.
func ValidateSpanKind(spanKind SpanKind) SpanKind {
switch spanKind {
case SpanKindInternal,
SpanKindServer,
SpanKindClient,
SpanKindProducer,
SpanKindConsumer:
// valid
return spanKind
default:
return SpanKindInternal
}
}
// String returns the specified name of the SpanKind in lower-case.
func (sk SpanKind) String() string {
switch sk {
case SpanKindInternal:
return "internal"
case SpanKindServer:
return "server"
case SpanKindClient:
return "client"
case SpanKindProducer:
return "producer"
case SpanKindConsumer:
return "consumer"
default:
return "unspecified"
}
}
// Tracer is the creator of Spans.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Tracer interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Tracer
// Start creates a span and a context.Context containing the newly-created span.
//
// If the context.Context provided in `ctx` contains a Span then the newly-created
// Span will be a child of that span, otherwise it will be a root span. This behavior
// can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
// newly-created Span to be a root span even if `ctx` contains a Span.
//
// When creating a Span it is recommended to provide all known span attributes using
// the `WithAttributes()` SpanOption as samplers will only have access to the
// attributes provided when a Span is created.
//
// Any Span that is created MUST also be ended. This is the responsibility of the user.
// Implementations of this API may leak memory or other resources if Spans are not ended.
Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
}
// TracerProvider provides Tracers that are used by instrumentation code to
// trace computational workflows.
//
// A TracerProvider is the collection destination of all Spans from Tracers it
// provides, it represents a unique telemetry collection pipeline. How that
// pipeline is defined, meaning how those Spans are collected, processed, and
// where they are exported, depends on its implementation. Instrumentation
// authors do not need to define this implementation, rather just use the
// provided Tracers to instrument code.
//
// Commonly, instrumentation code will accept a TracerProvider implementation
// at runtime from its users or it can simply use the globally registered one
// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type TracerProvider interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.TracerProvider
// Tracer returns a unique Tracer scoped to be used by instrumentation code
// to trace computational workflows. The scope and identity of that
// instrumentation code is uniquely defined by the name and options passed.
//
// The passed name needs to uniquely identify instrumentation code.
// Therefore, it is recommended that name is the Go package name of the
// library providing instrumentation (note: not the code being
// instrumented). Instrumentation libraries can have multiple versions,
// therefore, the WithInstrumentationVersion option should be used to
// distinguish these different codebases. Additionally, instrumentation
// libraries may sometimes use traces to communicate different domains of
// workflow data (i.e. using spans to communicate workflow events only). If
// this is the case, the WithScopeAttributes option should be used to
// uniquely identify Tracers that handle the different domains of workflow
// data.
//
// If the same name and options are passed multiple times, the same Tracer
// will be returned (it is up to the implementation if this will be the
// same underlying instance of that Tracer or not). It is not necessary to
// call this multiple times with the same name and options to get an
// up-to-date Tracer. All implementations will ensure any TracerProvider
// configuration changes are propagated to all provided Tracers.
//
// If name is empty, then an implementation defined default name will be
// used instead.
//
// This method is safe to call concurrently.
Tracer(name string, options ...TracerOption) Tracer
}

37
vendor/go.opentelemetry.io/otel/trace/tracer.go generated vendored Normal file
View File

@ -0,0 +1,37 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/trace"
import (
"context"
"go.opentelemetry.io/otel/trace/embedded"
)
// Tracer is the creator of Spans.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Tracer interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Tracer
// Start creates a span and a context.Context containing the newly-created span.
//
// If the context.Context provided in `ctx` contains a Span then the newly-created
// Span will be a child of that span, otherwise it will be a root span. This behavior
// can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
// newly-created Span to be a root span even if `ctx` contains a Span.
//
// When creating a Span it is recommended to provide all known span attributes using
// the `WithAttributes()` SpanOption as samplers will only have access to the
// attributes provided when a Span is created.
//
// Any Span that is created MUST also be ended. This is the responsibility of the user.
// Implementations of this API may leak memory or other resources if Spans are not ended.
Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
}
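The Start contract documented above (child of the current span by default, attributes best supplied up front so samplers can see them, and every started span must be ended) in a compact form; the tracer name and attribute are illustrative:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func doWork(ctx context.Context) {
	tracer := otel.Tracer("example.com/worker")

	// Attributes supplied at Start are visible to samplers; attributes set
	// later on the span are not part of the sampling decision.
	ctx, span := tracer.Start(ctx, "do-work",
		trace.WithAttributes(attribute.Int("job.items", 42)),
		trace.WithSpanKind(trace.SpanKindInternal),
	)
	// Every started span must be ended, or resources may be leaked.
	defer span.End()

	// Child span: it parents to "do-work" because ctx now carries that span.
	_, child := tracer.Start(ctx, "do-work/step")
	child.End()
}

func main() {
	doWork(context.Background())
}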

Some files were not shown because too many files have changed in this diff Show More