vendor: github.com/prometheus/client_golang v1.22.0

full diff: https://github.com/prometheus/client_golang/compare/v1.20.5...v1.22.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Author:  Sebastiaan van Stijn
Date:    2025-06-12 14:12:33 +02:00
Parent:  f6985b7a27
Commit:  a76643bca3
GPG Key ID: 76698F39D527CE8C
35 changed files with 1025 additions and 282 deletions


@@ -87,9 +87,9 @@ require (
 	github.com/moby/sys/user v0.4.0 // indirect
 	github.com/moby/sys/userns v0.1.0 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
-	github.com/prometheus/client_golang v1.20.5 // indirect
+	github.com/prometheus/client_golang v1.22.0 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.55.0 // indirect
+	github.com/prometheus/common v0.62.0 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect


@@ -224,8 +224,8 @@ github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
-github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
 github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -234,8 +234,8 @@ github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQy
 github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
+github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
 github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=


@@ -0,0 +1,30 @@
// Copyright 2025 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus

// CollectorFunc is a convenient way to implement a Prometheus Collector
// without interface boilerplate.
// This implementation is based on the DescribeByCollect method;
// familiarize yourself with it before using.
type CollectorFunc func(chan<- Metric)

// Collect calls the defined CollectorFunc function with the provided Metrics channel.
func (f CollectorFunc) Collect(ch chan<- Metric) {
	f(ch)
}

// Describe sends the descriptor information using DescribeByCollect.
func (f CollectorFunc) Describe(ch chan<- *Desc) {
	DescribeByCollect(f, ch)
}

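The new CollectorFunc type above lets a plain function act as a Collector, with Describe derived via DescribeByCollect. A minimal usage sketch — the metric name, value, and registry wiring are illustrative, not part of this change:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Hypothetical descriptor; any gauge-style reading works the same way.
	desc := prometheus.NewDesc("queue_depth", "Current queue depth.", nil, nil)

	// CollectorFunc turns this closure into a full Collector; Describe is
	// derived from Collect via DescribeByCollect.
	c := prometheus.CollectorFunc(func(ch chan<- prometheus.Metric) {
		ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 42)
	})

	reg := prometheus.NewRegistry()
	reg.MustRegister(c)

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}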

@@ -189,12 +189,15 @@ func (d *Desc) String() string {
 			fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
 		)
 	}
-	vlStrings := make([]string, 0, len(d.variableLabels.names))
-	for _, vl := range d.variableLabels.names {
-		if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
-			vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
-		} else {
-			vlStrings = append(vlStrings, vl)
+	vlStrings := []string{}
+	if d.variableLabels != nil {
+		vlStrings = make([]string, 0, len(d.variableLabels.names))
+		for _, vl := range d.variableLabels.names {
+			if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
+				vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
+			} else {
+				vlStrings = append(vlStrings, vl)
+			}
 		}
 	}
 	return fmt.Sprintf(


@@ -288,7 +288,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
 }

 func attachOriginalName(desc, origName string) string {
-	return fmt.Sprintf("%s Sourced from %s", desc, origName)
+	return fmt.Sprintf("%s Sourced from %s.", desc, origName)
 }

 // Describe returns all descriptions of the collector.


@@ -14,6 +14,7 @@
 package prometheus

 import (
+	"errors"
 	"fmt"
 	"math"
 	"runtime"
@@ -28,6 +29,11 @@ import (
 	"google.golang.org/protobuf/types/known/timestamppb"
 )

+const (
+	nativeHistogramSchemaMaximum = 8
+	nativeHistogramSchemaMinimum = -4
+)
+
 // nativeHistogramBounds for the frac of observed values. Only relevant for
 // schema > 0. The position in the slice is the schema. (0 is never used, just
 // here for convenience of using the schema directly as the index.)
@@ -330,11 +336,11 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
 // used for the Buckets field of HistogramOpts.
 //
 // The function panics if 'count' is 0 or negative, if 'min' is 0 or negative.
-func ExponentialBucketsRange(min, max float64, count int) []float64 {
+func ExponentialBucketsRange(minBucket, maxBucket float64, count int) []float64 {
 	if count < 1 {
 		panic("ExponentialBucketsRange count needs a positive count")
 	}

-	if min <= 0 {
+	if minBucket <= 0 {
 		panic("ExponentialBucketsRange min needs to be greater than 0")
 	}
@@ -342,12 +348,12 @@ func ExponentialBucketsRange(min, max float64, count int) []float64 {

 	// max = min*growthFactor^(bucketCount-1)
 	// We know max/min and highest bucket. Solve for growthFactor.
-	growthFactor := math.Pow(max/min, 1.0/float64(count-1))
+	growthFactor := math.Pow(maxBucket/minBucket, 1.0/float64(count-1))

 	// Now that we know growthFactor, solve for each bucket.
 	buckets := make([]float64, count)
 	for i := 1; i <= count; i++ {
-		buckets[i-1] = min * math.Pow(growthFactor, float64(i-1))
+		buckets[i-1] = minBucket * math.Pow(growthFactor, float64(i-1))
 	}
 	return buckets
 }
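For reference, the ExponentialBucketsRange change only renames the parameters (min/max would shadow the Go 1.21 builtins); behaviour is unchanged. A small, self-contained sketch of how the generated bounds are typically used — the metric name and range are illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Ten buckets spanning 1ms to 10s; each upper bound is the previous one
	// multiplied by (10/0.001)^(1/9) ≈ 2.78.
	buckets := prometheus.ExponentialBucketsRange(0.001, 10, 10)
	fmt.Println(buckets)

	// Typical use: feed the bounds into a histogram definition.
	_ = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "request_duration_seconds", // illustrative name
		Help:    "Request latency in seconds.",
		Buckets: buckets,
	})
}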
@@ -858,15 +864,35 @@ func (h *histogram) Write(out *dto.Metric) error {

 // findBucket returns the index of the bucket for the provided value, or
 // len(h.upperBounds) for the +Inf bucket.
 func (h *histogram) findBucket(v float64) int {
-	// TODO(beorn7): For small numbers of buckets (<30), a linear search is
-	// slightly faster than the binary search. If we really care, we could
-	// switch from one search strategy to the other depending on the number
-	// of buckets.
-	//
-	// Microbenchmarks (BenchmarkHistogramNoLabels):
-	// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
-	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
-	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+	n := len(h.upperBounds)
+	if n == 0 {
+		return 0
+	}
+
+	// Early exit: if v is less than or equal to the first upper bound, return 0
+	if v <= h.upperBounds[0] {
+		return 0
+	}
+
+	// Early exit: if v is greater than the last upper bound, return len(h.upperBounds)
+	if v > h.upperBounds[n-1] {
+		return n
+	}
+
+	// For small arrays, use simple linear search
+	// "magic number" 35 is result of tests on couple different (AWS and baremetal) servers
+	// see more details here: https://github.com/prometheus/client_golang/pull/1662
+	if n < 35 {
+		for i, bound := range h.upperBounds {
+			if v <= bound {
+				return i
+			}
+		}
+		// If v is greater than all upper bounds, return len(h.upperBounds)
+		return n
+	}
+
+	// For larger arrays, use stdlib's binary search
 	return sort.SearchFloat64s(h.upperBounds, v)
 }
@@ -1440,9 +1466,9 @@ func pickSchema(bucketFactor float64) int32 {
 	floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
 	switch {
 	case floor <= -8:
-		return 8
+		return nativeHistogramSchemaMaximum
 	case floor >= 4:
-		return -4
+		return nativeHistogramSchemaMinimum
 	default:
 		return -int32(floor)
 	}
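pickSchema now returns the new named constants; the observable behaviour is unchanged. As a rough illustration of how the schema is driven from user configuration — the factor and limits below are assumptions for the sketch, not values taken from this change:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// A native histogram: NativeHistogramBucketFactor is the largest acceptable
	// growth factor between consecutive bucket boundaries. A factor of 1.1 maps
	// (via pickSchema) to schema 3, i.e. an actual factor of 2^(1/8) ≈ 1.09.
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                           "request_duration_seconds", // illustrative
		Help:                           "Request latency in seconds.",
		NativeHistogramBucketFactor:    1.1,
		NativeHistogramZeroThreshold:   prometheus.DefNativeHistogramZeroThreshold,
		NativeHistogramMaxBucketNumber: 160,
	})
	h.Observe(0.42)
}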
@@ -1835,3 +1861,196 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
 		n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
 	}
 }
type constNativeHistogram struct {
desc *Desc
dto.Histogram
labelPairs []*dto.LabelPair
}
func validateCount(sum float64, count uint64, negativeBuckets, positiveBuckets map[int]int64, zeroBucket uint64) error {
var bucketPopulationSum int64
for _, v := range positiveBuckets {
bucketPopulationSum += v
}
for _, v := range negativeBuckets {
bucketPopulationSum += v
}
bucketPopulationSum += int64(zeroBucket)
// If the sum of observations is NaN, the number of observations must be greater than or equal to the sum of all bucket counts.
// Otherwise, the number of observations must be equal to the sum of all bucket counts.
if math.IsNaN(sum) && bucketPopulationSum > int64(count) ||
!math.IsNaN(sum) && bucketPopulationSum != int64(count) {
return errors.New("the sum of all bucket populations exceeds the count of observations")
}
return nil
}
// NewConstNativeHistogram returns a metric representing a Prometheus native histogram with
// fixed values for the count, sum, and positive/negative/zero bucket counts. As those parameters
// cannot be changed, the returned value does not implement the Histogram
// interface (but only the Metric interface). Users of this package will not
// have much use for it in regular operations. However, when implementing custom
// OpenTelemetry Collectors, it is useful as a throw-away metric that is generated on the fly
// to send it to Prometheus in the Collect method.
//
// zeroBucket counts all (positive and negative)
// observations in the zero bucket (with an absolute value less or equal
// the current threshold).
// positiveBuckets and negativeBuckets are separate maps for negative and positive
// observations. The map's value is an int64, counting observations in
// that bucket. The map's key is the
// index of the bucket according to the used
// Schema. Index 0 is for an upper bound of 1 in positive buckets and for a lower bound of -1 in negative buckets.
// NewConstNativeHistogram returns an error if
// - the length of labelValues is not consistent with the variable labels in Desc or if Desc is invalid.
// - the schema passed is not between 8 and -4
// - the sum of counts in all buckets including the zero bucket does not equal the count if sum is not NaN (or exceeds the count if sum is NaN)
//
// See https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/#exponential-histograms for more details about the conversion from OTel to Prometheus.
func NewConstNativeHistogram(
desc *Desc,
count uint64,
sum float64,
positiveBuckets, negativeBuckets map[int]int64,
zeroBucket uint64,
schema int32,
zeroThreshold float64,
createdTimestamp time.Time,
labelValues ...string,
) (Metric, error) {
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
if schema > nativeHistogramSchemaMaximum || schema < nativeHistogramSchemaMinimum {
return nil, errors.New("invalid native histogram schema")
}
if err := validateCount(sum, count, negativeBuckets, positiveBuckets, zeroBucket); err != nil {
return nil, err
}
NegativeSpan, NegativeDelta := makeBucketsFromMap(negativeBuckets)
PositiveSpan, PositiveDelta := makeBucketsFromMap(positiveBuckets)
ret := &constNativeHistogram{
desc: desc,
Histogram: dto.Histogram{
CreatedTimestamp: timestamppb.New(createdTimestamp),
Schema: &schema,
ZeroThreshold: &zeroThreshold,
SampleCount: &count,
SampleSum: &sum,
NegativeSpan: NegativeSpan,
NegativeDelta: NegativeDelta,
PositiveSpan: PositiveSpan,
PositiveDelta: PositiveDelta,
ZeroCount: proto.Uint64(zeroBucket),
},
labelPairs: MakeLabelPairs(desc, labelValues),
}
if *ret.ZeroThreshold == 0 && *ret.ZeroCount == 0 && len(ret.PositiveSpan) == 0 && len(ret.NegativeSpan) == 0 {
ret.PositiveSpan = []*dto.BucketSpan{{
Offset: proto.Int32(0),
Length: proto.Uint32(0),
}}
}
return ret, nil
}
// MustNewConstNativeHistogram is a version of NewConstNativeHistogram that panics where
// NewConstNativeHistogram would have returned an error.
func MustNewConstNativeHistogram(
desc *Desc,
count uint64,
sum float64,
positiveBuckets, negativeBuckets map[int]int64,
zeroBucket uint64,
nativeHistogramSchema int32,
nativeHistogramZeroThreshold float64,
createdTimestamp time.Time,
labelValues ...string,
) Metric {
nativehistogram, err := NewConstNativeHistogram(desc,
count,
sum,
positiveBuckets,
negativeBuckets,
zeroBucket,
nativeHistogramSchema,
nativeHistogramZeroThreshold,
createdTimestamp,
labelValues...)
if err != nil {
panic(err)
}
return nativehistogram
}
func (h *constNativeHistogram) Desc() *Desc {
return h.desc
}
func (h *constNativeHistogram) Write(out *dto.Metric) error {
out.Histogram = &h.Histogram
out.Label = h.labelPairs
return nil
}
func makeBucketsFromMap(buckets map[int]int64) ([]*dto.BucketSpan, []int64) {
if len(buckets) == 0 {
return nil, nil
}
var ii []int
for k := range buckets {
ii = append(ii, k)
}
sort.Ints(ii)
var (
spans []*dto.BucketSpan
deltas []int64
prevCount int64
nextI int
)
appendDelta := func(count int64) {
*spans[len(spans)-1].Length++
deltas = append(deltas, count-prevCount)
prevCount = count
}
for n, i := range ii {
count := buckets[i]
// Multiple spans with only small gaps in between are probably
// encoded more efficiently as one larger span with a few empty
// buckets. Needs some research to find the sweet spot. For now,
// we assume that gaps of one or two buckets should not create
// a new span.
iDelta := int32(i - nextI)
if n == 0 || iDelta > 2 {
// We have to create a new span, either because we are
// at the very beginning, or because we have found a gap
// of more than two buckets.
spans = append(spans, &dto.BucketSpan{
Offset: proto.Int32(iDelta),
Length: proto.Uint32(0),
})
} else {
// We have found a small gap (or no gap at all).
// Insert empty buckets as needed.
for j := int32(0); j < iDelta; j++ {
appendDelta(0)
}
}
appendDelta(count)
nextI = i + 1
}
return spans, deltas
}

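The new NewConstNativeHistogram constructor above is aimed at bridges that already hold pre-aggregated exponential-histogram data (for example OpenTelemetry exporters). A hedged sketch of calling it with made-up bucket contents — the metric name and numbers are illustrative only:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc("rpc_latency_seconds", "Illustrative native histogram.", nil, nil)

	// Made-up data: 5 observations in total, schema 0 (bucket factor 2),
	// four observations in two positive buckets and one in the zero bucket,
	// so the bucket populations add up to the count, as validateCount requires.
	m, err := prometheus.NewConstNativeHistogram(
		desc,
		5,                         // count
		3.2,                       // sum
		map[int]int64{0: 2, 1: 2}, // positive buckets, keyed by schema index
		nil,                       // no negative buckets
		1,                         // zero-bucket count
		0,                         // schema (must be between -4 and 8)
		1e-128,                    // zero threshold
		time.Now(),                // created timestamp
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Desc())
}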

@@ -22,17 +22,18 @@ import (
 	"bytes"
 	"fmt"
 	"io"
+	"strconv"
 	"strings"
 )

-func min(a, b int) int {
+func minInt(a, b int) int {
 	if a < b {
 		return a
 	}
 	return b
 }

-func max(a, b int) int {
+func maxInt(a, b int) int {
 	if a > b {
 		return a
 	}
@@ -427,12 +428,12 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
 	if codes[0].Tag == 'e' {
 		c := codes[0]
 		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
-		codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
+		codes[0] = OpCode{c.Tag, maxInt(i1, i2-n), i2, maxInt(j1, j2-n), j2}
 	}
 	if codes[len(codes)-1].Tag == 'e' {
 		c := codes[len(codes)-1]
 		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
-		codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
+		codes[len(codes)-1] = OpCode{c.Tag, i1, minInt(i2, i1+n), j1, minInt(j2, j1+n)}
 	}
 	nn := n + n
 	groups := [][]OpCode{}
@@ -443,12 +444,12 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
 		// there is a large range with no changes.
 		if c.Tag == 'e' && i2-i1 > nn {
 			group = append(group, OpCode{
-				c.Tag, i1, min(i2, i1+n),
-				j1, min(j2, j1+n),
+				c.Tag, i1, minInt(i2, i1+n),
+				j1, minInt(j2, j1+n),
 			})
 			groups = append(groups, group)
 			group = []OpCode{}
-			i1, j1 = max(i1, i2-n), max(j1, j2-n)
+			i1, j1 = maxInt(i1, i2-n), maxInt(j1, j2-n)
 		}
 		group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
 	}
@@ -515,7 +516,7 @@ func (m *SequenceMatcher) QuickRatio() float64 {
 // is faster to compute than either .Ratio() or .QuickRatio().
 func (m *SequenceMatcher) RealQuickRatio() float64 {
 	la, lb := len(m.a), len(m.b)
-	return calculateRatio(min(la, lb), la+lb)
+	return calculateRatio(minInt(la, lb), la+lb)
 }

 // Convert range to the "ed" format
@@ -524,7 +525,7 @@ func formatRangeUnified(start, stop int) string {
 	beginning := start + 1 // lines start numbering with one
 	length := stop - start
 	if length == 1 {
-		return fmt.Sprintf("%d", beginning)
+		return strconv.Itoa(beginning)
 	}
 	if length == 0 {
 		beginning-- // empty ranges begin at line just before the range


@@ -66,7 +66,8 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
 		name += "_total"
 	}

-	valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
+	// Our current conversion moves to legacy naming, so use legacy validation.
+	valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name)
 	switch d.Kind {
 	case metrics.KindUint64:
 	case metrics.KindFloat64:


@@ -108,15 +108,23 @@ func BuildFQName(namespace, subsystem, name string) string {
 	if name == "" {
 		return ""
 	}
-	switch {
-	case namespace != "" && subsystem != "":
-		return strings.Join([]string{namespace, subsystem, name}, "_")
-	case namespace != "":
-		return strings.Join([]string{namespace, name}, "_")
-	case subsystem != "":
-		return strings.Join([]string{subsystem, name}, "_")
-	}
-	return name
+
+	sb := strings.Builder{}
+	sb.Grow(len(namespace) + len(subsystem) + len(name) + 2)
+
+	if namespace != "" {
+		sb.WriteString(namespace)
+		sb.WriteString("_")
+	}
+
+	if subsystem != "" {
+		sb.WriteString(subsystem)
+		sb.WriteString("_")
+	}
+
+	sb.WriteString(name)
+
+	return sb.String()
 }

 type invalidMetric struct {

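BuildFQName's switch is replaced with a strings.Builder; the output is unchanged. For reference, the joining behaviour (names are illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	fmt.Println(prometheus.BuildFQName("app", "http", "requests_total")) // app_http_requests_total
	fmt.Println(prometheus.BuildFQName("", "http", "requests_total"))    // http_requests_total
	fmt.Println(prometheus.BuildFQName("app", "", "requests_total"))     // app_requests_total
	fmt.Println(prometheus.BuildFQName("app", "http", ""))               // "" (empty name short-circuits)
}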

@@ -23,6 +23,7 @@ import (

 type processCollector struct {
 	collectFn    func(chan<- Metric)
+	describeFn   func(chan<- *Desc)
 	pidFn        func() (int, error)
 	reportErrors bool
 	cpuTotal     *Desc
@@ -122,26 +123,23 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector {
 	// Set up process metric collection if supported by the runtime.
 	if canCollectProcess() {
 		c.collectFn = c.processCollect
+		c.describeFn = c.describe
 	} else {
-		c.collectFn = func(ch chan<- Metric) {
-			c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
-		}
+		c.collectFn = c.errorCollectFn
+		c.describeFn = c.errorDescribeFn
 	}

 	return c
 }

-// Describe returns all descriptions of the collector.
-func (c *processCollector) Describe(ch chan<- *Desc) {
-	ch <- c.cpuTotal
-	ch <- c.openFDs
-	ch <- c.maxFDs
-	ch <- c.vsize
-	ch <- c.maxVsize
-	ch <- c.rss
-	ch <- c.startTime
-	ch <- c.inBytes
-	ch <- c.outBytes
+func (c *processCollector) errorCollectFn(ch chan<- Metric) {
+	c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
+}
+
+func (c *processCollector) errorDescribeFn(ch chan<- *Desc) {
+	if c.reportErrors {
+		ch <- NewInvalidDesc(errors.New("process metrics not supported on this platform"))
+	}
 }

 // Collect returns the current state of all metrics of the collector.
@@ -149,6 +147,11 @@ func (c *processCollector) Collect(ch chan<- Metric) {
 	c.collectFn(ch)
 }

+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+	c.describeFn(ch)
+}
+
 func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
 	if !c.reportErrors {
 		return


@@ -0,0 +1,130 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build darwin && !ios
package prometheus
import (
"errors"
"fmt"
"os"
"syscall"
"time"
"golang.org/x/sys/unix"
)
// notImplementedErr is returned by stub functions that replace cgo functions, when cgo
// isn't available.
var notImplementedErr = errors.New("not implemented")
type memoryInfo struct {
vsize uint64 // Virtual memory size in bytes
rss uint64 // Resident memory size in bytes
}
func canCollectProcess() bool {
return true
}
func getSoftLimit(which int) (uint64, error) {
rlimit := syscall.Rlimit{}
if err := syscall.Getrlimit(which, &rlimit); err != nil {
return 0, err
}
return rlimit.Cur, nil
}
func getOpenFileCount() (float64, error) {
// Alternately, the undocumented proc_pidinfo(PROC_PIDLISTFDS) can be used to
// return a list of open fds, but that requires a way to call C APIs. The
// benefits, however, include fewer system calls and not failing when at the
// open file soft limit.
if dir, err := os.Open("/dev/fd"); err != nil {
return 0.0, err
} else {
defer dir.Close()
// Avoid ReadDir(), as it calls stat(2) on each descriptor. Not only is
// that info not used, but KQUEUE descriptors fail stat(2), which causes
// the whole method to fail.
if names, err := dir.Readdirnames(0); err != nil {
return 0.0, err
} else {
// Subtract 1 to ignore the open /dev/fd descriptor above.
return float64(len(names) - 1), nil
}
}
}
func (c *processCollector) processCollect(ch chan<- Metric) {
if procs, err := unix.SysctlKinfoProcSlice("kern.proc.pid", os.Getpid()); err == nil {
if len(procs) == 1 {
startTime := float64(procs[0].Proc.P_starttime.Nano() / 1e9)
ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
} else {
err = fmt.Errorf("sysctl() returned %d proc structs (expected 1)", len(procs))
c.reportError(ch, c.startTime, err)
}
} else {
c.reportError(ch, c.startTime, err)
}
// The proc structure returned by kern.proc.pid above has an Rusage member,
// but it is not filled in, so it needs to be fetched by getrusage(2). For
// that call, the UTime, STime, and Maxrss members are filled out, but not
// Ixrss, Idrss, or Isrss for the memory usage. Memory stats will require
// access to the C API to call task_info(TASK_BASIC_INFO).
rusage := unix.Rusage{}
if err := unix.Getrusage(syscall.RUSAGE_SELF, &rusage); err == nil {
cpuTime := time.Duration(rusage.Stime.Nano() + rusage.Utime.Nano()).Seconds()
ch <- MustNewConstMetric(c.cpuTotal, CounterValue, cpuTime)
} else {
c.reportError(ch, c.cpuTotal, err)
}
if memInfo, err := getMemory(); err == nil {
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss))
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize))
} else if !errors.Is(err, notImplementedErr) {
// Don't report an error when support is not compiled in.
c.reportError(ch, c.rss, err)
c.reportError(ch, c.vsize, err)
}
if fds, err := getOpenFileCount(); err == nil {
ch <- MustNewConstMetric(c.openFDs, GaugeValue, fds)
} else {
c.reportError(ch, c.openFDs, err)
}
if openFiles, err := getSoftLimit(syscall.RLIMIT_NOFILE); err == nil {
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(openFiles))
} else {
c.reportError(ch, c.maxFDs, err)
}
if addressSpace, err := getSoftLimit(syscall.RLIMIT_AS); err == nil {
ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(addressSpace))
} else {
c.reportError(ch, c.maxVsize, err)
}
// TODO: socket(PF_SYSTEM) to fetch "com.apple.network.statistics" might
// be able to get the per-process network send/receive counts.
}


@@ -0,0 +1,84 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build darwin && !ios && cgo
#include <mach/mach_init.h>
#include <mach/task.h>
#include <mach/mach_vm.h>
// The compiler warns that mach/shared_memory_server.h is deprecated, and to use
// mach/shared_region.h instead. But that doesn't define
// SHARED_DATA_REGION_SIZE or SHARED_TEXT_REGION_SIZE, so redefine them here and
// avoid a warning message when running tests.
#define GLOBAL_SHARED_TEXT_SEGMENT 0x90000000U
#define SHARED_DATA_REGION_SIZE 0x10000000
#define SHARED_TEXT_REGION_SIZE 0x10000000
int get_memory_info(unsigned long long *rss, unsigned long long *vsize)
{
// This is lightly adapted from how ps(1) obtains its memory info.
// https://github.com/apple-oss-distributions/adv_cmds/blob/8744084ea0ff41ca4bb96b0f9c22407d0e48e9b7/ps/tasks.c#L109
kern_return_t error;
task_t task = MACH_PORT_NULL;
mach_task_basic_info_data_t info;
mach_msg_type_number_t info_count = MACH_TASK_BASIC_INFO_COUNT;
error = task_info(
mach_task_self(),
MACH_TASK_BASIC_INFO,
(task_info_t) &info,
&info_count );
if( error != KERN_SUCCESS )
{
return error;
}
*rss = info.resident_size;
*vsize = info.virtual_size;
{
vm_region_basic_info_data_64_t b_info;
mach_vm_address_t address = GLOBAL_SHARED_TEXT_SEGMENT;
mach_vm_size_t size;
mach_port_t object_name;
/*
* try to determine if this task has the split libraries
* mapped in... if so, adjust its virtual size down by
* the 2 segments that are used for split libraries
*/
info_count = VM_REGION_BASIC_INFO_COUNT_64;
error = mach_vm_region(
mach_task_self(),
&address,
&size,
VM_REGION_BASIC_INFO_64,
(vm_region_info_t) &b_info,
&info_count,
&object_name);
if (error == KERN_SUCCESS) {
if (b_info.reserved && size == (SHARED_TEXT_REGION_SIZE) &&
*vsize > (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE)) {
*vsize -= (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE);
}
}
}
return 0;
}


@@ -0,0 +1,51 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build darwin && !ios && cgo
package prometheus
/*
int get_memory_info(unsigned long long *rss, unsigned long long *vs);
*/
import "C"
import "fmt"
func getMemory() (*memoryInfo, error) {
var rss, vsize C.ulonglong
if err := C.get_memory_info(&rss, &vsize); err != 0 {
return nil, fmt.Errorf("task_info() failed with 0x%x", int(err))
}
return &memoryInfo{vsize: uint64(vsize), rss: uint64(rss)}, nil
}
// describe returns all descriptions of the collector for Darwin.
// Ensure that this list of descriptors is kept in sync with the metrics collected
// in the processCollect method. Any changes to the metrics in processCollect
// (such as adding or removing metrics) should be reflected in this list of descriptors.
func (c *processCollector) describe(ch chan<- *Desc) {
ch <- c.cpuTotal
ch <- c.openFDs
ch <- c.maxFDs
ch <- c.maxVsize
ch <- c.startTime
ch <- c.rss
ch <- c.vsize
/* the process could be collected but not implemented yet
ch <- c.inBytes
ch <- c.outBytes
*/
}


@@ -0,0 +1,39 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build darwin && !ios && !cgo
package prometheus
func getMemory() (*memoryInfo, error) {
return nil, notImplementedErr
}
// describe returns all descriptions of the collector for Darwin.
// Ensure that this list of descriptors is kept in sync with the metrics collected
// in the processCollect method. Any changes to the metrics in processCollect
// (such as adding or removing metrics) should be reflected in this list of descriptors.
func (c *processCollector) describe(ch chan<- *Desc) {
ch <- c.cpuTotal
ch <- c.openFDs
ch <- c.maxFDs
ch <- c.maxVsize
ch <- c.startTime
/* the process could be collected but not implemented yet
ch <- c.rss
ch <- c.vsize
ch <- c.inBytes
ch <- c.outBytes
*/
}


@@ -11,8 +11,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-//go:build wasip1
-// +build wasip1
+//go:build wasip1 || js || ios
+// +build wasip1 js ios

 package prometheus
@@ -20,7 +20,14 @@ func canCollectProcess() bool {
 	return false
 }

-func (*processCollector) processCollect(chan<- Metric) {
-	// noop on this platform
-	return
+func (c *processCollector) processCollect(ch chan<- Metric) {
+	c.errorCollectFn(ch)
+}
+
+// describe returns all descriptions of the collector for wasip1 and js.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	c.errorDescribeFn(ch)
 }


@@ -11,8 +11,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-//go:build !windows && !js && !wasip1
-// +build !windows,!js,!wasip1
+//go:build !windows && !js && !wasip1 && !darwin
+// +build !windows,!js,!wasip1,!darwin

 package prometheus
@@ -78,3 +78,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
 		c.reportError(ch, nil, err)
 	}
 }
+
+// describe returns all descriptions of the collector for others than windows, js, wasip1 and darwin.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	ch <- c.cpuTotal
+	ch <- c.openFDs
+	ch <- c.maxFDs
+	ch <- c.vsize
+	ch <- c.maxVsize
+	ch <- c.rss
+	ch <- c.startTime
+	ch <- c.inBytes
+	ch <- c.outBytes
+}


@@ -79,14 +79,10 @@ func getProcessHandleCount(handle windows.Handle) (uint32, error) {
 }

 func (c *processCollector) processCollect(ch chan<- Metric) {
-	h, err := windows.GetCurrentProcess()
-	if err != nil {
-		c.reportError(ch, nil, err)
-		return
-	}
+	h := windows.CurrentProcess()

 	var startTime, exitTime, kernelTime, userTime windows.Filetime
-	err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
+	err := windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
 	if err != nil {
 		c.reportError(ch, nil, err)
 		return
@@ -111,6 +107,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
 	ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
 }

+// describe returns all descriptions of the collector for windows.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	ch <- c.cpuTotal
+	ch <- c.openFDs
+	ch <- c.maxFDs
+	ch <- c.vsize
+	ch <- c.rss
+	ch <- c.startTime
+}
+
 func fileTimeToSeconds(ft windows.Filetime) float64 {
 	return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
 }


@@ -41,11 +41,11 @@ import (
 	"sync"
 	"time"

-	"github.com/klauspost/compress/zstd"
 	"github.com/prometheus/common/expfmt"

 	"github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp/internal"
 )

 const (
@@ -65,7 +65,13 @@ const (
 	Zstd Compression = "zstd"
 )

-var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd}
+func defaultCompressionFormats() []Compression {
+	if internal.NewZstdWriter != nil {
+		return []Compression{Identity, Gzip, Zstd}
+	} else {
+		return []Compression{Identity, Gzip}
+	}
+}

 var gzipPool = sync.Pool{
 	New: func() interface{} {
@@ -138,7 +144,7 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
 	// Select compression formats to offer based on default or user choice.
 	var compressions []string
 	if !opts.DisableCompression {
-		offers := defaultCompressionFormats
+		offers := defaultCompressionFormats()
 		if len(opts.OfferedCompressions) > 0 {
 			offers = opts.OfferedCompressions
 		}
@@ -207,7 +213,13 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
 	if encodingHeader != string(Identity) {
 		rsp.Header().Set(contentEncodingHeader, encodingHeader)
 	}
-	enc := expfmt.NewEncoder(w, contentType)
+
+	var enc expfmt.Encoder
+	if opts.EnableOpenMetricsTextCreatedSamples {
+		enc = expfmt.NewEncoder(w, contentType, expfmt.WithCreatedLines())
+	} else {
+		enc = expfmt.NewEncoder(w, contentType)
+	}

 	// handleError handles the error according to opts.ErrorHandling
 	// and returns true if we have to abort after the handling.
@@ -408,6 +420,21 @@ type HandlerOpts struct {
 	// (which changes the identity of the resulting series on the Prometheus
 	// server).
 	EnableOpenMetrics bool
// EnableOpenMetricsTextCreatedSamples specifies if this handler should add, extra, synthetic
// Created Timestamps for counters, histograms and summaries, which for the current
// version of OpenMetrics are defined as extra series with the same name and "_created"
// suffix. See also the OpenMetrics specification for more details
// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1
//
// Created timestamps are used to improve the accuracy of reset detection,
// but the way it's designed in OpenMetrics 1.0 it also dramatically increases cardinality
// if the scraper does not handle those metrics correctly (converting to created timestamp
// instead of leaving those series as-is). New OpenMetrics versions might improve
// this situation.
//
// Prometheus introduced the feature flag 'created-timestamp-zero-ingestion'
// in version 2.50.0 to handle this situation.
EnableOpenMetricsTextCreatedSamples bool
 	// ProcessStartTime allows setting process start timevalue that will be exposed
 	// with "Process-Start-Time-Unix" response header along with the metrics
 	// payload. This allow callers to have efficient transformations to cumulative
@@ -445,14 +472,12 @@ func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []strin

 	switch selected {
 	case "zstd":
-		// TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented.
-		z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest))
-		if err != nil {
-			return nil, "", func() {}, err
+		if internal.NewZstdWriter == nil {
+			// The content encoding was not implemented yet.
+			return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats())
 		}
-
-		z.Reset(rw)
-		return z, selected, func() { _ = z.Close() }, nil
+		writer, closeWriter, err := internal.NewZstdWriter(rw)
+		return writer, selected, closeWriter, err
 	case "gzip":
 		gz := gzipPool.Get().(*gzip.Writer)
 		gz.Reset(rw)
@@ -462,6 +487,6 @@ func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []strin
 		return rw, selected, func() {}, nil
 	default:
 		// The content encoding was not implemented yet.
-		return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats)
+		return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats())
 	}
 }

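Two handler-level behaviours change here: zstd support is now wired through the optional internal.NewZstdWriter hook (so the klauspost/compress dependency is no longer pulled in by default), and OpenMetrics _created lines must be opted into. A hedged sketch of the relevant HandlerOpts — the registry, port, and offered codings are illustrative:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		// Negotiate OpenMetrics when the scraper asks for it ...
		EnableOpenMetrics: true,
		// ... and additionally emit the synthetic *_created series.
		EnableOpenMetricsTextCreatedSamples: true,
		// Restrict offered codings; zstd is only offered once a zstd writer
		// has been wired up via the new internal hook.
		OfferedCompressions: []promhttp.Compression{promhttp.Identity, promhttp.Gzip},
	})

	http.Handle("/metrics", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}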

@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright 2025 The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
@@ -11,16 +11,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-//go:build js
-// +build js
-
-package prometheus
-
-func canCollectProcess() bool {
-	return false
-}
-
-func (c *processCollector) processCollect(ch chan<- Metric) {
-	// noop on this platform
-	return
-}
+package internal
+
+import (
+	"io"
+)
+
+// NewZstdWriter enables zstd write support if non-nil.
+var NewZstdWriter func(rw io.Writer) (_ io.Writer, closeWriter func(), _ error)


@@ -243,6 +243,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
 	s := &summary{
 		desc: desc,
+		now:  opts.now,

 		objectives:       opts.Objectives,
 		sortedObjectives: make([]float64, 0, len(opts.Objectives)),
@@ -280,6 +281,8 @@ type summary struct {
 	desc *Desc

+	now func() time.Time
+
 	objectives       map[float64]float64
 	sortedObjectives []float64
@@ -307,7 +310,7 @@ func (s *summary) Observe(v float64) {
 	s.bufMtx.Lock()
 	defer s.bufMtx.Unlock()

-	now := time.Now()
+	now := s.now()
 	if now.After(s.hotBufExpTime) {
 		s.asyncFlush(now)
 	}
@@ -326,7 +329,7 @@ func (s *summary) Write(out *dto.Metric) error {
 	s.bufMtx.Lock()
 	s.mtx.Lock()
 	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
-	s.swapBufs(time.Now())
+	s.swapBufs(s.now())
 	s.bufMtx.Unlock()

 	s.flushColdBuf()


@@ -45,7 +45,7 @@ func ResponseFormat(h http.Header) Format {
 	mediatype, params, err := mime.ParseMediaType(ct)
 	if err != nil {
-		return fmtUnknown
+		return FmtUnknown
 	}

 	const textType = "text/plain"
@@ -53,21 +53,21 @@ func ResponseFormat(h http.Header) Format {
 	switch mediatype {
 	case ProtoType:
 		if p, ok := params["proto"]; ok && p != ProtoProtocol {
-			return fmtUnknown
+			return FmtUnknown
 		}
 		if e, ok := params["encoding"]; ok && e != "delimited" {
-			return fmtUnknown
+			return FmtUnknown
 		}
-		return fmtProtoDelim
+		return FmtProtoDelim

 	case textType:
 		if v, ok := params["version"]; ok && v != TextVersion {
-			return fmtUnknown
+			return FmtUnknown
 		}
-		return fmtText
+		return FmtText
 	}

-	return fmtUnknown
+	return FmtUnknown
 }

 // NewDecoder returns a new decoder based on the given input format.

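The only functional change in decode.go is switching back to the re-exported Fmt* names. A small sketch of ResponseFormat with the recommended FormatType comparison — the header values are illustrative:

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/common/expfmt"
)

func main() {
	h := http.Header{}
	h.Set("Content-Type", "text/plain; version=0.0.4; charset=utf-8")

	f := expfmt.ResponseFormat(h)
	// Prefer FormatType comparisons over comparing against the deprecated Fmt* constants.
	fmt.Println(f.FormatType() == expfmt.TypeTextPlain) // true

	h.Set("Content-Type", "application/json")
	fmt.Println(expfmt.ResponseFormat(h).FormatType() == expfmt.TypeUnknown) // true
}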

@@ -68,7 +68,7 @@ func Negotiate(h http.Header) Format {
 		if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
 			switch Format(escapeParam) {
 			case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
-				escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
+				escapingScheme = Format("; escaping=" + escapeParam)
 			default:
 				// If the escaping parameter is unknown, ignore it.
 			}
@@ -77,18 +77,18 @@ func Negotiate(h http.Header) Format {
 		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
 			switch ac.Params["encoding"] {
 			case "delimited":
-				return fmtProtoDelim + escapingScheme
+				return FmtProtoDelim + escapingScheme
 			case "text":
-				return fmtProtoText + escapingScheme
+				return FmtProtoText + escapingScheme
 			case "compact-text":
-				return fmtProtoCompact + escapingScheme
+				return FmtProtoCompact + escapingScheme
 			}
 		}
 		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
-			return fmtText + escapingScheme
+			return FmtText + escapingScheme
 		}
 	}
-	return fmtText + escapingScheme
+	return FmtText + escapingScheme
 }

 // NegotiateIncludingOpenMetrics works like Negotiate but includes
@@ -101,7 +101,7 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
 		if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
 			switch Format(escapeParam) {
 			case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
-				escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
+				escapingScheme = Format("; escaping=" + escapeParam)
 			default:
 				// If the escaping parameter is unknown, ignore it.
 			}
@@ -110,26 +110,26 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
 		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
 			switch ac.Params["encoding"] {
 			case "delimited":
-				return fmtProtoDelim + escapingScheme
+				return FmtProtoDelim + escapingScheme
 			case "text":
-				return fmtProtoText + escapingScheme
+				return FmtProtoText + escapingScheme
 			case "compact-text":
-				return fmtProtoCompact + escapingScheme
+				return FmtProtoCompact + escapingScheme
 			}
 		}
 		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
-			return fmtText + escapingScheme
+			return FmtText + escapingScheme
 		}
 		if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
 			switch ver {
 			case OpenMetricsVersion_1_0_0:
-				return fmtOpenMetrics_1_0_0 + escapingScheme
+				return FmtOpenMetrics_1_0_0 + escapingScheme
 			default:
-				return fmtOpenMetrics_0_0_1 + escapingScheme
+				return FmtOpenMetrics_0_0_1 + escapingScheme
 			}
 		}
 	}
-	return fmtText + escapingScheme
+	return FmtText + escapingScheme
 }

 // NewEncoder returns a new encoder based on content type negotiation. All


@@ -15,7 +15,7 @@
 package expfmt

 import (
-	"fmt"
+	"errors"
 	"strings"

 	"github.com/prometheus/common/model"
@@ -32,24 +32,31 @@ type Format string
 // it on the wire, new content-type strings will have to be agreed upon and
 // added here.
 const (
 	TextVersion   = "0.0.4"
 	ProtoType     = `application/vnd.google.protobuf`
 	ProtoProtocol = `io.prometheus.client.MetricFamily`
-	protoFmt      = ProtoType + "; proto=" + ProtoProtocol + ";"
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
+	ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"

 	OpenMetricsType          = `application/openmetrics-text`
 	OpenMetricsVersion_0_0_1 = "0.0.1"
 	OpenMetricsVersion_1_0_0 = "1.0.0"

-	// The Content-Type values for the different wire protocols. Note that these
-	// values are now unexported. If code was relying on comparisons to these
-	// constants, instead use FormatType().
-	fmtUnknown      Format = `<unknown>`
-	fmtText         Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
-	fmtProtoDelim   Format = protoFmt + ` encoding=delimited`
-	fmtProtoText    Format = protoFmt + ` encoding=text`
-	fmtProtoCompact Format = protoFmt + ` encoding=compact-text`
-	fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
-	fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
+	// The Content-Type values for the different wire protocols. Do not do direct
+	// comparisons to these constants, instead use the comparison functions.
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead.
+	FmtUnknown Format = `<unknown>`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead.
+	FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead.
+	FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead.
+	FmtProtoText Format = ProtoFmt + ` encoding=text`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
+	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+	FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+	FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
 )

 const (
@@ -79,17 +86,17 @@ const (
 func NewFormat(t FormatType) Format {
 	switch t {
 	case TypeProtoCompact:
-		return fmtProtoCompact
+		return FmtProtoCompact
 	case TypeProtoDelim:
-		return fmtProtoDelim
+		return FmtProtoDelim
 	case TypeProtoText:
-		return fmtProtoText
+		return FmtProtoText
 	case TypeTextPlain:
-		return fmtText
+		return FmtText
 	case TypeOpenMetrics:
-		return fmtOpenMetrics_1_0_0
+		return FmtOpenMetrics_1_0_0
 	default:
-		return fmtUnknown
+		return FmtUnknown
 	}
 }
@@ -97,12 +104,35 @@ func NewFormat(t FormatType) Format {
 // specified version number.
 func NewOpenMetricsFormat(version string) (Format, error) {
 	if version == OpenMetricsVersion_0_0_1 {
-		return fmtOpenMetrics_0_0_1, nil
+		return FmtOpenMetrics_0_0_1, nil
 	}
 	if version == OpenMetricsVersion_1_0_0 {
-		return fmtOpenMetrics_1_0_0, nil
+		return FmtOpenMetrics_1_0_0, nil
 	}
-	return fmtUnknown, fmt.Errorf("unknown open metrics version string")
+	return FmtUnknown, errors.New("unknown open metrics version string")
}
// WithEscapingScheme returns a copy of Format with the specified escaping
// scheme appended to the end. If an escaping scheme already exists it is
// removed.
func (f Format) WithEscapingScheme(s model.EscapingScheme) Format {
var terms []string
for _, p := range strings.Split(string(f), ";") {
toks := strings.Split(p, "=")
if len(toks) != 2 {
trimmed := strings.TrimSpace(p)
if len(trimmed) > 0 {
terms = append(terms, trimmed)
}
continue
}
key := strings.TrimSpace(toks[0])
if key != model.EscapingKey {
terms = append(terms, strings.TrimSpace(p))
}
}
terms = append(terms, model.EscapingKey+"="+s.String())
return Format(strings.Join(terms, "; "))
 }

 // FormatType deduces an overall FormatType for the given format.

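The Fmt* constants are re-exported (though deprecated in favour of NewFormat), and Format gains WithEscapingScheme. A hedged sketch of composing a content type with an escaping scheme; the printed value shown in the comment is the expected shape, not verified output:

package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	// Preferred constructor instead of the deprecated Fmt* constants.
	f := expfmt.NewFormat(expfmt.TypeTextPlain)

	// Append (or replace) the escaping parameter on the content type.
	withEscaping := f.WithEscapingScheme(model.UnderscoreEscaping)
	fmt.Println(withEscaping)
	// e.g. "text/plain; version=0.0.4; charset=utf-8; escaping=underscores"
}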

@ -38,7 +38,7 @@ type EncoderOption func(*encoderOption)
// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder // WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder
// to include _created lines (See // to include _created lines (See
// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1). // https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1).
// Created timestamps can improve the accuracy of series reset detection, but // Created timestamps can improve the accuracy of series reset detection, but
// come with a bandwidth cost. // come with a bandwidth cost.
// //
@ -102,7 +102,7 @@ func WithUnit() EncoderOption {
// //
// - According to the OM specs, the `# UNIT` line is optional, but if populated, // - According to the OM specs, the `# UNIT` line is optional, but if populated,
// the unit has to be present in the metric name as its suffix: // the unit has to be present in the metric name as its suffix:
// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit). // (see https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#unit).
// However, in order to accommodate any potential scenario where such a change in the // However, in order to accommodate any potential scenario where such a change in the
// metric name is not desirable, the users are here given the choice of either explicitly // metric name is not desirable, the users are here given the choice of either explicitly
// opt in, in case they wish for the unit to be included in the output AND in the metric name // opt in, in case they wish for the unit to be included in the output AND in the metric name
@ -152,8 +152,8 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") { if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
compliantName = name[:len(name)-6] compliantName = name[:len(name)-6]
} }
if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) { if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) {
compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit) compliantName = compliantName + "_" + *in.Unit
} }
// Comments, first HELP, then TYPE. // Comments, first HELP, then TYPE.
@ -477,7 +477,7 @@ func writeOpenMetricsNameAndLabelPairs(
if name != "" { if name != "" {
// If the name does not pass the legacy validity check, we must put the // If the name does not pass the legacy validity check, we must put the
// metric name inside the braces, quoted. // metric name inside the braces, quoted.
if !model.IsValidLegacyMetricName(model.LabelValue(name)) { if !model.IsValidLegacyMetricName(name) {
metricInsideBraces = true metricInsideBraces = true
err := w.WriteByte(separator) err := w.WriteByte(separator)
written++ written++
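A minimal sketch of how the encoder options referenced in these hunks are used; the metric family below is invented for illustration and is not taken from this repository:

```go
package main

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// Invented counter family, only to exercise the encoder options.
	mf := &dto.MetricFamily{
		Name: proto.String("http_request_seconds_total"),
		Help: proto.String("Total request time."),
		Type: dto.MetricType_COUNTER.Enum(),
		Unit: proto.String("seconds"),
		Metric: []*dto.Metric{{
			Counter: &dto.Counter{Value: proto.Float64(42)},
		}},
	}

	_, err := expfmt.MetricFamilyToOpenMetrics(
		os.Stdout, mf,
		expfmt.WithCreatedLines(), // emit _created lines for metrics that carry a created timestamp
		expfmt.WithUnit(),         // opt in to the # UNIT line and the unit suffix on the name
	)
	if err != nil {
		panic(err)
	}
}
```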

View File

@ -354,7 +354,7 @@ func writeNameAndLabelPairs(
if name != "" { if name != "" {
// If the name does not pass the legacy validity check, we must put the // If the name does not pass the legacy validity check, we must put the
// metric name inside the braces. // metric name inside the braces.
if !model.IsValidLegacyMetricName(model.LabelValue(name)) { if !model.IsValidLegacyMetricName(name) {
metricInsideBraces = true metricInsideBraces = true
err := w.WriteByte(separator) err := w.WriteByte(separator)
written++ written++
@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) {
// writeName writes a string as-is if it complies with the legacy naming // writeName writes a string as-is if it complies with the legacy naming
// scheme, or escapes it in double quotes if not. // scheme, or escapes it in double quotes if not.
func writeName(w enhancedWriter, name string) (int, error) { func writeName(w enhancedWriter, name string) (int, error) {
if model.IsValidLegacyMetricName(model.LabelValue(name)) { if model.IsValidLegacyMetricName(name) {
return w.WriteString(name) return w.WriteString(name)
} }
var written int var written int
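For context, a small hypothetical example of the behaviour this hunk relies on in the plain-text writer: a family name that fails the legacy check is written quoted inside the braces rather than as a bare prefix. The metric name is made up:

```go
package main

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// "http.requests.total" fails the legacy name check, so writeName and
	// writeNameAndLabelPairs emit it quoted inside the braces.
	mf := &dto.MetricFamily{
		Name: proto.String("http.requests.total"),
		Type: dto.MetricType_UNTYPED.Enum(),
		Metric: []*dto.Metric{{
			Untyped: &dto.Untyped{Value: proto.Float64(1)},
		}},
	}
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}
```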

View File

@ -22,9 +22,9 @@ import (
"math" "math"
"strconv" "strconv"
"strings" "strings"
"unicode/utf8"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/proto"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -60,6 +60,7 @@ type TextParser struct {
currentMF *dto.MetricFamily currentMF *dto.MetricFamily
currentMetric *dto.Metric currentMetric *dto.Metric
currentLabelPair *dto.LabelPair currentLabelPair *dto.LabelPair
currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line.
// The remaining member variables are only used for summaries/histograms. // The remaining member variables are only used for summaries/histograms.
currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
@ -74,6 +75,9 @@ type TextParser struct {
// count and sum of that summary/histogram. // count and sum of that summary/histogram.
currentIsSummaryCount, currentIsSummarySum bool currentIsSummaryCount, currentIsSummarySum bool
currentIsHistogramCount, currentIsHistogramSum bool currentIsHistogramCount, currentIsHistogramSum bool
// These indicate if the metric name from the current line being parsed is inside
// braces and if that metric name was found respectively.
currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool
} }
// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
@ -137,12 +141,15 @@ func (p *TextParser) reset(in io.Reader) {
} }
p.currentQuantile = math.NaN() p.currentQuantile = math.NaN()
p.currentBucket = math.NaN() p.currentBucket = math.NaN()
p.currentMF = nil
} }
// startOfLine represents the state where the next byte read from p.buf is the // startOfLine represents the state where the next byte read from p.buf is the
// start of a line (or whitespace leading up to it). // start of a line (or whitespace leading up to it).
func (p *TextParser) startOfLine() stateFn { func (p *TextParser) startOfLine() stateFn {
p.lineCount++ p.lineCount++
p.currentMetricIsInsideBraces = false
p.currentMetricInsideBracesIsPresent = false
if p.skipBlankTab(); p.err != nil { if p.skipBlankTab(); p.err != nil {
// This is the only place that we expect to see io.EOF, // This is the only place that we expect to see io.EOF,
// which is not an error but the signal that we are done. // which is not an error but the signal that we are done.
@ -158,6 +165,9 @@ func (p *TextParser) startOfLine() stateFn {
return p.startComment return p.startComment
case '\n': case '\n':
return p.startOfLine // Empty line, start the next one. return p.startOfLine // Empty line, start the next one.
case '{':
p.currentMetricIsInsideBraces = true
return p.readingLabels
} }
return p.readingMetricName return p.readingMetricName
} }
@ -275,6 +285,8 @@ func (p *TextParser) startLabelName() stateFn {
return nil // Unexpected end of input. return nil // Unexpected end of input.
} }
if p.currentByte == '}' { if p.currentByte == '}' {
p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
p.currentLabelPairs = nil
if p.skipBlankTab(); p.err != nil { if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input. return nil // Unexpected end of input.
} }
@ -287,6 +299,45 @@ func (p *TextParser) startLabelName() stateFn {
p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
return nil return nil
} }
if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
if p.currentByte != '=' {
if p.currentMetricIsInsideBraces {
if p.currentMetricInsideBracesIsPresent {
p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName()))
return nil
}
switch p.currentByte {
case ',':
p.setOrCreateCurrentMF()
if p.currentMF.Type == nil {
p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
}
p.currentMetric = &dto.Metric{}
p.currentMetricInsideBracesIsPresent = true
return p.startLabelName
case '}':
p.setOrCreateCurrentMF()
if p.currentMF.Type == nil {
p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
}
p.currentMetric = &dto.Metric{}
p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
p.currentLabelPairs = nil
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
return p.readingValue
default:
p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte))
return nil
}
}
p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
p.currentLabelPairs = nil
return nil
}
p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
@ -296,23 +347,17 @@ func (p *TextParser) startLabelName() stateFn {
// labels to 'real' labels. // labels to 'real' labels.
if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
}
if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
if p.currentByte != '=' {
p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
return nil
} }
// Check for duplicate label names. // Check for duplicate label names.
labels := make(map[string]struct{}) labels := make(map[string]struct{})
for _, l := range p.currentMetric.Label { for _, l := range p.currentLabelPairs {
lName := l.GetName() lName := l.GetName()
if _, exists := labels[lName]; !exists { if _, exists := labels[lName]; !exists {
labels[lName] = struct{}{} labels[lName] = struct{}{}
} else { } else {
p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
p.currentLabelPairs = nil
return nil return nil
} }
} }
@ -345,6 +390,7 @@ func (p *TextParser) startLabelValue() stateFn {
if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
// Create a more helpful error message. // Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
p.currentLabelPairs = nil
return nil return nil
} }
} else { } else {
@ -371,12 +417,19 @@ func (p *TextParser) startLabelValue() stateFn {
return p.startLabelName return p.startLabelName
case '}': case '}':
if p.currentMF == nil {
p.parseError("invalid metric name")
return nil
}
p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
p.currentLabelPairs = nil
if p.skipBlankTab(); p.err != nil { if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input. return nil // Unexpected end of input.
} }
return p.readingValue return p.readingValue
default: default:
p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
p.currentLabelPairs = nil
return nil return nil
} }
} }
@ -585,6 +638,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
p.currentToken.WriteByte(p.currentByte) p.currentToken.WriteByte(p.currentByte)
case 'n': case 'n':
p.currentToken.WriteByte('\n') p.currentToken.WriteByte('\n')
case '"':
p.currentToken.WriteByte('"')
default: default:
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
return return
@ -610,13 +665,45 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
// but not into p.currentToken. // but not into p.currentToken.
func (p *TextParser) readTokenAsMetricName() { func (p *TextParser) readTokenAsMetricName() {
p.currentToken.Reset() p.currentToken.Reset()
// A UTF-8 metric name must be quoted and may have escaped characters.
quoted := false
escaped := false
if !isValidMetricNameStart(p.currentByte) { if !isValidMetricNameStart(p.currentByte) {
return return
} }
for { for p.err == nil {
p.currentToken.WriteByte(p.currentByte) if escaped {
switch p.currentByte {
case '\\':
p.currentToken.WriteByte(p.currentByte)
case 'n':
p.currentToken.WriteByte('\n')
case '"':
p.currentToken.WriteByte('"')
default:
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
return
}
escaped = false
} else {
switch p.currentByte {
case '"':
quoted = !quoted
if !quoted {
p.currentByte, p.err = p.buf.ReadByte()
return
}
case '\n':
p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String()))
return
case '\\':
escaped = true
default:
p.currentToken.WriteByte(p.currentByte)
}
}
p.currentByte, p.err = p.buf.ReadByte() p.currentByte, p.err = p.buf.ReadByte()
if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') {
return return
} }
} }
@ -628,13 +715,45 @@ func (p *TextParser) readTokenAsMetricName() {
// but not into p.currentToken. // but not into p.currentToken.
func (p *TextParser) readTokenAsLabelName() { func (p *TextParser) readTokenAsLabelName() {
p.currentToken.Reset() p.currentToken.Reset()
// A UTF-8 label name must be quoted and may have escaped characters.
quoted := false
escaped := false
if !isValidLabelNameStart(p.currentByte) { if !isValidLabelNameStart(p.currentByte) {
return return
} }
for { for p.err == nil {
p.currentToken.WriteByte(p.currentByte) if escaped {
switch p.currentByte {
case '\\':
p.currentToken.WriteByte(p.currentByte)
case 'n':
p.currentToken.WriteByte('\n')
case '"':
p.currentToken.WriteByte('"')
default:
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
return
}
escaped = false
} else {
switch p.currentByte {
case '"':
quoted = !quoted
if !quoted {
p.currentByte, p.err = p.buf.ReadByte()
return
}
case '\n':
p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String()))
return
case '\\':
escaped = true
default:
p.currentToken.WriteByte(p.currentByte)
}
}
p.currentByte, p.err = p.buf.ReadByte() p.currentByte, p.err = p.buf.ReadByte()
if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') {
return return
} }
} }
@ -660,6 +779,7 @@ func (p *TextParser) readTokenAsLabelValue() {
p.currentToken.WriteByte('\n') p.currentToken.WriteByte('\n')
default: default:
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
p.currentLabelPairs = nil
return return
} }
escaped = false escaped = false
@ -718,19 +838,19 @@ func (p *TextParser) setOrCreateCurrentMF() {
} }
func isValidLabelNameStart(b byte) bool { func isValidLabelNameStart(b byte) bool {
return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"'
} }
func isValidLabelNameContinuation(b byte) bool { func isValidLabelNameContinuation(b byte, quoted bool) bool {
return isValidLabelNameStart(b) || (b >= '0' && b <= '9') return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b)))
} }
func isValidMetricNameStart(b byte) bool { func isValidMetricNameStart(b byte) bool {
return isValidLabelNameStart(b) || b == ':' return isValidLabelNameStart(b) || b == ':'
} }
func isValidMetricNameContinuation(b byte) bool { func isValidMetricNameContinuation(b byte, quoted bool) bool {
return isValidLabelNameContinuation(b) || b == ':' return isValidLabelNameContinuation(b, quoted) || b == ':'
} }
func isBlankOrTab(b byte) bool { func isBlankOrTab(b byte) bool {
@ -775,7 +895,7 @@ func histogramMetricName(name string) string {
func parseFloat(s string) (float64, error) { func parseFloat(s string) (float64, error) {
if strings.ContainsAny(s, "pP_") { if strings.ContainsAny(s, "pP_") {
return 0, fmt.Errorf("unsupported character in float") return 0, errors.New("unsupported character in float")
} }
return strconv.ParseFloat(s, 64) return strconv.ParseFloat(s, 64)
} }
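A brief sketch (assuming the quoted-name syntax these parser changes introduce) of feeding such a line through expfmt.TextParser; the exposition text is made up:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// Made-up exposition line: both the metric name and a label name are
	// quoted inside the braces, the syntax these parser changes accept.
	in := `{"http.requests.total", "instance.name"="localhost"} 42
`
	var p expfmt.TextParser
	families, err := p.TextToMetricFamilies(strings.NewReader(in))
	if err != nil {
		panic(err)
	}
	for name, mf := range families {
		fmt.Println(name, len(mf.Metric))
	}
}
```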

View File

@ -14,6 +14,7 @@
package model package model
import ( import (
"errors"
"fmt" "fmt"
"time" "time"
) )
@ -89,16 +90,16 @@ func (a *Alert) StatusAt(ts time.Time) AlertStatus {
// Validate checks whether the alert data is inconsistent. // Validate checks whether the alert data is inconsistent.
func (a *Alert) Validate() error { func (a *Alert) Validate() error {
if a.StartsAt.IsZero() { if a.StartsAt.IsZero() {
return fmt.Errorf("start time missing") return errors.New("start time missing")
} }
if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
return fmt.Errorf("start time must be before end time") return errors.New("start time must be before end time")
} }
if err := a.Labels.Validate(); err != nil { if err := a.Labels.Validate(); err != nil {
return fmt.Errorf("invalid label set: %w", err) return fmt.Errorf("invalid label set: %w", err)
} }
if len(a.Labels) == 0 { if len(a.Labels) == 0 {
return fmt.Errorf("at least one label pair required") return errors.New("at least one label pair required")
} }
if err := a.Annotations.Validate(); err != nil { if err := a.Annotations.Validate(); err != nil {
return fmt.Errorf("invalid annotations: %w", err) return fmt.Errorf("invalid annotations: %w", err)

View File

@ -97,26 +97,35 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
// therewith. // therewith.
type LabelName string type LabelName string
// IsValid returns true iff name matches the pattern of LabelNameRE for legacy // IsValid returns true iff the name matches the pattern of LabelNameRE when
// names, and iff it's valid UTF-8 if NameValidationScheme is set to // NameValidationScheme is set to LegacyValidation, or valid UTF-8 if
// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the // NameValidationScheme is set to UTF8Validation.
// check but a much faster hardcoded implementation.
func (ln LabelName) IsValid() bool { func (ln LabelName) IsValid() bool {
if len(ln) == 0 { if len(ln) == 0 {
return false return false
} }
switch NameValidationScheme { switch NameValidationScheme {
case LegacyValidation: case LegacyValidation:
for i, b := range ln { return ln.IsValidLegacy()
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
return false
}
}
case UTF8Validation: case UTF8Validation:
return utf8.ValidString(string(ln)) return utf8.ValidString(string(ln))
default: default:
panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
} }
}
// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for
// legacy names. It does not use LabelNameRE for the check but a much faster
// hardcoded implementation.
func (ln LabelName) IsValidLegacy() bool {
if len(ln) == 0 {
return false
}
for i, b := range ln {
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
return false
}
}
return true return true
} }
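Hypothetical usage of the split between IsValid and the new IsValidLegacy; the label name is invented, and the IsValid result assumes the UTF8Validation default introduced in metric.go below:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	ln := model.LabelName("instance.name")

	// IsValidLegacy always applies the hardcoded [a-zA-Z_][a-zA-Z0-9_]* check.
	fmt.Println(ln.IsValidLegacy()) // false

	// IsValid consults model.NameValidationScheme, which this update defaults
	// to UTF8Validation (see metric.go below), so a dotted name passes.
	fmt.Println(ln.IsValid()) // true under UTF8Validation
}
```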

View File

@ -11,8 +11,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build go1.21
package model package model
import ( import (

View File

@ -1,39 +0,0 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !go1.21
package model
import (
"fmt"
"sort"
"strings"
)
// String was optimized using functions not available for go 1.20
// or lower. We keep the old implementation for compatibility with client_golang.
// Once client golang drops support for go 1.20 (scheduled for August 2024), this
// file can be removed.
func (l LabelSet) String() string {
labelNames := make([]string, 0, len(l))
for name := range l {
labelNames = append(labelNames, string(name))
}
sort.Strings(labelNames)
lstrs := make([]string, 0, len(l))
for _, name := range labelNames {
lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)]))
}
return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
}

View File

@ -14,9 +14,11 @@
package model package model
import ( import (
"errors"
"fmt" "fmt"
"regexp" "regexp"
"sort" "sort"
"strconv"
"strings" "strings"
"unicode/utf8" "unicode/utf8"
@ -26,18 +28,21 @@ import (
var ( var (
// NameValidationScheme determines the method of name validation to be used by // NameValidationScheme determines the method of name validation to be used by
// all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8
// in isolation from other components that don't support UTF-8 may result in // mode in isolation from other components that don't support UTF-8 may result
// bugs or other undefined behavior. This value is intended to be set by // in bugs or other undefined behavior. This value can be set to
// UTF-8-aware binaries as part of their startup. To avoid need for locking, // LegacyValidation during startup if a binary is not UTF-8-aware. To avoid
// this value should be set once, ideally in an init(), before multiple // the need for locking, this value should be set once, ideally in an
// goroutines are started. // init(), before multiple goroutines are started.
NameValidationScheme = LegacyValidation NameValidationScheme = UTF8Validation
// NameEscapingScheme defines the default way that names will be // NameEscapingScheme defines the default way that names will be escaped when
// escaped when presented to systems that do not support UTF-8 names. If the // presented to systems that do not support UTF-8 names. If the Content-Type
// Content-Type "escaping" term is specified, that will override this value. // "escaping" term is specified, that will override this value.
NameEscapingScheme = ValueEncodingEscaping // NameEscapingScheme should not be set to the NoEscaping value. That string
// is used in content negotiation to indicate that a system supports UTF-8 and
// has that feature enabled.
NameEscapingScheme = UnderscoreEscaping
) )
// ValidationScheme is a Go enum for determining how metric and label names will // ValidationScheme is a Go enum for determining how metric and label names will
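A minimal sketch of what the updated comment recommends for binaries that are not yet UTF-8-aware: pin the scheme once at startup, before any goroutines run. The metric name checked is arbitrary:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

// Pin the legacy scheme once, before any goroutines start, as the comment
// above recommends for binaries that are not yet UTF-8-aware.
func init() {
	model.NameValidationScheme = model.LegacyValidation
}

func main() {
	fmt.Println(model.IsValidMetricName("http.requests")) // false under LegacyValidation
}
```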
@ -161,7 +166,7 @@ func (m Metric) FastFingerprint() Fingerprint {
func IsValidMetricName(n LabelValue) bool { func IsValidMetricName(n LabelValue) bool {
switch NameValidationScheme { switch NameValidationScheme {
case LegacyValidation: case LegacyValidation:
return IsValidLegacyMetricName(n) return IsValidLegacyMetricName(string(n))
case UTF8Validation: case UTF8Validation:
if len(n) == 0 { if len(n) == 0 {
return false return false
@ -176,7 +181,7 @@ func IsValidMetricName(n LabelValue) bool {
// legacy validation scheme regardless of the value of NameValidationScheme. // legacy validation scheme regardless of the value of NameValidationScheme.
// This function, however, does not use MetricNameRE for the check but a much // This function, however, does not use MetricNameRE for the check but a much
// faster hardcoded implementation. // faster hardcoded implementation.
func IsValidLegacyMetricName(n LabelValue) bool { func IsValidLegacyMetricName(n string) bool {
if len(n) == 0 { if len(n) == 0 {
return false return false
} }
@ -208,7 +213,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
} }
// If the name is nil, copy as-is, don't try to escape. // If the name is nil, copy as-is, don't try to escape.
if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) { if v.Name == nil || IsValidLegacyMetricName(v.GetName()) {
out.Name = v.Name out.Name = v.Name
} else { } else {
out.Name = proto.String(EscapeName(v.GetName(), scheme)) out.Name = proto.String(EscapeName(v.GetName(), scheme))
@ -230,7 +235,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
for _, l := range m.Label { for _, l := range m.Label {
if l.GetName() == MetricNameLabel { if l.GetName() == MetricNameLabel {
if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) { if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) {
escaped.Label = append(escaped.Label, l) escaped.Label = append(escaped.Label, l)
continue continue
} }
@ -240,7 +245,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
}) })
continue continue
} }
if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) { if l.Name == nil || IsValidLegacyMetricName(l.GetName()) {
escaped.Label = append(escaped.Label, l) escaped.Label = append(escaped.Label, l)
continue continue
} }
@ -256,20 +261,16 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
func metricNeedsEscaping(m *dto.Metric) bool { func metricNeedsEscaping(m *dto.Metric) bool {
for _, l := range m.Label { for _, l := range m.Label {
if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) { if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) {
return true return true
} }
if !IsValidLegacyMetricName(LabelValue(l.GetName())) { if !IsValidLegacyMetricName(l.GetName()) {
return true return true
} }
} }
return false return false
} }
const (
lowerhex = "0123456789abcdef"
)
// EscapeName escapes the incoming name according to the provided escaping // EscapeName escapes the incoming name according to the provided escaping
// scheme. Depending on the rules of escaping, this may cause no change in the // scheme. Depending on the rules of escaping, this may cause no change in the
// string that is returned. (Especially NoEscaping, which by definition is a // string that is returned. (Especially NoEscaping, which by definition is a
@ -283,7 +284,7 @@ func EscapeName(name string, scheme EscapingScheme) string {
case NoEscaping: case NoEscaping:
return name return name
case UnderscoreEscaping: case UnderscoreEscaping:
if IsValidLegacyMetricName(LabelValue(name)) { if IsValidLegacyMetricName(name) {
return name return name
} }
for i, b := range name { for i, b := range name {
@ -304,31 +305,25 @@ func EscapeName(name string, scheme EscapingScheme) string {
} else if isValidLegacyRune(b, i) { } else if isValidLegacyRune(b, i) {
escaped.WriteRune(b) escaped.WriteRune(b)
} else { } else {
escaped.WriteRune('_') escaped.WriteString("__")
} }
} }
return escaped.String() return escaped.String()
case ValueEncodingEscaping: case ValueEncodingEscaping:
if IsValidLegacyMetricName(LabelValue(name)) { if IsValidLegacyMetricName(name) {
return name return name
} }
escaped.WriteString("U__") escaped.WriteString("U__")
for i, b := range name { for i, b := range name {
if isValidLegacyRune(b, i) { if b == '_' {
escaped.WriteString("__")
} else if isValidLegacyRune(b, i) {
escaped.WriteRune(b) escaped.WriteRune(b)
} else if !utf8.ValidRune(b) { } else if !utf8.ValidRune(b) {
escaped.WriteString("_FFFD_") escaped.WriteString("_FFFD_")
} else if b < 0x100 { } else {
escaped.WriteRune('_') escaped.WriteRune('_')
for s := 4; s >= 0; s -= 4 { escaped.WriteString(strconv.FormatInt(int64(b), 16))
escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
}
escaped.WriteRune('_')
} else if b < 0x10000 {
escaped.WriteRune('_')
for s := 12; s >= 0; s -= 4 {
escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
}
escaped.WriteRune('_') escaped.WriteRune('_')
} }
} }
@ -386,8 +381,9 @@ func UnescapeName(name string, scheme EscapingScheme) string {
// We think we are in a UTF-8 code, process it. // We think we are in a UTF-8 code, process it.
var utf8Val uint var utf8Val uint
for j := 0; i < len(escapedName); j++ { for j := 0; i < len(escapedName); j++ {
// This is too many characters for a utf8 value. // This is too many characters for a utf8 value based on the MaxRune
if j > 4 { // value of '\U0010FFFF'.
if j >= 6 {
return name return name
} }
// Found a closing underscore, convert to a rune, check validity, and append. // Found a closing underscore, convert to a rune, check validity, and append.
@ -440,7 +436,7 @@ func (e EscapingScheme) String() string {
func ToEscapingScheme(s string) (EscapingScheme, error) { func ToEscapingScheme(s string) (EscapingScheme, error) {
if s == "" { if s == "" {
return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme") return NoEscaping, errors.New("got empty string instead of escaping scheme")
} }
switch s { switch s {
case AllowUTF8: case AllowUTF8:
@ -452,6 +448,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) {
case EscapeValues: case EscapeValues:
return ValueEncodingEscaping, nil return ValueEncodingEscaping, nil
default: default:
return NoEscaping, fmt.Errorf("unknown format scheme " + s) return NoEscaping, fmt.Errorf("unknown format scheme %s", s)
} }
} }
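To illustrate the escaping changes in this file, a small made-up example that escapes and round-trips a dotted name; the exact output depends on the scheme chosen:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	const name = "http.requests_total"

	// UnderscoreEscaping replaces runes that are invalid in legacy names with '_'.
	fmt.Println(model.EscapeName(name, model.UnderscoreEscaping))

	// ValueEncodingEscaping ("U__" prefix) is reversible: per this hunk a
	// literal '_' becomes "__" and disallowed runes become _<hex>_ sequences.
	escaped := model.EscapeName(name, model.ValueEncodingEscaping)
	fmt.Println(escaped)
	fmt.Println(model.UnescapeName(escaped, model.ValueEncodingEscaping))

	// Escaping schemes can also be negotiated from a Content-Type term.
	scheme, err := model.ToEscapingScheme("underscores")
	fmt.Println(scheme, err)
}
```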

View File

@ -15,6 +15,7 @@ package model
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"regexp" "regexp"
"time" "time"
@ -34,7 +35,7 @@ func (m *Matcher) UnmarshalJSON(b []byte) error {
} }
if len(m.Name) == 0 { if len(m.Name) == 0 {
return fmt.Errorf("label name in matcher must not be empty") return errors.New("label name in matcher must not be empty")
} }
if m.IsRegex { if m.IsRegex {
if _, err := regexp.Compile(m.Value); err != nil { if _, err := regexp.Compile(m.Value); err != nil {
@ -77,7 +78,7 @@ type Silence struct {
// Validate returns true iff all fields of the silence have valid values. // Validate returns true iff all fields of the silence have valid values.
func (s *Silence) Validate() error { func (s *Silence) Validate() error {
if len(s.Matchers) == 0 { if len(s.Matchers) == 0 {
return fmt.Errorf("at least one matcher required") return errors.New("at least one matcher required")
} }
for _, m := range s.Matchers { for _, m := range s.Matchers {
if err := m.Validate(); err != nil { if err := m.Validate(); err != nil {
@ -85,22 +86,22 @@ func (s *Silence) Validate() error {
} }
} }
if s.StartsAt.IsZero() { if s.StartsAt.IsZero() {
return fmt.Errorf("start time missing") return errors.New("start time missing")
} }
if s.EndsAt.IsZero() { if s.EndsAt.IsZero() {
return fmt.Errorf("end time missing") return errors.New("end time missing")
} }
if s.EndsAt.Before(s.StartsAt) { if s.EndsAt.Before(s.StartsAt) {
return fmt.Errorf("start time must be before end time") return errors.New("start time must be before end time")
} }
if s.CreatedBy == "" { if s.CreatedBy == "" {
return fmt.Errorf("creator information missing") return errors.New("creator information missing")
} }
if s.Comment == "" { if s.Comment == "" {
return fmt.Errorf("comment missing") return errors.New("comment missing")
} }
if s.CreatedAt.IsZero() { if s.CreatedAt.IsZero() {
return fmt.Errorf("creation timestamp missing") return errors.New("creation timestamp missing")
} }
return nil return nil
} }

View File

@ -15,6 +15,7 @@ package model
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"math" "math"
"strconv" "strconv"
@ -39,7 +40,7 @@ func (v SampleValue) MarshalJSON() ([]byte, error) {
// UnmarshalJSON implements json.Unmarshaler. // UnmarshalJSON implements json.Unmarshaler.
func (v *SampleValue) UnmarshalJSON(b []byte) error { func (v *SampleValue) UnmarshalJSON(b []byte) error {
if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
return fmt.Errorf("sample value must be a quoted string") return errors.New("sample value must be a quoted string")
} }
f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
if err != nil { if err != nil {

View File

@ -15,6 +15,7 @@ package model
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"strconv" "strconv"
"strings" "strings"
@ -32,7 +33,7 @@ func (v FloatString) MarshalJSON() ([]byte, error) {
func (v *FloatString) UnmarshalJSON(b []byte) error { func (v *FloatString) UnmarshalJSON(b []byte) error {
if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
return fmt.Errorf("float value must be a quoted string") return errors.New("float value must be a quoted string")
} }
f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
if err != nil { if err != nil {
@ -141,7 +142,7 @@ type SampleHistogramPair struct {
func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
if s.Histogram == nil { if s.Histogram == nil {
return nil, fmt.Errorf("histogram is nil") return nil, errors.New("histogram is nil")
} }
t, err := json.Marshal(s.Timestamp) t, err := json.Marshal(s.Timestamp)
if err != nil { if err != nil {
@ -164,7 +165,7 @@ func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
} }
if s.Histogram == nil { if s.Histogram == nil {
return fmt.Errorf("histogram is null") return errors.New("histogram is null")
} }
return nil return nil
} }

vendor/modules.txt vendored
View File

@ -263,18 +263,19 @@ github.com/pkg/browser
# github.com/pkg/errors v0.9.1 # github.com/pkg/errors v0.9.1
## explicit ## explicit
github.com/pkg/errors github.com/pkg/errors
# github.com/prometheus/client_golang v1.20.5 # github.com/prometheus/client_golang v1.22.0
## explicit; go 1.20 ## explicit; go 1.22
github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil
github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header
github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/promhttp
github.com/prometheus/client_golang/prometheus/promhttp/internal
# github.com/prometheus/client_model v0.6.1 # github.com/prometheus/client_model v0.6.1
## explicit; go 1.19 ## explicit; go 1.19
github.com/prometheus/client_model/go github.com/prometheus/client_model/go
# github.com/prometheus/common v0.55.0 # github.com/prometheus/common v0.62.0
## explicit; go 1.20 ## explicit; go 1.21
github.com/prometheus/common/expfmt github.com/prometheus/common/expfmt
github.com/prometheus/common/model github.com/prometheus/common/model
# github.com/prometheus/procfs v0.15.1 # github.com/prometheus/procfs v0.15.1