// promql/functions.go (Prometheus) — 1712 lines · 55.9 KB
1// Copyright 2015 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14package promql
15
16import (
17"fmt"
18"math"
19"slices"
20"sort"
21"strconv"
22"strings"
23"time"
24
25"github.com/facette/natsort"
26"github.com/grafana/regexp"
27"github.com/prometheus/common/model"
28
29"github.com/prometheus/prometheus/model/histogram"
30"github.com/prometheus/prometheus/model/labels"
31"github.com/prometheus/prometheus/promql/parser"
32"github.com/prometheus/prometheus/promql/parser/posrange"
33"github.com/prometheus/prometheus/util/annotations"
34)
35
// FunctionCall is the type of a PromQL function implementation.
//
// vals is a list of the evaluated arguments for the function call.
//
// For range vectors it will be a Matrix with one series, instant vectors a
// Vector, scalars a Vector with one series whose value is the scalar
// value, and nil for strings.
//
// args are the original arguments to the function, where you can access
// matrixSelectors, vectorSelectors, and StringLiterals.
//
// enh.Out is a pre-allocated empty vector that you may use to accumulate
// output before returning it. The vectors in vals should not be returned.
//
// Range vector functions need only return a vector with the right value,
// the metric and timestamp are not needed.
//
// Instant vector functions need only return a vector with the right values and
// metrics, the timestamps are not needed.
//
// Scalar results should be returned as the value of a sample in a Vector.
type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations)
58
59// === time() float64 ===
60func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
61return Vector{Sample{
62F: float64(enh.Ts) / 1000,
63}}, nil
64}
65
// extrapolatedRate is a utility function for rate/increase/delta.
// It calculates the rate (allowing for counter resets if isCounter is true),
// extrapolates if the first/last sample is close to the boundary, and returns
// the result as either per-second (if isRate is true) or overall.
func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) {
	ms := args[0].(*parser.MatrixSelector)
	vs := ms.VectorSelector.(*parser.VectorSelector)
	var (
		samples = vals[0].(Matrix)[0]
		// Range boundaries in milliseconds, both shifted back by the offset.
		rangeStart         = enh.Ts - durationMilliseconds(ms.Range+vs.Offset)
		rangeEnd           = enh.Ts - durationMilliseconds(vs.Offset)
		resultFloat        float64
		resultHistogram    *histogram.FloatHistogram
		firstT, lastT      int64
		numSamplesMinusOne int
		annos              annotations.Annotations
	)

	// We need either at least two Histograms and no Floats, or at least two
	// Floats and no Histograms to calculate a rate. Otherwise, drop this
	// Vector element.
	metricName := samples.Metric.Get(labels.MetricName)
	if len(samples.Histograms) > 0 && len(samples.Floats) > 0 {
		return enh.Out, annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange()))
	}

	switch {
	case len(samples.Histograms) > 1:
		numSamplesMinusOne = len(samples.Histograms) - 1
		firstT = samples.Histograms[0].T
		lastT = samples.Histograms[numSamplesMinusOne].T
		var newAnnos annotations.Annotations
		resultHistogram, newAnnos = histogramRate(samples.Histograms, isCounter, metricName, args[0].PositionRange())
		if resultHistogram == nil {
			// The histograms are not compatible with each other.
			return enh.Out, annos.Merge(newAnnos)
		}
	case len(samples.Floats) > 1:
		numSamplesMinusOne = len(samples.Floats) - 1
		firstT = samples.Floats[0].T
		lastT = samples.Floats[numSamplesMinusOne].T
		// Raw difference between last and first value; counter resets are
		// compensated for in the loop below.
		resultFloat = samples.Floats[numSamplesMinusOne].F - samples.Floats[0].F
		if !isCounter {
			break
		}
		// Handle counter resets: whenever the value drops, add the
		// pre-reset level back, so resultFloat is the total increase.
		prevValue := samples.Floats[0].F
		for _, currPoint := range samples.Floats[1:] {
			if currPoint.F < prevValue {
				resultFloat += prevValue
			}
			prevValue = currPoint.F
		}
	default:
		// Fewer than two samples of either kind: no rate can be computed.
		// TODO: add RangeTooShortWarning
		return enh.Out, annos
	}

	// Duration between first/last samples and boundary of range (seconds).
	durationToStart := float64(firstT-rangeStart) / 1000
	durationToEnd := float64(rangeEnd-lastT) / 1000

	sampledInterval := float64(lastT-firstT) / 1000
	averageDurationBetweenSamples := sampledInterval / float64(numSamplesMinusOne)

	// If the first/last samples are close to the boundaries of the range,
	// extrapolate the result. This is as we expect that another sample
	// will exist given the spacing between samples we've seen thus far,
	// with an allowance for noise.
	extrapolationThreshold := averageDurationBetweenSamples * 1.1
	extrapolateToInterval := sampledInterval

	if durationToStart >= extrapolationThreshold {
		durationToStart = averageDurationBetweenSamples / 2
	}
	if isCounter && resultFloat > 0 && len(samples.Floats) > 0 && samples.Floats[0].F >= 0 {
		// Counters cannot be negative. If we have any slope at all
		// (i.e. resultFloat went up), we can extrapolate the zero point
		// of the counter. If the duration to the zero point is shorter
		// than the durationToStart, we take the zero point as the start
		// of the series, thereby avoiding extrapolation to negative
		// counter values.
		// TODO(beorn7): Do this for histograms, too.
		durationToZero := sampledInterval * (samples.Floats[0].F / resultFloat)
		if durationToZero < durationToStart {
			durationToStart = durationToZero
		}
	}
	extrapolateToInterval += durationToStart

	if durationToEnd >= extrapolationThreshold {
		durationToEnd = averageDurationBetweenSamples / 2
	}
	extrapolateToInterval += durationToEnd

	// Scale by extrapolated/sampled interval; for rate() additionally
	// normalize by the range length in seconds.
	factor := extrapolateToInterval / sampledInterval
	if isRate {
		factor /= ms.Range.Seconds()
	}
	if resultHistogram == nil {
		resultFloat *= factor
	} else {
		resultHistogram.Mul(factor)
	}

	return append(enh.Out, Sample{F: resultFloat, H: resultHistogram}), annos
}
173
174// histogramRate is a helper function for extrapolatedRate. It requires
175// points[0] to be a histogram. It returns nil if any other Point in points is
176// not a histogram, and a warning wrapped in an annotation in that case.
177// Otherwise, it returns the calculated histogram and an empty annotation.
178func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) {
179prev := points[0].H
180last := points[len(points)-1].H
181if last == nil {
182return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos))
183}
184minSchema := prev.Schema
185if last.Schema < minSchema {
186minSchema = last.Schema
187}
188
189var annos annotations.Annotations
190
191// First iteration to find out two things:
192// - What's the smallest relevant schema?
193// - Are all data points histograms?
194// TODO(beorn7): Find a way to check that earlier, e.g. by handing in a
195// []FloatPoint and a []HistogramPoint separately.
196for _, currPoint := range points[1 : len(points)-1] {
197curr := currPoint.H
198if curr == nil {
199return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos))
200}
201if !isCounter {
202continue
203}
204if curr.CounterResetHint == histogram.GaugeType {
205annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos))
206}
207if curr.Schema < minSchema {
208minSchema = curr.Schema
209}
210}
211
212h := last.CopyToSchema(minSchema)
213h.Sub(prev)
214
215if isCounter {
216// Second iteration to deal with counter resets.
217for _, currPoint := range points[1:] {
218curr := currPoint.H
219if curr.DetectReset(prev) {
220h.Add(prev)
221}
222prev = curr
223}
224} else if points[0].H.CounterResetHint != histogram.GaugeType || points[len(points)-1].H.CounterResetHint != histogram.GaugeType {
225annos.Add(annotations.NewNativeHistogramNotGaugeWarning(metricName, pos))
226}
227
228h.CounterResetHint = histogram.GaugeType
229return h.Compact(0), nil
230}
231
232// === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
233func funcDelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
234return extrapolatedRate(vals, args, enh, false, false)
235}
236
237// === rate(node parser.ValueTypeMatrix) (Vector, Annotations) ===
238func funcRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
239return extrapolatedRate(vals, args, enh, true, true)
240}
241
242// === increase(node parser.ValueTypeMatrix) (Vector, Annotations) ===
243func funcIncrease(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
244return extrapolatedRate(vals, args, enh, true, false)
245}
246
247// === irate(node parser.ValueTypeMatrix) (Vector, Annotations) ===
248func funcIrate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
249return instantValue(vals, enh.Out, true)
250}
251
252// === idelta(node model.ValMatrix) (Vector, Annotations) ===
253func funcIdelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
254return instantValue(vals, enh.Out, false)
255}
256
257func instantValue(vals []parser.Value, out Vector, isRate bool) (Vector, annotations.Annotations) {
258samples := vals[0].(Matrix)[0]
259// No sense in trying to compute a rate without at least two points. Drop
260// this Vector element.
261// TODO: add RangeTooShortWarning
262if len(samples.Floats) < 2 {
263return out, nil
264}
265
266lastSample := samples.Floats[len(samples.Floats)-1]
267previousSample := samples.Floats[len(samples.Floats)-2]
268
269var resultValue float64
270if isRate && lastSample.F < previousSample.F {
271// Counter reset.
272resultValue = lastSample.F
273} else {
274resultValue = lastSample.F - previousSample.F
275}
276
277sampledInterval := lastSample.T - previousSample.T
278if sampledInterval == 0 {
279// Avoid dividing by 0.
280return out, nil
281}
282
283if isRate {
284// Convert to per-second.
285resultValue /= float64(sampledInterval) / 1000
286}
287
288return append(out, Sample{F: resultValue}), nil
289}
290
// calcTrendValue computes the trend component at index i of the raw data,
// roughly the slope of the smoothed series at that point.
// tf is the trend factor, s0 and s1 are the previous and current smoothed
// values, and b is the current raw trend estimate.
func calcTrendValue(i int, tf, s0, s1, b float64) float64 {
	// At the first data point there is no history to derive a trend from,
	// so the raw estimate is returned unchanged.
	if i == 0 {
		return b
	}
	return tf*(s1-s0) + (1-tf)*b
}
307
// Holt-Winters is similar to a weighted moving average, where historical data has exponentially less influence on the current data.
// Holt-Winter also accounts for trends in data. The smoothing factor (0 < sf < 1) affects how historical data will affect the current
// data. A lower smoothing factor increases the influence of historical data. The trend factor (0 < tf < 1) affects
// how trends in historical data will affect the current data. A higher trend factor increases the influence
// of trends. Algorithm taken from https://en.wikipedia.org/wiki/Exponential_smoothing titled: "Double exponential smoothing".
func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	samples := vals[0].(Matrix)[0]

	// The smoothing factor argument.
	sf := vals[1].(Vector)[0].F

	// The trend factor argument.
	tf := vals[2].(Vector)[0].F

	// Check that the input parameters are valid. Out-of-range factors are
	// a query error, reported via panic (caught by the engine).
	if sf <= 0 || sf >= 1 {
		panic(fmt.Errorf("invalid smoothing factor. Expected: 0 < sf < 1, got: %f", sf))
	}
	if tf <= 0 || tf >= 1 {
		panic(fmt.Errorf("invalid trend factor. Expected: 0 < tf < 1, got: %f", tf))
	}

	l := len(samples.Floats)

	// Can't do the smoothing operation with less than two points.
	if l < 2 {
		return enh.Out, nil
	}

	var s0, s1, b float64
	// Set initial values: the first smoothed value is the first sample,
	// the initial trend is the difference of the first two samples.
	// s0 starts at zero but is never read before being set: on the first
	// iteration calcTrendValue is called with index 0 and returns b.
	s1 = samples.Floats[0].F
	b = samples.Floats[1].F - samples.Floats[0].F

	// Run the smoothing operation.
	var x, y float64
	for i := 1; i < l; i++ {
		// Scale the raw value against the smoothing factor.
		x = sf * samples.Floats[i].F

		// Scale the last smoothed value with the trend at this point.
		b = calcTrendValue(i-1, tf, s0, s1, b)
		y = (1 - sf) * (s1 + b)

		s0, s1 = s1, x+y
	}

	// The final smoothed value is the result.
	return append(enh.Out, Sample{F: s1}), nil
}
357
358// === sort(node parser.ValueTypeVector) (Vector, Annotations) ===
359func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
360// NaN should sort to the bottom, so take descending sort with NaN first and
361// reverse it.
362byValueSorter := vectorByReverseValueHeap(vals[0].(Vector))
363sort.Sort(sort.Reverse(byValueSorter))
364return Vector(byValueSorter), nil
365}
366
367// === sortDesc(node parser.ValueTypeVector) (Vector, Annotations) ===
368func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
369// NaN should sort to the bottom, so take ascending sort with NaN first and
370// reverse it.
371byValueSorter := vectorByValueHeap(vals[0].(Vector))
372sort.Sort(sort.Reverse(byValueSorter))
373return Vector(byValueSorter), nil
374}
375
// === sort_by_label(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) ===
func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	// Pre-sort by value first so that samples whose labels compare equal
	// keep a value ordering with NaN at the bottom (slices.SortFunc is
	// stable with respect to elements that compare as 0 here only because
	// of this pre-pass ordering).
	var anno annotations.Annotations
	vals[0], anno = funcSort(vals, args, enh)
	labels := stringSliceFromArgs(args[1:])
	slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
		// Iterate over each given label; the first label whose values
		// differ decides the order.
		for _, label := range labels {
			lv1 := a.Metric.Get(label)
			lv2 := b.Metric.Get(label)

			if lv1 == lv2 {
				continue
			}

			// natsort.Compare reports natural-order "less than"
			// (e.g. "a2" before "a10").
			if natsort.Compare(lv1, lv2) {
				return -1
			}

			return +1
		}

		// All requested labels are equal; keep the value-based order.
		return 0
	})

	return vals[0].(Vector), anno
}
405
// === sort_by_label_desc(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) ===
func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	// Pre-sort by descending value so that samples whose labels compare
	// equal keep a value ordering with NaN at the bottom.
	var anno annotations.Annotations
	vals[0], anno = funcSortDesc(vals, args, enh)
	labels := stringSliceFromArgs(args[1:])
	slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
		// Iterate over each given label; the first label whose values
		// differ decides the order (inverted relative to sort_by_label).
		for _, label := range labels {
			lv1 := a.Metric.Get(label)
			lv2 := b.Metric.Get(label)

			if lv1 == lv2 {
				continue
			}

			// natsort.Compare reports natural-order "less than"; the
			// signs are flipped here to produce a descending order.
			if natsort.Compare(lv1, lv2) {
				return +1
			}

			return -1
		}

		// All requested labels are equal; keep the value-based order.
		return 0
	})

	return vals[0].(Vector), anno
}
435
436// === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) ===
437func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
438vec := vals[0].(Vector)
439min := vals[1].(Vector)[0].F
440max := vals[2].(Vector)[0].F
441if max < min {
442return enh.Out, nil
443}
444for _, el := range vec {
445enh.Out = append(enh.Out, Sample{
446Metric: el.Metric.DropMetricName(),
447F: math.Max(min, math.Min(max, el.F)),
448})
449}
450return enh.Out, nil
451}
452
453// === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) ===
454func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
455vec := vals[0].(Vector)
456max := vals[1].(Vector)[0].F
457for _, el := range vec {
458enh.Out = append(enh.Out, Sample{
459Metric: el.Metric.DropMetricName(),
460F: math.Min(max, el.F),
461})
462}
463return enh.Out, nil
464}
465
466// === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) ===
467func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
468vec := vals[0].(Vector)
469min := vals[1].(Vector)[0].F
470for _, el := range vec {
471enh.Out = append(enh.Out, Sample{
472Metric: el.Metric.DropMetricName(),
473F: math.Max(min, el.F),
474})
475}
476return enh.Out, nil
477}
478
479// === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) (Vector, Annotations) ===
480func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
481vec := vals[0].(Vector)
482// round returns a number rounded to toNearest.
483// Ties are solved by rounding up.
484toNearest := float64(1)
485if len(args) >= 2 {
486toNearest = vals[1].(Vector)[0].F
487}
488// Invert as it seems to cause fewer floating point accuracy issues.
489toNearestInverse := 1.0 / toNearest
490
491for _, el := range vec {
492f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse
493enh.Out = append(enh.Out, Sample{
494Metric: el.Metric.DropMetricName(),
495F: f,
496})
497}
498return enh.Out, nil
499}
500
501// === Scalar(node parser.ValueTypeVector) Scalar ===
502func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
503v := vals[0].(Vector)
504if len(v) != 1 {
505return append(enh.Out, Sample{F: math.NaN()}), nil
506}
507return append(enh.Out, Sample{F: v[0].F}), nil
508}
509
510func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector {
511el := vals[0].(Matrix)[0]
512
513return append(enh.Out, Sample{F: aggrFn(el)})
514}
515
516func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) *histogram.FloatHistogram) Vector {
517el := vals[0].(Matrix)[0]
518
519return append(enh.Out, Sample{H: aggrFn(el)})
520}
521
// === avg_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	firstSeries := vals[0].(Matrix)[0]
	if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 {
		// A series mixing floats and histograms has no meaningful average;
		// drop it and warn.
		metricName := firstSeries.Metric.Get(labels.MetricName)
		return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange()))
	}
	if len(firstSeries.Floats) == 0 {
		// The passed values only contain histograms. Compute an
		// incremental mean: mean += h/count - mean/count.
		return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram {
			count := 1
			mean := s.Histograms[0].H.Copy()
			for _, h := range s.Histograms[1:] {
				count++
				left := h.H.Copy().Div(float64(count))
				right := mean.Copy().Div(float64(count))
				toAdd := left.Sub(right)
				mean.Add(toAdd)
			}
			return mean
		}), nil
	}
	// Float samples: incremental mean with Kahan compensation (c) for
	// numerical stability.
	return aggrOverTime(vals, enh, func(s Series) float64 {
		var mean, count, c float64
		for _, f := range s.Floats {
			count++
			if math.IsInf(mean, 0) {
				if math.IsInf(f.F, 0) && (mean > 0) == (f.F > 0) {
					// The `mean` and `f.F` values are `Inf` of the same sign. They
					// can't be subtracted, but the value of `mean` is correct
					// already.
					continue
				}
				if !math.IsInf(f.F, 0) && !math.IsNaN(f.F) {
					// At this stage, the mean is an infinite. If the added
					// value is neither an Inf or a Nan, we can keep that mean
					// value.
					// This is required because our calculation below removes
					// the mean value, which would look like Inf += x - Inf and
					// end up as a NaN.
					continue
				}
			}
			mean, c = kahanSumInc(f.F/count-mean/count, mean, c)
		}

		if math.IsInf(mean, 0) {
			// Adding the compensation term to an Inf would yield NaN.
			return mean
		}
		return mean + c
	}), nil
}
574
575// === count_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
576func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
577return aggrOverTime(vals, enh, func(s Series) float64 {
578return float64(len(s.Floats) + len(s.Histograms))
579}), nil
580}
581
582// === last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
583func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
584el := vals[0].(Matrix)[0]
585
586var f FPoint
587if len(el.Floats) > 0 {
588f = el.Floats[len(el.Floats)-1]
589}
590
591var h HPoint
592if len(el.Histograms) > 0 {
593h = el.Histograms[len(el.Histograms)-1]
594}
595
596if h.H == nil || h.T < f.T {
597return append(enh.Out, Sample{
598Metric: el.Metric,
599F: f.F,
600}), nil
601}
602return append(enh.Out, Sample{
603Metric: el.Metric,
604H: h.H.Copy(),
605}), nil
606}
607
608// === mad_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
609func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
610if len(vals[0].(Matrix)[0].Floats) == 0 {
611return enh.Out, nil
612}
613return aggrOverTime(vals, enh, func(s Series) float64 {
614values := make(vectorByValueHeap, 0, len(s.Floats))
615for _, f := range s.Floats {
616values = append(values, Sample{F: f.F})
617}
618median := quantile(0.5, values)
619values = make(vectorByValueHeap, 0, len(s.Floats))
620for _, f := range s.Floats {
621values = append(values, Sample{F: math.Abs(f.F - median)})
622}
623return quantile(0.5, values)
624}), nil
625}
626
627// === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
628func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
629if len(vals[0].(Matrix)[0].Floats) == 0 {
630// TODO(beorn7): The passed values only contain
631// histograms. max_over_time ignores histograms for now. If
632// there are only histograms, we have to return without adding
633// anything to enh.Out.
634return enh.Out, nil
635}
636return aggrOverTime(vals, enh, func(s Series) float64 {
637max := s.Floats[0].F
638for _, f := range s.Floats {
639if f.F > max || math.IsNaN(max) {
640max = f.F
641}
642}
643return max
644}), nil
645}
646
647// === min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
648func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
649if len(vals[0].(Matrix)[0].Floats) == 0 {
650// TODO(beorn7): The passed values only contain
651// histograms. min_over_time ignores histograms for now. If
652// there are only histograms, we have to return without adding
653// anything to enh.Out.
654return enh.Out, nil
655}
656return aggrOverTime(vals, enh, func(s Series) float64 {
657min := s.Floats[0].F
658for _, f := range s.Floats {
659if f.F < min || math.IsNaN(min) {
660min = f.F
661}
662}
663return min
664}), nil
665}
666
667// === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
668func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
669firstSeries := vals[0].(Matrix)[0]
670if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 {
671metricName := firstSeries.Metric.Get(labels.MetricName)
672return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange()))
673}
674if len(firstSeries.Floats) == 0 {
675// The passed values only contain histograms.
676return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram {
677sum := s.Histograms[0].H.Copy()
678for _, h := range s.Histograms[1:] {
679sum.Add(h.H)
680}
681return sum
682}), nil
683}
684return aggrOverTime(vals, enh, func(s Series) float64 {
685var sum, c float64
686for _, f := range s.Floats {
687sum, c = kahanSumInc(f.F, sum, c)
688}
689if math.IsInf(sum, 0) {
690return sum
691}
692return sum + c
693}), nil
694}
695
696// === quantile_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
697func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
698q := vals[0].(Vector)[0].F
699el := vals[1].(Matrix)[0]
700if len(el.Floats) == 0 {
701// TODO(beorn7): The passed values only contain
702// histograms. quantile_over_time ignores histograms for now. If
703// there are only histograms, we have to return without adding
704// anything to enh.Out.
705return enh.Out, nil
706}
707
708var annos annotations.Annotations
709if math.IsNaN(q) || q < 0 || q > 1 {
710annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange()))
711}
712
713values := make(vectorByValueHeap, 0, len(el.Floats))
714for _, f := range el.Floats {
715values = append(values, Sample{F: f.F})
716}
717return append(enh.Out, Sample{F: quantile(q, values)}), annos
718}
719
// === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	if len(vals[0].(Matrix)[0].Floats) == 0 {
		// TODO(beorn7): The passed values only contain
		// histograms. stddev_over_time ignores histograms for now. If
		// there are only histograms, we have to return without adding
		// anything to enh.Out.
		return enh.Out, nil
	}
	return aggrOverTime(vals, enh, func(s Series) float64 {
		// Welford-style streaming variance with Kahan-compensated sums
		// (cMean/cAux hold the compensation terms) for numerical
		// stability.
		var count float64
		var mean, cMean float64
		var aux, cAux float64
		for _, f := range s.Floats {
			count++
			delta := f.F - (mean + cMean)
			mean, cMean = kahanSumInc(delta/count, mean, cMean)
			aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux)
		}
		// Population standard deviation (divides by N, not N-1).
		return math.Sqrt((aux + cAux) / count)
	}), nil
}
742
// === stdvar_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	if len(vals[0].(Matrix)[0].Floats) == 0 {
		// TODO(beorn7): The passed values only contain
		// histograms. stdvar_over_time ignores histograms for now. If
		// there are only histograms, we have to return without adding
		// anything to enh.Out.
		return enh.Out, nil
	}
	return aggrOverTime(vals, enh, func(s Series) float64 {
		// Welford-style streaming variance with Kahan-compensated sums
		// (cMean/cAux hold the compensation terms) for numerical
		// stability.
		var count float64
		var mean, cMean float64
		var aux, cAux float64
		for _, f := range s.Floats {
			count++
			delta := f.F - (mean + cMean)
			mean, cMean = kahanSumInc(delta/count, mean, cMean)
			aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux)
		}
		// Population variance (divides by N, not N-1).
		return (aux + cAux) / count
	}), nil
}
765
766// === absent(Vector parser.ValueTypeVector) (Vector, Annotations) ===
767func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
768if len(vals[0].(Vector)) > 0 {
769return enh.Out, nil
770}
771return append(enh.Out,
772Sample{
773Metric: createLabelsForAbsentFunction(args[0]),
774F: 1,
775}), nil
776}
777
778// === absent_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) ===
779// As this function has a matrix as argument, it does not get all the Series.
780// This function will return 1 if the matrix has at least one element.
781// Due to engine optimization, this function is only called when this condition is true.
782// Then, the engine post-processes the results to get the expected output.
783func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
784return append(enh.Out, Sample{F: 1}), nil
785}
786
787// === present_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) ===
788func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
789return aggrOverTime(vals, enh, func(s Series) float64 {
790return 1
791}), nil
792}
793
794func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector {
795for _, el := range vals[0].(Vector) {
796if el.H == nil { // Process only float samples.
797enh.Out = append(enh.Out, Sample{
798Metric: el.Metric.DropMetricName(),
799F: f(el.F),
800})
801}
802}
803return enh.Out
804}
805
806// === abs(Vector parser.ValueTypeVector) (Vector, Annotations) ===
807func funcAbs(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
808return simpleFunc(vals, enh, math.Abs), nil
809}
810
811// === ceil(Vector parser.ValueTypeVector) (Vector, Annotations) ===
812func funcCeil(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
813return simpleFunc(vals, enh, math.Ceil), nil
814}
815
816// === floor(Vector parser.ValueTypeVector) (Vector, Annotations) ===
817func funcFloor(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
818return simpleFunc(vals, enh, math.Floor), nil
819}
820
821// === exp(Vector parser.ValueTypeVector) (Vector, Annotations) ===
822func funcExp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
823return simpleFunc(vals, enh, math.Exp), nil
824}
825
826// === sqrt(Vector VectorNode) (Vector, Annotations) ===
827func funcSqrt(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
828return simpleFunc(vals, enh, math.Sqrt), nil
829}
830
831// === ln(Vector parser.ValueTypeVector) (Vector, Annotations) ===
832func funcLn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
833return simpleFunc(vals, enh, math.Log), nil
834}
835
836// === log2(Vector parser.ValueTypeVector) (Vector, Annotations) ===
837func funcLog2(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
838return simpleFunc(vals, enh, math.Log2), nil
839}
840
841// === log10(Vector parser.ValueTypeVector) (Vector, Annotations) ===
842func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
843return simpleFunc(vals, enh, math.Log10), nil
844}
845
846// === sin(Vector parser.ValueTypeVector) (Vector, Annotations) ===
847func funcSin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
848return simpleFunc(vals, enh, math.Sin), nil
849}
850
851// === cos(Vector parser.ValueTypeVector) (Vector, Annotations) ===
852func funcCos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
853return simpleFunc(vals, enh, math.Cos), nil
854}
855
856// === tan(Vector parser.ValueTypeVector) (Vector, Annotations) ===
857func funcTan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
858return simpleFunc(vals, enh, math.Tan), nil
859}
860
861// === asin(Vector parser.ValueTypeVector) (Vector, Annotations) ===
862func funcAsin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
863return simpleFunc(vals, enh, math.Asin), nil
864}
865
866// === acos(Vector parser.ValueTypeVector) (Vector, Annotations) ===
867func funcAcos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
868return simpleFunc(vals, enh, math.Acos), nil
869}
870
871// === atan(Vector parser.ValueTypeVector) (Vector, Annotations) ===
872func funcAtan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
873return simpleFunc(vals, enh, math.Atan), nil
874}
875
876// === sinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
877func funcSinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
878return simpleFunc(vals, enh, math.Sinh), nil
879}
880
881// === cosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
882func funcCosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
883return simpleFunc(vals, enh, math.Cosh), nil
884}
885
886// === tanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
887func funcTanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
888return simpleFunc(vals, enh, math.Tanh), nil
889}
890
891// === asinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
892func funcAsinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
893return simpleFunc(vals, enh, math.Asinh), nil
894}
895
896// === acosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
897func funcAcosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
898return simpleFunc(vals, enh, math.Acosh), nil
899}
900
901// === atanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
902func funcAtanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
903return simpleFunc(vals, enh, math.Atanh), nil
904}
905
906// === rad(Vector parser.ValueTypeVector) (Vector, Annotations) ===
907func funcRad(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
908return simpleFunc(vals, enh, func(v float64) float64 {
909return v * math.Pi / 180
910}), nil
911}
912
913// === deg(Vector parser.ValueTypeVector) (Vector, Annotations) ===
914func funcDeg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
915return simpleFunc(vals, enh, func(v float64) float64 {
916return v * 180 / math.Pi
917}), nil
918}
919
920// === pi() Scalar ===
921func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
922return Vector{Sample{F: math.Pi}}, nil
923}
924
925// === sgn(Vector parser.ValueTypeVector) (Vector, Annotations) ===
926func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
927return simpleFunc(vals, enh, func(v float64) float64 {
928switch {
929case v < 0:
930return -1
931case v > 0:
932return 1
933default:
934return v
935}
936}), nil
937}
938
939// === timestamp(Vector parser.ValueTypeVector) (Vector, Annotations) ===
940func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
941vec := vals[0].(Vector)
942for _, el := range vec {
943enh.Out = append(enh.Out, Sample{
944Metric: el.Metric.DropMetricName(),
945F: float64(el.T) / 1000,
946})
947}
948return enh.Out, nil
949}
950
// kahanSumInc adds inc to the running sum sum with compensation term c,
// using the Neumaier variant of Kahan summation: the compensation update
// depends on which of the two addends has the larger magnitude.
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	newSum = sum + inc
	switch {
	case math.Abs(sum) >= math.Abs(inc):
		// sum dominates: low-order bits of inc were lost in the addition.
		newC = c + ((sum - newSum) + inc)
	default:
		// inc dominates: low-order bits of sum were lost instead.
		newC = c + ((inc - newSum) + sum)
	}
	return newSum, newC
}
961
// linearRegression performs a least-square linear regression analysis on the
// provided SamplePairs. It returns the slope, and the intercept value at the
// provided time.
func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept float64) {
	var (
		n          float64
		sumX, cX   float64
		sumY, cY   float64
		sumXY, cXY float64
		sumX2, cX2 float64
		initY      float64
		constY     bool
	)
	initY = samples[0].F
	constY = true
	for i, sample := range samples {
		// Set constY to false if any new y values are encountered.
		if constY && i > 0 && sample.F != initY {
			constY = false
		}
		n += 1.0
		// Timestamps are in ms; shift them relative to interceptTime and
		// scale to seconds to keep the x values small.
		x := float64(sample.T-interceptTime) / 1e3
		// All four running sums are accumulated with Kahan (Neumaier)
		// compensation; the c* variables hold the compensation terms.
		sumX, cX = kahanSumInc(x, sumX, cX)
		sumY, cY = kahanSumInc(sample.F, sumY, cY)
		sumXY, cXY = kahanSumInc(x*sample.F, sumXY, cXY)
		sumX2, cX2 = kahanSumInc(x*x, sumX2, cX2)
	}
	// Shortcut for a constant series: slope 0 and the constant as intercept,
	// except for +/-Inf values where no finite regression exists.
	if constY {
		if math.IsInf(initY, 0) {
			return math.NaN(), math.NaN()
		}
		return 0, initY
	}
	// Fold the compensation terms back into the sums before using them.
	sumX += cX
	sumY += cY
	sumXY += cXY
	sumX2 += cX2

	covXY := sumXY - sumX*sumY/n
	varX := sumX2 - sumX*sumX/n

	slope = covXY / varX
	intercept = sumY/n - slope*sumX/n
	return slope, intercept
}
1007
1008// === deriv(node parser.ValueTypeMatrix) (Vector, Annotations) ===
1009func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1010samples := vals[0].(Matrix)[0]
1011
1012// No sense in trying to compute a derivative without at least two points.
1013// Drop this Vector element.
1014if len(samples.Floats) < 2 {
1015return enh.Out, nil
1016}
1017
1018// We pass in an arbitrary timestamp that is near the values in use
1019// to avoid floating point accuracy issues, see
1020// https://github.com/prometheus/prometheus/issues/2674
1021slope, _ := linearRegression(samples.Floats, samples.Floats[0].T)
1022return append(enh.Out, Sample{F: slope}), nil
1023}
1024
1025// === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) (Vector, Annotations) ===
1026func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1027samples := vals[0].(Matrix)[0]
1028duration := vals[1].(Vector)[0].F
1029// No sense in trying to predict anything without at least two points.
1030// Drop this Vector element.
1031if len(samples.Floats) < 2 {
1032return enh.Out, nil
1033}
1034slope, intercept := linearRegression(samples.Floats, enh.Ts)
1035
1036return append(enh.Out, Sample{F: slope*duration + intercept}), nil
1037}
1038
1039// === histogram_count(Vector parser.ValueTypeVector) (Vector, Annotations) ===
1040func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1041inVec := vals[0].(Vector)
1042
1043for _, sample := range inVec {
1044// Skip non-histogram samples.
1045if sample.H == nil {
1046continue
1047}
1048enh.Out = append(enh.Out, Sample{
1049Metric: sample.Metric.DropMetricName(),
1050F: sample.H.Count,
1051})
1052}
1053return enh.Out, nil
1054}
1055
1056// === histogram_sum(Vector parser.ValueTypeVector) (Vector, Annotations) ===
1057func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1058inVec := vals[0].(Vector)
1059
1060for _, sample := range inVec {
1061// Skip non-histogram samples.
1062if sample.H == nil {
1063continue
1064}
1065enh.Out = append(enh.Out, Sample{
1066Metric: sample.Metric.DropMetricName(),
1067F: sample.H.Sum,
1068})
1069}
1070return enh.Out, nil
1071}
1072
1073// === histogram_avg(Vector parser.ValueTypeVector) (Vector, Annotations) ===
1074func funcHistogramAvg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1075inVec := vals[0].(Vector)
1076
1077for _, sample := range inVec {
1078// Skip non-histogram samples.
1079if sample.H == nil {
1080continue
1081}
1082enh.Out = append(enh.Out, Sample{
1083Metric: sample.Metric.DropMetricName(),
1084F: sample.H.Sum / sample.H.Count,
1085})
1086}
1087return enh.Out, nil
1088}
1089
// === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	inVec := vals[0].(Vector)

	for _, sample := range inVec {
		// Skip non-histogram samples.
		if sample.H == nil {
			continue
		}
		mean := sample.H.Sum / sample.H.Count
		var variance, cVariance float64
		it := sample.H.AllBucketIterator()
		for it.Next() {
			bucket := it.At()
			if bucket.Count == 0 {
				continue
			}
			// Pick a representative value for the bucket: 0 for buckets
			// spanning zero, otherwise the geometric mean of the bounds,
			// negated when the bucket lies entirely below zero.
			var val float64
			if bucket.Lower <= 0 && 0 <= bucket.Upper {
				val = 0
			} else {
				val = math.Sqrt(bucket.Upper * bucket.Lower)
				if bucket.Upper < 0 {
					val = -val
				}
			}
			delta := val - mean
			// Accumulate count-weighted squared deviations with Kahan
			// compensation (cVariance holds the compensation term).
			variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
		}
		variance += cVariance
		variance /= sample.H.Count
		enh.Out = append(enh.Out, Sample{
			Metric: sample.Metric.DropMetricName(),
			F:      math.Sqrt(variance),
		})
	}
	return enh.Out, nil
}
1128
// === histogram_stdvar(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	inVec := vals[0].(Vector)

	for _, sample := range inVec {
		// Skip non-histogram samples.
		if sample.H == nil {
			continue
		}
		mean := sample.H.Sum / sample.H.Count
		var variance, cVariance float64
		it := sample.H.AllBucketIterator()
		for it.Next() {
			bucket := it.At()
			if bucket.Count == 0 {
				continue
			}
			// Same bucket representative as funcHistogramStdDev: 0 for
			// buckets spanning zero, otherwise the geometric mean of the
			// bounds, negated for all-negative buckets.
			var val float64
			if bucket.Lower <= 0 && 0 <= bucket.Upper {
				val = 0
			} else {
				val = math.Sqrt(bucket.Upper * bucket.Lower)
				if bucket.Upper < 0 {
					val = -val
				}
			}
			delta := val - mean
			// Kahan-compensated accumulation of count-weighted squared
			// deviations.
			variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
		}
		variance += cVariance
		variance /= sample.H.Count
		enh.Out = append(enh.Out, Sample{
			Metric: sample.Metric.DropMetricName(),
			F:      variance,
		})
	}
	return enh.Out, nil
}
1167
1168// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) ===
1169func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1170lower := vals[0].(Vector)[0].F
1171upper := vals[1].(Vector)[0].F
1172inVec := vals[2].(Vector)
1173
1174for _, sample := range inVec {
1175// Skip non-histogram samples.
1176if sample.H == nil {
1177continue
1178}
1179enh.Out = append(enh.Out, Sample{
1180Metric: sample.Metric.DropMetricName(),
1181F: histogramFraction(lower, upper, sample.H),
1182})
1183}
1184return enh.Out, nil
1185}
1186
// === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	q := vals[0].(Vector)[0].F
	inVec := vals[1].(Vector)
	var annos annotations.Annotations

	// A quantile outside [0, 1] (or NaN) still evaluates, but with a warning.
	if math.IsNaN(q) || q < 0 || q > 1 {
		annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange()))
	}

	// Reuse the signature map across evaluations; only the bucket slices are
	// reset, keeping their capacity.
	if enh.signatureToMetricWithBuckets == nil {
		enh.signatureToMetricWithBuckets = map[string]*metricWithBuckets{}
	} else {
		for _, v := range enh.signatureToMetricWithBuckets {
			v.buckets = v.buckets[:0]
		}
	}

	var histogramSamples []Sample

	for _, sample := range inVec {
		// We are only looking for classic buckets here. Remember
		// the histograms for later treatment.
		if sample.H != nil {
			histogramSamples = append(histogramSamples, sample)
			continue
		}

		// Classic histogram series: the bucket's upper bound lives in the
		// "le" label and must parse as a float.
		upperBound, err := strconv.ParseFloat(
			sample.Metric.Get(model.BucketLabel), 64,
		)
		if err != nil {
			annos.Add(annotations.NewBadBucketLabelWarning(sample.Metric.Get(labels.MetricName), sample.Metric.Get(model.BucketLabel), args[1].PositionRange()))
			continue
		}
		// Group buckets by the label set without the "le" label.
		enh.lblBuf = sample.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel)
		mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]
		if !ok {
			sample.Metric = labels.NewBuilder(sample.Metric).
				Del(excludedLabels...).
				Labels()

			mb = &metricWithBuckets{sample.Metric, nil}
			enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb
		}
		mb.buckets = append(mb.buckets, bucket{upperBound, sample.F})
	}

	// Now deal with the histograms.
	for _, sample := range histogramSamples {
		// We have to reconstruct the exact same signature as above for
		// a classic histogram, just ignoring any le label.
		enh.lblBuf = sample.Metric.Bytes(enh.lblBuf)
		if mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]; ok && len(mb.buckets) > 0 {
			// At this data point, we have classic histogram
			// buckets and a native histogram with the same name and
			// labels. Do not evaluate anything.
			annos.Add(annotations.NewMixedClassicNativeHistogramsWarning(sample.Metric.Get(labels.MetricName), args[1].PositionRange()))
			delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf))
			continue
		}

		enh.Out = append(enh.Out, Sample{
			Metric: sample.Metric.DropMetricName(),
			F:      histogramQuantile(q, sample.H),
		})
	}

	// Finally compute the quantile for every classic-bucket group that
	// survived (i.e. was not shadowed by a native histogram above).
	for _, mb := range enh.signatureToMetricWithBuckets {
		if len(mb.buckets) > 0 {
			res, forcedMonotonicity, _ := bucketQuantile(q, mb.buckets)
			enh.Out = append(enh.Out, Sample{
				Metric: mb.metric,
				F:      res,
			})
			if forcedMonotonicity {
				annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(mb.metric.Get(labels.MetricName), args[1].PositionRange()))
			}
		}
	}

	return enh.Out, annos
}
1270
1271// === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
1272func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1273floats := vals[0].(Matrix)[0].Floats
1274histograms := vals[0].(Matrix)[0].Histograms
1275resets := 0
1276
1277if len(floats) > 1 {
1278prev := floats[0].F
1279for _, sample := range floats[1:] {
1280current := sample.F
1281if current < prev {
1282resets++
1283}
1284prev = current
1285}
1286}
1287
1288if len(histograms) > 1 {
1289prev := histograms[0].H
1290for _, sample := range histograms[1:] {
1291current := sample.H
1292if current.DetectReset(prev) {
1293resets++
1294}
1295prev = current
1296}
1297}
1298
1299return append(enh.Out, Sample{F: float64(resets)}), nil
1300}
1301
1302// === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
1303func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1304floats := vals[0].(Matrix)[0].Floats
1305changes := 0
1306
1307if len(floats) == 0 {
1308// TODO(beorn7): Only histogram values, still need to add support.
1309return enh.Out, nil
1310}
1311
1312prev := floats[0].F
1313for _, sample := range floats[1:] {
1314current := sample.F
1315if current != prev && !(math.IsNaN(current) && math.IsNaN(prev)) {
1316changes++
1317}
1318prev = current
1319}
1320
1321return append(enh.Out, Sample{F: float64(changes)}), nil
1322}
1323
// label_replace function operates only on series; does not look at timestamps or values.
func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, annotations.Annotations) {
	var (
		dst      = stringFromArg(args[1])
		repl     = stringFromArg(args[2])
		src      = stringFromArg(args[3])
		regexStr = stringFromArg(args[4])
	)

	// Anchor the user-supplied regex so it must match the entire label value.
	regex, err := regexp.Compile("^(?:" + regexStr + ")$")
	if err != nil {
		panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr))
	}
	if !model.LabelNameRE.MatchString(dst) {
		panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst))
	}

	val, ws := ev.eval(args[0])
	matrix := val.(Matrix)
	// One builder is reused for all series to avoid per-series allocations.
	lb := labels.NewBuilder(labels.EmptyLabels())

	for i, el := range matrix {
		srcVal := el.Metric.Get(src)
		indexes := regex.FindStringSubmatchIndex(srcVal)
		if indexes != nil { // Only replace when regexp matches.
			// Expand $1-style capture references in the replacement string.
			res := regex.ExpandString([]byte{}, repl, srcVal, indexes)
			lb.Reset(el.Metric)
			lb.Set(dst, string(res))
			matrix[i].Metric = lb.Labels()
		}
	}
	// Rewriting labels can collapse distinct series onto the same label set,
	// which is invalid in a single vector.
	if matrix.ContainsSameLabelset() {
		ev.errorf("vector cannot contain metrics with the same labelset")
	}

	return matrix, ws
}
1361
// === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) (Vector, Annotations) ===
func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	// label_replace is evaluated on the series level via evalLabelReplace;
	// this stub only exists to populate the FunctionCalls table and must
	// never be dispatched to by the engine.
	panic("funcLabelReplace wrong implementation called")
}
1366
1367// === Vector(s Scalar) (Vector, Annotations) ===
1368func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1369return append(enh.Out,
1370Sample{
1371Metric: labels.Labels{},
1372F: vals[0].(Vector)[0].F,
1373}), nil
1374}
1375
1376// label_join function operates only on series; does not look at timestamps or values.
1377func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annotations.Annotations) {
1378var (
1379dst = stringFromArg(args[1])
1380sep = stringFromArg(args[2])
1381srcLabels = make([]string, len(args)-3)
1382)
1383for i := 3; i < len(args); i++ {
1384src := stringFromArg(args[i])
1385if !model.LabelName(src).IsValid() {
1386panic(fmt.Errorf("invalid source label name in label_join(): %s", src))
1387}
1388srcLabels[i-3] = src
1389}
1390if !model.LabelName(dst).IsValid() {
1391panic(fmt.Errorf("invalid destination label name in label_join(): %s", dst))
1392}
1393
1394val, ws := ev.eval(args[0])
1395matrix := val.(Matrix)
1396srcVals := make([]string, len(srcLabels))
1397lb := labels.NewBuilder(labels.EmptyLabels())
1398
1399for i, el := range matrix {
1400for i, src := range srcLabels {
1401srcVals[i] = el.Metric.Get(src)
1402}
1403strval := strings.Join(srcVals, sep)
1404lb.Reset(el.Metric)
1405lb.Set(dst, strval)
1406matrix[i].Metric = lb.Labels()
1407}
1408
1409return matrix, ws
1410}
1411
1412// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) (Vector, Annotations) ===
1413func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1414panic("funcLabelReplace wrong implementation called")
1415}
1416
1417// Common code for date related functions.
1418func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) float64) Vector {
1419if len(vals) == 0 {
1420return append(enh.Out,
1421Sample{
1422Metric: labels.Labels{},
1423F: f(time.Unix(enh.Ts/1000, 0).UTC()),
1424})
1425}
1426
1427for _, el := range vals[0].(Vector) {
1428t := time.Unix(int64(el.F), 0).UTC()
1429enh.Out = append(enh.Out, Sample{
1430Metric: el.Metric.DropMetricName(),
1431F: f(t),
1432})
1433}
1434return enh.Out
1435}
1436
1437// === days_in_month(v Vector) Scalar ===
1438func funcDaysInMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1439return dateWrapper(vals, enh, func(t time.Time) float64 {
1440return float64(32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day())
1441}), nil
1442}
1443
1444// === day_of_month(v Vector) Scalar ===
1445func funcDayOfMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1446return dateWrapper(vals, enh, func(t time.Time) float64 {
1447return float64(t.Day())
1448}), nil
1449}
1450
1451// === day_of_week(v Vector) Scalar ===
1452func funcDayOfWeek(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1453return dateWrapper(vals, enh, func(t time.Time) float64 {
1454return float64(t.Weekday())
1455}), nil
1456}
1457
1458// === day_of_year(v Vector) Scalar ===
1459func funcDayOfYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1460return dateWrapper(vals, enh, func(t time.Time) float64 {
1461return float64(t.YearDay())
1462}), nil
1463}
1464
1465// === hour(v Vector) Scalar ===
1466func funcHour(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1467return dateWrapper(vals, enh, func(t time.Time) float64 {
1468return float64(t.Hour())
1469}), nil
1470}
1471
1472// === minute(v Vector) Scalar ===
1473func funcMinute(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1474return dateWrapper(vals, enh, func(t time.Time) float64 {
1475return float64(t.Minute())
1476}), nil
1477}
1478
1479// === month(v Vector) Scalar ===
1480func funcMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1481return dateWrapper(vals, enh, func(t time.Time) float64 {
1482return float64(t.Month())
1483}), nil
1484}
1485
1486// === year(v Vector) Scalar ===
1487func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1488return dateWrapper(vals, enh, func(t time.Time) float64 {
1489return float64(t.Year())
1490}), nil
1491}
1492
// FunctionCalls is a list of all functions supported by PromQL, including their types.
// The string keys are the PromQL function names as written in queries; entries
// are kept in (roughly) alphabetical order.
var FunctionCalls = map[string]FunctionCall{
	"abs":                funcAbs,
	"absent":             funcAbsent,
	"absent_over_time":   funcAbsentOverTime,
	"acos":               funcAcos,
	"acosh":              funcAcosh,
	"asin":               funcAsin,
	"asinh":              funcAsinh,
	"atan":               funcAtan,
	"atanh":              funcAtanh,
	"avg_over_time":      funcAvgOverTime,
	"ceil":               funcCeil,
	"changes":            funcChanges,
	"clamp":              funcClamp,
	"clamp_max":          funcClampMax,
	"clamp_min":          funcClampMin,
	"cos":                funcCos,
	"cosh":               funcCosh,
	"count_over_time":    funcCountOverTime,
	"days_in_month":      funcDaysInMonth,
	"day_of_month":       funcDayOfMonth,
	"day_of_week":        funcDayOfWeek,
	"day_of_year":        funcDayOfYear,
	"deg":                funcDeg,
	"delta":              funcDelta,
	"deriv":              funcDeriv,
	"exp":                funcExp,
	"floor":              funcFloor,
	"histogram_avg":      funcHistogramAvg,
	"histogram_count":    funcHistogramCount,
	"histogram_fraction": funcHistogramFraction,
	"histogram_quantile": funcHistogramQuantile,
	"histogram_sum":      funcHistogramSum,
	"histogram_stddev":   funcHistogramStdDev,
	"histogram_stdvar":   funcHistogramStdVar,
	"holt_winters":       funcHoltWinters,
	"hour":               funcHour,
	"idelta":             funcIdelta,
	"increase":           funcIncrease,
	"irate":              funcIrate,
	// label_replace and label_join point at panicking stubs; the engine
	// evaluates them on the series level instead (see evalLabelReplace and
	// evalLabelJoin).
	"label_replace":      funcLabelReplace,
	"label_join":         funcLabelJoin,
	"ln":                 funcLn,
	"log10":              funcLog10,
	"log2":               funcLog2,
	"last_over_time":     funcLastOverTime,
	"mad_over_time":      funcMadOverTime,
	"max_over_time":      funcMaxOverTime,
	"min_over_time":      funcMinOverTime,
	"minute":             funcMinute,
	"month":              funcMonth,
	"pi":                 funcPi,
	"predict_linear":     funcPredictLinear,
	"present_over_time":  funcPresentOverTime,
	"quantile_over_time": funcQuantileOverTime,
	"rad":                funcRad,
	"rate":               funcRate,
	"resets":             funcResets,
	"round":              funcRound,
	"scalar":             funcScalar,
	"sgn":                funcSgn,
	"sin":                funcSin,
	"sinh":               funcSinh,
	"sort":               funcSort,
	"sort_desc":          funcSortDesc,
	"sort_by_label":      funcSortByLabel,
	"sort_by_label_desc": funcSortByLabelDesc,
	"sqrt":               funcSqrt,
	"stddev_over_time":   funcStddevOverTime,
	"stdvar_over_time":   funcStdvarOverTime,
	"sum_over_time":      funcSumOverTime,
	"tan":                funcTan,
	"tanh":               funcTanh,
	"time":               funcTime,
	"timestamp":          funcTimestamp,
	"vector":             funcVector,
	"year":               funcYear,
}
1572
// AtModifierUnsafeFunctions are the functions whose result
// can vary if evaluation time is changed when the arguments are
// step invariant. It also includes functions that use the timestamps
// of the passed instant vector argument to calculate a result since
// that can also change with change in eval time.
var AtModifierUnsafeFunctions = map[string]struct{}{
	// Step invariant functions.
	"days_in_month": {}, "day_of_month": {}, "day_of_week": {}, "day_of_year": {},
	"hour": {}, "minute": {}, "month": {}, "year": {},
	"predict_linear": {}, "time": {},
	// Uses timestamp of the argument for the result,
	// hence unsafe to use with @ modifier.
	"timestamp": {},
}
1587
1588type vectorByValueHeap Vector
1589
1590func (s vectorByValueHeap) Len() int {
1591return len(s)
1592}
1593
1594func (s vectorByValueHeap) Less(i, j int) bool {
1595// We compare histograms based on their sum of observations.
1596// TODO(beorn7): Is that what we want?
1597vi, vj := s[i].F, s[j].F
1598if s[i].H != nil {
1599vi = s[i].H.Sum
1600}
1601if s[j].H != nil {
1602vj = s[j].H.Sum
1603}
1604
1605if math.IsNaN(vi) {
1606return true
1607}
1608return vi < vj
1609}
1610
1611func (s vectorByValueHeap) Swap(i, j int) {
1612s[i], s[j] = s[j], s[i]
1613}
1614
1615func (s *vectorByValueHeap) Push(x interface{}) {
1616*s = append(*s, *(x.(*Sample)))
1617}
1618
1619func (s *vectorByValueHeap) Pop() interface{} {
1620old := *s
1621n := len(old)
1622el := old[n-1]
1623*s = old[0 : n-1]
1624return el
1625}
1626
1627type vectorByReverseValueHeap Vector
1628
1629func (s vectorByReverseValueHeap) Len() int {
1630return len(s)
1631}
1632
1633func (s vectorByReverseValueHeap) Less(i, j int) bool {
1634// We compare histograms based on their sum of observations.
1635// TODO(beorn7): Is that what we want?
1636vi, vj := s[i].F, s[j].F
1637if s[i].H != nil {
1638vi = s[i].H.Sum
1639}
1640if s[j].H != nil {
1641vj = s[j].H.Sum
1642}
1643
1644if math.IsNaN(vi) {
1645return true
1646}
1647return vi > vj
1648}
1649
1650func (s vectorByReverseValueHeap) Swap(i, j int) {
1651s[i], s[j] = s[j], s[i]
1652}
1653
1654func (s *vectorByReverseValueHeap) Push(x interface{}) {
1655*s = append(*s, *(x.(*Sample)))
1656}
1657
1658func (s *vectorByReverseValueHeap) Pop() interface{} {
1659old := *s
1660n := len(old)
1661el := old[n-1]
1662*s = old[0 : n-1]
1663return el
1664}
1665
// createLabelsForAbsentFunction returns the labels that are uniquely and exactly matched
// in a given expression. It is used in the absent functions.
// Only vector and matrix selectors contribute labels; any other expression
// yields the empty label set.
func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels {
	b := labels.NewBuilder(labels.EmptyLabels())

	var lm []*labels.Matcher
	switch n := expr.(type) {
	case *parser.VectorSelector:
		lm = n.LabelMatchers
	case *parser.MatrixSelector:
		lm = n.VectorSelector.(*parser.VectorSelector).LabelMatchers
	default:
		return labels.EmptyLabels()
	}

	// The 'has' map implements backwards-compatibility for historic behaviour:
	// e.g. in `absent(x{job="a",job="b",foo="bar"})` then `job` is removed from the output.
	// Note this gives arguably wrong behaviour for `absent(x{job="a",job="a",foo="bar"})`.
	has := make(map[string]bool, len(lm))
	for _, ma := range lm {
		// The metric name never becomes an output label.
		if ma.Name == labels.MetricName {
			continue
		}
		if ma.Type == labels.MatchEqual && !has[ma.Name] {
			b.Set(ma.Name, ma.Value)
			has[ma.Name] = true
		} else {
			// Non-equality matchers, or repeated names, drop the label.
			b.Del(ma.Name)
		}
	}

	return b.Labels()
}
1699
1700func stringFromArg(e parser.Expr) string {
1701tmp := unwrapStepInvariantExpr(e) // Unwrap StepInvariant
1702unwrapParenExpr(&tmp) // Optionally unwrap ParenExpr
1703return tmp.(*parser.StringLiteral).Val
1704}
1705
1706func stringSliceFromArgs(args parser.Expressions) []string {
1707tmp := make([]string, len(args))
1708for i := 0; i < len(args); i++ {
1709tmp[i] = stringFromArg(args[i])
1710}
1711return tmp
1712}
1713