// prometheus (fork) — functions.go, 1712 lines · 55.9 KB
1
// Copyright 2015 The Prometheus Authors
2
// Licensed under the Apache License, Version 2.0 (the "License");
3
// you may not use this file except in compliance with the License.
4
// You may obtain a copy of the License at
5
//
6
// http://www.apache.org/licenses/LICENSE-2.0
7
//
8
// Unless required by applicable law or agreed to in writing, software
9
// distributed under the License is distributed on an "AS IS" BASIS,
10
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
// See the License for the specific language governing permissions and
12
// limitations under the License.
13

14
package promql
15

16
import (
17
	"fmt"
18
	"math"
19
	"slices"
20
	"sort"
21
	"strconv"
22
	"strings"
23
	"time"
24

25
	"github.com/facette/natsort"
26
	"github.com/grafana/regexp"
27
	"github.com/prometheus/common/model"
28

29
	"github.com/prometheus/prometheus/model/histogram"
30
	"github.com/prometheus/prometheus/model/labels"
31
	"github.com/prometheus/prometheus/promql/parser"
32
	"github.com/prometheus/prometheus/promql/parser/posrange"
33
	"github.com/prometheus/prometheus/util/annotations"
34
)
35

36
// FunctionCall is the type of a PromQL function implementation
//
// vals is a list of the evaluated arguments for the function call.
//
// For range vectors it will be a Matrix with one series, instant vectors a
// Vector, scalars a Vector with one series whose value is the scalar
// value, and nil for strings.
//
// args are the original arguments to the function, where you can access
// matrixSelectors, vectorSelectors, and StringLiterals.
//
// enh.Out is a pre-allocated empty vector that you may use to accumulate
// output before returning it. The vectors in vals should not be returned.
//
// Range vector functions need only return a vector with the right value,
// the metric and timestamp are not needed.
//
// Instant vector functions need only return a vector with the right values and
// metrics, the timestamps are not needed.
//
// Scalar results should be returned as the value of a sample in a Vector.
type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations)
58

59
// === time() float64 ===
60
func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
61
	return Vector{Sample{
62
		F: float64(enh.Ts) / 1000,
63
	}}, nil
64
}
65

66
// extrapolatedRate is a utility function for rate/increase/delta.
// It calculates the rate (allowing for counter resets if isCounter is true),
// extrapolates if the first/last sample is close to the boundary, and returns
// the result as either per-second (if isRate is true) or overall.
//
// vals[0] is expected to be a Matrix with exactly one series; args[0] must be
// the originating matrix selector (used for the range and offset).
func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) {
	ms := args[0].(*parser.MatrixSelector)
	vs := ms.VectorSelector.(*parser.VectorSelector)
	var (
		samples = vals[0].(Matrix)[0]
		// Window boundaries in milliseconds, shifted back by the offset.
		rangeStart         = enh.Ts - durationMilliseconds(ms.Range+vs.Offset)
		rangeEnd           = enh.Ts - durationMilliseconds(vs.Offset)
		resultFloat        float64
		resultHistogram    *histogram.FloatHistogram
		firstT, lastT      int64
		numSamplesMinusOne int
		annos              annotations.Annotations
	)

	// We need either at least two Histograms and no Floats, or at least two
	// Floats and no Histograms to calculate a rate. Otherwise, drop this
	// Vector element.
	metricName := samples.Metric.Get(labels.MetricName)
	if len(samples.Histograms) > 0 && len(samples.Floats) > 0 {
		return enh.Out, annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange()))
	}

	switch {
	case len(samples.Histograms) > 1:
		numSamplesMinusOne = len(samples.Histograms) - 1
		firstT = samples.Histograms[0].T
		lastT = samples.Histograms[numSamplesMinusOne].T
		var newAnnos annotations.Annotations
		resultHistogram, newAnnos = histogramRate(samples.Histograms, isCounter, metricName, args[0].PositionRange())
		if resultHistogram == nil {
			// The histograms are not compatible with each other.
			return enh.Out, annos.Merge(newAnnos)
		}
	case len(samples.Floats) > 1:
		numSamplesMinusOne = len(samples.Floats) - 1
		firstT = samples.Floats[0].T
		lastT = samples.Floats[numSamplesMinusOne].T
		// Raw difference between last and first sample.
		resultFloat = samples.Floats[numSamplesMinusOne].F - samples.Floats[0].F
		if !isCounter {
			break
		}
		// Handle counter resets: every time the value drops, the counter was
		// reset, so add back the value it had just before the reset.
		prevValue := samples.Floats[0].F
		for _, currPoint := range samples.Floats[1:] {
			if currPoint.F < prevValue {
				resultFloat += prevValue
			}
			prevValue = currPoint.F
		}
	default:
		// TODO: add RangeTooShortWarning
		return enh.Out, annos
	}

	// Duration between first/last samples and boundary of range (seconds).
	durationToStart := float64(firstT-rangeStart) / 1000
	durationToEnd := float64(rangeEnd-lastT) / 1000

	// The actually observed interval, in seconds.
	sampledInterval := float64(lastT-firstT) / 1000
	averageDurationBetweenSamples := sampledInterval / float64(numSamplesMinusOne)

	// If the first/last samples are close to the boundaries of the range,
	// extrapolate the result. This is as we expect that another sample
	// will exist given the spacing between samples we've seen thus far,
	// with an allowance for noise.
	extrapolationThreshold := averageDurationBetweenSamples * 1.1
	extrapolateToInterval := sampledInterval

	if durationToStart >= extrapolationThreshold {
		durationToStart = averageDurationBetweenSamples / 2
	}
	if isCounter && resultFloat > 0 && len(samples.Floats) > 0 && samples.Floats[0].F >= 0 {
		// Counters cannot be negative. If we have any slope at all
		// (i.e. resultFloat went up), we can extrapolate the zero point
		// of the counter. If the duration to the zero point is shorter
		// than the durationToStart, we take the zero point as the start
		// of the series, thereby avoiding extrapolation to negative
		// counter values.
		// TODO(beorn7): Do this for histograms, too.
		durationToZero := sampledInterval * (samples.Floats[0].F / resultFloat)
		if durationToZero < durationToStart {
			durationToStart = durationToZero
		}
	}
	extrapolateToInterval += durationToStart

	if durationToEnd >= extrapolationThreshold {
		durationToEnd = averageDurationBetweenSamples / 2
	}
	extrapolateToInterval += durationToEnd

	// Scale the raw difference by the ratio of the extrapolated interval to
	// the observed interval; divide by the selector range for per-second
	// rates.
	factor := extrapolateToInterval / sampledInterval
	if isRate {
		factor /= ms.Range.Seconds()
	}
	if resultHistogram == nil {
		resultFloat *= factor
	} else {
		resultHistogram.Mul(factor)
	}

	return append(enh.Out, Sample{F: resultFloat, H: resultHistogram}), annos
}
173

174
// histogramRate is a helper function for extrapolatedRate. It requires
175
// points[0] to be a histogram. It returns nil if any other Point in points is
176
// not a histogram, and a warning wrapped in an annotation in that case.
177
// Otherwise, it returns the calculated histogram and an empty annotation.
178
func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) {
179
	prev := points[0].H
180
	last := points[len(points)-1].H
181
	if last == nil {
182
		return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos))
183
	}
184
	minSchema := prev.Schema
185
	if last.Schema < minSchema {
186
		minSchema = last.Schema
187
	}
188

189
	var annos annotations.Annotations
190

191
	// First iteration to find out two things:
192
	// - What's the smallest relevant schema?
193
	// - Are all data points histograms?
194
	//   TODO(beorn7): Find a way to check that earlier, e.g. by handing in a
195
	//   []FloatPoint and a []HistogramPoint separately.
196
	for _, currPoint := range points[1 : len(points)-1] {
197
		curr := currPoint.H
198
		if curr == nil {
199
			return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos))
200
		}
201
		if !isCounter {
202
			continue
203
		}
204
		if curr.CounterResetHint == histogram.GaugeType {
205
			annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos))
206
		}
207
		if curr.Schema < minSchema {
208
			minSchema = curr.Schema
209
		}
210
	}
211

212
	h := last.CopyToSchema(minSchema)
213
	h.Sub(prev)
214

215
	if isCounter {
216
		// Second iteration to deal with counter resets.
217
		for _, currPoint := range points[1:] {
218
			curr := currPoint.H
219
			if curr.DetectReset(prev) {
220
				h.Add(prev)
221
			}
222
			prev = curr
223
		}
224
	} else if points[0].H.CounterResetHint != histogram.GaugeType || points[len(points)-1].H.CounterResetHint != histogram.GaugeType {
225
		annos.Add(annotations.NewNativeHistogramNotGaugeWarning(metricName, pos))
226
	}
227

228
	h.CounterResetHint = histogram.GaugeType
229
	return h.Compact(0), nil
230
}
231

232
// === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
// delta: plain difference over the range; no counter-reset handling, not
// per-second (isCounter=false, isRate=false).
func funcDelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return extrapolatedRate(vals, args, enh, false, false)
}

// === rate(node parser.ValueTypeMatrix) (Vector, Annotations) ===
// rate: per-second increase with counter-reset handling
// (isCounter=true, isRate=true).
func funcRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return extrapolatedRate(vals, args, enh, true, true)
}

// === increase(node parser.ValueTypeMatrix) (Vector, Annotations) ===
// increase: total increase over the range with counter-reset handling
// (isCounter=true, isRate=false).
func funcIncrease(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return extrapolatedRate(vals, args, enh, true, false)
}

// === irate(node parser.ValueTypeMatrix) (Vector, Annotations) ===
// irate: instant per-second rate computed from the last two samples.
func funcIrate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return instantValue(vals, enh.Out, true)
}

// === idelta(node model.ValMatrix) (Vector, Annotations) ===
// idelta: difference between the last two samples (no rate conversion).
func funcIdelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return instantValue(vals, enh.Out, false)
}
256

257
// instantValue computes irate/idelta from the last two float samples of the
// single series in vals[0]. With isRate it also handles counter resets and
// converts to a per-second value.
func instantValue(vals []parser.Value, out Vector, isRate bool) (Vector, annotations.Annotations) {
	floats := vals[0].(Matrix)[0].Floats
	// No sense in trying to compute a rate without at least two points. Drop
	// this Vector element.
	// TODO: add RangeTooShortWarning
	if len(floats) < 2 {
		return out, nil
	}

	curr := floats[len(floats)-1]
	prev := floats[len(floats)-2]

	var result float64
	switch {
	case isRate && curr.F < prev.F:
		// Counter reset: the raw value is the whole increase.
		result = curr.F
	default:
		result = curr.F - prev.F
	}

	interval := curr.T - prev.T
	if interval == 0 {
		// Avoid dividing by 0.
		return out, nil
	}

	if isRate {
		// Convert from per-interval (milliseconds) to per-second.
		result /= float64(interval) / 1000
	}

	return append(out, Sample{F: result}), nil
}
290

291
// calcTrendValue computes the trend component of double exponential
// smoothing at index i of the raw data.
// tf is the trend factor, s0 the previous smoothed value, s1 the current
// smoothed value, and b the previous trend value.
func calcTrendValue(i int, tf, s0, s1, b float64) float64 {
	// At the first sample there is no history to derive a trend from.
	if i == 0 {
		return b
	}
	// Blend the observed change in the smoothed value with the old trend.
	return tf*(s1-s0) + (1-tf)*b
}
307

308
// Holt-Winters is similar to a weighted moving average, where historical data has exponentially less influence on the current data.
309
// Holt-Winter also accounts for trends in data. The smoothing factor (0 < sf < 1) affects how historical data will affect the current
310
// data. A lower smoothing factor increases the influence of historical data. The trend factor (0 < tf < 1) affects
311
// how trends in historical data will affect the current data. A higher trend factor increases the influence.
312
// of trends. Algorithm taken from https://en.wikipedia.org/wiki/Exponential_smoothing titled: "Double exponential smoothing".
313
func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
314
	samples := vals[0].(Matrix)[0]
315

316
	// The smoothing factor argument.
317
	sf := vals[1].(Vector)[0].F
318

319
	// The trend factor argument.
320
	tf := vals[2].(Vector)[0].F
321

322
	// Check that the input parameters are valid.
323
	if sf <= 0 || sf >= 1 {
324
		panic(fmt.Errorf("invalid smoothing factor. Expected: 0 < sf < 1, got: %f", sf))
325
	}
326
	if tf <= 0 || tf >= 1 {
327
		panic(fmt.Errorf("invalid trend factor. Expected: 0 < tf < 1, got: %f", tf))
328
	}
329

330
	l := len(samples.Floats)
331

332
	// Can't do the smoothing operation with less than two points.
333
	if l < 2 {
334
		return enh.Out, nil
335
	}
336

337
	var s0, s1, b float64
338
	// Set initial values.
339
	s1 = samples.Floats[0].F
340
	b = samples.Floats[1].F - samples.Floats[0].F
341

342
	// Run the smoothing operation.
343
	var x, y float64
344
	for i := 1; i < l; i++ {
345
		// Scale the raw value against the smoothing factor.
346
		x = sf * samples.Floats[i].F
347

348
		// Scale the last smoothed value with the trend at this point.
349
		b = calcTrendValue(i-1, tf, s0, s1, b)
350
		y = (1 - sf) * (s1 + b)
351

352
		s0, s1 = s1, x+y
353
	}
354

355
	return append(enh.Out, Sample{F: s1}), nil
356
}
357

358
// === sort(node parser.ValueTypeVector) (Vector, Annotations) ===
359
func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
360
	// NaN should sort to the bottom, so take descending sort with NaN first and
361
	// reverse it.
362
	byValueSorter := vectorByReverseValueHeap(vals[0].(Vector))
363
	sort.Sort(sort.Reverse(byValueSorter))
364
	return Vector(byValueSorter), nil
365
}
366

367
// === sortDesc(node parser.ValueTypeVector) (Vector, Annotations) ===
368
func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
369
	// NaN should sort to the bottom, so take ascending sort with NaN first and
370
	// reverse it.
371
	byValueSorter := vectorByValueHeap(vals[0].(Vector))
372
	sort.Sort(sort.Reverse(byValueSorter))
373
	return Vector(byValueSorter), nil
374
}
375

376
// === sort_by_label(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) ===
377
func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
378
	// In case the labels are the same, NaN should sort to the bottom, so take
379
	// ascending sort with NaN first and reverse it.
380
	var anno annotations.Annotations
381
	vals[0], anno = funcSort(vals, args, enh)
382
	labels := stringSliceFromArgs(args[1:])
383
	slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
384
		// Iterate over each given label
385
		for _, label := range labels {
386
			lv1 := a.Metric.Get(label)
387
			lv2 := b.Metric.Get(label)
388

389
			if lv1 == lv2 {
390
				continue
391
			}
392

393
			if natsort.Compare(lv1, lv2) {
394
				return -1
395
			}
396

397
			return +1
398
		}
399

400
		return 0
401
	})
402

403
	return vals[0].(Vector), anno
404
}
405

406
// === sort_by_label_desc(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) ===
407
func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
408
	// In case the labels are the same, NaN should sort to the bottom, so take
409
	// ascending sort with NaN first and reverse it.
410
	var anno annotations.Annotations
411
	vals[0], anno = funcSortDesc(vals, args, enh)
412
	labels := stringSliceFromArgs(args[1:])
413
	slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
414
		// Iterate over each given label
415
		for _, label := range labels {
416
			lv1 := a.Metric.Get(label)
417
			lv2 := b.Metric.Get(label)
418

419
			if lv1 == lv2 {
420
				continue
421
			}
422

423
			if natsort.Compare(lv1, lv2) {
424
				return +1
425
			}
426

427
			return -1
428
		}
429

430
		return 0
431
	})
432

433
	return vals[0].(Vector), anno
434
}
435

436
// === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) ===
437
func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
438
	vec := vals[0].(Vector)
439
	min := vals[1].(Vector)[0].F
440
	max := vals[2].(Vector)[0].F
441
	if max < min {
442
		return enh.Out, nil
443
	}
444
	for _, el := range vec {
445
		enh.Out = append(enh.Out, Sample{
446
			Metric: el.Metric.DropMetricName(),
447
			F:      math.Max(min, math.Min(max, el.F)),
448
		})
449
	}
450
	return enh.Out, nil
451
}
452

453
// === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) ===
454
func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
455
	vec := vals[0].(Vector)
456
	max := vals[1].(Vector)[0].F
457
	for _, el := range vec {
458
		enh.Out = append(enh.Out, Sample{
459
			Metric: el.Metric.DropMetricName(),
460
			F:      math.Min(max, el.F),
461
		})
462
	}
463
	return enh.Out, nil
464
}
465

466
// === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) ===
467
func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
468
	vec := vals[0].(Vector)
469
	min := vals[1].(Vector)[0].F
470
	for _, el := range vec {
471
		enh.Out = append(enh.Out, Sample{
472
			Metric: el.Metric.DropMetricName(),
473
			F:      math.Max(min, el.F),
474
		})
475
	}
476
	return enh.Out, nil
477
}
478

479
// === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) (Vector, Annotations) ===
480
func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
481
	vec := vals[0].(Vector)
482
	// round returns a number rounded to toNearest.
483
	// Ties are solved by rounding up.
484
	toNearest := float64(1)
485
	if len(args) >= 2 {
486
		toNearest = vals[1].(Vector)[0].F
487
	}
488
	// Invert as it seems to cause fewer floating point accuracy issues.
489
	toNearestInverse := 1.0 / toNearest
490

491
	for _, el := range vec {
492
		f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse
493
		enh.Out = append(enh.Out, Sample{
494
			Metric: el.Metric.DropMetricName(),
495
			F:      f,
496
		})
497
	}
498
	return enh.Out, nil
499
}
500

501
// === Scalar(node parser.ValueTypeVector) Scalar ===
502
func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
503
	v := vals[0].(Vector)
504
	if len(v) != 1 {
505
		return append(enh.Out, Sample{F: math.NaN()}), nil
506
	}
507
	return append(enh.Out, Sample{F: v[0].F}), nil
508
}
509

510
// aggrOverTime applies a float aggregation to the single series of the
// matrix in vals[0] and appends the result to enh.Out.
func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector {
	series := vals[0].(Matrix)[0]
	return append(enh.Out, Sample{F: aggrFn(series)})
}
515

516
// aggrHistOverTime applies a histogram aggregation to the single series of
// the matrix in vals[0] and appends the result to enh.Out.
func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) *histogram.FloatHistogram) Vector {
	series := vals[0].(Matrix)[0]
	return append(enh.Out, Sample{H: aggrFn(series)})
}
521

522
// === avg_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations)  ===
func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	firstSeries := vals[0].(Matrix)[0]
	if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 {
		// Mixed float/histogram series cannot be averaged; warn and drop.
		metricName := firstSeries.Metric.Get(labels.MetricName)
		return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange()))
	}
	if len(firstSeries.Floats) == 0 {
		// The passed values only contain histograms.
		return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram {
			count := 1
			mean := s.Histograms[0].H.Copy()
			for _, h := range s.Histograms[1:] {
				count++
				// Incremental mean: mean += (h - mean)/count, expressed as
				// h/count - mean/count to stay within histogram operations.
				left := h.H.Copy().Div(float64(count))
				right := mean.Copy().Div(float64(count))
				toAdd := left.Sub(right)
				mean.Add(toAdd)
			}
			return mean
		}), nil
	}
	return aggrOverTime(vals, enh, func(s Series) float64 {
		var mean, count, c float64
		for _, f := range s.Floats {
			count++
			if math.IsInf(mean, 0) {
				if math.IsInf(f.F, 0) && (mean > 0) == (f.F > 0) {
					// The `mean` and `f.F` values are `Inf` of the same sign.  They
					// can't be subtracted, but the value of `mean` is correct
					// already.
					continue
				}
				if !math.IsInf(f.F, 0) && !math.IsNaN(f.F) {
					// At this stage, the mean is an infinite. If the added
					// value is neither an Inf or a Nan, we can keep that mean
					// value.
					// This is required because our calculation below removes
					// the mean value, which would look like Inf += x - Inf and
					// end up as a NaN.
					continue
				}
			}
			// Incremental mean with Kahan compensation:
			// mean += f.F/count - mean/count.
			mean, c = kahanSumInc(f.F/count-mean/count, mean, c)
		}

		if math.IsInf(mean, 0) {
			// The compensation term is meaningless next to an infinity.
			return mean
		}
		return mean + c
	}), nil
}
574

575
// === count_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes)  ===
576
func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
577
	return aggrOverTime(vals, enh, func(s Series) float64 {
578
		return float64(len(s.Floats) + len(s.Histograms))
579
	}), nil
580
}
581

582
// === last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes)  ===
583
func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
584
	el := vals[0].(Matrix)[0]
585

586
	var f FPoint
587
	if len(el.Floats) > 0 {
588
		f = el.Floats[len(el.Floats)-1]
589
	}
590

591
	var h HPoint
592
	if len(el.Histograms) > 0 {
593
		h = el.Histograms[len(el.Histograms)-1]
594
	}
595

596
	if h.H == nil || h.T < f.T {
597
		return append(enh.Out, Sample{
598
			Metric: el.Metric,
599
			F:      f.F,
600
		}), nil
601
	}
602
	return append(enh.Out, Sample{
603
		Metric: el.Metric,
604
		H:      h.H.Copy(),
605
	}), nil
606
}
607

608
// === mad_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
609
func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
610
	if len(vals[0].(Matrix)[0].Floats) == 0 {
611
		return enh.Out, nil
612
	}
613
	return aggrOverTime(vals, enh, func(s Series) float64 {
614
		values := make(vectorByValueHeap, 0, len(s.Floats))
615
		for _, f := range s.Floats {
616
			values = append(values, Sample{F: f.F})
617
		}
618
		median := quantile(0.5, values)
619
		values = make(vectorByValueHeap, 0, len(s.Floats))
620
		for _, f := range s.Floats {
621
			values = append(values, Sample{F: math.Abs(f.F - median)})
622
		}
623
		return quantile(0.5, values)
624
	}), nil
625
}
626

627
// === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
628
func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
629
	if len(vals[0].(Matrix)[0].Floats) == 0 {
630
		// TODO(beorn7): The passed values only contain
631
		// histograms. max_over_time ignores histograms for now. If
632
		// there are only histograms, we have to return without adding
633
		// anything to enh.Out.
634
		return enh.Out, nil
635
	}
636
	return aggrOverTime(vals, enh, func(s Series) float64 {
637
		max := s.Floats[0].F
638
		for _, f := range s.Floats {
639
			if f.F > max || math.IsNaN(max) {
640
				max = f.F
641
			}
642
		}
643
		return max
644
	}), nil
645
}
646

647
// === min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
648
func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
649
	if len(vals[0].(Matrix)[0].Floats) == 0 {
650
		// TODO(beorn7): The passed values only contain
651
		// histograms. min_over_time ignores histograms for now. If
652
		// there are only histograms, we have to return without adding
653
		// anything to enh.Out.
654
		return enh.Out, nil
655
	}
656
	return aggrOverTime(vals, enh, func(s Series) float64 {
657
		min := s.Floats[0].F
658
		for _, f := range s.Floats {
659
			if f.F < min || math.IsNaN(min) {
660
				min = f.F
661
			}
662
		}
663
		return min
664
	}), nil
665
}
666

667
// === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
668
func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
669
	firstSeries := vals[0].(Matrix)[0]
670
	if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 {
671
		metricName := firstSeries.Metric.Get(labels.MetricName)
672
		return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange()))
673
	}
674
	if len(firstSeries.Floats) == 0 {
675
		// The passed values only contain histograms.
676
		return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram {
677
			sum := s.Histograms[0].H.Copy()
678
			for _, h := range s.Histograms[1:] {
679
				sum.Add(h.H)
680
			}
681
			return sum
682
		}), nil
683
	}
684
	return aggrOverTime(vals, enh, func(s Series) float64 {
685
		var sum, c float64
686
		for _, f := range s.Floats {
687
			sum, c = kahanSumInc(f.F, sum, c)
688
		}
689
		if math.IsInf(sum, 0) {
690
			return sum
691
		}
692
		return sum + c
693
	}), nil
694
}
695

696
// === quantile_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
697
func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
698
	q := vals[0].(Vector)[0].F
699
	el := vals[1].(Matrix)[0]
700
	if len(el.Floats) == 0 {
701
		// TODO(beorn7): The passed values only contain
702
		// histograms. quantile_over_time ignores histograms for now. If
703
		// there are only histograms, we have to return without adding
704
		// anything to enh.Out.
705
		return enh.Out, nil
706
	}
707

708
	var annos annotations.Annotations
709
	if math.IsNaN(q) || q < 0 || q > 1 {
710
		annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange()))
711
	}
712

713
	values := make(vectorByValueHeap, 0, len(el.Floats))
714
	for _, f := range el.Floats {
715
		values = append(values, Sample{F: f.F})
716
	}
717
	return append(enh.Out, Sample{F: quantile(q, values)}), annos
718
}
719

720
// === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
721
func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
722
	if len(vals[0].(Matrix)[0].Floats) == 0 {
723
		// TODO(beorn7): The passed values only contain
724
		// histograms. stddev_over_time ignores histograms for now. If
725
		// there are only histograms, we have to return without adding
726
		// anything to enh.Out.
727
		return enh.Out, nil
728
	}
729
	return aggrOverTime(vals, enh, func(s Series) float64 {
730
		var count float64
731
		var mean, cMean float64
732
		var aux, cAux float64
733
		for _, f := range s.Floats {
734
			count++
735
			delta := f.F - (mean + cMean)
736
			mean, cMean = kahanSumInc(delta/count, mean, cMean)
737
			aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux)
738
		}
739
		return math.Sqrt((aux + cAux) / count)
740
	}), nil
741
}
742

743
// === stdvar_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
744
func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
745
	if len(vals[0].(Matrix)[0].Floats) == 0 {
746
		// TODO(beorn7): The passed values only contain
747
		// histograms. stdvar_over_time ignores histograms for now. If
748
		// there are only histograms, we have to return without adding
749
		// anything to enh.Out.
750
		return enh.Out, nil
751
	}
752
	return aggrOverTime(vals, enh, func(s Series) float64 {
753
		var count float64
754
		var mean, cMean float64
755
		var aux, cAux float64
756
		for _, f := range s.Floats {
757
			count++
758
			delta := f.F - (mean + cMean)
759
			mean, cMean = kahanSumInc(delta/count, mean, cMean)
760
			aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux)
761
		}
762
		return (aux + cAux) / count
763
	}), nil
764
}
765

766
// === absent(Vector parser.ValueTypeVector) (Vector, Annotations) ===
767
func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
768
	if len(vals[0].(Vector)) > 0 {
769
		return enh.Out, nil
770
	}
771
	return append(enh.Out,
772
		Sample{
773
			Metric: createLabelsForAbsentFunction(args[0]),
774
			F:      1,
775
		}), nil
776
}
777

778
// === absent_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) ===
779
// As this function has a matrix as argument, it does not get all the Series.
780
// This function will return 1 if the matrix has at least one element.
781
// Due to engine optimization, this function is only called when this condition is true.
782
// Then, the engine post-processes the results to get the expected output.
783
func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
784
	return append(enh.Out, Sample{F: 1}), nil
785
}
786

787
// === present_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) ===
788
func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
789
	return aggrOverTime(vals, enh, func(s Series) float64 {
790
		return 1
791
	}), nil
792
}
793

794
func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector {
795
	for _, el := range vals[0].(Vector) {
796
		if el.H == nil { // Process only float samples.
797
			enh.Out = append(enh.Out, Sample{
798
				Metric: el.Metric.DropMetricName(),
799
				F:      f(el.F),
800
			})
801
		}
802
	}
803
	return enh.Out
804
}
805

806
// The following are element-wise math functions. Each delegates to
// simpleFunc, which applies the given float64 function to every float sample
// (histograms are skipped) and drops the metric name.

// === abs(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAbs(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return simpleFunc(vals, enh, math.Abs), nil
}

// === ceil(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcCeil(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return simpleFunc(vals, enh, math.Ceil), nil
}

// === floor(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcFloor(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return simpleFunc(vals, enh, math.Floor), nil
}

// === exp(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcExp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return simpleFunc(vals, enh, math.Exp), nil
}

// === sqrt(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcSqrt(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return simpleFunc(vals, enh, math.Sqrt), nil
}

// === ln(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcLn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return simpleFunc(vals, enh, math.Log), nil
}

// === log2(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcLog2(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return simpleFunc(vals, enh, math.Log2), nil
}

// === log10(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return simpleFunc(vals, enh, math.Log10), nil
}

// === sin(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcSin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return simpleFunc(vals, enh, math.Sin), nil
}

// === cos(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcCos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return simpleFunc(vals, enh, math.Cos), nil
}

// === tan(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcTan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return simpleFunc(vals, enh, math.Tan), nil
}
860

861
// === asin(Vector parser.ValueTypeVector) (Vector, Annotations) ===
862
func funcAsin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
863
	return simpleFunc(vals, enh, math.Asin), nil
864
}
865

866
// === acos(Vector parser.ValueTypeVector) (Vector, Annotations) ===
867
func funcAcos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
868
	return simpleFunc(vals, enh, math.Acos), nil
869
}
870

871
// === atan(Vector parser.ValueTypeVector) (Vector, Annotations) ===
872
func funcAtan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
873
	return simpleFunc(vals, enh, math.Atan), nil
874
}
875

876
// === sinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
877
func funcSinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
878
	return simpleFunc(vals, enh, math.Sinh), nil
879
}
880

881
// === cosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
882
func funcCosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
883
	return simpleFunc(vals, enh, math.Cosh), nil
884
}
885

886
// === tanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
887
func funcTanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
888
	return simpleFunc(vals, enh, math.Tanh), nil
889
}
890

891
// === asinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
892
func funcAsinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
893
	return simpleFunc(vals, enh, math.Asinh), nil
894
}
895

896
// === acosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
897
func funcAcosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
898
	return simpleFunc(vals, enh, math.Acosh), nil
899
}
900

901
// === atanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
902
func funcAtanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
903
	return simpleFunc(vals, enh, math.Atanh), nil
904
}
905

906
// === rad(Vector parser.ValueTypeVector) (Vector, Annotations) ===
907
func funcRad(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
908
	return simpleFunc(vals, enh, func(v float64) float64 {
909
		return v * math.Pi / 180
910
	}), nil
911
}
912

913
// === deg(Vector parser.ValueTypeVector) (Vector, Annotations) ===
914
func funcDeg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
915
	return simpleFunc(vals, enh, func(v float64) float64 {
916
		return v * 180 / math.Pi
917
	}), nil
918
}
919

920
// === pi() Scalar ===
921
func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
922
	return Vector{Sample{F: math.Pi}}, nil
923
}
924

925
// === sgn(Vector parser.ValueTypeVector) (Vector, Annotations) ===
926
func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
927
	return simpleFunc(vals, enh, func(v float64) float64 {
928
		switch {
929
		case v < 0:
930
			return -1
931
		case v > 0:
932
			return 1
933
		default:
934
			return v
935
		}
936
	}), nil
937
}
938

939
// === timestamp(Vector parser.ValueTypeVector) (Vector, Annotations) ===
940
func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
941
	vec := vals[0].(Vector)
942
	for _, el := range vec {
943
		enh.Out = append(enh.Out, Sample{
944
			Metric: el.Metric.DropMetricName(),
945
			F:      float64(el.T) / 1000,
946
		})
947
	}
948
	return enh.Out, nil
949
}
950

951
// kahanSumInc adds inc to the running compensated sum (sum, c) and returns the
// updated pair. It uses the Neumaier variant of Kahan summation, which also
// tracks the low-order bits when the new term is larger than the running sum.
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	total := sum + inc
	switch {
	case math.Abs(inc) > math.Abs(sum):
		// The increment dominates: the low-order bits of sum were lost.
		c += (inc - total) + sum
	default:
		// The running sum dominates: the low-order bits of inc were lost.
		c += (sum - total) + inc
	}
	return total, c
}
961

962
// linearRegression performs a least-square linear regression analysis on the
963
// provided SamplePairs. It returns the slope, and the intercept value at the
964
// provided time.
965
func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept float64) {
966
	var (
967
		n          float64
968
		sumX, cX   float64
969
		sumY, cY   float64
970
		sumXY, cXY float64
971
		sumX2, cX2 float64
972
		initY      float64
973
		constY     bool
974
	)
975
	initY = samples[0].F
976
	constY = true
977
	for i, sample := range samples {
978
		// Set constY to false if any new y values are encountered.
979
		if constY && i > 0 && sample.F != initY {
980
			constY = false
981
		}
982
		n += 1.0
983
		x := float64(sample.T-interceptTime) / 1e3
984
		sumX, cX = kahanSumInc(x, sumX, cX)
985
		sumY, cY = kahanSumInc(sample.F, sumY, cY)
986
		sumXY, cXY = kahanSumInc(x*sample.F, sumXY, cXY)
987
		sumX2, cX2 = kahanSumInc(x*x, sumX2, cX2)
988
	}
989
	if constY {
990
		if math.IsInf(initY, 0) {
991
			return math.NaN(), math.NaN()
992
		}
993
		return 0, initY
994
	}
995
	sumX += cX
996
	sumY += cY
997
	sumXY += cXY
998
	sumX2 += cX2
999

1000
	covXY := sumXY - sumX*sumY/n
1001
	varX := sumX2 - sumX*sumX/n
1002

1003
	slope = covXY / varX
1004
	intercept = sumY/n - slope*sumX/n
1005
	return slope, intercept
1006
}
1007

1008
// === deriv(node parser.ValueTypeMatrix) (Vector, Annotations) ===
1009
func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1010
	samples := vals[0].(Matrix)[0]
1011

1012
	// No sense in trying to compute a derivative without at least two points.
1013
	// Drop this Vector element.
1014
	if len(samples.Floats) < 2 {
1015
		return enh.Out, nil
1016
	}
1017

1018
	// We pass in an arbitrary timestamp that is near the values in use
1019
	// to avoid floating point accuracy issues, see
1020
	// https://github.com/prometheus/prometheus/issues/2674
1021
	slope, _ := linearRegression(samples.Floats, samples.Floats[0].T)
1022
	return append(enh.Out, Sample{F: slope}), nil
1023
}
1024

1025
// === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) (Vector, Annotations) ===
1026
func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1027
	samples := vals[0].(Matrix)[0]
1028
	duration := vals[1].(Vector)[0].F
1029
	// No sense in trying to predict anything without at least two points.
1030
	// Drop this Vector element.
1031
	if len(samples.Floats) < 2 {
1032
		return enh.Out, nil
1033
	}
1034
	slope, intercept := linearRegression(samples.Floats, enh.Ts)
1035

1036
	return append(enh.Out, Sample{F: slope*duration + intercept}), nil
1037
}
1038

1039
// === histogram_count(Vector parser.ValueTypeVector) (Vector, Annotations) ===
1040
func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1041
	inVec := vals[0].(Vector)
1042

1043
	for _, sample := range inVec {
1044
		// Skip non-histogram samples.
1045
		if sample.H == nil {
1046
			continue
1047
		}
1048
		enh.Out = append(enh.Out, Sample{
1049
			Metric: sample.Metric.DropMetricName(),
1050
			F:      sample.H.Count,
1051
		})
1052
	}
1053
	return enh.Out, nil
1054
}
1055

1056
// === histogram_sum(Vector parser.ValueTypeVector) (Vector, Annotations) ===
1057
func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1058
	inVec := vals[0].(Vector)
1059

1060
	for _, sample := range inVec {
1061
		// Skip non-histogram samples.
1062
		if sample.H == nil {
1063
			continue
1064
		}
1065
		enh.Out = append(enh.Out, Sample{
1066
			Metric: sample.Metric.DropMetricName(),
1067
			F:      sample.H.Sum,
1068
		})
1069
	}
1070
	return enh.Out, nil
1071
}
1072

1073
// === histogram_avg(Vector parser.ValueTypeVector) (Vector, Annotations) ===
1074
func funcHistogramAvg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1075
	inVec := vals[0].(Vector)
1076

1077
	for _, sample := range inVec {
1078
		// Skip non-histogram samples.
1079
		if sample.H == nil {
1080
			continue
1081
		}
1082
		enh.Out = append(enh.Out, Sample{
1083
			Metric: sample.Metric.DropMetricName(),
1084
			F:      sample.H.Sum / sample.H.Count,
1085
		})
1086
	}
1087
	return enh.Out, nil
1088
}
1089

1090
// === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations)  ===
1091
func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1092
	inVec := vals[0].(Vector)
1093

1094
	for _, sample := range inVec {
1095
		// Skip non-histogram samples.
1096
		if sample.H == nil {
1097
			continue
1098
		}
1099
		mean := sample.H.Sum / sample.H.Count
1100
		var variance, cVariance float64
1101
		it := sample.H.AllBucketIterator()
1102
		for it.Next() {
1103
			bucket := it.At()
1104
			if bucket.Count == 0 {
1105
				continue
1106
			}
1107
			var val float64
1108
			if bucket.Lower <= 0 && 0 <= bucket.Upper {
1109
				val = 0
1110
			} else {
1111
				val = math.Sqrt(bucket.Upper * bucket.Lower)
1112
				if bucket.Upper < 0 {
1113
					val = -val
1114
				}
1115
			}
1116
			delta := val - mean
1117
			variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
1118
		}
1119
		variance += cVariance
1120
		variance /= sample.H.Count
1121
		enh.Out = append(enh.Out, Sample{
1122
			Metric: sample.Metric.DropMetricName(),
1123
			F:      math.Sqrt(variance),
1124
		})
1125
	}
1126
	return enh.Out, nil
1127
}
1128

1129
// === histogram_stdvar(Vector parser.ValueTypeVector) (Vector, Annotations) ===
1130
func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1131
	inVec := vals[0].(Vector)
1132

1133
	for _, sample := range inVec {
1134
		// Skip non-histogram samples.
1135
		if sample.H == nil {
1136
			continue
1137
		}
1138
		mean := sample.H.Sum / sample.H.Count
1139
		var variance, cVariance float64
1140
		it := sample.H.AllBucketIterator()
1141
		for it.Next() {
1142
			bucket := it.At()
1143
			if bucket.Count == 0 {
1144
				continue
1145
			}
1146
			var val float64
1147
			if bucket.Lower <= 0 && 0 <= bucket.Upper {
1148
				val = 0
1149
			} else {
1150
				val = math.Sqrt(bucket.Upper * bucket.Lower)
1151
				if bucket.Upper < 0 {
1152
					val = -val
1153
				}
1154
			}
1155
			delta := val - mean
1156
			variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
1157
		}
1158
		variance += cVariance
1159
		variance /= sample.H.Count
1160
		enh.Out = append(enh.Out, Sample{
1161
			Metric: sample.Metric.DropMetricName(),
1162
			F:      variance,
1163
		})
1164
	}
1165
	return enh.Out, nil
1166
}
1167

1168
// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) ===
1169
func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1170
	lower := vals[0].(Vector)[0].F
1171
	upper := vals[1].(Vector)[0].F
1172
	inVec := vals[2].(Vector)
1173

1174
	for _, sample := range inVec {
1175
		// Skip non-histogram samples.
1176
		if sample.H == nil {
1177
			continue
1178
		}
1179
		enh.Out = append(enh.Out, Sample{
1180
			Metric: sample.Metric.DropMetricName(),
1181
			F:      histogramFraction(lower, upper, sample.H),
1182
		})
1183
	}
1184
	return enh.Out, nil
1185
}
1186

1187
// === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) ===
1188
func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1189
	q := vals[0].(Vector)[0].F
1190
	inVec := vals[1].(Vector)
1191
	var annos annotations.Annotations
1192

1193
	if math.IsNaN(q) || q < 0 || q > 1 {
1194
		annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange()))
1195
	}
1196

1197
	if enh.signatureToMetricWithBuckets == nil {
1198
		enh.signatureToMetricWithBuckets = map[string]*metricWithBuckets{}
1199
	} else {
1200
		for _, v := range enh.signatureToMetricWithBuckets {
1201
			v.buckets = v.buckets[:0]
1202
		}
1203
	}
1204

1205
	var histogramSamples []Sample
1206

1207
	for _, sample := range inVec {
1208
		// We are only looking for classic buckets here. Remember
1209
		// the histograms for later treatment.
1210
		if sample.H != nil {
1211
			histogramSamples = append(histogramSamples, sample)
1212
			continue
1213
		}
1214

1215
		upperBound, err := strconv.ParseFloat(
1216
			sample.Metric.Get(model.BucketLabel), 64,
1217
		)
1218
		if err != nil {
1219
			annos.Add(annotations.NewBadBucketLabelWarning(sample.Metric.Get(labels.MetricName), sample.Metric.Get(model.BucketLabel), args[1].PositionRange()))
1220
			continue
1221
		}
1222
		enh.lblBuf = sample.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel)
1223
		mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]
1224
		if !ok {
1225
			sample.Metric = labels.NewBuilder(sample.Metric).
1226
				Del(excludedLabels...).
1227
				Labels()
1228

1229
			mb = &metricWithBuckets{sample.Metric, nil}
1230
			enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb
1231
		}
1232
		mb.buckets = append(mb.buckets, bucket{upperBound, sample.F})
1233
	}
1234

1235
	// Now deal with the histograms.
1236
	for _, sample := range histogramSamples {
1237
		// We have to reconstruct the exact same signature as above for
1238
		// a classic histogram, just ignoring any le label.
1239
		enh.lblBuf = sample.Metric.Bytes(enh.lblBuf)
1240
		if mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]; ok && len(mb.buckets) > 0 {
1241
			// At this data point, we have classic histogram
1242
			// buckets and a native histogram with the same name and
1243
			// labels. Do not evaluate anything.
1244
			annos.Add(annotations.NewMixedClassicNativeHistogramsWarning(sample.Metric.Get(labels.MetricName), args[1].PositionRange()))
1245
			delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf))
1246
			continue
1247
		}
1248

1249
		enh.Out = append(enh.Out, Sample{
1250
			Metric: sample.Metric.DropMetricName(),
1251
			F:      histogramQuantile(q, sample.H),
1252
		})
1253
	}
1254

1255
	for _, mb := range enh.signatureToMetricWithBuckets {
1256
		if len(mb.buckets) > 0 {
1257
			res, forcedMonotonicity, _ := bucketQuantile(q, mb.buckets)
1258
			enh.Out = append(enh.Out, Sample{
1259
				Metric: mb.metric,
1260
				F:      res,
1261
			})
1262
			if forcedMonotonicity {
1263
				annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(mb.metric.Get(labels.MetricName), args[1].PositionRange()))
1264
			}
1265
		}
1266
	}
1267

1268
	return enh.Out, annos
1269
}
1270

1271
// === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
1272
func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1273
	floats := vals[0].(Matrix)[0].Floats
1274
	histograms := vals[0].(Matrix)[0].Histograms
1275
	resets := 0
1276

1277
	if len(floats) > 1 {
1278
		prev := floats[0].F
1279
		for _, sample := range floats[1:] {
1280
			current := sample.F
1281
			if current < prev {
1282
				resets++
1283
			}
1284
			prev = current
1285
		}
1286
	}
1287

1288
	if len(histograms) > 1 {
1289
		prev := histograms[0].H
1290
		for _, sample := range histograms[1:] {
1291
			current := sample.H
1292
			if current.DetectReset(prev) {
1293
				resets++
1294
			}
1295
			prev = current
1296
		}
1297
	}
1298

1299
	return append(enh.Out, Sample{F: float64(resets)}), nil
1300
}
1301

1302
// === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
1303
func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1304
	floats := vals[0].(Matrix)[0].Floats
1305
	changes := 0
1306

1307
	if len(floats) == 0 {
1308
		// TODO(beorn7): Only histogram values, still need to add support.
1309
		return enh.Out, nil
1310
	}
1311

1312
	prev := floats[0].F
1313
	for _, sample := range floats[1:] {
1314
		current := sample.F
1315
		if current != prev && !(math.IsNaN(current) && math.IsNaN(prev)) {
1316
			changes++
1317
		}
1318
		prev = current
1319
	}
1320

1321
	return append(enh.Out, Sample{F: float64(changes)}), nil
1322
}
1323

1324
// label_replace function operates only on series; does not look at timestamps or values.
1325
func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, annotations.Annotations) {
1326
	var (
1327
		dst      = stringFromArg(args[1])
1328
		repl     = stringFromArg(args[2])
1329
		src      = stringFromArg(args[3])
1330
		regexStr = stringFromArg(args[4])
1331
	)
1332

1333
	regex, err := regexp.Compile("^(?:" + regexStr + ")$")
1334
	if err != nil {
1335
		panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr))
1336
	}
1337
	if !model.LabelNameRE.MatchString(dst) {
1338
		panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst))
1339
	}
1340

1341
	val, ws := ev.eval(args[0])
1342
	matrix := val.(Matrix)
1343
	lb := labels.NewBuilder(labels.EmptyLabels())
1344

1345
	for i, el := range matrix {
1346
		srcVal := el.Metric.Get(src)
1347
		indexes := regex.FindStringSubmatchIndex(srcVal)
1348
		if indexes != nil { // Only replace when regexp matches.
1349
			res := regex.ExpandString([]byte{}, repl, srcVal, indexes)
1350
			lb.Reset(el.Metric)
1351
			lb.Set(dst, string(res))
1352
			matrix[i].Metric = lb.Labels()
1353
		}
1354
	}
1355
	if matrix.ContainsSameLabelset() {
1356
		ev.errorf("vector cannot contain metrics with the same labelset")
1357
	}
1358

1359
	return matrix, ws
1360
}
1361

1362
// === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) (Vector, Annotations) ===
1363
func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1364
	panic("funcLabelReplace wrong implementation called")
1365
}
1366

1367
// === Vector(s Scalar) (Vector, Annotations) ===
1368
func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1369
	return append(enh.Out,
1370
		Sample{
1371
			Metric: labels.Labels{},
1372
			F:      vals[0].(Vector)[0].F,
1373
		}), nil
1374
}
1375

1376
// label_join function operates only on series; does not look at timestamps or values.
1377
func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annotations.Annotations) {
1378
	var (
1379
		dst       = stringFromArg(args[1])
1380
		sep       = stringFromArg(args[2])
1381
		srcLabels = make([]string, len(args)-3)
1382
	)
1383
	for i := 3; i < len(args); i++ {
1384
		src := stringFromArg(args[i])
1385
		if !model.LabelName(src).IsValid() {
1386
			panic(fmt.Errorf("invalid source label name in label_join(): %s", src))
1387
		}
1388
		srcLabels[i-3] = src
1389
	}
1390
	if !model.LabelName(dst).IsValid() {
1391
		panic(fmt.Errorf("invalid destination label name in label_join(): %s", dst))
1392
	}
1393

1394
	val, ws := ev.eval(args[0])
1395
	matrix := val.(Matrix)
1396
	srcVals := make([]string, len(srcLabels))
1397
	lb := labels.NewBuilder(labels.EmptyLabels())
1398

1399
	for i, el := range matrix {
1400
		for i, src := range srcLabels {
1401
			srcVals[i] = el.Metric.Get(src)
1402
		}
1403
		strval := strings.Join(srcVals, sep)
1404
		lb.Reset(el.Metric)
1405
		lb.Set(dst, strval)
1406
		matrix[i].Metric = lb.Labels()
1407
	}
1408

1409
	return matrix, ws
1410
}
1411

1412
// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) (Vector, Annotations) ===
1413
func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1414
	panic("funcLabelReplace wrong implementation called")
1415
}
1416

1417
// Common code for date related functions.
1418
func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) float64) Vector {
1419
	if len(vals) == 0 {
1420
		return append(enh.Out,
1421
			Sample{
1422
				Metric: labels.Labels{},
1423
				F:      f(time.Unix(enh.Ts/1000, 0).UTC()),
1424
			})
1425
	}
1426

1427
	for _, el := range vals[0].(Vector) {
1428
		t := time.Unix(int64(el.F), 0).UTC()
1429
		enh.Out = append(enh.Out, Sample{
1430
			Metric: el.Metric.DropMetricName(),
1431
			F:      f(t),
1432
		})
1433
	}
1434
	return enh.Out
1435
}
1436

1437
// === days_in_month(v Vector) Scalar ===
1438
func funcDaysInMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1439
	return dateWrapper(vals, enh, func(t time.Time) float64 {
1440
		return float64(32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day())
1441
	}), nil
1442
}
1443

1444
// === day_of_month(v Vector) Scalar ===
1445
func funcDayOfMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1446
	return dateWrapper(vals, enh, func(t time.Time) float64 {
1447
		return float64(t.Day())
1448
	}), nil
1449
}
1450

1451
// === day_of_week(v Vector) Scalar ===
1452
func funcDayOfWeek(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1453
	return dateWrapper(vals, enh, func(t time.Time) float64 {
1454
		return float64(t.Weekday())
1455
	}), nil
1456
}
1457

1458
// === day_of_year(v Vector) Scalar ===
1459
func funcDayOfYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1460
	return dateWrapper(vals, enh, func(t time.Time) float64 {
1461
		return float64(t.YearDay())
1462
	}), nil
1463
}
1464

1465
// === hour(v Vector) Scalar ===
1466
func funcHour(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1467
	return dateWrapper(vals, enh, func(t time.Time) float64 {
1468
		return float64(t.Hour())
1469
	}), nil
1470
}
1471

1472
// === minute(v Vector) Scalar ===
1473
func funcMinute(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1474
	return dateWrapper(vals, enh, func(t time.Time) float64 {
1475
		return float64(t.Minute())
1476
	}), nil
1477
}
1478

1479
// === month(v Vector) Scalar ===
1480
func funcMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1481
	return dateWrapper(vals, enh, func(t time.Time) float64 {
1482
		return float64(t.Month())
1483
	}), nil
1484
}
1485

1486
// === year(v Vector) Scalar ===
1487
func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
1488
	return dateWrapper(vals, enh, func(t time.Time) float64 {
1489
		return float64(t.Year())
1490
	}), nil
1491
}
1492

1493
// FunctionCalls is a list of all functions supported by PromQL, including their types.
1494
var FunctionCalls = map[string]FunctionCall{
1495
	"abs":                funcAbs,
1496
	"absent":             funcAbsent,
1497
	"absent_over_time":   funcAbsentOverTime,
1498
	"acos":               funcAcos,
1499
	"acosh":              funcAcosh,
1500
	"asin":               funcAsin,
1501
	"asinh":              funcAsinh,
1502
	"atan":               funcAtan,
1503
	"atanh":              funcAtanh,
1504
	"avg_over_time":      funcAvgOverTime,
1505
	"ceil":               funcCeil,
1506
	"changes":            funcChanges,
1507
	"clamp":              funcClamp,
1508
	"clamp_max":          funcClampMax,
1509
	"clamp_min":          funcClampMin,
1510
	"cos":                funcCos,
1511
	"cosh":               funcCosh,
1512
	"count_over_time":    funcCountOverTime,
1513
	"days_in_month":      funcDaysInMonth,
1514
	"day_of_month":       funcDayOfMonth,
1515
	"day_of_week":        funcDayOfWeek,
1516
	"day_of_year":        funcDayOfYear,
1517
	"deg":                funcDeg,
1518
	"delta":              funcDelta,
1519
	"deriv":              funcDeriv,
1520
	"exp":                funcExp,
1521
	"floor":              funcFloor,
1522
	"histogram_avg":      funcHistogramAvg,
1523
	"histogram_count":    funcHistogramCount,
1524
	"histogram_fraction": funcHistogramFraction,
1525
	"histogram_quantile": funcHistogramQuantile,
1526
	"histogram_sum":      funcHistogramSum,
1527
	"histogram_stddev":   funcHistogramStdDev,
1528
	"histogram_stdvar":   funcHistogramStdVar,
1529
	"holt_winters":       funcHoltWinters,
1530
	"hour":               funcHour,
1531
	"idelta":             funcIdelta,
1532
	"increase":           funcIncrease,
1533
	"irate":              funcIrate,
1534
	"label_replace":      funcLabelReplace,
1535
	"label_join":         funcLabelJoin,
1536
	"ln":                 funcLn,
1537
	"log10":              funcLog10,
1538
	"log2":               funcLog2,
1539
	"last_over_time":     funcLastOverTime,
1540
	"mad_over_time":      funcMadOverTime,
1541
	"max_over_time":      funcMaxOverTime,
1542
	"min_over_time":      funcMinOverTime,
1543
	"minute":             funcMinute,
1544
	"month":              funcMonth,
1545
	"pi":                 funcPi,
1546
	"predict_linear":     funcPredictLinear,
1547
	"present_over_time":  funcPresentOverTime,
1548
	"quantile_over_time": funcQuantileOverTime,
1549
	"rad":                funcRad,
1550
	"rate":               funcRate,
1551
	"resets":             funcResets,
1552
	"round":              funcRound,
1553
	"scalar":             funcScalar,
1554
	"sgn":                funcSgn,
1555
	"sin":                funcSin,
1556
	"sinh":               funcSinh,
1557
	"sort":               funcSort,
1558
	"sort_desc":          funcSortDesc,
1559
	"sort_by_label":      funcSortByLabel,
1560
	"sort_by_label_desc": funcSortByLabelDesc,
1561
	"sqrt":               funcSqrt,
1562
	"stddev_over_time":   funcStddevOverTime,
1563
	"stdvar_over_time":   funcStdvarOverTime,
1564
	"sum_over_time":      funcSumOverTime,
1565
	"tan":                funcTan,
1566
	"tanh":               funcTanh,
1567
	"time":               funcTime,
1568
	"timestamp":          funcTimestamp,
1569
	"vector":             funcVector,
1570
	"year":               funcYear,
1571
}
1572

1573
// AtModifierUnsafeFunctions are the functions whose result
// can vary if evaluation time is changed when the arguments are
// step invariant. It also includes functions that use the timestamps
// of the passed instant vector argument to calculate a result since
// that can also change with change in eval time.
var AtModifierUnsafeFunctions = map[string]struct{}{
	// Step invariant functions.
	"days_in_month": {}, "day_of_month": {}, "day_of_week": {}, "day_of_year": {},
	"hour": {}, "minute": {}, "month": {}, "year": {},
	"predict_linear": {}, "time": {},
	// Uses timestamp of the argument for the result,
	// hence unsafe to use with @ modifier.
	"timestamp": {},
}
1587

1588
type vectorByValueHeap Vector
1589

1590
func (s vectorByValueHeap) Len() int {
1591
	return len(s)
1592
}
1593

1594
func (s vectorByValueHeap) Less(i, j int) bool {
1595
	// We compare histograms based on their sum of observations.
1596
	// TODO(beorn7): Is that what we want?
1597
	vi, vj := s[i].F, s[j].F
1598
	if s[i].H != nil {
1599
		vi = s[i].H.Sum
1600
	}
1601
	if s[j].H != nil {
1602
		vj = s[j].H.Sum
1603
	}
1604

1605
	if math.IsNaN(vi) {
1606
		return true
1607
	}
1608
	return vi < vj
1609
}
1610

1611
func (s vectorByValueHeap) Swap(i, j int) {
1612
	s[i], s[j] = s[j], s[i]
1613
}
1614

1615
func (s *vectorByValueHeap) Push(x interface{}) {
1616
	*s = append(*s, *(x.(*Sample)))
1617
}
1618

1619
func (s *vectorByValueHeap) Pop() interface{} {
1620
	old := *s
1621
	n := len(old)
1622
	el := old[n-1]
1623
	*s = old[0 : n-1]
1624
	return el
1625
}
1626

1627
type vectorByReverseValueHeap Vector
1628

1629
func (s vectorByReverseValueHeap) Len() int {
1630
	return len(s)
1631
}
1632

1633
func (s vectorByReverseValueHeap) Less(i, j int) bool {
1634
	// We compare histograms based on their sum of observations.
1635
	// TODO(beorn7): Is that what we want?
1636
	vi, vj := s[i].F, s[j].F
1637
	if s[i].H != nil {
1638
		vi = s[i].H.Sum
1639
	}
1640
	if s[j].H != nil {
1641
		vj = s[j].H.Sum
1642
	}
1643

1644
	if math.IsNaN(vi) {
1645
		return true
1646
	}
1647
	return vi > vj
1648
}
1649

1650
func (s vectorByReverseValueHeap) Swap(i, j int) {
1651
	s[i], s[j] = s[j], s[i]
1652
}
1653

1654
func (s *vectorByReverseValueHeap) Push(x interface{}) {
1655
	*s = append(*s, *(x.(*Sample)))
1656
}
1657

1658
func (s *vectorByReverseValueHeap) Pop() interface{} {
1659
	old := *s
1660
	n := len(old)
1661
	el := old[n-1]
1662
	*s = old[0 : n-1]
1663
	return el
1664
}
1665

1666
// createLabelsForAbsentFunction returns the labels that are uniquely and exactly matched
1667
// in a given expression. It is used in the absent functions.
1668
func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels {
1669
	b := labels.NewBuilder(labels.EmptyLabels())
1670

1671
	var lm []*labels.Matcher
1672
	switch n := expr.(type) {
1673
	case *parser.VectorSelector:
1674
		lm = n.LabelMatchers
1675
	case *parser.MatrixSelector:
1676
		lm = n.VectorSelector.(*parser.VectorSelector).LabelMatchers
1677
	default:
1678
		return labels.EmptyLabels()
1679
	}
1680

1681
	// The 'has' map implements backwards-compatibility for historic behaviour:
1682
	// e.g. in `absent(x{job="a",job="b",foo="bar"})` then `job` is removed from the output.
1683
	// Note this gives arguably wrong behaviour for `absent(x{job="a",job="a",foo="bar"})`.
1684
	has := make(map[string]bool, len(lm))
1685
	for _, ma := range lm {
1686
		if ma.Name == labels.MetricName {
1687
			continue
1688
		}
1689
		if ma.Type == labels.MatchEqual && !has[ma.Name] {
1690
			b.Set(ma.Name, ma.Value)
1691
			has[ma.Name] = true
1692
		} else {
1693
			b.Del(ma.Name)
1694
		}
1695
	}
1696

1697
	return b.Labels()
1698
}
1699

1700
// stringFromArg extracts the string literal value from a (possibly wrapped)
// argument expression.
func stringFromArg(e parser.Expr) string {
	arg := unwrapStepInvariantExpr(e) // Unwrap StepInvariant
	unwrapParenExpr(&arg)             // Optionally unwrap ParenExpr
	return arg.(*parser.StringLiteral).Val
}
1705

1706
// stringSliceFromArgs converts every argument expression to its string
// literal value, preserving order.
func stringSliceFromArgs(args parser.Expressions) []string {
	res := make([]string, len(args))
	for i, arg := range args {
		res[i] = stringFromArg(arg)
	}
	return res
}
1713

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.