// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package scrape

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/go-kit/log"
	"github.com/gogo/protobuf/proto"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/require"
	"google.golang.org/protobuf/types/known/timestamppb"
	"gopkg.in/yaml.v2"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
	"github.com/prometheus/prometheus/util/runutil"
	"github.com/prometheus/prometheus/util/testutil"
)

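// TestPopulateLabels verifies that PopulateLabels merges discovered target
// labels with scrape-config defaults, applies relabeling, validates the
// result, and returns both the final and the pre-relabeling label sets.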
func TestPopulateLabels(t *testing.T) {
	cases := []struct {
		in            labels.Labels
		cfg           *config.ScrapeConfig
		noDefaultPort bool
		res           labels.Labels
		resOrig       labels.Labels
		err           string
	}{
		// Regular population of scrape config options.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel: "1.2.3.4:1000",
				"custom":           "value",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4:1000",
				model.InstanceLabel:       "1.2.3.4:1000",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
				"custom":                  "value",
			}),
			resOrig: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4:1000",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				"custom":                  "value",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
			}),
		},
		// Pre-define/overwrite scrape config labels.
		// Leave out port and expect it to be defaulted to scheme.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4",
				model.SchemeLabel:         "http",
				model.MetricsPathLabel:    "/custom",
				model.JobLabel:            "custom-job",
				model.ScrapeIntervalLabel: "2s",
				model.ScrapeTimeoutLabel:  "2s",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4:80",
				model.InstanceLabel:       "1.2.3.4:80",
				model.SchemeLabel:         "http",
				model.MetricsPathLabel:    "/custom",
				model.JobLabel:            "custom-job",
				model.ScrapeIntervalLabel: "2s",
				model.ScrapeTimeoutLabel:  "2s",
			}),
			resOrig: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4",
				model.SchemeLabel:         "http",
				model.MetricsPathLabel:    "/custom",
				model.JobLabel:            "custom-job",
				model.ScrapeIntervalLabel: "2s",
				model.ScrapeTimeoutLabel:  "2s",
			}),
		},
		// Provide instance label. HTTPS port default for IPv6.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel:  "[::1]",
				model.InstanceLabel: "custom-instance",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res: labels.FromMap(map[string]string{
				model.AddressLabel:        "[::1]:443",
				model.InstanceLabel:       "custom-instance",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
			}),
			resOrig: labels.FromMap(map[string]string{
				model.AddressLabel:        "[::1]",
				model.InstanceLabel:       "custom-instance",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
			}),
		},
		// Address label missing.
		{
			in: labels.FromStrings("custom", "value"),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res:     labels.EmptyLabels(),
			resOrig: labels.EmptyLabels(),
			err:     "no address",
		},
		// Address label missing, but added in relabelling.
		{
			in: labels.FromStrings("custom", "host:1234"),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
				RelabelConfigs: []*relabel.Config{
					{
						Action:       relabel.Replace,
						Regex:        relabel.MustNewRegexp("(.*)"),
						SourceLabels: model.LabelNames{"custom"},
						Replacement:  "${1}",
						TargetLabel:  string(model.AddressLabel),
					},
				},
			},
			res: labels.FromMap(map[string]string{
				model.AddressLabel:        "host:1234",
				model.InstanceLabel:       "host:1234",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
				"custom":                  "host:1234",
			}),
			resOrig: labels.FromMap(map[string]string{
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
				"custom":                  "host:1234",
			}),
		},
		// Address label missing, but added in relabelling.
		{
			in: labels.FromStrings("custom", "host:1234"),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
				RelabelConfigs: []*relabel.Config{
					{
						Action:       relabel.Replace,
						Regex:        relabel.MustNewRegexp("(.*)"),
						SourceLabels: model.LabelNames{"custom"},
						Replacement:  "${1}",
						TargetLabel:  string(model.AddressLabel),
					},
				},
			},
			res: labels.FromMap(map[string]string{
				model.AddressLabel:        "host:1234",
				model.InstanceLabel:       "host:1234",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
				"custom":                  "host:1234",
			}),
			resOrig: labels.FromMap(map[string]string{
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
				"custom":                  "host:1234",
			}),
		},
		// Invalid UTF-8 in label.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel: "1.2.3.4:1000",
				"custom":           "\xbd",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res:     labels.EmptyLabels(),
			resOrig: labels.EmptyLabels(),
			err:     "invalid label value for \"custom\": \"\\xbd\"",
		},
		// Invalid duration in interval label.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4:1000",
				model.ScrapeIntervalLabel: "2notseconds",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res:     labels.EmptyLabels(),
			resOrig: labels.EmptyLabels(),
			err:     "error parsing scrape interval: unknown unit \"notseconds\" in duration \"2notseconds\"",
		},
		// Invalid duration in timeout label.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel:       "1.2.3.4:1000",
				model.ScrapeTimeoutLabel: "2notseconds",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res:     labels.EmptyLabels(),
			resOrig: labels.EmptyLabels(),
			err:     "error parsing scrape timeout: unknown unit \"notseconds\" in duration \"2notseconds\"",
		},
		// 0 interval in interval label.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4:1000",
				model.ScrapeIntervalLabel: "0s",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res:     labels.EmptyLabels(),
			resOrig: labels.EmptyLabels(),
			err:     "scrape interval cannot be 0",
		},
		// 0 duration in timeout label.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel:       "1.2.3.4:1000",
				model.ScrapeTimeoutLabel: "0s",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res:     labels.EmptyLabels(),
			resOrig: labels.EmptyLabels(),
			err:     "scrape timeout cannot be 0",
		},
		// Timeout greater than interval.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4:1000",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "2s",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res:     labels.EmptyLabels(),
			resOrig: labels.EmptyLabels(),
			err:     "scrape timeout cannot be greater than scrape interval (\"2s\" > \"1s\")",
		},
		// Don't attach default port.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel: "1.2.3.4",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			noDefaultPort: true,
			res: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4",
				model.InstanceLabel:       "1.2.3.4",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
			}),
			resOrig: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
			}),
		},
		// Remove default port (http).
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel: "1.2.3.4:80",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "http",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			noDefaultPort: true,
			res: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4",
				model.InstanceLabel:       "1.2.3.4:80",
				model.SchemeLabel:         "http",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
			}),
			resOrig: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4:80",
				model.SchemeLabel:         "http",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
			}),
		},
		// Remove default port (https).
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel: "1.2.3.4:443",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			noDefaultPort: true,
			res: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4",
				model.InstanceLabel:       "1.2.3.4:443",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
			}),
			resOrig: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4:443",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
			}),
		},
	}
	for _, c := range cases {
		in := c.in.Copy()

		res, orig, err := PopulateLabels(labels.NewBuilder(c.in), c.cfg, c.noDefaultPort)
		if c.err != "" {
			require.EqualError(t, err, c.err)
		} else {
			require.NoError(t, err)
		}
		require.Equal(t, c.in, in)
		testutil.RequireEqual(t, c.res, res)
		testutil.RequireEqual(t, c.resOrig, orig)
	}
}

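// loadConfiguration parses the given YAML into a config.Config, failing the
// test on any unmarshal error.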
func loadConfiguration(t testing.TB, c string) *config.Config {
	t.Helper()

	cfg := &config.Config{}
	err := yaml.UnmarshalStrict([]byte(c), cfg)
	require.NoError(t, err, "Unable to load YAML config.")

	return cfg
}

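// noopLoop returns a scrape loop stub whose start and stop functions do
// nothing, for tests that only exercise pool bookkeeping.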
func noopLoop() loop {
	return &testLoop{
		startFunc: func(interval, timeout time.Duration, errc chan<- error) {},
		stopFunc:  func() {},
	}
}

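// TestManagerApplyConfig checks that scrape loops are reloaded only when the
// applied configuration actually changes and is valid.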
func TestManagerApplyConfig(t *testing.T) {
	// Valid initial configuration.
	cfgText1 := `
scrape_configs:
- job_name: job1
  static_configs:
  - targets: ["foo:9090"]
`
	// Invalid configuration.
	cfgText2 := `
scrape_configs:
- job_name: job1
  scheme: https
  static_configs:
  - targets: ["foo:9090"]
  tls_config:
    ca_file: /not/existing/ca/file
`
	// Valid configuration.
	cfgText3 := `
scrape_configs:
- job_name: job1
  scheme: https
  static_configs:
  - targets: ["foo:9090"]
`
	var (
		cfg1 = loadConfiguration(t, cfgText1)
		cfg2 = loadConfiguration(t, cfgText2)
		cfg3 = loadConfiguration(t, cfgText3)

		ch = make(chan struct{}, 1)

		testRegistry = prometheus.NewRegistry()
	)

	opts := Options{}
	scrapeManager, err := NewManager(&opts, nil, nil, testRegistry)
	require.NoError(t, err)
	newLoop := func(scrapeLoopOptions) loop {
		ch <- struct{}{}
		return noopLoop()
	}
	sp := &scrapePool{
		appendable: &nopAppendable{},
		activeTargets: map[uint64]*Target{
			1: {},
		},
		loops: map[uint64]loop{
			1: noopLoop(),
		},
		newLoop:     newLoop,
		logger:      nil,
		config:      cfg1.ScrapeConfigs[0],
		client:      http.DefaultClient,
		metrics:     scrapeManager.metrics,
		symbolTable: labels.NewSymbolTable(),
	}
	scrapeManager.scrapePools = map[string]*scrapePool{
		"job1": sp,
	}

	// Apply the initial configuration.
	err = scrapeManager.ApplyConfig(cfg1)
	require.NoError(t, err, "Unable to apply configuration.")
	select {
	case <-ch:
		require.FailNow(t, "Reload happened.")
	default:
	}

	// Apply a configuration for which the reload fails.
	err = scrapeManager.ApplyConfig(cfg2)
	require.Error(t, err, "Expecting error but got none.")
	select {
	case <-ch:
		require.FailNow(t, "Reload happened.")
	default:
	}

	// Apply a configuration for which the reload succeeds.
	err = scrapeManager.ApplyConfig(cfg3)
	require.NoError(t, err, "Unable to apply configuration.")
	select {
	case <-ch:
	default:
		require.FailNow(t, "Reload didn't happen.")
	}

	// Re-applying the same configuration shouldn't trigger a reload.
	err = scrapeManager.ApplyConfig(cfg3)
	require.NoError(t, err, "Unable to apply configuration.")
	select {
	case <-ch:
		require.FailNow(t, "Reload happened.")
	default:
	}
}

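// TestManagerTargetsUpdates verifies that the manager drains target-group
// updates without blocking and triggers a scrape-loops reload.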
func TestManagerTargetsUpdates(t *testing.T) {
	opts := Options{}
	testRegistry := prometheus.NewRegistry()
	m, err := NewManager(&opts, nil, nil, testRegistry)
	require.NoError(t, err)

	ts := make(chan map[string][]*targetgroup.Group)
	go m.Run(ts)
	defer m.Stop()

	tgSent := make(map[string][]*targetgroup.Group)
	for x := 0; x < 10; x++ {
		tgSent[strconv.Itoa(x)] = []*targetgroup.Group{
			{
				Source: strconv.Itoa(x),
			},
		}

		select {
		case ts <- tgSent:
		case <-time.After(10 * time.Millisecond):
			require.Fail(t, "Scrape manager's channel remained blocked after the set threshold.")
		}
	}

	m.mtxScrape.Lock()
	tsetActual := m.targetSets
	m.mtxScrape.Unlock()

	// Make sure all updates have been received.
	require.Equal(t, tgSent, tsetActual)

	select {
	case <-m.triggerReload:
	default:
		require.Fail(t, "No scrape loops reload was triggered after targets update.")
	}
}

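// TestSetOffsetSeed checks that the scrape offset seed is derived from the
// external labels, so distinct label sets yield distinct seeds.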
func TestSetOffsetSeed(t *testing.T) {
	getConfig := func(prometheus string) *config.Config {
		cfgText := `
global:
  external_labels:
    prometheus: '` + prometheus + `'
`

		cfg := &config.Config{}
		err := yaml.UnmarshalStrict([]byte(cfgText), cfg)
		require.NoError(t, err, "Unable to load YAML config cfgYaml.")

		return cfg
	}

	opts := Options{}
	testRegistry := prometheus.NewRegistry()
	scrapeManager, err := NewManager(&opts, nil, nil, testRegistry)
	require.NoError(t, err)

	// Load the first config.
	cfg1 := getConfig("ha1")
	err = scrapeManager.setOffsetSeed(cfg1.GlobalConfig.ExternalLabels)
	require.NoError(t, err)
	offsetSeed1 := scrapeManager.offsetSeed

	require.NotZero(t, offsetSeed1, "Offset seed has to be a hash of uint64.")

	// Load the second config.
	cfg2 := getConfig("ha2")
	require.NoError(t, scrapeManager.setOffsetSeed(cfg2.GlobalConfig.ExternalLabels))
	offsetSeed2 := scrapeManager.offsetSeed

	require.NotEqual(t, offsetSeed1, offsetSeed2, "Offset seed should not be the same on different set of external labels.")
}

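// TestManagerScrapePools verifies that ScrapePools reports the scrape pools
// currently held by the manager after each (simulated) reload.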
func TestManagerScrapePools(t *testing.T) {
	cfgText1 := `
scrape_configs:
- job_name: job1
  static_configs:
  - targets: ["foo:9090"]
- job_name: job2
  static_configs:
  - targets: ["foo:9091", "foo:9092"]
`
	cfgText2 := `
scrape_configs:
- job_name: job1
  static_configs:
  - targets: ["foo:9090", "foo:9094"]
- job_name: job3
  static_configs:
  - targets: ["foo:9093"]
`
	var (
		cfg1         = loadConfiguration(t, cfgText1)
		cfg2         = loadConfiguration(t, cfgText2)
		testRegistry = prometheus.NewRegistry()
	)

	reload := func(scrapeManager *Manager, cfg *config.Config) {
		newLoop := func(scrapeLoopOptions) loop {
			return noopLoop()
		}
		scrapeManager.scrapePools = map[string]*scrapePool{}
		for _, sc := range cfg.ScrapeConfigs {
			_, cancel := context.WithCancel(context.Background())
			defer cancel()
			sp := &scrapePool{
				appendable:    &nopAppendable{},
				activeTargets: map[uint64]*Target{},
				loops: map[uint64]loop{
					1: noopLoop(),
				},
				newLoop: newLoop,
				logger:  nil,
				config:  sc,
				client:  http.DefaultClient,
				cancel:  cancel,
			}
			for _, c := range sc.ServiceDiscoveryConfigs {
				staticConfig := c.(discovery.StaticConfig)
				for _, group := range staticConfig {
					for i := range group.Targets {
						sp.activeTargets[uint64(i)] = &Target{}
					}
				}
			}
			scrapeManager.scrapePools[sc.JobName] = sp
		}
	}

	opts := Options{}
	scrapeManager, err := NewManager(&opts, nil, nil, testRegistry)
	require.NoError(t, err)

	reload(scrapeManager, cfg1)
	require.ElementsMatch(t, []string{"job1", "job2"}, scrapeManager.ScrapePools())

	reload(scrapeManager, cfg2)
	require.ElementsMatch(t, []string{"job1", "job3"}, scrapeManager.ScrapePools())
}

// TestManagerCTZeroIngestion tests the scrape manager for created timestamp (CT) cases.
func TestManagerCTZeroIngestion(t *testing.T) {
	const mName = "expected_counter"

	for _, tc := range []struct {
		name                  string
		counterSample         *dto.Counter
		enableCTZeroIngestion bool

		expectedValues []float64
	}{
		{
			name: "disabled with CT on counter",
			counterSample: &dto.Counter{
				Value: proto.Float64(1.0),
				// Timestamp does not matter as long as it exists in this test.
				CreatedTimestamp: timestamppb.Now(),
			},
			expectedValues: []float64{1.0},
		},
		{
			name: "enabled with CT on counter",
			counterSample: &dto.Counter{
				Value: proto.Float64(1.0),
				// Timestamp does not matter as long as it exists in this test.
				CreatedTimestamp: timestamppb.Now(),
			},
			enableCTZeroIngestion: true,
			expectedValues:        []float64{0.0, 1.0},
		},
		{
			name: "enabled without CT on counter",
			counterSample: &dto.Counter{
				Value: proto.Float64(1.0),
			},
			enableCTZeroIngestion: true,
			expectedValues:        []float64{1.0},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			app := &collectResultAppender{}
			scrapeManager, err := NewManager(
				&Options{
					EnableCreatedTimestampZeroIngestion: tc.enableCTZeroIngestion,
					skipOffsetting:                      true,
				},
				log.NewLogfmtLogger(os.Stderr),
				&collectResultAppendable{app},
				prometheus.NewRegistry(),
			)
			require.NoError(t, err)

			require.NoError(t, scrapeManager.ApplyConfig(&config.Config{
				GlobalConfig: config.GlobalConfig{
					// Disable regular scrapes.
					ScrapeInterval: model.Duration(9999 * time.Minute),
					ScrapeTimeout:  model.Duration(5 * time.Second),
					// Ensure the proto is chosen. We need proto as it's the only protocol
					// with the CT parsing support.
					ScrapeProtocols: []config.ScrapeProtocol{config.PrometheusProto},
				},
				ScrapeConfigs: []*config.ScrapeConfig{{JobName: "test"}},
			}))

			once := sync.Once{}
			// Start a fake HTTP target that allows one scrape only.
			server := httptest.NewServer(
				http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
					fail := true
					once.Do(func() {
						fail = false
						w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`)

						ctrType := dto.MetricType_COUNTER
						w.Write(protoMarshalDelimited(t, &dto.MetricFamily{
							Name:   proto.String(mName),
							Type:   &ctrType,
							Metric: []*dto.Metric{{Counter: tc.counterSample}},
						}))
					})

					if fail {
						w.WriteHeader(http.StatusInternalServerError)
					}
				}),
			)
			defer server.Close()

			serverURL, err := url.Parse(server.URL)
			require.NoError(t, err)

			// Add fake target directly into tsets + reload. Normally users would use
			// Manager.Run and wait for minimum 5s refresh interval.
			scrapeManager.updateTsets(map[string][]*targetgroup.Group{
				"test": {{
					Targets: []model.LabelSet{{
						model.SchemeLabel:  model.LabelValue(serverURL.Scheme),
						model.AddressLabel: model.LabelValue(serverURL.Host),
					}},
				}},
			})
			scrapeManager.reload()

			// Wait for one scrape.
			ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
			defer cancel()
			require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error {
				if countFloatSamples(app, mName) != len(tc.expectedValues) {
					return fmt.Errorf("expected %v samples", tc.expectedValues)
				}
				return nil
			}), "after 1 minute")
			scrapeManager.Stop()

			require.Equal(t, tc.expectedValues, getResultFloats(app, mName))
		})
	}
}

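// countFloatSamples returns how many float samples for the given metric name
// the appender has collected.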
func countFloatSamples(a *collectResultAppender, expectedMetricName string) (count int) {
	a.mtx.Lock()
	defer a.mtx.Unlock()

	for _, f := range a.resultFloats {
		if f.metric.Get(model.MetricNameLabel) == expectedMetricName {
			count++
		}
	}
	return count
}

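// getResultFloats returns the collected float sample values for the given
// metric name, in append order.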
func getResultFloats(app *collectResultAppender, expectedMetricName string) (result []float64) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	for _, f := range app.resultFloats {
		if f.metric.Get(model.MetricNameLabel) == expectedMetricName {
			result = append(result, f.f)
		}
	}
	return result
}

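// TestUnregisterMetrics ensures manager metrics can be unregistered so that a
// second manager can be created against the same registry.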
func TestUnregisterMetrics(t *testing.T) {
	reg := prometheus.NewRegistry()
	// Check that all metrics can be unregistered, allowing a second manager to be created.
	for i := 0; i < 2; i++ {
		opts := Options{}
		manager, err := NewManager(&opts, nil, nil, reg)
		require.NotNil(t, manager)
		require.NoError(t, err)
		// Unregister all metrics.
		manager.UnregisterMetrics()
	}
}