// pangolin_exporter — pangolin_database.go (568 lines, 20.2 KB).
// Copyright 2022 The Prometheus Authors
2
// Licensed under the Apache License, Version 2.0 (the "License");
3
// you may not use this file except in compliance with the License.
4
// You may obtain a copy of the License at
5
//
6
// http://www.apache.org/licenses/LICENSE-2.0
7
//
8
// Unless required by applicable law or agreed to in writing, software
9
// distributed under the License is distributed on an "AS IS" BASIS,
10
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
// See the License for the specific language governing permissions and
12
// limitations under the License.
13

// Package collector contains Prometheus metric collectors for the
// pangolin exporter (a PostgreSQL-compatible database).
package collector

import (
	"context"
	"database/sql"
	"strconv"

	"github.com/blang/semver/v4"
	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"
)

// statDatabaseSubsystem is the subsystem part of the fully-qualified name of
// every metric exported by this collector.
const statDatabaseSubsystem = "pangolin_database"

// init registers this collector with the exporter's collector registry;
// it is enabled by default (defaultEnabled).
func init() {
	registerCollector(statDatabaseSubsystem, defaultEnabled, NewPGdatabaseCollector)
}


// PGdatabaseCollector collects per-database activity statistics from
// pg_stat_database together with XID wraparound headroom.
type PGdatabaseCollector struct {
	log log.Logger // logger supplied via collectorConfig
}


37
func NewPGdatabaseCollector(config collectorConfig) (Collector, error) {
38
	return &PGdatabaseCollector{log: config.logger}, nil
39
}
40

41
var (
42
	// По просьбе НТ, для совместимости оставил имена pg_stat_database_xact_rollback, pg_stat_database_xact_commit
43
	pg_commit = prometheus.NewDesc(
44
		prometheus.BuildFQName(
45
			"pg",
46
			"stat_database",
47
			"xact_commit",
48
		),
49
		"Total number of transactions had been committed.",
50
		[]string{"datname"},
51
		prometheus.Labels{},
52
	)
53
	pg_rollback = prometheus.NewDesc(
54
		prometheus.BuildFQName(
55
			"pg",
56
			"stat_database",
57
			"xact_rollback",
58
		),
59
		"Total number of transactions had been rolled back.",
60
		[]string{"datname"},
61
		prometheus.Labels{},
62
	)
63

64
	commits = prometheus.NewDesc(
65
		prometheus.BuildFQName(
66
			"",
67
			statDatabaseSubsystem,
68
			"xact_commits_total",
69
		),
70
		"Total number of transactions had been committed.",
71
		[]string{"database"},
72
		prometheus.Labels{},
73
	)
74
	rollbacks = prometheus.NewDesc(
75
		prometheus.BuildFQName(
76
			"",
77
			statDatabaseSubsystem,
78
			"xact_rollbacks_total",
79
		),
80
		"Total number of transactions had been rolled back.",
81
		[]string{"database"},
82
		prometheus.Labels{},
83
	)
84
	blocks = prometheus.NewDesc(
85
		prometheus.BuildFQName(
86
			"",
87
			statDatabaseSubsystem,
88
			"blocks_total",
89
		),
90
		"Total number of disk blocks had been accessed by each type of access.",
91
		[]string{"database", "access"},
92
		prometheus.Labels{},
93
	)
94
	tuplesReturned = prometheus.NewDesc(
95
		prometheus.BuildFQName(
96
			"",
97
			statDatabaseSubsystem,
98
			"tuples_returned_total",
99
		),
100
		"Total number of rows returned per each database.",
101
		[]string{"database"},
102
		prometheus.Labels{},
103
	)
104
	tuplesFetched = prometheus.NewDesc(
105
		prometheus.BuildFQName(
106
			"",
107
			statDatabaseSubsystem,
108
			"tuples_fetched_total",
109
		),
110
		"Total number of rows fetched per each database.",
111
		[]string{"database"},
112
		prometheus.Labels{},
113
	)
114
	tuplesInserted = prometheus.NewDesc(
115
		prometheus.BuildFQName(
116
			"",
117
			statDatabaseSubsystem,
118
			"tuples_inserted_total",
119
		),
120
		"Total number of rows inserted per each database.",
121
		[]string{"database"},
122
		prometheus.Labels{},
123
	)
124
	tuplesUpdated = prometheus.NewDesc(
125
		prometheus.BuildFQName(
126
			"",
127
			statDatabaseSubsystem,
128
			"tuples_updated_total",
129
		),
130
		"Total number of rows updated per each database.",
131
		[]string{"database"},
132
		prometheus.Labels{},
133
	)
134
	tuplesDeleted = prometheus.NewDesc(
135
		prometheus.BuildFQName(
136
			"",
137
			statDatabaseSubsystem,
138
			"tuples_deleted_total",
139
		),
140
		"Total number of rows deleted per each database.",
141
		[]string{"database"},
142
		prometheus.Labels{},
143
	)
144
	tempbytes_ = prometheus.NewDesc(
145
		prometheus.BuildFQName(
146
			"",
147
			statDatabaseSubsystem,
148
			"temp_bytes_total",
149
		),
150
		"Total number of temporary files created by queries.",
151
		[]string{"database"},
152
		prometheus.Labels{},
153
	)
154
	tempfiles_ = prometheus.NewDesc(
155
		prometheus.BuildFQName(
156
			"",
157
			statDatabaseSubsystem,
158
			"temp_files_total",
159
		),
160
		"Total number of temporary files created by queries.",
161
		[]string{"database"},
162
		prometheus.Labels{},
163
	)
164
	conflicts_ = prometheus.NewDesc(
165
		prometheus.BuildFQName(
166
			"",
167
			statDatabaseSubsystem,
168
			"conflicts_total",
169
		),
170
		"Total number of recovery conflicts occurred.",
171
		[]string{"database"},
172
		prometheus.Labels{},
173
	)
174
	deadlocks_ = prometheus.NewDesc(
175
		prometheus.BuildFQName(
176
			"",
177
			statDatabaseSubsystem,
178
			"deadlocks_total",
179
		),
180
		"Total number of deadlocks occurred.",
181
		[]string{"database"},
182
		prometheus.Labels{},
183
	)
184
	csumfails_ = prometheus.NewDesc(
185
		prometheus.BuildFQName(
186
			"",
187
			statDatabaseSubsystem,
188
			"checksum_failures_total",
189
		),
190
		"Total number of checksum failures occurred.",
191
		[]string{"database"},
192
		prometheus.Labels{},
193
	)
194
	csumlastfailunixts_ = prometheus.NewDesc(
195
		prometheus.BuildFQName(
196
			"",
197
			statDatabaseSubsystem,
198
			"last_checksum_failure_seconds",
199
		),
200
		"Time of the last checksum failure occurred, in unixtime.",
201
		[]string{"database"},
202
		prometheus.Labels{},
203
	)
204
	blockstime = prometheus.NewDesc(
205
		prometheus.BuildFQName(
206
			"",
207
			statDatabaseSubsystem,
208
			"blk_time_seconds_total",
209
		),
210
		"Total time spent accessing data blocks by backends in this database in each access type, in seconds.",
211
		[]string{"database", "type"},
212
		prometheus.Labels{},
213
	)
214
	sessionalltime = prometheus.NewDesc(
215
		prometheus.BuildFQName(
216
			"",
217
			statDatabaseSubsystem,
218
			"session_time_seconds_all_total",
219
		),
220
		"Total time spent by database sessions in this database in all states, in seconds.",
221
		[]string{"database"},
222
		prometheus.Labels{},
223
	)
224
	sessiontime_ = prometheus.NewDesc(
225
		prometheus.BuildFQName(
226
			"",
227
			statDatabaseSubsystem,
228
			"session_time_seconds_total",
229
		),
230
		"Total time spent by database sessions in this database in each state, in seconds.",
231
		[]string{"database", "state"},
232
		prometheus.Labels{},
233
	)
234
	sessionsall = prometheus.NewDesc(
235
		prometheus.BuildFQName(
236
			"",
237
			statDatabaseSubsystem,
238
			"sessions_all_total",
239
		),
240
		"Total number of sessions established to this database.",
241
		[]string{"database"},
242
		prometheus.Labels{},
243
	)
244
	sessions_ = prometheus.NewDesc(
245
		prometheus.BuildFQName(
246
			"",
247
			statDatabaseSubsystem,
248
			"sessions_total",
249
		),
250
		"Total number of sessions established to this database and closed by each reason.",
251
		[]string{"database", "reason"},
252
		prometheus.Labels{},
253
	)
254
	sizes = prometheus.NewDesc(
255
		prometheus.BuildFQName(
256
			"",
257
			statDatabaseSubsystem,
258
			"size_bytes",
259
		),
260
		"Total size of the database, in bytes.",
261
		[]string{"database"},
262
		prometheus.Labels{},
263
	)
264
	statsage_ = prometheus.NewDesc(
265
		prometheus.BuildFQName(
266
			"",
267
			statDatabaseSubsystem,
268
			"stats_age_seconds_total",
269
		),
270
		"The age of the databases activity statistics, in seconds.",
271
		[]string{"database"},
272
		prometheus.Labels{},
273
	)
274
	xidlimit_ = prometheus.NewDesc(
275
		prometheus.BuildFQName(
276
			"",
277
			statDatabaseSubsystem,
278
			"left_before_wraparound",
279
		),
280
		"The number of transactions left before force shutdown due to XID wraparound.",
281
		[]string{"xid_from"},
282
		prometheus.Labels{},
283
	)
284

285
	// databasesQuery11 = "SELECT " +
286
	// 	"coalesce(datname, 'global') AS database, " +
287
	// 	"xact_commit, xact_rollback, blks_read, blks_hit, tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted, " +
288
	// 	"conflicts, temp_files, temp_bytes, deadlocks, blk_read_time, blk_write_time, pg_database_size(datname) as size_bytes, " +
289
	// 	"coalesce(extract('epoch' from age(now(), stats_reset)), 0) as stats_age_seconds " +
290
	// 	"FROM pg_stat_database WHERE datname IN (SELECT datname FROM pg_database WHERE datallowconn AND NOT datistemplate) " +
291
	// 	"OR datname IS NULL"
292

293
	databasesQuery13 = "SELECT " +
294
		"coalesce(datname, 'global') AS database, " +
295
		"xact_commit, xact_rollback, blks_read, blks_hit, tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted, " +
296
		"conflicts, temp_files, temp_bytes, deadlocks, checksum_failures, coalesce(extract(epoch from checksum_last_failure), 0) AS last_checksum_failure_unixtime, " +
297
		"blk_read_time, blk_write_time, pg_database_size(datname) as size_bytes, " +
298
		"coalesce(extract('epoch' from age(now(), stats_reset)), 0) as stats_age_seconds " +
299
		"FROM pg_stat_database WHERE datname IN (SELECT datname FROM pg_database WHERE datallowconn AND NOT datistemplate) " +
300
		"OR datname IS NULL"
301

302
	databasesQueryLatest = "SELECT " +
303
		"coalesce(datname, 'global') AS database, " +
304
		"xact_commit, xact_rollback, blks_read, blks_hit, tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted, " +
305
		"conflicts, temp_files, temp_bytes, deadlocks, checksum_failures, coalesce(extract(epoch from checksum_last_failure), 0) AS last_checksum_failure_unixtime, " +
306
		"blk_read_time, blk_write_time, " +
307
		"session_time, active_time, idle_in_transaction_time, sessions, sessions_abandoned, sessions_fatal, sessions_killed, " +
308
		"pg_database_size(datname) as size_bytes, " +
309
		"coalesce(extract('epoch' from age(now(), stats_reset)), 0) as stats_age_seconds " +
310
		"FROM pg_stat_database WHERE datname IN (SELECT datname FROM pg_database WHERE datallowconn AND NOT datistemplate) " +
311
		"OR datname IS NULL"
312

313
	xidLimitQuery = `SELECT 'database' AS src, 2147483647 - greatest(max(age(datfrozenxid)), max(age(coalesce(nullif(datminmxid, 1), datfrozenxid)))) AS to_limit FROM pg_database
314
	UNION SELECT 'prepared_xacts' AS src, 2147483647 - coalesce(max(age(transaction)), 0) AS to_limit FROM pg_prepared_xacts
315
	UNION SELECT 'replication_slots' AS src, 2147483647 - greatest(coalesce(min(age(xmin)), 0), coalesce(min(age(catalog_xmin)), 0)) AS to_limit 
316
	FROM pg_replication_slots;`
317
)
318

319
func (c *PGdatabaseCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error {
320
	db := instance.getDB()
321
	after13 := instance.version.Compare(semver.MustParse("13.0.0"))
322
	after14 := instance.version.Compare(semver.MustParse("14.0.0"))
323
	if after13 < 0 {
324
		level.Warn(c.log).Log("msg", "pangolin_database collector is not available on PostgreSQL < 13.0.0, skipping")
325
		return nil
326
	}
327
	type xid_str struct {
328
		src      sql.NullString
329
		to_limit sql.NullString
330
	}
331
	type xidLimitStats struct {
332
		database float64 // based on pg_database.datfrozenxid and datminmxid
333
		prepared float64 // based on pg_prepared_xacts.transaction
334
		replSlot float64 // based on pg_replication_slots.xmin and catalog_xmin
335
	}
336
	rows_xid, err_xid := db.QueryContext(ctx, xidLimitQuery)
337
	if err_xid != nil {
338
		return err_xid
339
	}
340
	defer rows_xid.Close()
341

342
	var xid xid_str
343
	var xids = make([]xid_str, 0)
344
	var xidLimit xidLimitStats
345
	for rows_xid.Next() {
346
		err_xid := rows_xid.Scan(
347
			&xid.src, &xid.to_limit,
348
		)
349
		if err_xid != nil {
350
			return err_xid
351
		}
352
		xids = append(xids, xid)
353
	}
354

355
	for _, row := range xids {
356
		// Get data value and convert it to float64 used by Prometheus.
357
		value, err := strconv.ParseFloat(row.to_limit.String, 64)
358
		if err != nil {
359
			level.Warn(c.log).Log("invalid input, parse '%s' failed: %s; skip", row.to_limit.String, err)
360
			continue
361
		}
362
		switch row.src.String {
363
		case "database":
364
			xidLimit.database = value
365
		case "prepared_xacts":
366
			xidLimit.prepared = value
367
		case "replication_slots":
368
			xidLimit.replSlot = value
369
		}
370
	}
371

372
	ch <- prometheus.MustNewConstMetric(xidlimit_, prometheus.CounterValue, float64(xidLimit.database), "pg_database")
373
	ch <- prometheus.MustNewConstMetric(xidlimit_, prometheus.CounterValue, float64(xidLimit.prepared), "pg_prepared_xacts")
374
	ch <- prometheus.MustNewConstMetric(xidlimit_, prometheus.CounterValue, float64(xidLimit.replSlot), "pg_replication_slots")
375

376
	query := databasesQuery13
377
	if after14 >= 0 {
378
		query = databasesQueryLatest
379
	}
380

381
	rows, err := db.QueryContext(ctx, query)
382
	if err != nil {
383
		return err
384
	}
385
	defer rows.Close()
386

387
	var database sql.NullString
388
	var xactcommit, xactrollback, blksread, blkshit, tupreturned, tupfetched, tupinserted sql.NullFloat64
389
	var tupupdated, tupdeleted, conflicts, tempfiles, tempbytes, deadlocks, csumfails, csumlastfailunixts sql.NullFloat64
390
	var blkreadtime, blkwritetime sql.NullFloat64
391
	var sessiontime, activetime, idletxtime, sessions, sessabandoned, sessfatal, sesskilled sql.NullFloat64
392
	var sizebytes, statsage sql.NullFloat64
393

394
	for rows.Next() {
395
		if after13 >= 0 && after14 < 0 {
396
			err := rows.Scan(
397
				&database,
398
				&xactcommit, &xactrollback, &blksread, &blkshit, &tupreturned, &tupfetched, &tupinserted,
399
				&tupupdated, &tupdeleted, &conflicts, &tempfiles, &tempbytes, &deadlocks, &csumfails, &csumlastfailunixts,
400
				&blkreadtime, &blkwritetime,
401
				&sizebytes, &statsage,
402
			)
403
			if err != nil {
404
				return err
405
			}
406
		}
407
		if after14 >= 0 {
408
			err := rows.Scan(
409
				&database,
410
				&xactcommit, &xactrollback, &blksread, &blkshit, &tupreturned, &tupfetched, &tupinserted,
411
				&tupupdated, &tupdeleted, &conflicts, &tempfiles, &tempbytes, &deadlocks, &csumfails, &csumlastfailunixts,
412
				&blkreadtime, &blkwritetime,
413
				&sessiontime, &activetime, &idletxtime, &sessions, &sessabandoned, &sessfatal, &sesskilled,
414
				&sizebytes, &statsage,
415
			)
416
			if err != nil {
417
				return err
418
			}
419
			if !sessiontime.Valid {
420
				level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no sessiontime")
421
				continue
422
			}
423
			if !activetime.Valid {
424
				level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no activetime")
425
				continue
426
			}
427
			if !idletxtime.Valid {
428
				level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no idletxtime")
429
				continue
430
			}
431
			if !sessions.Valid {
432
				level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no sessions")
433
				continue
434
			}
435
			if !sessabandoned.Valid {
436
				level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no sessabandoned")
437
				continue
438
			}
439
			if !sessfatal.Valid {
440
				level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no sessfatal")
441
				continue
442
			}
443
			if !sesskilled.Valid {
444
				level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no sesskilled")
445
				continue
446
			}
447
			ch <- prometheus.MustNewConstMetric(sessionalltime, prometheus.CounterValue, float64(sessiontime.Float64), database.String)
448
			ch <- prometheus.MustNewConstMetric(sessiontime_, prometheus.CounterValue, float64(activetime.Float64), database.String, "active")
449
			ch <- prometheus.MustNewConstMetric(sessiontime_, prometheus.CounterValue, float64(idletxtime.Float64), database.String, "idle_in_transaction")
450
			ch <- prometheus.MustNewConstMetric(sessiontime_, prometheus.CounterValue, float64(sessiontime.Float64)-(float64(activetime.Float64)+float64(idletxtime.Float64)), database.String, "idle")
451
			ch <- prometheus.MustNewConstMetric(sessionsall, prometheus.CounterValue, float64(sessions.Float64), database.String)
452
			ch <- prometheus.MustNewConstMetric(sessions_, prometheus.CounterValue, float64(sessabandoned.Float64), database.String, "abandoned")
453
			ch <- prometheus.MustNewConstMetric(sessions_, prometheus.CounterValue, float64(sessfatal.Float64), database.String, "fatal")
454
			ch <- prometheus.MustNewConstMetric(sessions_, prometheus.CounterValue, float64(sesskilled.Float64), database.String, "killed")
455
			ch <- prometheus.MustNewConstMetric(sessions_, prometheus.CounterValue, float64(sessions.Float64)-(float64(sessabandoned.Float64)+float64(sessfatal.Float64)+float64(sesskilled.Float64)), database.String, "normal")
456
		}
457
		if !database.Valid {
458
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no database")
459
			continue
460
		}
461
		if !xactcommit.Valid {
462
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no xactcommit")
463
			continue
464
		}
465
		if !xactrollback.Valid {
466
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no xactrollback")
467
			continue
468
		}
469
		if !blksread.Valid {
470
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no blksread")
471
			continue
472
		}
473
		if !blkshit.Valid {
474
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no blkshit")
475
			continue
476
		}
477
		if !tupreturned.Valid {
478
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no tupreturned")
479
			continue
480
		}
481
		if !tupfetched.Valid {
482
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no tupfetched")
483
			continue
484
		}
485
		if !tupinserted.Valid {
486
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no tupinserted")
487
			continue
488
		}
489
		if !tupupdated.Valid {
490
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no tupupdated")
491
			continue
492
		}
493
		if !tupdeleted.Valid {
494
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no tupdeleted")
495
			continue
496
		}
497
		if !conflicts.Valid {
498
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no conflicts")
499
			continue
500
		}
501
		if !tempfiles.Valid {
502
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no tempfiles")
503
			continue
504
		}
505
		if !tempbytes.Valid {
506
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no tempbytes")
507
			continue
508
		}
509
		if !deadlocks.Valid {
510
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no deadlocks")
511
			continue
512
		}
513
		if !csumfails.Valid {
514
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no csumfails")
515
			continue
516
		}
517
		if !csumlastfailunixts.Valid {
518
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no csumlastfailunixts")
519
			continue
520
		}
521
		if !blkreadtime.Valid {
522
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no blkreadtime")
523
			continue
524
		}
525
		if !blkwritetime.Valid {
526
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no blkwritetime")
527
			continue
528
		}
529
		if !sizebytes.Valid {
530
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no sizebytes")
531
			continue
532
		}
533
		if !statsage.Valid {
534
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no statsage")
535
			continue
536
		}
537
		// По просьбе НТ, для совместимости оставил имена pg_stat_database_xact_rollback, pg_stat_database_xact_commit
538
		ch <- prometheus.MustNewConstMetric(pg_commit, prometheus.CounterValue, float64(xactcommit.Float64), database.String)
539
		ch <- prometheus.MustNewConstMetric(pg_rollback, prometheus.CounterValue, float64(xactrollback.Float64), database.String)
540

541
		ch <- prometheus.MustNewConstMetric(commits, prometheus.CounterValue, float64(xactcommit.Float64), database.String)
542
		ch <- prometheus.MustNewConstMetric(rollbacks, prometheus.CounterValue, float64(xactrollback.Float64), database.String)
543
		ch <- prometheus.MustNewConstMetric(blocks, prometheus.CounterValue, float64(blksread.Float64), database.String, "read")
544
		ch <- prometheus.MustNewConstMetric(blocks, prometheus.CounterValue, float64(blkshit.Float64), database.String, "hit")
545

546
		ch <- prometheus.MustNewConstMetric(tuplesReturned, prometheus.CounterValue, float64(tupreturned.Float64), database.String)
547
		ch <- prometheus.MustNewConstMetric(tuplesFetched, prometheus.CounterValue, float64(tupfetched.Float64), database.String)
548
		ch <- prometheus.MustNewConstMetric(tuplesInserted, prometheus.CounterValue, float64(tupinserted.Float64), database.String)
549
		ch <- prometheus.MustNewConstMetric(tuplesUpdated, prometheus.CounterValue, float64(tupupdated.Float64), database.String)
550
		ch <- prometheus.MustNewConstMetric(tuplesDeleted, prometheus.CounterValue, float64(tupdeleted.Float64), database.String)
551

552
		ch <- prometheus.MustNewConstMetric(tempbytes_, prometheus.CounterValue, float64(tempbytes.Float64), database.String)
553
		ch <- prometheus.MustNewConstMetric(tempfiles_, prometheus.CounterValue, float64(tempfiles.Float64), database.String)
554
		ch <- prometheus.MustNewConstMetric(conflicts_, prometheus.CounterValue, float64(conflicts.Float64), database.String)
555
		ch <- prometheus.MustNewConstMetric(deadlocks_, prometheus.CounterValue, float64(deadlocks.Float64), database.String)
556

557
		ch <- prometheus.MustNewConstMetric(blockstime, prometheus.CounterValue, float64(blkreadtime.Float64), database.String, "read")
558
		ch <- prometheus.MustNewConstMetric(blockstime, prometheus.CounterValue, float64(blkwritetime.Float64), database.String, "write")
559
		ch <- prometheus.MustNewConstMetric(sizes, prometheus.CounterValue, float64(sizebytes.Float64), database.String)
560
		ch <- prometheus.MustNewConstMetric(statsage_, prometheus.CounterValue, float64(statsage.Float64), database.String)
561

562
		ch <- prometheus.MustNewConstMetric(csumfails_, prometheus.CounterValue, float64(csumfails.Float64), database.String)
563
		ch <- prometheus.MustNewConstMetric(csumlastfailunixts_, prometheus.CounterValue, float64(csumlastfailunixts.Float64), database.String)
564

565
	}
566

567
	return nil
568
}