pangolin_exporter

Форк
0
/
pg_stat_database.go 
499 строк · 12.9 Кб
1
// Copyright 2022 The Prometheus Authors
2
// Licensed under the Apache License, Version 2.0 (the "License");
3
// you may not use this file except in compliance with the License.
4
// You may obtain a copy of the License at
5
//
6
// http://www.apache.org/licenses/LICENSE-2.0
7
//
8
// Unless required by applicable law or agreed to in writing, software
9
// distributed under the License is distributed on an "AS IS" BASIS,
10
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
// See the License for the specific language governing permissions and
12
// limitations under the License.
13

14
package collector
15

16
import (
17
	"context"
18
	"database/sql"
19

20
	"github.com/go-kit/log"
21
	"github.com/go-kit/log/level"
22
	"github.com/prometheus/client_golang/prometheus"
23
)
24

25
// statDatabaseSubsystem is the metric subsystem name under which all
// pg_stat_database metrics are exported.
const statDatabaseSubsystem = "stat_database"
26

27
// init registers this collector with the collector registry under the
// stat_database subsystem. It is disabled by default.
func init() {
	registerCollector(statDatabaseSubsystem, defaultDisabled, NewPGStatDatabaseCollector)
}
30

31
// PGStatDatabaseCollector exposes one metric per column of the
// pg_stat_database view, labeled by database OID ("datid") and
// name ("datname").
type PGStatDatabaseCollector struct {
	log log.Logger
}
34

35
// NewPGStatDatabaseCollector returns a collector for the pg_stat_database
// view. The error result is always nil; it exists to satisfy the collector
// factory signature used by registerCollector.
func NewPGStatDatabaseCollector(config collectorConfig) (Collector, error) {
	return &PGStatDatabaseCollector{log: config.logger}, nil
}
38

39
var (
40
	statDatabaseNumbackends = prometheus.NewDesc(
41
		prometheus.BuildFQName(
42
			namespace_pangolin,
43
			statDatabaseSubsystem,
44
			"numbackends",
45
		),
46
		"Number of backends currently connected to this database. This is the only column in this view that returns a value reflecting current state; all other columns return the accumulated values since the last reset.",
47
		[]string{"datid", "datname"},
48
		prometheus.Labels{},
49
	)
50
	statDatabaseXactCommit = prometheus.NewDesc(
51
		prometheus.BuildFQName(
52
			namespace_pangolin,
53
			statDatabaseSubsystem,
54
			"xact_commit",
55
		),
56
		"Number of transactions in this database that have been committed",
57
		[]string{"datid", "datname"},
58
		prometheus.Labels{},
59
	)
60
	statDatabaseXactRollback = prometheus.NewDesc(
61
		prometheus.BuildFQName(
62
			namespace_pangolin,
63
			statDatabaseSubsystem,
64
			"xact_rollback",
65
		),
66
		"Number of transactions in this database that have been rolled back",
67
		[]string{"datid", "datname"},
68
		prometheus.Labels{},
69
	)
70
	statDatabaseBlksRead = prometheus.NewDesc(
71
		prometheus.BuildFQName(
72
			namespace_pangolin,
73
			statDatabaseSubsystem,
74
			"blks_read",
75
		),
76
		"Number of disk blocks read in this database",
77
		[]string{"datid", "datname"},
78
		prometheus.Labels{},
79
	)
80
	statDatabaseBlksHit = prometheus.NewDesc(
81
		prometheus.BuildFQName(
82
			namespace_pangolin,
83
			statDatabaseSubsystem,
84
			"blks_hit",
85
		),
86
		"Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)",
87
		[]string{"datid", "datname"},
88
		prometheus.Labels{},
89
	)
90
	statDatabaseTupReturned = prometheus.NewDesc(
91
		prometheus.BuildFQName(
92
			namespace_pangolin,
93
			statDatabaseSubsystem,
94
			"tup_returned",
95
		),
96
		"Number of rows returned by queries in this database",
97
		[]string{"datid", "datname"},
98
		prometheus.Labels{},
99
	)
100
	statDatabaseTupFetched = prometheus.NewDesc(
101
		prometheus.BuildFQName(
102
			namespace_pangolin,
103
			statDatabaseSubsystem,
104
			"tup_fetched",
105
		),
106
		"Number of rows fetched by queries in this database",
107
		[]string{"datid", "datname"},
108
		prometheus.Labels{},
109
	)
110
	statDatabaseTupInserted = prometheus.NewDesc(
111
		prometheus.BuildFQName(
112
			namespace_pangolin,
113
			statDatabaseSubsystem,
114
			"tup_inserted",
115
		),
116
		"Number of rows inserted by queries in this database",
117
		[]string{"datid", "datname"},
118
		prometheus.Labels{},
119
	)
120
	statDatabaseTupUpdated = prometheus.NewDesc(
121
		prometheus.BuildFQName(
122
			namespace_pangolin,
123
			statDatabaseSubsystem,
124
			"tup_updated",
125
		),
126
		"Number of rows updated by queries in this database",
127
		[]string{"datid", "datname"},
128
		prometheus.Labels{},
129
	)
130
	statDatabaseTupDeleted = prometheus.NewDesc(
131
		prometheus.BuildFQName(
132
			namespace_pangolin,
133
			statDatabaseSubsystem,
134
			"tup_deleted",
135
		),
136
		"Number of rows deleted by queries in this database",
137
		[]string{"datid", "datname"},
138
		prometheus.Labels{},
139
	)
140
	statDatabaseConflicts = prometheus.NewDesc(
141
		prometheus.BuildFQName(
142
			namespace_pangolin,
143
			statDatabaseSubsystem,
144
			"conflicts",
145
		),
146
		"Number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see pg_stat_database_conflicts for details.)",
147
		[]string{"datid", "datname"},
148
		prometheus.Labels{},
149
	)
150
	statDatabaseTempFiles = prometheus.NewDesc(
151
		prometheus.BuildFQName(
152
			namespace_pangolin,
153
			statDatabaseSubsystem,
154
			"temp_files",
155
		),
156
		"Number of temporary files created by queries in this database. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing), and regardless of the log_temp_files setting.",
157
		[]string{"datid", "datname"},
158
		prometheus.Labels{},
159
	)
160
	statDatabaseTempBytes = prometheus.NewDesc(
161
		prometheus.BuildFQName(
162
			namespace_pangolin,
163
			statDatabaseSubsystem,
164
			"temp_bytes",
165
		),
166
		"Total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting.",
167
		[]string{"datid", "datname"},
168
		prometheus.Labels{},
169
	)
170
	statDatabaseDeadlocks = prometheus.NewDesc(
171
		prometheus.BuildFQName(
172
			namespace_pangolin,
173
			statDatabaseSubsystem,
174
			"deadlocks",
175
		),
176
		"Number of deadlocks detected in this database",
177
		[]string{"datid", "datname"},
178
		prometheus.Labels{},
179
	)
180
	statDatabaseBlkReadTime = prometheus.NewDesc(
181
		prometheus.BuildFQName(
182
			namespace_pangolin,
183
			statDatabaseSubsystem,
184
			"blk_read_time",
185
		),
186
		"Time spent reading data file blocks by backends in this database, in milliseconds",
187
		[]string{"datid", "datname"},
188
		prometheus.Labels{},
189
	)
190
	statDatabaseBlkWriteTime = prometheus.NewDesc(
191
		prometheus.BuildFQName(
192
			namespace_pangolin,
193
			statDatabaseSubsystem,
194
			"blk_write_time",
195
		),
196
		"Time spent writing data file blocks by backends in this database, in milliseconds",
197
		[]string{"datid", "datname"},
198
		prometheus.Labels{},
199
	)
200
	statDatabaseStatsReset = prometheus.NewDesc(prometheus.BuildFQName(
201
		namespace_pangolin,
202
		statDatabaseSubsystem,
203
		"stats_reset",
204
	),
205
		"Time at which these statistics were last reset",
206
		[]string{"datid", "datname"},
207
		prometheus.Labels{},
208
	)
209
	// statDatabaseActiveTime = prometheus.NewDesc(prometheus.BuildFQName(
210
	// 	namespace_pangolin,
211
	// 	statDatabaseSubsystem,
212
	// 	"active_time_seconds_total",
213
	// ),
214
	// 	"Time spent executing SQL statements in this database, in seconds",
215
	// 	[]string{"datid", "datname"},
216
	// 	prometheus.Labels{},
217
	// )
218

219
	statDatabaseQuery = `
220
		SELECT
221
			datid
222
			,datname
223
			,numbackends
224
			,xact_commit
225
			,xact_rollback
226
			,blks_read
227
			,blks_hit
228
			,tup_returned
229
			,tup_fetched
230
			,tup_inserted
231
			,tup_updated
232
			,tup_deleted
233
			,conflicts
234
			,temp_files
235
			,temp_bytes
236
			,deadlocks
237
			,blk_read_time
238
			,blk_write_time
239
			,stats_reset
240
		FROM pg_stat_database;
241
	`
242
) // ,active_time
243

244
func (c *PGStatDatabaseCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error {
245
	db := instance.getDB()
246
	rows, err := db.QueryContext(ctx,
247
		statDatabaseQuery,
248
	)
249
	if err != nil {
250
		return err
251
	}
252
	defer rows.Close()
253

254
	for rows.Next() {
255
		var datid, datname sql.NullString
256
		var numBackends, xactCommit, xactRollback, blksRead, blksHit, tupReturned, tupFetched, tupInserted, tupUpdated, tupDeleted, conflicts, tempFiles, tempBytes, deadlocks, blkReadTime, blkWriteTime sql.NullFloat64 //, activeTime
257
		var statsReset sql.NullTime
258

259
		err := rows.Scan(
260
			&datid,
261
			&datname,
262
			&numBackends,
263
			&xactCommit,
264
			&xactRollback,
265
			&blksRead,
266
			&blksHit,
267
			&tupReturned,
268
			&tupFetched,
269
			&tupInserted,
270
			&tupUpdated,
271
			&tupDeleted,
272
			&conflicts,
273
			&tempFiles,
274
			&tempBytes,
275
			&deadlocks,
276
			&blkReadTime,
277
			&blkWriteTime,
278
			// &activeTime,
279
			&statsReset,
280
		)
281
		if err != nil {
282
			return err
283
		}
284

285
		if !datid.Valid {
286
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no datid")
287
			continue
288
		}
289
		if !datname.Valid {
290
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no datname")
291
			continue
292
		}
293
		if !numBackends.Valid {
294
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no numbackends")
295
			continue
296
		}
297
		if !xactCommit.Valid {
298
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no xact_commit")
299
			continue
300
		}
301
		if !xactRollback.Valid {
302
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no xact_rollback")
303
			continue
304
		}
305
		if !blksRead.Valid {
306
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no blks_read")
307
			continue
308
		}
309
		if !blksHit.Valid {
310
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no blks_hit")
311
			continue
312
		}
313
		if !tupReturned.Valid {
314
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no tup_returned")
315
			continue
316
		}
317
		if !tupFetched.Valid {
318
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no tup_fetched")
319
			continue
320
		}
321
		if !tupInserted.Valid {
322
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no tup_inserted")
323
			continue
324
		}
325
		if !tupUpdated.Valid {
326
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no tup_updated")
327
			continue
328
		}
329
		if !tupDeleted.Valid {
330
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no tup_deleted")
331
			continue
332
		}
333
		if !conflicts.Valid {
334
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no conflicts")
335
			continue
336
		}
337
		if !tempFiles.Valid {
338
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no temp_files")
339
			continue
340
		}
341
		if !tempBytes.Valid {
342
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no temp_bytes")
343
			continue
344
		}
345
		if !deadlocks.Valid {
346
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no deadlocks")
347
			continue
348
		}
349
		if !blkReadTime.Valid {
350
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no blk_read_time")
351
			continue
352
		}
353
		if !blkWriteTime.Valid {
354
			level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no blk_write_time")
355
			continue
356
		}
357
		// if !activeTime.Valid {
358
		// 	level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no active_time")
359
		// 	continue
360
		// }
361

362
		statsResetMetric := 0.0
363
		if !statsReset.Valid {
364
			level.Debug(c.log).Log("msg", "No metric for stats_reset, will collect 0 instead")
365
		}
366
		if statsReset.Valid {
367
			statsResetMetric = float64(statsReset.Time.Unix())
368
		}
369

370
		labels := []string{datid.String, datname.String}
371

372
		ch <- prometheus.MustNewConstMetric(
373
			statDatabaseNumbackends,
374
			prometheus.GaugeValue,
375
			numBackends.Float64,
376
			labels...,
377
		)
378

379
		ch <- prometheus.MustNewConstMetric(
380
			statDatabaseXactCommit,
381
			prometheus.CounterValue,
382
			xactCommit.Float64,
383
			labels...,
384
		)
385

386
		ch <- prometheus.MustNewConstMetric(
387
			statDatabaseXactRollback,
388
			prometheus.CounterValue,
389
			xactRollback.Float64,
390
			labels...,
391
		)
392

393
		ch <- prometheus.MustNewConstMetric(
394
			statDatabaseBlksRead,
395
			prometheus.CounterValue,
396
			blksRead.Float64,
397
			labels...,
398
		)
399

400
		ch <- prometheus.MustNewConstMetric(
401
			statDatabaseBlksHit,
402
			prometheus.CounterValue,
403
			blksHit.Float64,
404
			labels...,
405
		)
406

407
		ch <- prometheus.MustNewConstMetric(
408
			statDatabaseTupReturned,
409
			prometheus.CounterValue,
410
			tupReturned.Float64,
411
			labels...,
412
		)
413

414
		ch <- prometheus.MustNewConstMetric(
415
			statDatabaseTupFetched,
416
			prometheus.CounterValue,
417
			tupFetched.Float64,
418
			labels...,
419
		)
420

421
		ch <- prometheus.MustNewConstMetric(
422
			statDatabaseTupInserted,
423
			prometheus.CounterValue,
424
			tupInserted.Float64,
425
			labels...,
426
		)
427

428
		ch <- prometheus.MustNewConstMetric(
429
			statDatabaseTupUpdated,
430
			prometheus.CounterValue,
431
			tupUpdated.Float64,
432
			labels...,
433
		)
434

435
		ch <- prometheus.MustNewConstMetric(
436
			statDatabaseTupDeleted,
437
			prometheus.CounterValue,
438
			tupDeleted.Float64,
439
			labels...,
440
		)
441

442
		ch <- prometheus.MustNewConstMetric(
443
			statDatabaseConflicts,
444
			prometheus.CounterValue,
445
			conflicts.Float64,
446
			labels...,
447
		)
448

449
		ch <- prometheus.MustNewConstMetric(
450
			statDatabaseTempFiles,
451
			prometheus.CounterValue,
452
			tempFiles.Float64,
453
			labels...,
454
		)
455

456
		ch <- prometheus.MustNewConstMetric(
457
			statDatabaseTempBytes,
458
			prometheus.CounterValue,
459
			tempBytes.Float64,
460
			labels...,
461
		)
462

463
		ch <- prometheus.MustNewConstMetric(
464
			statDatabaseDeadlocks,
465
			prometheus.CounterValue,
466
			deadlocks.Float64,
467
			labels...,
468
		)
469

470
		ch <- prometheus.MustNewConstMetric(
471
			statDatabaseBlkReadTime,
472
			prometheus.CounterValue,
473
			blkReadTime.Float64,
474
			labels...,
475
		)
476

477
		ch <- prometheus.MustNewConstMetric(
478
			statDatabaseBlkWriteTime,
479
			prometheus.CounterValue,
480
			blkWriteTime.Float64,
481
			labels...,
482
		)
483

484
		// ch <- prometheus.MustNewConstMetric(
485
		// 	statDatabaseActiveTime,
486
		// 	prometheus.CounterValue,
487
		// 	activeTime.Float64/1000.0,
488
		// 	labels...,
489
		// )
490

491
		ch <- prometheus.MustNewConstMetric(
492
			statDatabaseStatsReset,
493
			prometheus.CounterValue,
494
			statsResetMetric,
495
			labels...,
496
		)
497
	}
498
	return nil
499
}
500

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.