pangolin_exporter
499 lines · 12.9 KB
1// Copyright 2022 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14package collector
15
16import (
17"context"
18"database/sql"
19
20"github.com/go-kit/log"
21"github.com/go-kit/log/level"
22"github.com/prometheus/client_golang/prometheus"
23)
24
// statDatabaseSubsystem is the Prometheus subsystem name used for all
// metrics derived from the pg_stat_database system view.
const statDatabaseSubsystem = "stat_database"
26
// init registers this collector with the exporter's collector registry.
// It is disabled by default (defaultDisabled) and must be enabled via flags.
func init() {
	registerCollector(statDatabaseSubsystem, defaultDisabled, NewPGStatDatabaseCollector)
}
30
// PGStatDatabaseCollector exposes one metric per column of the
// pg_stat_database view, labeled by datid and datname.
type PGStatDatabaseCollector struct {
	// log is used for debug messages when rows with NULL columns are skipped.
	log log.Logger
}
34
35func NewPGStatDatabaseCollector(config collectorConfig) (Collector, error) {
36return &PGStatDatabaseCollector{log: config.logger}, nil
37}
38
// Metric descriptors for every pg_stat_database column this collector emits,
// plus the SQL query that fetches them. Every descriptor shares the same
// label set: datid (database OID) and datname (database name).
var (
	statDatabaseNumbackends = prometheus.NewDesc(
		prometheus.BuildFQName(
			namespace_pangolin,
			statDatabaseSubsystem,
			"numbackends",
		),
		"Number of backends currently connected to this database. This is the only column in this view that returns a value reflecting current state; all other columns return the accumulated values since the last reset.",
		[]string{"datid", "datname"},
		prometheus.Labels{},
	)
	statDatabaseXactCommit = prometheus.NewDesc(
		prometheus.BuildFQName(
			namespace_pangolin,
			statDatabaseSubsystem,
			"xact_commit",
		),
		"Number of transactions in this database that have been committed",
		[]string{"datid", "datname"},
		prometheus.Labels{},
	)
	statDatabaseXactRollback = prometheus.NewDesc(
		prometheus.BuildFQName(
			namespace_pangolin,
			statDatabaseSubsystem,
			"xact_rollback",
		),
		"Number of transactions in this database that have been rolled back",
		[]string{"datid", "datname"},
		prometheus.Labels{},
	)
	statDatabaseBlksRead = prometheus.NewDesc(
		prometheus.BuildFQName(
			namespace_pangolin,
			statDatabaseSubsystem,
			"blks_read",
		),
		"Number of disk blocks read in this database",
		[]string{"datid", "datname"},
		prometheus.Labels{},
	)
	statDatabaseBlksHit = prometheus.NewDesc(
		prometheus.BuildFQName(
			namespace_pangolin,
			statDatabaseSubsystem,
			"blks_hit",
		),
		"Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)",
		[]string{"datid", "datname"},
		prometheus.Labels{},
	)
	statDatabaseTupReturned = prometheus.NewDesc(
		prometheus.BuildFQName(
			namespace_pangolin,
			statDatabaseSubsystem,
			"tup_returned",
		),
		"Number of rows returned by queries in this database",
		[]string{"datid", "datname"},
		prometheus.Labels{},
	)
	statDatabaseTupFetched = prometheus.NewDesc(
		prometheus.BuildFQName(
			namespace_pangolin,
			statDatabaseSubsystem,
			"tup_fetched",
		),
		"Number of rows fetched by queries in this database",
		[]string{"datid", "datname"},
		prometheus.Labels{},
	)
	statDatabaseTupInserted = prometheus.NewDesc(
		prometheus.BuildFQName(
			namespace_pangolin,
			statDatabaseSubsystem,
			"tup_inserted",
		),
		"Number of rows inserted by queries in this database",
		[]string{"datid", "datname"},
		prometheus.Labels{},
	)
	statDatabaseTupUpdated = prometheus.NewDesc(
		prometheus.BuildFQName(
			namespace_pangolin,
			statDatabaseSubsystem,
			"tup_updated",
		),
		"Number of rows updated by queries in this database",
		[]string{"datid", "datname"},
		prometheus.Labels{},
	)
	statDatabaseTupDeleted = prometheus.NewDesc(
		prometheus.BuildFQName(
			namespace_pangolin,
			statDatabaseSubsystem,
			"tup_deleted",
		),
		"Number of rows deleted by queries in this database",
		[]string{"datid", "datname"},
		prometheus.Labels{},
	)
	statDatabaseConflicts = prometheus.NewDesc(
		prometheus.BuildFQName(
			namespace_pangolin,
			statDatabaseSubsystem,
			"conflicts",
		),
		"Number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see pg_stat_database_conflicts for details.)",
		[]string{"datid", "datname"},
		prometheus.Labels{},
	)
	statDatabaseTempFiles = prometheus.NewDesc(
		prometheus.BuildFQName(
			namespace_pangolin,
			statDatabaseSubsystem,
			"temp_files",
		),
		"Number of temporary files created by queries in this database. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing), and regardless of the log_temp_files setting.",
		[]string{"datid", "datname"},
		prometheus.Labels{},
	)
	statDatabaseTempBytes = prometheus.NewDesc(
		prometheus.BuildFQName(
			namespace_pangolin,
			statDatabaseSubsystem,
			"temp_bytes",
		),
		"Total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting.",
		[]string{"datid", "datname"},
		prometheus.Labels{},
	)
	statDatabaseDeadlocks = prometheus.NewDesc(
		prometheus.BuildFQName(
			namespace_pangolin,
			statDatabaseSubsystem,
			"deadlocks",
		),
		"Number of deadlocks detected in this database",
		[]string{"datid", "datname"},
		prometheus.Labels{},
	)
	statDatabaseBlkReadTime = prometheus.NewDesc(
		prometheus.BuildFQName(
			namespace_pangolin,
			statDatabaseSubsystem,
			"blk_read_time",
		),
		"Time spent reading data file blocks by backends in this database, in milliseconds",
		[]string{"datid", "datname"},
		prometheus.Labels{},
	)
	statDatabaseBlkWriteTime = prometheus.NewDesc(
		prometheus.BuildFQName(
			namespace_pangolin,
			statDatabaseSubsystem,
			"blk_write_time",
		),
		"Time spent writing data file blocks by backends in this database, in milliseconds",
		[]string{"datid", "datname"},
		prometheus.Labels{},
	)
	statDatabaseStatsReset = prometheus.NewDesc(prometheus.BuildFQName(
		namespace_pangolin,
		statDatabaseSubsystem,
		"stats_reset",
	),
		"Time at which these statistics were last reset",
		[]string{"datid", "datname"},
		prometheus.Labels{},
	)
	// active_time support is intentionally disabled; presumably the column is
	// not available on all supported server versions — TODO confirm before
	// re-enabling the descriptor and the ",active_time" query column below.
	// statDatabaseActiveTime = prometheus.NewDesc(prometheus.BuildFQName(
	// namespace_pangolin,
	// statDatabaseSubsystem,
	// "active_time_seconds_total",
	// ),
	// "Time spent executing SQL statements in this database, in seconds",
	// []string{"datid", "datname"},
	// prometheus.Labels{},
	// )

	// statDatabaseQuery selects the columns above, in the exact order the
	// Scan call in Update expects them.
	statDatabaseQuery = `
SELECT
datid
,datname
,numbackends
,xact_commit
,xact_rollback
,blks_read
,blks_hit
,tup_returned
,tup_fetched
,tup_inserted
,tup_updated
,tup_deleted
,conflicts
,temp_files
,temp_bytes
,deadlocks
,blk_read_time
,blk_write_time
,stats_reset
FROM pg_stat_database;
`
) // ,active_time
243
244func (c *PGStatDatabaseCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error {
245db := instance.getDB()
246rows, err := db.QueryContext(ctx,
247statDatabaseQuery,
248)
249if err != nil {
250return err
251}
252defer rows.Close()
253
254for rows.Next() {
255var datid, datname sql.NullString
256var numBackends, xactCommit, xactRollback, blksRead, blksHit, tupReturned, tupFetched, tupInserted, tupUpdated, tupDeleted, conflicts, tempFiles, tempBytes, deadlocks, blkReadTime, blkWriteTime sql.NullFloat64 //, activeTime
257var statsReset sql.NullTime
258
259err := rows.Scan(
260&datid,
261&datname,
262&numBackends,
263&xactCommit,
264&xactRollback,
265&blksRead,
266&blksHit,
267&tupReturned,
268&tupFetched,
269&tupInserted,
270&tupUpdated,
271&tupDeleted,
272&conflicts,
273&tempFiles,
274&tempBytes,
275&deadlocks,
276&blkReadTime,
277&blkWriteTime,
278// &activeTime,
279&statsReset,
280)
281if err != nil {
282return err
283}
284
285if !datid.Valid {
286level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no datid")
287continue
288}
289if !datname.Valid {
290level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no datname")
291continue
292}
293if !numBackends.Valid {
294level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no numbackends")
295continue
296}
297if !xactCommit.Valid {
298level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no xact_commit")
299continue
300}
301if !xactRollback.Valid {
302level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no xact_rollback")
303continue
304}
305if !blksRead.Valid {
306level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no blks_read")
307continue
308}
309if !blksHit.Valid {
310level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no blks_hit")
311continue
312}
313if !tupReturned.Valid {
314level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no tup_returned")
315continue
316}
317if !tupFetched.Valid {
318level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no tup_fetched")
319continue
320}
321if !tupInserted.Valid {
322level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no tup_inserted")
323continue
324}
325if !tupUpdated.Valid {
326level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no tup_updated")
327continue
328}
329if !tupDeleted.Valid {
330level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no tup_deleted")
331continue
332}
333if !conflicts.Valid {
334level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no conflicts")
335continue
336}
337if !tempFiles.Valid {
338level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no temp_files")
339continue
340}
341if !tempBytes.Valid {
342level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no temp_bytes")
343continue
344}
345if !deadlocks.Valid {
346level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no deadlocks")
347continue
348}
349if !blkReadTime.Valid {
350level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no blk_read_time")
351continue
352}
353if !blkWriteTime.Valid {
354level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no blk_write_time")
355continue
356}
357// if !activeTime.Valid {
358// level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no active_time")
359// continue
360// }
361
362statsResetMetric := 0.0
363if !statsReset.Valid {
364level.Debug(c.log).Log("msg", "No metric for stats_reset, will collect 0 instead")
365}
366if statsReset.Valid {
367statsResetMetric = float64(statsReset.Time.Unix())
368}
369
370labels := []string{datid.String, datname.String}
371
372ch <- prometheus.MustNewConstMetric(
373statDatabaseNumbackends,
374prometheus.GaugeValue,
375numBackends.Float64,
376labels...,
377)
378
379ch <- prometheus.MustNewConstMetric(
380statDatabaseXactCommit,
381prometheus.CounterValue,
382xactCommit.Float64,
383labels...,
384)
385
386ch <- prometheus.MustNewConstMetric(
387statDatabaseXactRollback,
388prometheus.CounterValue,
389xactRollback.Float64,
390labels...,
391)
392
393ch <- prometheus.MustNewConstMetric(
394statDatabaseBlksRead,
395prometheus.CounterValue,
396blksRead.Float64,
397labels...,
398)
399
400ch <- prometheus.MustNewConstMetric(
401statDatabaseBlksHit,
402prometheus.CounterValue,
403blksHit.Float64,
404labels...,
405)
406
407ch <- prometheus.MustNewConstMetric(
408statDatabaseTupReturned,
409prometheus.CounterValue,
410tupReturned.Float64,
411labels...,
412)
413
414ch <- prometheus.MustNewConstMetric(
415statDatabaseTupFetched,
416prometheus.CounterValue,
417tupFetched.Float64,
418labels...,
419)
420
421ch <- prometheus.MustNewConstMetric(
422statDatabaseTupInserted,
423prometheus.CounterValue,
424tupInserted.Float64,
425labels...,
426)
427
428ch <- prometheus.MustNewConstMetric(
429statDatabaseTupUpdated,
430prometheus.CounterValue,
431tupUpdated.Float64,
432labels...,
433)
434
435ch <- prometheus.MustNewConstMetric(
436statDatabaseTupDeleted,
437prometheus.CounterValue,
438tupDeleted.Float64,
439labels...,
440)
441
442ch <- prometheus.MustNewConstMetric(
443statDatabaseConflicts,
444prometheus.CounterValue,
445conflicts.Float64,
446labels...,
447)
448
449ch <- prometheus.MustNewConstMetric(
450statDatabaseTempFiles,
451prometheus.CounterValue,
452tempFiles.Float64,
453labels...,
454)
455
456ch <- prometheus.MustNewConstMetric(
457statDatabaseTempBytes,
458prometheus.CounterValue,
459tempBytes.Float64,
460labels...,
461)
462
463ch <- prometheus.MustNewConstMetric(
464statDatabaseDeadlocks,
465prometheus.CounterValue,
466deadlocks.Float64,
467labels...,
468)
469
470ch <- prometheus.MustNewConstMetric(
471statDatabaseBlkReadTime,
472prometheus.CounterValue,
473blkReadTime.Float64,
474labels...,
475)
476
477ch <- prometheus.MustNewConstMetric(
478statDatabaseBlkWriteTime,
479prometheus.CounterValue,
480blkWriteTime.Float64,
481labels...,
482)
483
484// ch <- prometheus.MustNewConstMetric(
485// statDatabaseActiveTime,
486// prometheus.CounterValue,
487// activeTime.Float64/1000.0,
488// labels...,
489// )
490
491ch <- prometheus.MustNewConstMetric(
492statDatabaseStatsReset,
493prometheus.CounterValue,
494statsResetMetric,
495labels...,
496)
497}
498return nil
499}
500