/*
Copyright 2019 The KubeDB Authors.
Copyright (c) 2017 Kristoffer K Larsen <[email protected]>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	https://opensource.org/licenses/MIT

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"database/sql"
	"errors"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"
)

// Describe implements prometheus.Collector.
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
	// We cannot know in advance what metrics the exporter will generate
	// from PgBouncer, so we use the poor man's describe method: run a
	// collect and send the descriptors of all the collected metrics. The
	// problem here is that we need to connect to the PgBouncer admin
	// console. If it is currently unavailable, the descriptors will be
	// incomplete. Since this is a stand-alone exporter and not used as a
	// library within other code implementing additional metrics, the worst
	// that can happen is that we don't detect inconsistent metrics created
	// by this exporter itself. Also, a change in the monitored PgBouncer
	// instance may change the exported metrics during the runtime of the
	// exporter.
	metricCh := make(chan prometheus.Metric)
	doneCh := make(chan struct{})
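
	// Drain metricCh in the background, forwarding only each metric's
	// descriptor; doneCh signals once metricCh is closed and fully drained.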
	go func() {
		for m := range metricCh {
			ch <- m.Desc()
		}
		close(doneCh)
	}()

	e.Collect(metricCh)
	close(metricCh)
	<-doneCh
}

// Collect implements prometheus.Collector.
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
	e.scrape(ch)

	ch <- e.duration
	ch <- e.up
	ch <- e.totalScrapes
	ch <- e.error
}
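
// scrape runs a single collection pass: it checks that the backend is
// reachable, walks every namespace in metricMap, and updates the exporter's
// own status gauges (up, duration, error, totalScrapes).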
func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
	defer func(begun time.Time) {
		e.duration.Set(time.Since(begun).Seconds())
		log.Info("Ending scrape")
	}(time.Now())
	log.Info("Starting scrape")

	e.mutex.RLock()
	defer e.mutex.RUnlock()

	e.error.Set(0)
	e.totalScrapes.Inc()
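
	// A throwaway SHOW STATS query doubles as a connectivity check; its
	// result set is discarded and the real per-namespace queries run below.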
	rows, err := e.db.Query("SHOW STATS")
	if err != nil {
		log.Errorf("error pinging pgbouncer: %q", err)
		e.error.Set(1)
		e.up.Set(0)
		return
	}
	_ = rows.Close()

	log.Debug("Backend is up, proceeding with scrape")
	e.up.Set(1)

	for _, mapping := range e.metricMap {
		nonfatal, err := mapping.Query(ch, e.db)
		if len(nonfatal) > 0 {
			for _, suberr := range nonfatal {
				log.Errorln(suberr.Error())
			}
		}
		if err != nil {
			// TODO: this should not be fatal; it tears down the whole exporter.
			log.Fatal(err)
		}
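
		// Surface the number of non-fatal errors in the scrape error gauge.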
		e.error.Add(float64(len(nonfatal)))
	}
}

// Query runs the SHOW query for this namespace and emits the resulting
// metrics on ch. It returns a fatal error if the scrape fails, and a slice
// of errors if they were non-fatal.
func (m *MetricMapFromNamespace) Query(ch chan<- prometheus.Metric, db *sql.DB) ([]error, error) {
	query := fmt.Sprintf("SHOW %s;", m.namespace)

	// Don't fail the whole scrape on a bad scrape of one metric namespace.
	rows, err := db.Query(query)
	if err != nil {
		return []error{}, fmt.Errorf("error running query on database %s: %s", m.namespace, err)
	}
	defer rows.Close()

	var result rowResult
	result.ColumnNames, err = rows.Columns()
	if err != nil {
		return []error{}, fmt.Errorf("error retrieving column list for %s: %s", m.namespace, err)
	}

	// Make a lookup map for the column indices.
	result.ColumnIdx = make(map[string]int, len(result.ColumnNames))
	for i, n := range result.ColumnNames {
		result.ColumnIdx[n] = i
	}
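
	// database/sql scans through pointers: point each scanArgs entry at the
	// matching ColumnData slot so every row is decoded into the same buffers.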
	result.ColumnData = make([]interface{}, len(result.ColumnNames))
	scanArgs := make([]interface{}, len(result.ColumnNames))
	for i := range result.ColumnData {
		scanArgs[i] = &result.ColumnData[i]
	}

	var nonfatalErrors []error

	for rows.Next() {
		err = rows.Scan(scanArgs...)
		if err != nil {
			return []error{}, fmt.Errorf("error retrieving rows for %s: %s", m.namespace, err)
		}
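
		// Hand the decoded row to the namespace-specific row handler, which
		// emits the actual Prometheus metrics.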
		n, e := m.rowFunc(m, &result, ch)
		if n != nil {
			nonfatalErrors = append(nonfatalErrors, n...)
		}
		if e != nil {
			return nonfatalErrors, e
		}
	}

	if err := rows.Err(); err != nil {
		log.Errorf("failed to scan all rows for %s: %s", m.namespace, err)
		nonfatalErrors = append(nonfatalErrors, fmt.Errorf("failed to consume all rows due to: %s", err))
	}

	return nonfatalErrors, nil
}
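
// Usage sketch: an Exporter plugs into Prometheus like any other collector.
// NewExporter and its signature are assumptions about the rest of this
// package, not a confirmed API; MustRegister and promhttp.Handler are the
// standard client_golang calls:
//
//	exporter := NewExporter(connectionString, "pgbouncer") // hypothetical constructor
//	prometheus.MustRegister(exporter)
//	http.Handle("/metrics", promhttp.Handler())
//	log.Fatal(http.ListenAndServe(":9127", nil))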