diff options
author | Björn Rabenstein <bjoern@rabenste.in> | 2018-09-17 17:09:52 +0200 |
---|---|---|
committer | Ben Kochie <superq@gmail.com> | 2018-09-17 17:09:52 +0200 |
commit | 1c9ea46ccab252e217971eebd5da6e492c108ea2 (patch) | |
tree | d4105e857bf476b877102a73470099f1e85dd0d6 | |
parent | b46cd802002dad3db02475f38aa6e45a38e8d3f5 (diff) | |
download | prometheus_node_collector-1c9ea46ccab252e217971eebd5da6e492c108ea2.tar.bz2 prometheus_node_collector-1c9ea46ccab252e217971eebd5da6e492c108ea2.tar.xz prometheus_node_collector-1c9ea46ccab252e217971eebd5da6e492c108ea2.zip |
Update vendoring for client_golang and friends (#1076)
Signed-off-by: beorn7 <beorn@soundcloud.com>
23 files changed, 965 insertions, 335 deletions
diff --git a/collector/fixtures/e2e-64k-page-output.txt b/collector/fixtures/e2e-64k-page-output.txt index 3e7f67a..edfb373 100644 --- a/collector/fixtures/e2e-64k-page-output.txt +++ b/collector/fixtures/e2e-64k-page-output.txt | |||
@@ -3300,6 +3300,8 @@ node_zfs_zpool_wupdate{zpool="poolz1"} 1.10734831833266e+14 | |||
3300 | # TYPE process_start_time_seconds gauge | 3300 | # TYPE process_start_time_seconds gauge |
3301 | # HELP process_virtual_memory_bytes Virtual memory size in bytes. | 3301 | # HELP process_virtual_memory_bytes Virtual memory size in bytes. |
3302 | # TYPE process_virtual_memory_bytes gauge | 3302 | # TYPE process_virtual_memory_bytes gauge |
3303 | # HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. | ||
3304 | # TYPE process_virtual_memory_max_bytes gauge | ||
3303 | # HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. | 3305 | # HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. |
3304 | # TYPE promhttp_metric_handler_requests_in_flight gauge | 3306 | # TYPE promhttp_metric_handler_requests_in_flight gauge |
3305 | promhttp_metric_handler_requests_in_flight 1 | 3307 | promhttp_metric_handler_requests_in_flight 1 |
diff --git a/collector/fixtures/e2e-output.txt b/collector/fixtures/e2e-output.txt index b3a236b..9e24ada 100644 --- a/collector/fixtures/e2e-output.txt +++ b/collector/fixtures/e2e-output.txt | |||
@@ -3300,6 +3300,8 @@ node_zfs_zpool_wupdate{zpool="poolz1"} 1.10734831833266e+14 | |||
3300 | # TYPE process_start_time_seconds gauge | 3300 | # TYPE process_start_time_seconds gauge |
3301 | # HELP process_virtual_memory_bytes Virtual memory size in bytes. | 3301 | # HELP process_virtual_memory_bytes Virtual memory size in bytes. |
3302 | # TYPE process_virtual_memory_bytes gauge | 3302 | # TYPE process_virtual_memory_bytes gauge |
3303 | # HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. | ||
3304 | # TYPE process_virtual_memory_max_bytes gauge | ||
3303 | # HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. | 3305 | # HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. |
3304 | # TYPE promhttp_metric_handler_requests_in_flight gauge | 3306 | # TYPE promhttp_metric_handler_requests_in_flight gauge |
3305 | promhttp_metric_handler_requests_in_flight 1 | 3307 | promhttp_metric_handler_requests_in_flight 1 |
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go index 623d3d8..08491be 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go | |||
@@ -29,27 +29,71 @@ type Collector interface { | |||
29 | // collected by this Collector to the provided channel and returns once | 29 | // collected by this Collector to the provided channel and returns once |
30 | // the last descriptor has been sent. The sent descriptors fulfill the | 30 | // the last descriptor has been sent. The sent descriptors fulfill the |
31 | // consistency and uniqueness requirements described in the Desc | 31 | // consistency and uniqueness requirements described in the Desc |
32 | // documentation. (It is valid if one and the same Collector sends | 32 | // documentation. |
33 | // duplicate descriptors. Those duplicates are simply ignored. However, | 33 | // |
34 | // two different Collectors must not send duplicate descriptors.) This | 34 | // It is valid if one and the same Collector sends duplicate |
35 | // method idempotently sends the same descriptors throughout the | 35 | // descriptors. Those duplicates are simply ignored. However, two |
36 | // lifetime of the Collector. If a Collector encounters an error while | 36 | // different Collectors must not send duplicate descriptors. |
37 | // executing this method, it must send an invalid descriptor (created | 37 | // |
38 | // with NewInvalidDesc) to signal the error to the registry. | 38 | // Sending no descriptor at all marks the Collector as “unchecked”, |
39 | // i.e. no checks will be performed at registration time, and the | ||
40 | // Collector may yield any Metric it sees fit in its Collect method. | ||
41 | // | ||
42 | // This method idempotently sends the same descriptors throughout the | ||
43 | // lifetime of the Collector. | ||
44 | // | ||
45 | // If a Collector encounters an error while executing this method, it | ||
46 | // must send an invalid descriptor (created with NewInvalidDesc) to | ||
47 | // signal the error to the registry. | ||
39 | Describe(chan<- *Desc) | 48 | Describe(chan<- *Desc) |
40 | // Collect is called by the Prometheus registry when collecting | 49 | // Collect is called by the Prometheus registry when collecting |
41 | // metrics. The implementation sends each collected metric via the | 50 | // metrics. The implementation sends each collected metric via the |
42 | // provided channel and returns once the last metric has been sent. The | 51 | // provided channel and returns once the last metric has been sent. The |
43 | // descriptor of each sent metric is one of those returned by | 52 | // descriptor of each sent metric is one of those returned by Describe |
44 | // Describe. Returned metrics that share the same descriptor must differ | 53 | // (unless the Collector is unchecked, see above). Returned metrics that |
45 | // in their variable label values. This method may be called | 54 | // share the same descriptor must differ in their variable label |
46 | // concurrently and must therefore be implemented in a concurrency safe | 55 | // values. |
47 | // way. Blocking occurs at the expense of total performance of rendering | 56 | // |
48 | // all registered metrics. Ideally, Collector implementations support | 57 | // This method may be called concurrently and must therefore be |
49 | // concurrent readers. | 58 | // implemented in a concurrency safe way. Blocking occurs at the expense |
59 | // of total performance of rendering all registered metrics. Ideally, | ||
60 | // Collector implementations support concurrent readers. | ||
50 | Collect(chan<- Metric) | 61 | Collect(chan<- Metric) |
51 | } | 62 | } |
52 | 63 | ||
64 | // DescribeByCollect is a helper to implement the Describe method of a custom | ||
65 | // Collector. It collects the metrics from the provided Collector and sends | ||
66 | // their descriptors to the provided channel. | ||
67 | // | ||
68 | // If a Collector collects the same metrics throughout its lifetime, its | ||
69 | // Describe method can simply be implemented as: | ||
70 | // | ||
71 | // func (c customCollector) Describe(ch chan<- *Desc) { | ||
72 | // DescribeByCollect(c, ch) | ||
73 | // } | ||
74 | // | ||
75 | // However, this will not work if the metrics collected change dynamically over | ||
76 | // the lifetime of the Collector in a way that their combined set of descriptors | ||
77 | // changes as well. The shortcut implementation will then violate the contract | ||
78 | // of the Describe method. If a Collector sometimes collects no metrics at all | ||
79 | // (for example vectors like CounterVec, GaugeVec, etc., which only collect | ||
80 | // metrics after a metric with a fully specified label set has been accessed), | ||
86› 81 | // it might even get registered as an unchecked Collector (cf. the Register | ||
82 | // method of the Registerer interface). Hence, only use this shortcut | ||
83 | // implementation of Describe if you are certain to fulfill the contract. | ||
84 | // | ||
85 | // The Collector example demonstrates a use of DescribeByCollect. | ||
86 | func DescribeByCollect(c Collector, descs chan<- *Desc) { | ||
87 | metrics := make(chan Metric) | ||
88 | go func() { | ||
89 | c.Collect(metrics) | ||
90 | close(metrics) | ||
91 | }() | ||
92 | for m := range metrics { | ||
93 | descs <- m.Desc() | ||
94 | } | ||
95 | } | ||
96 | |||
53 | // selfCollector implements Collector for a single Metric so that the Metric | 97 | // selfCollector implements Collector for a single Metric so that the Metric |
54 | // collects itself. Add it as an anonymous field to a struct that implements | 98 | // collects itself. Add it as an anonymous field to a struct that implements |
55 | // Metric, and call init with the Metric itself as an argument. | 99 | // Metric, and call init with the Metric itself as an argument. |
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 4a755b0..7b8827f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go | |||
@@ -67,7 +67,7 @@ type Desc struct { | |||
67 | 67 | ||
68 | // NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc | 68 | // NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc |
69 | // and will be reported on registration time. variableLabels and constLabels can | 69 | // and will be reported on registration time. variableLabels and constLabels can |
70 | // be nil if no such labels should be set. fqName and help must not be empty. | 70 | // be nil if no such labels should be set. fqName must not be empty. |
71 | // | 71 | // |
72 | // variableLabels only contain the label names. Their label values are variable | 72 | // variableLabels only contain the label names. Their label values are variable |
73 | // and therefore not part of the Desc. (They are managed within the Metric.) | 73 | // and therefore not part of the Desc. (They are managed within the Metric.) |
@@ -80,10 +80,6 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * | |||
80 | help: help, | 80 | help: help, |
81 | variableLabels: variableLabels, | 81 | variableLabels: variableLabels, |
82 | } | 82 | } |
83 | if help == "" { | ||
84 | d.err = errors.New("empty help string") | ||
85 | return d | ||
86 | } | ||
87 | if !model.IsValidMetricName(model.LabelValue(fqName)) { | 83 | if !model.IsValidMetricName(model.LabelValue(fqName)) { |
88 | d.err = fmt.Errorf("%q is not a valid metric name", fqName) | 84 | d.err = fmt.Errorf("%q is not a valid metric name", fqName) |
89 | return d | 85 | return d |
@@ -156,7 +152,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * | |||
156 | Value: proto.String(v), | 152 | Value: proto.String(v), |
157 | }) | 153 | }) |
158 | } | 154 | } |
159 | sort.Sort(LabelPairSorter(d.constLabelPairs)) | 155 | sort.Sort(labelPairSorter(d.constLabelPairs)) |
160 | return d | 156 | return d |
161 | } | 157 | } |
162 | 158 | ||
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 83c3657..5d9525d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go | |||
@@ -121,7 +121,17 @@ | |||
121 | // NewConstSummary (and their respective Must… versions). That will happen in | 121 | // NewConstSummary (and their respective Must… versions). That will happen in |
122 | // the Collect method. The Describe method has to return separate Desc | 122 | // the Collect method. The Describe method has to return separate Desc |
123 | // instances, representative of the “throw-away” metrics to be created later. | 123 | // instances, representative of the “throw-away” metrics to be created later. |
124 | // NewDesc comes in handy to create those Desc instances. | 124 | // NewDesc comes in handy to create those Desc instances. Alternatively, you |
139› 125 | // could return no Desc at all, which will mark the Collector “unchecked”. No | ||
140› 126 | // checks are performed at registration time, but metric consistency will still | ||
127 | // be ensured at scrape time, i.e. any inconsistencies will lead to scrape | ||
128 | // errors. Thus, with unchecked Collectors, the responsibility to not collect | ||
129 | // metrics that lead to inconsistencies in the total scrape result lies with the | ||
130 | // implementer of the Collector. While this is not a desirable state, it is | ||
145› 131 | // sometimes necessary. The typical use case is a situation where the exact | ||
132 | // metrics to be returned by a Collector cannot be predicted at registration | ||
133 | // time, but the implementer has sufficient knowledge of the whole system to | ||
134 | // guarantee metric consistency. | ||
125 | // | 135 | // |
126 | // The Collector example illustrates the use case. You can also look at the | 136 | // The Collector example illustrates the use case. You can also look at the |
127 | // source code of the processCollector (mirroring process metrics), the | 137 | // source code of the processCollector (mirroring process metrics), the |
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go index e3b67df..3d383a7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go | |||
@@ -1,3 +1,16 @@ | |||
1 | // Copyright 2018 The Prometheus Authors | ||
2 | // Licensed under the Apache License, Version 2.0 (the "License"); | ||
3 | // you may not use this file except in compliance with the License. | ||
4 | // You may obtain a copy of the License at | ||
5 | // | ||
6 | // http://www.apache.org/licenses/LICENSE-2.0 | ||
7 | // | ||
8 | // Unless required by applicable law or agreed to in writing, software | ||
9 | // distributed under the License is distributed on an "AS IS" BASIS, | ||
10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
11 | // See the License for the specific language governing permissions and | ||
12 | // limitations under the License. | ||
13 | |||
1 | package prometheus | 14 | package prometheus |
2 | 15 | ||
3 | // Inline and byte-free variant of hash/fnv's fnv64a. | 16 | // Inline and byte-free variant of hash/fnv's fnv64a. |
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index 096454a..ba3b933 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go | |||
@@ -1,3 +1,16 @@ | |||
1 | // Copyright 2018 The Prometheus Authors | ||
2 | // Licensed under the Apache License, Version 2.0 (the "License"); | ||
3 | // you may not use this file except in compliance with the License. | ||
4 | // You may obtain a copy of the License at | ||
5 | // | ||
6 | // http://www.apache.org/licenses/LICENSE-2.0 | ||
7 | // | ||
8 | // Unless required by applicable law or agreed to in writing, software | ||
9 | // distributed under the License is distributed on an "AS IS" BASIS, | ||
10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
11 | // See the License for the specific language governing permissions and | ||
12 | // limitations under the License. | ||
13 | |||
1 | package prometheus | 14 | package prometheus |
2 | 15 | ||
3 | import ( | 16 | import ( |
@@ -17,8 +30,12 @@ type goCollector struct { | |||
17 | metrics memStatsMetrics | 30 | metrics memStatsMetrics |
18 | } | 31 | } |
19 | 32 | ||
20 | // NewGoCollector returns a collector which exports metrics about the current | 33 | // NewGoCollector returns a collector which exports metrics about the current Go |
21 | // go process. | 34 | // process. This includes memory stats. To collect those, runtime.ReadMemStats |
35 | // is called. This causes a stop-the-world, which is very short with Go1.9+ | ||
36 | // (~25µs). However, with older Go versions, the stop-the-world duration depends | ||
37 | // on the heap size and can be quite significant (~1.7 ms/GiB as per | ||
38 | // https://go-review.googlesource.com/c/go/+/34937). | ||
22 | func NewGoCollector() Collector { | 39 | func NewGoCollector() Collector { |
23 | return &goCollector{ | 40 | return &goCollector{ |
24 | goroutinesDesc: NewDesc( | 41 | goroutinesDesc: NewDesc( |
@@ -265,7 +282,7 @@ func (c *goCollector) Collect(ch chan<- Metric) { | |||
265 | quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() | 282 | quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() |
266 | } | 283 | } |
267 | quantiles[0.0] = stats.PauseQuantiles[0].Seconds() | 284 | quantiles[0.0] = stats.PauseQuantiles[0].Seconds() |
268 | ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) | 285 | ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) |
269 | 286 | ||
270 | ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) | 287 | ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) |
271 | 288 | ||
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 331783a..29dc8e3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go | |||
@@ -16,7 +16,9 @@ package prometheus | |||
16 | import ( | 16 | import ( |
17 | "fmt" | 17 | "fmt" |
18 | "math" | 18 | "math" |
19 | "runtime" | ||
19 | "sort" | 20 | "sort" |
21 | "sync" | ||
20 | "sync/atomic" | 22 | "sync/atomic" |
21 | 23 | ||
22 | "github.com/golang/protobuf/proto" | 24 | "github.com/golang/protobuf/proto" |
@@ -108,8 +110,9 @@ func ExponentialBuckets(start, factor float64, count int) []float64 { | |||
108 | } | 110 | } |
109 | 111 | ||
110 | // HistogramOpts bundles the options for creating a Histogram metric. It is | 112 | // HistogramOpts bundles the options for creating a Histogram metric. It is |
111 | // mandatory to set Name and Help to a non-empty string. All other fields are | 113 | // mandatory to set Name to a non-empty string. All other fields are optional |
112 | // optional and can safely be left at their zero value. | 114 | // and can safely be left at their zero value, although it is strongly |
115 | // encouraged to set a Help string. | ||
113 | type HistogramOpts struct { | 116 | type HistogramOpts struct { |
114 | // Namespace, Subsystem, and Name are components of the fully-qualified | 117 | // Namespace, Subsystem, and Name are components of the fully-qualified |
115 | // name of the Histogram (created by joining these components with | 118 | // name of the Histogram (created by joining these components with |
@@ -120,7 +123,7 @@ type HistogramOpts struct { | |||
120 | Subsystem string | 123 | Subsystem string |
121 | Name string | 124 | Name string |
122 | 125 | ||
123 | // Help provides information about this Histogram. Mandatory! | 126 | // Help provides information about this Histogram. |
124 | // | 127 | // |
125 | // Metrics with the same fully-qualified name must have the same Help | 128 | // Metrics with the same fully-qualified name must have the same Help |
126 | // string. | 129 | // string. |
@@ -200,28 +203,49 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr | |||
200 | } | 203 | } |
201 | } | 204 | } |
202 | } | 205 | } |
203 | // Finally we know the final length of h.upperBounds and can make counts. | 206 | // Finally we know the final length of h.upperBounds and can make counts |
204 | h.counts = make([]uint64, len(h.upperBounds)) | 207 | // for both states: |
208 | h.counts[0].buckets = make([]uint64, len(h.upperBounds)) | ||
209 | h.counts[1].buckets = make([]uint64, len(h.upperBounds)) | ||
205 | 210 | ||
206 | h.init(h) // Init self-collection. | 211 | h.init(h) // Init self-collection. |
207 | return h | 212 | return h |
208 | } | 213 | } |
209 | 214 | ||
210 | type histogram struct { | 215 | type histogramCounts struct { |
211 | // sumBits contains the bits of the float64 representing the sum of all | 216 | // sumBits contains the bits of the float64 representing the sum of all |
212 | // observations. sumBits and count have to go first in the struct to | 217 | // observations. sumBits and count have to go first in the struct to |
213 | // guarantee alignment for atomic operations. | 218 | // guarantee alignment for atomic operations. |
214 | // http://golang.org/pkg/sync/atomic/#pkg-note-BUG | 219 | // http://golang.org/pkg/sync/atomic/#pkg-note-BUG |
215 | sumBits uint64 | 220 | sumBits uint64 |
216 | count uint64 | 221 | count uint64 |
222 | buckets []uint64 | ||
223 | } | ||
217 | 224 | ||
225 | type histogram struct { | ||
218 | selfCollector | 226 | selfCollector |
219 | // Note that there is no mutex required. | 227 | desc *Desc |
220 | 228 | writeMtx sync.Mutex // Only used in the Write method. | |
221 | desc *Desc | ||
222 | 229 | ||
223 | upperBounds []float64 | 230 | upperBounds []float64 |
224 | counts []uint64 | 231 | |
232 | // Two counts, one is "hot" for lock-free observations, the other is | ||
233 | // "cold" for writing out a dto.Metric. | ||
234 | counts [2]histogramCounts | ||
235 | |||
236 | hotIdx int // Index of currently-hot counts. Only used within Write. | ||
237 | |||
238 | // This is a complicated one. For lock-free yet atomic observations, we | ||
239 | // need to save the total count of observations again, combined with the | ||
240 | // index of the currently-hot counts struct, so that we can perform the | ||
241 | // operation on both values atomically. The least significant bit | ||
242 | // defines the hot counts struct. The remaining 63 bits represent the | ||
243 | // total count of observations. This happens under the assumption that | ||
281› 244 | // the 63bit count will never overflow. Rationale: An observation takes | ||
245 | // about 30ns. Let's assume it could happen in 10ns. Overflowing the | ||
246 | // counter will then take at least (2^63)*10ns, which is about 3000 | ||
247 | // years. | ||
248 | countAndHotIdx uint64 | ||
225 | 249 | ||
226 | labelPairs []*dto.LabelPair | 250 | labelPairs []*dto.LabelPair |
227 | } | 251 | } |
@@ -241,36 +265,113 @@ func (h *histogram) Observe(v float64) { | |||
241 | // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op | 265 | // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op |
242 | // 300 buckets: 154 ns/op linear - binary 61.6 ns/op | 266 | // 300 buckets: 154 ns/op linear - binary 61.6 ns/op |
243 | i := sort.SearchFloat64s(h.upperBounds, v) | 267 | i := sort.SearchFloat64s(h.upperBounds, v) |
244 | if i < len(h.counts) { | 268 | |
245 | atomic.AddUint64(&h.counts[i], 1) | 269 | // We increment h.countAndHotIdx by 2 so that the counter in the upper |
270 | // 63 bits gets incremented by 1. At the same time, we get the new value | ||
271 | // back, which we can use to find the currently-hot counts. | ||
272 | n := atomic.AddUint64(&h.countAndHotIdx, 2) | ||
273 | hotCounts := &h.counts[n%2] | ||
274 | |||
275 | if i < len(h.upperBounds) { | ||
276 | atomic.AddUint64(&hotCounts.buckets[i], 1) | ||
246 | } | 277 | } |
247 | atomic.AddUint64(&h.count, 1) | ||
248 | for { | 278 | for { |
249 | oldBits := atomic.LoadUint64(&h.sumBits) | 279 | oldBits := atomic.LoadUint64(&hotCounts.sumBits) |
250 | newBits := math.Float64bits(math.Float64frombits(oldBits) + v) | 280 | newBits := math.Float64bits(math.Float64frombits(oldBits) + v) |
251 | if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { | 281 | if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { |
252 | break | 282 | break |
253 | } | 283 | } |
254 | } | 284 | } |
285 | // Increment count last as we take it as a signal that the observation | ||
286 | // is complete. | ||
287 | atomic.AddUint64(&hotCounts.count, 1) | ||
255 | } | 288 | } |
256 | 289 | ||
257 | func (h *histogram) Write(out *dto.Metric) error { | 290 | func (h *histogram) Write(out *dto.Metric) error { |
258 | his := &dto.Histogram{} | 291 | var ( |
259 | buckets := make([]*dto.Bucket, len(h.upperBounds)) | 292 | his = &dto.Histogram{} |
293 | buckets = make([]*dto.Bucket, len(h.upperBounds)) | ||
294 | hotCounts, coldCounts *histogramCounts | ||
295 | count uint64 | ||
296 | ) | ||
297 | |||
298 | // For simplicity, we mutex the rest of this method. It is not in the | ||
299 | // hot path, i.e. Observe is called much more often than Write. The | ||
300 | // complication of making Write lock-free isn't worth it. | ||
301 | h.writeMtx.Lock() | ||
302 | defer h.writeMtx.Unlock() | ||
303 | |||
304 | // This is a bit arcane, which is why the following spells out this if | ||
305 | // clause in English: | ||
306 | // | ||
307 | // If the currently-hot counts struct is #0, we atomically increment | ||
308 | // h.countAndHotIdx by 1 so that from now on Observe will use the counts | ||
309 | // struct #1. Furthermore, the atomic increment gives us the new value, | ||
310 | // which, in its most significant 63 bits, tells us the count of | ||
311 | // observations done so far up to and including currently ongoing | ||
312 | // observations still using the counts struct just changed from hot to | ||
313 | // cold. To have a normal uint64 for the count, we bitshift by 1 and | ||
314 | // save the result in count. We also set h.hotIdx to 1 for the next | ||
315 | // Write call, and we will refer to counts #1 as hotCounts and to counts | ||
316 | // #0 as coldCounts. | ||
317 | // | ||
318 | // If the currently-hot counts struct is #1, we do the corresponding | ||
319 | // things the other way round. We have to _decrement_ h.countAndHotIdx | ||
320 | // (which is a bit arcane in itself, as we have to express -1 with an | ||
321 | // unsigned int...). | ||
322 | if h.hotIdx == 0 { | ||
323 | count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1 | ||
324 | h.hotIdx = 1 | ||
325 | hotCounts = &h.counts[1] | ||
326 | coldCounts = &h.counts[0] | ||
327 | } else { | ||
328 | count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement. | ||
329 | h.hotIdx = 0 | ||
330 | hotCounts = &h.counts[0] | ||
331 | coldCounts = &h.counts[1] | ||
332 | } | ||
333 | |||
334 | // Now we have to wait for the now-declared-cold counts to actually cool | ||
335 | // down, i.e. wait for all observations still using it to finish. That's | ||
336 | // the case once the count in the cold counts struct is the same as the | ||
337 | // one atomically retrieved from the upper 63bits of h.countAndHotIdx. | ||
338 | for { | ||
339 | if count == atomic.LoadUint64(&coldCounts.count) { | ||
340 | break | ||
341 | } | ||
342 | runtime.Gosched() // Let observations get work done. | ||
343 | } | ||
260 | 344 | ||
261 | his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) | 345 | his.SampleCount = proto.Uint64(count) |
262 | his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) | 346 | his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))) |
263 | var count uint64 | 347 | var cumCount uint64 |
264 | for i, upperBound := range h.upperBounds { | 348 | for i, upperBound := range h.upperBounds { |
265 | count += atomic.LoadUint64(&h.counts[i]) | 349 | cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) |
266 | buckets[i] = &dto.Bucket{ | 350 | buckets[i] = &dto.Bucket{ |
267 | CumulativeCount: proto.Uint64(count), | 351 | CumulativeCount: proto.Uint64(cumCount), |
268 | UpperBound: proto.Float64(upperBound), | 352 | UpperBound: proto.Float64(upperBound), |
269 | } | 353 | } |
270 | } | 354 | } |
355 | |||
271 | his.Bucket = buckets | 356 | his.Bucket = buckets |
272 | out.Histogram = his | 357 | out.Histogram = his |
273 | out.Label = h.labelPairs | 358 | out.Label = h.labelPairs |
359 | |||
360 | // Finally add all the cold counts to the new hot counts and reset the cold counts. | ||
361 | atomic.AddUint64(&hotCounts.count, count) | ||
362 | atomic.StoreUint64(&coldCounts.count, 0) | ||
363 | for { | ||
364 | oldBits := atomic.LoadUint64(&hotCounts.sumBits) | ||
365 | newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) | ||
366 | if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { | ||
367 | atomic.StoreUint64(&coldCounts.sumBits, 0) | ||
368 | break | ||
369 | } | ||
370 | } | ||
371 | for i := range h.upperBounds { | ||
372 | atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) | ||
373 | atomic.StoreUint64(&coldCounts.buckets[i], 0) | ||
374 | } | ||
274 | return nil | 375 | return nil |
275 | } | 376 | } |
276 | 377 | ||
@@ -454,7 +555,7 @@ func (h *constHistogram) Write(out *dto.Metric) error { | |||
454 | // bucket. | 555 | // bucket. |
455 | // | 556 | // |
456 | // NewConstHistogram returns an error if the length of labelValues is not | 557 | // NewConstHistogram returns an error if the length of labelValues is not |
457 | // consistent with the variable labels in Desc. | 558 | // consistent with the variable labels in Desc or if Desc is invalid. |
458 | func NewConstHistogram( | 559 | func NewConstHistogram( |
459 | desc *Desc, | 560 | desc *Desc, |
460 | count uint64, | 561 | count uint64, |
@@ -462,6 +563,9 @@ func NewConstHistogram( | |||
462 | buckets map[float64]uint64, | 563 | buckets map[float64]uint64, |
463 | labelValues ...string, | 564 | labelValues ...string, |
464 | ) (Metric, error) { | 565 | ) (Metric, error) { |
566 | if desc.err != nil { | ||
567 | return nil, desc.err | ||
568 | } | ||
465 | if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { | 569 | if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { |
466 | return nil, err | 570 | return nil, err |
467 | } | 571 | } |
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go index dd0f819..4b8e602 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go | |||
@@ -61,15 +61,15 @@ func giveBuf(buf *bytes.Buffer) { | |||
61 | // name). | 61 | // name). |
62 | // | 62 | // |
63 | // Deprecated: Please note the issues described in the doc comment of | 63 | // Deprecated: Please note the issues described in the doc comment of |
64 | // InstrumentHandler. You might want to consider using | 64 | // InstrumentHandler. You might want to consider using promhttp.Handler instead. |
65 | // promhttp.InstrumentedHandler instead. | ||
66 | func Handler() http.Handler { | 65 | func Handler() http.Handler { |
67 | return InstrumentHandler("prometheus", UninstrumentedHandler()) | 66 | return InstrumentHandler("prometheus", UninstrumentedHandler()) |
68 | } | 67 | } |
69 | 68 | ||
70 | // UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. | 69 | // UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. |
71 | // | 70 | // |
72 | // Deprecated: Use promhttp.Handler instead. See there for further documentation. | 71 | // Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{}) |
72 | // instead. See there for further documentation. | ||
73 | func UninstrumentedHandler() http.Handler { | 73 | func UninstrumentedHandler() http.Handler { |
74 | return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { | 74 | return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { |
75 | mfs, err := DefaultGatherer.Gather() | 75 | mfs, err := DefaultGatherer.Gather() |
@@ -115,7 +115,7 @@ func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) | |||
115 | header := request.Header.Get(acceptEncodingHeader) | 115 | header := request.Header.Get(acceptEncodingHeader) |
116 | parts := strings.Split(header, ",") | 116 | parts := strings.Split(header, ",") |
117 | for _, part := range parts { | 117 | for _, part := range parts { |
118 | part := strings.TrimSpace(part) | 118 | part = strings.TrimSpace(part) |
119 | if part == "gzip" || strings.HasPrefix(part, "gzip;") { | 119 | if part == "gzip" || strings.HasPrefix(part, "gzip;") { |
120 | return gzip.NewWriter(writer), "gzip" | 120 | return gzip.NewWriter(writer), "gzip" |
121 | } | 121 | } |
@@ -139,16 +139,6 @@ var now nower = nowFunc(func() time.Time { | |||
139 | return time.Now() | 139 | return time.Now() |
140 | }) | 140 | }) |
141 | 141 | ||
142 | func nowSeries(t ...time.Time) nower { | ||
143 | return nowFunc(func() time.Time { | ||
144 | defer func() { | ||
145 | t = t[1:] | ||
146 | }() | ||
147 | |||
148 | return t[0] | ||
149 | }) | ||
150 | } | ||
151 | |||
152 | // InstrumentHandler wraps the given HTTP handler for instrumentation. It | 142 | // InstrumentHandler wraps the given HTTP handler for instrumentation. It |
153 | // registers four metric collectors (if not already done) and reports HTTP | 143 | // registers four metric collectors (if not already done) and reports HTTP |
154 | // metrics to the (newly or already) registered collectors: http_requests_total | 144 | // metrics to the (newly or already) registered collectors: http_requests_total |
@@ -159,21 +149,14 @@ func nowSeries(t ...time.Time) nower { | |||
159 | // (label name "method") and HTTP status code (label name "code"). | 149 | // (label name "method") and HTTP status code (label name "code"). |
160 | // | 150 | // |
161 | // Deprecated: InstrumentHandler has several issues. Use the tooling provided in | 151 | // Deprecated: InstrumentHandler has several issues. Use the tooling provided in |
162 | // package promhttp instead. The issues are the following: | 152 | // package promhttp instead. The issues are the following: (1) It uses Summaries |
163 | // | 153 | // rather than Histograms. Summaries are not useful if aggregation across |
164 | // - It uses Summaries rather than Histograms. Summaries are not useful if | 154 | // multiple instances is required. (2) It uses microseconds as unit, which is |
165 | // aggregation across multiple instances is required. | 155 | // deprecated and should be replaced by seconds. (3) The size of the request is |
166 | // | 156 | // calculated in a separate goroutine. Since this calculator requires access to |
167 | // - It uses microseconds as unit, which is deprecated and should be replaced by | 157 | // the request header, it creates a race with any writes to the header performed |
168 | // seconds. | 158 | // during request handling. httputil.ReverseProxy is a prominent example for a |
169 | // | 159 | // handler performing such writes. (4) It has additional issues with HTTP/2, cf. |
170 | // - The size of the request is calculated in a separate goroutine. Since this | ||
171 | // calculator requires access to the request header, it creates a race with | ||
172 | // any writes to the header performed during request handling. | ||
173 | // httputil.ReverseProxy is a prominent example for a handler | ||
174 | // performing such writes. | ||
175 | // | ||
176 | // - It has additional issues with HTTP/2, cf. | ||
177 | // https://github.com/prometheus/client_golang/issues/272. | 160 | // https://github.com/prometheus/client_golang/issues/272. |
178 | func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { | 161 | func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { |
179 | return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) | 162 | return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) |
@@ -317,7 +300,7 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo | |||
317 | } | 300 | } |
318 | 301 | ||
319 | func computeApproximateRequestSize(r *http.Request) <-chan int { | 302 | func computeApproximateRequestSize(r *http.Request) <-chan int { |
320 | // Get URL length in current go routine for avoiding a race condition. | 303 | // Get URL length in current goroutine for avoiding a race condition. |
321 | // HandlerFunc that runs in parallel may modify the URL. | 304 | // HandlerFunc that runs in parallel may modify the URL. |
322 | s := 0 | 305 | s := 0 |
323 | if r.URL != nil { | 306 | if r.URL != nil { |
@@ -352,10 +335,9 @@ func computeApproximateRequestSize(r *http.Request) <-chan int { | |||
352 | type responseWriterDelegator struct { | 335 | type responseWriterDelegator struct { |
353 | http.ResponseWriter | 336 | http.ResponseWriter |
354 | 337 | ||
355 | handler, method string | 338 | status int |
356 | status int | 339 | written int64 |
357 | written int64 | 340 | wroteHeader bool |
358 | wroteHeader bool | ||
359 | } | 341 | } |
360 | 342 | ||
361 | func (r *responseWriterDelegator) WriteHeader(code int) { | 343 | func (r *responseWriterDelegator) WriteHeader(code int) { |
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go new file mode 100644 index 0000000..351c26e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go | |||
@@ -0,0 +1,85 @@ | |||
1 | // Copyright 2018 The Prometheus Authors | ||
2 | // Licensed under the Apache License, Version 2.0 (the "License"); | ||
3 | // you may not use this file except in compliance with the License. | ||
4 | // You may obtain a copy of the License at | ||
5 | // | ||
6 | // http://www.apache.org/licenses/LICENSE-2.0 | ||
7 | // | ||
8 | // Unless required by applicable law or agreed to in writing, software | ||
9 | // distributed under the License is distributed on an "AS IS" BASIS, | ||
10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
11 | // See the License for the specific language governing permissions and | ||
12 | // limitations under the License. | ||
13 | |||
14 | package internal | ||
15 | |||
16 | import ( | ||
17 | "sort" | ||
18 | |||
19 | dto "github.com/prometheus/client_model/go" | ||
20 | ) | ||
21 | |||
22 | // metricSorter is a sortable slice of *dto.Metric. | ||
23 | type metricSorter []*dto.Metric | ||
24 | |||
25 | func (s metricSorter) Len() int { | ||
26 | return len(s) | ||
27 | } | ||
28 | |||
29 | func (s metricSorter) Swap(i, j int) { | ||
30 | s[i], s[j] = s[j], s[i] | ||
31 | } | ||
32 | |||
33 | func (s metricSorter) Less(i, j int) bool { | ||
34 | if len(s[i].Label) != len(s[j].Label) { | ||
35 | // This should not happen. The metrics are | ||
36 | // inconsistent. However, we have to deal with the fact, as | ||
37 | // people might use custom collectors or metric family injection | ||
38 | // to create inconsistent metrics. So let's simply compare the | ||
39 | // number of labels in this case. That will still yield | ||
40 | // reproducible sorting. | ||
41 | return len(s[i].Label) < len(s[j].Label) | ||
42 | } | ||
43 | for n, lp := range s[i].Label { | ||
44 | vi := lp.GetValue() | ||
45 | vj := s[j].Label[n].GetValue() | ||
46 | if vi != vj { | ||
47 | return vi < vj | ||
48 | } | ||
49 | } | ||
50 | |||
51 | // We should never arrive here. Multiple metrics with the same | ||
52 | // label set in the same scrape will lead to undefined ingestion | ||
53 | // behavior. However, as above, we have to provide stable sorting | ||
54 | // here, even for inconsistent metrics. So sort equal metrics | ||
55 | // by their timestamp, with missing timestamps (implying "now") | ||
56 | // coming last. | ||
57 | if s[i].TimestampMs == nil { | ||
58 | return false | ||
59 | } | ||
60 | if s[j].TimestampMs == nil { | ||
61 | return true | ||
62 | } | ||
63 | return s[i].GetTimestampMs() < s[j].GetTimestampMs() | ||
64 | } | ||
65 | |||
66 | // NormalizeMetricFamilies returns a MetricFamily slice with empty | ||
67 | // MetricFamilies pruned and the remaining MetricFamilies sorted by name within | ||
68 | // the slice, with the contained Metrics sorted within each MetricFamily. | ||
69 | func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { | ||
70 | for _, mf := range metricFamiliesByName { | ||
71 | sort.Sort(metricSorter(mf.Metric)) | ||
72 | } | ||
73 | names := make([]string, 0, len(metricFamiliesByName)) | ||
74 | for name, mf := range metricFamiliesByName { | ||
75 | if len(mf.Metric) > 0 { | ||
76 | names = append(names, name) | ||
77 | } | ||
78 | } | ||
79 | sort.Strings(names) | ||
80 | result := make([]*dto.MetricFamily, 0, len(names)) | ||
81 | for _, name := range names { | ||
82 | result = append(result, metricFamiliesByName[name]) | ||
83 | } | ||
84 | return result | ||
85 | } | ||
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index 2502e37..e68f132 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go | |||
@@ -1,3 +1,16 @@ | |||
1 | // Copyright 2018 The Prometheus Authors | ||
2 | // Licensed under the Apache License, Version 2.0 (the "License"); | ||
3 | // you may not use this file except in compliance with the License. | ||
4 | // You may obtain a copy of the License at | ||
5 | // | ||
6 | // http://www.apache.org/licenses/LICENSE-2.0 | ||
7 | // | ||
8 | // Unless required by applicable law or agreed to in writing, software | ||
9 | // distributed under the License is distributed on an "AS IS" BASIS, | ||
10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
11 | // See the License for the specific language governing permissions and | ||
12 | // limitations under the License. | ||
13 | |||
1 | package prometheus | 14 | package prometheus |
2 | 15 | ||
3 | import ( | 16 | import ( |
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 6213ee8..55e6d86 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go | |||
@@ -15,6 +15,9 @@ package prometheus | |||
15 | 15 | ||
16 | import ( | 16 | import ( |
17 | "strings" | 17 | "strings" |
18 | "time" | ||
19 | |||
20 | "github.com/golang/protobuf/proto" | ||
18 | 21 | ||
19 | dto "github.com/prometheus/client_model/go" | 22 | dto "github.com/prometheus/client_model/go" |
20 | ) | 23 | ) |
@@ -43,9 +46,8 @@ type Metric interface { | |||
43 | // While populating dto.Metric, it is the responsibility of the | 46 | // While populating dto.Metric, it is the responsibility of the |
44 | // implementation to ensure validity of the Metric protobuf (like valid | 47 | // implementation to ensure validity of the Metric protobuf (like valid |
45 | // UTF-8 strings or syntactically valid metric and label names). It is | 48 | // UTF-8 strings or syntactically valid metric and label names). It is |
46 | // recommended to sort labels lexicographically. (Implementers may find | 49 | // recommended to sort labels lexicographically. Callers of Write should |
47 | // LabelPairSorter useful for that.) Callers of Write should still make | 50 | // still make sure of sorting if they depend on it. |
48 | // sure of sorting if they depend on it. | ||
49 | Write(*dto.Metric) error | 51 | Write(*dto.Metric) error |
50 | // TODO(beorn7): The original rationale of passing in a pre-allocated | 52 | // TODO(beorn7): The original rationale of passing in a pre-allocated |
51 | // dto.Metric protobuf to save allocations has disappeared. The | 53 | // dto.Metric protobuf to save allocations has disappeared. The |
@@ -57,8 +59,9 @@ type Metric interface { | |||
57 | // implementation XXX has its own XXXOpts type, but in most cases, it is just be | 59 | // implementation XXX has its own XXXOpts type, but in most cases, it is just be |
58 | // an alias of this type (which might change when the requirement arises.) | 60 | // an alias of this type (which might change when the requirement arises.) |
59 | // | 61 | // |
60 | // It is mandatory to set Name and Help to a non-empty string. All other fields | 62 | // It is mandatory to set Name to a non-empty string. All other fields are |
61 | // are optional and can safely be left at their zero value. | 63 | // optional and can safely be left at their zero value, although it is strongly |
64 | // encouraged to set a Help string. | ||
62 | type Opts struct { | 65 | type Opts struct { |
63 | // Namespace, Subsystem, and Name are components of the fully-qualified | 66 | // Namespace, Subsystem, and Name are components of the fully-qualified |
64 | // name of the Metric (created by joining these components with | 67 | // name of the Metric (created by joining these components with |
@@ -69,7 +72,7 @@ type Opts struct { | |||
69 | Subsystem string | 72 | Subsystem string |
70 | Name string | 73 | Name string |
71 | 74 | ||
72 | // Help provides information about this metric. Mandatory! | 75 | // Help provides information about this metric. |
73 | // | 76 | // |
74 | // Metrics with the same fully-qualified name must have the same Help | 77 | // Metrics with the same fully-qualified name must have the same Help |
75 | // string. | 78 | // string. |
@@ -110,37 +113,22 @@ func BuildFQName(namespace, subsystem, name string) string { | |||
110 | return name | 113 | return name |
111 | } | 114 | } |
112 | 115 | ||
113 | // LabelPairSorter implements sort.Interface. It is used to sort a slice of | 116 | // labelPairSorter implements sort.Interface. It is used to sort a slice of |
114 | // dto.LabelPair pointers. This is useful for implementing the Write method of | 117 | // dto.LabelPair pointers. |
115 | // custom metrics. | 118 | type labelPairSorter []*dto.LabelPair |
116 | type LabelPairSorter []*dto.LabelPair | ||
117 | 119 | ||
118 | func (s LabelPairSorter) Len() int { | 120 | func (s labelPairSorter) Len() int { |
119 | return len(s) | 121 | return len(s) |
120 | } | 122 | } |
121 | 123 | ||
122 | func (s LabelPairSorter) Swap(i, j int) { | 124 | func (s labelPairSorter) Swap(i, j int) { |
123 | s[i], s[j] = s[j], s[i] | 125 | s[i], s[j] = s[j], s[i] |
124 | } | 126 | } |
125 | 127 | ||
126 | func (s LabelPairSorter) Less(i, j int) bool { | 128 | func (s labelPairSorter) Less(i, j int) bool { |
127 | return s[i].GetName() < s[j].GetName() | 129 | return s[i].GetName() < s[j].GetName() |
128 | } | 130 | } |
129 | 131 | ||
130 | type hashSorter []uint64 | ||
131 | |||
132 | func (s hashSorter) Len() int { | ||
133 | return len(s) | ||
134 | } | ||
135 | |||
136 | func (s hashSorter) Swap(i, j int) { | ||
137 | s[i], s[j] = s[j], s[i] | ||
138 | } | ||
139 | |||
140 | func (s hashSorter) Less(i, j int) bool { | ||
141 | return s[i] < s[j] | ||
142 | } | ||
143 | |||
144 | type invalidMetric struct { | 132 | type invalidMetric struct { |
145 | desc *Desc | 133 | desc *Desc |
146 | err error | 134 | err error |
@@ -156,3 +144,31 @@ func NewInvalidMetric(desc *Desc, err error) Metric { | |||
156 | func (m *invalidMetric) Desc() *Desc { return m.desc } | 144 | func (m *invalidMetric) Desc() *Desc { return m.desc } |
157 | 145 | ||
158 | func (m *invalidMetric) Write(*dto.Metric) error { return m.err } | 146 | func (m *invalidMetric) Write(*dto.Metric) error { return m.err } |
147 | |||
148 | type timestampedMetric struct { | ||
149 | Metric | ||
150 | t time.Time | ||
151 | } | ||
152 | |||
153 | func (m timestampedMetric) Write(pb *dto.Metric) error { | ||
154 | e := m.Metric.Write(pb) | ||
155 | pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) | ||
156 | return e | ||
157 | } | ||
158 | |||
159 | // NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a | ||
160 | // way that it has an explicit timestamp set to the provided Time. This is only | ||
161 | // useful in rare cases as the timestamp of a Prometheus metric should usually | ||
162 | // be set by the Prometheus server during scraping. Exceptions include mirroring | ||
163 | // metrics with given timestamps from other metric | ||
164 | // sources. | ||
165 | // | ||
166 | // NewMetricWithTimestamp works best with MustNewConstMetric, | ||
167 | // MustNewConstHistogram, and MustNewConstSummary, see example. | ||
168 | // | ||
169 | // Currently, the exposition formats used by Prometheus are limited to | ||
170 | // millisecond resolution. Thus, the provided time will be rounded down to the | ||
171 | // next full millisecond value. | ||
172 | func NewMetricWithTimestamp(t time.Time, m Metric) Metric { | ||
173 | return timestampedMetric{Metric: m, t: t} | ||
174 | } | ||
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 32ac74a..55176d5 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go | |||
@@ -13,46 +13,74 @@ | |||
13 | 13 | ||
14 | package prometheus | 14 | package prometheus |
15 | 15 | ||
16 | import "github.com/prometheus/procfs" | 16 | import ( |
17 | "errors" | ||
18 | "os" | ||
19 | |||
20 | "github.com/prometheus/procfs" | ||
21 | ) | ||
17 | 22 | ||
18 | type processCollector struct { | 23 | type processCollector struct { |
19 | pid int | ||
20 | collectFn func(chan<- Metric) | 24 | collectFn func(chan<- Metric) |
21 | pidFn func() (int, error) | 25 | pidFn func() (int, error) |
26 | reportErrors bool | ||
22 | cpuTotal *Desc | 27 | cpuTotal *Desc |
23 | openFDs, maxFDs *Desc | 28 | openFDs, maxFDs *Desc |
24 | vsize, rss *Desc | 29 | vsize, maxVsize *Desc |
30 | rss *Desc | ||
25 | startTime *Desc | 31 | startTime *Desc |
26 | } | 32 | } |
27 | 33 | ||
34 | // ProcessCollectorOpts defines the behavior of a process metrics collector | ||
35 | // created with NewProcessCollector. | ||
36 | type ProcessCollectorOpts struct { | ||
37 | // PidFn returns the PID of the process the collector collects metrics | ||
38 | // for. It is called upon each collection. By default, the PID of the | ||
39 | // current process is used, as determined on construction time by | ||
40 | // calling os.Getpid(). | ||
41 | PidFn func() (int, error) | ||
42 | // If non-empty, each of the collected metrics is prefixed by the | ||
43 | // provided string and an underscore ("_"). | ||
44 | Namespace string | ||
45 | // If true, any error encountered during collection is reported as an | ||
46 | // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored | ||
47 | // and the collected metrics will be incomplete. (Possibly, no metrics | ||
48 | // will be collected at all.) While that's usually not desired, it is | ||
49 | // appropriate for the common "mix-in" of process metrics, where process | ||
50 | // metrics are nice to have, but failing to collect them should not | ||
51 | // disrupt the collection of the remaining metrics. | ||
52 | ReportErrors bool | ||
53 | } | ||
54 | |||
28 | // NewProcessCollector returns a collector which exports the current state of | 55 | // NewProcessCollector returns a collector which exports the current state of |
29 | // process metrics including CPU, memory and file descriptor usage as well as | 56 | // process metrics including CPU, memory and file descriptor usage as well as |
30 | // the process start time for the given process ID under the given namespace. | 57 | // the process start time. The detailed behavior is defined by the provided |
58 | // ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a | ||
59 | // collector for the current process with an empty namespace string and no error | ||
60 | // reporting. | ||
31 | // | 61 | // |
32 | // Currently, the collector depends on a Linux-style proc filesystem and | 62 | // Currently, the collector depends on a Linux-style proc filesystem and |
33 | // therefore only exports metrics for Linux. | 63 | // therefore only exports metrics for Linux. |
34 | func NewProcessCollector(pid int, namespace string) Collector { | 64 | // |
35 | return NewProcessCollectorPIDFn( | 65 | // Note: An older version of this function had the following signature: |
36 | func() (int, error) { return pid, nil }, | 66 | // |
37 | namespace, | 67 | // NewProcessCollector(pid int, namespace string) Collector |
38 | ) | 68 | // |
39 | } | 69 | // Most commonly, it was called as |
40 | 70 | // | |
41 | // NewProcessCollectorPIDFn works like NewProcessCollector but the process ID is | 71 | // NewProcessCollector(os.Getpid(), "") |
42 | // determined on each collect anew by calling the given pidFn function. | 72 | // |
43 | func NewProcessCollectorPIDFn( | 73 | // The following call of the current version is equivalent to the above: |
44 | pidFn func() (int, error), | 74 | // |
45 | namespace string, | 75 | // NewProcessCollector(ProcessCollectorOpts{}) |
46 | ) Collector { | 76 | func NewProcessCollector(opts ProcessCollectorOpts) Collector { |
47 | ns := "" | 77 | ns := "" |
48 | if len(namespace) > 0 { | 78 | if len(opts.Namespace) > 0 { |
49 | ns = namespace + "_" | 79 | ns = opts.Namespace + "_" |
50 | } | 80 | } |
51 | 81 | ||
52 | c := processCollector{ | 82 | c := &processCollector{ |
53 | pidFn: pidFn, | 83 | reportErrors: opts.ReportErrors, |
54 | collectFn: func(chan<- Metric) {}, | ||
55 | |||
56 | cpuTotal: NewDesc( | 84 | cpuTotal: NewDesc( |
57 | ns+"process_cpu_seconds_total", | 85 | ns+"process_cpu_seconds_total", |
58 | "Total user and system CPU time spent in seconds.", | 86 | "Total user and system CPU time spent in seconds.", |
@@ -73,6 +101,11 @@ func NewProcessCollectorPIDFn( | |||
73 | "Virtual memory size in bytes.", | 101 | "Virtual memory size in bytes.", |
74 | nil, nil, | 102 | nil, nil, |
75 | ), | 103 | ), |
104 | maxVsize: NewDesc( | ||
105 | ns+"process_virtual_memory_max_bytes", | ||
106 | "Maximum amount of virtual memory available in bytes.", | ||
107 | nil, nil, | ||
108 | ), | ||
76 | rss: NewDesc( | 109 | rss: NewDesc( |
77 | ns+"process_resident_memory_bytes", | 110 | ns+"process_resident_memory_bytes", |
78 | "Resident memory size in bytes.", | 111 | "Resident memory size in bytes.", |
@@ -85,12 +118,23 @@ func NewProcessCollectorPIDFn( | |||
85 | ), | 118 | ), |
86 | } | 119 | } |
87 | 120 | ||
121 | if opts.PidFn == nil { | ||
122 | pid := os.Getpid() | ||
123 | c.pidFn = func() (int, error) { return pid, nil } | ||
124 | } else { | ||
125 | c.pidFn = opts.PidFn | ||
126 | } | ||
127 | |||
88 | // Set up process metric collection if supported by the runtime. | 128 | // Set up process metric collection if supported by the runtime. |
89 | if _, err := procfs.NewStat(); err == nil { | 129 | if _, err := procfs.NewStat(); err == nil { |
90 | c.collectFn = c.processCollect | 130 | c.collectFn = c.processCollect |
131 | } else { | ||
132 | c.collectFn = func(ch chan<- Metric) { | ||
133 | c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) | ||
134 | } | ||
91 | } | 135 | } |
92 | 136 | ||
93 | return &c | 137 | return c |
94 | } | 138 | } |
95 | 139 | ||
96 | // Describe returns all descriptions of the collector. | 140 | // Describe returns all descriptions of the collector. |
@@ -99,6 +143,7 @@ func (c *processCollector) Describe(ch chan<- *Desc) { | |||
99 | ch <- c.openFDs | 143 | ch <- c.openFDs |
100 | ch <- c.maxFDs | 144 | ch <- c.maxFDs |
101 | ch <- c.vsize | 145 | ch <- c.vsize |
146 | ch <- c.maxVsize | ||
102 | ch <- c.rss | 147 | ch <- c.rss |
103 | ch <- c.startTime | 148 | ch <- c.startTime |
104 | } | 149 | } |
@@ -108,16 +153,16 @@ func (c *processCollector) Collect(ch chan<- Metric) { | |||
108 | c.collectFn(ch) | 153 | c.collectFn(ch) |
109 | } | 154 | } |
110 | 155 | ||
111 | // TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the | ||
112 | // client allows users to configure the error behavior. | ||
113 | func (c *processCollector) processCollect(ch chan<- Metric) { | 156 | func (c *processCollector) processCollect(ch chan<- Metric) { |
114 | pid, err := c.pidFn() | 157 | pid, err := c.pidFn() |
115 | if err != nil { | 158 | if err != nil { |
159 | c.reportError(ch, nil, err) | ||
116 | return | 160 | return |
117 | } | 161 | } |
118 | 162 | ||
119 | p, err := procfs.NewProc(pid) | 163 | p, err := procfs.NewProc(pid) |
120 | if err != nil { | 164 | if err != nil { |
165 | c.reportError(ch, nil, err) | ||
121 | return | 166 | return |
122 | } | 167 | } |
123 | 168 | ||
@@ -127,14 +172,33 @@ func (c *processCollector) processCollect(ch chan<- Metric) { | |||
127 | ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) | 172 | ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) |
128 | if startTime, err := stat.StartTime(); err == nil { | 173 | if startTime, err := stat.StartTime(); err == nil { |
129 | ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) | 174 | ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) |
175 | } else { | ||
176 | c.reportError(ch, c.startTime, err) | ||
130 | } | 177 | } |
178 | } else { | ||
179 | c.reportError(ch, nil, err) | ||
131 | } | 180 | } |
132 | 181 | ||
133 | if fds, err := p.FileDescriptorsLen(); err == nil { | 182 | if fds, err := p.FileDescriptorsLen(); err == nil { |
134 | ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) | 183 | ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) |
184 | } else { | ||
185 | c.reportError(ch, c.openFDs, err) | ||
135 | } | 186 | } |
136 | 187 | ||
137 | if limits, err := p.NewLimits(); err == nil { | 188 | if limits, err := p.NewLimits(); err == nil { |
138 | ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) | 189 | ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) |
190 | ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) | ||
191 | } else { | ||
192 | c.reportError(ch, nil, err) | ||
193 | } | ||
194 | } | ||
195 | |||
196 | func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { | ||
197 | if !c.reportErrors { | ||
198 | return | ||
199 | } | ||
200 | if desc == nil { | ||
201 | desc = NewInvalidDesc(err) | ||
139 | } | 202 | } |
203 | ch <- NewInvalidMetric(desc, err) | ||
140 | } | 204 | } |
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index 9c1c66d..67b56d3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go | |||
@@ -76,16 +76,16 @@ type flusherDelegator struct{ *responseWriterDelegator } | |||
76 | type hijackerDelegator struct{ *responseWriterDelegator } | 76 | type hijackerDelegator struct{ *responseWriterDelegator } |
77 | type readerFromDelegator struct{ *responseWriterDelegator } | 77 | type readerFromDelegator struct{ *responseWriterDelegator } |
78 | 78 | ||
79 | func (d *closeNotifierDelegator) CloseNotify() <-chan bool { | 79 | func (d closeNotifierDelegator) CloseNotify() <-chan bool { |
80 | return d.ResponseWriter.(http.CloseNotifier).CloseNotify() | 80 | return d.ResponseWriter.(http.CloseNotifier).CloseNotify() |
81 | } | 81 | } |
82 | func (d *flusherDelegator) Flush() { | 82 | func (d flusherDelegator) Flush() { |
83 | d.ResponseWriter.(http.Flusher).Flush() | 83 | d.ResponseWriter.(http.Flusher).Flush() |
84 | } | 84 | } |
85 | func (d *hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { | 85 | func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { |
86 | return d.ResponseWriter.(http.Hijacker).Hijack() | 86 | return d.ResponseWriter.(http.Hijacker).Hijack() |
87 | } | 87 | } |
88 | func (d *readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { | 88 | func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { |
89 | if !d.wroteHeader { | 89 | if !d.wroteHeader { |
90 | d.WriteHeader(http.StatusOK) | 90 | d.WriteHeader(http.StatusOK) |
91 | } | 91 | } |
@@ -102,34 +102,34 @@ func init() { | |||
102 | return d | 102 | return d |
103 | } | 103 | } |
104 | pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 | 104 | pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 |
105 | return &closeNotifierDelegator{d} | 105 | return closeNotifierDelegator{d} |
106 | } | 106 | } |
107 | pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 | 107 | pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 |
108 | return &flusherDelegator{d} | 108 | return flusherDelegator{d} |
109 | } | 109 | } |
110 | pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 | 110 | pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 |
111 | return struct { | 111 | return struct { |
112 | *responseWriterDelegator | 112 | *responseWriterDelegator |
113 | http.Flusher | 113 | http.Flusher |
114 | http.CloseNotifier | 114 | http.CloseNotifier |
115 | }{d, &flusherDelegator{d}, &closeNotifierDelegator{d}} | 115 | }{d, flusherDelegator{d}, closeNotifierDelegator{d}} |
116 | } | 116 | } |
117 | pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 | 117 | pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 |
118 | return &hijackerDelegator{d} | 118 | return hijackerDelegator{d} |
119 | } | 119 | } |
120 | pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 | 120 | pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 |
121 | return struct { | 121 | return struct { |
122 | *responseWriterDelegator | 122 | *responseWriterDelegator |
123 | http.Hijacker | 123 | http.Hijacker |
124 | http.CloseNotifier | 124 | http.CloseNotifier |
125 | }{d, &hijackerDelegator{d}, &closeNotifierDelegator{d}} | 125 | }{d, hijackerDelegator{d}, closeNotifierDelegator{d}} |
126 | } | 126 | } |
127 | pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 | 127 | pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 |
128 | return struct { | 128 | return struct { |
129 | *responseWriterDelegator | 129 | *responseWriterDelegator |
130 | http.Hijacker | 130 | http.Hijacker |
131 | http.Flusher | 131 | http.Flusher |
132 | }{d, &hijackerDelegator{d}, &flusherDelegator{d}} | 132 | }{d, hijackerDelegator{d}, flusherDelegator{d}} |
133 | } | 133 | } |
134 | pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 | 134 | pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 |
135 | return struct { | 135 | return struct { |
@@ -137,7 +137,7 @@ func init() { | |||
137 | http.Hijacker | 137 | http.Hijacker |
138 | http.Flusher | 138 | http.Flusher |
139 | http.CloseNotifier | 139 | http.CloseNotifier |
140 | }{d, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} | 140 | }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} |
141 | } | 141 | } |
142 | pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 | 142 | pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 |
143 | return readerFromDelegator{d} | 143 | return readerFromDelegator{d} |
@@ -147,14 +147,14 @@ func init() { | |||
147 | *responseWriterDelegator | 147 | *responseWriterDelegator |
148 | io.ReaderFrom | 148 | io.ReaderFrom |
149 | http.CloseNotifier | 149 | http.CloseNotifier |
150 | }{d, &readerFromDelegator{d}, &closeNotifierDelegator{d}} | 150 | }{d, readerFromDelegator{d}, closeNotifierDelegator{d}} |
151 | } | 151 | } |
152 | pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 | 152 | pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 |
153 | return struct { | 153 | return struct { |
154 | *responseWriterDelegator | 154 | *responseWriterDelegator |
155 | io.ReaderFrom | 155 | io.ReaderFrom |
156 | http.Flusher | 156 | http.Flusher |
157 | }{d, &readerFromDelegator{d}, &flusherDelegator{d}} | 157 | }{d, readerFromDelegator{d}, flusherDelegator{d}} |
158 | } | 158 | } |
159 | pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 | 159 | pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 |
160 | return struct { | 160 | return struct { |
@@ -162,14 +162,14 @@ func init() { | |||
162 | io.ReaderFrom | 162 | io.ReaderFrom |
163 | http.Flusher | 163 | http.Flusher |
164 | http.CloseNotifier | 164 | http.CloseNotifier |
165 | }{d, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} | 165 | }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} |
166 | } | 166 | } |
167 | pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 | 167 | pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 |
168 | return struct { | 168 | return struct { |
169 | *responseWriterDelegator | 169 | *responseWriterDelegator |
170 | io.ReaderFrom | 170 | io.ReaderFrom |
171 | http.Hijacker | 171 | http.Hijacker |
172 | }{d, &readerFromDelegator{d}, &hijackerDelegator{d}} | 172 | }{d, readerFromDelegator{d}, hijackerDelegator{d}} |
173 | } | 173 | } |
174 | pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 | 174 | pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 |
175 | return struct { | 175 | return struct { |
@@ -177,7 +177,7 @@ func init() { | |||
177 | io.ReaderFrom | 177 | io.ReaderFrom |
178 | http.Hijacker | 178 | http.Hijacker |
179 | http.CloseNotifier | 179 | http.CloseNotifier |
180 | }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}} | 180 | }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} |
181 | } | 181 | } |
182 | pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 | 182 | pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 |
183 | return struct { | 183 | return struct { |
@@ -185,7 +185,7 @@ func init() { | |||
185 | io.ReaderFrom | 185 | io.ReaderFrom |
186 | http.Hijacker | 186 | http.Hijacker |
187 | http.Flusher | 187 | http.Flusher |
188 | }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}} | 188 | }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} |
189 | } | 189 | } |
190 | pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 | 190 | pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 |
191 | return struct { | 191 | return struct { |
@@ -194,6 +194,6 @@ func init() { | |||
194 | http.Hijacker | 194 | http.Hijacker |
195 | http.Flusher | 195 | http.Flusher |
196 | http.CloseNotifier | 196 | http.CloseNotifier |
197 | }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} | 197 | }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} |
198 | } | 198 | } |
199 | } | 199 | } |
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go index 75a905e..31a7069 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go | |||
@@ -22,27 +22,27 @@ import ( | |||
22 | 22 | ||
23 | type pusherDelegator struct{ *responseWriterDelegator } | 23 | type pusherDelegator struct{ *responseWriterDelegator } |
24 | 24 | ||
25 | func (d *pusherDelegator) Push(target string, opts *http.PushOptions) error { | 25 | func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { |
26 | return d.ResponseWriter.(http.Pusher).Push(target, opts) | 26 | return d.ResponseWriter.(http.Pusher).Push(target, opts) |
27 | } | 27 | } |
28 | 28 | ||
29 | func init() { | 29 | func init() { |
30 | pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 | 30 | pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 |
31 | return &pusherDelegator{d} | 31 | return pusherDelegator{d} |
32 | } | 32 | } |
33 | pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 | 33 | pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 |
34 | return struct { | 34 | return struct { |
35 | *responseWriterDelegator | 35 | *responseWriterDelegator |
36 | http.Pusher | 36 | http.Pusher |
37 | http.CloseNotifier | 37 | http.CloseNotifier |
38 | }{d, &pusherDelegator{d}, &closeNotifierDelegator{d}} | 38 | }{d, pusherDelegator{d}, closeNotifierDelegator{d}} |
39 | } | 39 | } |
40 | pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 | 40 | pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 |
41 | return struct { | 41 | return struct { |
42 | *responseWriterDelegator | 42 | *responseWriterDelegator |
43 | http.Pusher | 43 | http.Pusher |
44 | http.Flusher | 44 | http.Flusher |
45 | }{d, &pusherDelegator{d}, &flusherDelegator{d}} | 45 | }{d, pusherDelegator{d}, flusherDelegator{d}} |
46 | } | 46 | } |
47 | pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 | 47 | pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 |
48 | return struct { | 48 | return struct { |
@@ -50,14 +50,14 @@ func init() { | |||
50 | http.Pusher | 50 | http.Pusher |
51 | http.Flusher | 51 | http.Flusher |
52 | http.CloseNotifier | 52 | http.CloseNotifier |
53 | }{d, &pusherDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} | 53 | }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} |
54 | } | 54 | } |
55 | pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 | 55 | pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 |
56 | return struct { | 56 | return struct { |
57 | *responseWriterDelegator | 57 | *responseWriterDelegator |
58 | http.Pusher | 58 | http.Pusher |
59 | http.Hijacker | 59 | http.Hijacker |
60 | }{d, &pusherDelegator{d}, &hijackerDelegator{d}} | 60 | }{d, pusherDelegator{d}, hijackerDelegator{d}} |
61 | } | 61 | } |
62 | pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 | 62 | pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 |
63 | return struct { | 63 | return struct { |
@@ -65,7 +65,7 @@ func init() { | |||
65 | http.Pusher | 65 | http.Pusher |
66 | http.Hijacker | 66 | http.Hijacker |
67 | http.CloseNotifier | 67 | http.CloseNotifier |
68 | }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}} | 68 | }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} |
69 | } | 69 | } |
70 | pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 | 70 | pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 |
71 | return struct { | 71 | return struct { |
@@ -73,7 +73,7 @@ func init() { | |||
73 | http.Pusher | 73 | http.Pusher |
74 | http.Hijacker | 74 | http.Hijacker |
75 | http.Flusher | 75 | http.Flusher |
76 | }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}} | 76 | }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} |
77 | } | 77 | } |
78 | pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 | 78 | pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 |
79 | return struct { | 79 | return struct { |
@@ -82,14 +82,14 @@ func init() { | |||
82 | http.Hijacker | 82 | http.Hijacker |
83 | http.Flusher | 83 | http.Flusher |
84 | http.CloseNotifier | 84 | http.CloseNotifier |
85 | }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} | 85 | }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} |
86 | } | 86 | } |
87 | pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 | 87 | pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 |
88 | return struct { | 88 | return struct { |
89 | *responseWriterDelegator | 89 | *responseWriterDelegator |
90 | http.Pusher | 90 | http.Pusher |
91 | io.ReaderFrom | 91 | io.ReaderFrom |
92 | }{d, &pusherDelegator{d}, &readerFromDelegator{d}} | 92 | }{d, pusherDelegator{d}, readerFromDelegator{d}} |
93 | } | 93 | } |
94 | pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 | 94 | pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 |
95 | return struct { | 95 | return struct { |
@@ -97,7 +97,7 @@ func init() { | |||
97 | http.Pusher | 97 | http.Pusher |
98 | io.ReaderFrom | 98 | io.ReaderFrom |
99 | http.CloseNotifier | 99 | http.CloseNotifier |
100 | }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &closeNotifierDelegator{d}} | 100 | }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} |
101 | } | 101 | } |
102 | pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 | 102 | pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 |
103 | return struct { | 103 | return struct { |
@@ -105,7 +105,7 @@ func init() { | |||
105 | http.Pusher | 105 | http.Pusher |
106 | io.ReaderFrom | 106 | io.ReaderFrom |
107 | http.Flusher | 107 | http.Flusher |
108 | }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}} | 108 | }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} |
109 | } | 109 | } |
110 | pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 | 110 | pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 |
111 | return struct { | 111 | return struct { |
@@ -114,7 +114,7 @@ func init() { | |||
114 | io.ReaderFrom | 114 | io.ReaderFrom |
115 | http.Flusher | 115 | http.Flusher |
116 | http.CloseNotifier | 116 | http.CloseNotifier |
117 | }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} | 117 | }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} |
118 | } | 118 | } |
119 | pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 | 119 | pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 |
120 | return struct { | 120 | return struct { |
@@ -122,7 +122,7 @@ func init() { | |||
122 | http.Pusher | 122 | http.Pusher |
123 | io.ReaderFrom | 123 | io.ReaderFrom |
124 | http.Hijacker | 124 | http.Hijacker |
125 | }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}} | 125 | }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} |
126 | } | 126 | } |
127 | pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 | 127 | pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 |
128 | return struct { | 128 | return struct { |
@@ -131,7 +131,7 @@ func init() { | |||
131 | io.ReaderFrom | 131 | io.ReaderFrom |
132 | http.Hijacker | 132 | http.Hijacker |
133 | http.CloseNotifier | 133 | http.CloseNotifier |
134 | }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}} | 134 | }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} |
135 | } | 135 | } |
136 | pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 | 136 | pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 |
137 | return struct { | 137 | return struct { |
@@ -140,7 +140,7 @@ func init() { | |||
140 | io.ReaderFrom | 140 | io.ReaderFrom |
141 | http.Hijacker | 141 | http.Hijacker |
142 | http.Flusher | 142 | http.Flusher |
143 | }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}} | 143 | }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} |
144 | } | 144 | } |
145 | pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 | 145 | pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 |
146 | return struct { | 146 | return struct { |
@@ -150,7 +150,7 @@ func init() { | |||
150 | http.Hijacker | 150 | http.Hijacker |
151 | http.Flusher | 151 | http.Flusher |
152 | http.CloseNotifier | 152 | http.CloseNotifier |
153 | }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} | 153 | }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} |
154 | } | 154 | } |
155 | } | 155 | } |
156 | 156 | ||
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 8dc2603..0135737 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go | |||
@@ -302,7 +302,7 @@ func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled | |||
302 | header := request.Header.Get(acceptEncodingHeader) | 302 | header := request.Header.Get(acceptEncodingHeader) |
303 | parts := strings.Split(header, ",") | 303 | parts := strings.Split(header, ",") |
304 | for _, part := range parts { | 304 | for _, part := range parts { |
305 | part := strings.TrimSpace(part) | 305 | part = strings.TrimSpace(part) |
306 | if part == "gzip" || strings.HasPrefix(part, "gzip;") { | 306 | if part == "gzip" || strings.HasPrefix(part, "gzip;") { |
307 | return gzip.NewWriter(writer), "gzip" | 307 | return gzip.NewWriter(writer), "gzip" |
308 | } | 308 | } |
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go index 0bd80c3..a034d1e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go | |||
@@ -81,8 +81,8 @@ func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) Ro | |||
81 | } | 81 | } |
82 | }, | 82 | }, |
83 | DNSDone: func(_ httptrace.DNSDoneInfo) { | 83 | DNSDone: func(_ httptrace.DNSDoneInfo) { |
84 | if it.DNSStart != nil { | 84 | if it.DNSDone != nil { |
85 | it.DNSStart(time.Since(start).Seconds()) | 85 | it.DNSDone(time.Since(start).Seconds()) |
86 | } | 86 | } |
87 | }, | 87 | }, |
88 | ConnectStart: func(_, _ string) { | 88 | ConnectStart: func(_, _ string) { |
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index bee3703..2c0b908 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go | |||
@@ -15,17 +15,18 @@ package prometheus | |||
15 | 15 | ||
16 | import ( | 16 | import ( |
17 | "bytes" | 17 | "bytes" |
18 | "errors" | ||
19 | "fmt" | 18 | "fmt" |
20 | "os" | ||
21 | "runtime" | 19 | "runtime" |
22 | "sort" | 20 | "sort" |
21 | "strings" | ||
23 | "sync" | 22 | "sync" |
24 | "unicode/utf8" | 23 | "unicode/utf8" |
25 | 24 | ||
26 | "github.com/golang/protobuf/proto" | 25 | "github.com/golang/protobuf/proto" |
27 | 26 | ||
28 | dto "github.com/prometheus/client_model/go" | 27 | dto "github.com/prometheus/client_model/go" |
28 | |||
29 | "github.com/prometheus/client_golang/prometheus/internal" | ||
29 | ) | 30 | ) |
30 | 31 | ||
31 | const ( | 32 | const ( |
@@ -38,12 +39,13 @@ const ( | |||
38 | // Registerer and Gatherer interface a number of convenience functions in this | 39 | // Registerer and Gatherer interface a number of convenience functions in this |
39 | // package act on. Initially, both variables point to the same Registry, which | 40 | // package act on. Initially, both variables point to the same Registry, which |
40 | // has a process collector (currently on Linux only, see NewProcessCollector) | 41 | // has a process collector (currently on Linux only, see NewProcessCollector) |
41 | // and a Go collector (see NewGoCollector) already registered. This approach to | 42 | // and a Go collector (see NewGoCollector, in particular the note about |
42 | // keep default instances as global state mirrors the approach of other packages | 43 | // stop-the-world implication with Go versions older than 1.9) already |
43 | // in the Go standard library. Note that there are caveats. Change the variables | 44 | // registered. This approach to keep default instances as global state mirrors |
44 | // with caution and only if you understand the consequences. Users who want to | 45 | // the approach of other packages in the Go standard library. Note that there |
45 | // avoid global state altogether should not use the convenience functions and | 46 | // are caveats. Change the variables with caution and only if you understand the |
46 | // act on custom instances instead. | 47 | // consequences. Users who want to avoid global state altogether should not use |
48 | // the convenience functions and act on custom instances instead. | ||
47 | var ( | 49 | var ( |
48 | defaultRegistry = NewRegistry() | 50 | defaultRegistry = NewRegistry() |
49 | DefaultRegisterer Registerer = defaultRegistry | 51 | DefaultRegisterer Registerer = defaultRegistry |
@@ -51,7 +53,7 @@ var ( | |||
51 | ) | 53 | ) |
52 | 54 | ||
53 | func init() { | 55 | func init() { |
54 | MustRegister(NewProcessCollector(os.Getpid(), "")) | 56 | MustRegister(NewProcessCollector(ProcessCollectorOpts{})) |
55 | MustRegister(NewGoCollector()) | 57 | MustRegister(NewGoCollector()) |
56 | } | 58 | } |
57 | 59 | ||
@@ -67,7 +69,8 @@ func NewRegistry() *Registry { | |||
67 | 69 | ||
68 | // NewPedanticRegistry returns a registry that checks during collection if each | 70 | // NewPedanticRegistry returns a registry that checks during collection if each |
69 | // collected Metric is consistent with its reported Desc, and if the Desc has | 71 | // collected Metric is consistent with its reported Desc, and if the Desc has |
70 | // actually been registered with the registry. | 72 | // actually been registered with the registry. Unchecked Collectors (those whose |
73 | // Describe method does not yield any descriptors) are excluded from the check. | ||
71 | // | 74 | // |
72 | // Usually, a Registry will be happy as long as the union of all collected | 75 | // Usually, a Registry will be happy as long as the union of all collected |
73 | // Metrics is consistent and valid even if some metrics are not consistent with | 76 | // Metrics is consistent and valid even if some metrics are not consistent with |
@@ -97,6 +100,14 @@ type Registerer interface { | |||
97 | // returned error is an instance of AlreadyRegisteredError, which | 100 | // returned error is an instance of AlreadyRegisteredError, which |
98 | // contains the previously registered Collector. | 101 | // contains the previously registered Collector. |
99 | // | 102 | // |
103 | // A Collector whose Describe method does not yield any Desc is treated | ||
104 | // as unchecked. Registration will always succeed. No check for | ||
105 | // re-registering (see previous paragraph) is performed. Thus, the | ||
106 | // caller is responsible for not double-registering the same unchecked | ||
107 | // Collector, and for providing a Collector that will not cause | ||
108 | // inconsistent metrics on collection. (This would lead to scrape | ||
109 | // errors.) | ||
110 | // | ||
100 | // It is in general not safe to register the same Collector multiple | 111 | // It is in general not safe to register the same Collector multiple |
101 | // times concurrently. | 112 | // times concurrently. |
102 | Register(Collector) error | 113 | Register(Collector) error |
@@ -107,7 +118,9 @@ type Registerer interface { | |||
107 | // Unregister unregisters the Collector that equals the Collector passed | 118 | // Unregister unregisters the Collector that equals the Collector passed |
108 | // in as an argument. (Two Collectors are considered equal if their | 119 | // in as an argument. (Two Collectors are considered equal if their |
109 | // Describe method yields the same set of descriptors.) The function | 120 | // Describe method yields the same set of descriptors.) The function |
110 | // returns whether a Collector was unregistered. | 121 | // returns whether a Collector was unregistered. Note that an unchecked |
122 | // Collector cannot be unregistered (as its Describe method does not | ||
123 | // yield any descriptor). | ||
111 | // | 124 | // |
112 | // Note that even after unregistering, it will not be possible to | 125 | // Note that even after unregistering, it will not be possible to |
113 | // register a new Collector that is inconsistent with the unregistered | 126 | // register a new Collector that is inconsistent with the unregistered |
@@ -125,15 +138,23 @@ type Registerer interface { | |||
125 | type Gatherer interface { | 138 | type Gatherer interface { |
126 | // Gather calls the Collect method of the registered Collectors and then | 139 | // Gather calls the Collect method of the registered Collectors and then |
127 | // gathers the collected metrics into a lexicographically sorted slice | 140 | // gathers the collected metrics into a lexicographically sorted slice |
128 | // of MetricFamily protobufs. Even if an error occurs, Gather attempts | 141 | // of uniquely named MetricFamily protobufs. Gather ensures that the |
129 | // to gather as many metrics as possible. Hence, if a non-nil error is | 142 | // returned slice is valid and self-consistent so that it can be used |
130 | // returned, the returned MetricFamily slice could be nil (in case of a | 143 | // for valid exposition. As an exception to the strict consistency |
131 | // fatal error that prevented any meaningful metric collection) or | 144 | // requirements described for metric.Desc, Gather will tolerate |
132 | // contain a number of MetricFamily protobufs, some of which might be | 145 | // different sets of label names for metrics of the same metric family. |
133 | // incomplete, and some might be missing altogether. The returned error | 146 | // |
134 | // (which might be a MultiError) explains the details. In scenarios | 147 | // Even if an error occurs, Gather attempts to gather as many metrics as |
135 | // where complete collection is critical, the returned MetricFamily | 148 | // possible. Hence, if a non-nil error is returned, the returned |
136 | // protobufs should be disregarded if the returned error is non-nil. | 149 | // MetricFamily slice could be nil (in case of a fatal error that |
150 | // prevented any meaningful metric collection) or contain a number of | ||
151 | // MetricFamily protobufs, some of which might be incomplete, and some | ||
152 | // might be missing altogether. The returned error (which might be a | ||
153 | // MultiError) explains the details. Note that this is mostly useful for | ||
154 | // debugging purposes. If the gathered protobufs are to be used for | ||
155 | // exposition in actual monitoring, it is almost always better to not | ||
156 | // expose an incomplete result and instead disregard the returned | ||
157 | // MetricFamily protobufs in case the returned error is non-nil. | ||
137 | Gather() ([]*dto.MetricFamily, error) | 158 | Gather() ([]*dto.MetricFamily, error) |
138 | } | 159 | } |
139 | 160 | ||
@@ -234,6 +255,7 @@ type Registry struct { | |||
234 | collectorsByID map[uint64]Collector // ID is a hash of the descIDs. | 255 | collectorsByID map[uint64]Collector // ID is a hash of the descIDs. |
235 | descIDs map[uint64]struct{} | 256 | descIDs map[uint64]struct{} |
236 | dimHashesByName map[string]uint64 | 257 | dimHashesByName map[string]uint64 |
258 | uncheckedCollectors []Collector | ||
237 | pedanticChecksEnabled bool | 259 | pedanticChecksEnabled bool |
238 | } | 260 | } |
239 | 261 | ||
@@ -291,9 +313,10 @@ func (r *Registry) Register(c Collector) error { | |||
291 | } | 313 | } |
292 | } | 314 | } |
293 | } | 315 | } |
294 | // Did anything happen at all? | 316 | // A Collector yielding no Desc at all is considered unchecked. |
295 | if len(newDescIDs) == 0 { | 317 | if len(newDescIDs) == 0 { |
296 | return errors.New("collector has no descriptors") | 318 | r.uncheckedCollectors = append(r.uncheckedCollectors, c) |
319 | return nil | ||
297 | } | 320 | } |
298 | if existing, exists := r.collectorsByID[collectorID]; exists { | 321 | if existing, exists := r.collectorsByID[collectorID]; exists { |
299 | return AlreadyRegisteredError{ | 322 | return AlreadyRegisteredError{ |
@@ -367,20 +390,24 @@ func (r *Registry) MustRegister(cs ...Collector) { | |||
367 | // Gather implements Gatherer. | 390 | // Gather implements Gatherer. |
368 | func (r *Registry) Gather() ([]*dto.MetricFamily, error) { | 391 | func (r *Registry) Gather() ([]*dto.MetricFamily, error) { |
369 | var ( | 392 | var ( |
370 | metricChan = make(chan Metric, capMetricChan) | 393 | checkedMetricChan = make(chan Metric, capMetricChan) |
371 | metricHashes = map[uint64]struct{}{} | 394 | uncheckedMetricChan = make(chan Metric, capMetricChan) |
372 | dimHashes = map[string]uint64{} | 395 | metricHashes = map[uint64]struct{}{} |
373 | wg sync.WaitGroup | 396 | wg sync.WaitGroup |
374 | errs MultiError // The collected errors to return in the end. | 397 | errs MultiError // The collected errors to return in the end. |
375 | registeredDescIDs map[uint64]struct{} // Only used for pedantic checks | 398 | registeredDescIDs map[uint64]struct{} // Only used for pedantic checks |
376 | ) | 399 | ) |
377 | 400 | ||
378 | r.mtx.RLock() | 401 | r.mtx.RLock() |
379 | goroutineBudget := len(r.collectorsByID) | 402 | goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) |
380 | metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) | 403 | metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) |
381 | collectors := make(chan Collector, len(r.collectorsByID)) | 404 | checkedCollectors := make(chan Collector, len(r.collectorsByID)) |
405 | uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) | ||
382 | for _, collector := range r.collectorsByID { | 406 | for _, collector := range r.collectorsByID { |
383 | collectors <- collector | 407 | checkedCollectors <- collector |
408 | } | ||
409 | for _, collector := range r.uncheckedCollectors { | ||
410 | uncheckedCollectors <- collector | ||
384 | } | 411 | } |
385 | // In case pedantic checks are enabled, we have to copy the map before | 412 | // In case pedantic checks are enabled, we have to copy the map before |
386 | // giving up the RLock. | 413 | // giving up the RLock. |
@@ -397,12 +424,14 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { | |||
397 | collectWorker := func() { | 424 | collectWorker := func() { |
398 | for { | 425 | for { |
399 | select { | 426 | select { |
400 | case collector := <-collectors: | 427 | case collector := <-checkedCollectors: |
401 | collector.Collect(metricChan) | 428 | collector.Collect(checkedMetricChan) |
402 | wg.Done() | 429 | case collector := <-uncheckedCollectors: |
430 | collector.Collect(uncheckedMetricChan) | ||
403 | default: | 431 | default: |
404 | return | 432 | return |
405 | } | 433 | } |
434 | wg.Done() | ||
406 | } | 435 | } |
407 | } | 436 | } |
408 | 437 | ||
@@ -410,53 +439,96 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { | |||
410 | go collectWorker() | 439 | go collectWorker() |
411 | goroutineBudget-- | 440 | goroutineBudget-- |
412 | 441 | ||
413 | // Close the metricChan once all collectors are collected. | 442 | // Close checkedMetricChan and uncheckedMetricChan once all collectors |
443 | // are collected. | ||
414 | go func() { | 444 | go func() { |
415 | wg.Wait() | 445 | wg.Wait() |
416 | close(metricChan) | 446 | close(checkedMetricChan) |
447 | close(uncheckedMetricChan) | ||
417 | }() | 448 | }() |
418 | 449 | ||
419 | // Drain metricChan in case of premature return. | 450 | // Drain checkedMetricChan and uncheckedMetricChan in case of premature return. |
420 | defer func() { | 451 | defer func() { |
421 | for range metricChan { | 452 | if checkedMetricChan != nil { |
453 | for range checkedMetricChan { | ||
454 | } | ||
455 | } | ||
456 | if uncheckedMetricChan != nil { | ||
457 | for range uncheckedMetricChan { | ||
458 | } | ||
422 | } | 459 | } |
423 | }() | 460 | }() |
424 | 461 | ||
425 | collectLoop: | 462 | // Copy the channel references so we can nil them out later to remove |
463 | // them from the select statements below. | ||
464 | cmc := checkedMetricChan | ||
465 | umc := uncheckedMetricChan | ||
466 | |||
426 | for { | 467 | for { |
427 | select { | 468 | select { |
428 | case metric, ok := <-metricChan: | 469 | case metric, ok := <-cmc: |
429 | if !ok { | 470 | if !ok { |
430 | // metricChan is closed, we are done. | 471 | cmc = nil |
431 | break collectLoop | 472 | break |
432 | } | 473 | } |
433 | errs.Append(processMetric( | 474 | errs.Append(processMetric( |
434 | metric, metricFamiliesByName, | 475 | metric, metricFamiliesByName, |
435 | metricHashes, dimHashes, | 476 | metricHashes, |
436 | registeredDescIDs, | 477 | registeredDescIDs, |
437 | )) | 478 | )) |
479 | case metric, ok := <-umc: | ||
480 | if !ok { | ||
481 | umc = nil | ||
482 | break | ||
483 | } | ||
484 | errs.Append(processMetric( | ||
485 | metric, metricFamiliesByName, | ||
486 | metricHashes, | ||
487 | nil, | ||
488 | )) | ||
438 | default: | 489 | default: |
439 | if goroutineBudget <= 0 || len(collectors) == 0 { | 490 | if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 { |
440 | // All collectors are aleady being worked on or | 491 | // All collectors are already being worked on or |
441 | // we have already as many goroutines started as | 492 | // we have already as many goroutines started as |
442 | // there are collectors. Just process metrics | 493 | // there are collectors. Do the same as above, |
443 | // from now on. | 494 | // just without the default. |
444 | for metric := range metricChan { | 495 | select { |
496 | case metric, ok := <-cmc: | ||
497 | if !ok { | ||
498 | cmc = nil | ||
499 | break | ||
500 | } | ||
445 | errs.Append(processMetric( | 501 | errs.Append(processMetric( |
446 | metric, metricFamiliesByName, | 502 | metric, metricFamiliesByName, |
447 | metricHashes, dimHashes, | 503 | metricHashes, |
448 | registeredDescIDs, | 504 | registeredDescIDs, |
449 | )) | 505 | )) |
506 | case metric, ok := <-umc: | ||
507 | if !ok { | ||
508 | umc = nil | ||
509 | break | ||
510 | } | ||
511 | errs.Append(processMetric( | ||
512 | metric, metricFamiliesByName, | ||
513 | metricHashes, | ||
514 | nil, | ||
515 | )) | ||
450 | } | 516 | } |
451 | break collectLoop | 517 | break |
452 | } | 518 | } |
453 | // Start more workers. | 519 | // Start more workers. |
454 | go collectWorker() | 520 | go collectWorker() |
455 | goroutineBudget-- | 521 | goroutineBudget-- |
456 | runtime.Gosched() | 522 | runtime.Gosched() |
457 | } | 523 | } |
524 | // Once both checkedMetricChan and uncheckedMetricChan are closed | ||
525 | // and drained, the contraption above will nil out cmc and umc, | ||
526 | // and then we can leave the collect loop here. | ||
527 | if cmc == nil && umc == nil { | ||
528 | break | ||
529 | } | ||
458 | } | 530 | } |
459 | return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() | 531 | return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() |
460 | } | 532 | } |
461 | 533 | ||
462 | // processMetric is an internal helper method only used by the Gather method. | 534 | // processMetric is an internal helper method only used by the Gather method. |
@@ -464,16 +536,20 @@ func processMetric( | |||
464 | metric Metric, | 536 | metric Metric, |
465 | metricFamiliesByName map[string]*dto.MetricFamily, | 537 | metricFamiliesByName map[string]*dto.MetricFamily, |
466 | metricHashes map[uint64]struct{}, | 538 | metricHashes map[uint64]struct{}, |
467 | dimHashes map[string]uint64, | ||
468 | registeredDescIDs map[uint64]struct{}, | 539 | registeredDescIDs map[uint64]struct{}, |
469 | ) error { | 540 | ) error { |
470 | desc := metric.Desc() | 541 | desc := metric.Desc() |
542 | // Wrapped metrics collected by an unchecked Collector can have an | ||
543 | // invalid Desc. | ||
544 | if desc.err != nil { | ||
545 | return desc.err | ||
546 | } | ||
471 | dtoMetric := &dto.Metric{} | 547 | dtoMetric := &dto.Metric{} |
472 | if err := metric.Write(dtoMetric); err != nil { | 548 | if err := metric.Write(dtoMetric); err != nil { |
473 | return fmt.Errorf("error collecting metric %v: %s", desc, err) | 549 | return fmt.Errorf("error collecting metric %v: %s", desc, err) |
474 | } | 550 | } |
475 | metricFamily, ok := metricFamiliesByName[desc.fqName] | 551 | metricFamily, ok := metricFamiliesByName[desc.fqName] |
476 | if ok { | 552 | if ok { // Existing name. |
477 | if metricFamily.GetHelp() != desc.help { | 553 | if metricFamily.GetHelp() != desc.help { |
478 | return fmt.Errorf( | 554 | return fmt.Errorf( |
479 | "collected metric %s %s has help %q but should have %q", | 555 | "collected metric %s %s has help %q but should have %q", |
@@ -520,7 +596,7 @@ func processMetric( | |||
520 | default: | 596 | default: |
521 | panic("encountered MetricFamily with invalid type") | 597 | panic("encountered MetricFamily with invalid type") |
522 | } | 598 | } |
523 | } else { | 599 | } else { // New name. |
524 | metricFamily = &dto.MetricFamily{} | 600 | metricFamily = &dto.MetricFamily{} |
525 | metricFamily.Name = proto.String(desc.fqName) | 601 | metricFamily.Name = proto.String(desc.fqName) |
526 | metricFamily.Help = proto.String(desc.help) | 602 | metricFamily.Help = proto.String(desc.help) |
@@ -539,9 +615,12 @@ func processMetric( | |||
539 | default: | 615 | default: |
540 | return fmt.Errorf("empty metric collected: %s", dtoMetric) | 616 | return fmt.Errorf("empty metric collected: %s", dtoMetric) |
541 | } | 617 | } |
618 | if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { | ||
619 | return err | ||
620 | } | ||
542 | metricFamiliesByName[desc.fqName] = metricFamily | 621 | metricFamiliesByName[desc.fqName] = metricFamily |
543 | } | 622 | } |
544 | if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { | 623 | if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { |
545 | return err | 624 | return err |
546 | } | 625 | } |
547 | if registeredDescIDs != nil { | 626 | if registeredDescIDs != nil { |
@@ -583,7 +662,6 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { | |||
583 | var ( | 662 | var ( |
584 | metricFamiliesByName = map[string]*dto.MetricFamily{} | 663 | metricFamiliesByName = map[string]*dto.MetricFamily{} |
585 | metricHashes = map[uint64]struct{}{} | 664 | metricHashes = map[uint64]struct{}{} |
586 | dimHashes = map[string]uint64{} | ||
587 | errs MultiError // The collected errors to return in the end. | 665 | errs MultiError // The collected errors to return in the end. |
588 | ) | 666 | ) |
589 | 667 | ||
@@ -620,10 +698,14 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { | |||
620 | existingMF.Name = mf.Name | 698 | existingMF.Name = mf.Name |
621 | existingMF.Help = mf.Help | 699 | existingMF.Help = mf.Help |
622 | existingMF.Type = mf.Type | 700 | existingMF.Type = mf.Type |
701 | if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { | ||
702 | errs = append(errs, err) | ||
703 | continue | ||
704 | } | ||
623 | metricFamiliesByName[mf.GetName()] = existingMF | 705 | metricFamiliesByName[mf.GetName()] = existingMF |
624 | } | 706 | } |
625 | for _, m := range mf.Metric { | 707 | for _, m := range mf.Metric { |
626 | if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil { | 708 | if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { |
627 | errs = append(errs, err) | 709 | errs = append(errs, err) |
628 | continue | 710 | continue |
629 | } | 711 | } |
@@ -631,87 +713,77 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { | |||
631 | } | 713 | } |
632 | } | 714 | } |
633 | } | 715 | } |
634 | return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() | 716 | return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() |
635 | } | 717 | } |
636 | 718 | ||
637 | // metricSorter is a sortable slice of *dto.Metric. | 719 | // checkSuffixCollisions checks for collisions with the “magic” suffixes the |
638 | type metricSorter []*dto.Metric | 720 | // Prometheus text format and the internal metric representation of the |
639 | 721 | // Prometheus server add while flattening Summaries and Histograms. | |
640 | func (s metricSorter) Len() int { | 722 | func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { |
641 | return len(s) | 723 | var ( |
642 | } | 724 | newName = mf.GetName() |
643 | 725 | newType = mf.GetType() | |
644 | func (s metricSorter) Swap(i, j int) { | 726 | newNameWithoutSuffix = "" |
645 | s[i], s[j] = s[j], s[i] | 727 | ) |
646 | } | 728 | switch { |
647 | 729 | case strings.HasSuffix(newName, "_count"): | |
648 | func (s metricSorter) Less(i, j int) bool { | 730 | newNameWithoutSuffix = newName[:len(newName)-6] |
649 | if len(s[i].Label) != len(s[j].Label) { | 731 | case strings.HasSuffix(newName, "_sum"): |
650 | // This should not happen. The metrics are | 732 | newNameWithoutSuffix = newName[:len(newName)-4] |
651 | // inconsistent. However, we have to deal with the fact, as | 733 | case strings.HasSuffix(newName, "_bucket"): |
652 | // people might use custom collectors or metric family injection | 734 | newNameWithoutSuffix = newName[:len(newName)-7] |
653 | // to create inconsistent metrics. So let's simply compare the | 735 | } |
654 | // number of labels in this case. That will still yield | 736 | if newNameWithoutSuffix != "" { |
655 | // reproducible sorting. | 737 | if existingMF, ok := mfs[newNameWithoutSuffix]; ok { |
656 | return len(s[i].Label) < len(s[j].Label) | 738 | switch existingMF.GetType() { |
657 | } | 739 | case dto.MetricType_SUMMARY: |
658 | for n, lp := range s[i].Label { | 740 | if !strings.HasSuffix(newName, "_bucket") { |
659 | vi := lp.GetValue() | 741 | return fmt.Errorf( |
660 | vj := s[j].Label[n].GetValue() | 742 | "collected metric named %q collides with previously collected summary named %q", |
661 | if vi != vj { | 743 | newName, newNameWithoutSuffix, |
662 | return vi < vj | 744 | ) |
745 | } | ||
746 | case dto.MetricType_HISTOGRAM: | ||
747 | return fmt.Errorf( | ||
748 | "collected metric named %q collides with previously collected histogram named %q", | ||
749 | newName, newNameWithoutSuffix, | ||
750 | ) | ||
751 | } | ||
663 | } | 752 | } |
664 | } | 753 | } |
665 | 754 | if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { | |
666 | // We should never arrive here. Multiple metrics with the same | 755 | if _, ok := mfs[newName+"_count"]; ok { |
667 | // label set in the same scrape will lead to undefined ingestion | 756 | return fmt.Errorf( |
668 | // behavior. However, as above, we have to provide stable sorting | 757 | "collected histogram or summary named %q collides with previously collected metric named %q", |
669 | // here, even for inconsistent metrics. So sort equal metrics | 758 | newName, newName+"_count", |
670 | // by their timestamp, with missing timestamps (implying "now") | 759 | ) |
671 | // coming last. | 760 | } |
672 | if s[i].TimestampMs == nil { | 761 | if _, ok := mfs[newName+"_sum"]; ok { |
673 | return false | 762 | return fmt.Errorf( |
674 | } | 763 | "collected histogram or summary named %q collides with previously collected metric named %q", |
675 | if s[j].TimestampMs == nil { | 764 | newName, newName+"_sum", |
676 | return true | 765 | ) |
677 | } | ||
678 | return s[i].GetTimestampMs() < s[j].GetTimestampMs() | ||
679 | } | ||
680 | |||
681 | // normalizeMetricFamilies returns a MetricFamily slice with empty | ||
682 | // MetricFamilies pruned and the remaining MetricFamilies sorted by name within | ||
683 | // the slice, with the contained Metrics sorted within each MetricFamily. | ||
684 | func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { | ||
685 | for _, mf := range metricFamiliesByName { | ||
686 | sort.Sort(metricSorter(mf.Metric)) | ||
687 | } | ||
688 | names := make([]string, 0, len(metricFamiliesByName)) | ||
689 | for name, mf := range metricFamiliesByName { | ||
690 | if len(mf.Metric) > 0 { | ||
691 | names = append(names, name) | ||
692 | } | 766 | } |
693 | } | 767 | } |
694 | sort.Strings(names) | 768 | if newType == dto.MetricType_HISTOGRAM { |
695 | result := make([]*dto.MetricFamily, 0, len(names)) | 769 | if _, ok := mfs[newName+"_bucket"]; ok { |
696 | for _, name := range names { | 770 | return fmt.Errorf( |
697 | result = append(result, metricFamiliesByName[name]) | 771 | "collected histogram named %q collides with previously collected metric named %q", |
772 | newName, newName+"_bucket", | ||
773 | ) | ||
774 | } | ||
698 | } | 775 | } |
699 | return result | 776 | return nil |
700 | } | 777 | } |
701 | 778 | ||
702 | // checkMetricConsistency checks if the provided Metric is consistent with the | 779 | // checkMetricConsistency checks if the provided Metric is consistent with the |
703 | // provided MetricFamily. It also hashed the Metric labels and the MetricFamily | 780 | // provided MetricFamily. It also hashes the Metric labels and the MetricFamily |
704 | // name. If the resulting hash is already in the provided metricHashes, an error | 781 | // name. If the resulting hash is already in the provided metricHashes, an error |
705 | // is returned. If not, it is added to metricHashes. The provided dimHashes maps | 782 | // is returned. If not, it is added to metricHashes. |
706 | // MetricFamily names to their dimHash (hashed sorted label names). If dimHashes | ||
707 | // doesn't yet contain a hash for the provided MetricFamily, it is | ||
708 | // added. Otherwise, an error is returned if the existing dimHashes in not equal | ||
709 | // the calculated dimHash. | ||
710 | func checkMetricConsistency( | 783 | func checkMetricConsistency( |
711 | metricFamily *dto.MetricFamily, | 784 | metricFamily *dto.MetricFamily, |
712 | dtoMetric *dto.Metric, | 785 | dtoMetric *dto.Metric, |
713 | metricHashes map[uint64]struct{}, | 786 | metricHashes map[uint64]struct{}, |
714 | dimHashes map[string]uint64, | ||
715 | ) error { | 787 | ) error { |
716 | // Type consistency with metric family. | 788 | // Type consistency with metric family. |
717 | if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || | 789 | if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || |
@@ -720,47 +792,50 @@ func checkMetricConsistency( | |||
720 | metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || | 792 | metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || |
721 | metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { | 793 | metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { |
722 | return fmt.Errorf( | 794 | return fmt.Errorf( |
723 | "collected metric %s %s is not a %s", | 795 | "collected metric %q { %s} is not a %s", |
724 | metricFamily.GetName(), dtoMetric, metricFamily.GetType(), | 796 | metricFamily.GetName(), dtoMetric, metricFamily.GetType(), |
725 | ) | 797 | ) |
726 | } | 798 | } |
727 | 799 | ||
728 | for _, labelPair := range dtoMetric.GetLabel() { | 800 | for _, labelPair := range dtoMetric.GetLabel() { |
729 | if !utf8.ValidString(*labelPair.Value) { | 801 | if !checkLabelName(labelPair.GetName()) { |
730 | return fmt.Errorf("collected metric's label %s is not utf8: %#v", *labelPair.Name, *labelPair.Value) | 802 | return fmt.Errorf( |
803 | "collected metric %q { %s} has a label with an invalid name: %s", | ||
804 | metricFamily.GetName(), dtoMetric, labelPair.GetName(), | ||
805 | ) | ||
806 | } | ||
807 | if dtoMetric.Summary != nil && labelPair.GetName() == quantileLabel { | ||
808 | return fmt.Errorf( | ||
809 | "collected metric %q { %s} must not have an explicit %q label", | ||
810 | metricFamily.GetName(), dtoMetric, quantileLabel, | ||
811 | ) | ||
812 | } | ||
813 | if !utf8.ValidString(labelPair.GetValue()) { | ||
814 | return fmt.Errorf( | ||
815 | "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", | ||
816 | metricFamily.GetName(), dtoMetric, labelPair.GetName(), labelPair.GetValue()) | ||
731 | } | 817 | } |
732 | } | 818 | } |
733 | 819 | ||
734 | // Is the metric unique (i.e. no other metric with the same name and the same label values)? | 820 | // Is the metric unique (i.e. no other metric with the same name and the same labels)? |
735 | h := hashNew() | 821 | h := hashNew() |
736 | h = hashAdd(h, metricFamily.GetName()) | 822 | h = hashAdd(h, metricFamily.GetName()) |
737 | h = hashAddByte(h, separatorByte) | 823 | h = hashAddByte(h, separatorByte) |
738 | dh := hashNew() | ||
739 | // Make sure label pairs are sorted. We depend on it for the consistency | 824 | // Make sure label pairs are sorted. We depend on it for the consistency |
740 | // check. | 825 | // check. |
741 | sort.Sort(LabelPairSorter(dtoMetric.Label)) | 826 | sort.Sort(labelPairSorter(dtoMetric.Label)) |
742 | for _, lp := range dtoMetric.Label { | 827 | for _, lp := range dtoMetric.Label { |
828 | h = hashAdd(h, lp.GetName()) | ||
829 | h = hashAddByte(h, separatorByte) | ||
743 | h = hashAdd(h, lp.GetValue()) | 830 | h = hashAdd(h, lp.GetValue()) |
744 | h = hashAddByte(h, separatorByte) | 831 | h = hashAddByte(h, separatorByte) |
745 | dh = hashAdd(dh, lp.GetName()) | ||
746 | dh = hashAddByte(dh, separatorByte) | ||
747 | } | 832 | } |
748 | if _, exists := metricHashes[h]; exists { | 833 | if _, exists := metricHashes[h]; exists { |
749 | return fmt.Errorf( | 834 | return fmt.Errorf( |
750 | "collected metric %s %s was collected before with the same name and label values", | 835 | "collected metric %q { %s} was collected before with the same name and label values", |
751 | metricFamily.GetName(), dtoMetric, | 836 | metricFamily.GetName(), dtoMetric, |
752 | ) | 837 | ) |
753 | } | 838 | } |
754 | if dimHash, ok := dimHashes[metricFamily.GetName()]; ok { | ||
755 | if dimHash != dh { | ||
756 | return fmt.Errorf( | ||
757 | "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family", | ||
758 | metricFamily.GetName(), dtoMetric, | ||
759 | ) | ||
760 | } | ||
761 | } else { | ||
762 | dimHashes[metricFamily.GetName()] = dh | ||
763 | } | ||
764 | metricHashes[h] = struct{}{} | 839 | metricHashes[h] = struct{}{} |
765 | return nil | 840 | return nil |
766 | } | 841 | } |
@@ -792,7 +867,7 @@ func checkDescConsistency( | |||
792 | metricFamily.GetName(), dtoMetric, desc, | 867 | metricFamily.GetName(), dtoMetric, desc, |
793 | ) | 868 | ) |
794 | } | 869 | } |
795 | sort.Sort(LabelPairSorter(lpsFromDesc)) | 870 | sort.Sort(labelPairSorter(lpsFromDesc)) |
796 | for i, lpFromDesc := range lpsFromDesc { | 871 | for i, lpFromDesc := range lpsFromDesc { |
797 | lpFromMetric := dtoMetric.Label[i] | 872 | lpFromMetric := dtoMetric.Label[i] |
798 | if lpFromDesc.GetName() != lpFromMetric.GetName() || | 873 | if lpFromDesc.GetName() != lpFromMetric.GetName() || |
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index f7dc85b..f7e92d8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go | |||
@@ -37,7 +37,7 @@ const quantileLabel = "quantile" | |||
37 | // A typical use-case is the observation of request latencies. By default, a | 37 | // A typical use-case is the observation of request latencies. By default, a |
38 | // Summary provides the median, the 90th and the 99th percentile of the latency | 38 | // Summary provides the median, the 90th and the 99th percentile of the latency |
39 | // as rank estimations. However, the default behavior will change in the | 39 | // as rank estimations. However, the default behavior will change in the |
40 | // upcoming v0.10 of the library. There will be no rank estiamtions at all by | 40 | // upcoming v0.10 of the library. There will be no rank estimations at all by |
41 | // default. For a sane transition, it is recommended to set the desired rank | 41 | // default. For a sane transition, it is recommended to set the desired rank |
42 | // estimations explicitly. | 42 | // estimations explicitly. |
43 | // | 43 | // |
@@ -81,10 +81,10 @@ const ( | |||
81 | ) | 81 | ) |
82 | 82 | ||
83 | // SummaryOpts bundles the options for creating a Summary metric. It is | 83 | // SummaryOpts bundles the options for creating a Summary metric. It is |
84 | // mandatory to set Name and Help to a non-empty string. While all other fields | 84 | // mandatory to set Name to a non-empty string. While all other fields are |
85 | // are optional and can safely be left at their zero value, it is recommended to | 85 | // optional and can safely be left at their zero value, it is recommended to set |
86 | // explicitly set the Objectives field to the desired value as the default value | 86 | // a help string and to explicitly set the Objectives field to the desired value |
87 | // will change in the upcoming v0.10 of the library. | 87 | // as the default value will change in the upcoming v0.10 of the library. |
88 | type SummaryOpts struct { | 88 | type SummaryOpts struct { |
89 | // Namespace, Subsystem, and Name are components of the fully-qualified | 89 | // Namespace, Subsystem, and Name are components of the fully-qualified |
90 | // name of the Summary (created by joining these components with | 90 | // name of the Summary (created by joining these components with |
@@ -95,7 +95,7 @@ type SummaryOpts struct { | |||
95 | Subsystem string | 95 | Subsystem string |
96 | Name string | 96 | Name string |
97 | 97 | ||
98 | // Help provides information about this Summary. Mandatory! | 98 | // Help provides information about this Summary. |
99 | // | 99 | // |
100 | // Metrics with the same fully-qualified name must have the same Help | 100 | // Metrics with the same fully-qualified name must have the same Help |
101 | // string. | 101 | // string. |
@@ -105,6 +105,11 @@ type SummaryOpts struct { | |||
105 | // with the same fully-qualified name must have the same label names in | 105 | // with the same fully-qualified name must have the same label names in |
106 | // their ConstLabels. | 106 | // their ConstLabels. |
107 | // | 107 | // |
108 | // Due to the way a Summary is represented in the Prometheus text format | ||
109 | // and how it is handled by the Prometheus server internally, “quantile” | ||
110 | // is an illegal label name. Construction of a Summary or SummaryVec | ||
111 | // will panic if this label name is used in ConstLabels. | ||
112 | // | ||
108 | // ConstLabels are only used rarely. In particular, do not use them to | 113 | // ConstLabels are only used rarely. In particular, do not use them to |
109 | // attach the same labels to all your metrics. Those use cases are | 114 | // attach the same labels to all your metrics. Those use cases are |
110 | // better covered by target labels set by the scraping Prometheus | 115 | // better covered by target labels set by the scraping Prometheus |
@@ -402,7 +407,16 @@ type SummaryVec struct { | |||
402 | 407 | ||
403 | // NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and | 408 | // NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and |
404 | // partitioned by the given label names. | 409 | // partitioned by the given label names. |
410 | // | ||
411 | // Due to the way a Summary is represented in the Prometheus text format and how | ||
412 | // it is handled by the Prometheus server internally, “quantile” is an illegal | ||
413 | // label name. NewSummaryVec will panic if this label name is used. | ||
405 | func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { | 414 | func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { |
415 | for _, ln := range labelNames { | ||
416 | if ln == quantileLabel { | ||
417 | panic(errQuantileLabelNotAllowed) | ||
418 | } | ||
419 | } | ||
406 | desc := NewDesc( | 420 | desc := NewDesc( |
407 | BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | 421 | BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), |
408 | opts.Help, | 422 | opts.Help, |
@@ -572,7 +586,7 @@ func (s *constSummary) Write(out *dto.Metric) error { | |||
572 | // map[float64]float64{0.5: 0.23, 0.99: 0.56} | 586 | // map[float64]float64{0.5: 0.23, 0.99: 0.56} |
573 | // | 587 | // |
574 | // NewConstSummary returns an error if the length of labelValues is not | 588 | // NewConstSummary returns an error if the length of labelValues is not |
575 | // consistent with the variable labels in Desc. | 589 | // consistent with the variable labels in Desc or if Desc is invalid. |
576 | func NewConstSummary( | 590 | func NewConstSummary( |
577 | desc *Desc, | 591 | desc *Desc, |
578 | count uint64, | 592 | count uint64, |
@@ -580,6 +594,9 @@ func NewConstSummary( | |||
580 | quantiles map[float64]float64, | 594 | quantiles map[float64]float64, |
581 | labelValues ...string, | 595 | labelValues ...string, |
582 | ) (Metric, error) { | 596 | ) (Metric, error) { |
597 | if desc.err != nil { | ||
598 | return nil, desc.err | ||
599 | } | ||
583 | if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { | 600 | if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { |
584 | return nil, err | 601 | return nil, err |
585 | } | 602 | } |
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index 543b57c..eb248f1 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go | |||
@@ -17,9 +17,9 @@ import ( | |||
17 | "fmt" | 17 | "fmt" |
18 | "sort" | 18 | "sort" |
19 | 19 | ||
20 | dto "github.com/prometheus/client_model/go" | ||
21 | |||
22 | "github.com/golang/protobuf/proto" | 20 | "github.com/golang/protobuf/proto" |
21 | |||
22 | dto "github.com/prometheus/client_model/go" | ||
23 | ) | 23 | ) |
24 | 24 | ||
25 | // ValueType is an enumeration of metric types that represent a simple value. | 25 | // ValueType is an enumeration of metric types that represent a simple value. |
@@ -77,8 +77,12 @@ func (v *valueFunc) Write(out *dto.Metric) error { | |||
77 | // operations. However, when implementing custom Collectors, it is useful as a | 77 | // operations. However, when implementing custom Collectors, it is useful as a |
78 | // throw-away metric that is generated on the fly to send it to Prometheus in | 78 | // throw-away metric that is generated on the fly to send it to Prometheus in |
79 | // the Collect method. NewConstMetric returns an error if the length of | 79 | // the Collect method. NewConstMetric returns an error if the length of |
80 | // labelValues is not consistent with the variable labels in Desc. | 80 | // labelValues is not consistent with the variable labels in Desc or if Desc is |
81 | // invalid. | ||
81 | func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { | 82 | func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { |
83 | if desc.err != nil { | ||
84 | return nil, desc.err | ||
85 | } | ||
82 | if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { | 86 | if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { |
83 | return nil, err | 87 | return nil, err |
84 | } | 88 | } |
@@ -152,9 +156,7 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { | |||
152 | Value: proto.String(labelValues[i]), | 156 | Value: proto.String(labelValues[i]), |
153 | }) | 157 | }) |
154 | } | 158 | } |
155 | for _, lp := range desc.constLabelPairs { | 159 | labelPairs = append(labelPairs, desc.constLabelPairs...) |
156 | labelPairs = append(labelPairs, lp) | 160 | sort.Sort(labelPairSorter(labelPairs)) |
157 | } | ||
158 | sort.Sort(LabelPairSorter(labelPairs)) | ||
159 | return labelPairs | 161 | return labelPairs |
160 | } | 162 | } |
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index cea1582..14ed9e8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go | |||
@@ -277,6 +277,9 @@ func (m *metricMap) deleteByHashWithLabelValues( | |||
277 | func (m *metricMap) deleteByHashWithLabels( | 277 | func (m *metricMap) deleteByHashWithLabels( |
278 | h uint64, labels Labels, curry []curriedLabelValue, | 278 | h uint64, labels Labels, curry []curriedLabelValue, |
279 | ) bool { | 279 | ) bool { |
280 | m.mtx.Lock() | ||
281 | defer m.mtx.Unlock() | ||
282 | |||
280 | metrics, ok := m.metrics[h] | 283 | metrics, ok := m.metrics[h] |
281 | if !ok { | 284 | if !ok { |
282 | return false | 285 | return false |
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go new file mode 100644 index 0000000..49159bf --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go | |||
@@ -0,0 +1,179 @@ | |||
1 | // Copyright 2018 The Prometheus Authors | ||
2 | // Licensed under the Apache License, Version 2.0 (the "License"); | ||
3 | // you may not use this file except in compliance with the License. | ||
4 | // You may obtain a copy of the License at | ||
5 | // | ||
6 | // http://www.apache.org/licenses/LICENSE-2.0 | ||
7 | // | ||
8 | // Unless required by applicable law or agreed to in writing, software | ||
9 | // distributed under the License is distributed on an "AS IS" BASIS, | ||
10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
11 | // See the License for the specific language governing permissions and | ||
12 | // limitations under the License. | ||
13 | |||
14 | package prometheus | ||
15 | |||
16 | import ( | ||
17 | "fmt" | ||
18 | "sort" | ||
19 | |||
20 | "github.com/golang/protobuf/proto" | ||
21 | |||
22 | dto "github.com/prometheus/client_model/go" | ||
23 | ) | ||
24 | |||
25 | // WrapRegistererWith returns a Registerer wrapping the provided | ||
26 | // Registerer. Collectors registered with the returned Registerer will be | ||
27 | // registered with the wrapped Registerer in a modified way. The modified | ||
28 | // Collector adds the provided Labels to all Metrics it collects (as | ||
29 | // ConstLabels). The Metrics collected by the unmodified Collector must not | ||
30 | // duplicate any of those labels. | ||
31 | // | ||
32 | // WrapRegistererWith provides a way to add fixed labels to a subset of | ||
33 | // Collectors. It should not be used to add fixed labels to all metrics exposed. | ||
34 | // | ||
35 | // The Collector example demonstrates a use of WrapRegistererWith. | ||
36 | func WrapRegistererWith(labels Labels, reg Registerer) Registerer { | ||
37 | return &wrappingRegisterer{ | ||
38 | wrappedRegisterer: reg, | ||
39 | labels: labels, | ||
40 | } | ||
41 | } | ||
42 | |||
43 | // WrapRegistererWithPrefix returns a Registerer wrapping the provided | ||
44 | // Registerer. Collectors registered with the returned Registerer will be | ||
45 | // registered with the wrapped Registerer in a modified way. The modified | ||
46 | // Collector adds the provided prefix to the name of all Metrics it collects. | ||
47 | // | ||
48 | // WrapRegistererWithPrefix is useful to have one place to prefix all metrics of | ||
49 | // a sub-system. To make this work, register metrics of the sub-system with the | ||
50 | // wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful | ||
51 | // to use the same prefix for all metrics exposed. In particular, do not prefix | ||
52 | // metric names that are standardized across applications, as that would break | ||
53 | // horizontal monitoring, for example the metrics provided by the Go collector | ||
54 | // (see NewGoCollector) and the process collector (see NewProcessCollector). (In | ||
55 | // fact, those metrics are already prefixed with “go_” or “process_”, | ||
56 | // respectively.) | ||
57 | func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { | ||
58 | return &wrappingRegisterer{ | ||
59 | wrappedRegisterer: reg, | ||
60 | prefix: prefix, | ||
61 | } | ||
62 | } | ||
63 | |||
64 | type wrappingRegisterer struct { | ||
65 | wrappedRegisterer Registerer | ||
66 | prefix string | ||
67 | labels Labels | ||
68 | } | ||
69 | |||
70 | func (r *wrappingRegisterer) Register(c Collector) error { | ||
71 | return r.wrappedRegisterer.Register(&wrappingCollector{ | ||
72 | wrappedCollector: c, | ||
73 | prefix: r.prefix, | ||
74 | labels: r.labels, | ||
75 | }) | ||
76 | } | ||
77 | |||
78 | func (r *wrappingRegisterer) MustRegister(cs ...Collector) { | ||
79 | for _, c := range cs { | ||
80 | if err := r.Register(c); err != nil { | ||
81 | panic(err) | ||
82 | } | ||
83 | } | ||
84 | } | ||
85 | |||
86 | func (r *wrappingRegisterer) Unregister(c Collector) bool { | ||
87 | return r.wrappedRegisterer.Unregister(&wrappingCollector{ | ||
88 | wrappedCollector: c, | ||
89 | prefix: r.prefix, | ||
90 | labels: r.labels, | ||
91 | }) | ||
92 | } | ||
93 | |||
94 | type wrappingCollector struct { | ||
95 | wrappedCollector Collector | ||
96 | prefix string | ||
97 | labels Labels | ||
98 | } | ||
99 | |||
100 | func (c *wrappingCollector) Collect(ch chan<- Metric) { | ||
101 | wrappedCh := make(chan Metric) | ||
102 | go func() { | ||
103 | c.wrappedCollector.Collect(wrappedCh) | ||
104 | close(wrappedCh) | ||
105 | }() | ||
106 | for m := range wrappedCh { | ||
107 | ch <- &wrappingMetric{ | ||
108 | wrappedMetric: m, | ||
109 | prefix: c.prefix, | ||
110 | labels: c.labels, | ||
111 | } | ||
112 | } | ||
113 | } | ||
114 | |||
115 | func (c *wrappingCollector) Describe(ch chan<- *Desc) { | ||
116 | wrappedCh := make(chan *Desc) | ||
117 | go func() { | ||
118 | c.wrappedCollector.Describe(wrappedCh) | ||
119 | close(wrappedCh) | ||
120 | }() | ||
121 | for desc := range wrappedCh { | ||
122 | ch <- wrapDesc(desc, c.prefix, c.labels) | ||
123 | } | ||
124 | } | ||
125 | |||
126 | type wrappingMetric struct { | ||
127 | wrappedMetric Metric | ||
128 | prefix string | ||
129 | labels Labels | ||
130 | } | ||
131 | |||
132 | func (m *wrappingMetric) Desc() *Desc { | ||
133 | return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels) | ||
134 | } | ||
135 | |||
136 | func (m *wrappingMetric) Write(out *dto.Metric) error { | ||
137 | if err := m.wrappedMetric.Write(out); err != nil { | ||
138 | return err | ||
139 | } | ||
140 | if len(m.labels) == 0 { | ||
141 | // No wrapping labels. | ||
142 | return nil | ||
143 | } | ||
144 | for ln, lv := range m.labels { | ||
145 | out.Label = append(out.Label, &dto.LabelPair{ | ||
146 | Name: proto.String(ln), | ||
147 | Value: proto.String(lv), | ||
148 | }) | ||
149 | } | ||
150 | sort.Sort(labelPairSorter(out.Label)) | ||
151 | return nil | ||
152 | } | ||
153 | |||
154 | func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { | ||
155 | constLabels := Labels{} | ||
156 | for _, lp := range desc.constLabelPairs { | ||
157 | constLabels[*lp.Name] = *lp.Value | ||
158 | } | ||
159 | for ln, lv := range labels { | ||
160 | if _, alreadyUsed := constLabels[ln]; alreadyUsed { | ||
161 | return &Desc{ | ||
162 | fqName: desc.fqName, | ||
163 | help: desc.help, | ||
164 | variableLabels: desc.variableLabels, | ||
165 | constLabelPairs: desc.constLabelPairs, | ||
166 | err: fmt.Errorf("attempted wrapping with already existing label name %q", ln), | ||
167 | } | ||
168 | } | ||
169 | constLabels[ln] = lv | ||
170 | } | ||
171 | // NewDesc will do remaining validations. | ||
172 | newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) | ||
173 | // Propagate errors if there was any. This will override any errer | ||
174 | // created by NewDesc above, i.e. earlier errors get precedence. | ||
175 | if desc.err != nil { | ||
176 | newDesc.err = desc.err | ||
177 | } | ||
178 | return newDesc | ||
179 | } | ||
diff --git a/vendor/vendor.json b/vendor/vendor.json index 02667b6..a807c9c 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json | |||
@@ -115,16 +115,22 @@ | |||
115 | "revisionTime": "2017-09-01T18:29:50Z" | 115 | "revisionTime": "2017-09-01T18:29:50Z" |
116 | }, | 116 | }, |
117 | { | 117 | { |
118 | "checksumSHA1": "I87tkF1e/hrl4d/XIKFfkPRq1ww=", | 118 | "checksumSHA1": "lLvg5TpUtFbkyAoh+aI5T/nnpWw=", |
119 | "path": "github.com/prometheus/client_golang/prometheus", | 119 | "path": "github.com/prometheus/client_golang/prometheus", |
120 | "revision": "e69720d204a4aa3b0c65dc91208645ba0a52b9cd", | 120 | "revision": "e637cec7d9c8990247098639ebc6d43dd34ddd49", |
121 | "revisionTime": "2018-02-16T13:12:53Z" | 121 | "revisionTime": "2018-09-17T10:21:22Z" |
122 | }, | 122 | }, |
123 | { | 123 | { |
124 | "checksumSHA1": "mIWVz1E1QJ6yZnf7ELNwLboyK4w=", | 124 | "checksumSHA1": "UBqhkyjCz47+S19MVTigxJ2VjVQ=", |
125 | "path": "github.com/prometheus/client_golang/prometheus/internal", | ||
126 | "revision": "e637cec7d9c8990247098639ebc6d43dd34ddd49", | ||
127 | "revisionTime": "2018-09-17T10:21:22Z" | ||
128 | }, | ||
129 | { | ||
130 | "checksumSHA1": "d5BiEvD8MrgpWQ6PQJUvawJsMak=", | ||
125 | "path": "github.com/prometheus/client_golang/prometheus/promhttp", | 131 | "path": "github.com/prometheus/client_golang/prometheus/promhttp", |
126 | "revision": "e69720d204a4aa3b0c65dc91208645ba0a52b9cd", | 132 | "revision": "e637cec7d9c8990247098639ebc6d43dd34ddd49", |
127 | "revisionTime": "2018-02-16T13:12:53Z" | 133 | "revisionTime": "2018-09-17T10:21:22Z" |
128 | }, | 134 | }, |
129 | { | 135 | { |
130 | "checksumSHA1": "DvwvOlPNAgRntBzt3b3OSRMS2N4=", | 136 | "checksumSHA1": "DvwvOlPNAgRntBzt3b3OSRMS2N4=", |