author    Ben Kochie <superq@gmail.com>  2018-11-20 18:11:40 +0100
committer Johannes 'fish' Ziemke <github@freigeist.org>  2018-11-20 18:11:40 +0100
commit    ffefc8e74dd59143f93af4cecbbae4799bce8c35 (patch)
tree      cba111f047c981e60913e6c29461cda56b1958fa /node_exporter.go
parent    bcec99e0aa2264d64e51956251f09307b85e8c81 (diff)
Add a limit to the number of in-flight requests (#1166)
In order to avoid stuck collectors using up all system resources, add a limit to the number of parallel in-flight scrape requests. Scrapes arriving over that limit are rejected with a 503 error. Default to 40 requests, which seems like a reasonable number based on:

* Two Prometheus servers scraping every 15 seconds.
* Failing scrapes after 5 minutes of stuckness.

Signed-off-by: Ben Kochie <superq@gmail.com>
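For context, a minimal standalone sketch (not part of this commit) of the promhttp mechanism the change relies on: setting MaxRequestsInFlight in promhttp.HandlerOpts makes the handler reject scrapes over the cap with HTTP 503.

// Minimal sketch, not from this repo: HandlerOpts.MaxRequestsInFlight
// caps concurrent scrapes; requests over the cap receive HTTP 503.
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		// Mirrors the node_exporter default added by this commit.
		MaxRequestsInFlight: 40,
	}))
	http.ListenAndServe(":9100", nil)
}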
Diffstat (limited to 'node_exporter.go')
 node_exporter.go | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/node_exporter.go b/node_exporter.go
index 0475a6a..550d7b1 100644
--- a/node_exporter.go
+++ b/node_exporter.go
@@ -36,12 +36,14 @@ type handler struct {
 	// the exporter itself.
 	exporterMetricsRegistry *prometheus.Registry
 	includeExporterMetrics  bool
+	maxRequests             int
 }
 
-func newHandler(includeExporterMetrics bool) *handler {
+func newHandler(includeExporterMetrics bool, maxRequests int) *handler {
 	h := &handler{
 		exporterMetricsRegistry: prometheus.NewRegistry(),
 		includeExporterMetrics:  includeExporterMetrics,
+		maxRequests:             maxRequests,
 	}
 	if h.includeExporterMetrics {
 		h.exporterMetricsRegistry.MustRegister(
@@ -111,8 +113,9 @@ func (h *handler) innerHandler(filters ...string) (http.Handler, error) {
 	handler := promhttp.HandlerFor(
 		prometheus.Gatherers{h.exporterMetricsRegistry, r},
 		promhttp.HandlerOpts{
-			ErrorLog:      log.NewErrorLogger(),
-			ErrorHandling: promhttp.ContinueOnError,
+			ErrorLog:            log.NewErrorLogger(),
+			ErrorHandling:       promhttp.ContinueOnError,
+			MaxRequestsInFlight: h.maxRequests,
 		},
 	)
 	if h.includeExporterMetrics {
@@ -139,6 +142,10 @@ func main() {
 		"web.disable-exporter-metrics",
 		"Exclude metrics about the exporter itself (promhttp_*, process_*, go_*).",
 	).Bool()
+	maxRequests = kingpin.Flag(
+		"web.max-requests",
+		"Maximum number of parallel scrape requests. Use 0 to disable.",
+	).Default("40").Int()
 	)
 
 	log.AddFlags(kingpin.CommandLine)
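As an aside, a hedged standalone sketch of the kingpin pattern used in the hunk above: a flag declared with a string default, parsed into an int pointer. The program and its output are illustrative only.

// Hedged sketch of the kingpin flag pattern; not node_exporter code.
package main

import (
	"fmt"

	"gopkg.in/alecthomas/kingpin.v2"
)

var maxRequests = kingpin.Flag(
	"web.max-requests",
	"Maximum number of parallel scrape requests. Use 0 to disable.",
).Default("40").Int()

func main() {
	kingpin.Parse()
	// Prints 40 unless --web.max-requests is given on the command line.
	fmt.Println("limit:", *maxRequests)
}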
@@ -149,7 +156,7 @@
 	log.Infoln("Starting node_exporter", version.Info())
 	log.Infoln("Build context", version.BuildContext())
 
-	http.Handle(*metricsPath, newHandler(!*disableExporterMetrics))
+	http.Handle(*metricsPath, newHandler(!*disableExporterMetrics, *maxRequests))
 	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
 		w.Write([]byte(`<html>
 			<head><title>Node Exporter</title></head>
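To see the new limit in action, a hedged demonstration sketch (blockingCollector and demo_stuck are hypothetical names, not from node_exporter): one scrape is held open by a stalling collector, and a second concurrent scrape is rejected once the in-flight limit is reached.

// Hedged sketch, not part of node_exporter: a hypothetical collector
// stalls in Collect to simulate the "stuck collector" this commit
// guards against; the limit is set to 1 so the 503 triggers quickly.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type blockingCollector struct {
	started chan struct{} // closed when a scrape enters Collect
	release chan struct{} // closed to let the stuck scrape finish
	desc    *prometheus.Desc
}

func (c *blockingCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *blockingCollector) Collect(ch chan<- prometheus.Metric) {
	close(c.started) // signal: one scrape is now in flight
	<-c.release      // stall, simulating a stuck collector
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 1)
}

func main() {
	bc := &blockingCollector{
		started: make(chan struct{}),
		release: make(chan struct{}),
		desc:    prometheus.NewDesc("demo_stuck", "stalls until released", nil, nil),
	}
	reg := prometheus.NewRegistry()
	reg.MustRegister(bc)

	srv := httptest.NewServer(promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		MaxRequestsInFlight: 1,
	}))
	defer srv.Close()

	go http.Get(srv.URL) // first scrape: blocks inside Collect
	<-bc.started         // wait until it is actually in flight

	resp, err := http.Get(srv.URL) // second scrape: over the limit
	if err == nil {
		fmt.Println(resp.StatusCode) // expect 503 (Service Unavailable)
		resp.Body.Close()
	}
	close(bc.release) // unblock the first scrape
}

Running this should print 503, matching the behavior described in the commit message.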