aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--CHANGELOG.md10
-rw-r--r--Makefile8
-rw-r--r--VERSION2
-rwxr-xr-xcheckmetrics.sh10
-rw-r--r--collector/arp_linux.go4
-rw-r--r--collector/bonding_linux.go5
-rw-r--r--collector/btrfs_linux.go4
-rw-r--r--collector/buddyinfo.go2
-rw-r--r--collector/diskstats_darwin.go2
-rw-r--r--collector/diskstats_linux.go4
-rw-r--r--collector/drbd_linux.go5
-rw-r--r--collector/edac_linux.go12
-rw-r--r--collector/entropy_linux.go2
-rw-r--r--collector/filefd_linux.go4
-rw-r--r--collector/filesystem_common.go3
-rw-r--r--collector/filesystem_freebsd.go6
-rw-r--r--collector/filesystem_linux.go3
-rw-r--r--collector/filesystem_linux_test.go2
-rw-r--r--collector/fixtures/e2e-64k-page-output.txt3
-rw-r--r--collector/fixtures/e2e-output.txt3
-rw-r--r--collector/fixtures/sys.ttar11
-rw-r--r--collector/helper.go14
-rw-r--r--collector/helper_test.go63
-rw-r--r--collector/hwmon_linux.go2
-rw-r--r--collector/infiniband_linux.go5
-rw-r--r--collector/interrupts_linux.go4
-rw-r--r--collector/interrupts_openbsd.go2
-rw-r--r--collector/ipvs_linux.go7
-rw-r--r--collector/kvm_bsd.go2
-rw-r--r--collector/loadavg.go2
-rw-r--r--collector/loadavg_linux.go2
-rw-r--r--collector/logind_linux.go6
-rw-r--r--collector/mdadm_linux.go11
-rw-r--r--collector/meminfo.go2
-rw-r--r--collector/meminfo_linux.go2
-rw-r--r--collector/meminfo_numa_linux.go6
-rw-r--r--collector/meminfo_openbsd.go2
-rw-r--r--collector/memory_bsd.go6
-rw-r--r--collector/mountstats_linux.go2
-rw-r--r--collector/netclass_linux.go4
-rw-r--r--collector/netdev_common.go69
-rw-r--r--collector/netstat_linux.go13
-rw-r--r--collector/nfs_linux.go5
-rw-r--r--collector/nfsd_linux.go5
-rw-r--r--collector/ntp.go2
-rw-r--r--collector/perf_linux.go2
-rw-r--r--collector/powersupplyclass.go9
-rw-r--r--collector/processes_linux.go9
-rw-r--r--collector/schedstat_linux.go5
-rw-r--r--collector/sockstat_linux.go5
-rw-r--r--collector/softnet_linux.go2
-rw-r--r--collector/supervisord.go2
-rw-r--r--collector/sysctl_bsd.go2
-rw-r--r--collector/systemd_linux.go50
-rw-r--r--collector/systemd_linux_test.go10
-rw-r--r--collector/tcpstat_linux.go4
-rw-r--r--collector/textfile.go6
-rw-r--r--collector/udp_queues_linux.go13
-rw-r--r--collector/wifi_linux.go15
-rw-r--r--collector/xfs_linux.go2
-rw-r--r--collector/zfs_freebsd.go4
-rw-r--r--collector/zfs_linux.go4
-rw-r--r--collector/zfs_solaris.go1
-rw-r--r--go.mod2
-rw-r--r--go.sum2
-rw-r--r--https/web-config.yml15
-rw-r--r--vendor/github.com/prometheus/procfs/Makefile.common11
-rw-r--r--vendor/github.com/prometheus/procfs/bcache/bcache.go20
-rw-r--r--vendor/github.com/prometheus/procfs/bcache/get.go109
-rw-r--r--vendor/github.com/prometheus/procfs/cpuinfo.go265
-rw-r--r--vendor/github.com/prometheus/procfs/cpuinfo_arm.go18
-rw-r--r--vendor/github.com/prometheus/procfs/cpuinfo_arm64.go19
-rw-r--r--vendor/github.com/prometheus/procfs/cpuinfo_default.go19
-rw-r--r--vendor/github.com/prometheus/procfs/cpuinfo_mips.go18
-rw-r--r--vendor/github.com/prometheus/procfs/cpuinfo_mips64.go18
-rw-r--r--vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go18
-rw-r--r--vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go18
-rw-r--r--vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go18
-rw-r--r--vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go18
-rw-r--r--vendor/github.com/prometheus/procfs/cpuinfo_s390x.go18
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures.ttar737
-rw-r--r--vendor/github.com/prometheus/procfs/fscache.go422
-rw-r--r--vendor/github.com/prometheus/procfs/internal/util/parse.go9
-rw-r--r--vendor/github.com/prometheus/procfs/kernel_random.go62
-rw-r--r--vendor/github.com/prometheus/procfs/mdstat.go2
-rw-r--r--vendor/github.com/prometheus/procfs/mountinfo.go8
-rw-r--r--vendor/github.com/prometheus/procfs/mountstats.go20
-rw-r--r--vendor/github.com/prometheus/procfs/net_conntrackstat.go2
-rw-r--r--vendor/github.com/prometheus/procfs/proc.go21
-rw-r--r--vendor/github.com/prometheus/procfs/proc_cgroup.go98
-rw-r--r--vendor/github.com/prometheus/procfs/proc_fdinfo.go2
-rw-r--r--vendor/github.com/prometheus/procfs/proc_maps.go3
-rw-r--r--vendor/github.com/prometheus/procfs/proc_smaps.go165
-rw-r--r--vendor/github.com/prometheus/procfs/sysfs/class_fibrechannel.go249
-rw-r--r--vendor/github.com/prometheus/procfs/sysfs/class_infiniband.go44
-rw-r--r--vendor/github.com/prometheus/procfs/sysfs/class_power_supply.go2
-rw-r--r--vendor/github.com/prometheus/procfs/sysfs/class_thermal.go26
-rw-r--r--vendor/modules.txt2
98 files changed, 2748 insertions, 220 deletions
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7f32b94..9d45de4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,10 +1,18 @@
1## master / unreleased 1## master / unreleased
2 2
3* [CHANGE] Improve filter flag names.
3* [CHANGE] 4* [CHANGE]
4* [FEATURE] 5* [FEATURE]
5* [ENHANCEMENT] 6* [ENHANCEMENT] Include TCP OutRsts in netstat metrics
6* [BUGFIX] 7* [BUGFIX]
7 8
9## 1.0.1 / 2020-06-15
10
11* [BUGFIX] filesystem_freebsd: Fix label values #1728
12* [BUGFIX] Update prometheus/procfs to fix log noise #1735
13* [BUGFIX] Fix build tags for collectors #1745
14* [BUGFIX] Handle no data from powersupplyclass #1747, #1749
15
8## 1.0.0 / 2020-05-25 16## 1.0.0 / 2020-05-25
9 17
10### **Breaking changes** 18### **Breaking changes**
diff --git a/Makefile b/Makefile
index daa51ff..504ed3d 100644
--- a/Makefile
+++ b/Makefile
@@ -19,7 +19,7 @@ DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le s390x
19 19
20include Makefile.common 20include Makefile.common
21 21
22PROMTOOL_VERSION ?= 2.5.0 22PROMTOOL_VERSION ?= 2.18.1
23PROMTOOL_URL ?= https://github.com/prometheus/prometheus/releases/download/v$(PROMTOOL_VERSION)/prometheus-$(PROMTOOL_VERSION).$(GO_BUILD_PLATFORM).tar.gz 23PROMTOOL_URL ?= https://github.com/prometheus/prometheus/releases/download/v$(PROMTOOL_VERSION)/prometheus-$(PROMTOOL_VERSION).$(GO_BUILD_PLATFORM).tar.gz
24PROMTOOL ?= $(FIRST_GOPATH)/bin/promtool 24PROMTOOL ?= $(FIRST_GOPATH)/bin/promtool
25 25
@@ -129,9 +129,5 @@ test-docker:
129promtool: $(PROMTOOL) 129promtool: $(PROMTOOL)
130 130
131$(PROMTOOL): 131$(PROMTOOL):
132 $(eval PROMTOOL_TMP := $(shell mktemp -d))
133 curl -s -L $(PROMTOOL_URL) | tar -xvzf - -C $(PROMTOOL_TMP)
134 mkdir -p $(FIRST_GOPATH)/bin 132 mkdir -p $(FIRST_GOPATH)/bin
135 cp $(PROMTOOL_TMP)/prometheus-$(PROMTOOL_VERSION).$(GO_BUILD_PLATFORM)/promtool $(FIRST_GOPATH)/bin/promtool 133 curl -fsS -L $(PROMTOOL_URL) | tar -xvzf - -C $(FIRST_GOPATH)/bin --no-anchored --strip 1 promtool
136 rm -r $(PROMTOOL_TMP)
137
diff --git a/VERSION b/VERSION
index 3eefcb9..7dea76e 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
1.0.0 1.0.1
diff --git a/checkmetrics.sh b/checkmetrics.sh
index 5d846cc..c3104ca 100755
--- a/checkmetrics.sh
+++ b/checkmetrics.sh
@@ -5,14 +5,12 @@ if [[ ( -z "$1" ) || ( -z "$2" ) ]]; then
5 exit 1 5 exit 1
6fi 6fi
7 7
8# Only check node_exporter's metrics, as the Prometheus Go client currently 8# Ignore known issues in auto-generated and network specific collectors.
9# exposes a metric with a unit of microseconds. Once that is fixed, remove 9lint=$($1 check metrics < "$2" 2>&1 | grep -v -E "^node_(entropy|memory|netstat|wifi_station)_")
10# this filter.
11lint=$($1 check metrics < $2 2>&1 | grep "node_")
12 10
13if [[ ! -z $lint ]]; then 11if [[ -n $lint ]]; then
14 echo -e "Some Prometheus metrics do not follow best practices:\n" 12 echo -e "Some Prometheus metrics do not follow best practices:\n"
15 echo "$lint" 13 echo "$lint"
16 14
17 exit 1 15 exit 1
18fi \ No newline at end of file 16fi
diff --git a/collector/arp_linux.go b/collector/arp_linux.go
index 6a18879..86cb78a 100644
--- a/collector/arp_linux.go
+++ b/collector/arp_linux.go
@@ -84,7 +84,7 @@ func parseARPEntries(data io.Reader) (map[string]uint32, error) {
84 } 84 }
85 85
86 if err := scanner.Err(); err != nil { 86 if err := scanner.Err(); err != nil {
87 return nil, fmt.Errorf("failed to parse ARP info: %s", err) 87 return nil, fmt.Errorf("failed to parse ARP info: %w", err)
88 } 88 }
89 89
90 return entries, nil 90 return entries, nil
@@ -93,7 +93,7 @@ func parseARPEntries(data io.Reader) (map[string]uint32, error) {
93func (c *arpCollector) Update(ch chan<- prometheus.Metric) error { 93func (c *arpCollector) Update(ch chan<- prometheus.Metric) error {
94 entries, err := getARPEntries() 94 entries, err := getARPEntries()
95 if err != nil { 95 if err != nil {
96 return fmt.Errorf("could not get ARP entries: %s", err) 96 return fmt.Errorf("could not get ARP entries: %w", err)
97 } 97 }
98 98
99 for device, entryCount := range entries { 99 for device, entryCount := range entries {
diff --git a/collector/bonding_linux.go b/collector/bonding_linux.go
index 78e94b2..863f62c 100644
--- a/collector/bonding_linux.go
+++ b/collector/bonding_linux.go
@@ -16,6 +16,7 @@
16package collector 16package collector
17 17
18import ( 18import (
19 "errors"
19 "fmt" 20 "fmt"
20 "io/ioutil" 21 "io/ioutil"
21 "os" 22 "os"
@@ -59,7 +60,7 @@ func (c *bondingCollector) Update(ch chan<- prometheus.Metric) error {
59 statusfile := sysFilePath("class/net") 60 statusfile := sysFilePath("class/net")
60 bondingStats, err := readBondingStats(statusfile) 61 bondingStats, err := readBondingStats(statusfile)
61 if err != nil { 62 if err != nil {
62 if os.IsNotExist(err) { 63 if errors.Is(err, os.ErrNotExist) {
63 level.Debug(c.logger).Log("msg", "Not collecting bonding, file does not exist", "file", statusfile) 64 level.Debug(c.logger).Log("msg", "Not collecting bonding, file does not exist", "file", statusfile)
64 return ErrNoData 65 return ErrNoData
65 } 66 }
@@ -86,7 +87,7 @@ func readBondingStats(root string) (status map[string][2]int, err error) {
86 sstat := [2]int{0, 0} 87 sstat := [2]int{0, 0}
87 for _, slave := range strings.Fields(string(slaves)) { 88 for _, slave := range strings.Fields(string(slaves)) {
88 state, err := ioutil.ReadFile(filepath.Join(root, master, fmt.Sprintf("lower_%s", slave), "bonding_slave", "mii_status")) 89 state, err := ioutil.ReadFile(filepath.Join(root, master, fmt.Sprintf("lower_%s", slave), "bonding_slave", "mii_status"))
89 if os.IsNotExist(err) { 90 if errors.Is(err, os.ErrNotExist) {
90 // some older? kernels use slave_ prefix 91 // some older? kernels use slave_ prefix
91 state, err = ioutil.ReadFile(filepath.Join(root, master, fmt.Sprintf("slave_%s", slave), "bonding_slave", "mii_status")) 92 state, err = ioutil.ReadFile(filepath.Join(root, master, fmt.Sprintf("slave_%s", slave), "bonding_slave", "mii_status"))
92 } 93 }
diff --git a/collector/btrfs_linux.go b/collector/btrfs_linux.go
index 2336e65..4d4857b 100644
--- a/collector/btrfs_linux.go
+++ b/collector/btrfs_linux.go
@@ -37,7 +37,7 @@ func init() {
37func NewBtrfsCollector(logger log.Logger) (Collector, error) { 37func NewBtrfsCollector(logger log.Logger) (Collector, error) {
38 fs, err := btrfs.NewFS(*sysPath) 38 fs, err := btrfs.NewFS(*sysPath)
39 if err != nil { 39 if err != nil {
40 return nil, fmt.Errorf("failed to open sysfs: %v", err) 40 return nil, fmt.Errorf("failed to open sysfs: %w", err)
41 } 41 }
42 42
43 return &btrfsCollector{ 43 return &btrfsCollector{
@@ -51,7 +51,7 @@ func NewBtrfsCollector(logger log.Logger) (Collector, error) {
51func (c *btrfsCollector) Update(ch chan<- prometheus.Metric) error { 51func (c *btrfsCollector) Update(ch chan<- prometheus.Metric) error {
52 stats, err := c.fs.Stats() 52 stats, err := c.fs.Stats()
53 if err != nil { 53 if err != nil {
54 return fmt.Errorf("failed to retrieve Btrfs stats: %v", err) 54 return fmt.Errorf("failed to retrieve Btrfs stats: %w", err)
55 } 55 }
56 56
57 for _, s := range stats { 57 for _, s := range stats {
diff --git a/collector/buddyinfo.go b/collector/buddyinfo.go
index e94e283..579c3e4 100644
--- a/collector/buddyinfo.go
+++ b/collector/buddyinfo.go
@@ -59,7 +59,7 @@ func NewBuddyinfoCollector(logger log.Logger) (Collector, error) {
59func (c *buddyinfoCollector) Update(ch chan<- prometheus.Metric) error { 59func (c *buddyinfoCollector) Update(ch chan<- prometheus.Metric) error {
60 buddyInfo, err := c.fs.BuddyInfo() 60 buddyInfo, err := c.fs.BuddyInfo()
61 if err != nil { 61 if err != nil {
62 return fmt.Errorf("couldn't get buddyinfo: %s", err) 62 return fmt.Errorf("couldn't get buddyinfo: %w", err)
63 } 63 }
64 64
65 level.Debug(c.logger).Log("msg", "Set node_buddy", "buddyInfo", buddyInfo) 65 level.Debug(c.logger).Log("msg", "Set node_buddy", "buddyInfo", buddyInfo)
diff --git a/collector/diskstats_darwin.go b/collector/diskstats_darwin.go
index 89622a3..864220b 100644
--- a/collector/diskstats_darwin.go
+++ b/collector/diskstats_darwin.go
@@ -189,7 +189,7 @@ func NewDiskstatsCollector(logger log.Logger) (Collector, error) {
189func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error { 189func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error {
190 diskStats, err := iostat.ReadDriveStats() 190 diskStats, err := iostat.ReadDriveStats()
191 if err != nil { 191 if err != nil {
192 return fmt.Errorf("couldn't get diskstats: %s", err) 192 return fmt.Errorf("couldn't get diskstats: %w", err)
193 } 193 }
194 194
195 for _, stats := range diskStats { 195 for _, stats := range diskStats {
diff --git a/collector/diskstats_linux.go b/collector/diskstats_linux.go
index 5d71422..b5ae82d 100644
--- a/collector/diskstats_linux.go
+++ b/collector/diskstats_linux.go
@@ -187,7 +187,7 @@ func NewDiskstatsCollector(logger log.Logger) (Collector, error) {
187func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error { 187func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error {
188 diskStats, err := getDiskStats() 188 diskStats, err := getDiskStats()
189 if err != nil { 189 if err != nil {
190 return fmt.Errorf("couldn't get diskstats: %s", err) 190 return fmt.Errorf("couldn't get diskstats: %w", err)
191 } 191 }
192 192
193 for dev, stats := range diskStats { 193 for dev, stats := range diskStats {
@@ -203,7 +203,7 @@ func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error {
203 } 203 }
204 v, err := strconv.ParseFloat(value, 64) 204 v, err := strconv.ParseFloat(value, 64)
205 if err != nil { 205 if err != nil {
206 return fmt.Errorf("invalid value %s in diskstats: %s", value, err) 206 return fmt.Errorf("invalid value %s in diskstats: %w", value, err)
207 } 207 }
208 ch <- c.descs[i].mustNewConstMetric(v, dev) 208 ch <- c.descs[i].mustNewConstMetric(v, dev)
209 } 209 }
diff --git a/collector/drbd_linux.go b/collector/drbd_linux.go
index 6815c5f..281c079 100644
--- a/collector/drbd_linux.go
+++ b/collector/drbd_linux.go
@@ -11,10 +11,13 @@
11// See the License for the specific language governing permissions and 11// See the License for the specific language governing permissions and
12// limitations under the License. 12// limitations under the License.
13 13
14// +build !nodrbd
15
14package collector 16package collector
15 17
16import ( 18import (
17 "bufio" 19 "bufio"
20 "errors"
18 "fmt" 21 "fmt"
19 "os" 22 "os"
20 "strconv" 23 "strconv"
@@ -186,7 +189,7 @@ func (c *drbdCollector) Update(ch chan<- prometheus.Metric) error {
186 statsFile := procFilePath("drbd") 189 statsFile := procFilePath("drbd")
187 file, err := os.Open(statsFile) 190 file, err := os.Open(statsFile)
188 if err != nil { 191 if err != nil {
189 if os.IsNotExist(err) { 192 if errors.Is(err, os.ErrNotExist) {
190 level.Debug(c.logger).Log("msg", "stats file does not exist, skipping", "file", statsFile, "err", err) 193 level.Debug(c.logger).Log("msg", "stats file does not exist, skipping", "file", statsFile, "err", err)
191 return ErrNoData 194 return ErrNoData
192 } 195 }
diff --git a/collector/edac_linux.go b/collector/edac_linux.go
index 91b9510..1248e2e 100644
--- a/collector/edac_linux.go
+++ b/collector/edac_linux.go
@@ -86,28 +86,28 @@ func (c *edacCollector) Update(ch chan<- prometheus.Metric) error {
86 86
87 value, err := readUintFromFile(filepath.Join(controller, "ce_count")) 87 value, err := readUintFromFile(filepath.Join(controller, "ce_count"))
88 if err != nil { 88 if err != nil {
89 return fmt.Errorf("couldn't get ce_count for controller %s: %s", controllerNumber, err) 89 return fmt.Errorf("couldn't get ce_count for controller %s: %w", controllerNumber, err)
90 } 90 }
91 ch <- prometheus.MustNewConstMetric( 91 ch <- prometheus.MustNewConstMetric(
92 c.ceCount, prometheus.CounterValue, float64(value), controllerNumber) 92 c.ceCount, prometheus.CounterValue, float64(value), controllerNumber)
93 93
94 value, err = readUintFromFile(filepath.Join(controller, "ce_noinfo_count")) 94 value, err = readUintFromFile(filepath.Join(controller, "ce_noinfo_count"))
95 if err != nil { 95 if err != nil {
96 return fmt.Errorf("couldn't get ce_noinfo_count for controller %s: %s", controllerNumber, err) 96 return fmt.Errorf("couldn't get ce_noinfo_count for controller %s: %w", controllerNumber, err)
97 } 97 }
98 ch <- prometheus.MustNewConstMetric( 98 ch <- prometheus.MustNewConstMetric(
99 c.csRowCECount, prometheus.CounterValue, float64(value), controllerNumber, "unknown") 99 c.csRowCECount, prometheus.CounterValue, float64(value), controllerNumber, "unknown")
100 100
101 value, err = readUintFromFile(filepath.Join(controller, "ue_count")) 101 value, err = readUintFromFile(filepath.Join(controller, "ue_count"))
102 if err != nil { 102 if err != nil {
103 return fmt.Errorf("couldn't get ue_count for controller %s: %s", controllerNumber, err) 103 return fmt.Errorf("couldn't get ue_count for controller %s: %w", controllerNumber, err)
104 } 104 }
105 ch <- prometheus.MustNewConstMetric( 105 ch <- prometheus.MustNewConstMetric(
106 c.ueCount, prometheus.CounterValue, float64(value), controllerNumber) 106 c.ueCount, prometheus.CounterValue, float64(value), controllerNumber)
107 107
108 value, err = readUintFromFile(filepath.Join(controller, "ue_noinfo_count")) 108 value, err = readUintFromFile(filepath.Join(controller, "ue_noinfo_count"))
109 if err != nil { 109 if err != nil {
110 return fmt.Errorf("couldn't get ue_noinfo_count for controller %s: %s", controllerNumber, err) 110 return fmt.Errorf("couldn't get ue_noinfo_count for controller %s: %w", controllerNumber, err)
111 } 111 }
112 ch <- prometheus.MustNewConstMetric( 112 ch <- prometheus.MustNewConstMetric(
113 c.csRowUECount, prometheus.CounterValue, float64(value), controllerNumber, "unknown") 113 c.csRowUECount, prometheus.CounterValue, float64(value), controllerNumber, "unknown")
@@ -126,14 +126,14 @@ func (c *edacCollector) Update(ch chan<- prometheus.Metric) error {
126 126
127 value, err = readUintFromFile(filepath.Join(csrow, "ce_count")) 127 value, err = readUintFromFile(filepath.Join(csrow, "ce_count"))
128 if err != nil { 128 if err != nil {
129 return fmt.Errorf("couldn't get ce_count for controller/csrow %s/%s: %s", controllerNumber, csrowNumber, err) 129 return fmt.Errorf("couldn't get ce_count for controller/csrow %s/%s: %w", controllerNumber, csrowNumber, err)
130 } 130 }
131 ch <- prometheus.MustNewConstMetric( 131 ch <- prometheus.MustNewConstMetric(
132 c.csRowCECount, prometheus.CounterValue, float64(value), controllerNumber, csrowNumber) 132 c.csRowCECount, prometheus.CounterValue, float64(value), controllerNumber, csrowNumber)
133 133
134 value, err = readUintFromFile(filepath.Join(csrow, "ue_count")) 134 value, err = readUintFromFile(filepath.Join(csrow, "ue_count"))
135 if err != nil { 135 if err != nil {
136 return fmt.Errorf("couldn't get ue_count for controller/csrow %s/%s: %s", controllerNumber, csrowNumber, err) 136 return fmt.Errorf("couldn't get ue_count for controller/csrow %s/%s: %w", controllerNumber, csrowNumber, err)
137 } 137 }
138 ch <- prometheus.MustNewConstMetric( 138 ch <- prometheus.MustNewConstMetric(
139 c.csRowUECount, prometheus.CounterValue, float64(value), controllerNumber, csrowNumber) 139 c.csRowUECount, prometheus.CounterValue, float64(value), controllerNumber, csrowNumber)
diff --git a/collector/entropy_linux.go b/collector/entropy_linux.go
index e68eb95..3c42c3d 100644
--- a/collector/entropy_linux.go
+++ b/collector/entropy_linux.go
@@ -46,7 +46,7 @@ func NewEntropyCollector(logger log.Logger) (Collector, error) {
46func (c *entropyCollector) Update(ch chan<- prometheus.Metric) error { 46func (c *entropyCollector) Update(ch chan<- prometheus.Metric) error {
47 value, err := readUintFromFile(procFilePath("sys/kernel/random/entropy_avail")) 47 value, err := readUintFromFile(procFilePath("sys/kernel/random/entropy_avail"))
48 if err != nil { 48 if err != nil {
49 return fmt.Errorf("couldn't get entropy_avail: %s", err) 49 return fmt.Errorf("couldn't get entropy_avail: %w", err)
50 } 50 }
51 ch <- prometheus.MustNewConstMetric( 51 ch <- prometheus.MustNewConstMetric(
52 c.entropyAvail, prometheus.GaugeValue, float64(value)) 52 c.entropyAvail, prometheus.GaugeValue, float64(value))
diff --git a/collector/filefd_linux.go b/collector/filefd_linux.go
index f8d3fce..450c4e3 100644
--- a/collector/filefd_linux.go
+++ b/collector/filefd_linux.go
@@ -46,12 +46,12 @@ func NewFileFDStatCollector(logger log.Logger) (Collector, error) {
46func (c *fileFDStatCollector) Update(ch chan<- prometheus.Metric) error { 46func (c *fileFDStatCollector) Update(ch chan<- prometheus.Metric) error {
47 fileFDStat, err := parseFileFDStats(procFilePath("sys/fs/file-nr")) 47 fileFDStat, err := parseFileFDStats(procFilePath("sys/fs/file-nr"))
48 if err != nil { 48 if err != nil {
49 return fmt.Errorf("couldn't get file-nr: %s", err) 49 return fmt.Errorf("couldn't get file-nr: %w", err)
50 } 50 }
51 for name, value := range fileFDStat { 51 for name, value := range fileFDStat {
52 v, err := strconv.ParseFloat(value, 64) 52 v, err := strconv.ParseFloat(value, 64)
53 if err != nil { 53 if err != nil {
54 return fmt.Errorf("invalid value %s in file-nr: %s", value, err) 54 return fmt.Errorf("invalid value %s in file-nr: %w", value, err)
55 } 55 }
56 ch <- prometheus.MustNewConstMetric( 56 ch <- prometheus.MustNewConstMetric(
57 prometheus.NewDesc( 57 prometheus.NewDesc(
diff --git a/collector/filesystem_common.go b/collector/filesystem_common.go
index 6971f44..7def2b9 100644
--- a/collector/filesystem_common.go
+++ b/collector/filesystem_common.go
@@ -20,6 +20,7 @@ import (
20 "regexp" 20 "regexp"
21 21
22 "github.com/go-kit/kit/log" 22 "github.com/go-kit/kit/log"
23 "github.com/go-kit/kit/log/level"
23 "github.com/prometheus/client_golang/prometheus" 24 "github.com/prometheus/client_golang/prometheus"
24 "gopkg.in/alecthomas/kingpin.v2" 25 "gopkg.in/alecthomas/kingpin.v2"
25) 26)
@@ -70,7 +71,9 @@ func init() {
70// NewFilesystemCollector returns a new Collector exposing filesystems stats. 71// NewFilesystemCollector returns a new Collector exposing filesystems stats.
71func NewFilesystemCollector(logger log.Logger) (Collector, error) { 72func NewFilesystemCollector(logger log.Logger) (Collector, error) {
72 subsystem := "filesystem" 73 subsystem := "filesystem"
74 level.Info(logger).Log("msg", "Parsed flag --collector.filesystem.ignored-mount-points", "flag", *ignoredMountPoints)
73 mountPointPattern := regexp.MustCompile(*ignoredMountPoints) 75 mountPointPattern := regexp.MustCompile(*ignoredMountPoints)
76 level.Info(logger).Log("msg", "Parsed flag --collector.filesystem.ignored-fs-types", "flag", *ignoredMountPoints)
74 filesystemsTypesPattern := regexp.MustCompile(*ignoredFSTypes) 77 filesystemsTypesPattern := regexp.MustCompile(*ignoredFSTypes)
75 78
76 sizeDesc := prometheus.NewDesc( 79 sizeDesc := prometheus.NewDesc(
diff --git a/collector/filesystem_freebsd.go b/collector/filesystem_freebsd.go
index f37029e..1d377b1 100644
--- a/collector/filesystem_freebsd.go
+++ b/collector/filesystem_freebsd.go
@@ -40,14 +40,14 @@ func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
40 } 40 }
41 stats := []filesystemStats{} 41 stats := []filesystemStats{}
42 for _, fs := range buf { 42 for _, fs := range buf {
43 mountpoint := string(fs.Mntonname[:]) 43 mountpoint := bytesToString(fs.Mntonname[:])
44 if c.ignoredMountPointsPattern.MatchString(mountpoint) { 44 if c.ignoredMountPointsPattern.MatchString(mountpoint) {
45 level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", mountpoint) 45 level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", mountpoint)
46 continue 46 continue
47 } 47 }
48 48
49 device := string(fs.Mntfromname[:]) 49 device := bytesToString(fs.Mntfromname[:])
50 fstype := string(fs.Fstypename[:]) 50 fstype := bytesToString(fs.Fstypename[:])
51 if c.ignoredFSTypesPattern.MatchString(fstype) { 51 if c.ignoredFSTypesPattern.MatchString(fstype) {
52 level.Debug(c.logger).Log("msg", "Ignoring fs type", "type", fstype) 52 level.Debug(c.logger).Log("msg", "Ignoring fs type", "type", fstype)
53 continue 53 continue
diff --git a/collector/filesystem_linux.go b/collector/filesystem_linux.go
index e83b27a..00a7323 100644
--- a/collector/filesystem_linux.go
+++ b/collector/filesystem_linux.go
@@ -17,6 +17,7 @@ package collector
17 17
18import ( 18import (
19 "bufio" 19 "bufio"
20 "errors"
20 "fmt" 21 "fmt"
21 "io" 22 "io"
22 "os" 23 "os"
@@ -139,7 +140,7 @@ func stuckMountWatcher(mountPoint string, success chan struct{}, logger log.Logg
139 140
140func mountPointDetails(logger log.Logger) ([]filesystemLabels, error) { 141func mountPointDetails(logger log.Logger) ([]filesystemLabels, error) {
141 file, err := os.Open(procFilePath("1/mounts")) 142 file, err := os.Open(procFilePath("1/mounts"))
142 if os.IsNotExist(err) { 143 if errors.Is(err, os.ErrNotExist) {
143 // Fallback to `/proc/mounts` if `/proc/1/mounts` is missing due hidepid. 144 // Fallback to `/proc/mounts` if `/proc/1/mounts` is missing due hidepid.
144 level.Debug(logger).Log("msg", "Reading root mounts failed, falling back to system mounts", "err", err) 145 level.Debug(logger).Log("msg", "Reading root mounts failed, falling back to system mounts", "err", err)
145 file, err = os.Open(procFilePath("mounts")) 146 file, err = os.Open(procFilePath("mounts"))
diff --git a/collector/filesystem_linux_test.go b/collector/filesystem_linux_test.go
index 973cd14..e401779 100644
--- a/collector/filesystem_linux_test.go
+++ b/collector/filesystem_linux_test.go
@@ -11,8 +11,6 @@
11// See the License for the specific language governing permissions and 11// See the License for the specific language governing permissions and
12// limitations under the License. 12// limitations under the License.
13 13
14// +build !nofilesystem
15
16package collector 14package collector
17 15
18import ( 16import (
diff --git a/collector/fixtures/e2e-64k-page-output.txt b/collector/fixtures/e2e-64k-page-output.txt
index 6c9473f..7b857ff 100644
--- a/collector/fixtures/e2e-64k-page-output.txt
+++ b/collector/fixtures/e2e-64k-page-output.txt
@@ -1886,6 +1886,9 @@ node_netstat_Tcp_InErrs 5
1886# HELP node_netstat_Tcp_InSegs Statistic TcpInSegs. 1886# HELP node_netstat_Tcp_InSegs Statistic TcpInSegs.
1887# TYPE node_netstat_Tcp_InSegs untyped 1887# TYPE node_netstat_Tcp_InSegs untyped
1888node_netstat_Tcp_InSegs 5.7252008e+07 1888node_netstat_Tcp_InSegs 5.7252008e+07
1889# HELP node_netstat_Tcp_OutRsts Statistic TcpOutRsts.
1890# TYPE node_netstat_Tcp_OutRsts untyped
1891node_netstat_Tcp_OutRsts 1003
1889# HELP node_netstat_Tcp_OutSegs Statistic TcpOutSegs. 1892# HELP node_netstat_Tcp_OutSegs Statistic TcpOutSegs.
1890# TYPE node_netstat_Tcp_OutSegs untyped 1893# TYPE node_netstat_Tcp_OutSegs untyped
1891node_netstat_Tcp_OutSegs 5.4915039e+07 1894node_netstat_Tcp_OutSegs 5.4915039e+07
diff --git a/collector/fixtures/e2e-output.txt b/collector/fixtures/e2e-output.txt
index af9925b..e8a5779 100644
--- a/collector/fixtures/e2e-output.txt
+++ b/collector/fixtures/e2e-output.txt
@@ -1955,6 +1955,9 @@ node_netstat_Tcp_InErrs 5
1955# HELP node_netstat_Tcp_InSegs Statistic TcpInSegs. 1955# HELP node_netstat_Tcp_InSegs Statistic TcpInSegs.
1956# TYPE node_netstat_Tcp_InSegs untyped 1956# TYPE node_netstat_Tcp_InSegs untyped
1957node_netstat_Tcp_InSegs 5.7252008e+07 1957node_netstat_Tcp_InSegs 5.7252008e+07
1958# HELP node_netstat_Tcp_OutRsts Statistic TcpOutRsts.
1959# TYPE node_netstat_Tcp_OutRsts untyped
1960node_netstat_Tcp_OutRsts 1003
1958# HELP node_netstat_Tcp_OutSegs Statistic TcpOutSegs. 1961# HELP node_netstat_Tcp_OutSegs Statistic TcpOutSegs.
1959# TYPE node_netstat_Tcp_OutSegs untyped 1962# TYPE node_netstat_Tcp_OutSegs untyped
1960node_netstat_Tcp_OutSegs 5.4915039e+07 1963node_netstat_Tcp_OutSegs 5.4915039e+07
diff --git a/collector/fixtures/sys.ttar b/collector/fixtures/sys.ttar
index a4550a5..3e52c60 100644
--- a/collector/fixtures/sys.ttar
+++ b/collector/fixtures/sys.ttar
@@ -1756,6 +1756,17 @@ Lines: 1
17560 17560
1757Mode: 644 1757Mode: 644
1758# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 1758# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1759Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/writeback_rate_debug
1760Lines: 7
1761rate: 1.1M/sec
1762dirty: 20.4G
1763target: 20.4G
1764proportional: 427.5k
1765integral: 790.0k
1766change: 321.5k/sec
1767next io: 17ms
1768Mode: 644
1769# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1759Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5 1770Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5
1760Mode: 755 1771Mode: 755
1761# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 1772# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
diff --git a/collector/helper.go b/collector/helper.go
index 2bf461e..df5cd26 100644
--- a/collector/helper.go
+++ b/collector/helper.go
@@ -14,6 +14,7 @@
14package collector 14package collector
15 15
16import ( 16import (
17 "bytes"
17 "io/ioutil" 18 "io/ioutil"
18 "strconv" 19 "strconv"
19 "strings" 20 "strings"
@@ -30,3 +31,16 @@ func readUintFromFile(path string) (uint64, error) {
30 } 31 }
31 return value, nil 32 return value, nil
32} 33}
34
35// Take a []byte{} and return a string based on null termination.
36// This is useful for situations where the OS has returned a null terminated
37// string to use.
38// If this function happens to receive a byteArray that contains no nulls, we
39// simply convert the array to a string with no bounding.
40func bytesToString(byteArray []byte) string {
41 n := bytes.IndexByte(byteArray, 0)
42 if n < 0 {
43 return string(byteArray)
44 }
45 return string(byteArray[:n])
46}
diff --git a/collector/helper_test.go b/collector/helper_test.go
new file mode 100644
index 0000000..0424d48
--- /dev/null
+++ b/collector/helper_test.go
@@ -0,0 +1,63 @@
1// Copyright 2020 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14package collector
15
16import (
17 "testing"
18)
19
20func TestBytesToString(t *testing.T) {
21 tests := []struct {
22 name string
23 b []byte
24 expected string
25 }{
26 {
27 "Single null byte",
28 []byte{0},
29 "",
30 },
31 {
32 "Empty byte array",
33 []byte{},
34 "",
35 },
36 {
37 "Not null terminated",
38 []byte{65, 66, 67},
39 "ABC",
40 },
41 {
42 "Null randomly in array",
43 []byte{65, 66, 67, 0, 65, 0, 65},
44 "ABC",
45 },
46 {
47 "Array starts with null and contains other valid bytes",
48 []byte{0, 65, 66, 67, 0},
49 "",
50 },
51 }
52
53 for _, tt := range tests {
54 name := tt.name
55 b := tt.b
56 result := bytesToString(b)
57 expected := tt.expected
58
59 if result != expected {
60 t.Errorf("bytesToString(%#v): Name: %s, expected %#v, got %#v)", b, name, expected, result)
61 }
62 }
63}
diff --git a/collector/hwmon_linux.go b/collector/hwmon_linux.go
index 5649942..261a7c5 100644
--- a/collector/hwmon_linux.go
+++ b/collector/hwmon_linux.go
@@ -424,7 +424,7 @@ func (c *hwMonCollector) Update(ch chan<- prometheus.Metric) error {
424 424
425 hwmonFiles, err := ioutil.ReadDir(hwmonPathName) 425 hwmonFiles, err := ioutil.ReadDir(hwmonPathName)
426 if err != nil { 426 if err != nil {
427 if os.IsNotExist(err) { 427 if errors.Is(err, os.ErrNotExist) {
428 level.Debug(c.logger).Log("msg", "hwmon collector metrics are not available for this system") 428 level.Debug(c.logger).Log("msg", "hwmon collector metrics are not available for this system")
429 return ErrNoData 429 return ErrNoData
430 } 430 }
diff --git a/collector/infiniband_linux.go b/collector/infiniband_linux.go
index 1f453b8..d938210 100644
--- a/collector/infiniband_linux.go
+++ b/collector/infiniband_linux.go
@@ -17,6 +17,7 @@
17package collector 17package collector
18 18
19import ( 19import (
20 "errors"
20 "fmt" 21 "fmt"
21 "os" 22 "os"
22 "strconv" 23 "strconv"
@@ -108,11 +109,11 @@ func (c *infinibandCollector) pushCounter(ch chan<- prometheus.Metric, name stri
108func (c *infinibandCollector) Update(ch chan<- prometheus.Metric) error { 109func (c *infinibandCollector) Update(ch chan<- prometheus.Metric) error {
109 devices, err := c.fs.InfiniBandClass() 110 devices, err := c.fs.InfiniBandClass()
110 if err != nil { 111 if err != nil {
111 if os.IsNotExist(err) { 112 if errors.Is(err, os.ErrNotExist) {
112 level.Debug(c.logger).Log("msg", "infiniband statistics not found, skipping") 113 level.Debug(c.logger).Log("msg", "infiniband statistics not found, skipping")
113 return ErrNoData 114 return ErrNoData
114 } 115 }
115 return fmt.Errorf("error obtaining InfiniBand class info: %s", err) 116 return fmt.Errorf("error obtaining InfiniBand class info: %w", err)
116 } 117 }
117 118
118 for _, device := range devices { 119 for _, device := range devices {
diff --git a/collector/interrupts_linux.go b/collector/interrupts_linux.go
index cacfeda..5fcbebc 100644
--- a/collector/interrupts_linux.go
+++ b/collector/interrupts_linux.go
@@ -34,13 +34,13 @@ var (
34func (c *interruptsCollector) Update(ch chan<- prometheus.Metric) (err error) { 34func (c *interruptsCollector) Update(ch chan<- prometheus.Metric) (err error) {
35 interrupts, err := getInterrupts() 35 interrupts, err := getInterrupts()
36 if err != nil { 36 if err != nil {
37 return fmt.Errorf("couldn't get interrupts: %s", err) 37 return fmt.Errorf("couldn't get interrupts: %w", err)
38 } 38 }
39 for name, interrupt := range interrupts { 39 for name, interrupt := range interrupts {
40 for cpuNo, value := range interrupt.values { 40 for cpuNo, value := range interrupt.values {
41 fv, err := strconv.ParseFloat(value, 64) 41 fv, err := strconv.ParseFloat(value, 64)
42 if err != nil { 42 if err != nil {
43 return fmt.Errorf("invalid value %s in interrupts: %s", value, err) 43 return fmt.Errorf("invalid value %s in interrupts: %w", value, err)
44 } 44 }
45 ch <- c.desc.mustNewConstMetric(fv, strconv.Itoa(cpuNo), name, interrupt.info, interrupt.devices) 45 ch <- c.desc.mustNewConstMetric(fv, strconv.Itoa(cpuNo), name, interrupt.info, interrupt.devices)
46 } 46 }
diff --git a/collector/interrupts_openbsd.go b/collector/interrupts_openbsd.go
index c9aae3c..bf38fe3 100644
--- a/collector/interrupts_openbsd.go
+++ b/collector/interrupts_openbsd.go
@@ -101,7 +101,7 @@ var (
101func (c *interruptsCollector) Update(ch chan<- prometheus.Metric) error { 101func (c *interruptsCollector) Update(ch chan<- prometheus.Metric) error {
102 interrupts, err := getInterrupts() 102 interrupts, err := getInterrupts()
103 if err != nil { 103 if err != nil {
104 return fmt.Errorf("couldn't get interrupts: %s", err) 104 return fmt.Errorf("couldn't get interrupts: %w", err)
105 } 105 }
106 for dev, interrupt := range interrupts { 106 for dev, interrupt := range interrupts {
107 for cpuNo, value := range interrupt.values { 107 for cpuNo, value := range interrupt.values {
diff --git a/collector/ipvs_linux.go b/collector/ipvs_linux.go
index b5c8d73..c2e9d70 100644
--- a/collector/ipvs_linux.go
+++ b/collector/ipvs_linux.go
@@ -16,6 +16,7 @@
16package collector 16package collector
17 17
18import ( 18import (
19 "errors"
19 "fmt" 20 "fmt"
20 "os" 21 "os"
21 "sort" 22 "sort"
@@ -140,11 +141,11 @@ func (c *ipvsCollector) Update(ch chan<- prometheus.Metric) error {
140 ipvsStats, err := c.fs.IPVSStats() 141 ipvsStats, err := c.fs.IPVSStats()
141 if err != nil { 142 if err != nil {
142 // Cannot access ipvs metrics, report no error. 143 // Cannot access ipvs metrics, report no error.
143 if os.IsNotExist(err) { 144 if errors.Is(err, os.ErrNotExist) {
144 level.Debug(c.logger).Log("msg", "ipvs collector metrics are not available for this system") 145 level.Debug(c.logger).Log("msg", "ipvs collector metrics are not available for this system")
145 return ErrNoData 146 return ErrNoData
146 } 147 }
147 return fmt.Errorf("could not get IPVS stats: %s", err) 148 return fmt.Errorf("could not get IPVS stats: %w", err)
148 } 149 }
149 ch <- c.connections.mustNewConstMetric(float64(ipvsStats.Connections)) 150 ch <- c.connections.mustNewConstMetric(float64(ipvsStats.Connections))
150 ch <- c.incomingPackets.mustNewConstMetric(float64(ipvsStats.IncomingPackets)) 151 ch <- c.incomingPackets.mustNewConstMetric(float64(ipvsStats.IncomingPackets))
@@ -154,7 +155,7 @@ func (c *ipvsCollector) Update(ch chan<- prometheus.Metric) error {
154 155
155 backendStats, err := c.fs.IPVSBackendStatus() 156 backendStats, err := c.fs.IPVSBackendStatus()
156 if err != nil { 157 if err != nil {
157 return fmt.Errorf("could not get backend status: %s", err) 158 return fmt.Errorf("could not get backend status: %w", err)
158 } 159 }
159 160
160 sums := map[string]ipvsBackendStatus{} 161 sums := map[string]ipvsBackendStatus{}
diff --git a/collector/kvm_bsd.go b/collector/kvm_bsd.go
index b4e95e6..8798736 100644
--- a/collector/kvm_bsd.go
+++ b/collector/kvm_bsd.go
@@ -11,7 +11,7 @@
11// See the License for the specific language governing permissions and 11// See the License for the specific language governing permissions and
12// limitations under the License. 12// limitations under the License.
13 13
14// +build !nomeminfo 14// +build !nokvm
15// +build freebsd dragonfly 15// +build freebsd dragonfly
16 16
17package collector 17package collector
diff --git a/collector/loadavg.go b/collector/loadavg.go
index 7c8bcff..7c1fd99 100644
--- a/collector/loadavg.go
+++ b/collector/loadavg.go
@@ -48,7 +48,7 @@ func NewLoadavgCollector(logger log.Logger) (Collector, error) {
48func (c *loadavgCollector) Update(ch chan<- prometheus.Metric) error { 48func (c *loadavgCollector) Update(ch chan<- prometheus.Metric) error {
49 loads, err := getLoad() 49 loads, err := getLoad()
50 if err != nil { 50 if err != nil {
51 return fmt.Errorf("couldn't get load: %s", err) 51 return fmt.Errorf("couldn't get load: %w", err)
52 } 52 }
53 for i, load := range loads { 53 for i, load := range loads {
54 level.Debug(c.logger).Log("msg", "return load", "index", i, "load", load) 54 level.Debug(c.logger).Log("msg", "return load", "index", i, "load", load)
diff --git a/collector/loadavg_linux.go b/collector/loadavg_linux.go
index 668d3ed..7b89668 100644
--- a/collector/loadavg_linux.go
+++ b/collector/loadavg_linux.go
@@ -45,7 +45,7 @@ func parseLoad(data string) (loads []float64, err error) {
45 for i, load := range parts[0:3] { 45 for i, load := range parts[0:3] {
46 loads[i], err = strconv.ParseFloat(load, 64) 46 loads[i], err = strconv.ParseFloat(load, 64)
47 if err != nil { 47 if err != nil {
48 return nil, fmt.Errorf("could not parse load '%s': %s", load, err) 48 return nil, fmt.Errorf("could not parse load '%s': %w", load, err)
49 } 49 }
50 } 50 }
51 return loads, nil 51 return loads, nil
diff --git a/collector/logind_linux.go b/collector/logind_linux.go
index fb3cb57..19a29a2 100644
--- a/collector/logind_linux.go
+++ b/collector/logind_linux.go
@@ -92,7 +92,7 @@ func NewLogindCollector(logger log.Logger) (Collector, error) {
92func (lc *logindCollector) Update(ch chan<- prometheus.Metric) error { 92func (lc *logindCollector) Update(ch chan<- prometheus.Metric) error {
93 c, err := newDbus() 93 c, err := newDbus()
94 if err != nil { 94 if err != nil {
95 return fmt.Errorf("unable to connect to dbus: %s", err) 95 return fmt.Errorf("unable to connect to dbus: %w", err)
96 } 96 }
97 defer c.conn.Close() 97 defer c.conn.Close()
98 98
@@ -102,12 +102,12 @@ func (lc *logindCollector) Update(ch chan<- prometheus.Metric) error {
102func collectMetrics(ch chan<- prometheus.Metric, c logindInterface) error { 102func collectMetrics(ch chan<- prometheus.Metric, c logindInterface) error {
103 seats, err := c.listSeats() 103 seats, err := c.listSeats()
104 if err != nil { 104 if err != nil {
105 return fmt.Errorf("unable to get seats: %s", err) 105 return fmt.Errorf("unable to get seats: %w", err)
106 } 106 }
107 107
108 sessionList, err := c.listSessions() 108 sessionList, err := c.listSessions()
109 if err != nil { 109 if err != nil {
110 return fmt.Errorf("unable to get sessions: %s", err) 110 return fmt.Errorf("unable to get sessions: %w", err)
111 } 111 }
112 112
113 sessions := make(map[logindSession]float64) 113 sessions := make(map[logindSession]float64)
diff --git a/collector/mdadm_linux.go b/collector/mdadm_linux.go
index 05f83ee..865553f 100644
--- a/collector/mdadm_linux.go
+++ b/collector/mdadm_linux.go
@@ -16,6 +16,7 @@
16package collector 16package collector
17 17
18import ( 18import (
19 "errors"
19 "fmt" 20 "fmt"
20 "os" 21 "os"
21 22
@@ -94,21 +95,21 @@ var (
94) 95)
95 96
96func (c *mdadmCollector) Update(ch chan<- prometheus.Metric) error { 97func (c *mdadmCollector) Update(ch chan<- prometheus.Metric) error {
97 fs, errFs := procfs.NewFS(*procPath) 98 fs, err := procfs.NewFS(*procPath)
98 99
99 if errFs != nil { 100 if err != nil {
100 return fmt.Errorf("failed to open procfs: %w", errFs) 101 return fmt.Errorf("failed to open procfs: %w", err)
101 } 102 }
102 103
103 mdStats, err := fs.MDStat() 104 mdStats, err := fs.MDStat()
104 105
105 if err != nil { 106 if err != nil {
106 if os.IsNotExist(err) { 107 if errors.Is(err, os.ErrNotExist) {
107 level.Debug(c.logger).Log("msg", "Not collecting mdstat, file does not exist", "file", *procPath) 108 level.Debug(c.logger).Log("msg", "Not collecting mdstat, file does not exist", "file", *procPath)
108 return ErrNoData 109 return ErrNoData
109 } 110 }
110 111
111 return fmt.Errorf("error parsing mdstatus: %s", err) 112 return fmt.Errorf("error parsing mdstatus: %w", err)
112 } 113 }
113 114
114 for _, mdStat := range mdStats { 115 for _, mdStat := range mdStats {
diff --git a/collector/meminfo.go b/collector/meminfo.go
index d3d3b8d..38b2326 100644
--- a/collector/meminfo.go
+++ b/collector/meminfo.go
@@ -48,7 +48,7 @@ func (c *meminfoCollector) Update(ch chan<- prometheus.Metric) error {
48 var metricType prometheus.ValueType 48 var metricType prometheus.ValueType
49 memInfo, err := c.getMemInfo() 49 memInfo, err := c.getMemInfo()
50 if err != nil { 50 if err != nil {
51 return fmt.Errorf("couldn't get meminfo: %s", err) 51 return fmt.Errorf("couldn't get meminfo: %w", err)
52 } 52 }
53 level.Debug(c.logger).Log("msg", "Set node_mem", "memInfo", memInfo) 53 level.Debug(c.logger).Log("msg", "Set node_mem", "memInfo", memInfo)
54 for k, v := range memInfo { 54 for k, v := range memInfo {
diff --git a/collector/meminfo_linux.go b/collector/meminfo_linux.go
index 3b9e2e9..88505da 100644
--- a/collector/meminfo_linux.go
+++ b/collector/meminfo_linux.go
@@ -54,7 +54,7 @@ func parseMemInfo(r io.Reader) (map[string]float64, error) {
54 } 54 }
55 fv, err := strconv.ParseFloat(parts[1], 64) 55 fv, err := strconv.ParseFloat(parts[1], 64)
56 if err != nil { 56 if err != nil {
57 return nil, fmt.Errorf("invalid value in meminfo: %s", err) 57 return nil, fmt.Errorf("invalid value in meminfo: %w", err)
58 } 58 }
59 key := parts[0][:len(parts[0])-1] // remove trailing : from key 59 key := parts[0][:len(parts[0])-1] // remove trailing : from key
60 // Active(anon) -> Active_anon 60 // Active(anon) -> Active_anon
diff --git a/collector/meminfo_numa_linux.go b/collector/meminfo_numa_linux.go
index d36f6a0..f3d9307 100644
--- a/collector/meminfo_numa_linux.go
+++ b/collector/meminfo_numa_linux.go
@@ -62,7 +62,7 @@ func NewMeminfoNumaCollector(logger log.Logger) (Collector, error) {
62func (c *meminfoNumaCollector) Update(ch chan<- prometheus.Metric) error { 62func (c *meminfoNumaCollector) Update(ch chan<- prometheus.Metric) error {
63 metrics, err := getMemInfoNuma() 63 metrics, err := getMemInfoNuma()
64 if err != nil { 64 if err != nil {
65 return fmt.Errorf("couldn't get NUMA meminfo: %s", err) 65 return fmt.Errorf("couldn't get NUMA meminfo: %w", err)
66 } 66 }
67 for _, v := range metrics { 67 for _, v := range metrics {
68 desc, ok := c.metricDescs[v.metricName] 68 desc, ok := c.metricDescs[v.metricName]
@@ -137,7 +137,7 @@ func parseMemInfoNuma(r io.Reader) ([]meminfoMetric, error) {
137 137
138 fv, err := strconv.ParseFloat(parts[3], 64) 138 fv, err := strconv.ParseFloat(parts[3], 64)
139 if err != nil { 139 if err != nil {
140 return nil, fmt.Errorf("invalid value in meminfo: %s", err) 140 return nil, fmt.Errorf("invalid value in meminfo: %w", err)
141 } 141 }
142 switch l := len(parts); { 142 switch l := len(parts); {
143 case l == 4: // no unit 143 case l == 4: // no unit
@@ -174,7 +174,7 @@ func parseMemInfoNumaStat(r io.Reader, nodeNumber string) ([]meminfoMetric, erro
174 174
175 fv, err := strconv.ParseFloat(parts[1], 64) 175 fv, err := strconv.ParseFloat(parts[1], 64)
176 if err != nil { 176 if err != nil {
177 return nil, fmt.Errorf("invalid value in numastat: %s", err) 177 return nil, fmt.Errorf("invalid value in numastat: %w", err)
178 } 178 }
179 179
180 numaStat = append(numaStat, meminfoMetric{parts[0] + "_total", prometheus.CounterValue, nodeNumber, fv}) 180 numaStat = append(numaStat, meminfoMetric{parts[0] + "_total", prometheus.CounterValue, nodeNumber, fv})
diff --git a/collector/meminfo_openbsd.go b/collector/meminfo_openbsd.go
index 073fbe7..81102d5 100644
--- a/collector/meminfo_openbsd.go
+++ b/collector/meminfo_openbsd.go
@@ -62,7 +62,7 @@ func (c *meminfoCollector) getMemInfo() (map[string]float64, error) {
62 } 62 }
63 63
64 if _, err := C.sysctl_bcstats(&bcstats); err != nil { 64 if _, err := C.sysctl_bcstats(&bcstats); err != nil {
65 return nil, fmt.Errorf("sysctl CTL_VFS VFS_GENERIC VFS_BCACHESTAT failed: %v", err) 65 return nil, fmt.Errorf("sysctl CTL_VFS VFS_GENERIC VFS_BCACHESTAT failed: %w", err)
66 } 66 }
67 67
68 ps := float64(uvmexp.pagesize) 68 ps := float64(uvmexp.pagesize)
diff --git a/collector/memory_bsd.go b/collector/memory_bsd.go
index 4be5ddd..ac8b301 100644
--- a/collector/memory_bsd.go
+++ b/collector/memory_bsd.go
@@ -43,7 +43,7 @@ func init() {
43func NewMemoryCollector(logger log.Logger) (Collector, error) { 43func NewMemoryCollector(logger log.Logger) (Collector, error) {
44 tmp32, err := unix.SysctlUint32("vm.stats.vm.v_page_size") 44 tmp32, err := unix.SysctlUint32("vm.stats.vm.v_page_size")
45 if err != nil { 45 if err != nil {
46 return nil, fmt.Errorf("sysctl(vm.stats.vm.v_page_size) failed: %s", err) 46 return nil, fmt.Errorf("sysctl(vm.stats.vm.v_page_size) failed: %w", err)
47 } 47 }
48 size := float64(tmp32) 48 size := float64(tmp32)
49 49
@@ -136,7 +136,7 @@ func (c *memoryCollector) Update(ch chan<- prometheus.Metric) error {
136 for _, m := range c.sysctls { 136 for _, m := range c.sysctls {
137 v, err := m.Value() 137 v, err := m.Value()
138 if err != nil { 138 if err != nil {
139 return fmt.Errorf("couldn't get memory: %s", err) 139 return fmt.Errorf("couldn't get memory: %w", err)
140 } 140 }
141 141
142 // Most are gauges. 142 // Most are gauges.
@@ -154,7 +154,7 @@ func (c *memoryCollector) Update(ch chan<- prometheus.Metric) error {
154 154
155 swapUsed, err := c.kvm.SwapUsedPages() 155 swapUsed, err := c.kvm.SwapUsedPages()
156 if err != nil { 156 if err != nil {
157 return fmt.Errorf("couldn't get kvm: %s", err) 157 return fmt.Errorf("couldn't get kvm: %w", err)
158 } 158 }
159 159
160 ch <- prometheus.MustNewConstMetric( 160 ch <- prometheus.MustNewConstMetric(
diff --git a/collector/mountstats_linux.go b/collector/mountstats_linux.go
index 3fa1597..4102067 100644
--- a/collector/mountstats_linux.go
+++ b/collector/mountstats_linux.go
@@ -11,6 +11,8 @@
11// See the License for the specific language governing permissions and 11// See the License for the specific language governing permissions and
12// limitations under the License. 12// limitations under the License.
13 13
14// +build !nomountstats
15
14package collector 16package collector
15 17
16import ( 18import (
diff --git a/collector/netclass_linux.go b/collector/netclass_linux.go
index cbfcb62..0fde219 100644
--- a/collector/netclass_linux.go
+++ b/collector/netclass_linux.go
@@ -61,7 +61,7 @@ func NewNetClassCollector(logger log.Logger) (Collector, error) {
61func (c *netClassCollector) Update(ch chan<- prometheus.Metric) error { 61func (c *netClassCollector) Update(ch chan<- prometheus.Metric) error {
62 netClass, err := c.getNetClassInfo() 62 netClass, err := c.getNetClassInfo()
63 if err != nil { 63 if err != nil {
64 return fmt.Errorf("could not get net class info: %s", err) 64 return fmt.Errorf("could not get net class info: %w", err)
65 } 65 }
66 for _, ifaceInfo := range netClass { 66 for _, ifaceInfo := range netClass {
67 upDesc := prometheus.NewDesc( 67 upDesc := prometheus.NewDesc(
@@ -175,7 +175,7 @@ func (c *netClassCollector) getNetClassInfo() (sysfs.NetClass, error) {
175 netClass, err := c.fs.NetClass() 175 netClass, err := c.fs.NetClass()
176 176
177 if err != nil { 177 if err != nil {
178 return netClass, fmt.Errorf("error obtaining net class info: %s", err) 178 return netClass, fmt.Errorf("error obtaining net class info: %w", err)
179 } 179 }
180 180
181 for device := range netClass { 181 for device := range netClass {
diff --git a/collector/netdev_common.go b/collector/netdev_common.go
index 2164469..a28bd43 100644
--- a/collector/netdev_common.go
+++ b/collector/netdev_common.go
@@ -23,21 +23,24 @@ import (
23 "strconv" 23 "strconv"
24 24
25 "github.com/go-kit/kit/log" 25 "github.com/go-kit/kit/log"
26 "github.com/go-kit/kit/log/level"
26 "github.com/prometheus/client_golang/prometheus" 27 "github.com/prometheus/client_golang/prometheus"
27 "gopkg.in/alecthomas/kingpin.v2" 28 "gopkg.in/alecthomas/kingpin.v2"
28) 29)
29 30
30var ( 31var (
31 netdevIgnoredDevices = kingpin.Flag("collector.netdev.device-blacklist", "Regexp of net devices to blacklist (mutually exclusive to device-whitelist).").String() 32 netdevDeviceInclude = kingpin.Flag("collector.netdev.device-include", "Regexp of net devices to include (mutually exclusive to device-exclude).").String()
32 netdevAcceptDevices = kingpin.Flag("collector.netdev.device-whitelist", "Regexp of net devices to whitelist (mutually exclusive to device-blacklist).").String() 33 oldNetdevDeviceInclude = kingpin.Flag("collector.netdev.device-whitelist", "DEPRECATED: Use collector.netdev.device-include").Hidden().String()
34 netdevDeviceExclude = kingpin.Flag("collector.netdev.device-exclude", "Regexp of net devices to exclude (mutually exclusive to device-include).").String()
35 oldNetdevDeviceExclude = kingpin.Flag("collector.netdev.device-blacklist", "DEPRECATED: Use collector.netdev.device-exclude").Hidden().String()
33) 36)
34 37
35type netDevCollector struct { 38type netDevCollector struct {
36 subsystem string 39 subsystem string
37 ignoredDevicesPattern *regexp.Regexp 40 deviceExcludePattern *regexp.Regexp
38 acceptDevicesPattern *regexp.Regexp 41 deviceIncludePattern *regexp.Regexp
39 metricDescs map[string]*prometheus.Desc 42 metricDescs map[string]*prometheus.Desc
40 logger log.Logger 43 logger log.Logger
41} 44}
42 45
43func init() { 46func init() {
@@ -46,33 +49,53 @@ func init() {
46 49
47// NewNetDevCollector returns a new Collector exposing network device stats. 50// NewNetDevCollector returns a new Collector exposing network device stats.
48func NewNetDevCollector(logger log.Logger) (Collector, error) { 51func NewNetDevCollector(logger log.Logger) (Collector, error) {
49 if *netdevIgnoredDevices != "" && *netdevAcceptDevices != "" { 52 if *oldNetdevDeviceInclude != "" {
50 return nil, errors.New("device-blacklist & accept-devices are mutually exclusive") 53 if *netdevDeviceInclude == "" {
54 level.Warn(logger).Log("msg", "--collector.netdev.device-whitelist is DEPRECATED and will be removed in 2.0.0, use --collector.netdev.device-include")
55 *netdevDeviceInclude = *oldNetdevDeviceInclude
56 } else {
57 return nil, errors.New("--collector.netdev.device-whitelist and --collector.netdev.device-include are mutually exclusive")
58 }
59 }
60
61 if *oldNetdevDeviceExclude != "" {
62 if *netdevDeviceExclude == "" {
63 level.Warn(logger).Log("msg", "--collector.netdev.device-blacklist is DEPRECATED and will be removed in 2.0.0, use --collector.netdev.device-exclude")
64 *netdevDeviceExclude = *oldNetdevDeviceExclude
65 } else {
66 return nil, errors.New("--collector.netdev.device-blacklist and --collector.netdev.device-exclude are mutually exclusive")
67 }
68 }
69
70 if *netdevDeviceExclude != "" && *netdevDeviceInclude != "" {
71 return nil, errors.New("device-exclude & device-include are mutually exclusive")
51 } 72 }
52 73
53 var ignorePattern *regexp.Regexp 74 var excludePattern *regexp.Regexp
54 if *netdevIgnoredDevices != "" { 75 if *netdevDeviceExclude != "" {
55 ignorePattern = regexp.MustCompile(*netdevIgnoredDevices) 76 level.Info(logger).Log("msg", "Parsed flag --collector.netdev.device-exclude", "flag", *netdevDeviceExclude)
77 excludePattern = regexp.MustCompile(*netdevDeviceExclude)
56 } 78 }
57 79
58 var acceptPattern *regexp.Regexp 80 var includePattern *regexp.Regexp
59 if *netdevAcceptDevices != "" { 81 if *netdevDeviceInclude != "" {
60 acceptPattern = regexp.MustCompile(*netdevAcceptDevices) 82 level.Info(logger).Log("msg", "Parsed Flag --collector.netdev.device-include", "flag", *netdevDeviceInclude)
83 includePattern = regexp.MustCompile(*netdevDeviceInclude)
61 } 84 }
62 85
63 return &netDevCollector{ 86 return &netDevCollector{
64 subsystem: "network", 87 subsystem: "network",
65 ignoredDevicesPattern: ignorePattern, 88 deviceExcludePattern: excludePattern,
66 acceptDevicesPattern: acceptPattern, 89 deviceIncludePattern: includePattern,
67 metricDescs: map[string]*prometheus.Desc{}, 90 metricDescs: map[string]*prometheus.Desc{},
68 logger: logger, 91 logger: logger,
69 }, nil 92 }, nil
70} 93}
71 94
72func (c *netDevCollector) Update(ch chan<- prometheus.Metric) error { 95func (c *netDevCollector) Update(ch chan<- prometheus.Metric) error {
73 netDev, err := getNetDevStats(c.ignoredDevicesPattern, c.acceptDevicesPattern, c.logger) 96 netDev, err := getNetDevStats(c.deviceExcludePattern, c.deviceIncludePattern, c.logger)
74 if err != nil { 97 if err != nil {
75 return fmt.Errorf("couldn't get netstats: %s", err) 98 return fmt.Errorf("couldn't get netstats: %w", err)
76 } 99 }
77 for dev, devStats := range netDev { 100 for dev, devStats := range netDev {
78 for key, value := range devStats { 101 for key, value := range devStats {
@@ -88,7 +111,7 @@ func (c *netDevCollector) Update(ch chan<- prometheus.Metric) error {
88 } 111 }
89 v, err := strconv.ParseFloat(value, 64) 112 v, err := strconv.ParseFloat(value, 64)
90 if err != nil { 113 if err != nil {
91 return fmt.Errorf("invalid value %s in netstats: %s", value, err) 114 return fmt.Errorf("invalid value %s in netstats: %w", value, err)
92 } 115 }
93 ch <- prometheus.MustNewConstMetric(desc, prometheus.CounterValue, v, dev) 116 ch <- prometheus.MustNewConstMetric(desc, prometheus.CounterValue, v, dev)
94 } 117 }
diff --git a/collector/netstat_linux.go b/collector/netstat_linux.go
index 9b4be49..98447f8 100644
--- a/collector/netstat_linux.go
+++ b/collector/netstat_linux.go
@@ -17,6 +17,7 @@ package collector
17 17
18import ( 18import (
19 "bufio" 19 "bufio"
20 "errors"
20 "fmt" 21 "fmt"
21 "io" 22 "io"
22 "os" 23 "os"
@@ -34,7 +35,7 @@ const (
34) 35)
35 36
36var ( 37var (
37 netStatFields = kingpin.Flag("collector.netstat.fields", "Regexp of fields to return for netstat collector.").Default("^(.*_(InErrors|InErrs)|Ip_Forwarding|Ip(6|Ext)_(InOctets|OutOctets)|Icmp6?_(InMsgs|OutMsgs)|TcpExt_(Listen.*|Syncookies.*|TCPSynRetrans)|Tcp_(ActiveOpens|InSegs|OutSegs|PassiveOpens|RetransSegs|CurrEstab)|Udp6?_(InDatagrams|OutDatagrams|NoPorts|RcvbufErrors|SndbufErrors))$").String() 38 netStatFields = kingpin.Flag("collector.netstat.fields", "Regexp of fields to return for netstat collector.").Default("^(.*_(InErrors|InErrs)|Ip_Forwarding|Ip(6|Ext)_(InOctets|OutOctets)|Icmp6?_(InMsgs|OutMsgs)|TcpExt_(Listen.*|Syncookies.*|TCPSynRetrans)|Tcp_(ActiveOpens|InSegs|OutSegs|OutRsts|PassiveOpens|RetransSegs|CurrEstab)|Udp6?_(InDatagrams|OutDatagrams|NoPorts|RcvbufErrors|SndbufErrors))$").String()
38) 39)
39 40
40type netStatCollector struct { 41type netStatCollector struct {
@@ -59,15 +60,15 @@ func NewNetStatCollector(logger log.Logger) (Collector, error) {
59func (c *netStatCollector) Update(ch chan<- prometheus.Metric) error { 60func (c *netStatCollector) Update(ch chan<- prometheus.Metric) error {
60 netStats, err := getNetStats(procFilePath("net/netstat")) 61 netStats, err := getNetStats(procFilePath("net/netstat"))
61 if err != nil { 62 if err != nil {
62 return fmt.Errorf("couldn't get netstats: %s", err) 63 return fmt.Errorf("couldn't get netstats: %w", err)
63 } 64 }
64 snmpStats, err := getNetStats(procFilePath("net/snmp")) 65 snmpStats, err := getNetStats(procFilePath("net/snmp"))
65 if err != nil { 66 if err != nil {
66 return fmt.Errorf("couldn't get SNMP stats: %s", err) 67 return fmt.Errorf("couldn't get SNMP stats: %w", err)
67 } 68 }
68 snmp6Stats, err := getSNMP6Stats(procFilePath("net/snmp6")) 69 snmp6Stats, err := getSNMP6Stats(procFilePath("net/snmp6"))
69 if err != nil { 70 if err != nil {
70 return fmt.Errorf("couldn't get SNMP6 stats: %s", err) 71 return fmt.Errorf("couldn't get SNMP6 stats: %w", err)
71 } 72 }
72 // Merge the results of snmpStats into netStats (collisions are possible, but 73 // Merge the results of snmpStats into netStats (collisions are possible, but
73 // we know that the keys are always unique for the given use case). 74 // we know that the keys are always unique for the given use case).
@@ -82,7 +83,7 @@ func (c *netStatCollector) Update(ch chan<- prometheus.Metric) error {
82 key := protocol + "_" + name 83 key := protocol + "_" + name
83 v, err := strconv.ParseFloat(value, 64) 84 v, err := strconv.ParseFloat(value, 64)
84 if err != nil { 85 if err != nil {
85 return fmt.Errorf("invalid value %s in netstats: %s", value, err) 86 return fmt.Errorf("invalid value %s in netstats: %w", value, err)
86 } 87 }
87 if !c.fieldPattern.MatchString(key) { 88 if !c.fieldPattern.MatchString(key) {
88 continue 89 continue
@@ -140,7 +141,7 @@ func getSNMP6Stats(fileName string) (map[string]map[string]string, error) {
140 if err != nil { 141 if err != nil {
141 // On systems with IPv6 disabled, this file won't exist. 142 // On systems with IPv6 disabled, this file won't exist.
142 // Do nothing. 143 // Do nothing.
143 if os.IsNotExist(err) { 144 if errors.Is(err, os.ErrNotExist) {
144 return nil, nil 145 return nil, nil
145 } 146 }
146 147
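
Aside: two changes recur throughout this commit: errors are wrapped with %w instead of being flattened with %s, and "file missing" checks move from os.IsNotExist to errors.Is(err, os.ErrNotExist). The reason the two go together is that errors.Is walks the wrap chain created by %w, while os.IsNotExist only inspects the error value it is handed, so a wrapped not-exist error would otherwise be missed. A small standalone illustration:

package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	_, err := os.Open("/does/not/exist")
	wrapped := fmt.Errorf("couldn't get netstats: %w", err)

	fmt.Println(os.IsNotExist(wrapped))             // false: does not unwrap
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true: follows the %w chain
}
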
diff --git a/collector/nfs_linux.go b/collector/nfs_linux.go
index 55f9e19..60803eb 100644
--- a/collector/nfs_linux.go
+++ b/collector/nfs_linux.go
@@ -11,9 +11,12 @@
11// See the License for the specific language governing permissions and 11// See the License for the specific language governing permissions and
12// limitations under the License. 12// limitations under the License.
13 13
14// +build !nonfs
15
14package collector 16package collector
15 17
16import ( 18import (
19 "errors"
17 "fmt" 20 "fmt"
18 "os" 21 "os"
19 "reflect" 22 "reflect"
@@ -95,7 +98,7 @@ func NewNfsCollector(logger log.Logger) (Collector, error) {
95func (c *nfsCollector) Update(ch chan<- prometheus.Metric) error { 98func (c *nfsCollector) Update(ch chan<- prometheus.Metric) error {
96 stats, err := c.fs.ClientRPCStats() 99 stats, err := c.fs.ClientRPCStats()
97 if err != nil { 100 if err != nil {
98 if os.IsNotExist(err) { 101 if errors.Is(err, os.ErrNotExist) {
99 level.Debug(c.logger).Log("msg", "Not collecting NFS metrics", "err", err) 102 level.Debug(c.logger).Log("msg", "Not collecting NFS metrics", "err", err)
100 return ErrNoData 103 return ErrNoData
101 } 104 }
diff --git a/collector/nfsd_linux.go b/collector/nfsd_linux.go
index b6f2f8e..3dba899 100644
--- a/collector/nfsd_linux.go
+++ b/collector/nfsd_linux.go
@@ -11,9 +11,12 @@
11// See the License for the specific language governing permissions and 11// See the License for the specific language governing permissions and
12// limitations under the License. 12// limitations under the License.
13 13
14// +build !nonfsd
15
14package collector 16package collector
15 17
16import ( 18import (
19 "errors"
17 "fmt" 20 "fmt"
18 "os" 21 "os"
19 22
@@ -61,7 +64,7 @@ func NewNFSdCollector(logger log.Logger) (Collector, error) {
61func (c *nfsdCollector) Update(ch chan<- prometheus.Metric) error { 64func (c *nfsdCollector) Update(ch chan<- prometheus.Metric) error {
62 stats, err := c.fs.ServerRPCStats() 65 stats, err := c.fs.ServerRPCStats()
63 if err != nil { 66 if err != nil {
64 if os.IsNotExist(err) { 67 if errors.Is(err, os.ErrNotExist) {
65 level.Debug(c.logger).Log("msg", "Not collecting NFSd metrics", "err", err) 68 level.Debug(c.logger).Log("msg", "Not collecting NFSd metrics", "err", err)
66 return ErrNoData 69 return ErrNoData
67 } 70 }
diff --git a/collector/ntp.go b/collector/ntp.go
index 411ba25..c7d55c9 100644
--- a/collector/ntp.go
+++ b/collector/ntp.go
@@ -125,7 +125,7 @@ func (c *ntpCollector) Update(ch chan<- prometheus.Metric) error {
125 Timeout: time.Second, // default `ntpdate` timeout 125 Timeout: time.Second, // default `ntpdate` timeout
126 }) 126 })
127 if err != nil { 127 if err != nil {
128 return fmt.Errorf("couldn't get SNTP reply: %s", err) 128 return fmt.Errorf("couldn't get SNTP reply: %w", err)
129 } 129 }
130 130
131 ch <- c.stratum.mustNewConstMetric(float64(resp.Stratum)) 131 ch <- c.stratum.mustNewConstMetric(float64(resp.Stratum))
diff --git a/collector/perf_linux.go b/collector/perf_linux.go
index e452754..3a2f739 100644
--- a/collector/perf_linux.go
+++ b/collector/perf_linux.go
@@ -11,6 +11,8 @@
11// See the License for the specific language governing permissions and 11// See the License for the specific language governing permissions and
12// limitations under the License. 12// limitations under the License.
13 13
14// +build !noperf
15
14package collector 16package collector
15 17
16import ( 18import (
diff --git a/collector/powersupplyclass.go b/collector/powersupplyclass.go
index 988b0ee..adebf0e 100644
--- a/collector/powersupplyclass.go
+++ b/collector/powersupplyclass.go
@@ -17,7 +17,9 @@
17package collector 17package collector
18 18
19import ( 19import (
20 "errors"
20 "fmt" 21 "fmt"
22 "os"
21 "regexp" 23 "regexp"
22 24
23 "github.com/go-kit/kit/log" 25 "github.com/go-kit/kit/log"
@@ -54,7 +56,10 @@ func NewPowerSupplyClassCollector(logger log.Logger) (Collector, error) {
54func (c *powerSupplyClassCollector) Update(ch chan<- prometheus.Metric) error { 56func (c *powerSupplyClassCollector) Update(ch chan<- prometheus.Metric) error {
55 powerSupplyClass, err := getPowerSupplyClassInfo(c.ignoredPattern) 57 powerSupplyClass, err := getPowerSupplyClassInfo(c.ignoredPattern)
56 if err != nil { 58 if err != nil {
57 return fmt.Errorf("could not get power_supply class info: %s", err) 59 if errors.Is(err, os.ErrNotExist) {
60 return ErrNoData
61 }
62 return fmt.Errorf("could not get power_supply class info: %w", err)
58 } 63 }
59 for _, powerSupply := range powerSupplyClass { 64 for _, powerSupply := range powerSupplyClass {
60 65
@@ -184,7 +189,7 @@ func getPowerSupplyClassInfo(ignore *regexp.Regexp) (sysfs.PowerSupplyClass, err
184 powerSupplyClass, err := fs.PowerSupplyClass() 189 powerSupplyClass, err := fs.PowerSupplyClass()
185 190
186 if err != nil { 191 if err != nil {
187 return powerSupplyClass, fmt.Errorf("error obtaining power_supply class info: %s", err) 192 return powerSupplyClass, fmt.Errorf("error obtaining power_supply class info: %w", err)
188 } 193 }
189 194
190 for device := range powerSupplyClass { 195 for device := range powerSupplyClass {
diff --git a/collector/processes_linux.go b/collector/processes_linux.go
index 3d64cbd..3d4e95d 100644
--- a/collector/processes_linux.go
+++ b/collector/processes_linux.go
@@ -16,6 +16,7 @@
16package collector 16package collector
17 17
18import ( 18import (
19 "errors"
19 "fmt" 20 "fmt"
20 "os" 21 "os"
21 22
@@ -75,13 +76,13 @@ func NewProcessStatCollector(logger log.Logger) (Collector, error) {
75func (c *processCollector) Update(ch chan<- prometheus.Metric) error { 76func (c *processCollector) Update(ch chan<- prometheus.Metric) error {
76 pids, states, threads, err := c.getAllocatedThreads() 77 pids, states, threads, err := c.getAllocatedThreads()
77 if err != nil { 78 if err != nil {
78 return fmt.Errorf("unable to retrieve number of allocated threads: %q", err) 79 return fmt.Errorf("unable to retrieve number of allocated threads: %w", err)
79 } 80 }
80 81
81 ch <- prometheus.MustNewConstMetric(c.threadAlloc, prometheus.GaugeValue, float64(threads)) 82 ch <- prometheus.MustNewConstMetric(c.threadAlloc, prometheus.GaugeValue, float64(threads))
82 maxThreads, err := readUintFromFile(procFilePath("sys/kernel/threads-max")) 83 maxThreads, err := readUintFromFile(procFilePath("sys/kernel/threads-max"))
83 if err != nil { 84 if err != nil {
84 return fmt.Errorf("unable to retrieve limit number of threads: %q", err) 85 return fmt.Errorf("unable to retrieve limit number of threads: %w", err)
85 } 86 }
86 ch <- prometheus.MustNewConstMetric(c.threadLimit, prometheus.GaugeValue, float64(maxThreads)) 87 ch <- prometheus.MustNewConstMetric(c.threadLimit, prometheus.GaugeValue, float64(maxThreads))
87 88
@@ -91,7 +92,7 @@ func (c *processCollector) Update(ch chan<- prometheus.Metric) error {
91 92
92 pidM, err := readUintFromFile(procFilePath("sys/kernel/pid_max")) 93 pidM, err := readUintFromFile(procFilePath("sys/kernel/pid_max"))
93 if err != nil { 94 if err != nil {
94 return fmt.Errorf("unable to retrieve limit number of maximum pids alloved: %q", err) 95 return fmt.Errorf("unable to retrieve limit number of maximum pids alloved: %w", err)
95 } 96 }
96 ch <- prometheus.MustNewConstMetric(c.pidUsed, prometheus.GaugeValue, float64(pids)) 97 ch <- prometheus.MustNewConstMetric(c.pidUsed, prometheus.GaugeValue, float64(pids))
97 ch <- prometheus.MustNewConstMetric(c.pidMax, prometheus.GaugeValue, float64(pidM)) 98 ch <- prometheus.MustNewConstMetric(c.pidMax, prometheus.GaugeValue, float64(pidM))
@@ -110,7 +111,7 @@ func (c *processCollector) getAllocatedThreads() (int, map[string]int32, int, er
110 for _, pid := range p { 111 for _, pid := range p {
111 stat, err := pid.Stat() 112 stat, err := pid.Stat()
112 // PIDs can vanish between getting the list and getting stats. 113 // PIDs can vanish between getting the list and getting stats.
113 if os.IsNotExist(err) { 114 if errors.Is(err, os.ErrNotExist) {
114 level.Debug(c.logger).Log("msg", "file not found when retrieving stats for pid", "pid", pid, "err", err) 115 level.Debug(c.logger).Log("msg", "file not found when retrieving stats for pid", "pid", pid, "err", err)
115 continue 116 continue
116 } 117 }
diff --git a/collector/schedstat_linux.go b/collector/schedstat_linux.go
index d2f04f6..9f29a7e 100644
--- a/collector/schedstat_linux.go
+++ b/collector/schedstat_linux.go
@@ -11,9 +11,12 @@
11// See the License for the specific language governing permissions and 11// See the License for the specific language governing permissions and
12// limitations under the License. 12// limitations under the License.
13 13
14// +build !noshedstat
15
14package collector 16package collector
15 17
16import ( 18import (
19 "errors"
17 "fmt" 20 "fmt"
18 "os" 21 "os"
19 22
@@ -70,7 +73,7 @@ func init() {
70func (c *schedstatCollector) Update(ch chan<- prometheus.Metric) error { 73func (c *schedstatCollector) Update(ch chan<- prometheus.Metric) error {
71 stats, err := c.fs.Schedstat() 74 stats, err := c.fs.Schedstat()
72 if err != nil { 75 if err != nil {
73 if os.IsNotExist(err) { 76 if errors.Is(err, os.ErrNotExist) {
74 level.Debug(c.logger).Log("msg", "schedstat file does not exist") 77 level.Debug(c.logger).Log("msg", "schedstat file does not exist")
75 return ErrNoData 78 return ErrNoData
76 } 79 }
diff --git a/collector/sockstat_linux.go b/collector/sockstat_linux.go
index c7596c9..8f5a99f 100644
--- a/collector/sockstat_linux.go
+++ b/collector/sockstat_linux.go
@@ -16,6 +16,7 @@
16package collector 16package collector
17 17
18import ( 18import (
19 "errors"
19 "fmt" 20 "fmt"
20 "os" 21 "os"
21 22
@@ -55,7 +56,7 @@ func (c *sockStatCollector) Update(ch chan<- prometheus.Metric) error {
55 stat4, err := fs.NetSockstat() 56 stat4, err := fs.NetSockstat()
56 switch { 57 switch {
57 case err == nil: 58 case err == nil:
58 case os.IsNotExist(err): 59 case errors.Is(err, os.ErrNotExist):
59 level.Debug(c.logger).Log("msg", "IPv4 sockstat statistics not found, skipping") 60 level.Debug(c.logger).Log("msg", "IPv4 sockstat statistics not found, skipping")
60 default: 61 default:
61 return fmt.Errorf("failed to get IPv4 sockstat data: %w", err) 62 return fmt.Errorf("failed to get IPv4 sockstat data: %w", err)
@@ -64,7 +65,7 @@ func (c *sockStatCollector) Update(ch chan<- prometheus.Metric) error {
64 stat6, err := fs.NetSockstat6() 65 stat6, err := fs.NetSockstat6()
65 switch { 66 switch {
66 case err == nil: 67 case err == nil:
67 case os.IsNotExist(err): 68 case errors.Is(err, os.ErrNotExist):
68 level.Debug(c.logger).Log("msg", "IPv6 sockstat statistics not found, skipping") 69 level.Debug(c.logger).Log("msg", "IPv6 sockstat statistics not found, skipping")
69 default: 70 default:
70 return fmt.Errorf("failed to get IPv6 sockstat data: %w", err) 71 return fmt.Errorf("failed to get IPv6 sockstat data: %w", err)
diff --git a/collector/softnet_linux.go b/collector/softnet_linux.go
index e48f182..befec8e 100644
--- a/collector/softnet_linux.go
+++ b/collector/softnet_linux.go
@@ -72,7 +72,7 @@ func NewSoftnetCollector(logger log.Logger) (Collector, error) {
72func (c *softnetCollector) Update(ch chan<- prometheus.Metric) error { 72func (c *softnetCollector) Update(ch chan<- prometheus.Metric) error {
73 stats, err := c.fs.NetSoftnetStat() 73 stats, err := c.fs.NetSoftnetStat()
74 if err != nil { 74 if err != nil {
75 return fmt.Errorf("could not get softnet statistics: %s", err) 75 return fmt.Errorf("could not get softnet statistics: %w", err)
76 } 76 }
77 77
78 for cpuNumber, cpuStats := range stats { 78 for cpuNumber, cpuStats := range stats {
diff --git a/collector/supervisord.go b/collector/supervisord.go
index 33f3b8a..6a6b909 100644
--- a/collector/supervisord.go
+++ b/collector/supervisord.go
@@ -134,7 +134,7 @@ func (c *supervisordCollector) Update(ch chan<- prometheus.Metric) error {
134 134
135 res, err := xrpc.Call("supervisor.getAllProcessInfo") 135 res, err := xrpc.Call("supervisor.getAllProcessInfo")
136 if err != nil { 136 if err != nil {
137 return fmt.Errorf("unable to call supervisord: %s", err) 137 return fmt.Errorf("unable to call supervisord: %w", err)
138 } 138 }
139 139
140 for _, p := range res.(xmlrpc.Array) { 140 for _, p := range res.(xmlrpc.Array) {
diff --git a/collector/sysctl_bsd.go b/collector/sysctl_bsd.go
index 3038f41..a671bc2 100644
--- a/collector/sysctl_bsd.go
+++ b/collector/sysctl_bsd.go
@@ -12,7 +12,7 @@
12// limitations under the License. 12// limitations under the License.
13 13
14// +build freebsd dragonfly openbsd netbsd darwin 14// +build freebsd dragonfly openbsd netbsd darwin
15// +build !nomeminfo 15// +build cgo
16 16
17package collector 17package collector
18 18
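
Aside: many files in this commit gain old-style "// +build" constraints such as !nonfs, !nowifi or !nozfs, and sysctl_bsd.go swaps !nomeminfo for cgo. With this pre-Go-1.17 syntax, separate constraint lines are ANDed while space-separated terms within one line are ORed. The sysctl_bsd.go header restated below (with an explanatory comment added purely for illustration) shows the resulting semantics:

// +build freebsd dragonfly openbsd netbsd darwin
// +build cgo

package collector

// The two constraint lines above are ANDed: (freebsd OR dragonfly OR openbsd
// OR netbsd OR darwin) AND cgo. The per-collector tags added elsewhere in
// this commit work the same way; for example, building with
//   go build -tags nozfs
// excludes every file guarded by "// +build !nozfs".
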
diff --git a/collector/systemd_linux.go b/collector/systemd_linux.go
index 0827069..b374b37 100644
--- a/collector/systemd_linux.go
+++ b/collector/systemd_linux.go
@@ -16,6 +16,7 @@
16package collector 16package collector
17 17
18import ( 18import (
19 "errors"
19 "fmt" 20 "fmt"
20 "math" 21 "math"
21 "regexp" 22 "regexp"
@@ -39,8 +40,10 @@ const (
39) 40)
40 41
41var ( 42var (
42 unitWhitelist = kingpin.Flag("collector.systemd.unit-whitelist", "Regexp of systemd units to whitelist. Units must both match whitelist and not match blacklist to be included.").Default(".+").String() 43 unitInclude = kingpin.Flag("collector.systemd.unit-include", "Regexp of systemd units to include. Units must both match include and not match exclude to be included.").Default(".+").String()
43 unitBlacklist = kingpin.Flag("collector.systemd.unit-blacklist", "Regexp of systemd units to blacklist. Units must both match whitelist and not match blacklist to be included.").Default(".+\\.(automount|device|mount|scope|slice)").String() 44 oldUnitInclude = kingpin.Flag("collector.systemd.unit-whitelist", "DEPRECATED: Use --collector.systemd.unit-include").Hidden().String()
45 unitExclude = kingpin.Flag("collector.systemd.unit-exclude", "Regexp of systemd units to exclude. Units must both match include and not match exclude to be included.").Default(".+\\.(automount|device|mount|scope|slice)").String()
46 oldUnitExclude = kingpin.Flag("collector.systemd.unit-blacklist", "DEPRECATED: Use collector.systemd.unit-exclude").Hidden().String()
44 systemdPrivate = kingpin.Flag("collector.systemd.private", "Establish a private, direct connection to systemd without dbus (Strongly discouraged since it requires root. For testing purposes only).").Hidden().Bool() 47 systemdPrivate = kingpin.Flag("collector.systemd.private", "Establish a private, direct connection to systemd without dbus (Strongly discouraged since it requires root. For testing purposes only).").Hidden().Bool()
45 enableTaskMetrics = kingpin.Flag("collector.systemd.enable-task-metrics", "Enables service unit tasks metrics unit_tasks_current and unit_tasks_max").Bool() 48 enableTaskMetrics = kingpin.Flag("collector.systemd.enable-task-metrics", "Enables service unit tasks metrics unit_tasks_current and unit_tasks_max").Bool()
46 enableRestartsMetrics = kingpin.Flag("collector.systemd.enable-restarts-metrics", "Enables service unit metric service_restart_total").Bool() 49 enableRestartsMetrics = kingpin.Flag("collector.systemd.enable-restarts-metrics", "Enables service unit metric service_restart_total").Bool()
@@ -61,8 +64,8 @@ type systemdCollector struct {
61 socketRefusedConnectionsDesc *prometheus.Desc 64 socketRefusedConnectionsDesc *prometheus.Desc
62 systemdVersionDesc *prometheus.Desc 65 systemdVersionDesc *prometheus.Desc
63 systemdVersion int 66 systemdVersion int
64 unitWhitelistPattern *regexp.Regexp 67 unitIncludePattern *regexp.Regexp
65 unitBlacklistPattern *regexp.Regexp 68 unitExcludePattern *regexp.Regexp
66 logger log.Logger 69 logger log.Logger
67} 70}
68 71
@@ -118,8 +121,27 @@ func NewSystemdCollector(logger log.Logger) (Collector, error) {
118 systemdVersionDesc := prometheus.NewDesc( 121 systemdVersionDesc := prometheus.NewDesc(
119 prometheus.BuildFQName(namespace, subsystem, "version"), 122 prometheus.BuildFQName(namespace, subsystem, "version"),
120 "Detected systemd version", []string{}, nil) 123 "Detected systemd version", []string{}, nil)
121 unitWhitelistPattern := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *unitWhitelist)) 124
122 unitBlacklistPattern := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *unitBlacklist)) 125 if *oldUnitExclude != "" {
126 if *unitExclude == "" {
127 level.Warn(logger).Log("msg", "--collector.systemd.unit-blacklist is DEPRECATED and will be removed in 2.0.0, use --collector.systemd.unit-exclude")
128 *unitExclude = *oldUnitExclude
129 } else {
130 return nil, errors.New("--collector.systemd.unit-blacklist and --collector.systemd.unit-exclude are mutually exclusive")
131 }
132 }
133 if *oldUnitInclude != "" {
134 if *unitInclude == "" {
135 level.Warn(logger).Log("msg", "--collector.systemd.unit-whitelist is DEPRECATED and will be removed in 2.0.0, use --collector.systemd.unit-include")
136 *unitInclude = *oldUnitInclude
137 } else {
138 return nil, errors.New("--collector.systemd.unit-whitelist and --collector.systemd.unit-include are mutually exclusive")
139 }
140 }
141 level.Info(logger).Log("msg", "Parsed flag --collector.systemd.unit-include", "flag", *unitInclude)
142 unitIncludePattern := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *unitInclude))
143 level.Info(logger).Log("msg", "Parsed flag --collector.systemd.unit-exclude", "flag", *unitExclude)
144 unitExcludePattern := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *unitExclude))
123 145
124 systemdVersion := getSystemdVersion(logger) 146 systemdVersion := getSystemdVersion(logger)
125 if systemdVersion < minSystemdVersionSystemState { 147 if systemdVersion < minSystemdVersionSystemState {
@@ -141,8 +163,8 @@ func NewSystemdCollector(logger log.Logger) (Collector, error) {
141 socketRefusedConnectionsDesc: socketRefusedConnectionsDesc, 163 socketRefusedConnectionsDesc: socketRefusedConnectionsDesc,
142 systemdVersionDesc: systemdVersionDesc, 164 systemdVersionDesc: systemdVersionDesc,
143 systemdVersion: systemdVersion, 165 systemdVersion: systemdVersion,
144 unitWhitelistPattern: unitWhitelistPattern, 166 unitIncludePattern: unitIncludePattern,
145 unitBlacklistPattern: unitBlacklistPattern, 167 unitExcludePattern: unitExcludePattern,
146 logger: logger, 168 logger: logger,
147 }, nil 169 }, nil
148} 170}
@@ -153,13 +175,13 @@ func (c *systemdCollector) Update(ch chan<- prometheus.Metric) error {
153 begin := time.Now() 175 begin := time.Now()
154 conn, err := newSystemdDbusConn() 176 conn, err := newSystemdDbusConn()
155 if err != nil { 177 if err != nil {
156 return fmt.Errorf("couldn't get dbus connection: %s", err) 178 return fmt.Errorf("couldn't get dbus connection: %w", err)
157 } 179 }
158 defer conn.Close() 180 defer conn.Close()
159 181
160 allUnits, err := c.getAllUnits(conn) 182 allUnits, err := c.getAllUnits(conn)
161 if err != nil { 183 if err != nil {
162 return fmt.Errorf("couldn't get units: %s", err) 184 return fmt.Errorf("couldn't get units: %w", err)
163 } 185 }
164 level.Debug(c.logger).Log("msg", "getAllUnits took", "duration_seconds", time.Since(begin).Seconds()) 186 level.Debug(c.logger).Log("msg", "getAllUnits took", "duration_seconds", time.Since(begin).Seconds())
165 187
@@ -169,7 +191,7 @@ func (c *systemdCollector) Update(ch chan<- prometheus.Metric) error {
169 level.Debug(c.logger).Log("msg", "collectSummaryMetrics took", "duration_seconds", time.Since(begin).Seconds()) 191 level.Debug(c.logger).Log("msg", "collectSummaryMetrics took", "duration_seconds", time.Since(begin).Seconds())
170 192
171 begin = time.Now() 193 begin = time.Now()
172 units := filterUnits(allUnits, c.unitWhitelistPattern, c.unitBlacklistPattern, c.logger) 194 units := filterUnits(allUnits, c.unitIncludePattern, c.unitExcludePattern, c.logger)
173 level.Debug(c.logger).Log("msg", "filterUnits took", "duration_seconds", time.Since(begin).Seconds()) 195 level.Debug(c.logger).Log("msg", "filterUnits took", "duration_seconds", time.Since(begin).Seconds())
174 196
175 var wg sync.WaitGroup 197 var wg sync.WaitGroup
@@ -391,7 +413,7 @@ func (c *systemdCollector) collectSummaryMetrics(ch chan<- prometheus.Metric, su
391func (c *systemdCollector) collectSystemState(conn *dbus.Conn, ch chan<- prometheus.Metric) error { 413func (c *systemdCollector) collectSystemState(conn *dbus.Conn, ch chan<- prometheus.Metric) error {
392 systemState, err := conn.GetManagerProperty("SystemState") 414 systemState, err := conn.GetManagerProperty("SystemState")
393 if err != nil { 415 if err != nil {
394 return fmt.Errorf("couldn't get system state: %s", err) 416 return fmt.Errorf("couldn't get system state: %w", err)
395 } 417 }
396 isSystemRunning := 0.0 418 isSystemRunning := 0.0
397 if systemState == `"running"` { 419 if systemState == `"running"` {
@@ -443,10 +465,10 @@ func summarizeUnits(units []unit) map[string]float64 {
443 return summarized 465 return summarized
444} 466}
445 467
446func filterUnits(units []unit, whitelistPattern, blacklistPattern *regexp.Regexp, logger log.Logger) []unit { 468func filterUnits(units []unit, includePattern, excludePattern *regexp.Regexp, logger log.Logger) []unit {
447 filtered := make([]unit, 0, len(units)) 469 filtered := make([]unit, 0, len(units))
448 for _, unit := range units { 470 for _, unit := range units {
449 if whitelistPattern.MatchString(unit.Name) && !blacklistPattern.MatchString(unit.Name) && unit.LoadState == "loaded" { 471 if includePattern.MatchString(unit.Name) && !excludePattern.MatchString(unit.Name) && unit.LoadState == "loaded" {
450 level.Debug(logger).Log("msg", "Adding unit", "unit", unit.Name) 472 level.Debug(logger).Log("msg", "Adding unit", "unit", unit.Name)
451 filtered = append(filtered, unit) 473 filtered = append(filtered, unit)
452 } else { 474 } else {
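
Aside: the systemd collector anchors the user-supplied include/exclude expressions with fmt.Sprintf("^(?:%s)$", ...) before compiling them, so a pattern matches whole unit names rather than substrings. A short sketch of why the anchoring matters (illustrative only, using a made-up unit name):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	raw := "ssh.service"
	unanchored := regexp.MustCompile(raw)
	anchored := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", raw))

	fmt.Println(unanchored.MatchString("my-ssh.service.timer")) // true: substring match
	fmt.Println(anchored.MatchString("my-ssh.service.timer"))   // false: whole-name match
	fmt.Println(anchored.MatchString("ssh.service"))            // true
}
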
diff --git a/collector/systemd_linux_test.go b/collector/systemd_linux_test.go
index 613a1ab..93137f2 100644
--- a/collector/systemd_linux_test.go
+++ b/collector/systemd_linux_test.go
@@ -89,11 +89,11 @@ func getUnitListFixtures() [][]unit {
89 89
90func TestSystemdIgnoreFilter(t *testing.T) { 90func TestSystemdIgnoreFilter(t *testing.T) {
91 fixtures := getUnitListFixtures() 91 fixtures := getUnitListFixtures()
92 whitelistPattern := regexp.MustCompile("^foo$") 92 includePattern := regexp.MustCompile("^foo$")
93 blacklistPattern := regexp.MustCompile("^bar$") 93 excludePattern := regexp.MustCompile("^bar$")
94 filtered := filterUnits(fixtures[0], whitelistPattern, blacklistPattern, log.NewNopLogger()) 94 filtered := filterUnits(fixtures[0], includePattern, excludePattern, log.NewNopLogger())
95 for _, unit := range filtered { 95 for _, unit := range filtered {
96 if blacklistPattern.MatchString(unit.Name) || !whitelistPattern.MatchString(unit.Name) { 96 if excludePattern.MatchString(unit.Name) || !includePattern.MatchString(unit.Name) {
97 t.Error(unit.Name, "should not be in the filtered list") 97 t.Error(unit.Name, "should not be in the filtered list")
98 } 98 }
99 } 99 }
@@ -106,7 +106,7 @@ func TestSystemdIgnoreFilterDefaultKeepsAll(t *testing.T) {
106 } 106 }
107 fixtures := getUnitListFixtures() 107 fixtures := getUnitListFixtures()
108 collector := c.(*systemdCollector) 108 collector := c.(*systemdCollector)
109 filtered := filterUnits(fixtures[0], collector.unitWhitelistPattern, collector.unitBlacklistPattern, logger) 109 filtered := filterUnits(fixtures[0], collector.unitIncludePattern, collector.unitExcludePattern, logger)
110 // Adjust fixtures by 3 "not-found" units. 110 // Adjust fixtures by 3 "not-found" units.
111 if len(filtered) != len(fixtures[0])-3 { 111 if len(filtered) != len(fixtures[0])-3 {
112 t.Error("Default filters removed units") 112 t.Error("Default filters removed units")
diff --git a/collector/tcpstat_linux.go b/collector/tcpstat_linux.go
index db9c655..af5ae98 100644
--- a/collector/tcpstat_linux.go
+++ b/collector/tcpstat_linux.go
@@ -82,7 +82,7 @@ func NewTCPStatCollector(logger log.Logger) (Collector, error) {
82func (c *tcpStatCollector) Update(ch chan<- prometheus.Metric) error { 82func (c *tcpStatCollector) Update(ch chan<- prometheus.Metric) error {
83 tcpStats, err := getTCPStats(procFilePath("net/tcp")) 83 tcpStats, err := getTCPStats(procFilePath("net/tcp"))
84 if err != nil { 84 if err != nil {
85 return fmt.Errorf("couldn't get tcpstats: %s", err) 85 return fmt.Errorf("couldn't get tcpstats: %w", err)
86 } 86 }
87 87
88 // if enabled ipv6 system 88 // if enabled ipv6 system
@@ -90,7 +90,7 @@ func (c *tcpStatCollector) Update(ch chan<- prometheus.Metric) error {
90 if _, hasIPv6 := os.Stat(tcp6File); hasIPv6 == nil { 90 if _, hasIPv6 := os.Stat(tcp6File); hasIPv6 == nil {
91 tcp6Stats, err := getTCPStats(tcp6File) 91 tcp6Stats, err := getTCPStats(tcp6File)
92 if err != nil { 92 if err != nil {
93 return fmt.Errorf("couldn't get tcp6stats: %s", err) 93 return fmt.Errorf("couldn't get tcp6stats: %w", err)
94 } 94 }
95 95
96 for st, value := range tcp6Stats { 96 for st, value := range tcp6Stats {
diff --git a/collector/textfile.go b/collector/textfile.go
index 64165e6..50c1807 100644
--- a/collector/textfile.go
+++ b/collector/textfile.go
@@ -238,14 +238,14 @@ func (c *textFileCollector) processFile(name string, ch chan<- prometheus.Metric
238 path := filepath.Join(c.path, name) 238 path := filepath.Join(c.path, name)
239 f, err := os.Open(path) 239 f, err := os.Open(path)
240 if err != nil { 240 if err != nil {
241 return nil, fmt.Errorf("failed to open textfile data file %q: %v", path, err) 241 return nil, fmt.Errorf("failed to open textfile data file %q: %w", path, err)
242 } 242 }
243 defer f.Close() 243 defer f.Close()
244 244
245 var parser expfmt.TextParser 245 var parser expfmt.TextParser
246 families, err := parser.TextToMetricFamilies(f) 246 families, err := parser.TextToMetricFamilies(f)
247 if err != nil { 247 if err != nil {
248 return nil, fmt.Errorf("failed to parse textfile data from %q: %v", path, err) 248 return nil, fmt.Errorf("failed to parse textfile data from %q: %w", path, err)
249 } 249 }
250 250
251 if hasTimestamps(families) { 251 if hasTimestamps(families) {
@@ -267,7 +267,7 @@ func (c *textFileCollector) processFile(name string, ch chan<- prometheus.Metric
267 // a failure does not appear fresh. 267 // a failure does not appear fresh.
268 stat, err := f.Stat() 268 stat, err := f.Stat()
269 if err != nil { 269 if err != nil {
270 return nil, fmt.Errorf("failed to stat %q: %v", path, err) 270 return nil, fmt.Errorf("failed to stat %q: %w", path, err)
271 } 271 }
272 272
273 t := stat.ModTime() 273 t := stat.ModTime()
diff --git a/collector/udp_queues_linux.go b/collector/udp_queues_linux.go
index 512c010..e1b347e 100644
--- a/collector/udp_queues_linux.go
+++ b/collector/udp_queues_linux.go
@@ -16,6 +16,7 @@
16package collector 16package collector
17 17
18import ( 18import (
19 "errors"
19 "fmt" 20 "fmt"
20 "os" 21 "os"
21 22
@@ -41,7 +42,7 @@ func init() {
41func NewUDPqueuesCollector(logger log.Logger) (Collector, error) { 42func NewUDPqueuesCollector(logger log.Logger) (Collector, error) {
42 fs, err := procfs.NewFS(*procPath) 43 fs, err := procfs.NewFS(*procPath)
43 if err != nil { 44 if err != nil {
44 return nil, fmt.Errorf("failed to open procfs: %v", err) 45 return nil, fmt.Errorf("failed to open procfs: %w", err)
45 } 46 }
46 return &udpQueuesCollector{ 47 return &udpQueuesCollector{
47 fs: fs, 48 fs: fs,
@@ -61,10 +62,10 @@ func (c *udpQueuesCollector) Update(ch chan<- prometheus.Metric) error {
61 ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(s4.TxQueueLength), "tx", "v4") 62 ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(s4.TxQueueLength), "tx", "v4")
62 ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(s4.RxQueueLength), "rx", "v4") 63 ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(s4.RxQueueLength), "rx", "v4")
63 } else { 64 } else {
64 if os.IsNotExist(errIPv4) { 65 if errors.Is(errIPv4, os.ErrNotExist) {
65 level.Debug(c.logger).Log("msg", "not collecting ipv4 based metrics") 66 level.Debug(c.logger).Log("msg", "not collecting ipv4 based metrics")
66 } else { 67 } else {
67 return fmt.Errorf("couldn't get upd queued bytes: %s", errIPv4) 68 return fmt.Errorf("couldn't get upd queued bytes: %w", errIPv4)
68 } 69 }
69 } 70 }
70 71
@@ -73,14 +74,14 @@ func (c *udpQueuesCollector) Update(ch chan<- prometheus.Metric) error {
73 ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(s6.TxQueueLength), "tx", "v6") 74 ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(s6.TxQueueLength), "tx", "v6")
74 ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(s6.RxQueueLength), "rx", "v6") 75 ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(s6.RxQueueLength), "rx", "v6")
75 } else { 76 } else {
76 if os.IsNotExist(errIPv6) { 77 if errors.Is(errIPv6, os.ErrNotExist) {
77 level.Debug(c.logger).Log("msg", "not collecting ipv6 based metrics") 78 level.Debug(c.logger).Log("msg", "not collecting ipv6 based metrics")
78 } else { 79 } else {
79 return fmt.Errorf("couldn't get upd6 queued bytes: %s", errIPv6) 80 return fmt.Errorf("couldn't get upd6 queued bytes: %w", errIPv6)
80 } 81 }
81 } 82 }
82 83
83 if os.IsNotExist(errIPv4) && os.IsNotExist(errIPv6) { 84 if errors.Is(errIPv4, os.ErrNotExist) && errors.Is(errIPv6, os.ErrNotExist) {
84 return ErrNoData 85 return ErrNoData
85 } 86 }
86 return nil 87 return nil
diff --git a/collector/wifi_linux.go b/collector/wifi_linux.go
index 118e714..076982d 100644
--- a/collector/wifi_linux.go
+++ b/collector/wifi_linux.go
@@ -11,10 +11,13 @@
11// See the License for the specific language governing permissions and 11// See the License for the specific language governing permissions and
12// limitations under the License. 12// limitations under the License.
13 13
14// +build !nowifi
15
14package collector 16package collector
15 17
16import ( 18import (
17 "encoding/json" 19 "encoding/json"
20 "errors"
18 "fmt" 21 "fmt"
19 "io/ioutil" 22 "io/ioutil"
20 "os" 23 "os"
@@ -165,11 +168,11 @@ func (c *wifiCollector) Update(ch chan<- prometheus.Metric) error {
165 stat, err := newWifiStater(*collectorWifi) 168 stat, err := newWifiStater(*collectorWifi)
166 if err != nil { 169 if err != nil {
167 // Cannot access wifi metrics, report no error. 170 // Cannot access wifi metrics, report no error.
168 if os.IsNotExist(err) { 171 if errors.Is(err, os.ErrNotExist) {
169 level.Debug(c.logger).Log("msg", "wifi collector metrics are not available for this system") 172 level.Debug(c.logger).Log("msg", "wifi collector metrics are not available for this system")
170 return ErrNoData 173 return ErrNoData
171 } 174 }
172 if os.IsPermission(err) { 175 if errors.Is(err, os.ErrPermission) {
173 level.Debug(c.logger).Log("msg", "wifi collector got permission denied when accessing metrics") 176 level.Debug(c.logger).Log("msg", "wifi collector got permission denied when accessing metrics")
174 return ErrNoData 177 return ErrNoData
175 } 178 }
@@ -199,14 +202,14 @@ func (c *wifiCollector) Update(ch chan<- prometheus.Metric) error {
199 ) 202 )
200 203
201 // When a statistic is not available for a given interface, package wifi 204 // When a statistic is not available for a given interface, package wifi
202 // returns an error compatible with os.IsNotExist. We leverage this to 205 // returns a os.ErrNotExist error. We leverage this to only export
203 // only export metrics which are actually valid for given interface types. 206 // metrics which are actually valid for given interface types.
204 207
205 bss, err := stat.BSS(ifi) 208 bss, err := stat.BSS(ifi)
206 switch { 209 switch {
207 case err == nil: 210 case err == nil:
208 c.updateBSSStats(ch, ifi.Name, bss) 211 c.updateBSSStats(ch, ifi.Name, bss)
209 case os.IsNotExist(err): 212 case errors.Is(err, os.ErrNotExist):
210 level.Debug(c.logger).Log("msg", "BSS information not found for wifi device", "name", ifi.Name) 213 level.Debug(c.logger).Log("msg", "BSS information not found for wifi device", "name", ifi.Name)
211 default: 214 default:
212 return fmt.Errorf("failed to retrieve BSS for device %s: %v", 215 return fmt.Errorf("failed to retrieve BSS for device %s: %v",
@@ -219,7 +222,7 @@ func (c *wifiCollector) Update(ch chan<- prometheus.Metric) error {
219 for _, station := range stations { 222 for _, station := range stations {
220 c.updateStationStats(ch, ifi.Name, station) 223 c.updateStationStats(ch, ifi.Name, station)
221 } 224 }
222 case os.IsNotExist(err): 225 case errors.Is(err, os.ErrNotExist):
223 level.Debug(c.logger).Log("msg", "station information not found for wifi device", "name", ifi.Name) 226 level.Debug(c.logger).Log("msg", "station information not found for wifi device", "name", ifi.Name)
224 default: 227 default:
225 return fmt.Errorf("failed to retrieve station info for device %q: %v", 228 return fmt.Errorf("failed to retrieve station info for device %q: %v",
diff --git a/collector/xfs_linux.go b/collector/xfs_linux.go
index 77824c5..36dfff5 100644
--- a/collector/xfs_linux.go
+++ b/collector/xfs_linux.go
@@ -11,6 +11,8 @@
11// See the License for the specific language governing permissions and 11// See the License for the specific language governing permissions and
12// limitations under the License. 12// limitations under the License.
13 13
14// +build !noxfs
15
14package collector 16package collector
15 17
16import ( 18import (
diff --git a/collector/zfs_freebsd.go b/collector/zfs_freebsd.go
index a625bbc..2f20096 100644
--- a/collector/zfs_freebsd.go
+++ b/collector/zfs_freebsd.go
@@ -11,6 +11,8 @@
11// See the License for the specific language governing permissions and 11// See the License for the specific language governing permissions and
12// limitations under the License. 12// limitations under the License.
13 13
14// +build !nozfs
15
14package collector 16package collector
15 17
16import ( 18import (
@@ -248,7 +250,7 @@ func (c *zfsCollector) Update(ch chan<- prometheus.Metric) error {
248 for _, m := range c.sysctls { 250 for _, m := range c.sysctls {
249 v, err := m.Value() 251 v, err := m.Value()
250 if err != nil { 252 if err != nil {
251 return fmt.Errorf("couldn't get sysctl: %s", err) 253 return fmt.Errorf("couldn't get sysctl: %w", err)
252 } 254 }
253 255
254 ch <- prometheus.MustNewConstMetric( 256 ch <- prometheus.MustNewConstMetric(
diff --git a/collector/zfs_linux.go b/collector/zfs_linux.go
index e2c9749..f55d98a 100644
--- a/collector/zfs_linux.go
+++ b/collector/zfs_linux.go
@@ -11,6 +11,8 @@
11// See the License for the specific language governing permissions and 11// See the License for the specific language governing permissions and
12// limitations under the License. 12// limitations under the License.
13 13
14// +build !nozfs
15
14package collector 16package collector
15 17
16import ( 18import (
@@ -185,7 +187,7 @@ func (c *zfsCollector) parsePoolProcfsFile(reader io.Reader, zpoolPath string, h
185 187
186 value, err := strconv.ParseUint(line[i], 10, 64) 188 value, err := strconv.ParseUint(line[i], 10, 64)
187 if err != nil { 189 if err != nil {
188 return fmt.Errorf("could not parse expected integer value for %q: %v", key, err) 190 return fmt.Errorf("could not parse expected integer value for %q: %w", key, err)
189 } 191 }
190 handler(zpoolName, zfsSysctl(key), value) 192 handler(zpoolName, zfsSysctl(key), value)
191 } 193 }
diff --git a/collector/zfs_solaris.go b/collector/zfs_solaris.go
index bfda64f..1c0460c 100644
--- a/collector/zfs_solaris.go
+++ b/collector/zfs_solaris.go
@@ -12,6 +12,7 @@
12// limitations under the License. 12// limitations under the License.
13 13
14// +build solaris 14// +build solaris
15// +build !nozfs
15 16
16package collector 17package collector
17 18
diff --git a/go.mod b/go.mod
index 8c1fa87..a98b7db 100644
--- a/go.mod
+++ b/go.mod
@@ -20,7 +20,7 @@ require (
20 github.com/prometheus/client_golang v1.6.0 20 github.com/prometheus/client_golang v1.6.0
21 github.com/prometheus/client_model v0.2.0 21 github.com/prometheus/client_model v0.2.0
22 github.com/prometheus/common v0.10.0 22 github.com/prometheus/common v0.10.0
23 github.com/prometheus/procfs v0.0.11 23 github.com/prometheus/procfs v0.1.3
24 github.com/siebenmann/go-kstat v0.0.0-20200303194639-4e8294f9e9d5 24 github.com/siebenmann/go-kstat v0.0.0-20200303194639-4e8294f9e9d5
25 github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a 25 github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a
26 go.uber.org/multierr v1.5.0 // indirect 26 go.uber.org/multierr v1.5.0 // indirect
diff --git a/go.sum b/go.sum
index f810565..6697ba5 100644
--- a/go.sum
+++ b/go.sum
@@ -289,6 +289,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
289github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= 289github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
290github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= 290github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI=
291github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= 291github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
292github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
293github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
292github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= 294github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
293github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= 295github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
294github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 296github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
diff --git a/https/web-config.yml b/https/web-config.yml
index 9937291..7d40d9b 100644
--- a/https/web-config.yml
+++ b/https/web-config.yml
@@ -1,11 +1,6 @@
1tls_config: 1# Minimal TLS configuration example. Additionally, a certificate and a key file
2 # Certificate and key files for server to use to authenticate to client 2# are needed.
3 cert_file: <filename> 3tls_server_config:
4 key_file: <filename> 4 cert_file: server.crt
5 key_file: server.key
5 6
6 # Server policy for client authentication. Maps to ClientAuth Policies
7 # For more detail on clientAuth options: [ClientAuthType](https://golang.org/pkg/crypto/tls/#ClientAuthType)
8 [ client_auth_type: <string> | default = "NoClientCert" ]
9
10 # CA certificate for client certificate authentication to the server
11 [ client_ca_file: <filename> ]
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
index b978dfc..9320176 100644
--- a/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -150,6 +150,17 @@ else
150 $(GO) get $(GOOPTS) -t ./... 150 $(GO) get $(GOOPTS) -t ./...
151endif 151endif
152 152
153.PHONY: update-go-deps
154update-go-deps:
155 @echo ">> updating Go dependencies"
156 @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
157 $(GO) get $$m; \
158 done
159 GO111MODULE=$(GO111MODULE) $(GO) mod tidy
160ifneq (,$(wildcard vendor))
161 GO111MODULE=$(GO111MODULE) $(GO) mod vendor
162endif
163
153.PHONY: common-test-short 164.PHONY: common-test-short
154common-test-short: $(GOTEST_DIR) 165common-test-short: $(GOTEST_DIR)
155 @echo ">> running short tests" 166 @echo ">> running short tests"
diff --git a/vendor/github.com/prometheus/procfs/bcache/bcache.go b/vendor/github.com/prometheus/procfs/bcache/bcache.go
index df724ed..1176a55 100644
--- a/vendor/github.com/prometheus/procfs/bcache/bcache.go
+++ b/vendor/github.com/prometheus/procfs/bcache/bcache.go
@@ -43,10 +43,11 @@ type BcacheStats struct { // nolint:golint
43 43
44// BdevStats contains statistics for one backing device. 44// BdevStats contains statistics for one backing device.
45type BdevStats struct { 45type BdevStats struct {
46 Name string 46 Name string
47 DirtyData uint64 47 DirtyData uint64
48 FiveMin PeriodStats 48 FiveMin PeriodStats
49 Total PeriodStats 49 Total PeriodStats
50 WritebackRateDebug WritebackRateDebugStats
50} 51}
51 52
52// CacheStats contains statistics for one cache device. 53// CacheStats contains statistics for one cache device.
@@ -82,3 +83,14 @@ type PeriodStats struct {
82 CacheMisses uint64 83 CacheMisses uint64
83 CacheReadaheads uint64 84 CacheReadaheads uint64
84} 85}
86
87// WritebackRateDebugStats contains bcache writeback statistics.
88type WritebackRateDebugStats struct {
89 Rate uint64
90 Dirty uint64
91 Target uint64
92 Proportional int64
93 Integral int64
94 Change int64
95 NextIO int64
96}
diff --git a/vendor/github.com/prometheus/procfs/bcache/get.go b/vendor/github.com/prometheus/procfs/bcache/get.go
index 67fcf8b..c4747ea 100644
--- a/vendor/github.com/prometheus/procfs/bcache/get.go
+++ b/vendor/github.com/prometheus/procfs/bcache/get.go
@@ -174,6 +174,17 @@ func dehumanize(hbytes []byte) (uint64, error) {
174 return res, nil 174 return res, nil
175} 175}
176 176
177func dehumanizeSigned(str string) (int64, error) {
178 value, err := dehumanize([]byte(strings.TrimPrefix(str, "-")))
179 if err != nil {
180 return 0, err
181 }
182 if strings.HasPrefix(str, "-") {
183 return int64(-value), nil
184 }
185 return int64(value), nil
186}
187
177type parser struct { 188type parser struct {
178 uuidPath string 189 uuidPath string
179 subDir string 190 subDir string
@@ -232,6 +243,72 @@ func parsePriorityStats(line string, ps *PriorityStats) error {
232 return nil 243 return nil
233} 244}
234 245
246// ParseWritebackRateDebug parses lines from the writeback_rate_debug file.
247func parseWritebackRateDebug(line string, wrd *WritebackRateDebugStats) error {
248 switch {
249 case strings.HasPrefix(line, "rate:"):
250 fields := strings.Fields(line)
251 rawValue := fields[len(fields)-1]
252 valueStr := strings.TrimSuffix(rawValue, "/sec")
253 value, err := dehumanize([]byte(valueStr))
254 if err != nil {
255 return err
256 }
257 wrd.Rate = value
258 case strings.HasPrefix(line, "dirty:"):
259 fields := strings.Fields(line)
260 valueStr := fields[len(fields)-1]
261 value, err := dehumanize([]byte(valueStr))
262 if err != nil {
263 return err
264 }
265 wrd.Dirty = value
266 case strings.HasPrefix(line, "target:"):
267 fields := strings.Fields(line)
268 valueStr := fields[len(fields)-1]
269 value, err := dehumanize([]byte(valueStr))
270 if err != nil {
271 return err
272 }
273 wrd.Target = value
274 case strings.HasPrefix(line, "proportional:"):
275 fields := strings.Fields(line)
276 valueStr := fields[len(fields)-1]
277 value, err := dehumanizeSigned(valueStr)
278 if err != nil {
279 return err
280 }
281 wrd.Proportional = value
282 case strings.HasPrefix(line, "integral:"):
283 fields := strings.Fields(line)
284 valueStr := fields[len(fields)-1]
285 value, err := dehumanizeSigned(valueStr)
286 if err != nil {
287 return err
288 }
289 wrd.Integral = value
290 case strings.HasPrefix(line, "change:"):
291 fields := strings.Fields(line)
292 rawValue := fields[len(fields)-1]
293 valueStr := strings.TrimSuffix(rawValue, "/sec")
294 value, err := dehumanizeSigned(valueStr)
295 if err != nil {
296 return err
297 }
298 wrd.Change = value
299 case strings.HasPrefix(line, "next io:"):
300 fields := strings.Fields(line)
301 rawValue := fields[len(fields)-1]
302 valueStr := strings.TrimSuffix(rawValue, "ms")
303 value, err := strconv.ParseInt(valueStr, 10, 64)
304 if err != nil {
305 return err
306 }
307 wrd.NextIO = value
308 }
309 return nil
310}
311
235func (p *parser) getPriorityStats() PriorityStats { 312func (p *parser) getPriorityStats() PriorityStats {
236 var res PriorityStats 313 var res PriorityStats
237 314
@@ -263,6 +340,35 @@ func (p *parser) getPriorityStats() PriorityStats {
263 return res 340 return res
264} 341}
265 342
343func (p *parser) getWritebackRateDebug() WritebackRateDebugStats {
344 var res WritebackRateDebugStats
345
346 if p.err != nil {
347 return res
348 }
349 path := path.Join(p.currentDir, "writeback_rate_debug")
350 file, err := os.Open(path)
351 if err != nil {
352 p.err = fmt.Errorf("failed to read: %s", path)
353 return res
354 }
355 defer file.Close()
356
357 scanner := bufio.NewScanner(file)
358 for scanner.Scan() {
359 err = parseWritebackRateDebug(scanner.Text(), &res)
360 if err != nil {
361 p.err = fmt.Errorf("failed to parse: %s (%s)", path, err)
362 return res
363 }
364 }
365 if err := scanner.Err(); err != nil {
366 p.err = fmt.Errorf("failed to parse: %s (%s)", path, err)
367 return res
368 }
369 return res
370}
371
266// GetStats collects from sysfs files data tied to one bcache ID. 372// GetStats collects from sysfs files data tied to one bcache ID.
267func GetStats(uuidPath string, priorityStats bool) (*Stats, error) { 373func GetStats(uuidPath string, priorityStats bool) (*Stats, error) {
268 var bs Stats 374 var bs Stats
@@ -339,6 +445,9 @@ func GetStats(uuidPath string, priorityStats bool) (*Stats, error) {
339 par.setSubDir(bds.Name) 445 par.setSubDir(bds.Name)
340 bds.DirtyData = par.readValue("dirty_data") 446 bds.DirtyData = par.readValue("dirty_data")
341 447
448 wrd := par.getWritebackRateDebug()
449 bds.WritebackRateDebug = wrd
450
342 // dir <uuidPath>/<bds.Name>/stats_five_minute 451 // dir <uuidPath>/<bds.Name>/stats_five_minute
343 par.setSubDir(bds.Name, "stats_five_minute") 452 par.setSubDir(bds.Name, "stats_five_minute")
344 bds.FiveMin.Bypassed = par.readValue("bypassed") 453 bds.FiveMin.Bypassed = par.readValue("bypassed")
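
Aside: the new writeback_rate_debug parsing takes the last whitespace-separated field of each line, strips unit suffixes such as "/sec" or "ms", and routes possibly negative values through dehumanizeSigned, which parses the magnitude and re-applies the sign. A stripped-down sketch of that sign handling follows; it uses plain strconv where the real code delegates to dehumanize (which also understands k/M/G-style suffixes), so it is an approximation, not the vendored implementation.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSigned mimics dehumanizeSigned's approach: parse the magnitude,
// then negate if the original string carried a leading minus sign.
func parseSigned(s string) (int64, error) {
	v, err := strconv.ParseUint(strings.TrimPrefix(s, "-"), 10, 63)
	if err != nil {
		return 0, err
	}
	if strings.HasPrefix(s, "-") {
		return -int64(v), nil
	}
	return int64(v), nil
}

func main() {
	for _, s := range []string{"427", "-790"} {
		v, _ := parseSigned(s)
		fmt.Println(s, "->", v)
	}
}
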
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go
index 2e02215..31d42f7 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo.go
@@ -11,11 +11,15 @@
11// See the License for the specific language governing permissions and 11// See the License for the specific language governing permissions and
12// limitations under the License. 12// limitations under the License.
13 13
14// +build linux
15
14package procfs 16package procfs
15 17
16import ( 18import (
17 "bufio" 19 "bufio"
18 "bytes" 20 "bytes"
21 "errors"
22 "regexp"
19 "strconv" 23 "strconv"
20 "strings" 24 "strings"
21 25
@@ -52,6 +56,11 @@ type CPUInfo struct {
52 PowerManagement string 56 PowerManagement string
53} 57}
54 58
59var (
60 cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`)
61 cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`)
62)
63
55// CPUInfo returns information about current system CPUs. 64// CPUInfo returns information about current system CPUs.
56// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt 65// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
57func (fs FS) CPUInfo() ([]CPUInfo, error) { 66func (fs FS) CPUInfo() ([]CPUInfo, error) {
@@ -62,14 +71,26 @@ func (fs FS) CPUInfo() ([]CPUInfo, error) {
62 return parseCPUInfo(data) 71 return parseCPUInfo(data)
63} 72}
64 73
65// parseCPUInfo parses data from /proc/cpuinfo 74func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
66func parseCPUInfo(info []byte) ([]CPUInfo, error) {
67 cpuinfo := []CPUInfo{}
68 i := -1
69 scanner := bufio.NewScanner(bytes.NewReader(info)) 75 scanner := bufio.NewScanner(bytes.NewReader(info))
76
77 // find the first "processor" line
78 firstLine := firstNonEmptyLine(scanner)
79 if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
80 return nil, errors.New("invalid cpuinfo file: " + firstLine)
81 }
82 field := strings.SplitN(firstLine, ": ", 2)
83 v, err := strconv.ParseUint(field[1], 0, 32)
84 if err != nil {
85 return nil, err
86 }
87 firstcpu := CPUInfo{Processor: uint(v)}
88 cpuinfo := []CPUInfo{firstcpu}
89 i := 0
90
70 for scanner.Scan() { 91 for scanner.Scan() {
71 line := scanner.Text() 92 line := scanner.Text()
72 if strings.TrimSpace(line) == "" { 93 if !strings.Contains(line, ":") {
73 continue 94 continue
74 } 95 }
75 field := strings.SplitN(line, ": ", 2) 96 field := strings.SplitN(line, ": ", 2)
@@ -82,7 +103,7 @@ func parseCPUInfo(info []byte) ([]CPUInfo, error) {
82 return nil, err 103 return nil, err
83 } 104 }
84 cpuinfo[i].Processor = uint(v) 105 cpuinfo[i].Processor = uint(v)
85 case "vendor_id": 106 case "vendor", "vendor_id":
86 cpuinfo[i].VendorID = field[1] 107 cpuinfo[i].VendorID = field[1]
87 case "cpu family": 108 case "cpu family":
88 cpuinfo[i].CPUFamily = field[1] 109 cpuinfo[i].CPUFamily = field[1]
@@ -163,5 +184,237 @@ func parseCPUInfo(info []byte) ([]CPUInfo, error) {
163 } 184 }
164 } 185 }
165 return cpuinfo, nil 186 return cpuinfo, nil
187}
188
189func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
190 scanner := bufio.NewScanner(bytes.NewReader(info))
191
192 firstLine := firstNonEmptyLine(scanner)
193 match, _ := regexp.MatchString("^[Pp]rocessor", firstLine)
194 if !match || !strings.Contains(firstLine, ":") {
195 return nil, errors.New("invalid cpuinfo file: " + firstLine)
196 }
197 field := strings.SplitN(firstLine, ": ", 2)
198 cpuinfo := []CPUInfo{}
199 featuresLine := ""
200 commonCPUInfo := CPUInfo{}
201 i := 0
202 if strings.TrimSpace(field[0]) == "Processor" {
203 commonCPUInfo = CPUInfo{ModelName: field[1]}
204 i = -1
205 } else {
206 v, err := strconv.ParseUint(field[1], 0, 32)
207 if err != nil {
208 return nil, err
209 }
210 firstcpu := CPUInfo{Processor: uint(v)}
211 cpuinfo = []CPUInfo{firstcpu}
212 }
213
214 for scanner.Scan() {
215 line := scanner.Text()
216 if !strings.Contains(line, ":") {
217 continue
218 }
219 field := strings.SplitN(line, ": ", 2)
220 switch strings.TrimSpace(field[0]) {
221 case "processor":
222 cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor
223 i++
224 v, err := strconv.ParseUint(field[1], 0, 32)
225 if err != nil {
226 return nil, err
227 }
228 cpuinfo[i].Processor = uint(v)
229 case "BogoMIPS":
230 if i == -1 {
231 cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor
232 i++
233 cpuinfo[i].Processor = 0
234 }
235 v, err := strconv.ParseFloat(field[1], 64)
236 if err != nil {
237 return nil, err
238 }
239 cpuinfo[i].BogoMips = v
240 case "Features":
241 featuresLine = line
242 case "model name":
243 cpuinfo[i].ModelName = field[1]
244 }
245 }
246 fields := strings.SplitN(featuresLine, ": ", 2)
247 for i := range cpuinfo {
248 cpuinfo[i].Flags = strings.Fields(fields[1])
249 }
250 return cpuinfo, nil
251
252}
253
254func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
255 scanner := bufio.NewScanner(bytes.NewReader(info))
256
257 firstLine := firstNonEmptyLine(scanner)
258 if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
259 return nil, errors.New("invalid cpuinfo file: " + firstLine)
260 }
261 field := strings.SplitN(firstLine, ": ", 2)
262 cpuinfo := []CPUInfo{}
263 commonCPUInfo := CPUInfo{VendorID: field[1]}
264
265 for scanner.Scan() {
266 line := scanner.Text()
267 if !strings.Contains(line, ":") {
268 continue
269 }
270 field := strings.SplitN(line, ": ", 2)
271 switch strings.TrimSpace(field[0]) {
272 case "bogomips per cpu":
273 v, err := strconv.ParseFloat(field[1], 64)
274 if err != nil {
275 return nil, err
276 }
277 commonCPUInfo.BogoMips = v
278 case "features":
279 commonCPUInfo.Flags = strings.Fields(field[1])
280 }
281 if strings.HasPrefix(line, "processor") {
282 match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
283 if len(match) < 2 {
284 return nil, errors.New("Invalid line found in cpuinfo: " + line)
285 }
286 cpu := commonCPUInfo
287 v, err := strconv.ParseUint(match[1], 0, 32)
288 if err != nil {
289 return nil, err
290 }
291 cpu.Processor = uint(v)
292 cpuinfo = append(cpuinfo, cpu)
293 }
294 if strings.HasPrefix(line, "cpu number") {
295 break
296 }
297 }
298
299 i := 0
300 for scanner.Scan() {
301 line := scanner.Text()
302 if !strings.Contains(line, ":") {
303 continue
304 }
305 field := strings.SplitN(line, ": ", 2)
306 switch strings.TrimSpace(field[0]) {
307 case "cpu number":
308 i++
309 case "cpu MHz dynamic":
310 clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
311 v, err := strconv.ParseFloat(clock, 64)
312 if err != nil {
313 return nil, err
314 }
315 cpuinfo[i].CPUMHz = v
316 }
317 }
318
319 return cpuinfo, nil
320}
321
322func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
323 scanner := bufio.NewScanner(bytes.NewReader(info))
324
325 // find the first "processor" line
326 firstLine := firstNonEmptyLine(scanner)
327 if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
328 return nil, errors.New("invalid cpuinfo file: " + firstLine)
329 }
330 field := strings.SplitN(firstLine, ": ", 2)
331 cpuinfo := []CPUInfo{}
332 systemType := field[1]
333
334 i := 0
335
336 for scanner.Scan() {
337 line := scanner.Text()
338 if !strings.Contains(line, ":") {
339 continue
340 }
341 field := strings.SplitN(line, ": ", 2)
342 switch strings.TrimSpace(field[0]) {
343 case "processor":
344 v, err := strconv.ParseUint(field[1], 0, 32)
345 if err != nil {
346 return nil, err
347 }
348 i = int(v)
349 cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
350 cpuinfo[i].Processor = uint(v)
351 cpuinfo[i].VendorID = systemType
352 case "cpu model":
353 cpuinfo[i].ModelName = field[1]
354 case "BogoMIPS":
355 v, err := strconv.ParseFloat(field[1], 64)
356 if err != nil {
357 return nil, err
358 }
359 cpuinfo[i].BogoMips = v
360 }
361 }
362 return cpuinfo, nil
363}
364
365func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
366 scanner := bufio.NewScanner(bytes.NewReader(info))
367
368 firstLine := firstNonEmptyLine(scanner)
369 if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
370 return nil, errors.New("invalid cpuinfo file: " + firstLine)
371 }
372 field := strings.SplitN(firstLine, ": ", 2)
373 v, err := strconv.ParseUint(field[1], 0, 32)
374 if err != nil {
375 return nil, err
376 }
377 firstcpu := CPUInfo{Processor: uint(v)}
378 cpuinfo := []CPUInfo{firstcpu}
379 i := 0
380
381 for scanner.Scan() {
382 line := scanner.Text()
383 if !strings.Contains(line, ":") {
384 continue
385 }
386 field := strings.SplitN(line, ": ", 2)
387 switch strings.TrimSpace(field[0]) {
388 case "processor":
389 cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
390 i++
391 v, err := strconv.ParseUint(field[1], 0, 32)
392 if err != nil {
393 return nil, err
394 }
395 cpuinfo[i].Processor = uint(v)
396 case "cpu":
397 cpuinfo[i].VendorID = field[1]
398 case "clock":
399 clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
400 v, err := strconv.ParseFloat(clock, 64)
401 if err != nil {
402 return nil, err
403 }
404 cpuinfo[i].CPUMHz = v
405 }
406 }
407 return cpuinfo, nil
408}
166 409
410// firstNonEmptyLine advances the scanner to the first non-empty line
411// and returns the contents of that line.
412func firstNonEmptyLine(scanner *bufio.Scanner) string {
413 for scanner.Scan() {
414 line := scanner.Text()
415 if strings.TrimSpace(line) != "" {
416 return line
417 }
418 }
419 return ""
167} 420}
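parseCPUInfoS390X expects the s390x /proc/cpuinfo layout: a "vendor_id" header, machine-wide "bogomips per cpu" and "features" lines, one "processor N:" summary line per CPU, and then per-CPU "cpu number" / "cpu MHz dynamic" blocks. A minimal in-package sketch with an abbreviated, hypothetical sample (field values invented for illustration):

package procfs

import "fmt"

func exampleParseCPUInfoS390X() {
	// Abbreviated, hypothetical /proc/cpuinfo content for a 2-CPU s390x guest.
	sample := []byte(`vendor_id       : IBM/S390
bogomips per cpu: 3033.00
features        : esan3 zarch stfle msa
processor 0: version = FF,  identification = 2733C8,  machine = 2964
processor 1: version = FF,  identification = 2733C8,  machine = 2964

cpu number      : 0
cpu MHz dynamic : 5000
cpu number      : 1
cpu MHz dynamic : 5000
`)
	cpus, err := parseCPUInfoS390X(sample)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for _, c := range cpus {
		// VendorID and BogoMips come from the shared header; CPUMHz from the per-CPU block.
		fmt.Printf("cpu %d: vendor=%s bogomips=%.2f mhz=%.0f\n", c.Processor, c.VendorID, c.BogoMips, c.CPUMHz)
	}
}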
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_arm.go b/vendor/github.com/prometheus/procfs/cpuinfo_arm.go
new file mode 100644
index 0000000..8355507
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_arm.go
@@ -0,0 +1,18 @@
1// Copyright 2020 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// +build linux
15
16package procfs
17
18var parseCPUInfo = parseCPUInfoARM
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_arm64.go b/vendor/github.com/prometheus/procfs/cpuinfo_arm64.go
new file mode 100644
index 0000000..4f5d172
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_arm64.go
@@ -0,0 +1,19 @@
1// Copyright 2020 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// +build linux
15// +build arm64
16
17package procfs
18
19var parseCPUInfo = parseCPUInfoARM
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_default.go b/vendor/github.com/prometheus/procfs/cpuinfo_default.go
new file mode 100644
index 0000000..d5bedf9
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_default.go
@@ -0,0 +1,19 @@
1// Copyright 2020 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// +build linux
15// +build 386 amd64
16
17package procfs
18
19var parseCPUInfo = parseCPUInfoX86
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mips.go b/vendor/github.com/prometheus/procfs/cpuinfo_mips.go
new file mode 100644
index 0000000..22d93f8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_mips.go
@@ -0,0 +1,18 @@
1// Copyright 2020 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// +build linux
15
16package procfs
17
18var parseCPUInfo = parseCPUInfoMips
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mips64.go b/vendor/github.com/prometheus/procfs/cpuinfo_mips64.go
new file mode 100644
index 0000000..22d93f8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_mips64.go
@@ -0,0 +1,18 @@
1// Copyright 2020 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// +build linux
15
16package procfs
17
18var parseCPUInfo = parseCPUInfoMips
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go b/vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go
new file mode 100644
index 0000000..22d93f8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go
@@ -0,0 +1,18 @@
1// Copyright 2020 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// +build linux
15
16package procfs
17
18var parseCPUInfo = parseCPUInfoMips
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go
new file mode 100644
index 0000000..22d93f8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go
@@ -0,0 +1,18 @@
1// Copyright 2020 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// +build linux
15
16package procfs
17
18var parseCPUInfo = parseCPUInfoMips
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go
new file mode 100644
index 0000000..64aee9c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go
@@ -0,0 +1,18 @@
1// Copyright 2020 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// +build linux
15
16package procfs
17
18var parseCPUInfo = parseCPUInfoPPC
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go
new file mode 100644
index 0000000..64aee9c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go
@@ -0,0 +1,18 @@
1// Copyright 2020 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// +build linux
15
16package procfs
17
18var parseCPUInfo = parseCPUInfoPPC
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
new file mode 100644
index 0000000..26814ee
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
@@ -0,0 +1,18 @@
1// Copyright 2020 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// +build linux
15
16package procfs
17
18var parseCPUInfo = parseCPUInfoS390X
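Each of the cpuinfo_<GOARCH>.go files above binds the package-level parseCPUInfo variable to the matching parser under a Linux build constraint, so callers never select an architecture themselves. A minimal consumer-side sketch, assuming the exported FS.CPUInfo accessor that procfs provides on top of parseCPUInfo:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc") // mount point of procfs
	if err != nil {
		log.Fatal(err)
	}
	// CPUInfo reads /proc/cpuinfo and delegates to the parser selected at build time.
	cpus, err := fs.CPUInfo()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed %d logical CPUs\n", len(cpus))
}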
diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar
index 45a7321..868c857 100644
--- a/vendor/github.com/prometheus/procfs/fixtures.ttar
+++ b/vendor/github.com/prometheus/procfs/fixtures.ttar
@@ -173,6 +173,283 @@ Lines: 1
173411605849 93680043 79 173411605849 93680043 79
174Mode: 644 174Mode: 644
175# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 175# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
176Path: fixtures/proc/26231/smaps
177Lines: 252
17800400000-00cb1000 r-xp 00000000 fd:01 952273 /bin/alertmanager
179Size: 8900 kB
180KernelPageSize: 4 kB
181MMUPageSize: 4 kB
182Rss: 2952 kB
183Pss: 2952 kB
184Shared_Clean: 0 kB
185Shared_Dirty: 0 kB
186Private_Clean: 2952 kB
187Private_Dirty: 0 kB
188Referenced: 2864 kB
189Anonymous: 0 kB
190LazyFree: 0 kB
191AnonHugePages: 0 kB
192ShmemPmdMapped: 0 kB
193Shared_Hugetlb: 0 kB
194Private_Hugetlb: 0 kB
195Swap: 0 kB
196SwapPss: 0 kB
197Locked: 0 kB
198VmFlags: rd ex mr mw me dw sd
19900cb1000-016b0000 r--p 008b1000 fd:01 952273 /bin/alertmanager
200Size: 10236 kB
201KernelPageSize: 4 kB
202MMUPageSize: 4 kB
203Rss: 6152 kB
204Pss: 6152 kB
205Shared_Clean: 0 kB
206Shared_Dirty: 0 kB
207Private_Clean: 6152 kB
208Private_Dirty: 0 kB
209Referenced: 5308 kB
210Anonymous: 0 kB
211LazyFree: 0 kB
212AnonHugePages: 0 kB
213ShmemPmdMapped: 0 kB
214Shared_Hugetlb: 0 kB
215Private_Hugetlb: 0 kB
216Swap: 0 kB
217SwapPss: 0 kB
218Locked: 0 kB
219VmFlags: rd mr mw me dw sd
220016b0000-0171a000 rw-p 012b0000 fd:01 952273 /bin/alertmanager
221Size: 424 kB
222KernelPageSize: 4 kB
223MMUPageSize: 4 kB
224Rss: 176 kB
225Pss: 176 kB
226Shared_Clean: 0 kB
227Shared_Dirty: 0 kB
228Private_Clean: 84 kB
229Private_Dirty: 92 kB
230Referenced: 176 kB
231Anonymous: 92 kB
232LazyFree: 0 kB
233AnonHugePages: 0 kB
234ShmemPmdMapped: 0 kB
235Shared_Hugetlb: 0 kB
236Private_Hugetlb: 0 kB
237Swap: 12 kB
238SwapPss: 12 kB
239Locked: 0 kB
240VmFlags: rd wr mr mw me dw ac sd
2410171a000-0173f000 rw-p 00000000 00:00 0
242Size: 148 kB
243KernelPageSize: 4 kB
244MMUPageSize: 4 kB
245Rss: 76 kB
246Pss: 76 kB
247Shared_Clean: 0 kB
248Shared_Dirty: 0 kB
249Private_Clean: 0 kB
250Private_Dirty: 76 kB
251Referenced: 76 kB
252Anonymous: 76 kB
253LazyFree: 0 kB
254AnonHugePages: 0 kB
255ShmemPmdMapped: 0 kB
256Shared_Hugetlb: 0 kB
257Private_Hugetlb: 0 kB
258Swap: 0 kB
259SwapPss: 0 kB
260Locked: 0 kB
261VmFlags: rd wr mr mw me ac sd
262c000000000-c000400000 rw-p 00000000 00:00 0
263Size: 4096 kB
264KernelPageSize: 4 kB
265MMUPageSize: 4 kB
266Rss: 2564 kB
267Pss: 2564 kB
268Shared_Clean: 0 kB
269Shared_Dirty: 0 kB
270Private_Clean: 20 kB
271Private_Dirty: 2544 kB
272Referenced: 2544 kB
273Anonymous: 2564 kB
274LazyFree: 0 kB
275AnonHugePages: 0 kB
276ShmemPmdMapped: 0 kB
277Shared_Hugetlb: 0 kB
278Private_Hugetlb: 0 kB
279Swap: 1100 kB
280SwapPss: 1100 kB
281Locked: 0 kB
282VmFlags: rd wr mr mw me ac sd
283c000400000-c001600000 rw-p 00000000 00:00 0
284Size: 18432 kB
285KernelPageSize: 4 kB
286MMUPageSize: 4 kB
287Rss: 16024 kB
288Pss: 16024 kB
289Shared_Clean: 0 kB
290Shared_Dirty: 0 kB
291Private_Clean: 5864 kB
292Private_Dirty: 10160 kB
293Referenced: 11944 kB
294Anonymous: 16024 kB
295LazyFree: 5848 kB
296AnonHugePages: 0 kB
297ShmemPmdMapped: 0 kB
298Shared_Hugetlb: 0 kB
299Private_Hugetlb: 0 kB
300Swap: 440 kB
301SwapPss: 440 kB
302Locked: 0 kB
303VmFlags: rd wr mr mw me ac sd nh
304c001600000-c004000000 rw-p 00000000 00:00 0
305Size: 43008 kB
306KernelPageSize: 4 kB
307MMUPageSize: 4 kB
308Rss: 0 kB
309Pss: 0 kB
310Shared_Clean: 0 kB
311Shared_Dirty: 0 kB
312Private_Clean: 0 kB
313Private_Dirty: 0 kB
314Referenced: 0 kB
315Anonymous: 0 kB
316LazyFree: 0 kB
317AnonHugePages: 0 kB
318ShmemPmdMapped: 0 kB
319Shared_Hugetlb: 0 kB
320Private_Hugetlb: 0 kB
321Swap: 0 kB
322SwapPss: 0 kB
323Locked: 0 kB
324VmFlags: rd wr mr mw me ac sd
3257f0ab95ca000-7f0abbb7b000 rw-p 00000000 00:00 0
326Size: 38596 kB
327KernelPageSize: 4 kB
328MMUPageSize: 4 kB
329Rss: 1992 kB
330Pss: 1992 kB
331Shared_Clean: 0 kB
332Shared_Dirty: 0 kB
333Private_Clean: 476 kB
334Private_Dirty: 1516 kB
335Referenced: 1828 kB
336Anonymous: 1992 kB
337LazyFree: 0 kB
338AnonHugePages: 0 kB
339ShmemPmdMapped: 0 kB
340Shared_Hugetlb: 0 kB
341Private_Hugetlb: 0 kB
342Swap: 384 kB
343SwapPss: 384 kB
344Locked: 0 kB
345VmFlags: rd wr mr mw me ac sd
3467ffc07ecf000-7ffc07ef0000 rw-p 00000000 00:00 0 [stack]
347Size: 132 kB
348KernelPageSize: 4 kB
349MMUPageSize: 4 kB
350Rss: 8 kB
351Pss: 8 kB
352Shared_Clean: 0 kB
353Shared_Dirty: 0 kB
354Private_Clean: 0 kB
355Private_Dirty: 8 kB
356Referenced: 8 kB
357Anonymous: 8 kB
358LazyFree: 0 kB
359AnonHugePages: 0 kB
360ShmemPmdMapped: 0 kB
361Shared_Hugetlb: 0 kB
362Private_Hugetlb: 0 kB
363Swap: 4 kB
364SwapPss: 4 kB
365Locked: 0 kB
366VmFlags: rd wr mr mw me gd ac
3677ffc07f9e000-7ffc07fa1000 r--p 00000000 00:00 0 [vvar]
368Size: 12 kB
369KernelPageSize: 4 kB
370MMUPageSize: 4 kB
371Rss: 0 kB
372Pss: 0 kB
373Shared_Clean: 0 kB
374Shared_Dirty: 0 kB
375Private_Clean: 0 kB
376Private_Dirty: 0 kB
377Referenced: 0 kB
378Anonymous: 0 kB
379LazyFree: 0 kB
380AnonHugePages: 0 kB
381ShmemPmdMapped: 0 kB
382Shared_Hugetlb: 0 kB
383Private_Hugetlb: 0 kB
384Swap: 0 kB
385SwapPss: 0 kB
386Locked: 0 kB
387VmFlags: rd mr pf io de dd sd
3887ffc07fa1000-7ffc07fa3000 r-xp 00000000 00:00 0 [vdso]
389Size: 8 kB
390KernelPageSize: 4 kB
391MMUPageSize: 4 kB
392Rss: 4 kB
393Pss: 0 kB
394Shared_Clean: 4 kB
395Shared_Dirty: 0 kB
396Private_Clean: 0 kB
397Private_Dirty: 0 kB
398Referenced: 4 kB
399Anonymous: 0 kB
400LazyFree: 0 kB
401AnonHugePages: 0 kB
402ShmemPmdMapped: 0 kB
403Shared_Hugetlb: 0 kB
404Private_Hugetlb: 0 kB
405Swap: 0 kB
406SwapPss: 0 kB
407Locked: 0 kB
408VmFlags: rd ex mr mw me de sd
409ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]
410Size: 4 kB
411KernelPageSize: 4 kB
412MMUPageSize: 4 kB
413Rss: 0 kB
414Pss: 0 kB
415Shared_Clean: 0 kB
416Shared_Dirty: 0 kB
417Private_Clean: 0 kB
418Private_Dirty: 0 kB
419Referenced: 0 kB
420Anonymous: 0 kB
421LazyFree: 0 kB
422AnonHugePages: 0 kB
423ShmemPmdMapped: 0 kB
424Shared_Hugetlb: 0 kB
425Private_Hugetlb: 0 kB
426Swap: 0 kB
427SwapPss: 0 kB
428Locked: 0 kB
429VmFlags: rd ex
430Mode: 644
431# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
432Path: fixtures/proc/26231/smaps_rollup
433Lines: 17
43400400000-ffffffffff601000 ---p 00000000 00:00 0 [rollup]
435Rss: 29948 kB
436Pss: 29944 kB
437Shared_Clean: 4 kB
438Shared_Dirty: 0 kB
439Private_Clean: 15548 kB
440Private_Dirty: 14396 kB
441Referenced: 24752 kB
442Anonymous: 20756 kB
443LazyFree: 5848 kB
444AnonHugePages: 0 kB
445ShmemPmdMapped: 0 kB
446Shared_Hugetlb: 0 kB
447Private_Hugetlb: 0 kB
448Swap: 1940 kB
449SwapPss: 1940 kB
450Locked: 0 kB
451Mode: 644
452# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
176Path: fixtures/proc/26231/stat 453Path: fixtures/proc/26231/stat
177Lines: 1 454Lines: 1
17826231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 45526231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0
@@ -235,6 +512,11 @@ voluntary_ctxt_switches: 4742839
235nonvoluntary_ctxt_switches: 1727500 512nonvoluntary_ctxt_switches: 1727500
236Mode: 644 513Mode: 644
237# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 514# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
515Path: fixtures/proc/26231/wchan
516Lines: 1
517poll_schedule_timeoutEOF
518Mode: 664
519# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
238Directory: fixtures/proc/26232 520Directory: fixtures/proc/26232
239Mode: 755 521Mode: 755
240# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 522# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -310,6 +592,11 @@ Lines: 1
31033 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 59233 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0
311Mode: 644 593Mode: 644
312# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 594# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
595Path: fixtures/proc/26232/wchan
596Lines: 1
5970EOF
598Mode: 664
599# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
313Directory: fixtures/proc/26233 600Directory: fixtures/proc/26233
314Mode: 755 601Mode: 755
315# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 602# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -1554,7 +1841,7 @@ max keysize : 32
1554Mode: 444 1841Mode: 444
1555# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 1842# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1556Path: fixtures/proc/diskstats 1843Path: fixtures/proc/diskstats
1557Lines: 49 1844Lines: 52
1558 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 1845 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0
1559 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 1846 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0
1560 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 1847 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0
@@ -1604,11 +1891,45 @@ Lines: 49
1604 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 1891 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130
1605 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 1892 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0
1606 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 1893 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130
1894 8 0 sdc 14202 71 579164 21861 2995 1589 180500 40875 0 11628 55200 0 0 0 0 127 182
1895 8 1 sdc1 1027 0 13795 5021 2 0 4096 3 0 690 4579 0 0 0 0 0 0
1896 8 2 sdc2 13126 71 561749 16802 2830 1589 176404 40620 0 10931 50449 0 0 0 0 0 0
1607Mode: 664 1897Mode: 664
1608# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 1898# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1609Directory: fixtures/proc/fs 1899Directory: fixtures/proc/fs
1610Mode: 755 1900Mode: 755
1611# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 1901# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1902Directory: fixtures/proc/fs/fscache
1903Mode: 755
1904# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1905Path: fixtures/proc/fs/fscache/stats
1906Lines: 24
1907FS-Cache statistics
1908Cookies: idx=3 dat=67877 spc=0
1909Objects: alc=67473 nal=0 avl=67473 ded=388
1910ChkAux : non=12 ok=33 upd=44 obs=55
1911Pages : mrk=547164 unc=364577
1912Acquire: n=67880 nul=98 noc=25 ok=67780 nbf=39 oom=26
1913Lookups: n=67473 neg=67470 pos=58 crt=67473 tmo=85
1914Invals : n=14 run=13
1915Updates: n=7 nul=3 run=8
1916Relinqs: n=394 nul=1 wcr=2 rtr=3
1917AttrChg: n=6 ok=5 nbf=4 oom=3 run=2
1918Allocs : n=20 ok=19 wt=18 nbf=17 int=16
1919Allocs : ops=15 owt=14 abt=13
1920Retrvls: n=151959 ok=82823 wt=23467 nod=69136 nbf=15 int=69 oom=43
1921Retrvls: ops=151959 owt=42747 abt=44
1922Stores : n=225565 ok=225565 agn=12 nbf=13 oom=14
1923Stores : ops=69156 run=294721 pgs=225565 rxd=225565 olm=43
1924VmScan : nos=364512 gon=2 bsy=43 can=12 wt=66
1925Ops : pend=42753 run=221129 enq=628798 can=11 rej=88
1926Ops : ini=377538 dfr=27 rel=377538 gc=37
1927CacheOp: alo=1 luo=2 luc=3 gro=4
1928CacheOp: inv=5 upo=6 dro=7 pto=8 atc=9 syn=10
1929CacheOp: rap=11 ras=12 alp=13 als=14 wrp=15 ucp=16 dsp=17
1930CacheEv: nsp=18 stl=19 rtr=20 cul=21EOF
1931Mode: 644
1932# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1612Directory: fixtures/proc/fs/xfs 1933Directory: fixtures/proc/fs/xfs
1613Mode: 755 1934Mode: 755
1614# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 1935# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -2025,6 +2346,32 @@ Mode: 644
2025Directory: fixtures/proc/sys 2346Directory: fixtures/proc/sys
2026Mode: 775 2347Mode: 775
2027# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 2348# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2349Directory: fixtures/proc/sys/kernel
2350Mode: 775
2351# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2352Directory: fixtures/proc/sys/kernel/random
2353Mode: 755
2354# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2355Path: fixtures/proc/sys/kernel/random/entropy_avail
2356Lines: 1
23573943
2358Mode: 644
2359# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2360Path: fixtures/proc/sys/kernel/random/poolsize
2361Lines: 1
23624096
2363Mode: 644
2364# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2365Path: fixtures/proc/sys/kernel/random/urandom_min_reseed_secs
2366Lines: 1
236760
2368Mode: 644
2369# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2370Path: fixtures/proc/sys/kernel/random/write_wakeup_threshold
2371Lines: 1
23723072
2373Mode: 644
2374# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2028Directory: fixtures/proc/sys/vm 2375Directory: fixtures/proc/sys/vm
2029Mode: 775 2376Mode: 775
2030# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 2377# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -2526,6 +2873,237 @@ Mode: 664
2526Directory: fixtures/sys/block/sda 2873Directory: fixtures/sys/block/sda
2527Mode: 775 2874Mode: 775
2528# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 2875# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2876Directory: fixtures/sys/block/sda/queue
2877Mode: 755
2878# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2879Path: fixtures/sys/block/sda/queue/add_random
2880Lines: 1
28811
2882Mode: 644
2883# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2884Path: fixtures/sys/block/sda/queue/chunk_sectors
2885Lines: 1
28860
2887Mode: 444
2888# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2889Path: fixtures/sys/block/sda/queue/dax
2890Lines: 1
28910
2892Mode: 444
2893# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2894Path: fixtures/sys/block/sda/queue/discard_granularity
2895Lines: 1
28960
2897Mode: 444
2898# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2899Path: fixtures/sys/block/sda/queue/discard_max_bytes
2900Lines: 1
29010
2902Mode: 644
2903# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2904Path: fixtures/sys/block/sda/queue/discard_max_hw_bytes
2905Lines: 1
29060
2907Mode: 444
2908# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2909Path: fixtures/sys/block/sda/queue/discard_zeroes_data
2910Lines: 1
29110
2912Mode: 444
2913# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2914Path: fixtures/sys/block/sda/queue/fua
2915Lines: 1
29160
2917Mode: 444
2918# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2919Path: fixtures/sys/block/sda/queue/hw_sector_size
2920Lines: 1
2921512
2922Mode: 444
2923# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2924Path: fixtures/sys/block/sda/queue/io_poll
2925Lines: 1
29260
2927Mode: 644
2928# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2929Path: fixtures/sys/block/sda/queue/io_poll_delay
2930Lines: 1
2931-1
2932Mode: 644
2933# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2934Path: fixtures/sys/block/sda/queue/io_timeout
2935Lines: 1
293630000
2937Mode: 644
2938# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2939Directory: fixtures/sys/block/sda/queue/iosched
2940Mode: 755
2941# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2942Path: fixtures/sys/block/sda/queue/iosched/back_seek_max
2943Lines: 1
294416384
2945Mode: 644
2946# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2947Path: fixtures/sys/block/sda/queue/iosched/back_seek_penalty
2948Lines: 1
29492
2950Mode: 644
2951# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2952Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_async
2953Lines: 1
2954250
2955Mode: 644
2956# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2957Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_sync
2958Lines: 1
2959125
2960Mode: 644
2961# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2962Path: fixtures/sys/block/sda/queue/iosched/low_latency
2963Lines: 1
29641
2965Mode: 644
2966# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2967Path: fixtures/sys/block/sda/queue/iosched/max_budget
2968Lines: 1
29690
2970Mode: 644
2971# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2972Path: fixtures/sys/block/sda/queue/iosched/slice_idle
2973Lines: 1
29748
2975Mode: 644
2976# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2977Path: fixtures/sys/block/sda/queue/iosched/slice_idle_us
2978Lines: 1
29798000
2980Mode: 644
2981# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2982Path: fixtures/sys/block/sda/queue/iosched/strict_guarantees
2983Lines: 1
29840
2985Mode: 644
2986# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2987Path: fixtures/sys/block/sda/queue/iosched/timeout_sync
2988Lines: 1
2989125
2990Mode: 644
2991# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2992Path: fixtures/sys/block/sda/queue/iostats
2993Lines: 1
29941
2995Mode: 644
2996# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2997Path: fixtures/sys/block/sda/queue/logical_block_size
2998Lines: 1
2999512
3000Mode: 444
3001# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3002Path: fixtures/sys/block/sda/queue/max_discard_segments
3003Lines: 1
30041
3005Mode: 444
3006# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3007Path: fixtures/sys/block/sda/queue/max_hw_sectors_kb
3008Lines: 1
300932767
3010Mode: 444
3011# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3012Path: fixtures/sys/block/sda/queue/max_integrity_segments
3013Lines: 1
30140
3015Mode: 444
3016# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3017Path: fixtures/sys/block/sda/queue/max_sectors_kb
3018Lines: 1
30191280
3020Mode: 644
3021# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3022Path: fixtures/sys/block/sda/queue/max_segment_size
3023Lines: 1
302465536
3025Mode: 444
3026# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3027Path: fixtures/sys/block/sda/queue/max_segments
3028Lines: 1
3029168
3030Mode: 444
3031# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3032Path: fixtures/sys/block/sda/queue/minimum_io_size
3033Lines: 1
3034512
3035Mode: 444
3036# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3037Path: fixtures/sys/block/sda/queue/nomerges
3038Lines: 1
30390
3040Mode: 644
3041# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3042Path: fixtures/sys/block/sda/queue/nr_requests
3043Lines: 1
304464
3045Mode: 644
3046# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3047Path: fixtures/sys/block/sda/queue/nr_zones
3048Lines: 1
30490
3050Mode: 444
3051# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3052Path: fixtures/sys/block/sda/queue/optimal_io_size
3053Lines: 1
30540
3055Mode: 444
3056# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3057Path: fixtures/sys/block/sda/queue/physical_block_size
3058Lines: 1
3059512
3060Mode: 444
3061# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3062Path: fixtures/sys/block/sda/queue/read_ahead_kb
3063Lines: 1
3064128
3065Mode: 644
3066# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3067Path: fixtures/sys/block/sda/queue/rotational
3068Lines: 1
30691
3070Mode: 644
3071# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3072Path: fixtures/sys/block/sda/queue/rq_affinity
3073Lines: 1
30741
3075Mode: 644
3076# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3077Path: fixtures/sys/block/sda/queue/scheduler
3078Lines: 1
3079mq-deadline kyber [bfq] none
3080Mode: 644
3081# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3082Path: fixtures/sys/block/sda/queue/wbt_lat_usec
3083Lines: 1
308475000
3085Mode: 644
3086# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3087Path: fixtures/sys/block/sda/queue/write_cache
3088Lines: 1
3089write back
3090Mode: 644
3091# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3092Path: fixtures/sys/block/sda/queue/write_same_max_bytes
3093Lines: 1
30940
3095Mode: 444
3096# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3097Path: fixtures/sys/block/sda/queue/write_zeroes_max_bytes
3098Lines: 1
30990
3100Mode: 444
3101# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3102Path: fixtures/sys/block/sda/queue/zoned
3103Lines: 1
3104none
3105Mode: 444
3106# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2529Path: fixtures/sys/block/sda/stat 3107Path: fixtures/sys/block/sda/stat
2530Lines: 1 3108Lines: 1
25319652963 396792 759304206 412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12 31099652963 396792 759304206 412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12
@@ -2534,6 +3112,140 @@ Mode: 664
2534Directory: fixtures/sys/class 3112Directory: fixtures/sys/class
2535Mode: 775 3113Mode: 775
2536# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 3114# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3115Directory: fixtures/sys/class/fc_host
3116Mode: 755
3117# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3118Directory: fixtures/sys/class/fc_host/host0
3119Mode: 755
3120# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3121Path: fixtures/sys/class/fc_host/host0/dev_loss_tmo
3122Lines: 1
312330
3124Mode: 644
3125# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3126Path: fixtures/sys/class/fc_host/host0/fabric_name
3127Lines: 1
31280x0
3129Mode: 644
3130# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3131Path: fixtures/sys/class/fc_host/host0/node_name
3132Lines: 1
31330x2000e0071bce95f2
3134Mode: 644
3135# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3136Path: fixtures/sys/class/fc_host/host0/port_id
3137Lines: 1
31380x000002
3139Mode: 644
3140# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3141Path: fixtures/sys/class/fc_host/host0/port_name
3142Lines: 1
31430x1000e0071bce95f2
3144Mode: 644
3145# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3146Path: fixtures/sys/class/fc_host/host0/port_state
3147Lines: 1
3148Online
3149Mode: 644
3150# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3151Path: fixtures/sys/class/fc_host/host0/port_type
3152Lines: 1
3153Point-To-Point (direct nport connection)
3154Mode: 644
3155# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3156Path: fixtures/sys/class/fc_host/host0/speed
3157Lines: 1
315816 Gbit
3159Mode: 644
3160# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3161Directory: fixtures/sys/class/fc_host/host0/statistics
3162Mode: 755
3163# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3164Path: fixtures/sys/class/fc_host/host0/statistics/dumped_frames
3165Lines: 1
31660xffffffffffffffff
3167Mode: 644
3168# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3169Path: fixtures/sys/class/fc_host/host0/statistics/error_frames
3170Lines: 1
31710x0
3172Mode: 644
3173# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3174Path: fixtures/sys/class/fc_host/host0/statistics/fcp_packet_aborts
3175Lines: 1
31760x13
3177Mode: 644
3178# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3179Path: fixtures/sys/class/fc_host/host0/statistics/invalid_crc_count
3180Lines: 1
31810x2
3182Mode: 644
3183# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3184Path: fixtures/sys/class/fc_host/host0/statistics/invalid_tx_word_count
3185Lines: 1
31860x8
3187Mode: 644
3188# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3189Path: fixtures/sys/class/fc_host/host0/statistics/link_failure_count
3190Lines: 1
31910x9
3192Mode: 644
3193# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3194Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_signal_count
3195Lines: 1
31960x11
3197Mode: 644
3198# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3199Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_sync_count
3200Lines: 1
32010x10
3202Mode: 644
3203# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3204Path: fixtures/sys/class/fc_host/host0/statistics/nos_count
3205Lines: 1
32060x12
3207Mode: 644
3208# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3209Path: fixtures/sys/class/fc_host/host0/statistics/rx_frames
3210Lines: 1
32110x3
3212Mode: 644
3213# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3214Path: fixtures/sys/class/fc_host/host0/statistics/rx_words
3215Lines: 1
32160x4
3217Mode: 644
3218# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3219Path: fixtures/sys/class/fc_host/host0/statistics/seconds_since_last_reset
3220Lines: 1
32210x7
3222Mode: 644
3223# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3224Path: fixtures/sys/class/fc_host/host0/statistics/tx_frames
3225Lines: 1
32260x5
3227Mode: 644
3228# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3229Path: fixtures/sys/class/fc_host/host0/statistics/tx_words
3230Lines: 1
32310x6
3232Mode: 644
3233# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3234Path: fixtures/sys/class/fc_host/host0/supported_classes
3235Lines: 1
3236Class 3
3237Mode: 644
3238# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3239Path: fixtures/sys/class/fc_host/host0/supported_speeds
3240Lines: 1
32414 Gbit, 8 Gbit, 16 Gbit
3242Mode: 644
3243# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3244Path: fixtures/sys/class/fc_host/host0/symbolic_name
3245Lines: 1
3246Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. OS:Linux
3247Mode: 644
3248# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2537Directory: fixtures/sys/class/infiniband 3249Directory: fixtures/sys/class/infiniband
2538Mode: 755 3250Mode: 755
2539# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 3251# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -2564,6 +3276,11 @@ Mode: 755
2564Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters 3276Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters
2565Mode: 755 3277Mode: 755
2566# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 3278# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3279Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/VL15_dropped
3280Lines: 1
32810
3282Mode: 664
3283# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2567Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors 3284Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors
2568Lines: 1 3285Lines: 1
25690 32860
@@ -2665,6 +3382,11 @@ Mode: 755
2665Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters 3382Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters
2666Mode: 755 3383Mode: 755
2667# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 3384# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3385Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/VL15_dropped
3386Lines: 1
33870
3388Mode: 664
3389# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2668Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors 3390Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors
2669Lines: 1 3391Lines: 1
26700 33920
@@ -3109,7 +3831,7 @@ Mode: 664
3109# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 3831# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3110Path: fixtures/sys/class/thermal/thermal_zone1/temp 3832Path: fixtures/sys/class/thermal/thermal_zone1/temp
3111Lines: 1 3833Lines: 1
311244000 3834-44000
3113Mode: 664 3835Mode: 664
3114# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 3836# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3115Path: fixtures/sys/class/thermal/thermal_zone1/type 3837Path: fixtures/sys/class/thermal/thermal_zone1/type
@@ -4287,6 +5009,17 @@ Lines: 1
42870 50090
4288Mode: 644 5010Mode: 644
4289# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 5011# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
5012Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/writeback_rate_debug
5013Lines: 7
5014rate: 1.1M/sec
5015dirty: 20.4G
5016target: 20.4G
5017proportional: 427.5k
5018integral: 790.0k
5019change: 321.5k/sec
5020next io: 17ms
5021Mode: 644
5022# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
4290Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size 5023Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size
4291Lines: 1 5024Lines: 1
42920 50250
diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go
new file mode 100644
index 0000000..8783cf3
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fscache.go
@@ -0,0 +1,422 @@
1// Copyright 2019 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14package procfs
15
16import (
17 "bufio"
18 "bytes"
19 "fmt"
20 "io"
21 "strconv"
22 "strings"
23
24 "github.com/prometheus/procfs/internal/util"
25)
26
27// Fscacheinfo represents fscache statistics.
28type Fscacheinfo struct {
29 // Number of index cookies allocated
30 IndexCookiesAllocated uint64
31 // data storage cookies allocated
32 DataStorageCookiesAllocated uint64
33 // Number of special cookies allocated
34 SpecialCookiesAllocated uint64
35 // Number of objects allocated
36 ObjectsAllocated uint64
37 // Number of object allocation failures
38 ObjectAllocationsFailure uint64
39 // Number of objects that reached the available state
40 ObjectsAvailable uint64
41 // Number of objects that reached the dead state
42 ObjectsDead uint64
43 // Number of objects that didn't have a coherency check
44 ObjectsWithoutCoherencyCheck uint64
45 // Number of objects that passed a coherency check
46 ObjectsWithCoherencyCheck uint64
47 // Number of objects that needed a coherency data update
48 ObjectsNeedCoherencyCheckUpdate uint64
49 // Number of objects that were declared obsolete
50 ObjectsDeclaredObsolete uint64
51 // Number of pages marked as being cached
52 PagesMarkedAsBeingCached uint64
53 // Number of uncache page requests seen
54 UncachePagesRequestSeen uint64
55 // Number of acquire cookie requests seen
56 AcquireCookiesRequestSeen uint64
57 // Number of acq reqs given a NULL parent
58 AcquireRequestsWithNullParent uint64
59 // Number of acq reqs rejected due to no cache available
60 AcquireRequestsRejectedNoCacheAvailable uint64
61 // Number of acq reqs succeeded
62 AcquireRequestsSucceeded uint64
63 // Number of acq reqs rejected due to error
64 AcquireRequestsRejectedDueToError uint64
65 // Number of acq reqs failed on ENOMEM
66 AcquireRequestsFailedDueToEnomem uint64
67 // Number of lookup calls made on cache backends
68 LookupsNumber uint64
69 // Number of negative lookups made
70 LookupsNegative uint64
71 // Number of positive lookups made
72 LookupsPositive uint64
73 // Number of objects created by lookup
74 ObjectsCreatedByLookup uint64
75 // Number of lookups timed out and requeued
76 LookupsTimedOutAndRequed uint64
77 InvalidationsNumber uint64
78 InvalidationsRunning uint64
79 // Number of update cookie requests seen
80 UpdateCookieRequestSeen uint64
81 // Number of upd reqs given a NULL parent
82 UpdateRequestsWithNullParent uint64
83 // Number of upd reqs granted CPU time
84 UpdateRequestsRunning uint64
85 // Number of relinquish cookie requests seen
86 RelinquishCookiesRequestSeen uint64
87 // Number of rlq reqs given a NULL parent
88 RelinquishCookiesWithNullParent uint64
89 // Number of rlq reqs waited on completion of creation
90 RelinquishRequestsWaitingCompleteCreation uint64
91 // Relinqs rtr
92 RelinquishRetries uint64
93 // Number of attribute changed requests seen
94 AttributeChangedRequestsSeen uint64
95 // Number of attr changed requests queued
96 AttributeChangedRequestsQueued uint64
97 // Number of attr changed rejected -ENOBUFS
98 AttributeChangedRejectDueToEnobufs uint64
99 // Number of attr changed failed -ENOMEM
100 AttributeChangedFailedDueToEnomem uint64
101 // Number of attr changed ops given CPU time
102 AttributeChangedOps uint64
103 // Number of allocation requests seen
104 AllocationRequestsSeen uint64
105 // Number of successful alloc reqs
106 AllocationOkRequests uint64
107 // Number of alloc reqs that waited on lookup completion
108 AllocationWaitingOnLookup uint64
109 // Number of alloc reqs rejected -ENOBUFS
110 AllocationsRejectedDueToEnobufs uint64
111 // Number of alloc reqs aborted -ERESTARTSYS
112 AllocationsAbortedDueToErestartsys uint64
113 // Number of alloc reqs submitted
114 AllocationOperationsSubmitted uint64
115 // Number of alloc reqs waited for CPU time
116 AllocationsWaitedForCPU uint64
117 // Number of alloc reqs aborted due to object death
118 AllocationsAbortedDueToObjectDeath uint64
119 // Number of retrieval (read) requests seen
120 RetrievalsReadRequests uint64
121 // Number of successful retr reqs
122 RetrievalsOk uint64
123 // Number of retr reqs that waited on lookup completion
124 RetrievalsWaitingLookupCompletion uint64
125 // Number of retr reqs returned -ENODATA
126 RetrievalsReturnedEnodata uint64
127 // Number of retr reqs rejected -ENOBUFS
128 RetrievalsRejectedDueToEnobufs uint64
129 // Number of retr reqs aborted -ERESTARTSYS
130 RetrievalsAbortedDueToErestartsys uint64
131 // Number of retr reqs failed -ENOMEM
132 RetrievalsFailedDueToEnomem uint64
133 // Number of retr reqs submitted
134 RetrievalsRequests uint64
135 // Number of retr reqs waited for CPU time
136 RetrievalsWaitingCPU uint64
137 // Number of retr reqs aborted due to object death
138 RetrievalsAbortedDueToObjectDeath uint64
139 // Number of storage (write) requests seen
140 StoreWriteRequests uint64
141 // Number of successful store reqs
142 StoreSuccessfulRequests uint64
143 // Number of store reqs on a page already pending storage
144 StoreRequestsOnPendingStorage uint64
145 // Number of store reqs rejected -ENOBUFS
146 StoreRequestsRejectedDueToEnobufs uint64
147 // Number of store reqs failed -ENOMEM
148 StoreRequestsFailedDueToEnomem uint64
149 // Number of store reqs submitted
150 StoreRequestsSubmitted uint64
151 // Number of store reqs granted CPU time
152 StoreRequestsRunning uint64
153 // Number of pages given store req processing time
154 StorePagesWithRequestsProcessing uint64
155 // Number of store reqs deleted from tracking tree
156 StoreRequestsDeleted uint64
157 // Number of store reqs over store limit
158 StoreRequestsOverStoreLimit uint64
159 // Number of release reqs against pages with no pending store
160 ReleaseRequestsAgainstPagesWithNoPendingStorage uint64
161 // Number of release reqs against pages stored by time lock granted
162 ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
163 // Number of release reqs ignored due to in-progress store
164 ReleaseRequestsIgnoredDueToInProgressStore uint64
165 // Number of page stores cancelled due to release req
166 PageStoresCancelledByReleaseRequests uint64
167 VmscanWaiting uint64
168 // Number of times async ops added to pending queues
169 OpsPending uint64
170 // Number of times async ops given CPU time
171 OpsRunning uint64
172 // Number of times async ops queued for processing
173 OpsEnqueued uint64
174 // Number of async ops cancelled
175 OpsCancelled uint64
176 // Number of async ops rejected due to object lookup/create failure
177 OpsRejected uint64
178 // Number of async ops initialised
179 OpsInitialised uint64
180 // Number of async ops queued for deferred release
181 OpsDeferred uint64
182 // Number of async ops released (should equal ini=N when idle)
183 OpsReleased uint64
184 // Number of deferred-release async ops garbage collected
185 OpsGarbageCollected uint64
186 // Number of in-progress alloc_object() cache ops
187 CacheopAllocationsinProgress uint64
188 // Number of in-progress lookup_object() cache ops
189 CacheopLookupObjectInProgress uint64
190 // Number of in-progress lookup_complete() cache ops
191 CacheopLookupCompleteInPorgress uint64
192 // Number of in-progress grab_object() cache ops
193 CacheopGrabObjectInProgress uint64
194 CacheopInvalidations uint64
195 // Number of in-progress update_object() cache ops
196 CacheopUpdateObjectInProgress uint64
197 // Number of in-progress drop_object() cache ops
198 CacheopDropObjectInProgress uint64
199 // Number of in-progress put_object() cache ops
200 CacheopPutObjectInProgress uint64
201 // Number of in-progress attr_changed() cache ops
202 CacheopAttributeChangeInProgress uint64
203 // Number of in-progress sync_cache() cache ops
204 CacheopSyncCacheInProgress uint64
205 // Number of in-progress read_or_alloc_page() cache ops
206 CacheopReadOrAllocPageInProgress uint64
207 // Number of in-progress read_or_alloc_pages() cache ops
208 CacheopReadOrAllocPagesInProgress uint64
209 // Number of in-progress allocate_page() cache ops
210 CacheopAllocatePageInProgress uint64
211 // Number of in-progress allocate_pages() cache ops
212 CacheopAllocatePagesInProgress uint64
213 // Number of in-progress write_page() cache ops
214 CacheopWritePagesInProgress uint64
215 // Number of in-progress uncache_page() cache ops
216 CacheopUncachePagesInProgress uint64
217 // Number of in-progress dissociate_pages() cache ops
218 CacheopDissociatePagesInProgress uint64
219 // Number of object lookups/creations rejected due to lack of space
220 CacheevLookupsAndCreationsRejectedLackSpace uint64
221 // Number of stale objects deleted
222 CacheevStaleObjectsDeleted uint64
223 // Number of objects retired when relinquished
224 CacheevRetiredWhenReliquished uint64
225 // Number of objects culled
226 CacheevObjectsCulled uint64
227}
228
229// Fscacheinfo returns information about current fscache statistics.
230// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt
231func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
232 b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats"))
233 if err != nil {
234 return Fscacheinfo{}, err
235 }
236
237 m, err := parseFscacheinfo(bytes.NewReader(b))
238 if err != nil {
239 return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %v", err)
240 }
241
242 return *m, nil
243}
244
245func setFSCacheFields(fields []string, setFields ...*uint64) error {
246 var err error
247 if len(fields) < len(setFields) {
248 return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields))
249 }
250
251 for i := range setFields {
252 *setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64)
253 if err != nil {
254 return err
255 }
256 }
257 return nil
258}
259
260func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) {
261 var m Fscacheinfo
262 s := bufio.NewScanner(r)
263 for s.Scan() {
264 fields := strings.Fields(s.Text())
265 if len(fields) < 2 {
266 return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text())
267 }
268
269 switch fields[0] {
270 case "Cookies:":
271 err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated,
272 &m.SpecialCookiesAllocated)
273 if err != nil {
274 return &m, err
275 }
276 case "Objects:":
277 err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure,
278 &m.ObjectsAvailable, &m.ObjectsDead)
279 if err != nil {
280 return &m, err
281 }
282 case "ChkAux":
283 err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck,
284 &m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete)
285 if err != nil {
286 return &m, err
287 }
288 case "Pages":
289 err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen)
290 if err != nil {
291 return &m, err
292 }
293 case "Acquire:":
294 err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent,
295 &m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError,
296 &m.AcquireRequestsFailedDueToEnomem)
297 if err != nil {
298 return &m, err
299 }
300 case "Lookups:":
301 err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive,
302 &m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed)
303 if err != nil {
304 return &m, err
305 }
306 case "Invals":
307 err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning)
308 if err != nil {
309 return &m, err
310 }
311 case "Updates:":
312 err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent,
313 &m.UpdateRequestsRunning)
314 if err != nil {
315 return &m, err
316 }
317 case "Relinqs:":
318 err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent,
319 &m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries)
320 if err != nil {
321 return &m, err
322 }
323 case "AttrChg:":
324 err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued,
325 &m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps)
326 if err != nil {
327 return &m, err
328 }
329 case "Allocs":
330 if strings.Split(fields[2], "=")[0] == "n" {
331 err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests,
332 &m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys)
333 if err != nil {
334 return &m, err
335 }
336 } else {
337 err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU,
338 &m.AllocationsAbortedDueToObjectDeath)
339 if err != nil {
340 return &m, err
341 }
342 }
343 case "Retrvls:":
344 if strings.Split(fields[1], "=")[0] == "n" {
345 err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion,
346 &m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys,
347 &m.RetrievalsFailedDueToEnomem)
348 if err != nil {
349 return &m, err
350 }
351 } else {
352 err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath)
353 if err != nil {
354 return &m, err
355 }
356 }
357 case "Stores":
358 if strings.Split(fields[2], "=")[0] == "n" {
359 err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests,
360 &m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem)
361 if err != nil {
362 return &m, err
363 }
364 } else {
365 err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning,
366 &m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit)
367 if err != nil {
368 return &m, err
369 }
370 }
371 case "VmScan":
372 err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage,
373 &m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore,
374 &m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting)
375 if err != nil {
376 return &m, err
377 }
378 case "Ops":
379 if strings.Split(fields[2], "=")[0] == "pend" {
380 err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected)
381 if err != nil {
382 return &m, err
383 }
384 } else {
385 err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected)
386 if err != nil {
387 return &m, err
388 }
389 }
390 case "CacheOp:":
391 if strings.Split(fields[1], "=")[0] == "alo" {
392 err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress,
393 &m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress)
394 if err != nil {
395 return &m, err
396 }
397 } else if strings.Split(fields[1], "=")[0] == "inv" {
398 err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress,
399 &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress,
400 &m.CacheopSyncCacheInProgress)
401 if err != nil {
402 return &m, err
403 }
404 } else {
405 err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress,
406 &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress,
407 &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress)
408 if err != nil {
409 return &m, err
410 }
411 }
412 case "CacheEv:":
413 err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted,
414 &m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled)
415 if err != nil {
416 return &m, err
417 }
418 }
419 }
420
421 return &m, nil
422}
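A minimal usage sketch (not part of this change) of how a consumer could read the counters parsed by the switch above through the vendored package. It assumes the procfs.NewFS constructor and the Fscacheinfo accessor that procfs v0.1.x exposes around this parser; the field names are the ones assigned above.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// procfs.NewFS is assumed from the package API; it is not shown in this diff.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// Fscacheinfo parses /proc/fs/fscache/stats using the switch shown above.
	info, err := fs.Fscacheinfo()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("index cookies allocated:", info.IndexCookiesAllocated)
	fmt.Println("objects allocated:", info.ObjectsAllocated)
}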
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
index 755591d..22cb07a 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -73,6 +73,15 @@ func ReadUintFromFile(path string) (uint64, error) {
73 return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) 73 return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
74} 74}
75 75
76// ReadIntFromFile reads a file and attempts to parse a int64 from it.
77func ReadIntFromFile(path string) (int64, error) {
78 data, err := ioutil.ReadFile(path)
79 if err != nil {
80 return 0, err
81 }
82 return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
83}
84
76// ParseBool parses a string into a boolean pointer. 85// ParseBool parses a string into a boolean pointer.
77func ParseBool(b string) *bool { 86func ParseBool(b string) *bool {
78 var truth bool 87 var truth bool
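The new helper mirrors ReadUintFromFile but parses signed values, which matters for readings such as sysfs temperatures that can be negative. Since internal/util cannot be imported from outside procfs, the sketch below reimplements the same pattern under a hypothetical name, readIntFromFile, purely for illustration.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"strconv"
	"strings"
)

// readIntFromFile reads a file and parses an int64 from its trimmed contents,
// mirroring the vendored util.ReadIntFromFile helper added above.
func readIntFromFile(path string) (int64, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return 0, err
	}
	return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
}

func main() {
	v, err := readIntFromFile("/sys/class/thermal/thermal_zone0/temp")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("millidegrees Celsius:", v)
}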
diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go
new file mode 100644
index 0000000..beefdf0
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/kernel_random.go
@@ -0,0 +1,62 @@
1// Copyright 2020 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// +build !windows
15
16package procfs
17
18import (
19 "os"
20
21 "github.com/prometheus/procfs/internal/util"
22)
23
24// KernelRandom contains information about the kernel's random number generator.
25type KernelRandom struct {
26 // EntropyAvaliable gives the available entropy, in bits.
27 EntropyAvaliable *uint64
28 // PoolSize gives the size of the entropy pool, in bits.
29 PoolSize *uint64
30 // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded.
31 URandomMinReseedSeconds *uint64
32 // WriteWakeupThreshold is the number of bits of entropy below which we wake up processes
33 // that do a select(2) or poll(2) for write access to /dev/random.
34 WriteWakeupThreshold *uint64
35 // ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep
36 // waiting for entropy from /dev/random.
37 ReadWakeupThreshold *uint64
38}
39
40// KernelRandom returns values from /proc/sys/kernel/random.
41func (fs FS) KernelRandom() (KernelRandom, error) {
42 random := KernelRandom{}
43
44 for file, p := range map[string]**uint64{
45 "entropy_avail": &random.EntropyAvaliable,
46 "poolsize": &random.PoolSize,
47 "urandom_min_reseed_secs": &random.URandomMinReseedSeconds,
48 "write_wakeup_threshold": &random.WriteWakeupThreshold,
49 "read_wakeup_threshold": &random.ReadWakeupThreshold,
50 } {
51 val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file))
52 if os.IsNotExist(err) {
53 continue
54 }
55 if err != nil {
56 return random, err
57 }
58 *p = &val
59 }
60
61 return random, nil
62}
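A short usage sketch (not part of this change) of the new KernelRandom accessor added above. It assumes the procfs.NewFS constructor from the wider package API; the nil checks are needed because each field stays nil when its file is absent on the running kernel.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc") // assumed constructor, not shown in this diff
	if err != nil {
		log.Fatal(err)
	}
	random, err := fs.KernelRandom()
	if err != nil {
		log.Fatal(err)
	}
	// Fields are pointers because individual files may not exist on some kernels.
	if random.EntropyAvaliable != nil {
		fmt.Println("entropy available (bits):", *random.EntropyAvaliable)
	}
	if random.PoolSize != nil {
		fmt.Println("entropy pool size:", *random.PoolSize)
	}
}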
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
index 2af3ada..3e9362a 100644
--- a/vendor/github.com/prometheus/procfs/mdstat.go
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -52,7 +52,7 @@ type MDStat struct {
52func (fs FS) MDStat() ([]MDStat, error) { 52func (fs FS) MDStat() ([]MDStat, error) {
53 data, err := ioutil.ReadFile(fs.proc.Path("mdstat")) 53 data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
54 if err != nil { 54 if err != nil {
55 return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err) 55 return nil, err
56 } 56 }
57 mdstat, err := parseMDStat(data) 57 mdstat, err := parseMDStat(data)
58 if err != nil { 58 if err != nil {
diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go
index 9471136..59f4d50 100644
--- a/vendor/github.com/prometheus/procfs/mountinfo.go
+++ b/vendor/github.com/prometheus/procfs/mountinfo.go
@@ -77,7 +77,7 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
77 77
78 mountInfo := strings.Split(mountString, " ") 78 mountInfo := strings.Split(mountString, " ")
79 mountInfoLength := len(mountInfo) 79 mountInfoLength := len(mountInfo)
80 if mountInfoLength < 11 { 80 if mountInfoLength < 10 {
81 return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString) 81 return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString)
82 } 82 }
83 83
@@ -144,7 +144,7 @@ func mountOptionsParseOptionalFields(o []string) (map[string]string, error) {
144 return optionalFields, nil 144 return optionalFields, nil
145} 145}
146 146
147// Parses the mount options, superblock options. 147// mountOptionsParser parses the mount options, superblock options.
148func mountOptionsParser(mountOptions string) map[string]string { 148func mountOptionsParser(mountOptions string) map[string]string {
149 opts := make(map[string]string) 149 opts := make(map[string]string)
150 options := strings.Split(mountOptions, ",") 150 options := strings.Split(mountOptions, ",")
@@ -161,7 +161,7 @@ func mountOptionsParser(mountOptions string) map[string]string {
161 return opts 161 return opts
162} 162}
163 163
164// Retrieves mountinfo information from `/proc/self/mountinfo`. 164// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`.
165func GetMounts() ([]*MountInfo, error) { 165func GetMounts() ([]*MountInfo, error) {
166 data, err := util.ReadFileNoStat("/proc/self/mountinfo") 166 data, err := util.ReadFileNoStat("/proc/self/mountinfo")
167 if err != nil { 167 if err != nil {
@@ -170,7 +170,7 @@ func GetMounts() ([]*MountInfo, error) {
170 return parseMountInfo(data) 170 return parseMountInfo(data)
171} 171}
172 172
173// Retrieves mountinfo information from a processes' `/proc/<pid>/mountinfo`. 173// GetProcMounts retrieves mountinfo information from a processes' `/proc/<pid>/mountinfo`.
174func GetProcMounts(pid int) ([]*MountInfo, error) { 174func GetProcMounts(pid int) ([]*MountInfo, error) {
175 data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid)) 175 data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid))
176 if err != nil { 176 if err != nil {
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index 35b2ef3..861ced9 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -186,6 +186,8 @@ type NFSOperationStats struct {
186 CumulativeTotalResponseMilliseconds uint64 186 CumulativeTotalResponseMilliseconds uint64
187 // Duration from when a request was enqueued to when it was completely handled. 187 // Duration from when a request was enqueued to when it was completely handled.
188 CumulativeTotalRequestMilliseconds uint64 188 CumulativeTotalRequestMilliseconds uint64
189 // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
190 Errors uint64
189} 191}
190 192
191// A NFSTransportStats contains statistics for the NFS mount RPC requests and 193// A NFSTransportStats contains statistics for the NFS mount RPC requests and
@@ -494,8 +496,8 @@ func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
494// line is reached. 496// line is reached.
495func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { 497func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
496 const ( 498 const (
497 // Number of expected fields in each per-operation statistics set 499 // Minimum number of expected fields in each per-operation statistics set
498 numFields = 9 500 minFields = 9
499 ) 501 )
500 502
501 var ops []NFSOperationStats 503 var ops []NFSOperationStats
@@ -508,12 +510,12 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
508 break 510 break
509 } 511 }
510 512
511 if len(ss) != numFields { 513 if len(ss) < minFields {
512 return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) 514 return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
513 } 515 }
514 516
515 // Skip string operation name for integers 517 // Skip string operation name for integers
516 ns := make([]uint64, 0, numFields-1) 518 ns := make([]uint64, 0, minFields-1)
517 for _, st := range ss[1:] { 519 for _, st := range ss[1:] {
518 n, err := strconv.ParseUint(st, 10, 64) 520 n, err := strconv.ParseUint(st, 10, 64)
519 if err != nil { 521 if err != nil {
@@ -523,7 +525,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
523 ns = append(ns, n) 525 ns = append(ns, n)
524 } 526 }
525 527
526 ops = append(ops, NFSOperationStats{ 528 opStats := NFSOperationStats{
527 Operation: strings.TrimSuffix(ss[0], ":"), 529 Operation: strings.TrimSuffix(ss[0], ":"),
528 Requests: ns[0], 530 Requests: ns[0],
529 Transmissions: ns[1], 531 Transmissions: ns[1],
@@ -533,7 +535,13 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
533 CumulativeQueueMilliseconds: ns[5], 535 CumulativeQueueMilliseconds: ns[5],
534 CumulativeTotalResponseMilliseconds: ns[6], 536 CumulativeTotalResponseMilliseconds: ns[6],
535 CumulativeTotalRequestMilliseconds: ns[7], 537 CumulativeTotalRequestMilliseconds: ns[7],
536 }) 538 }
539
540 if len(ns) > 8 {
541 opStats.Errors = ns[8]
542 }
543
544 ops = append(ops, opStats)
537 } 545 }
538 546
539 return ops, s.Err() 547 return ops, s.Err()
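A hedged sketch (not part of this change) of how the new Errors field could be consumed. It assumes procfs.Self, Proc.MountStats and the MountStatsNFS type from the wider procfs API, none of which appear in this hunk; only the Errors field itself is introduced above.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // assumed accessor for the current process
	if err != nil {
		log.Fatal(err)
	}
	mounts, err := p.MountStats()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		// Only NFS mounts carry per-operation statistics.
		nfs, ok := m.Stats.(*procfs.MountStatsNFS)
		if !ok {
			continue
		}
		for _, op := range nfs.Operations {
			// Errors stays 0 on kernels that export only the original eight fields.
			fmt.Printf("%s %s: %d requests, %d errors\n", m.Mount, op.Operation, op.Requests, op.Errors)
		}
	}
}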
diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
index 1e27c83..b637be9 100644
--- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go
+++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
@@ -38,7 +38,7 @@ type ConntrackStatEntry struct {
38 SearchRestart uint64 38 SearchRestart uint64
39} 39}
40 40
41// Retrieves netfilter's conntrack statistics, split by CPU cores 41// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores
42func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) { 42func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
43 return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack")) 43 return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
44} 44}
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
index 330e472..9f97b6e 100644
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -134,6 +134,27 @@ func (p Proc) CmdLine() ([]string, error) {
134 return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil 134 return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
135} 135}
136 136
137// Wchan returns the wchan (wait channel) of a process.
138func (p Proc) Wchan() (string, error) {
139 f, err := os.Open(p.path("wchan"))
140 if err != nil {
141 return "", err
142 }
143 defer f.Close()
144
145 data, err := ioutil.ReadAll(f)
146 if err != nil {
147 return "", err
148 }
149
150 wchan := string(data)
151 if wchan == "" || wchan == "0" {
152 return "", nil
153 }
154
155 return wchan, nil
156}
157
137// Comm returns the command name of a process. 158// Comm returns the command name of a process.
138func (p Proc) Comm() (string, error) { 159func (p Proc) Comm() (string, error) {
139 data, err := util.ReadFileNoStat(p.path("comm")) 160 data, err := util.ReadFileNoStat(p.path("comm"))
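A minimal sketch (not part of this change) exercising the new Wchan method added above; it assumes the procfs.Self helper from the wider package API. An empty return value means the process is currently running rather than blocked in the kernel.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // assumed accessor for the current process
	if err != nil {
		log.Fatal(err)
	}
	wchan, err := p.Wchan()
	if err != nil {
		log.Fatal(err)
	}
	if wchan == "" {
		fmt.Println("process is running (no wait channel)")
	} else {
		fmt.Println("waiting in kernel function:", wchan)
	}
}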
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
new file mode 100644
index 0000000..4abd464
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go
@@ -0,0 +1,98 @@
1// Copyright 2020 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14package procfs
15
16import (
17 "bufio"
18 "bytes"
19 "fmt"
20 "strconv"
21 "strings"
22
23 "github.com/prometheus/procfs/internal/util"
24)
25
26// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
27// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
28// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
29// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
30// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
31// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID
32// in this hierarchy
33//
34// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
35type Cgroup struct {
36 // HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one
37 // hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number
38 HierarchyID int
39 // Controllers using this hierarchy of processes. Controllers are also known as subsystems. For
40 // Cgroups V2 this may be empty, as all active controllers use the same hierarchy
41 Controllers []string
42 // Path of this control group, relative to the mount point of the cgroupfs representing this specific
43 // hierarchy
44 Path string
45}
46
47// parseCgroupString parses each line of the /proc/[pid]/cgroup file
48// Line format is hierarchyID:[controller1,controller2]:path
49func parseCgroupString(cgroupStr string) (*Cgroup, error) {
50 var err error
51
52 fields := strings.Split(cgroupStr, ":")
53 if len(fields) < 3 {
54 return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr)
55 }
56
57 cgroup := &Cgroup{
58 Path: fields[2],
59 Controllers: nil,
60 }
61 cgroup.HierarchyID, err = strconv.Atoi(fields[0])
62 if err != nil {
63 return nil, fmt.Errorf("failed to parse hierarchy ID")
64 }
65 if fields[1] != "" {
66 ssNames := strings.Split(fields[1], ",")
67 cgroup.Controllers = append(cgroup.Controllers, ssNames...)
68 }
69 return cgroup, nil
70}
71
72// parseCgroups reads each line of the /proc/[pid]/cgroup file
73func parseCgroups(data []byte) ([]Cgroup, error) {
74 var cgroups []Cgroup
75 scanner := bufio.NewScanner(bytes.NewReader(data))
76 for scanner.Scan() {
77 mountString := scanner.Text()
78 parsedMounts, err := parseCgroupString(mountString)
79 if err != nil {
80 return nil, err
81 }
82 cgroups = append(cgroups, *parsedMounts)
83 }
84
85 err := scanner.Err()
86 return cgroups, err
87}
88
89// Cgroups reads from /proc/<pid>/cgroup and returns a []Cgroup locating this PID in each process
90// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
91// so the length of the returned slice is equal to the number of active hierarchies on this system.
92func (p Proc) Cgroups() ([]Cgroup, error) {
93 data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/cgroup", p.PID))
94 if err != nil {
95 return nil, err
96 }
97 return parseCgroups(data)
98}
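A minimal sketch (not part of this change) showing the new Cgroups accessor in use; procfs.Self is assumed from the wider package API, the rest comes from the file above.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // assumed accessor for the current process
	if err != nil {
		log.Fatal(err)
	}
	cgroups, err := p.Cgroups()
	if err != nil {
		log.Fatal(err)
	}
	for _, cg := range cgroups {
		// HierarchyID 0 with no controllers indicates the unified cgroup v2 hierarchy.
		fmt.Printf("hierarchy %d controllers=%v path=%s\n", cg.HierarchyID, cg.Controllers, cg.Path)
	}
}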
diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
index 0c9c402..a76ca70 100644
--- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go
+++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
@@ -41,7 +41,7 @@ type ProcFDInfo struct {
41 Flags string 41 Flags string
42 // Mount point ID 42 // Mount point ID
43 MntID string 43 MntID string
44 // List of inotify lines (structed) in the fdinfo file (kernel 3.8+ only) 44 // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
45 InotifyInfos []InotifyInfo 45 InotifyInfos []InotifyInfo
46} 46}
47 47
diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go
index 28d5c6e..1d7772d 100644
--- a/vendor/github.com/prometheus/procfs/proc_maps.go
+++ b/vendor/github.com/prometheus/procfs/proc_maps.go
@@ -11,7 +11,7 @@
11// See the License for the specific language governing permissions and 11// See the License for the specific language governing permissions and
12// limitations under the License. 12// limitations under the License.
13 13
14// +build !windows 14// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
15 15
16package procfs 16package procfs
17 17
@@ -25,6 +25,7 @@ import (
25 "golang.org/x/sys/unix" 25 "golang.org/x/sys/unix"
26) 26)
27 27
28// ProcMapPermissions contains permission settings read from /proc/[pid]/maps
28type ProcMapPermissions struct { 29type ProcMapPermissions struct {
29 // mapping has the [R]ead flag set 30 // mapping has the [R]ead flag set
30 Read bool 31 Read bool
diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go
new file mode 100644
index 0000000..a576a72
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_smaps.go
@@ -0,0 +1,165 @@
1// Copyright 2020 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// +build !windows
15
16package procfs
17
18import (
19 "bufio"
20 "errors"
21 "fmt"
22 "os"
23 "regexp"
24 "strconv"
25 "strings"
26
27 "github.com/prometheus/procfs/internal/util"
28)
29
30var (
31 // match the header line before each mapped zone in /proc/pid/smaps
32 procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
33)
34
35type ProcSMapsRollup struct {
36 // Amount of the mapping that is currently resident in RAM
37 Rss uint64
38 // Process's proportional share of this mapping
39 Pss uint64
40 // Size in bytes of clean shared pages
41 SharedClean uint64
42 // Size in bytes of dirty shared pages
43 SharedDirty uint64
44 // Size in bytes of clean private pages
45 PrivateClean uint64
46 // Size in bytes of dirty private pages
47 PrivateDirty uint64
48 // Amount of memory currently marked as referenced or accessed
49 Referenced uint64
50 // Amount of memory that does not belong to any file
51 Anonymous uint64
52 // Amount of would-be-anonymous memory currently on swap
53 Swap uint64
54 // Process's proportional memory on swap
55 SwapPss uint64
56}
57
58// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the
59// process.
60//
61// If smaps_rollup does not exist (requires kernel >= 4.15), the content of /proc/pid/smaps will
62// be read and summed.
63func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) {
64 data, err := util.ReadFileNoStat(p.path("smaps_rollup"))
65 if err != nil && os.IsNotExist(err) {
66 return p.procSMapsRollupManual()
67 }
68 if err != nil {
69 return ProcSMapsRollup{}, err
70 }
71
72 lines := strings.Split(string(data), "\n")
73 smaps := ProcSMapsRollup{}
74
75 // skip the first line, which doesn't contain the information we need
76 lines = lines[1:]
77 for _, line := range lines {
78 if line == "" {
79 continue
80 }
81
82 if err := smaps.parseLine(line); err != nil {
83 return ProcSMapsRollup{}, err
84 }
85 }
86
87 return smaps, nil
88}
89
90// Read /proc/pid/smaps and do the roll-up in Go code.
91func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
92 file, err := os.Open(p.path("smaps"))
93 if err != nil {
94 return ProcSMapsRollup{}, err
95 }
96 defer file.Close()
97
98 smaps := ProcSMapsRollup{}
99 scan := bufio.NewScanner(file)
100
101 for scan.Scan() {
102 line := scan.Text()
103
104 if procSMapsHeaderLine.MatchString(line) {
105 continue
106 }
107
108 if err := smaps.parseLine(line); err != nil {
109 return ProcSMapsRollup{}, err
110 }
111 }
112
113 return smaps, nil
114}
115
116func (s *ProcSMapsRollup) parseLine(line string) error {
117 kv := strings.SplitN(line, ":", 2)
118 if len(kv) != 2 {
119 fmt.Println(line)
120 return errors.New("invalid smaps line, missing colon")
121 }
122
123 k := kv[0]
124 if k == "VmFlags" {
125 return nil
126 }
127
128 v := strings.TrimSpace(kv[1])
129 v = strings.TrimRight(v, " kB")
130
131 vKBytes, err := strconv.ParseUint(v, 10, 64)
132 if err != nil {
133 return err
134 }
135 vBytes := vKBytes * 1024
136
137 s.addValue(k, v, vKBytes, vBytes)
138
139 return nil
140}
141
142func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) {
143 switch k {
144 case "Rss":
145 s.Rss += vUintBytes
146 case "Pss":
147 s.Pss += vUintBytes
148 case "Shared_Clean":
149 s.SharedClean += vUintBytes
150 case "Shared_Dirty":
151 s.SharedDirty += vUintBytes
152 case "Private_Clean":
153 s.PrivateClean += vUintBytes
154 case "Private_Dirty":
155 s.PrivateDirty += vUintBytes
156 case "Referenced":
157 s.Referenced += vUintBytes
158 case "Anonymous":
159 s.Anonymous += vUintBytes
160 case "Swap":
161 s.Swap += vUintBytes
162 case "SwapPss":
163 s.SwapPss += vUintBytes
164 }
165}
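A short sketch (not part of this change) of the new ProcSMapsRollup accessor; procfs.Self is assumed from the wider package API. The fallback to summing /proc/[pid]/smaps happens transparently when smaps_rollup is missing.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // assumed accessor for the current process
	if err != nil {
		log.Fatal(err)
	}
	// Uses smaps_rollup when available, otherwise sums /proc/[pid]/smaps itself.
	rollup, err := p.ProcSMapsRollup()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("rss=%d pss=%d swap=%d bytes\n", rollup.Rss, rollup.Pss, rollup.Swap)
}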
diff --git a/vendor/github.com/prometheus/procfs/sysfs/class_fibrechannel.go b/vendor/github.com/prometheus/procfs/sysfs/class_fibrechannel.go
new file mode 100644
index 0000000..d57ff9c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/sysfs/class_fibrechannel.go
@@ -0,0 +1,249 @@
1// Copyright 2020 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// +build !windows
15
16package sysfs
17
18import (
19 "fmt"
20 "io/ioutil"
21 "os"
22 "path/filepath"
23
24 "github.com/prometheus/procfs/internal/util"
25)
26
27const fibrechannelClassPath = "class/fc_host"
28
29type FibreChannelCounters struct {
30 DumpedFrames uint64 // /sys/class/fc_host/<Name>/statistics/dumped_frames
31 ErrorFrames uint64 // /sys/class/fc_host/<Name>/statistics/error_frames
32 InvalidCRCCount uint64 // /sys/class/fc_host/<Name>/statistics/invalid_crc_count
33 RXFrames uint64 // /sys/class/fc_host/<Name>/statistics/rx_frames
34 RXWords uint64 // /sys/class/fc_host/<Name>/statistics/rx_words
35 TXFrames uint64 // /sys/class/fc_host/<Name>/statistics/tx_frames
36 TXWords uint64 // /sys/class/fc_host/<Name>/statistics/tx_words
37 SecondsSinceLastReset uint64 // /sys/class/fc_host/<Name>/statistics/seconds_since_last_reset
38 InvalidTXWordCount uint64 // /sys/class/fc_host/<Name>/statistics/invalid_tx_word_count
39 LinkFailureCount uint64 // /sys/class/fc_host/<Name>/statistics/link_failure_count
40 LossOfSyncCount uint64 // /sys/class/fc_host/<Name>/statistics/loss_of_sync_count
41 LossOfSignalCount uint64 // /sys/class/fc_host/<Name>/statistics/loss_of_signal_count
42 NosCount uint64 // /sys/class/fc_host/<Name>/statistics/nos_count
43 FCPPacketAborts uint64 // /sys/class/fc_host/<Name>/statistics/fcp_packet_aborts
44}
45
46type FibreChannelHost struct {
47 Name string // /sys/class/fc_host/<Name>
48 Speed string // /sys/class/fc_host/<Name>/speed
49 PortState string // /sys/class/fc_host/<Name>/port_state
50 PortType string // /sys/class/fc_host/<Name>/port_type
51 SymbolicName string // /sys/class/fc_host/<Name>/symbolic_name
52 NodeName string // /sys/class/fc_host/<Name>/node_name
53 PortID string // /sys/class/fc_host/<Name>/port_id
54 PortName string // /sys/class/fc_host/<Name>/port_name
55 FabricName string // /sys/class/fc_host/<Name>/fabric_name
56 DevLossTMO string // /sys/class/fc_host/<Name>/dev_loss_tmo
57 SupportedClasses string // /sys/class/fc_host/<Name>/supported_classes
58 SupportedSpeeds string // /sys/class/fc_host/<Name>/supported_speeds
59 Counters FibreChannelCounters // /sys/class/fc_host/<Name>/statistics/*
60}
61
62type FibreChannelClass map[string]FibreChannelHost
63
64// FibreChannelClass parses everything in /sys/class/fc_host.
65func (fs FS) FibreChannelClass() (FibreChannelClass, error) {
66 path := fs.sys.Path(fibrechannelClassPath)
67
68 dirs, err := ioutil.ReadDir(path)
69 if err != nil {
70 return nil, err
71 }
72
73 fcc := make(FibreChannelClass, len(dirs))
74 for _, d := range dirs {
75 host, err := fs.parseFibreChannelHost(d.Name())
76 if err != nil {
77 return nil, err
78 }
79
80 fcc[host.Name] = *host
81 }
82
83 return fcc, nil
84}
85
86// Parse a single FC host
87func (fs FS) parseFibreChannelHost(name string) (*FibreChannelHost, error) {
88 path := fs.sys.Path(fibrechannelClassPath, name)
89 host := FibreChannelHost{Name: name}
90
91 for _, f := range [...]string{"speed", "port_state", "port_type", "node_name", "port_id", "port_name", "fabric_name", "dev_loss_tmo", "symbolic_name", "supported_classes", "supported_speeds"} {
92 name := filepath.Join(path, f)
93 value, err := util.SysReadFile(name)
94 if err != nil {
95 return nil, fmt.Errorf("failed to read file %q: %v", name, err)
96 }
97
98 switch f {
99 case "speed":
100 host.Speed = value
101 case "port_state":
102 host.PortState = value
103 case "port_type":
104 host.PortType = value
105 case "node_name":
106 if len(value) > 2 {
107 value = value[2:]
108 }
109 host.NodeName = value
110 case "port_id":
111 if len(value) > 2 {
112 value = value[2:]
113 }
114 host.PortID = value
115 case "port_name":
116 if len(value) > 2 {
117 value = value[2:]
118 }
119 host.PortName = value
120 case "fabric_name":
121 if len(value) > 2 {
122 value = value[2:]
123 }
124 host.FabricName = value
125 case "dev_loss_tmo":
126 host.DevLossTMO = value
127 case "supported_classes":
128 host.SupportedClasses = value
129 case "supported_speeds":
130 host.SupportedSpeeds = value
131 case "symbolic_name":
132 host.SymbolicName = value
133 }
134 }
135
136 counters, err := parseFibreChannelStatistics(path)
137 if err != nil {
138 return nil, err
139 }
140 host.Counters = *counters
141
142 return &host, nil
143}
144
145// parseFibreChannelStatistics parses metrics from a single FC host.
146func parseFibreChannelStatistics(hostPath string) (*FibreChannelCounters, error) {
147 var counters FibreChannelCounters
148
149 path := filepath.Join(hostPath, "statistics")
150 files, err := ioutil.ReadDir(path)
151 if err != nil {
152 return nil, err
153 }
154
155 for _, f := range files {
156 if !f.Mode().IsRegular() || f.Name() == "reset_statistics" {
157 continue
158 }
159
160 name := filepath.Join(path, f.Name())
161 value, err := util.SysReadFile(name)
162 if err != nil {
163 // there are some write-only files in this directory; we can safely skip over them
164 if os.IsNotExist(err) || err.Error() == "operation not supported" || err.Error() == "invalid argument" {
165 continue
166 }
167 return nil, fmt.Errorf("failed to read file %q: %v", name, err)
168 }
169
170 vp := util.NewValueParser(value)
171
172 // The switch below was automatically generated. Not everything in it is needed yet, so the unwanted cases are commented out.
173 switch f.Name() {
174 case "dumped_frames":
175 counters.DumpedFrames = *vp.PUInt64()
176 case "error_frames":
177 counters.ErrorFrames = *vp.PUInt64()
178 /*
179 case "fc_no_free_exch":
180 counters.FcNoFreeExch = *vp.PUInt64()
181 case "fc_no_free_exch_xid":
182 counters.FcNoFreeExchXid = *vp.PUInt64()
183 case "fc_non_bls_resp":
184 counters.FcNonBlsResp = *vp.PUInt64()
185 case "fc_seq_not_found":
186 counters.FcSeqNotFound = *vp.PUInt64()
187 case "fc_xid_busy":
188 counters.FcXidBusy = *vp.PUInt64()
189 case "fc_xid_not_found":
190 counters.FcXidNotFound = *vp.PUInt64()
191 case "fcp_control_requests":
192 counters.FcpControlRequests = *vp.PUInt64()
193 case "fcp_frame_alloc_failures":
194 counters.FcpFrameAllocFailures = *vp.PUInt64()
195 case "fcp_input_megabytes":
196 counters.FcpInputMegabytes = *vp.PUInt64()
197 case "fcp_input_requests":
198 counters.FcpInputRequests = *vp.PUInt64()
199 case "fcp_output_megabytes":
200 counters.FcpOutputMegabytes = *vp.PUInt64()
201 case "fcp_output_requests":
202 counters.FcpOutputRequests = *vp.PUInt64()
203 */
204 case "fcp_packet_aborts":
205 counters.FCPPacketAborts = *vp.PUInt64()
206 /*
207 case "fcp_packet_alloc_failures":
208 counters.FcpPacketAllocFailures = *vp.PUInt64()
209 */
210 case "invalid_tx_word_count":
211 counters.InvalidTXWordCount = *vp.PUInt64()
212 case "invalid_crc_count":
213 counters.InvalidCRCCount = *vp.PUInt64()
214 case "link_failure_count":
215 counters.LinkFailureCount = *vp.PUInt64()
216 /*
217 case "lip_count":
218 counters.LipCount = *vp.PUInt64()
219 */
220 case "loss_of_signal_count":
221 counters.LossOfSignalCount = *vp.PUInt64()
222 case "loss_of_sync_count":
223 counters.LossOfSyncCount = *vp.PUInt64()
224 case "nos_count":
225 counters.NosCount = *vp.PUInt64()
226 /*
227 case "prim_seq_protocol_err_count":
228 counters.PrimSeqProtocolErrCount = *vp.PUInt64()
229 */
230 case "rx_frames":
231 counters.RXFrames = *vp.PUInt64()
232 case "rx_words":
233 counters.RXWords = *vp.PUInt64()
234 case "seconds_since_last_reset":
235 counters.SecondsSinceLastReset = *vp.PUInt64()
236 case "tx_frames":
237 counters.TXFrames = *vp.PUInt64()
238 case "tx_words":
239 counters.TXWords = *vp.PUInt64()
240 }
241
242 if err := vp.Err(); err != nil {
243 return nil, err
244 }
245
246 }
247
248 return &counters, nil
249}
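A minimal sketch (not part of this change) of the new FibreChannelClass accessor; sysfs.NewFS is assumed from the wider sysfs package API. On machines without FC HBAs the class/fc_host directory does not exist and the call simply returns an error.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs/sysfs"
)

func main() {
	fs, err := sysfs.NewFS("/sys") // assumed constructor, not shown in this diff
	if err != nil {
		log.Fatal(err)
	}
	hosts, err := fs.FibreChannelClass()
	if err != nil {
		log.Fatal(err)
	}
	for name, host := range hosts {
		fmt.Printf("%s speed=%s state=%s rx_frames=%d tx_frames=%d\n",
			name, host.Speed, host.PortState, host.Counters.RXFrames, host.Counters.TXFrames)
	}
}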
diff --git a/vendor/github.com/prometheus/procfs/sysfs/class_infiniband.go b/vendor/github.com/prometheus/procfs/sysfs/class_infiniband.go
index be900fa..0826e1c 100644
--- a/vendor/github.com/prometheus/procfs/sysfs/class_infiniband.go
+++ b/vendor/github.com/prometheus/procfs/sysfs/class_infiniband.go
@@ -42,22 +42,26 @@ type InfiniBandCounters struct {
42 LegacyPortXmitData64 *uint64 // counters_ext/port_xmit_data_64 42 LegacyPortXmitData64 *uint64 // counters_ext/port_xmit_data_64
43 LegacyPortXmitPackets64 *uint64 // counters_ext/port_xmit_packets_64 43 LegacyPortXmitPackets64 *uint64 // counters_ext/port_xmit_packets_64
44 44
45 LinkDowned *uint64 // counters/link_downed 45 LinkDowned *uint64 // counters/link_downed
46 LinkErrorRecovery *uint64 // counters/link_error_recovery 46 LinkErrorRecovery *uint64 // counters/link_error_recovery
47 MulticastRcvPackets *uint64 // counters/multicast_rcv_packets 47 MulticastRcvPackets *uint64 // counters/multicast_rcv_packets
48 MulticastXmitPackets *uint64 // counters/multicast_xmit_packets 48 MulticastXmitPackets *uint64 // counters/multicast_xmit_packets
49 PortRcvConstraintErrors *uint64 // counters/port_rcv_constraint_errors 49 PortRcvConstraintErrors *uint64 // counters/port_rcv_constraint_errors
50 PortRcvData *uint64 // counters/port_rcv_data 50 PortRcvData *uint64 // counters/port_rcv_data
51 PortRcvDiscards *uint64 // counters/port_rcv_discards 51 PortRcvDiscards *uint64 // counters/port_rcv_discards
52 PortRcvErrors *uint64 // counters/port_rcv_errors 52 PortRcvErrors *uint64 // counters/port_rcv_errors
53 PortRcvPackets *uint64 // counters/port_rcv_packets 53 PortRcvPackets *uint64 // counters/port_rcv_packets
54 PortXmitConstraintErrors *uint64 // counters/port_xmit_constraint_errors 54 PortRcvRemotePhysicalErrors *uint64 // counters/port_rcv_remote_physical_errors
55 PortXmitData *uint64 // counters/port_xmit_data 55 PortRcvSwitchRelayErrors *uint64 // counters/port_rcv_switch_relay_errors
56 PortXmitDiscards *uint64 // counters/port_xmit_discards 56 PortXmitConstraintErrors *uint64 // counters/port_xmit_constraint_errors
57 PortXmitPackets *uint64 // counters/port_xmit_packets 57 PortXmitData *uint64 // counters/port_xmit_data
58 PortXmitWait *uint64 // counters/port_xmit_wait 58 PortXmitDiscards *uint64 // counters/port_xmit_discards
59 UnicastRcvPackets *uint64 // counters/unicast_rcv_packets 59 PortXmitPackets *uint64 // counters/port_xmit_packets
60 UnicastXmitPackets *uint64 // counters/unicast_xmit_packets 60 PortXmitWait *uint64 // counters/port_xmit_wait
61 SymbolError *uint64 // counters/symbol_error
62 UnicastRcvPackets *uint64 // counters/unicast_rcv_packets
63 UnicastXmitPackets *uint64 // counters/unicast_xmit_packets
64 VL15Dropped *uint64 // counters/VL15_dropped
61} 65}
62 66
63// InfiniBandPort contains info from files in 67// InfiniBandPort contains info from files in
@@ -287,6 +291,10 @@ func parseInfiniBandCounters(portPath string) (*InfiniBandCounters, error) {
287 counters.PortRcvErrors = vp.PUInt64() 291 counters.PortRcvErrors = vp.PUInt64()
288 case "port_rcv_packets": 292 case "port_rcv_packets":
289 counters.PortRcvPackets = vp.PUInt64() 293 counters.PortRcvPackets = vp.PUInt64()
294 case "port_rcv_remote_physical_errors":
295 counters.PortRcvRemotePhysicalErrors = vp.PUInt64()
296 case "port_rcv_switch_relay_errors":
297 counters.PortRcvSwitchRelayErrors = vp.PUInt64()
290 case "port_xmit_constraint_errors": 298 case "port_xmit_constraint_errors":
291 counters.PortXmitConstraintErrors = vp.PUInt64() 299 counters.PortXmitConstraintErrors = vp.PUInt64()
292 case "port_xmit_data": 300 case "port_xmit_data":
@@ -300,10 +308,14 @@ func parseInfiniBandCounters(portPath string) (*InfiniBandCounters, error) {
300 counters.PortXmitPackets = vp.PUInt64() 308 counters.PortXmitPackets = vp.PUInt64()
301 case "port_xmit_wait": 309 case "port_xmit_wait":
302 counters.PortXmitWait = vp.PUInt64() 310 counters.PortXmitWait = vp.PUInt64()
311 case "symbol_error":
312 counters.SymbolError = vp.PUInt64()
303 case "unicast_rcv_packets": 313 case "unicast_rcv_packets":
304 counters.UnicastRcvPackets = vp.PUInt64() 314 counters.UnicastRcvPackets = vp.PUInt64()
305 case "unicast_xmit_packets": 315 case "unicast_xmit_packets":
306 counters.UnicastXmitPackets = vp.PUInt64() 316 counters.UnicastXmitPackets = vp.PUInt64()
317 case "VL15_dropped":
318 counters.VL15Dropped = vp.PUInt64()
307 } 319 }
308 320
309 if err := vp.Err(); err != nil { 321 if err := vp.Err(); err != nil {
diff --git a/vendor/github.com/prometheus/procfs/sysfs/class_power_supply.go b/vendor/github.com/prometheus/procfs/sysfs/class_power_supply.go
index d12e913..dceec9e 100644
--- a/vendor/github.com/prometheus/procfs/sysfs/class_power_supply.go
+++ b/vendor/github.com/prometheus/procfs/sysfs/class_power_supply.go
@@ -110,7 +110,7 @@ func (fs FS) PowerSupplyClass() (PowerSupplyClass, error) {
110 110
111 dirs, err := ioutil.ReadDir(path) 111 dirs, err := ioutil.ReadDir(path)
112 if err != nil { 112 if err != nil {
113 return nil, fmt.Errorf("failed to list power supplies at %q: %v", path, err) 113 return nil, err
114 } 114 }
115 115
116 psc := make(PowerSupplyClass, len(dirs)) 116 psc := make(PowerSupplyClass, len(dirs))
diff --git a/vendor/github.com/prometheus/procfs/sysfs/class_thermal.go b/vendor/github.com/prometheus/procfs/sysfs/class_thermal.go
index cfe11ad..493a531 100644
--- a/vendor/github.com/prometheus/procfs/sysfs/class_thermal.go
+++ b/vendor/github.com/prometheus/procfs/sysfs/class_thermal.go
@@ -16,9 +16,11 @@
16package sysfs 16package sysfs
17 17
18import ( 18import (
19 "errors"
19 "os" 20 "os"
20 "path/filepath" 21 "path/filepath"
21 "strings" 22 "strings"
23 "syscall"
22 24
23 "github.com/prometheus/procfs/internal/util" 25 "github.com/prometheus/procfs/internal/util"
24) 26)
@@ -29,7 +31,7 @@ import (
29type ClassThermalZoneStats struct { 31type ClassThermalZoneStats struct {
30 Name string // The name of the zone from the directory structure. 32 Name string // The name of the zone from the directory structure.
31 Type string // The type of thermal zone. 33 Type string // The type of thermal zone.
32 Temp uint64 // Temperature in millidegree Celsius. 34 Temp int64 // Temperature in millidegree Celsius.
33 Policy string // One of the various thermal governors used for a particular zone. 35 Policy string // One of the various thermal governors used for a particular zone.
34 Mode *bool // Optional: One of the predefined values in [enabled, disabled]. 36 Mode *bool // Optional: One of the predefined values in [enabled, disabled].
35 Passive *uint64 // Optional: millidegrees Celsius. (0 for disabled, > 1000 for enabled+value) 37 Passive *uint64 // Optional: millidegrees Celsius. (0 for disabled, > 1000 for enabled+value)
@@ -39,20 +41,20 @@ type ClassThermalZoneStats struct {
39func (fs FS) ClassThermalZoneStats() ([]ClassThermalZoneStats, error) { 41func (fs FS) ClassThermalZoneStats() ([]ClassThermalZoneStats, error) {
40 zones, err := filepath.Glob(fs.sys.Path("class/thermal/thermal_zone[0-9]*")) 42 zones, err := filepath.Glob(fs.sys.Path("class/thermal/thermal_zone[0-9]*"))
41 if err != nil { 43 if err != nil {
42 return []ClassThermalZoneStats{}, err 44 return nil, err
43 } 45 }
44 46
45 var zoneStats = ClassThermalZoneStats{} 47 stats := make([]ClassThermalZoneStats, 0, len(zones))
46 stats := make([]ClassThermalZoneStats, len(zones)) 48 for _, zone := range zones {
47 for i, zone := range zones { 49 zoneStats, err := parseClassThermalZone(zone)
48 zoneName := strings.TrimPrefix(filepath.Base(zone), "thermal_zone")
49
50 zoneStats, err = parseClassThermalZone(zone)
51 if err != nil { 50 if err != nil {
52 return []ClassThermalZoneStats{}, err 51 if errors.Is(err, syscall.ENODATA) {
52 continue
53 }
54 return nil, err
53 } 55 }
54 zoneStats.Name = zoneName 56 zoneStats.Name = strings.TrimPrefix(filepath.Base(zone), "thermal_zone")
55 stats[i] = zoneStats 57 stats = append(stats, zoneStats)
56 } 58 }
57 return stats, nil 59 return stats, nil
58} 60}
@@ -67,7 +69,7 @@ func parseClassThermalZone(zone string) (ClassThermalZoneStats, error) {
67 if err != nil { 69 if err != nil {
68 return ClassThermalZoneStats{}, err 70 return ClassThermalZoneStats{}, err
69 } 71 }
70 zoneTemp, err := util.ReadUintFromFile(filepath.Join(zone, "temp")) 72 zoneTemp, err := util.ReadIntFromFile(filepath.Join(zone, "temp"))
71 if err != nil { 73 if err != nil {
72 return ClassThermalZoneStats{}, err 74 return ClassThermalZoneStats{}, err
73 } 75 }
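A short sketch (not part of this change) of the thermal accessor after the changes above: Temp is now a signed value and zones that return ENODATA are skipped rather than failing the whole scrape. sysfs.NewFS is assumed from the wider sysfs package API.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs/sysfs"
)

func main() {
	fs, err := sysfs.NewFS("/sys") // assumed constructor, not shown in this diff
	if err != nil {
		log.Fatal(err)
	}
	zones, err := fs.ClassThermalZoneStats()
	if err != nil {
		log.Fatal(err)
	}
	for _, z := range zones {
		// Temp is int64 millidegrees Celsius, so sub-zero readings survive the parse.
		fmt.Printf("zone %s (%s): %.3f C\n", z.Name, z.Type, float64(z.Temp)/1000.0)
	}
}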
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 05395a0..983f45d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -81,7 +81,7 @@ github.com/prometheus/common/model
81github.com/prometheus/common/promlog 81github.com/prometheus/common/promlog
82github.com/prometheus/common/promlog/flag 82github.com/prometheus/common/promlog/flag
83github.com/prometheus/common/version 83github.com/prometheus/common/version
84# github.com/prometheus/procfs v0.0.11 84# github.com/prometheus/procfs v0.1.3
85## explicit 85## explicit
86github.com/prometheus/procfs 86github.com/prometheus/procfs
87github.com/prometheus/procfs/bcache 87github.com/prometheus/procfs/bcache