aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDaniel Hodges <hodges.daniel.scott@gmail.com>2019-05-07 07:21:41 -0400
committerBen Kochie <superq@gmail.com>2019-05-07 13:21:41 +0200
commit78820098702e41929c84619720e19c89f8b2cdae (patch)
tree30402068c47002d017dbdcc976cf328940e6588f
parent0c6b90be4e018d92e15925994403f351b668f76d (diff)
downloadprometheus_node_collector-78820098702e41929c84619720e19c89f8b2cdae.tar.bz2
prometheus_node_collector-78820098702e41929c84619720e19c89f8b2cdae.tar.xz
prometheus_node_collector-78820098702e41929c84619720e19c89f8b2cdae.zip
Add perf exporter (#1274)
Signed-off-by: Daniel Hodges <hodges.daniel.scott@gmail.com>
-rw-r--r--CHANGELOG.md1
-rw-r--r--README.md18
-rw-r--r--collector/perf_linux.go567
-rw-r--r--collector/perf_linux_test.go55
-rw-r--r--go.mod3
-rw-r--r--go.sum6
-rw-r--r--vendor/github.com/hodgesds/perf-utils/.gitignore2
-rw-r--r--vendor/github.com/hodgesds/perf-utils/Gopkg.lock15
-rw-r--r--vendor/github.com/hodgesds/perf-utils/Gopkg.toml34
-rw-r--r--vendor/github.com/hodgesds/perf-utils/LICENSE22
-rw-r--r--vendor/github.com/hodgesds/perf-utils/README.md120
-rw-r--r--vendor/github.com/hodgesds/perf-utils/bpf.go22
-rw-r--r--vendor/github.com/hodgesds/perf-utils/cache_profiler.go336
-rw-r--r--vendor/github.com/hodgesds/perf-utils/events.go98
-rw-r--r--vendor/github.com/hodgesds/perf-utils/fs_utils.go102
-rw-r--r--vendor/github.com/hodgesds/perf-utils/group_profiler.go170
-rw-r--r--vendor/github.com/hodgesds/perf-utils/hardware_profiler.go157
-rw-r--r--vendor/github.com/hodgesds/perf-utils/process_profile.go507
-rw-r--r--vendor/github.com/hodgesds/perf-utils/software_profiler.go151
-rw-r--r--vendor/github.com/hodgesds/perf-utils/utils.go681
-rw-r--r--vendor/go.uber.org/atomic/.codecov.yml15
-rw-r--r--vendor/go.uber.org/atomic/.gitignore11
-rw-r--r--vendor/go.uber.org/atomic/.travis.yml23
-rw-r--r--vendor/go.uber.org/atomic/LICENSE.txt19
-rw-r--r--vendor/go.uber.org/atomic/Makefile64
-rw-r--r--vendor/go.uber.org/atomic/README.md36
-rw-r--r--vendor/go.uber.org/atomic/atomic.go351
-rw-r--r--vendor/go.uber.org/atomic/glide.lock17
-rw-r--r--vendor/go.uber.org/atomic/glide.yaml6
-rw-r--r--vendor/go.uber.org/atomic/string.go49
-rw-r--r--vendor/go.uber.org/multierr/.codecov.yml15
-rw-r--r--vendor/go.uber.org/multierr/.gitignore1
-rw-r--r--vendor/go.uber.org/multierr/.travis.yml33
-rw-r--r--vendor/go.uber.org/multierr/CHANGELOG.md28
-rw-r--r--vendor/go.uber.org/multierr/LICENSE.txt19
-rw-r--r--vendor/go.uber.org/multierr/Makefile74
-rw-r--r--vendor/go.uber.org/multierr/README.md23
-rw-r--r--vendor/go.uber.org/multierr/error.go401
-rw-r--r--vendor/go.uber.org/multierr/glide.lock19
-rw-r--r--vendor/go.uber.org/multierr/glide.yaml8
-rw-r--r--vendor/modules.txt6
41 files changed, 4285 insertions, 0 deletions
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9237e24..62515a2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -32,6 +32,7 @@
32* [FEATURE] Add diskstats collector for OpenBSD #1250 32* [FEATURE] Add diskstats collector for OpenBSD #1250
33* [CHANGE] Bonding state uses mii_status #1124 33* [CHANGE] Bonding state uses mii_status #1124
34* [FEATURE] Add pressure collector exposing pressure stall information for Linux #1174 34* [FEATURE] Add pressure collector exposing pressure stall information for Linux #1174
35* [FEATURE] Add perf exporter for Linux #1274
35 36
36## 0.17.0 / 2018-11-30 37## 0.17.0 / 2018-11-30
37 38
diff --git a/README.md b/README.md
index 521c818..b73c8af 100644
--- a/README.md
+++ b/README.md
@@ -63,6 +63,23 @@ zfs | Exposes [ZFS](http://open-zfs.org/) performance statistics. | [Linux](http
63 63
64### Disabled by default 64### Disabled by default
65 65
66The perf collector may not work by default on all Linux systems due to kernel
67configuration and security settings. To allow access, set the following sysctl
68parameter:
69
70```
71sysctl -w kernel.perf_event_paranoid=X
72```
73
74- 2 allow only user-space measurements (default since Linux 4.6).
75- 1 allow both kernel and user measurements (default before Linux 4.6).
76- 0 allow access to CPU-specific data but not raw tracepoint samples.
77- -1 no restrictions.
78
79Depending on the configured value, different metrics will be available; for most
80cases `0` will provide the most complete set. For more information see [`man 2
81perf_event_open`](http://man7.org/linux/man-pages/man2/perf_event_open.2.html).
82
66Name | Description | OS 83Name | Description | OS
67---------|-------------|---- 84---------|-------------|----
68buddyinfo | Exposes statistics of memory fragments as reported by /proc/buddyinfo. | Linux 85buddyinfo | Exposes statistics of memory fragments as reported by /proc/buddyinfo. | Linux
@@ -81,6 +98,7 @@ supervisord | Exposes service status from [supervisord](http://supervisord.org/)
81systemd | Exposes service and system status from [systemd](http://www.freedesktop.org/wiki/Software/systemd/). | Linux 98systemd | Exposes service and system status from [systemd](http://www.freedesktop.org/wiki/Software/systemd/). | Linux
82tcpstat | Exposes TCP connection status information from `/proc/net/tcp` and `/proc/net/tcp6`. (Warning: the current version has potential performance issues in high load situations.) | Linux 99tcpstat | Exposes TCP connection status information from `/proc/net/tcp` and `/proc/net/tcp6`. (Warning: the current version has potential performance issues in high load situations.) | Linux
83wifi | Exposes WiFi device and station statistics. | Linux 100wifi | Exposes WiFi device and station statistics. | Linux
101perf | Exposes perf based metrics (Warning: Metrics are dependent on kernel configuration and settings). | Linux
84 102
85### Textfile Collector 103### Textfile Collector
86 104
diff --git a/collector/perf_linux.go b/collector/perf_linux.go
new file mode 100644
index 0000000..0ab7b84
--- /dev/null
+++ b/collector/perf_linux.go
@@ -0,0 +1,567 @@
1// Copyright 2019 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14package collector
15
16import (
17 "fmt"
18 "runtime"
19
20 perf "github.com/hodgesds/perf-utils"
21 "github.com/prometheus/client_golang/prometheus"
22)
23
24const (
25 perfSubsystem = "perf"
26)
27
28func init() {
29 registerCollector(perfSubsystem, defaultDisabled, NewPerfCollector)
30}
31
32// perfCollector is a Collector that uses the perf subsystem to collect
33// metrics. It uses perf_event_open and ioctls for profiling. Due to the fact
34// that the perf subsystem is highly dependent on kernel configuration and
35// settings not all profiler values may be exposed on the target system at any
36// given time.
37type perfCollector struct {
38 perfHwProfilers map[int]perf.HardwareProfiler
39 perfSwProfilers map[int]perf.SoftwareProfiler
40 perfCacheProfilers map[int]perf.CacheProfiler
41 desc map[string]*prometheus.Desc
42}
43
44// NewPerfCollector returns a new perf based collector, it creates a profiler
45// per CPU.
46func NewPerfCollector() (Collector, error) {
47 collector := &perfCollector{
48 perfHwProfilers: map[int]perf.HardwareProfiler{},
49 perfSwProfilers: map[int]perf.SoftwareProfiler{},
50 perfCacheProfilers: map[int]perf.CacheProfiler{},
51 }
52 ncpus := runtime.NumCPU()
53 for i := 0; i < ncpus; i++ {
54 // Use -1 to profile all processes on the CPU, see:
55 // man perf_event_open
56 collector.perfHwProfilers[i] = perf.NewHardwareProfiler(-1, i)
57 if err := collector.perfHwProfilers[i].Start(); err != nil {
58 return collector, err
59 }
60 collector.perfSwProfilers[i] = perf.NewSoftwareProfiler(-1, i)
61 if err := collector.perfSwProfilers[i].Start(); err != nil {
62 return collector, err
63 }
64 collector.perfCacheProfilers[i] = perf.NewCacheProfiler(-1, i)
65 if err := collector.perfCacheProfilers[i].Start(); err != nil {
66 return collector, err
67 }
68 }
69 collector.desc = map[string]*prometheus.Desc{
70 "cpucycles_total": prometheus.NewDesc(
71 prometheus.BuildFQName(
72 namespace,
73 perfSubsystem,
74 "cpucycles_total",
75 ),
76 "Number of CPU cycles (frequency scaled)",
77 []string{"cpu"},
78 nil,
79 ),
80 "instructions_total": prometheus.NewDesc(
81 prometheus.BuildFQName(
82 namespace,
83 perfSubsystem,
84 "instructions_total",
85 ),
86 "Number of CPU instructions",
87 []string{"cpu"},
88 nil,
89 ),
90 "branch_instructions_total": prometheus.NewDesc(
91 prometheus.BuildFQName(
92 namespace,
93 perfSubsystem,
94 "branch_instructions_total",
95 ),
96 "Number of CPU branch instructions",
97 []string{"cpu"},
98 nil,
99 ),
100 "branch_misses_total": prometheus.NewDesc(
101 prometheus.BuildFQName(
102 namespace,
103 perfSubsystem,
104 "branch_misses_total",
105 ),
106 "Number of CPU branch misses",
107 []string{"cpu"},
108 nil,
109 ),
110 "cache_refs_total": prometheus.NewDesc(
111 prometheus.BuildFQName(
112 namespace,
113 perfSubsystem,
114 "cache_refs_total",
115 ),
116 "Number of cache references (non frequency scaled)",
117 []string{"cpu"},
118 nil,
119 ),
120 "cache_misses_total": prometheus.NewDesc(
121 prometheus.BuildFQName(
122 namespace,
123 perfSubsystem,
124 "cache_misses_total",
125 ),
126 "Number of cache misses",
127 []string{"cpu"},
128 nil,
129 ),
130 "ref_cpucycles_total": prometheus.NewDesc(
131 prometheus.BuildFQName(
132 namespace,
133 perfSubsystem,
134 "ref_cpucycles_total",
135 ),
136 "Number of CPU cycles",
137 []string{"cpu"},
138 nil,
139 ),
140 "page_faults_total": prometheus.NewDesc(
141 prometheus.BuildFQName(
142 namespace,
143 perfSubsystem,
144 "page_faults_total",
145 ),
146 "Number of page faults",
147 []string{"cpu"},
148 nil,
149 ),
150 "context_switches_total": prometheus.NewDesc(
151 prometheus.BuildFQName(
152 namespace,
153 perfSubsystem,
154 "context_switches_total",
155 ),
156 "Number of context switches",
157 []string{"cpu"},
158 nil,
159 ),
160 "cpu_migrations_total": prometheus.NewDesc(
161 prometheus.BuildFQName(
162 namespace,
163 perfSubsystem,
164 "cpu_migrations_total",
165 ),
166 "Number of CPU process migrations",
167 []string{"cpu"},
168 nil,
169 ),
170 "minor_faults_total": prometheus.NewDesc(
171 prometheus.BuildFQName(
172 namespace,
173 perfSubsystem,
174 "minor_faults_total",
175 ),
176 "Number of minor page faults",
177 []string{"cpu"},
178 nil,
179 ),
180 "major_faults_total": prometheus.NewDesc(
181 prometheus.BuildFQName(
182 namespace,
183 perfSubsystem,
184 "major_faults_total",
185 ),
186 "Number of major page faults",
187 []string{"cpu"},
188 nil,
189 ),
190 "cache_l1d_read_hits_total": prometheus.NewDesc(
191 prometheus.BuildFQName(
192 namespace,
193 perfSubsystem,
194 "cache_l1d_read_hits_total",
195 ),
196 "Number L1 data cache read hits",
197 []string{"cpu"},
198 nil,
199 ),
200 "cache_l1d_read_misses_total": prometheus.NewDesc(
201 prometheus.BuildFQName(
202 namespace,
203 perfSubsystem,
204 "cache_l1d_read_misses_total",
205 ),
206 "Number L1 data cache read misses",
207 []string{"cpu"},
208 nil,
209 ),
210 "cache_l1d_write_hits_total": prometheus.NewDesc(
211 prometheus.BuildFQName(
212 namespace,
213 perfSubsystem,
214 "cache_l1d_write_hits_total",
215 ),
216 "Number L1 data cache write hits",
217 []string{"cpu"},
218 nil,
219 ),
220 "cache_l1_instr_read_misses_total": prometheus.NewDesc(
221 prometheus.BuildFQName(
222 namespace,
223 perfSubsystem,
224 "cache_l1_instr_read_misses_total",
225 ),
226 "Number instruction L1 instruction read misses",
227 []string{"cpu"},
228 nil,
229 ),
230 "cache_tlb_instr_read_hits_total": prometheus.NewDesc(
231 prometheus.BuildFQName(
232 namespace,
233 perfSubsystem,
234 "cache_tlb_instr_read_hits_total",
235 ),
236 "Number instruction TLB read hits",
237 []string{"cpu"},
238 nil,
239 ),
240 "cache_tlb_instr_read_misses_total": prometheus.NewDesc(
241 prometheus.BuildFQName(
242 namespace,
243 perfSubsystem,
244 "cache_tlb_instr_read_misses_total",
245 ),
246 "Number instruction TLB read misses",
247 []string{"cpu"},
248 nil,
249 ),
250 "cache_ll_read_hits_total": prometheus.NewDesc(
251 prometheus.BuildFQName(
252 namespace,
253 perfSubsystem,
254 "cache_ll_read_hits_total",
255 ),
256 "Number last level read hits",
257 []string{"cpu"},
258 nil,
259 ),
260 "cache_ll_read_misses_total": prometheus.NewDesc(
261 prometheus.BuildFQName(
262 namespace,
263 perfSubsystem,
264 "cache_ll_read_misses_total",
265 ),
266 "Number last level read misses",
267 []string{"cpu"},
268 nil,
269 ),
270 "cache_ll_write_hits_total": prometheus.NewDesc(
271 prometheus.BuildFQName(
272 namespace,
273 perfSubsystem,
274 "cache_ll_write_hits_total",
275 ),
276 "Number last level write hits",
277 []string{"cpu"},
278 nil,
279 ),
280 "cache_ll_write_misses_total": prometheus.NewDesc(
281 prometheus.BuildFQName(
282 namespace,
283 perfSubsystem,
284 "cache_ll_write_misses_total",
285 ),
286 "Number last level write misses",
287 []string{"cpu"},
288 nil,
289 ),
290 "cache_bpu_read_hits_total": prometheus.NewDesc(
291 prometheus.BuildFQName(
292 namespace,
293 perfSubsystem,
294 "cache_bpu_read_hits_total",
295 ),
296 "Number BPU read hits",
297 []string{"cpu"},
298 nil,
299 ),
300 "cache_bpu_read_misses_total": prometheus.NewDesc(
301 prometheus.BuildFQName(
302 namespace,
303 perfSubsystem,
304 "cache_bpu_read_misses_total",
305 ),
306 "Number BPU read misses",
307 []string{"cpu"},
308 nil,
309 ),
310 }
311
312 return collector, nil
313}
314
315// Update implements the Collector interface and will collect metrics per CPU.
316func (c *perfCollector) Update(ch chan<- prometheus.Metric) error {
317 if err := c.updateHardwareStats(ch); err != nil {
318 return err
319 }
320
321 if err := c.updateSoftwareStats(ch); err != nil {
322 return err
323 }
324
325 if err := c.updateCacheStats(ch); err != nil {
326 return err
327 }
328
329 return nil
330}
331
332func (c *perfCollector) updateHardwareStats(ch chan<- prometheus.Metric) error {
333 for cpu, profiler := range c.perfHwProfilers {
334 cpuStr := fmt.Sprintf("%d", cpu)
335 hwProfile, err := profiler.Profile()
336 if err != nil {
337 return err
338 }
339 if hwProfile == nil {
340 continue
341 }
342
343 if hwProfile.CPUCycles != nil {
344 ch <- prometheus.MustNewConstMetric(
345 c.desc["cpucycles_total"],
346 prometheus.CounterValue, float64(*hwProfile.CPUCycles),
347 cpuStr,
348 )
349 }
350
351 if hwProfile.Instructions != nil {
352 ch <- prometheus.MustNewConstMetric(
353 c.desc["instructions_total"],
354 prometheus.CounterValue, float64(*hwProfile.Instructions),
355 cpuStr,
356 )
357 }
358
359 if hwProfile.BranchInstr != nil {
360 ch <- prometheus.MustNewConstMetric(
361 c.desc["branch_instructions_total"],
362 prometheus.CounterValue, float64(*hwProfile.BranchInstr),
363 cpuStr,
364 )
365 }
366
367 if hwProfile.BranchMisses != nil {
368 ch <- prometheus.MustNewConstMetric(
369 c.desc["branch_misses_total"],
370 prometheus.CounterValue, float64(*hwProfile.BranchMisses),
371 cpuStr,
372 )
373 }
374
375 if hwProfile.CacheRefs != nil {
376 ch <- prometheus.MustNewConstMetric(
377 c.desc["cache_refs_total"],
378 prometheus.CounterValue, float64(*hwProfile.CacheRefs),
379 cpuStr,
380 )
381 }
382
383 if hwProfile.CacheMisses != nil {
384 ch <- prometheus.MustNewConstMetric(
385 c.desc["cache_misses_total"],
386 prometheus.CounterValue, float64(*hwProfile.CacheMisses),
387 cpuStr,
388 )
389 }
390
391 if hwProfile.RefCPUCycles != nil {
392 ch <- prometheus.MustNewConstMetric(
393 c.desc["ref_cpucycles_total"],
394 prometheus.CounterValue, float64(*hwProfile.RefCPUCycles),
395 cpuStr,
396 )
397 }
398 }
399
400 return nil
401}
402
403func (c *perfCollector) updateSoftwareStats(ch chan<- prometheus.Metric) error {
404 for cpu, profiler := range c.perfSwProfilers {
405 cpuStr := fmt.Sprintf("%d", cpu)
406 swProfile, err := profiler.Profile()
407 if err != nil {
408 return err
409 }
410 if swProfile == nil {
411 continue
412 }
413
414 if swProfile.PageFaults != nil {
415 ch <- prometheus.MustNewConstMetric(
416 c.desc["page_faults_total"],
417 prometheus.CounterValue, float64(*swProfile.PageFaults),
418 cpuStr,
419 )
420 }
421
422 if swProfile.ContextSwitches != nil {
423 ch <- prometheus.MustNewConstMetric(
424 c.desc["context_switches_total"],
425 prometheus.CounterValue, float64(*swProfile.ContextSwitches),
426 cpuStr,
427 )
428 }
429
430 if swProfile.CPUMigrations != nil {
431 ch <- prometheus.MustNewConstMetric(
432 c.desc["cpu_migrations_total"],
433 prometheus.CounterValue, float64(*swProfile.CPUMigrations),
434 cpuStr,
435 )
436 }
437
438 if swProfile.MinorPageFaults != nil {
439 ch <- prometheus.MustNewConstMetric(
440 c.desc["minor_faults_total"],
441 prometheus.CounterValue, float64(*swProfile.MinorPageFaults),
442 cpuStr,
443 )
444 }
445
446 if swProfile.MajorPageFaults != nil {
447 ch <- prometheus.MustNewConstMetric(
448 c.desc["major_faults_total"],
449 prometheus.CounterValue, float64(*swProfile.MajorPageFaults),
450 cpuStr,
451 )
452 }
453 }
454
455 return nil
456}
457
458func (c *perfCollector) updateCacheStats(ch chan<- prometheus.Metric) error {
459 for cpu, profiler := range c.perfCacheProfilers {
460 cpuStr := fmt.Sprintf("%d", cpu)
461 cacheProfile, err := profiler.Profile()
462 if err != nil {
463 return err
464 }
465 if cacheProfile == nil {
466 continue
467 }
468
469 if cacheProfile.L1DataReadHit != nil {
470 ch <- prometheus.MustNewConstMetric(
471 c.desc["cache_l1d_read_hits_total"],
472 prometheus.CounterValue, float64(*cacheProfile.L1DataReadHit),
473 cpuStr,
474 )
475 }
476
477 if cacheProfile.L1DataReadMiss != nil {
478 ch <- prometheus.MustNewConstMetric(
479 c.desc["cache_l1d_read_misses_total"],
480 prometheus.CounterValue, float64(*cacheProfile.L1DataReadMiss),
481 cpuStr,
482 )
483 }
484
485 if cacheProfile.L1DataWriteHit != nil {
486 ch <- prometheus.MustNewConstMetric(
487 c.desc["cache_l1d_write_hits_total"],
488 prometheus.CounterValue, float64(*cacheProfile.L1DataWriteHit),
489 cpuStr,
490 )
491 }
492
493 if cacheProfile.L1InstrReadMiss != nil {
494 ch <- prometheus.MustNewConstMetric(
495 c.desc["cache_l1_instr_read_misses_total"],
496 prometheus.CounterValue, float64(*cacheProfile.L1InstrReadMiss),
497 cpuStr,
498 )
499 }
500
501 if cacheProfile.InstrTLBReadHit != nil {
502 ch <- prometheus.MustNewConstMetric(
503 c.desc["cache_tlb_instr_read_hits_total"],
504 prometheus.CounterValue, float64(*cacheProfile.InstrTLBReadHit),
505 cpuStr,
506 )
507 }
508
509 if cacheProfile.InstrTLBReadMiss != nil {
510 ch <- prometheus.MustNewConstMetric(
511 c.desc["cache_tlb_instr_read_misses_total"],
512 prometheus.CounterValue, float64(*cacheProfile.InstrTLBReadMiss),
513 cpuStr,
514 )
515 }
516
517 if cacheProfile.LastLevelReadHit != nil {
518 ch <- prometheus.MustNewConstMetric(
519 c.desc["cache_ll_read_hits_total"],
520 prometheus.CounterValue, float64(*cacheProfile.LastLevelReadHit),
521 cpuStr,
522 )
523 }
524
525 if cacheProfile.LastLevelReadMiss != nil {
526 ch <- prometheus.MustNewConstMetric(
527 c.desc["cache_ll_read_misses_total"],
528 prometheus.CounterValue, float64(*cacheProfile.LastLevelReadMiss),
529 cpuStr,
530 )
531 }
532
533 if cacheProfile.LastLevelWriteHit != nil {
534 ch <- prometheus.MustNewConstMetric(
535 c.desc["cache_ll_write_hits_total"],
536 prometheus.CounterValue, float64(*cacheProfile.LastLevelWriteHit),
537 cpuStr,
538 )
539 }
540
541 if cacheProfile.LastLevelWriteMiss != nil {
542 ch <- prometheus.MustNewConstMetric(
543 c.desc["cache_ll_write_misses_total"],
544 prometheus.CounterValue, float64(*cacheProfile.LastLevelWriteMiss),
545 cpuStr,
546 )
547 }
548
549 if cacheProfile.BPUReadHit != nil {
550 ch <- prometheus.MustNewConstMetric(
551 c.desc["cache_bpu_read_hits_total"],
552 prometheus.CounterValue, float64(*cacheProfile.BPUReadHit),
553 cpuStr,
554 )
555 }
556
557 if cacheProfile.BPUReadMiss != nil {
558 ch <- prometheus.MustNewConstMetric(
559 c.desc["cache_bpu_read_misses_total"],
560 prometheus.CounterValue, float64(*cacheProfile.BPUReadMiss),
561 cpuStr,
562 )
563 }
564 }
565
566 return nil
567}
diff --git a/collector/perf_linux_test.go b/collector/perf_linux_test.go
new file mode 100644
index 0000000..0b57d10
--- /dev/null
+++ b/collector/perf_linux_test.go
@@ -0,0 +1,55 @@
1// Copyright 2019 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// +build !noprocesses
15
16package collector
17
18import (
19 "io/ioutil"
20 "strconv"
21 "strings"
22 "testing"
23
24 "github.com/prometheus/client_golang/prometheus"
25)
26
27func TestPerfCollector(t *testing.T) {
28 paranoidBytes, err := ioutil.ReadFile("/proc/sys/kernel/perf_event_paranoid")
29 if err != nil {
30 t.Skip("Procfs not mounted, skipping perf tests")
31 }
32 paranoidStr := strings.Replace(string(paranoidBytes), "\n", "", -1)
33 paranoid, err := strconv.Atoi(paranoidStr)
34 if err != nil {
35 t.Fatalf("Expected perf_event_paranoid to be an int, got: %s", paranoidStr)
36 }
37 if paranoid >= 1 {
38 t.Skip("Skipping perf tests, set perf_event_paranoid to 0")
39 }
40 collector, err := NewPerfCollector()
41 if err != nil {
42 t.Fatal(err)
43 }
44
45 // Setup background goroutine to capture metrics.
46 metrics := make(chan prometheus.Metric)
47 defer close(metrics)
48 go func() {
49 for range metrics {
50 }
51 }()
52 if err := collector.Update(metrics); err != nil {
53 t.Fatal(err)
54 }
55}
diff --git a/go.mod b/go.mod
index b3630e9..19e6872 100644
--- a/go.mod
+++ b/go.mod
@@ -7,6 +7,7 @@ require (
7 github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968 7 github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968
8 github.com/golang/protobuf v1.3.1 // indirect 8 github.com/golang/protobuf v1.3.1 // indirect
9 github.com/google/go-cmp v0.2.0 // indirect 9 github.com/google/go-cmp v0.2.0 // indirect
10 github.com/hodgesds/perf-utils v0.0.6
10 github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect 11 github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
11 github.com/lufia/iostat v0.0.0-20170605150913-9f7362b77ad3 12 github.com/lufia/iostat v0.0.0-20170605150913-9f7362b77ad3
12 github.com/mattn/go-xmlrpc v0.0.1 13 github.com/mattn/go-xmlrpc v0.0.1
@@ -21,6 +22,8 @@ require (
21 github.com/sirupsen/logrus v1.4.1 // indirect 22 github.com/sirupsen/logrus v1.4.1 // indirect
22 github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a 23 github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a
23 github.com/stretchr/testify v1.3.0 // indirect 24 github.com/stretchr/testify v1.3.0 // indirect
25 go.uber.org/atomic v1.3.2 // indirect
26 go.uber.org/multierr v1.1.0 // indirect
24 golang.org/x/net v0.0.0-20190328230028-74de082e2cca // indirect 27 golang.org/x/net v0.0.0-20190328230028-74de082e2cca // indirect
25 golang.org/x/sync v0.0.0-20190423024810-112230192c58 // indirect 28 golang.org/x/sync v0.0.0-20190423024810-112230192c58 // indirect
26 golang.org/x/sys v0.0.0-20190402142545-baf5eb976a8c 29 golang.org/x/sys v0.0.0-20190402142545-baf5eb976a8c
diff --git a/go.sum b/go.sum
index 661ae5b..86c9cdd 100644
--- a/go.sum
+++ b/go.sum
@@ -26,6 +26,8 @@ github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg
26github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 26github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
27github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= 27github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
28github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 28github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
29github.com/hodgesds/perf-utils v0.0.6 h1:qtHULYRGc+LEIADV2+XI1tJrb9d4PrWl5bwdA94WV3c=
30github.com/hodgesds/perf-utils v0.0.6/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs=
29github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= 31github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
30github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= 32github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
31github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 33github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -76,6 +78,10 @@ github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1
76github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 78github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
77github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= 79github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
78github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 80github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
81go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
82go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
83go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
84go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
79golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 85golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
80golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 86golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
81golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 87golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
diff --git a/vendor/github.com/hodgesds/perf-utils/.gitignore b/vendor/github.com/hodgesds/perf-utils/.gitignore
new file mode 100644
index 0000000..fbf5008
--- /dev/null
+++ b/vendor/github.com/hodgesds/perf-utils/.gitignore
@@ -0,0 +1,2 @@
1*.swp
2vendor
diff --git a/vendor/github.com/hodgesds/perf-utils/Gopkg.lock b/vendor/github.com/hodgesds/perf-utils/Gopkg.lock
new file mode 100644
index 0000000..acbc84b
--- /dev/null
+++ b/vendor/github.com/hodgesds/perf-utils/Gopkg.lock
@@ -0,0 +1,15 @@
1# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
2
3
4[[projects]]
5 branch = "master"
6 name = "golang.org/x/sys"
7 packages = ["unix"]
8 revision = "90b0e4468f9980bf79a2290394adaf7f045c5d24"
9
10[solve-meta]
11 analyzer-name = "dep"
12 analyzer-version = 1
13 inputs-digest = "c188619af29e454f9af8a4b24b5d13720a55a70615395ba2ded3a628fa51776a"
14 solver-name = "gps-cdcl"
15 solver-version = 1
diff --git a/vendor/github.com/hodgesds/perf-utils/Gopkg.toml b/vendor/github.com/hodgesds/perf-utils/Gopkg.toml
new file mode 100644
index 0000000..c4673b9
--- /dev/null
+++ b/vendor/github.com/hodgesds/perf-utils/Gopkg.toml
@@ -0,0 +1,34 @@
1# Gopkg.toml example
2#
3# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
4# for detailed Gopkg.toml documentation.
5#
6# required = ["github.com/user/thing/cmd/thing"]
7# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
8#
9# [[constraint]]
10# name = "github.com/user/project"
11# version = "1.0.0"
12#
13# [[constraint]]
14# name = "github.com/user/project2"
15# branch = "dev"
16# source = "github.com/myfork/project2"
17#
18# [[override]]
19# name = "github.com/x/y"
20# version = "2.4.0"
21#
22# [prune]
23# non-go = false
24# go-tests = true
25# unused-packages = true
26
27
28[[constraint]]
29 branch = "master"
30 name = "golang.org/x/sys"
31
32[prune]
33 go-tests = true
34 unused-packages = true
diff --git a/vendor/github.com/hodgesds/perf-utils/LICENSE b/vendor/github.com/hodgesds/perf-utils/LICENSE
new file mode 100644
index 0000000..06f6912
--- /dev/null
+++ b/vendor/github.com/hodgesds/perf-utils/LICENSE
@@ -0,0 +1,22 @@
1The MIT License (MIT)
2
3Copyright (c) 2019 Daniel Hodges
4
5Permission is hereby granted, free of charge, to any person obtaining a copy
6of this software and associated documentation files (the "Software"), to deal
7in the Software without restriction, including without limitation the rights
8to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9copies of the Software, and to permit persons to whom the Software is
10furnished to do so, subject to the following conditions:
11
12The above copyright notice and this permission notice shall be included in all
13copies or substantial portions of the Software.
14
15THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21SOFTWARE.
22
diff --git a/vendor/github.com/hodgesds/perf-utils/README.md b/vendor/github.com/hodgesds/perf-utils/README.md
new file mode 100644
index 0000000..f15b0ea
--- /dev/null
+++ b/vendor/github.com/hodgesds/perf-utils/README.md
@@ -0,0 +1,120 @@
1# Perf
2[![GoDoc](https://godoc.org/github.com/hodgesds/perf-utils?status.svg)](https://godoc.org/github.com/hodgesds/perf-utils)
3
4This package is a go library for interacting with the `perf` subsystem in
5Linux. It allows you to do things like see how many CPU instructions a function
6takes, profile a process for various hardware events, and other interesting
7things. The library is by no means finalized and should be considered pre-alpha
8at best.
9
10# Use Cases
11A majority of the utility methods in this package should only be used for
12testing and/or debugging performance issues. Due to the nature of the Go
13runtime, profiling at the goroutine level is extremely tricky, with the
14exception of a long running worker goroutine locked to an OS thread. Eventually
15this library could be used to implement many of the features of `perf` but be
16accessible via Go directly.
17
18## Caveats
19* Some utility functions will call
20 [`runtime.LockOSThread`](https://golang.org/pkg/runtime/#LockOSThread) for
21 you, they will also unlock the thread after profiling. ***Note*** using these
22 utility functions will incur significant overhead.
23* Overflow handling is not implemented.
24
25# Setup
26Most likely you will need to tweak some system settings unless you are running as root. From `man perf_event_open`:
27
28```
29 perf_event related configuration files
30 Files in /proc/sys/kernel/
31
32 /proc/sys/kernel/perf_event_paranoid
33 The perf_event_paranoid file can be set to restrict access to the performance counters.
34
35 2 allow only user-space measurements (default since Linux 4.6).
36 1 allow both kernel and user measurements (default before Linux 4.6).
37 0 allow access to CPU-specific data but not raw tracepoint samples.
38 -1 no restrictions.
39
40 The existence of the perf_event_paranoid file is the official method for determining if a kernel supports perf_event_open().
41
42 /proc/sys/kernel/perf_event_max_sample_rate
43 This sets the maximum sample rate. Setting this too high can allow users to sample at a rate that impacts overall machine performance and potentially lock up the machine. The default value is 100000 (samples per
44 second).
45
46 /proc/sys/kernel/perf_event_max_stack
47 This file sets the maximum depth of stack frame entries reported when generating a call trace.
48
49 /proc/sys/kernel/perf_event_mlock_kb
50 Maximum number of pages an unprivileged user can mlock(2). The default is 516 (kB).
51
52```
53
54# Example
55Say you wanted to see how many CPU instructions a particular function took:
56
57```
58package main
59
60import (
61 "fmt"
62 "log"
63 "github.com/hodgesds/perf-utils"
64)
65
66func foo() error {
67 var total int
68 for i:=0;i<1000;i++ {
69 total++
70 }
71 return nil
72}
73
74func main() {
75 profileValue, err := perf.CPUInstructions(foo)
76 if err != nil {
77 log.Fatal(err)
78 }
79 fmt.Printf("CPU instructions: %+v\n", profileValue)
80}
81```
82
83# Benchmarks
84To profile a single function call there is an overhead of ~0.4ms.
85
86```
87$ go test -bench=BenchmarkCPUCycles .
88goos: linux
89goarch: amd64
90pkg: github.com/hodgesds/perf-utils
91BenchmarkCPUCycles-8 3000 397924 ns/op 32 B/op 1 allocs/op
92PASS
93ok github.com/hodgesds/perf-utils 1.255s
94```
95
96The `Profiler` interface has low overhead and is suitable for many use cases:
97
98```
99$ go test -bench=BenchmarkProfiler .
100goos: linux
101goarch: amd64
102pkg: github.com/hodgesds/perf-utils
103BenchmarkProfiler-8 3000000 488 ns/op 32 B/op 1 allocs/op
104PASS
105ok github.com/hodgesds/perf-utils 1.981s
106```
107
108# BPF Support
109BPF is supported by using the `BPFProfiler` which is available via the
110`ProfileTracepoint` function. To use BPF you need to create the BPF program and
111then call `AttachBPF` with the file descriptor of the BPF program. This is not
112well tested so use at your own peril.
113
114# Misc
115Originally I set out to use `go generate` to build Go structs that were
116compatible with perf, I found a really good
117[article](https://utcc.utoronto.ca/~cks/space/blog/programming/GoCGoCompatibleStructs)
118on how to do so. Eventually, after digging through some of the `/x/sys/unix`
119code I found pretty much what I needed. However, I think if you are
120interested in interacting with the kernel it is a worthwhile read.
diff --git a/vendor/github.com/hodgesds/perf-utils/bpf.go b/vendor/github.com/hodgesds/perf-utils/bpf.go
new file mode 100644
index 0000000..823997b
--- /dev/null
+++ b/vendor/github.com/hodgesds/perf-utils/bpf.go
@@ -0,0 +1,22 @@
1// +build linux
2
3package perf
4
5import (
6 "golang.org/x/sys/unix"
7)
8
// BPFProfiler is a Profiler that allows attaching a Berkeley
// Packet Filter (BPF) program to an existing kprobe tracepoint event.
// You need CAP_SYS_ADMIN privileges to use this interface. See:
// https://lwn.net/Articles/683504/
type BPFProfiler interface {
	Profiler
	// AttachBPF attaches the BPF program identified by the given
	// file descriptor to the underlying perf event.
	AttachBPF(int) error
}
17
18// AttachBPF is used to attach a BPF program to a profiler by using the file
19// descriptor of the BPF program.
20func (p *profiler) AttachBPF(fd int) error {
21 return unix.IoctlSetInt(p.fd, unix.PERF_EVENT_IOC_SET_BPF, fd)
22}
diff --git a/vendor/github.com/hodgesds/perf-utils/cache_profiler.go b/vendor/github.com/hodgesds/perf-utils/cache_profiler.go
new file mode 100644
index 0000000..57d80dc
--- /dev/null
+++ b/vendor/github.com/hodgesds/perf-utils/cache_profiler.go
@@ -0,0 +1,336 @@
1// +build linux
2
3package perf
4
5import (
6 "go.uber.org/multierr"
7 "golang.org/x/sys/unix"
8)
9
const (
	// Each constant below encodes one hardware cache event for
	// perf_event_open(2): the cache id sits in the low byte, the cache
	// operation id is shifted left by 8, and the operation result id
	// (access or miss) is shifted left by 16.

	// L1DataReadHit selects L1 data cache read accesses.
	L1DataReadHit = (unix.PERF_COUNT_HW_CACHE_L1D) | (unix.PERF_COUNT_HW_CACHE_OP_READ << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
	// L1DataReadMiss selects L1 data cache read misses.
	L1DataReadMiss = (unix.PERF_COUNT_HW_CACHE_L1D) | (unix.PERF_COUNT_HW_CACHE_OP_READ << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
	// L1DataWriteHit selects L1 data cache write accesses.
	L1DataWriteHit = (unix.PERF_COUNT_HW_CACHE_L1D) | (unix.PERF_COUNT_HW_CACHE_OP_WRITE << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
	// L1InstrReadMiss selects L1 instruction cache read misses.
	L1InstrReadMiss = (unix.PERF_COUNT_HW_CACHE_L1I) | (unix.PERF_COUNT_HW_CACHE_OP_READ << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_MISS << 16)

	// LLReadHit selects last-level cache read accesses.
	LLReadHit = (unix.PERF_COUNT_HW_CACHE_LL) | (unix.PERF_COUNT_HW_CACHE_OP_READ << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
	// LLReadMiss selects last-level cache read misses.
	LLReadMiss = (unix.PERF_COUNT_HW_CACHE_LL) | (unix.PERF_COUNT_HW_CACHE_OP_READ << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
	// LLWriteHit selects last-level cache write accesses.
	LLWriteHit = (unix.PERF_COUNT_HW_CACHE_LL) | (unix.PERF_COUNT_HW_CACHE_OP_WRITE << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
	// LLWriteMiss selects last-level cache write misses.
	LLWriteMiss = (unix.PERF_COUNT_HW_CACHE_LL) | (unix.PERF_COUNT_HW_CACHE_OP_WRITE << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_MISS << 16)

	// DataTLBReadHit selects data TLB read accesses.
	DataTLBReadHit = (unix.PERF_COUNT_HW_CACHE_DTLB) | (unix.PERF_COUNT_HW_CACHE_OP_READ << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
	// DataTLBReadMiss selects data TLB read misses.
	DataTLBReadMiss = (unix.PERF_COUNT_HW_CACHE_DTLB) | (unix.PERF_COUNT_HW_CACHE_OP_READ << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
	// DataTLBWriteHit selects data TLB write accesses.
	DataTLBWriteHit = (unix.PERF_COUNT_HW_CACHE_DTLB) | (unix.PERF_COUNT_HW_CACHE_OP_WRITE << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
	// DataTLBWriteMiss selects data TLB write misses.
	DataTLBWriteMiss = (unix.PERF_COUNT_HW_CACHE_DTLB) | (unix.PERF_COUNT_HW_CACHE_OP_WRITE << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_MISS << 16)

	// InstrTLBReadHit selects instruction TLB read accesses.
	InstrTLBReadHit = (unix.PERF_COUNT_HW_CACHE_ITLB) | (unix.PERF_COUNT_HW_CACHE_OP_READ << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
	// InstrTLBReadMiss selects instruction TLB read misses.
	InstrTLBReadMiss = (unix.PERF_COUNT_HW_CACHE_ITLB) | (unix.PERF_COUNT_HW_CACHE_OP_READ << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_MISS << 16)

	// BPUReadHit selects branch prediction unit read accesses.
	BPUReadHit = (unix.PERF_COUNT_HW_CACHE_BPU) | (unix.PERF_COUNT_HW_CACHE_OP_READ << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
	// BPUReadMiss selects branch prediction unit read misses.
	BPUReadMiss = (unix.PERF_COUNT_HW_CACHE_BPU) | (unix.PERF_COUNT_HW_CACHE_OP_READ << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_MISS << 16)

	// NodeCacheReadHit selects local-memory-node read accesses.
	NodeCacheReadHit = (unix.PERF_COUNT_HW_CACHE_NODE) | (unix.PERF_COUNT_HW_CACHE_OP_READ << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
	// NodeCacheReadMiss selects local-memory-node read misses.
	NodeCacheReadMiss = (unix.PERF_COUNT_HW_CACHE_NODE) | (unix.PERF_COUNT_HW_CACHE_OP_READ << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
	// NodeCacheWriteHit selects local-memory-node write accesses.
	NodeCacheWriteHit = (unix.PERF_COUNT_HW_CACHE_NODE) | (unix.PERF_COUNT_HW_CACHE_OP_WRITE << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
	// NodeCacheWriteMiss selects local-memory-node write misses.
	NodeCacheWriteMiss = (unix.PERF_COUNT_HW_CACHE_NODE) | (unix.PERF_COUNT_HW_CACHE_OP_WRITE << 8) | (unix.PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
)
57
// cacheProfiler aggregates one Profiler per configured cache event.
type cacheProfiler struct {
	// profilers maps a cache event encoding (the constants above) to
	// the Profiler opened for it. NOTE(review): the original comment
	// said "file descriptor", but the values are Profilers.
	profilers map[int]Profiler
}
62
// NewCacheProfiler returns a new cache profiler that aggregates the
// hardware cache events (L1 data/instruction, last level, dTLB, iTLB,
// BPU, and memory node) for the given pid and cpu. Optional opts are
// forwarded to the underlying perf_event_open(2) calls. Events whose
// setup fails are silently omitted, so the resulting profiler may track
// only a subset of the events listed here.
func NewCacheProfiler(pid, cpu int, opts ...int) CacheProfiler {
	profilers := map[int]Profiler{}

	// L1 data
	op := unix.PERF_COUNT_HW_CACHE_OP_READ
	result := unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS
	l1dataReadHit, err := NewL1DataProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[L1DataReadHit] = l1dataReadHit
	}

	op = unix.PERF_COUNT_HW_CACHE_OP_READ
	result = unix.PERF_COUNT_HW_CACHE_RESULT_MISS
	l1dataReadMiss, err := NewL1DataProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[L1DataReadMiss] = l1dataReadMiss
	}

	op = unix.PERF_COUNT_HW_CACHE_OP_WRITE
	result = unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS
	l1dataWriteHit, err := NewL1DataProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[L1DataWriteHit] = l1dataWriteHit
	}

	// L1 instruction
	op = unix.PERF_COUNT_HW_CACHE_OP_READ
	result = unix.PERF_COUNT_HW_CACHE_RESULT_MISS
	l1InstrReadMiss, err := NewL1InstrProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[L1InstrReadMiss] = l1InstrReadMiss
	}

	// Last Level
	op = unix.PERF_COUNT_HW_CACHE_OP_READ
	result = unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS
	llReadHit, err := NewLLCacheProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[LLReadHit] = llReadHit
	}

	op = unix.PERF_COUNT_HW_CACHE_OP_READ
	result = unix.PERF_COUNT_HW_CACHE_RESULT_MISS
	llReadMiss, err := NewLLCacheProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[LLReadMiss] = llReadMiss
	}

	op = unix.PERF_COUNT_HW_CACHE_OP_WRITE
	result = unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS
	llWriteHit, err := NewLLCacheProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[LLWriteHit] = llWriteHit
	}

	op = unix.PERF_COUNT_HW_CACHE_OP_WRITE
	result = unix.PERF_COUNT_HW_CACHE_RESULT_MISS
	llWriteMiss, err := NewLLCacheProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[LLWriteMiss] = llWriteMiss
	}

	// dTLB
	op = unix.PERF_COUNT_HW_CACHE_OP_READ
	result = unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS
	dTLBReadHit, err := NewDataTLBProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[DataTLBReadHit] = dTLBReadHit
	}

	op = unix.PERF_COUNT_HW_CACHE_OP_READ
	result = unix.PERF_COUNT_HW_CACHE_RESULT_MISS
	dTLBReadMiss, err := NewDataTLBProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[DataTLBReadMiss] = dTLBReadMiss
	}

	op = unix.PERF_COUNT_HW_CACHE_OP_WRITE
	result = unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS
	dTLBWriteHit, err := NewDataTLBProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[DataTLBWriteHit] = dTLBWriteHit
	}

	op = unix.PERF_COUNT_HW_CACHE_OP_WRITE
	result = unix.PERF_COUNT_HW_CACHE_RESULT_MISS
	dTLBWriteMiss, err := NewDataTLBProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[DataTLBWriteMiss] = dTLBWriteMiss
	}

	// iTLB
	op = unix.PERF_COUNT_HW_CACHE_OP_READ
	result = unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS
	iTLBReadHit, err := NewInstrTLBProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[InstrTLBReadHit] = iTLBReadHit
	}

	op = unix.PERF_COUNT_HW_CACHE_OP_READ
	result = unix.PERF_COUNT_HW_CACHE_RESULT_MISS
	iTLBReadMiss, err := NewInstrTLBProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[InstrTLBReadMiss] = iTLBReadMiss
	}

	// BPU
	op = unix.PERF_COUNT_HW_CACHE_OP_READ
	result = unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS
	bpuReadHit, err := NewBPUProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[BPUReadHit] = bpuReadHit
	}

	op = unix.PERF_COUNT_HW_CACHE_OP_READ
	result = unix.PERF_COUNT_HW_CACHE_RESULT_MISS
	bpuReadMiss, err := NewBPUProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[BPUReadMiss] = bpuReadMiss
	}

	// Node
	op = unix.PERF_COUNT_HW_CACHE_OP_READ
	result = unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS
	nodeReadHit, err := NewNodeCacheProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[NodeCacheReadHit] = nodeReadHit
	}

	op = unix.PERF_COUNT_HW_CACHE_OP_READ
	result = unix.PERF_COUNT_HW_CACHE_RESULT_MISS
	nodeReadMiss, err := NewNodeCacheProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[NodeCacheReadMiss] = nodeReadMiss
	}

	op = unix.PERF_COUNT_HW_CACHE_OP_WRITE
	result = unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS
	nodeWriteHit, err := NewNodeCacheProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[NodeCacheWriteHit] = nodeWriteHit
	}

	op = unix.PERF_COUNT_HW_CACHE_OP_WRITE
	result = unix.PERF_COUNT_HW_CACHE_RESULT_MISS
	nodeWriteMiss, err := NewNodeCacheProfiler(pid, cpu, op, result, opts...)
	if err == nil {
		profilers[NodeCacheWriteMiss] = nodeWriteMiss
	}

	return &cacheProfiler{
		profilers: profilers,
	}
}
218
219// Start is used to start the CacheProfiler, it will return an error if no
220// profilers are configured.
221func (p *cacheProfiler) Start() error {
222 if len(p.profilers) == 0 {
223 return ErrNoProfiler
224 }
225 var err error
226 for _, profiler := range p.profilers {
227 err = multierr.Append(err, profiler.Start())
228 }
229 return err
230}
231
232// Reset is used to reset the CacheProfiler.
233func (p *cacheProfiler) Reset() error {
234 var err error
235 for _, profiler := range p.profilers {
236 err = multierr.Append(err, profiler.Reset())
237 }
238 return err
239}
240
241// Stop is used to reset the CacheProfiler.
242func (p *cacheProfiler) Stop() error {
243 var err error
244 for _, profiler := range p.profilers {
245 err = multierr.Append(err, profiler.Stop())
246 }
247 return err
248}
249
250// Close is used to reset the CacheProfiler.
251func (p *cacheProfiler) Close() error {
252 var err error
253 for _, profiler := range p.profilers {
254 err = multierr.Append(err, profiler.Close())
255 }
256 return err
257}
258
259// Profile is used to read the CacheProfiler CacheProfile it returns an
260// error only if all profiles fail.
261func (p *cacheProfiler) Profile() (*CacheProfile, error) {
262 var err error
263 cacheProfile := &CacheProfile{}
264 for profilerType, profiler := range p.profilers {
265 profileVal, err2 := profiler.Profile()
266 err = multierr.Append(err, err2)
267 if err2 == nil {
268 if cacheProfile.TimeEnabled == nil {
269 cacheProfile.TimeEnabled = &profileVal.TimeEnabled
270 }
271 if cacheProfile.TimeRunning == nil {
272 cacheProfile.TimeRunning = &profileVal.TimeRunning
273 }
274 switch {
275 // L1 data
276 case (profilerType ^ L1DataReadHit) == 0:
277 cacheProfile.L1DataReadHit = &profileVal.Value
278 case (profilerType ^ L1DataReadMiss) == 0:
279 cacheProfile.L1DataReadMiss = &profileVal.Value
280 case (profilerType ^ L1DataWriteHit) == 0:
281 cacheProfile.L1DataWriteHit = &profileVal.Value
282
283 // L1 instruction
284 case (profilerType ^ L1InstrReadMiss) == 0:
285 cacheProfile.L1InstrReadMiss = &profileVal.Value
286
287 // Last Level
288 case (profilerType ^ LLReadHit) == 0:
289 cacheProfile.LastLevelReadHit = &profileVal.Value
290 case (profilerType ^ LLReadMiss) == 0:
291 cacheProfile.LastLevelReadMiss = &profileVal.Value
292 case (profilerType ^ LLWriteHit) == 0:
293 cacheProfile.LastLevelWriteHit = &profileVal.Value
294 case (profilerType ^ LLWriteMiss) == 0:
295 cacheProfile.LastLevelWriteMiss = &profileVal.Value
296
297 // dTLB
298 case (profilerType ^ DataTLBReadHit) == 0:
299 cacheProfile.DataTLBReadHit = &profileVal.Value
300 case (profilerType ^ DataTLBReadMiss) == 0:
301 cacheProfile.DataTLBReadMiss = &profileVal.Value
302 case (profilerType ^ DataTLBWriteHit) == 0:
303 cacheProfile.DataTLBWriteHit = &profileVal.Value
304 case (profilerType ^ DataTLBWriteMiss) == 0:
305 cacheProfile.DataTLBWriteMiss = &profileVal.Value
306
307 // iTLB
308 case (profilerType ^ InstrTLBReadHit) == 0:
309 cacheProfile.InstrTLBReadHit = &profileVal.Value
310 case (profilerType ^ InstrTLBReadMiss) == 0:
311 cacheProfile.InstrTLBReadMiss = &profileVal.Value
312
313 // BPU
314 case (profilerType ^ BPUReadHit) == 0:
315 cacheProfile.BPUReadHit = &profileVal.Value
316 case (profilerType ^ BPUReadMiss) == 0:
317 cacheProfile.BPUReadMiss = &profileVal.Value
318
319 // node
320 case (profilerType ^ NodeCacheReadHit) == 0:
321 cacheProfile.NodeReadHit = &profileVal.Value
322 case (profilerType ^ NodeCacheReadMiss) == 0:
323 cacheProfile.NodeReadMiss = &profileVal.Value
324 case (profilerType ^ NodeCacheWriteHit) == 0:
325 cacheProfile.NodeWriteHit = &profileVal.Value
326 case (profilerType ^ NodeCacheWriteMiss) == 0:
327 cacheProfile.NodeWriteMiss = &profileVal.Value
328 }
329 }
330 }
331 if len(multierr.Errors(err)) == len(p.profilers) {
332 return nil, err
333 }
334
335 return cacheProfile, nil
336}
diff --git a/vendor/github.com/hodgesds/perf-utils/events.go b/vendor/github.com/hodgesds/perf-utils/events.go
new file mode 100644
index 0000000..851be8c
--- /dev/null
+++ b/vendor/github.com/hodgesds/perf-utils/events.go
@@ -0,0 +1,98 @@
1// +build linux
2
3package perf
4
5import (
6 "fmt"
7 "strconv"
8 "strings"
9 "unsafe"
10
11 "golang.org/x/sys/unix"
12)
13
const (
	// PERF_TYPE_TRACEPOINT is the perf event type for kernel
	// tracepoints; it is mirrored here because it is not defined in
	// x/sys/unix.
	PERF_TYPE_TRACEPOINT = 2
)
18
19// AvailableEvents returns the list of available events.
20func AvailableEvents() (map[string][]string, error) {
21 events := map[string][]string{}
22 rawEvents, err := fileToStrings(TracingDir + "/available_events")
23 // Events are colon delimited by type so parse the type and add sub
24 // events appropriately.
25 if err != nil {
26 return events, err
27 }
28 for _, rawEvent := range rawEvents {
29 splits := strings.Split(rawEvent, ":")
30 if len(splits) <= 1 {
31 continue
32 }
33 eventTypeEvents, found := events[splits[0]]
34 if found {
35 events[splits[0]] = append(eventTypeEvents, splits[1])
36 continue
37 }
38 events[splits[0]] = []string{splits[1]}
39 }
40 return events, err
41}
42
43// AvailableTracers returns the list of available tracers.
44func AvailableTracers() ([]string, error) {
45 return fileToStrings(TracingDir + "/available_tracers")
46}
47
48// CurrentTracer returns the current tracer.
49func CurrentTracer() (string, error) {
50 res, err := fileToStrings(TracingDir + "/current_tracer")
51 return res[0], err
52}
53
54// getTracepointConfig is used to get the configuration for a trace event.
55func getTracepointConfig(kind, event string) (uint64, error) {
56 res, err := fileToStrings(TracingDir + fmt.Sprintf("/events/%s/%s/id", kind, event))
57 if err != nil {
58 return 0, err
59 }
60 return strconv.ParseUint(res[0], 10, 64)
61}
62
63// ProfileTracepoint is used to profile a kernel tracepoint event. Events can
64// be listed with `perf list` for Tracepoint Events or in the
65// /sys/kernel/debug/tracing/events directory with the kind being the directory
66// and the event being the subdirectory.
67func ProfileTracepoint(kind, event string, pid, cpu int, opts ...int) (BPFProfiler, error) {
68 config, err := getTracepointConfig(kind, event)
69 if err != nil {
70 return nil, err
71 }
72 eventAttr := &unix.PerfEventAttr{
73 Type: PERF_TYPE_TRACEPOINT,
74 Config: config,
75 Size: uint32(unsafe.Sizeof(unix.PerfEventAttr{})),
76 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeHv,
77 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
78 Sample_type: PERF_SAMPLE_IDENTIFIER,
79 }
80 var eventOps int
81 if len(opts) > 0 {
82 eventOps = opts[0]
83 }
84 fd, err := unix.PerfEventOpen(
85 eventAttr,
86 pid,
87 cpu,
88 -1,
89 eventOps,
90 )
91 if err != nil {
92 return nil, err
93 }
94
95 return &profiler{
96 fd: fd,
97 }, nil
98}
diff --git a/vendor/github.com/hodgesds/perf-utils/fs_utils.go b/vendor/github.com/hodgesds/perf-utils/fs_utils.go
new file mode 100644
index 0000000..1145c9e
--- /dev/null
+++ b/vendor/github.com/hodgesds/perf-utils/fs_utils.go
@@ -0,0 +1,102 @@
1// +build linux
2
3package perf
4
5import (
6 "bufio"
7 "fmt"
8 "os"
9 "strings"
10)
11
const (
	// DebugFS is the filesystem type for debugfs.
	DebugFS = "debugfs"

	// TraceFS is the filesystem type for tracefs.
	TraceFS = "tracefs"

	// ProcMounts is the path of the mount table in procfs.
	ProcMounts = "/proc/mounts"

	// PerfMaxStack is the procfs file holding the maximum perf call
	// stack depth. NOTE(review): the original comment called this a
	// "mount point for the max perf event size", which does not match
	// the path.
	PerfMaxStack = "/proc/sys/kernel/perf_event_max_stack"

	// PerfMaxContexts is the procfs file holding the maximum number of
	// perf contexts per stack.
	PerfMaxContexts = "/proc/sys/kernel/perf_event_max_contexts_per_stack"

	// SyscallsDir is the default tracing event syscalls directory.
	SyscallsDir = "/sys/kernel/debug/tracing/events/syscalls/"

	// TracingDir is the default tracing directory.
	TracingDir = "/sys/kernel/debug/tracing"
)

var (
	// ErrNoMount is returned when no mount of the requested file
	// system type exists.
	ErrNoMount = fmt.Errorf("no such mount")
)
39
40// TraceFSMount returns the first found mount point of a tracefs file system.
41func TraceFSMount() (string, error) {
42 mounts, err := GetFSMount(TraceFS)
43 if err != nil {
44 return "", err
45 }
46 if len(mounts) == 0 {
47 return "", ErrNoMount
48 }
49 return mounts[0], nil
50}
51
52// DebugFSMount returns the first found mount point of a debugfs file system.
53func DebugFSMount() (string, error) {
54 mounts, err := GetFSMount(DebugFS)
55 if err != nil {
56 return "", err
57 }
58 if len(mounts) == 0 {
59 return "", ErrNoMount
60 }
61 return mounts[0], nil
62}
63
64// GetFSMount is a helper function to get a mount file system type.
65func GetFSMount(mountType string) ([]string, error) {
66 mounts := []string{}
67 file, err := os.Open(ProcMounts)
68 if err != nil {
69 return mounts, err
70 }
71 scanner := bufio.NewScanner(file)
72 for scanner.Scan() {
73 mountInfo := strings.Split(scanner.Text(), " ")
74 if len(mountInfo) > 3 && mountInfo[2] == mountType {
75 mounts = append(mounts, mountInfo[1])
76 }
77 }
78 if err := scanner.Err(); err != nil {
79 return mounts, err
80 }
81
82 return mounts, file.Close()
83}
84
85// fileToStrings is a helper method that reads a line line by line and returns
86// a slice of strings.
87func fileToStrings(path string) ([]string, error) {
88 res := []string{}
89 f, err := os.Open(path)
90 if err != nil {
91 return res, err
92 }
93 scanner := bufio.NewScanner(f)
94 for scanner.Scan() {
95 res = append(res, scanner.Text())
96 }
97 if err := scanner.Err(); err != nil {
98 return res, err
99 }
100
101 return res, nil
102}
diff --git a/vendor/github.com/hodgesds/perf-utils/group_profiler.go b/vendor/github.com/hodgesds/perf-utils/group_profiler.go
new file mode 100644
index 0000000..d1df0f4
--- /dev/null
+++ b/vendor/github.com/hodgesds/perf-utils/group_profiler.go
@@ -0,0 +1,170 @@
1// +build linux
2
3package perf
4
5import (
6 "encoding/binary"
7 "fmt"
8 "syscall"
9
10 "go.uber.org/multierr"
11 "golang.org/x/sys/unix"
12)
13
// ErrNoLeader is returned when a leader of a GroupProfiler is not defined.
var ErrNoLeader = fmt.Errorf("No leader defined")

// GroupProfileValue is returned from a GroupProfiler and mirrors the
// kernel's group read_format.
type GroupProfileValue struct {
	// Events is the number of events in the group read.
	Events uint64
	// TimeEnabled is the total time the group was enabled.
	TimeEnabled uint64
	// TimeRunning is the total time the group was actually running.
	TimeRunning uint64
	// Values holds the raw counter value of each group member.
	Values []uint64
}

// GroupProfiler is used to setup a group profiler: a set of perf events
// scheduled together and read as one unit.
type GroupProfiler interface {
	Start() error
	Reset() error
	Stop() error
	Close() error
	Profile() (*GroupProfileValue, error)
}

// groupProfiler implements the GroupProfiler interface.
type groupProfiler struct {
	fds []int // leader is always element 0
}
38
// NewGroupProfiler returns a GroupProfiler that opens every eventAttr
// in a single perf event group. The first attribute becomes the group
// leader (opened disabled); if a later event fails to open, all
// previously opened descriptors are closed before returning the error.
func NewGroupProfiler(pid, cpu, opts int, eventAttrs ...unix.PerfEventAttr) (GroupProfiler, error) {
	fds := make([]int, len(eventAttrs))

	for i, eventAttr := range eventAttrs {
		// common configs
		eventAttr.Size = EventAttrSize
		eventAttr.Sample_type = PERF_SAMPLE_IDENTIFIER

		// Leader fd must be opened first
		if i == 0 {
			// leader specific configs
			eventAttr.Bits = unix.PerfBitDisabled | unix.PerfBitExcludeHv
			eventAttr.Read_format = unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED | unix.PERF_FORMAT_GROUP

			fd, err := unix.PerfEventOpen(
				&eventAttr,
				pid,
				cpu,
				-1, // a group fd of -1 makes this event the leader
				opts,
			)
			if err != nil {
				return nil, err
			}
			fds[i] = fd
			continue
		}

		// non leader configs
		eventAttr.Read_format = unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED | unix.PERF_FORMAT_GROUP
		eventAttr.Bits = unix.PerfBitExcludeHv

		fd, err := unix.PerfEventOpen(
			&eventAttr,
			pid,
			cpu,
			fds[0], // attach to the leader's group
			opts,
		)
		if err != nil {
			// cleanup any old Fds
			for ii, fd2 := range fds {
				if ii == i {
					break // fds[i:] were never opened
				}
				err = multierr.Append(err, unix.Close(fd2))
			}
			return nil, err
		}
		fds[i] = fd
	}

	return &groupProfiler{
		fds: fds,
	}, nil
}
96
97// Start is used to start the GroupProfiler.
98func (p *groupProfiler) Start() error {
99 if len(p.fds) == 0 {
100 return ErrNoLeader
101 }
102 return unix.IoctlSetInt(p.fds[0], unix.PERF_EVENT_IOC_ENABLE, 0)
103}
104
105// Reset is used to reset the GroupProfiler.
106func (p *groupProfiler) Reset() error {
107 if len(p.fds) == 0 {
108 return ErrNoLeader
109 }
110 return unix.IoctlSetInt(p.fds[0], unix.PERF_EVENT_IOC_RESET, 0)
111}
112
113// Stop is used to stop the GroupProfiler.
114func (p *groupProfiler) Stop() error {
115 if len(p.fds) == 0 {
116 return ErrNoLeader
117 }
118 return unix.IoctlSetInt(p.fds[0], unix.PERF_EVENT_IOC_DISABLE, 0)
119}
120
121// Close is used to close the GroupProfiler.
122func (p *groupProfiler) Close() error {
123 var err error
124 for _, fd := range p.fds {
125 err = multierr.Append(err, unix.Close(fd))
126 }
127 return err
128}
129
130// Profile is used to return the GroupProfileValue of the GroupProfiler.
131func (p *groupProfiler) Profile() (*GroupProfileValue, error) {
132 nEvents := len(p.fds)
133 if nEvents == 0 {
134 return nil, ErrNoLeader
135 }
136
137 // read format of the raw event looks like this:
138 /*
139 struct read_format {
140 u64 nr; // The number of events /
141 u64 time_enabled; // if PERF_FORMAT_TOTAL_TIME_ENABLED
142 u64 time_running; // if PERF_FORMAT_TOTAL_TIME_RUNNING
143 struct {
144 u64 value; // The value of the event
145 u64 id; // if PERF_FORMAT_ID
146 } values[nr];
147 };
148 */
149
150 buf := make([]byte, 24+8*nEvents)
151 _, err := syscall.Read(p.fds[0], buf)
152 if err != nil {
153 return nil, err
154 }
155
156 val := &GroupProfileValue{
157 Events: binary.LittleEndian.Uint64(buf[0:8]),
158 TimeEnabled: binary.LittleEndian.Uint64(buf[8:16]),
159 TimeRunning: binary.LittleEndian.Uint64(buf[16:24]),
160 Values: make([]uint64, len(p.fds)),
161 }
162
163 offset := 24
164 for i := range p.fds {
165 val.Values[i] = binary.LittleEndian.Uint64(buf[offset : offset+8])
166 offset += 8
167 }
168
169 return val, nil
170}
diff --git a/vendor/github.com/hodgesds/perf-utils/hardware_profiler.go b/vendor/github.com/hodgesds/perf-utils/hardware_profiler.go
new file mode 100644
index 0000000..d7134ab
--- /dev/null
+++ b/vendor/github.com/hodgesds/perf-utils/hardware_profiler.go
@@ -0,0 +1,157 @@
1// +build linux
2
3package perf
4
5import (
6 "go.uber.org/multierr"
7 "golang.org/x/sys/unix"
8)
9
// hardwareProfiler aggregates one Profiler per hardware counter type.
type hardwareProfiler struct {
	// profilers maps a PERF_COUNT_HW_* counter type to the Profiler
	// opened for it. NOTE(review): the original comment said "file
	// descriptor", but the values are Profilers.
	profilers map[int]Profiler
}
14
// NewHardwareProfiler returns a new hardware profiler aggregating all
// PERF_COUNT_HW_* counters for the given pid and cpu. Optional opts are
// forwarded to the underlying perf_event_open(2) calls. Counters whose
// setup fails are silently omitted, so the resulting profiler may track
// only a subset of the counters listed here.
func NewHardwareProfiler(pid, cpu int, opts ...int) HardwareProfiler {
	profilers := map[int]Profiler{}

	cpuCycleProfiler, err := NewCPUCycleProfiler(pid, cpu, opts...)
	if err == nil {
		profilers[unix.PERF_COUNT_HW_CPU_CYCLES] = cpuCycleProfiler
	}

	instrProfiler, err := NewInstrProfiler(pid, cpu, opts...)
	if err == nil {
		profilers[unix.PERF_COUNT_HW_INSTRUCTIONS] = instrProfiler
	}

	cacheRefProfiler, err := NewCacheRefProfiler(pid, cpu, opts...)
	if err == nil {
		profilers[unix.PERF_COUNT_HW_CACHE_REFERENCES] = cacheRefProfiler
	}

	cacheMissesProfiler, err := NewCacheMissesProfiler(pid, cpu, opts...)
	if err == nil {
		profilers[unix.PERF_COUNT_HW_CACHE_MISSES] = cacheMissesProfiler
	}

	branchInstrProfiler, err := NewBranchInstrProfiler(pid, cpu, opts...)
	if err == nil {
		profilers[unix.PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = branchInstrProfiler
	}

	branchMissesProfiler, err := NewBranchMissesProfiler(pid, cpu, opts...)
	if err == nil {
		profilers[unix.PERF_COUNT_HW_BRANCH_MISSES] = branchMissesProfiler
	}

	busCyclesProfiler, err := NewBusCyclesProfiler(pid, cpu, opts...)
	if err == nil {
		profilers[unix.PERF_COUNT_HW_BUS_CYCLES] = busCyclesProfiler
	}

	stalledCyclesFrontProfiler, err := NewStalledCyclesFrontProfiler(pid, cpu, opts...)
	if err == nil {
		profilers[unix.PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = stalledCyclesFrontProfiler
	}

	stalledCyclesBackProfiler, err := NewStalledCyclesBackProfiler(pid, cpu, opts...)
	if err == nil {
		profilers[unix.PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = stalledCyclesBackProfiler
	}

	refCPUCyclesProfiler, err := NewRefCPUCyclesProfiler(pid, cpu, opts...)
	if err == nil {
		profilers[unix.PERF_COUNT_HW_REF_CPU_CYCLES] = refCPUCyclesProfiler
	}

	return &hardwareProfiler{
		profilers: profilers,
	}
}
73
74// Start is used to start the HardwareProfiler.
75func (p *hardwareProfiler) Start() error {
76 if len(p.profilers) == 0 {
77 return ErrNoProfiler
78 }
79 var err error
80 for _, profiler := range p.profilers {
81 err = multierr.Append(err, profiler.Start())
82 }
83 return err
84}
85
86// Reset is used to reset the HardwareProfiler.
87func (p *hardwareProfiler) Reset() error {
88 var err error
89 for _, profiler := range p.profilers {
90 err = multierr.Append(err, profiler.Reset())
91 }
92 return err
93}
94
95// Stop is used to reset the HardwareProfiler.
96func (p *hardwareProfiler) Stop() error {
97 var err error
98 for _, profiler := range p.profilers {
99 err = multierr.Append(err, profiler.Stop())
100 }
101 return err
102}
103
104// Close is used to reset the HardwareProfiler.
105func (p *hardwareProfiler) Close() error {
106 var err error
107 for _, profiler := range p.profilers {
108 err = multierr.Append(err, profiler.Close())
109 }
110 return err
111}
112
113// Profile is used to read the HardwareProfiler HardwareProfile it returns an
114// error only if all profiles fail.
115func (p *hardwareProfiler) Profile() (*HardwareProfile, error) {
116 var err error
117 hwProfile := &HardwareProfile{}
118 for profilerType, profiler := range p.profilers {
119 profileVal, err2 := profiler.Profile()
120 err = multierr.Append(err, err2)
121 if err2 == nil {
122 if hwProfile.TimeEnabled == nil {
123 hwProfile.TimeEnabled = &profileVal.TimeEnabled
124 }
125 if hwProfile.TimeRunning == nil {
126 hwProfile.TimeRunning = &profileVal.TimeRunning
127 }
128 switch profilerType {
129 case unix.PERF_COUNT_HW_CPU_CYCLES:
130 hwProfile.CPUCycles = &profileVal.Value
131 case unix.PERF_COUNT_HW_INSTRUCTIONS:
132 hwProfile.Instructions = &profileVal.Value
133 case unix.PERF_COUNT_HW_CACHE_REFERENCES:
134 hwProfile.CacheRefs = &profileVal.Value
135 case unix.PERF_COUNT_HW_CACHE_MISSES:
136 hwProfile.CacheMisses = &profileVal.Value
137 case unix.PERF_COUNT_HW_BRANCH_INSTRUCTIONS:
138 hwProfile.BranchInstr = &profileVal.Value
139 case unix.PERF_COUNT_HW_BRANCH_MISSES:
140 hwProfile.BranchMisses = &profileVal.Value
141 case unix.PERF_COUNT_HW_BUS_CYCLES:
142 hwProfile.BusCycles = &profileVal.Value
143 case unix.PERF_COUNT_HW_STALLED_CYCLES_FRONTEND:
144 hwProfile.StalledCyclesFrontend = &profileVal.Value
145 case unix.PERF_COUNT_HW_STALLED_CYCLES_BACKEND:
146 hwProfile.StalledCyclesBackend = &profileVal.Value
147 case unix.PERF_COUNT_HW_REF_CPU_CYCLES:
148 hwProfile.RefCPUCycles = &profileVal.Value
149 }
150 }
151 }
152 if len(multierr.Errors(err)) == len(p.profilers) {
153 return nil, err
154 }
155
156 return hwProfile, nil
157}
diff --git a/vendor/github.com/hodgesds/perf-utils/process_profile.go b/vendor/github.com/hodgesds/perf-utils/process_profile.go
new file mode 100644
index 0000000..45f6a5c
--- /dev/null
+++ b/vendor/github.com/hodgesds/perf-utils/process_profile.go
@@ -0,0 +1,507 @@
1// +build linux
2
3package perf
4
5import (
6 "encoding/binary"
7 "fmt"
8 "syscall"
9 "unsafe"
10
11 "golang.org/x/sys/unix"
12)
13
const (
	// PERF_SAMPLE_IDENTIFIER is not defined in x/sys/unix; value taken from
	// the perf_event_open(2) sample_type flags.
	PERF_SAMPLE_IDENTIFIER = 1 << 16

	// PERF_IOC_FLAG_GROUP is not defined in x/sys/unix; it makes the perf
	// ioctls act on an entire event group rather than a single event.
	PERF_IOC_FLAG_GROUP = 1 << 0
)
21
var (
	// ErrNoProfiler is returned when no profiler is available for profiling.
	// Error strings are lowercase per Go convention (staticcheck ST1005).
	ErrNoProfiler = fmt.Errorf("no profiler available")
)
26
// Profiler is a profiler. The lifecycle is: Start to enable counting,
// Profile to read the current counter, Stop to disable counting, Reset to
// zero the counter, and Close to release the underlying perf fd.
type Profiler interface {
	Start() error
	Reset() error
	Stop() error
	Close() error
	Profile() (*ProfileValue, error)
}

// HardwareProfiler is a hardware profiler; it aggregates the
// PERF_COUNT_HW_* counters into a single HardwareProfile.
type HardwareProfiler interface {
	Start() error
	Reset() error
	Stop() error
	Close() error
	Profile() (*HardwareProfile, error)
}

// HardwareProfile is returned by a HardwareProfiler. Depending on kernel
// configuration some fields may return nil. TimeEnabled and TimeRunning are
// taken from the first counter that reads successfully.
type HardwareProfile struct {
	CPUCycles             *uint64 `json:"cpu_cycles,omitempty"`
	Instructions          *uint64 `json:"instructions,omitempty"`
	CacheRefs             *uint64 `json:"cache_refs,omitempty"`
	CacheMisses           *uint64 `json:"cache_misses,omitempty"`
	BranchInstr           *uint64 `json:"branch_instr,omitempty"`
	BranchMisses          *uint64 `json:"branch_misses,omitempty"`
	BusCycles             *uint64 `json:"bus_cycles,omitempty"`
	StalledCyclesFrontend *uint64 `json:"stalled_cycles_frontend,omitempty"`
	StalledCyclesBackend  *uint64 `json:"stalled_cycles_backend,omitempty"`
	RefCPUCycles          *uint64 `json:"ref_cpu_cycles,omitempty"`
	TimeEnabled           *uint64 `json:"time_enabled,omitempty"`
	TimeRunning           *uint64 `json:"time_running,omitempty"`
}

// SoftwareProfiler is a software profiler; it aggregates the
// PERF_COUNT_SW_* counters into a single SoftwareProfile.
type SoftwareProfiler interface {
	Start() error
	Reset() error
	Stop() error
	Close() error
	Profile() (*SoftwareProfile, error)
}

// SoftwareProfile is returned by a SoftwareProfiler. Counters that could
// not be opened or read are nil.
type SoftwareProfile struct {
	CPUClock        *uint64 `json:"cpu_clock,omitempty"`
	TaskClock       *uint64 `json:"task_clock,omitempty"`
	PageFaults      *uint64 `json:"page_faults,omitempty"`
	ContextSwitches *uint64 `json:"context_switches,omitempty"`
	CPUMigrations   *uint64 `json:"cpu_migrations,omitempty"`
	MinorPageFaults *uint64 `json:"minor_page_faults,omitempty"`
	MajorPageFaults *uint64 `json:"major_page_faults,omitempty"`
	AlignmentFaults *uint64 `json:"alignment_faults,omitempty"`
	EmulationFaults *uint64 `json:"emulation_faults,omitempty"`
	TimeEnabled     *uint64 `json:"time_enabled,omitempty"`
	TimeRunning     *uint64 `json:"time_running,omitempty"`
}

// CacheProfiler is a cache profiler; it aggregates the
// PERF_COUNT_HW_CACHE_* counters into a single CacheProfile.
type CacheProfiler interface {
	Start() error
	Reset() error
	Stop() error
	Close() error
	Profile() (*CacheProfile, error)
}

// CacheProfile is returned by a CacheProfiler. Counters that could not be
// opened or read are nil.
type CacheProfile struct {
	L1DataReadHit      *uint64 `json:"l1_data_read_hit,omitempty"`
	L1DataReadMiss     *uint64 `json:"l1_data_read_miss,omitempty"`
	L1DataWriteHit     *uint64 `json:"l1_data_write_hit,omitempty"`
	L1InstrReadMiss    *uint64 `json:"l1_instr_read_miss,omitempty"`
	LastLevelReadHit   *uint64 `json:"last_level_read_hit,omitempty"`
	LastLevelReadMiss  *uint64 `json:"last_level_read_miss,omitempty"`
	LastLevelWriteHit  *uint64 `json:"last_level_write_hit,omitempty"`
	LastLevelWriteMiss *uint64 `json:"last_level_write_miss,omitempty"`
	DataTLBReadHit     *uint64 `json:"data_tlb_read_hit,omitempty"`
	DataTLBReadMiss    *uint64 `json:"data_tlb_read_miss,omitempty"`
	DataTLBWriteHit    *uint64 `json:"data_tlb_write_hit,omitempty"`
	DataTLBWriteMiss   *uint64 `json:"data_tlb_write_miss,omitempty"`
	InstrTLBReadHit    *uint64 `json:"instr_tlb_read_hit,omitempty"`
	InstrTLBReadMiss   *uint64 `json:"instr_tlb_read_miss,omitempty"`
	BPUReadHit         *uint64 `json:"bpu_read_hit,omitempty"`
	BPUReadMiss        *uint64 `json:"bpu_read_miss,omitempty"`
	NodeReadHit        *uint64 `json:"node_read_hit,omitempty"`
	NodeReadMiss       *uint64 `json:"node_read_miss,omitempty"`
	NodeWriteHit       *uint64 `json:"node_write_hit,omitempty"`
	NodeWriteMiss      *uint64 `json:"node_write_miss,omitempty"`
	TimeEnabled        *uint64 `json:"time_enabled,omitempty"`
	TimeRunning        *uint64 `json:"time_running,omitempty"`
}

// ProfileValue is a value returned by a profiler: the raw counter plus the
// times (in nanoseconds) the event was enabled and actually running.
type ProfileValue struct {
	Value       uint64
	TimeEnabled uint64
	TimeRunning uint64
}

// profiler is used to profile a process; it wraps a single perf event
// file descriptor returned by perf_event_open(2).
type profiler struct {
	// fd is the perf event file descriptor.
	fd int
}
132
133// NewProfiler creates a new hardware profiler. It does not support grouping.
134func NewProfiler(profilerType uint32, config uint64, pid, cpu int, opts ...int) (Profiler, error) {
135 eventAttr := &unix.PerfEventAttr{
136 Type: profilerType,
137 Config: config,
138 Size: uint32(unsafe.Sizeof(unix.PerfEventAttr{})),
139 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeHv | unix.PerfBitInherit,
140 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
141 Sample_type: PERF_SAMPLE_IDENTIFIER,
142 }
143 var eventOps int
144 if len(opts) > 0 {
145 eventOps = opts[0]
146 }
147 fd, err := unix.PerfEventOpen(
148 eventAttr,
149 pid,
150 cpu,
151 -1,
152 eventOps,
153 )
154 if err != nil {
155 return nil, err
156 }
157
158 return &profiler{
159 fd: fd,
160 }, nil
161}
162
163// NewCPUCycleProfiler returns a Profiler that profiles CPU cycles.
164func NewCPUCycleProfiler(pid, cpu int, opts ...int) (Profiler, error) {
165 return NewProfiler(
166 unix.PERF_TYPE_HARDWARE,
167 unix.PERF_COUNT_HW_CPU_CYCLES,
168 pid,
169 cpu,
170 opts...,
171 )
172}
173
174// NewInstrProfiler returns a Profiler that profiles CPU instructions.
175func NewInstrProfiler(pid, cpu int, opts ...int) (Profiler, error) {
176 return NewProfiler(
177 unix.PERF_TYPE_HARDWARE,
178 unix.PERF_COUNT_HW_INSTRUCTIONS,
179 pid,
180 cpu,
181 opts...,
182 )
183}
184
185// NewCacheRefProfiler returns a Profiler that profiles cache references.
186func NewCacheRefProfiler(pid, cpu int, opts ...int) (Profiler, error) {
187 return NewProfiler(
188 unix.PERF_TYPE_HARDWARE,
189 unix.PERF_COUNT_HW_CACHE_REFERENCES,
190 pid,
191 cpu,
192 opts...,
193 )
194}
195
196// NewCacheMissesProfiler returns a Profiler that profiles cache misses.
197func NewCacheMissesProfiler(pid, cpu int, opts ...int) (Profiler, error) {
198 return NewProfiler(
199 unix.PERF_TYPE_HARDWARE,
200 unix.PERF_COUNT_HW_CACHE_MISSES,
201 pid,
202 cpu,
203 opts...,
204 )
205}
206
207// NewBranchInstrProfiler returns a Profiler that profiles branch instructions.
208func NewBranchInstrProfiler(pid, cpu int, opts ...int) (Profiler, error) {
209 return NewProfiler(
210 unix.PERF_TYPE_HARDWARE,
211 unix.PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
212 pid,
213 cpu,
214 opts...,
215 )
216}
217
218// NewBranchMissesProfiler returns a Profiler that profiles branch misses.
219func NewBranchMissesProfiler(pid, cpu int, opts ...int) (Profiler, error) {
220 return NewProfiler(
221 unix.PERF_TYPE_HARDWARE,
222 unix.PERF_COUNT_HW_BRANCH_MISSES,
223 pid,
224 cpu,
225 opts...,
226 )
227}
228
229// NewBusCyclesProfiler returns a Profiler that profiles bus cycles.
230func NewBusCyclesProfiler(pid, cpu int, opts ...int) (Profiler, error) {
231 return NewProfiler(
232 unix.PERF_TYPE_HARDWARE,
233 unix.PERF_COUNT_HW_BUS_CYCLES,
234 pid,
235 cpu,
236 opts...,
237 )
238}
239
240// NewStalledCyclesFrontProfiler returns a Profiler that profiles stalled
241// frontend cycles.
242func NewStalledCyclesFrontProfiler(pid, cpu int, opts ...int) (Profiler, error) {
243 return NewProfiler(
244 unix.PERF_TYPE_HARDWARE,
245 unix.PERF_COUNT_HW_STALLED_CYCLES_FRONTEND,
246 pid,
247 cpu,
248 opts...,
249 )
250}
251
252// NewStalledCyclesBackProfiler returns a Profiler that profiles stalled
253// backend cycles.
254func NewStalledCyclesBackProfiler(pid, cpu int, opts ...int) (Profiler, error) {
255 return NewProfiler(
256 unix.PERF_TYPE_HARDWARE,
257 unix.PERF_COUNT_HW_STALLED_CYCLES_BACKEND,
258 pid,
259 cpu,
260 opts...,
261 )
262}
263
264// NewRefCPUCyclesProfiler returns a Profiler that profiles CPU cycles, it
265// is not affected by frequency scaling.
266func NewRefCPUCyclesProfiler(pid, cpu int, opts ...int) (Profiler, error) {
267 return NewProfiler(
268 unix.PERF_TYPE_HARDWARE,
269 unix.PERF_COUNT_HW_REF_CPU_CYCLES,
270 pid,
271 cpu,
272 opts...,
273 )
274}
275
276// NewCPUClockProfiler returns a Profiler that profiles CPU clock speed.
277func NewCPUClockProfiler(pid, cpu int, opts ...int) (Profiler, error) {
278 return NewProfiler(
279 unix.PERF_TYPE_SOFTWARE,
280 unix.PERF_COUNT_SW_CPU_CLOCK,
281 pid,
282 cpu,
283 opts...,
284 )
285}
286
287// NewTaskClockProfiler returns a Profiler that profiles clock count of the
288// running task.
289func NewTaskClockProfiler(pid, cpu int, opts ...int) (Profiler, error) {
290 return NewProfiler(
291 unix.PERF_TYPE_SOFTWARE,
292 unix.PERF_COUNT_SW_TASK_CLOCK,
293 pid,
294 cpu,
295 opts...,
296 )
297}
298
299// NewPageFaultProfiler returns a Profiler that profiles the number of page
300// faults.
301func NewPageFaultProfiler(pid, cpu int, opts ...int) (Profiler, error) {
302 return NewProfiler(
303 unix.PERF_TYPE_SOFTWARE,
304 unix.PERF_COUNT_SW_PAGE_FAULTS,
305 pid,
306 cpu,
307 opts...,
308 )
309}
310
311// NewCtxSwitchesProfiler returns a Profiler that profiles the number of context
312// switches.
313func NewCtxSwitchesProfiler(pid, cpu int, opts ...int) (Profiler, error) {
314 return NewProfiler(
315 unix.PERF_TYPE_SOFTWARE,
316 unix.PERF_COUNT_SW_CONTEXT_SWITCHES,
317 pid,
318 cpu,
319 opts...,
320 )
321}
322
323// NewCPUMigrationsProfiler returns a Profiler that profiles the number of times
324// the process has migrated to a new CPU.
325func NewCPUMigrationsProfiler(pid, cpu int, opts ...int) (Profiler, error) {
326 return NewProfiler(
327 unix.PERF_TYPE_SOFTWARE,
328 unix.PERF_COUNT_SW_CPU_MIGRATIONS,
329 pid,
330 cpu,
331 opts...,
332 )
333}
334
335// NewMinorFaultsProfiler returns a Profiler that profiles the number of minor
336// page faults.
337func NewMinorFaultsProfiler(pid, cpu int, opts ...int) (Profiler, error) {
338 return NewProfiler(
339 unix.PERF_TYPE_SOFTWARE,
340 unix.PERF_COUNT_SW_PAGE_FAULTS_MIN,
341 pid,
342 cpu,
343 opts...,
344 )
345}
346
347// NewMajorFaultsProfiler returns a Profiler that profiles the number of major
348// page faults.
349func NewMajorFaultsProfiler(pid, cpu int, opts ...int) (Profiler, error) {
350 return NewProfiler(
351 unix.PERF_TYPE_SOFTWARE,
352 unix.PERF_COUNT_SW_PAGE_FAULTS_MAJ,
353 pid,
354 cpu,
355 opts...,
356 )
357}
358
359// NewAlignFaultsProfiler returns a Profiler that profiles the number of
360// alignment faults.
361func NewAlignFaultsProfiler(pid, cpu int, opts ...int) (Profiler, error) {
362 return NewProfiler(
363 unix.PERF_TYPE_SOFTWARE,
364 unix.PERF_COUNT_SW_ALIGNMENT_FAULTS,
365 pid,
366 cpu,
367 opts...,
368 )
369}
370
371// NewEmulationFaultsProfiler returns a Profiler that profiles the number of
372// alignment faults.
373func NewEmulationFaultsProfiler(pid, cpu int, opts ...int) (Profiler, error) {
374 return NewProfiler(
375 unix.PERF_TYPE_SOFTWARE,
376 unix.PERF_COUNT_SW_EMULATION_FAULTS,
377 pid,
378 cpu,
379 opts...,
380 )
381}
382
383// NewL1DataProfiler returns a Profiler that profiles L1 cache data.
384func NewL1DataProfiler(pid, cpu, op, result int, opts ...int) (Profiler, error) {
385
386 return NewProfiler(
387 unix.PERF_TYPE_HW_CACHE,
388 uint64((unix.PERF_COUNT_HW_CACHE_L1D)|(op<<8)|(result<<16)),
389 pid,
390 cpu,
391 opts...,
392 )
393}
394
395// NewL1InstrProfiler returns a Profiler that profiles L1 instruction data.
396func NewL1InstrProfiler(pid, cpu, op, result int, opts ...int) (Profiler, error) {
397 return NewProfiler(
398 unix.PERF_TYPE_HW_CACHE,
399 uint64((unix.PERF_COUNT_HW_CACHE_L1I)|(op<<8)|(result<<16)),
400 pid,
401 cpu,
402 opts...,
403 )
404}
405
406// NewLLCacheProfiler returns a Profiler that profiles last level cache.
407func NewLLCacheProfiler(pid, cpu, op, result int, opts ...int) (Profiler, error) {
408 return NewProfiler(
409 unix.PERF_TYPE_HW_CACHE,
410 uint64((unix.PERF_COUNT_HW_CACHE_LL)|(op<<8)|(result<<16)),
411 pid,
412 cpu,
413 opts...,
414 )
415}
416
417// NewDataTLBProfiler returns a Profiler that profiles the data TLB.
418func NewDataTLBProfiler(pid, cpu, op, result int, opts ...int) (Profiler, error) {
419 return NewProfiler(
420 unix.PERF_TYPE_HW_CACHE,
421 uint64((unix.PERF_COUNT_HW_CACHE_DTLB)|(op<<8)|(result<<16)),
422 pid,
423 cpu,
424 opts...,
425 )
426}
427
428// NewInstrTLBProfiler returns a Profiler that profiles the instruction TLB.
429func NewInstrTLBProfiler(pid, cpu, op, result int, opts ...int) (Profiler, error) {
430 return NewProfiler(
431 unix.PERF_TYPE_HW_CACHE,
432 uint64((unix.PERF_COUNT_HW_CACHE_ITLB)|(op<<8)|(result<<16)),
433 pid,
434 cpu,
435 opts...,
436 )
437}
438
439// NewBPUProfiler returns a Profiler that profiles the BPU (branch prediction unit).
440func NewBPUProfiler(pid, cpu, op, result int, opts ...int) (Profiler, error) {
441 return NewProfiler(
442 unix.PERF_TYPE_HW_CACHE,
443 uint64((unix.PERF_COUNT_HW_CACHE_BPU)|(op<<8)|(result<<16)),
444 pid,
445 cpu,
446 opts...,
447 )
448}
449
450// NewNodeCacheProfiler returns a Profiler that profiles the node cache accesses.
451func NewNodeCacheProfiler(pid, cpu, op, result int, opts ...int) (Profiler, error) {
452 return NewProfiler(
453 unix.PERF_TYPE_HW_CACHE,
454 uint64((unix.PERF_COUNT_HW_CACHE_NODE)|(op<<8)|(result<<16)),
455 pid,
456 cpu,
457 opts...,
458 )
459}
460
// Reset zeroes the perf counter via the PERF_EVENT_IOC_RESET ioctl.
func (p *profiler) Reset() error {
	return unix.IoctlSetInt(p.fd, unix.PERF_EVENT_IOC_RESET, 0)
}

// Start enables counting via the PERF_EVENT_IOC_ENABLE ioctl.
func (p *profiler) Start() error {
	return unix.IoctlSetInt(p.fd, unix.PERF_EVENT_IOC_ENABLE, 0)
}

// Stop disables counting via the PERF_EVENT_IOC_DISABLE ioctl.
func (p *profiler) Stop() error {
	return unix.IoctlSetInt(p.fd, unix.PERF_EVENT_IOC_DISABLE, 0)
}
475
476// Profile returns the current Profile.
477func (p *profiler) Profile() (*ProfileValue, error) {
478 // The underlying struct that gets read from the profiler looks like:
479 /*
480 struct read_format {
481 u64 value; // The value of the event
482 u64 time_enabled; // if PERF_FORMAT_TOTAL_TIME_ENABLED
483 u64 time_running; // if PERF_FORMAT_TOTAL_TIME_RUNNING
484 u64 id; // if PERF_FORMAT_ID
485 };
486 */
487
488 // read 24 bytes since PERF_FORMAT_TOTAL_TIME_ENABLED and
489 // PERF_FORMAT_TOTAL_TIME_RUNNING are always set.
490 // XXX: allow profile ids?
491 buf := make([]byte, 24, 24)
492 _, err := syscall.Read(p.fd, buf)
493 if err != nil {
494 return nil, err
495 }
496
497 return &ProfileValue{
498 Value: binary.LittleEndian.Uint64(buf[0:8]),
499 TimeEnabled: binary.LittleEndian.Uint64(buf[8:16]),
500 TimeRunning: binary.LittleEndian.Uint64(buf[16:24]),
501 }, nil
502}
503
// Close releases the perf event file descriptor.
func (p *profiler) Close() error {
	return unix.Close(p.fd)
}
diff --git a/vendor/github.com/hodgesds/perf-utils/software_profiler.go b/vendor/github.com/hodgesds/perf-utils/software_profiler.go
new file mode 100644
index 0000000..d71fb7a
--- /dev/null
+++ b/vendor/github.com/hodgesds/perf-utils/software_profiler.go
@@ -0,0 +1,151 @@
1// +build linux
2
3package perf
4
5import (
6 "go.uber.org/multierr"
7 "golang.org/x/sys/unix"
8)
9
// softwareProfiler aggregates the PERF_COUNT_SW_* counters.
type softwareProfiler struct {
	// profilers maps a PERF_COUNT_SW_* counter type to its open Profiler.
	profilers map[int]Profiler
}
14
15// NewSoftwareProfiler returns a new software profiler.
16func NewSoftwareProfiler(pid, cpu int, opts ...int) SoftwareProfiler {
17 profilers := map[int]Profiler{}
18
19 cpuClockProfiler, err := NewCPUClockProfiler(pid, cpu, opts...)
20 if err == nil {
21 profilers[unix.PERF_COUNT_SW_CPU_CLOCK] = cpuClockProfiler
22 }
23
24 taskClockProfiler, err := NewTaskClockProfiler(pid, cpu, opts...)
25 if err == nil {
26 profilers[unix.PERF_COUNT_SW_TASK_CLOCK] = taskClockProfiler
27 }
28
29 pageFaultProfiler, err := NewPageFaultProfiler(pid, cpu, opts...)
30 if err == nil {
31 profilers[unix.PERF_COUNT_SW_PAGE_FAULTS] = pageFaultProfiler
32 }
33
34 ctxSwitchesProfiler, err := NewCtxSwitchesProfiler(pid, cpu, opts...)
35 if err == nil {
36 profilers[unix.PERF_COUNT_SW_CONTEXT_SWITCHES] = ctxSwitchesProfiler
37 }
38
39 cpuMigrationsProfiler, err := NewCPUMigrationsProfiler(pid, cpu, opts...)
40 if err == nil {
41 profilers[unix.PERF_COUNT_SW_CPU_MIGRATIONS] = cpuMigrationsProfiler
42 }
43
44 minorFaultProfiler, err := NewMinorFaultsProfiler(pid, cpu, opts...)
45 if err == nil {
46 profilers[unix.PERF_COUNT_SW_PAGE_FAULTS_MIN] = minorFaultProfiler
47 }
48
49 majorFaultProfiler, err := NewMajorFaultsProfiler(pid, cpu, opts...)
50 if err == nil {
51 profilers[unix.PERF_COUNT_SW_PAGE_FAULTS_MAJ] = majorFaultProfiler
52 }
53
54 alignFaultsFrontProfiler, err := NewAlignFaultsProfiler(pid, cpu, opts...)
55 if err == nil {
56 profilers[unix.PERF_COUNT_SW_ALIGNMENT_FAULTS] = alignFaultsFrontProfiler
57 }
58
59 emuFaultProfiler, err := NewEmulationFaultsProfiler(pid, cpu, opts...)
60 if err == nil {
61 profilers[unix.PERF_COUNT_SW_EMULATION_FAULTS] = emuFaultProfiler
62 }
63
64 return &softwareProfiler{
65 profilers: profilers,
66 }
67}
68
// Start is used to start the SoftwareProfiler; it returns ErrNoProfiler if
// no counter could be opened, otherwise the combined errors of all Start
// calls.
func (p *softwareProfiler) Start() error {
	if len(p.profilers) == 0 {
		return ErrNoProfiler
	}
	var err error
	for _, profiler := range p.profilers {
		err = multierr.Append(err, profiler.Start())
	}
	return err
}

// Reset is used to reset the SoftwareProfiler.
func (p *softwareProfiler) Reset() error {
	var err error
	for _, profiler := range p.profilers {
		err = multierr.Append(err, profiler.Reset())
	}
	return err
}

// Stop is used to stop the SoftwareProfiler.
func (p *softwareProfiler) Stop() error {
	var err error
	for _, profiler := range p.profilers {
		err = multierr.Append(err, profiler.Stop())
	}
	return err
}

// Close is used to close the SoftwareProfiler, releasing all perf fds.
func (p *softwareProfiler) Close() error {
	var err error
	for _, profiler := range p.profilers {
		err = multierr.Append(err, profiler.Close())
	}
	return err
}
107
108// Profile is used to read the SoftwareProfiler SoftwareProfile it returns an
109// error only if all profiles fail.
110func (p *softwareProfiler) Profile() (*SoftwareProfile, error) {
111 var err error
112 swProfile := &SoftwareProfile{}
113 for profilerType, profiler := range p.profilers {
114 profileVal, err2 := profiler.Profile()
115 err = multierr.Append(err, err2)
116 if err2 == nil {
117 if swProfile.TimeEnabled == nil {
118 swProfile.TimeEnabled = &profileVal.TimeEnabled
119 }
120 if swProfile.TimeRunning == nil {
121 swProfile.TimeRunning = &profileVal.TimeRunning
122 }
123 switch profilerType {
124 case unix.PERF_COUNT_SW_CPU_CLOCK:
125 swProfile.CPUClock = &profileVal.Value
126 case unix.PERF_COUNT_SW_TASK_CLOCK:
127 swProfile.TaskClock = &profileVal.Value
128 case unix.PERF_COUNT_SW_PAGE_FAULTS:
129 swProfile.PageFaults = &profileVal.Value
130 case unix.PERF_COUNT_SW_CONTEXT_SWITCHES:
131 swProfile.ContextSwitches = &profileVal.Value
132 case unix.PERF_COUNT_SW_CPU_MIGRATIONS:
133 swProfile.CPUMigrations = &profileVal.Value
134 case unix.PERF_COUNT_SW_PAGE_FAULTS_MIN:
135 swProfile.MinorPageFaults = &profileVal.Value
136 case unix.PERF_COUNT_SW_PAGE_FAULTS_MAJ:
137 swProfile.MajorPageFaults = &profileVal.Value
138 case unix.PERF_COUNT_SW_ALIGNMENT_FAULTS:
139 swProfile.AlignmentFaults = &profileVal.Value
140 case unix.PERF_COUNT_SW_EMULATION_FAULTS:
141 swProfile.EmulationFaults = &profileVal.Value
142 default:
143 }
144 }
145 }
146 if len(multierr.Errors(err)) == len(p.profilers) {
147 return nil, err
148 }
149
150 return swProfile, nil
151}
diff --git a/vendor/github.com/hodgesds/perf-utils/utils.go b/vendor/github.com/hodgesds/perf-utils/utils.go
new file mode 100644
index 0000000..1593c0c
--- /dev/null
+++ b/vendor/github.com/hodgesds/perf-utils/utils.go
@@ -0,0 +1,681 @@
1// +build linux
2
3package perf
4
5import (
6 "encoding/binary"
7 "runtime"
8 "syscall"
9 "unsafe"
10
11 "golang.org/x/sys/unix"
12)
13
var (
	// EventAttrSize is the size in bytes of a unix.PerfEventAttr, as passed
	// to perf_event_open(2) in the attr Size field.
	EventAttrSize = uint32(unsafe.Sizeof(unix.PerfEventAttr{}))
)
18
19// profileFn is a helper function to profile a function.
20func profileFn(eventAttr *unix.PerfEventAttr, f func() error) (*ProfileValue, error) {
21 runtime.LockOSThread()
22 defer runtime.UnlockOSThread()
23 fd, err := unix.PerfEventOpen(
24 eventAttr,
25 unix.Gettid(),
26 -1,
27 -1,
28 0,
29 )
30 if err != nil {
31 return nil, err
32 }
33 if err := unix.IoctlSetInt(fd, unix.PERF_EVENT_IOC_RESET, 0); err != nil {
34 return nil, err
35 }
36 if err := unix.IoctlSetInt(fd, unix.PERF_EVENT_IOC_ENABLE, 0); err != nil {
37 return nil, err
38 }
39 if err := f(); err != nil {
40 return nil, err
41 }
42 if err := unix.IoctlSetInt(fd, unix.PERF_EVENT_IOC_DISABLE, 0); err != nil {
43 return nil, err
44 }
45 buf := make([]byte, 24)
46 if _, err := syscall.Read(fd, buf); err != nil {
47 return nil, err
48 }
49 return &ProfileValue{
50 Value: binary.LittleEndian.Uint64(buf[0:8]),
51 TimeEnabled: binary.LittleEndian.Uint64(buf[8:16]),
52 TimeRunning: binary.LittleEndian.Uint64(buf[16:24]),
53 }, unix.Close(fd)
54}
55
56// CPUInstructions is used to profile a function and return the number of CPU instructions.
57// Note that it will call runtime.LockOSThread to ensure accurate profilng.
58func CPUInstructions(f func() error) (*ProfileValue, error) {
59 eventAttr := &unix.PerfEventAttr{
60 Type: unix.PERF_TYPE_HARDWARE,
61 Config: unix.PERF_COUNT_HW_INSTRUCTIONS,
62 Size: EventAttrSize,
63 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
64 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
65 }
66 return profileFn(eventAttr, f)
67}
68
69// CPUInstructionsEventAttr returns a unix.PerfEventAttr configured for CPUInstructions.
70func CPUInstructionsEventAttr() unix.PerfEventAttr {
71 return unix.PerfEventAttr{
72 Type: unix.PERF_TYPE_HARDWARE,
73 Config: unix.PERF_COUNT_HW_INSTRUCTIONS,
74 Size: EventAttrSize,
75 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
76 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
77 }
78
79}
80
81// CPUCycles is used to profile a function and return the number of CPU cycles.
82// Note that it will call runtime.LockOSThread to ensure accurate profilng.
83func CPUCycles(f func() error) (*ProfileValue, error) {
84 eventAttr := &unix.PerfEventAttr{
85 Type: unix.PERF_TYPE_HARDWARE,
86 Config: unix.PERF_COUNT_HW_CPU_CYCLES,
87 Size: EventAttrSize,
88 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
89 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
90 }
91 return profileFn(eventAttr, f)
92}
93
94// CPUCyclesEventAttr returns a unix.PerfEventAttr configured for CPUCycles.
95func CPUCyclesEventAttr() unix.PerfEventAttr {
96 return unix.PerfEventAttr{
97 Type: unix.PERF_TYPE_HARDWARE,
98 Config: unix.PERF_COUNT_HW_CPU_CYCLES,
99 Size: EventAttrSize,
100 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
101 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
102 }
103
104}
105
106// CacheRef is used to profile a function and return the number of cache
107// references. Note that it will call runtime.LockOSThread to ensure accurate
108// profilng.
109func CacheRef(f func() error) (*ProfileValue, error) {
110 eventAttr := &unix.PerfEventAttr{
111 Type: unix.PERF_TYPE_HARDWARE,
112 Config: unix.PERF_COUNT_HW_CACHE_REFERENCES,
113 Size: EventAttrSize,
114 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
115 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
116 }
117 return profileFn(eventAttr, f)
118}
119
120// CacheRefEventAttr returns a unix.PerfEventAttr configured for CacheRef.
121func CacheRefEventAttr() unix.PerfEventAttr {
122 return unix.PerfEventAttr{
123 Type: unix.PERF_TYPE_HARDWARE,
124 Config: unix.PERF_COUNT_HW_CACHE_REFERENCES,
125 Size: EventAttrSize,
126 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
127 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
128 }
129
130}
131
132// CacheMiss is used to profile a function and return the number of cache
133// misses. Note that it will call runtime.LockOSThread to ensure accurate
134// profilng.
135func CacheMiss(f func() error) (*ProfileValue, error) {
136 eventAttr := &unix.PerfEventAttr{
137 Type: unix.PERF_TYPE_HARDWARE,
138 Config: unix.PERF_COUNT_HW_CACHE_MISSES,
139 Size: EventAttrSize,
140 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
141 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
142 }
143 return profileFn(eventAttr, f)
144}
145
146// CacheMissEventAttr returns a unix.PerfEventAttr configured for CacheMisses.
147func CacheMissEventAttr() unix.PerfEventAttr {
148 return unix.PerfEventAttr{
149 Type: unix.PERF_TYPE_HARDWARE,
150 Config: unix.PERF_COUNT_HW_CACHE_MISSES,
151 Size: EventAttrSize,
152 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
153 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
154 }
155
156}
157
158// BusCycles is used to profile a function and return the number of bus
159// cycles. Note that it will call runtime.LockOSThread to ensure accurate
160// profilng.
161func BusCycles(f func() error) (*ProfileValue, error) {
162 eventAttr := &unix.PerfEventAttr{
163 Type: unix.PERF_TYPE_HARDWARE,
164 Config: unix.PERF_COUNT_HW_BUS_CYCLES,
165 Size: EventAttrSize,
166 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
167 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
168 }
169 return profileFn(eventAttr, f)
170}
171
172// BusCyclesEventAttr returns a unix.PerfEventAttr configured for BusCycles.
173func BusCyclesEventAttr() unix.PerfEventAttr {
174 return unix.PerfEventAttr{
175 Type: unix.PERF_TYPE_HARDWARE,
176 Config: unix.PERF_COUNT_HW_BUS_CYCLES,
177 Size: EventAttrSize,
178 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
179 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
180 }
181
182}
183
184// StalledFrontendCycles is used to profile a function and return the number of
185// stalled frontend cycles. Note that it will call runtime.LockOSThread to
186// ensure accurate profilng.
187func StalledFrontendCycles(f func() error) (*ProfileValue, error) {
188 eventAttr := &unix.PerfEventAttr{
189 Type: unix.PERF_TYPE_HARDWARE,
190 Config: unix.PERF_COUNT_HW_STALLED_CYCLES_FRONTEND,
191 Size: EventAttrSize,
192 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
193 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
194 }
195 return profileFn(eventAttr, f)
196}
197
198// StalledFrontendCyclesEventAttr returns a unix.PerfEventAttr configured for StalledFrontendCycles.
199func StalledFrontendCyclesEventAttr() unix.PerfEventAttr {
200 return unix.PerfEventAttr{
201 Type: unix.PERF_TYPE_HARDWARE,
202 Config: unix.PERF_COUNT_HW_STALLED_CYCLES_FRONTEND,
203 Size: EventAttrSize,
204 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
205 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
206 }
207
208}
209
210// StalledBackendCycles is used to profile a function and return the number of
211// stalled backend cycles. Note that it will call runtime.LockOSThread to
212// ensure accurate profilng.
213func StalledBackendCycles(f func() error) (*ProfileValue, error) {
214 eventAttr := &unix.PerfEventAttr{
215 Type: unix.PERF_TYPE_HARDWARE,
216 Config: unix.PERF_COUNT_HW_STALLED_CYCLES_BACKEND,
217 Size: EventAttrSize,
218 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
219 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
220 }
221 return profileFn(eventAttr, f)
222}
223
224// StalledBackendCyclesEventAttr returns a unix.PerfEventAttr configured for StalledBackendCycles.
225func StalledBackendCyclesEventAttr() unix.PerfEventAttr {
226 return unix.PerfEventAttr{
227 Type: unix.PERF_TYPE_HARDWARE,
228 Config: unix.PERF_COUNT_HW_STALLED_CYCLES_BACKEND,
229 Size: EventAttrSize,
230 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
231 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
232 }
233
234}
235
236// CPURefCycles is used to profile a function and return the number of CPU
237// references cycles which are not affected by frequency scaling. Note that it
238// will call runtime.LockOSThread to ensure accurate profilng.
239func CPURefCycles(f func() error) (*ProfileValue, error) {
240 eventAttr := &unix.PerfEventAttr{
241 Type: unix.PERF_TYPE_HARDWARE,
242 Config: unix.PERF_COUNT_HW_REF_CPU_CYCLES,
243 Size: EventAttrSize,
244 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
245 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
246 }
247 return profileFn(eventAttr, f)
248}
249
250// CPURefCyclesEventAttr returns a unix.PerfEventAttr configured for CPURefCycles.
251func CPURefCyclesEventAttr() unix.PerfEventAttr {
252 return unix.PerfEventAttr{
253 Type: unix.PERF_TYPE_HARDWARE,
254 Config: unix.PERF_COUNT_HW_REF_CPU_CYCLES,
255 Size: EventAttrSize,
256 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
257 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
258 }
259
260}
261
262// CPUClock is used to profile a function and return the CPU clock timer. Note
263// that it will call runtime.LockOSThread to ensure accurate profilng.
264func CPUClock(f func() error) (*ProfileValue, error) {
265 eventAttr := &unix.PerfEventAttr{
266 Type: unix.PERF_TYPE_SOFTWARE,
267 Config: unix.PERF_COUNT_SW_CPU_CLOCK,
268 Size: EventAttrSize,
269 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
270 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
271 }
272 return profileFn(eventAttr, f)
273}
274
275// CPUClockEventAttr returns a unix.PerfEventAttr configured for CPUClock.
276func CPUClockEventAttr() unix.PerfEventAttr {
277 return unix.PerfEventAttr{
278 Type: unix.PERF_TYPE_SOFTWARE,
279 Config: unix.PERF_COUNT_SW_CPU_CLOCK,
280 Size: EventAttrSize,
281 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
282 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
283 }
284}
285
286// CPUTaskClock is used to profile a function and return the CPU clock timer
287// for the running task. Note that it will call runtime.LockOSThread to ensure
288// accurate profilng.
289func CPUTaskClock(f func() error) (*ProfileValue, error) {
290 eventAttr := &unix.PerfEventAttr{
291 Type: unix.PERF_TYPE_SOFTWARE,
292 Config: unix.PERF_COUNT_SW_TASK_CLOCK,
293 Size: EventAttrSize,
294 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
295 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
296 }
297 return profileFn(eventAttr, f)
298}
299
300// CPUTaskClockEventAttr returns a unix.PerfEventAttr configured for CPUTaskClock.
301func CPUTaskClockEventAttr() unix.PerfEventAttr {
302 return unix.PerfEventAttr{
303 Type: unix.PERF_TYPE_SOFTWARE,
304 Config: unix.PERF_COUNT_SW_TASK_CLOCK,
305 Size: EventAttrSize,
306 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
307 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
308 }
309
310}
311
312// PageFaults is used to profile a function and return the number of page
313// faults. Note that it will call runtime.LockOSThread to ensure accurate
314// profilng.
315func PageFaults(f func() error) (*ProfileValue, error) {
316 eventAttr := &unix.PerfEventAttr{
317 Type: unix.PERF_TYPE_SOFTWARE,
318 Config: unix.PERF_COUNT_SW_PAGE_FAULTS,
319 Size: EventAttrSize,
320 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
321 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
322 }
323 return profileFn(eventAttr, f)
324}
325
326// PageFaultsEventAttr returns a unix.PerfEventAttr configured for PageFaults.
327func PageFaultsEventAttr() unix.PerfEventAttr {
328 return unix.PerfEventAttr{
329 Type: unix.PERF_TYPE_SOFTWARE,
330 Config: unix.PERF_COUNT_SW_PAGE_FAULTS,
331 Size: EventAttrSize,
332 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
333 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
334 }
335}
336
337// ContextSwitches is used to profile a function and return the number of
338// context switches. Note that it will call runtime.LockOSThread to ensure
339// accurate profilng.
340func ContextSwitches(f func() error) (*ProfileValue, error) {
341 eventAttr := &unix.PerfEventAttr{
342 Type: unix.PERF_TYPE_SOFTWARE,
343 Config: unix.PERF_COUNT_SW_CONTEXT_SWITCHES,
344 Size: EventAttrSize,
345 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
346 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
347 }
348 return profileFn(eventAttr, f)
349}
350
351// ContextSwitchesEventAttr returns a unix.PerfEventAttr configured for ContextSwitches.
352func ContextSwitchesEventAttr() unix.PerfEventAttr {
353 return unix.PerfEventAttr{
354 Type: unix.PERF_TYPE_SOFTWARE,
355 Config: unix.PERF_COUNT_SW_CONTEXT_SWITCHES,
356 Size: EventAttrSize,
357 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
358 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
359 }
360}
361
362// CPUMigrations is used to profile a function and return the number of times
363// the thread has been migrated to a new CPU. Note that it will call
364// runtime.LockOSThread to ensure accurate profilng.
365func CPUMigrations(f func() error) (*ProfileValue, error) {
366 eventAttr := &unix.PerfEventAttr{
367 Type: unix.PERF_TYPE_SOFTWARE,
368 Config: unix.PERF_COUNT_SW_CPU_MIGRATIONS,
369 Size: EventAttrSize,
370 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
371 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
372 }
373 return profileFn(eventAttr, f)
374}
375
376// CPUMigrationsEventAttr returns a unix.PerfEventAttr configured for CPUMigrations.
377func CPUMigrationsEventAttr() unix.PerfEventAttr {
378 return unix.PerfEventAttr{
379 Type: unix.PERF_TYPE_SOFTWARE,
380 Config: unix.PERF_COUNT_SW_CPU_MIGRATIONS,
381 Size: EventAttrSize,
382 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
383 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
384 }
385}
386
387// MinorPageFaults is used to profile a function and return the number of minor
388// page faults. Note that it will call runtime.LockOSThread to ensure accurate
389// profilng.
390func MinorPageFaults(f func() error) (*ProfileValue, error) {
391 eventAttr := &unix.PerfEventAttr{
392 Type: unix.PERF_TYPE_SOFTWARE,
393 Config: unix.PERF_COUNT_SW_PAGE_FAULTS_MIN,
394 Size: EventAttrSize,
395 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
396 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
397 }
398 return profileFn(eventAttr, f)
399}
400
401// MinorPageFaultsEventAttr returns a unix.PerfEventAttr configured for MinorPageFaults.
402func MinorPageFaultsEventAttr() unix.PerfEventAttr {
403 return unix.PerfEventAttr{
404 Type: unix.PERF_TYPE_SOFTWARE,
405 Config: unix.PERF_COUNT_SW_PAGE_FAULTS_MIN,
406 Size: EventAttrSize,
407 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
408 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
409 }
410}
411
412// MajorPageFaults is used to profile a function and return the number of major
413// page faults. Note that it will call runtime.LockOSThread to ensure accurate
414// profilng.
415func MajorPageFaults(f func() error) (*ProfileValue, error) {
416 eventAttr := &unix.PerfEventAttr{
417 Type: unix.PERF_TYPE_SOFTWARE,
418 Config: unix.PERF_COUNT_SW_PAGE_FAULTS_MAJ,
419 Size: EventAttrSize,
420 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
421 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
422 }
423 return profileFn(eventAttr, f)
424}
425
426// MajorPageFaultsEventAttr returns a unix.PerfEventAttr configured for MajorPageFaults.
427func MajorPageFaultsEventAttr() unix.PerfEventAttr {
428 return unix.PerfEventAttr{
429 Type: unix.PERF_TYPE_SOFTWARE,
430 Config: unix.PERF_COUNT_SW_PAGE_FAULTS_MAJ,
431 Size: EventAttrSize,
432 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
433 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
434 }
435}
436
437// AlignmentFaults is used to profile a function and return the number of alignment
438// faults. Note that it will call runtime.LockOSThread to ensure accurate
439// profilng.
440func AlignmentFaults(f func() error) (*ProfileValue, error) {
441 eventAttr := &unix.PerfEventAttr{
442 Type: unix.PERF_TYPE_SOFTWARE,
443 Config: unix.PERF_COUNT_SW_ALIGNMENT_FAULTS,
444 Size: EventAttrSize,
445 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
446 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
447 }
448 return profileFn(eventAttr, f)
449}
450
451// AlignmentFaultsEventAttr returns a unix.PerfEventAttr configured for AlignmentFaults.
452func AlignmentFaultsEventAttr() unix.PerfEventAttr {
453 return unix.PerfEventAttr{
454 Type: unix.PERF_TYPE_SOFTWARE,
455 Config: unix.PERF_COUNT_SW_ALIGNMENT_FAULTS,
456 Size: EventAttrSize,
457 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
458 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
459 }
460}
461
462// EmulationFaults is used to profile a function and return the number of emulation
463// faults. Note that it will call runtime.LockOSThread to ensure accurate
464// profilng.
465func EmulationFaults(f func() error) (*ProfileValue, error) {
466 eventAttr := &unix.PerfEventAttr{
467 Type: unix.PERF_TYPE_SOFTWARE,
468 Config: unix.PERF_COUNT_SW_EMULATION_FAULTS,
469 Size: EventAttrSize,
470 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
471 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
472 }
473 return profileFn(eventAttr, f)
474}
475
476// EmulationFaultsEventAttr returns a unix.PerfEventAttr configured for EmulationFaults.
477func EmulationFaultsEventAttr() unix.PerfEventAttr {
478 return unix.PerfEventAttr{
479 Type: unix.PERF_TYPE_SOFTWARE,
480 Config: unix.PERF_COUNT_SW_EMULATION_FAULTS,
481 Size: EventAttrSize,
482 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
483 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
484 }
485}
486
487// L1Data is used to profile a function and the L1 data cache faults. Use
488// PERF_COUNT_HW_CACHE_OP_READ, PERF_COUNT_HW_CACHE_OP_WRITE, or
489// PERF_COUNT_HW_CACHE_OP_PREFETCH for the opt and
490// PERF_COUNT_HW_CACHE_RESULT_ACCESS or PERF_COUNT_HW_CACHE_RESULT_MISS for the
491// result. Note that it will call runtime.LockOSThread to ensure accurate
492// profilng.
493func L1Data(op, result int, f func() error) (*ProfileValue, error) {
494 eventAttr := &unix.PerfEventAttr{
495 Type: unix.PERF_TYPE_HW_CACHE,
496 Config: uint64((unix.PERF_COUNT_HW_CACHE_L1D) | (op << 8) | (result << 16)),
497 Size: EventAttrSize,
498 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
499 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
500 }
501 return profileFn(eventAttr, f)
502}
503
504// L1DataEventAttr returns a unix.PerfEventAttr configured for L1Data.
505func L1DataEventAttr(op, result int) unix.PerfEventAttr {
506 return unix.PerfEventAttr{
507 Type: unix.PERF_TYPE_HW_CACHE,
508 Config: uint64((unix.PERF_COUNT_HW_CACHE_L1D) | (op << 8) | (result << 16)),
509 Size: EventAttrSize,
510 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
511 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
512 }
513}
514
515// L1Instructions is used to profile a function for the instruction level L1
516// cache. Use PERF_COUNT_HW_CACHE_OP_READ, PERF_COUNT_HW_CACHE_OP_WRITE, or
517// PERF_COUNT_HW_CACHE_OP_PREFETCH for the opt and
518// PERF_COUNT_HW_CACHE_RESULT_ACCESS or PERF_COUNT_HW_CACHE_RESULT_MISS for the
519// result. Note that it will call runtime.LockOSThread to ensure accurate
520// profilng.
521func L1Instructions(op, result int, f func() error) (*ProfileValue, error) {
522 eventAttr := &unix.PerfEventAttr{
523 Type: unix.PERF_TYPE_HW_CACHE,
524 Config: uint64((unix.PERF_COUNT_HW_CACHE_L1I) | (op << 8) | (result << 16)),
525 Size: EventAttrSize,
526 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
527 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
528 }
529 return profileFn(eventAttr, f)
530}
531
532// L1InstructionsEventAttr returns a unix.PerfEventAttr configured for L1Instructions.
533func L1InstructionsEventAttr(op, result int) unix.PerfEventAttr {
534 return unix.PerfEventAttr{
535 Type: unix.PERF_TYPE_HW_CACHE,
536 Config: uint64((unix.PERF_COUNT_HW_CACHE_L1I) | (op << 8) | (result << 16)),
537 Size: EventAttrSize,
538 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
539 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
540 }
541}
542
543// LLCache is used to profile a function and return the number of emulation
544// PERF_COUNT_HW_CACHE_OP_READ, PERF_COUNT_HW_CACHE_OP_WRITE, or
545// PERF_COUNT_HW_CACHE_OP_PREFETCH for the opt and
546// PERF_COUNT_HW_CACHE_RESULT_ACCESS or PERF_COUNT_HW_CACHE_RESULT_MISS for the
547// result. Note that it will call runtime.LockOSThread to ensure accurate
548// profilng.
549func LLCache(op, result int, f func() error) (*ProfileValue, error) {
550 eventAttr := &unix.PerfEventAttr{
551 Type: unix.PERF_TYPE_HW_CACHE,
552 Config: uint64((unix.PERF_COUNT_HW_CACHE_LL) | (op << 8) | (result << 16)),
553 Size: EventAttrSize,
554 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
555 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
556 }
557 return profileFn(eventAttr, f)
558}
559
560// LLCacheEventAttr returns a unix.PerfEventAttr configured for LLCache.
561func LLCacheEventAttr(op, result int) unix.PerfEventAttr {
562 return unix.PerfEventAttr{
563 Type: unix.PERF_TYPE_HW_CACHE,
564 Config: uint64((unix.PERF_COUNT_HW_CACHE_LL) | (op << 8) | (result << 16)),
565 Size: EventAttrSize,
566 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
567 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
568 }
569}
570
571// DataTLB is used to profile the data TLB. Use PERF_COUNT_HW_CACHE_OP_READ,
572// PERF_COUNT_HW_CACHE_OP_WRITE, or PERF_COUNT_HW_CACHE_OP_PREFETCH for the opt
573// and PERF_COUNT_HW_CACHE_RESULT_ACCESS or PERF_COUNT_HW_CACHE_RESULT_MISS for
574// the result. Note that it will call runtime.LockOSThread to ensure accurate
575// profilng.
576func DataTLB(op, result int, f func() error) (*ProfileValue, error) {
577 eventAttr := &unix.PerfEventAttr{
578 Type: unix.PERF_TYPE_HW_CACHE,
579 Config: uint64((unix.PERF_COUNT_HW_CACHE_DTLB) | (op << 8) | (result << 16)),
580 Size: EventAttrSize,
581 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
582 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
583 }
584 return profileFn(eventAttr, f)
585}
586
587// DataTLBEventAttr returns a unix.PerfEventAttr configured for DataTLB.
588func DataTLBEventAttr(op, result int) unix.PerfEventAttr {
589 return unix.PerfEventAttr{
590 Type: unix.PERF_TYPE_HW_CACHE,
591 Config: uint64((unix.PERF_COUNT_HW_CACHE_DTLB) | (op << 8) | (result << 16)),
592 Size: EventAttrSize,
593 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
594 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
595 }
596}
597
598// InstructionTLB is used to profile the instruction TLB. Use
599// PERF_COUNT_HW_CACHE_OP_READ, PERF_COUNT_HW_CACHE_OP_WRITE, or
600// PERF_COUNT_HW_CACHE_OP_PREFETCH for the opt and
601// PERF_COUNT_HW_CACHE_RESULT_ACCESS or PERF_COUNT_HW_CACHE_RESULT_MISS for the
602// result. Note that it will call runtime.LockOSThread to ensure accurate
603// profilng.
604func InstructionTLB(op, result int, f func() error) (*ProfileValue, error) {
605 eventAttr := &unix.PerfEventAttr{
606 Type: unix.PERF_TYPE_HW_CACHE,
607 Config: uint64((unix.PERF_COUNT_HW_CACHE_ITLB) | (op << 8) | (result << 16)),
608 Size: EventAttrSize,
609 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
610 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
611 }
612 return profileFn(eventAttr, f)
613}
614
615// InstructionTLBEventAttr returns a unix.PerfEventAttr configured for InstructionTLB.
616func InstructionTLBEventAttr(op, result int) unix.PerfEventAttr {
617 return unix.PerfEventAttr{
618 Type: unix.PERF_TYPE_HW_CACHE,
619 Config: uint64((unix.PERF_COUNT_HW_CACHE_ITLB) | (op << 8) | (result << 16)),
620 Size: EventAttrSize,
621 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
622 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
623 }
624
625}
626
627// BPU is used to profile a function for the Branch Predictor Unit.
628// Use PERF_COUNT_HW_CACHE_OP_READ, PERF_COUNT_HW_CACHE_OP_WRITE, or
629// PERF_COUNT_HW_CACHE_OP_PREFETCH for the opt and
630// PERF_COUNT_HW_CACHE_RESULT_ACCESS or PERF_COUNT_HW_CACHE_RESULT_MISS for the
631// result. Note that it will call runtime.LockOSThread to ensure accurate
632// profilng.
633func BPU(op, result int, f func() error) (*ProfileValue, error) {
634 eventAttr := &unix.PerfEventAttr{
635 Type: unix.PERF_TYPE_HW_CACHE,
636 Config: uint64((unix.PERF_COUNT_HW_CACHE_BPU) | (op << 8) | (result << 16)),
637 Size: EventAttrSize,
638 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
639 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
640 }
641 return profileFn(eventAttr, f)
642}
643
644// BPUEventAttr returns a unix.PerfEventAttr configured for BPU events.
645func BPUEventAttr(op, result int) unix.PerfEventAttr {
646 return unix.PerfEventAttr{
647 Type: unix.PERF_TYPE_HW_CACHE,
648 Config: uint64((unix.PERF_COUNT_HW_CACHE_BPU) | (op << 8) | (result << 16)),
649 Size: EventAttrSize,
650 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
651 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
652 }
653}
654
655// NodeCache is used to profile a function for NUMA operations. Use Use
656// PERF_COUNT_HW_CACHE_OP_READ, PERF_COUNT_HW_CACHE_OP_WRITE, or
657// PERF_COUNT_HW_CACHE_OP_PREFETCH for the opt and
658// PERF_COUNT_HW_CACHE_RESULT_ACCESS or PERF_COUNT_HW_CACHE_RESULT_MISS for the
659// result. Note that it will call runtime.LockOSThread to ensure accurate
660// profilng.
661func NodeCache(op, result int, f func() error) (*ProfileValue, error) {
662 eventAttr := &unix.PerfEventAttr{
663 Type: unix.PERF_TYPE_HW_CACHE,
664 Config: uint64((unix.PERF_COUNT_HW_CACHE_NODE) | (op << 8) | (result << 16)),
665 Size: EventAttrSize,
666 Bits: unix.PerfBitDisabled | unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
667 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
668 }
669 return profileFn(eventAttr, f)
670}
671
672// NodeCacheEventAttr returns a unix.PerfEventAttr configured for NUMA cache operations.
673func NodeCacheEventAttr(op, result int) unix.PerfEventAttr {
674 return unix.PerfEventAttr{
675 Type: unix.PERF_TYPE_HW_CACHE,
676 Config: uint64((unix.PERF_COUNT_HW_CACHE_NODE) | (op << 8) | (result << 16)),
677 Size: EventAttrSize,
678 Bits: unix.PerfBitExcludeKernel | unix.PerfBitExcludeHv,
679 Read_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,
680 }
681}
diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml
new file mode 100644
index 0000000..6d4d1be
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.codecov.yml
@@ -0,0 +1,15 @@
1coverage:
2 range: 80..100
3 round: down
4 precision: 2
5
6 status:
7 project: # measuring the overall project coverage
8 default: # context, you can create multiple ones with custom titles
9 enabled: yes # must be yes|true to enable this status
10 target: 100 # specify the target coverage for each commit status
11 # option: "auto" (must increase from parent commit or pull request base)
12 # option: "X%" a static target percentage to hit
13 if_not_found: success # if parent is not found report status as success, error, or failure
14 if_ci_failed: error # if ci fails report status as success, error, or failure
15
diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore
new file mode 100644
index 0000000..0a4504f
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.gitignore
@@ -0,0 +1,11 @@
1.DS_Store
2/vendor
3/cover
4cover.out
5lint.log
6
7# Binaries
8*.test
9
10# Profiling output
11*.prof
diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml
new file mode 100644
index 0000000..5895722
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.travis.yml
@@ -0,0 +1,23 @@
1sudo: false
2language: go
3go_import_path: go.uber.org/atomic
4
5go:
6 - 1.7
7 - 1.8
8 - 1.9
9
10cache:
11 directories:
12 - vendor
13
14install:
15 - make install_ci
16
17script:
18 - make test_ci
19 - scripts/test-ubergo.sh
20 - make lint
21
22after_success:
23 - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt
new file mode 100644
index 0000000..8765c9f
--- /dev/null
+++ b/vendor/go.uber.org/atomic/LICENSE.txt
@@ -0,0 +1,19 @@
1Copyright (c) 2016 Uber Technologies, Inc.
2
3Permission is hereby granted, free of charge, to any person obtaining a copy
4of this software and associated documentation files (the "Software"), to deal
5in the Software without restriction, including without limitation the rights
6to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7copies of the Software, and to permit persons to whom the Software is
8furnished to do so, subject to the following conditions:
9
10The above copyright notice and this permission notice shall be included in
11all copies or substantial portions of the Software.
12
13THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19THE SOFTWARE.
diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile
new file mode 100644
index 0000000..dfc63d9
--- /dev/null
+++ b/vendor/go.uber.org/atomic/Makefile
@@ -0,0 +1,64 @@
1PACKAGES := $(shell glide nv)
2# Many Go tools take file globs or directories as arguments instead of packages.
3PACKAGE_FILES ?= *.go
4
5
6# The linting tools evolve with each Go version, so run them only on the latest
7# stable release.
8GO_VERSION := $(shell go version | cut -d " " -f 3)
9GO_MINOR_VERSION := $(word 2,$(subst ., ,$(GO_VERSION)))
10LINTABLE_MINOR_VERSIONS := 7 8
11ifneq ($(filter $(LINTABLE_MINOR_VERSIONS),$(GO_MINOR_VERSION)),)
12SHOULD_LINT := true
13endif
14
15
16export GO15VENDOREXPERIMENT=1
17
18
19.PHONY: build
20build:
21 go build -i $(PACKAGES)
22
23
24.PHONY: install
25install:
26 glide --version || go get github.com/Masterminds/glide
27 glide install
28
29
30.PHONY: test
31test:
32 go test -cover -race $(PACKAGES)
33
34
35.PHONY: install_ci
36install_ci: install
37 go get github.com/wadey/gocovmerge
38 go get github.com/mattn/goveralls
39 go get golang.org/x/tools/cmd/cover
40ifdef SHOULD_LINT
41 go get github.com/golang/lint/golint
42endif
43
44.PHONY: lint
45lint:
46ifdef SHOULD_LINT
47 @rm -rf lint.log
48 @echo "Checking formatting..."
49 @gofmt -d -s $(PACKAGE_FILES) 2>&1 | tee lint.log
50 @echo "Checking vet..."
51 @$(foreach dir,$(PACKAGE_FILES),go tool vet $(dir) 2>&1 | tee -a lint.log;)
52 @echo "Checking lint..."
53 @$(foreach dir,$(PKGS),golint $(dir) 2>&1 | tee -a lint.log;)
54 @echo "Checking for unresolved FIXMEs..."
55 @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log
56 @[ ! -s lint.log ]
57else
58 @echo "Skipping linters on" $(GO_VERSION)
59endif
60
61
62.PHONY: test_ci
63test_ci: install_ci build
64 ./scripts/cover.sh $(shell go list $(PACKAGES))
diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md
new file mode 100644
index 0000000..6505abf
--- /dev/null
+++ b/vendor/go.uber.org/atomic/README.md
@@ -0,0 +1,36 @@
1# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard]
2
3Simple wrappers for primitive types to enforce atomic access.
4
5## Installation
6`go get -u go.uber.org/atomic`
7
8## Usage
9The standard library's `sync/atomic` is powerful, but it's easy to forget which
10variables must be accessed atomically. `go.uber.org/atomic` preserves all the
11functionality of the standard library, but wraps the primitive types to
12provide a safer, more convenient API.
13
14```go
15var atom atomic.Uint32
16atom.Store(42)
17atom.Sub(2)
18atom.CAS(40, 11)
19```
20
21See the [documentation][doc] for a complete API specification.
22
23## Development Status
24Stable.
25
26<hr>
27Released under the [MIT License](LICENSE.txt).
28
29[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg
30[doc]: https://godoc.org/go.uber.org/atomic
31[ci-img]: https://travis-ci.org/uber-go/atomic.svg?branch=master
32[ci]: https://travis-ci.org/uber-go/atomic
33[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg
34[cov]: https://codecov.io/gh/uber-go/atomic
35[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic
36[reportcard]: https://goreportcard.com/report/go.uber.org/atomic
diff --git a/vendor/go.uber.org/atomic/atomic.go b/vendor/go.uber.org/atomic/atomic.go
new file mode 100644
index 0000000..1db6849
--- /dev/null
+++ b/vendor/go.uber.org/atomic/atomic.go
@@ -0,0 +1,351 @@
1// Copyright (c) 2016 Uber Technologies, Inc.
2//
3// Permission is hereby granted, free of charge, to any person obtaining a copy
4// of this software and associated documentation files (the "Software"), to deal
5// in the Software without restriction, including without limitation the rights
6// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7// copies of the Software, and to permit persons to whom the Software is
8// furnished to do so, subject to the following conditions:
9//
10// The above copyright notice and this permission notice shall be included in
11// all copies or substantial portions of the Software.
12//
13// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19// THE SOFTWARE.
20
21// Package atomic provides simple wrappers around numerics to enforce atomic
22// access.
23package atomic
24
25import (
26 "math"
27 "sync/atomic"
28 "time"
29)
30
// Int32 is an atomic wrapper around an int32.
type Int32 struct{ v int32 }

// NewInt32 creates an Int32 holding i.
func NewInt32(i int32) *Int32 {
	return &Int32{v: i}
}

// Load atomically reads the wrapped value.
func (a *Int32) Load() int32 {
	return atomic.LoadInt32(&a.v)
}

// Store atomically writes n.
func (a *Int32) Store(n int32) {
	atomic.StoreInt32(&a.v, n)
}

// Swap atomically replaces the value with n and returns the previous value.
func (a *Int32) Swap(n int32) int32 {
	return atomic.SwapInt32(&a.v, n)
}

// Add atomically adds n and returns the new value.
func (a *Int32) Add(n int32) int32 {
	return atomic.AddInt32(&a.v, n)
}

// Sub atomically subtracts n and returns the new value.
func (a *Int32) Sub(n int32) int32 {
	return atomic.AddInt32(&a.v, -n)
}

// Inc atomically increments the value and returns the result.
func (a *Int32) Inc() int32 {
	return a.Add(1)
}

// Dec atomically decrements the value and returns the result.
func (a *Int32) Dec() int32 {
	return a.Sub(1)
}

// CAS is an atomic compare-and-swap; it reports whether the swap happened.
func (a *Int32) CAS(old, new int32) bool {
	return atomic.CompareAndSwapInt32(&a.v, old, new)
}
78
// Int64 is an atomic wrapper around an int64.
type Int64 struct{ v int64 }

// NewInt64 creates an Int64 holding i.
func NewInt64(i int64) *Int64 {
	return &Int64{v: i}
}

// Load atomically reads the wrapped value.
func (a *Int64) Load() int64 {
	return atomic.LoadInt64(&a.v)
}

// Store atomically writes n.
func (a *Int64) Store(n int64) {
	atomic.StoreInt64(&a.v, n)
}

// Swap atomically replaces the value with n and returns the previous value.
func (a *Int64) Swap(n int64) int64 {
	return atomic.SwapInt64(&a.v, n)
}

// Add atomically adds n and returns the new value.
func (a *Int64) Add(n int64) int64 {
	return atomic.AddInt64(&a.v, n)
}

// Sub atomically subtracts n and returns the new value.
func (a *Int64) Sub(n int64) int64 {
	return atomic.AddInt64(&a.v, -n)
}

// Inc atomically increments the value and returns the result.
func (a *Int64) Inc() int64 {
	return a.Add(1)
}

// Dec atomically decrements the value and returns the result.
func (a *Int64) Dec() int64 {
	return a.Sub(1)
}

// CAS is an atomic compare-and-swap; it reports whether the swap happened.
func (a *Int64) CAS(old, new int64) bool {
	return atomic.CompareAndSwapInt64(&a.v, old, new)
}
126
// Uint32 is an atomic wrapper around an uint32.
type Uint32 struct{ v uint32 }

// NewUint32 creates a Uint32 holding i.
func NewUint32(i uint32) *Uint32 {
	return &Uint32{v: i}
}

// Load atomically reads the wrapped value.
func (a *Uint32) Load() uint32 {
	return atomic.LoadUint32(&a.v)
}

// Store atomically writes n.
func (a *Uint32) Store(n uint32) {
	atomic.StoreUint32(&a.v, n)
}

// Swap atomically replaces the value with n and returns the previous value.
func (a *Uint32) Swap(n uint32) uint32 {
	return atomic.SwapUint32(&a.v, n)
}

// Add atomically adds n and returns the new value.
func (a *Uint32) Add(n uint32) uint32 {
	return atomic.AddUint32(&a.v, n)
}

// Sub atomically subtracts n and returns the new value.
func (a *Uint32) Sub(n uint32) uint32 {
	// Two's-complement negation: adding -n is identical to adding ^(n - 1).
	return atomic.AddUint32(&a.v, -n)
}

// Inc atomically increments the value and returns the result.
func (a *Uint32) Inc() uint32 {
	return a.Add(1)
}

// Dec atomically decrements the value and returns the result.
func (a *Uint32) Dec() uint32 {
	return a.Sub(1)
}

// CAS is an atomic compare-and-swap; it reports whether the swap happened.
func (a *Uint32) CAS(old, new uint32) bool {
	return atomic.CompareAndSwapUint32(&a.v, old, new)
}
174
// Uint64 is an atomic wrapper around a uint64.
type Uint64 struct{ v uint64 }

// NewUint64 creates a Uint64 holding i.
func NewUint64(i uint64) *Uint64 {
	return &Uint64{v: i}
}

// Load atomically reads the wrapped value.
func (a *Uint64) Load() uint64 {
	return atomic.LoadUint64(&a.v)
}

// Store atomically writes n.
func (a *Uint64) Store(n uint64) {
	atomic.StoreUint64(&a.v, n)
}

// Swap atomically replaces the value with n and returns the previous value.
func (a *Uint64) Swap(n uint64) uint64 {
	return atomic.SwapUint64(&a.v, n)
}

// Add atomically adds n and returns the new value.
func (a *Uint64) Add(n uint64) uint64 {
	return atomic.AddUint64(&a.v, n)
}

// Sub atomically subtracts n and returns the new value.
func (a *Uint64) Sub(n uint64) uint64 {
	// Two's-complement negation: adding -n is identical to adding ^(n - 1).
	return atomic.AddUint64(&a.v, -n)
}

// Inc atomically increments the value and returns the result.
func (a *Uint64) Inc() uint64 {
	return a.Add(1)
}

// Dec atomically decrements the value and returns the result.
func (a *Uint64) Dec() uint64 {
	return a.Sub(1)
}

// CAS is an atomic compare-and-swap; it reports whether the swap happened.
func (a *Uint64) CAS(old, new uint64) bool {
	return atomic.CompareAndSwapUint64(&a.v, old, new)
}
222
// Bool is an atomic Boolean, stored as the low bit of a uint32.
type Bool struct{ v uint32 }

// NewBool creates a Bool set to initial.
func NewBool(initial bool) *Bool {
	return &Bool{v: boolToInt(initial)}
}

// Load atomically reads the Boolean.
func (b *Bool) Load() bool {
	return truthy(atomic.LoadUint32(&b.v))
}

// Store atomically writes new.
func (b *Bool) Store(new bool) {
	atomic.StoreUint32(&b.v, boolToInt(new))
}

// Swap atomically sets the Boolean to new and returns the previous value.
func (b *Bool) Swap(new bool) bool {
	return truthy(atomic.SwapUint32(&b.v, boolToInt(new)))
}

// CAS is an atomic compare-and-swap; it reports whether the swap happened.
func (b *Bool) CAS(old, new bool) bool {
	return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new))
}

// Toggle atomically negates the Boolean and returns the previous value.
func (b *Bool) Toggle() bool {
	// Incrementing flips the low bit; the pre-increment value is the old state.
	return truthy(atomic.AddUint32(&b.v, 1) - 1)
}

// truthy reports whether the low bit of n is set.
func truthy(n uint32) bool {
	return n&1 == 1
}

// boolToInt maps true to 1 and false to 0.
func boolToInt(b bool) uint32 {
	if b {
		return 1
	}
	return 0
}
266
// Float64 is an atomic wrapper around float64, stored as its IEEE-754 bit
// pattern in a uint64.
type Float64 struct {
	v uint64
}

// NewFloat64 creates a Float64 holding f.
func NewFloat64(f float64) *Float64 {
	return &Float64{v: math.Float64bits(f)}
}

// Load atomically reads the wrapped value.
func (f *Float64) Load() float64 {
	return math.Float64frombits(atomic.LoadUint64(&f.v))
}

// Store atomically writes s.
func (f *Float64) Store(s float64) {
	atomic.StoreUint64(&f.v, math.Float64bits(s))
}

// CAS is an atomic compare-and-swap on the bit patterns of the floats; it
// reports whether the swap happened.
func (f *Float64) CAS(old, new float64) bool {
	return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new))
}

// Add atomically adds s and returns the new value, retrying the CAS loop
// until no concurrent writer interferes.
func (f *Float64) Add(s float64) float64 {
	for {
		cur := f.Load()
		next := cur + s
		if f.CAS(cur, next) {
			return next
		}
	}
}

// Sub atomically subtracts s and returns the new value.
func (f *Float64) Sub(s float64) float64 {
	return f.Add(-s)
}
307
308// Duration is an atomic wrapper around time.Duration
309// https://godoc.org/time#Duration
310type Duration struct {
311 v Int64
312}
313
314// NewDuration creates a Duration.
315func NewDuration(d time.Duration) *Duration {
316 return &Duration{v: *NewInt64(int64(d))}
317}
318
319// Load atomically loads the wrapped value.
320func (d *Duration) Load() time.Duration {
321 return time.Duration(d.v.Load())
322}
323
324// Store atomically stores the passed value.
325func (d *Duration) Store(n time.Duration) {
326 d.v.Store(int64(n))
327}
328
329// Add atomically adds to the wrapped time.Duration and returns the new value.
330func (d *Duration) Add(n time.Duration) time.Duration {
331 return time.Duration(d.v.Add(int64(n)))
332}
333
334// Sub atomically subtracts from the wrapped time.Duration and returns the new value.
335func (d *Duration) Sub(n time.Duration) time.Duration {
336 return time.Duration(d.v.Sub(int64(n)))
337}
338
339// Swap atomically swaps the wrapped time.Duration and returns the old value.
340func (d *Duration) Swap(n time.Duration) time.Duration {
341 return time.Duration(d.v.Swap(int64(n)))
342}
343
344// CAS is an atomic compare-and-swap.
345func (d *Duration) CAS(old, new time.Duration) bool {
346 return d.v.CAS(int64(old), int64(new))
347}
348
// Value shadows the type of the same name from sync/atomic.
// https://godoc.org/sync/atomic#Value
type Value struct{ atomic.Value }
diff --git a/vendor/go.uber.org/atomic/glide.lock b/vendor/go.uber.org/atomic/glide.lock
new file mode 100644
index 0000000..3c72c59
--- /dev/null
+++ b/vendor/go.uber.org/atomic/glide.lock
@@ -0,0 +1,17 @@
1hash: f14d51408e3e0e4f73b34e4039484c78059cd7fc5f4996fdd73db20dc8d24f53
2updated: 2016-10-27T00:10:51.16960137-07:00
3imports: []
4testImports:
5- name: github.com/davecgh/go-spew
6 version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
7 subpackages:
8 - spew
9- name: github.com/pmezard/go-difflib
10 version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
11 subpackages:
12 - difflib
13- name: github.com/stretchr/testify
14 version: d77da356e56a7428ad25149ca77381849a6a5232
15 subpackages:
16 - assert
17 - require
diff --git a/vendor/go.uber.org/atomic/glide.yaml b/vendor/go.uber.org/atomic/glide.yaml
new file mode 100644
index 0000000..4cf608e
--- /dev/null
+++ b/vendor/go.uber.org/atomic/glide.yaml
@@ -0,0 +1,6 @@
1package: go.uber.org/atomic
2testImport:
3- package: github.com/stretchr/testify
4 subpackages:
5 - assert
6 - require
diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go
new file mode 100644
index 0000000..ede8136
--- /dev/null
+++ b/vendor/go.uber.org/atomic/string.go
@@ -0,0 +1,49 @@
1// Copyright (c) 2016 Uber Technologies, Inc.
2//
3// Permission is hereby granted, free of charge, to any person obtaining a copy
4// of this software and associated documentation files (the "Software"), to deal
5// in the Software without restriction, including without limitation the rights
6// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7// copies of the Software, and to permit persons to whom the Software is
8// furnished to do so, subject to the following conditions:
9//
10// The above copyright notice and this permission notice shall be included in
11// all copies or substantial portions of the Software.
12//
13// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19// THE SOFTWARE.
20
21package atomic
22
23// String is an atomic type-safe wrapper around Value for strings.
24type String struct{ v Value }
25
26// NewString creates a String.
27func NewString(str string) *String {
28 s := &String{}
29 if str != "" {
30 s.Store(str)
31 }
32 return s
33}
34
35// Load atomically loads the wrapped string.
36func (s *String) Load() string {
37 v := s.v.Load()
38 if v == nil {
39 return ""
40 }
41 return v.(string)
42}
43
44// Store atomically stores the passed string.
45// Note: Converting the string to an interface{} to store in the Value
46// requires an allocation.
47func (s *String) Store(str string) {
48 s.v.Store(str)
49}
diff --git a/vendor/go.uber.org/multierr/.codecov.yml b/vendor/go.uber.org/multierr/.codecov.yml
new file mode 100644
index 0000000..6d4d1be
--- /dev/null
+++ b/vendor/go.uber.org/multierr/.codecov.yml
@@ -0,0 +1,15 @@
1coverage:
2 range: 80..100
3 round: down
4 precision: 2
5
6 status:
7 project: # measuring the overall project coverage
8 default: # context, you can create multiple ones with custom titles
9 enabled: yes # must be yes|true to enable this status
10 target: 100 # specify the target coverage for each commit status
11 # option: "auto" (must increase from parent commit or pull request base)
12 # option: "X%" a static target percentage to hit
13 if_not_found: success # if parent is not found report status as success, error, or failure
14 if_ci_failed: error # if ci fails report status as success, error, or failure
15
diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore
new file mode 100644
index 0000000..61ead86
--- /dev/null
+++ b/vendor/go.uber.org/multierr/.gitignore
@@ -0,0 +1 @@
/vendor
diff --git a/vendor/go.uber.org/multierr/.travis.yml b/vendor/go.uber.org/multierr/.travis.yml
new file mode 100644
index 0000000..5ffa8fe
--- /dev/null
+++ b/vendor/go.uber.org/multierr/.travis.yml
@@ -0,0 +1,33 @@
1sudo: false
2language: go
3go_import_path: go.uber.org/multierr
4
5env:
6 global:
7 - GO15VENDOREXPERIMENT=1
8
9go:
10 - 1.7
11 - 1.8
12 - tip
13
14cache:
15 directories:
16 - vendor
17
18before_install:
19- go version
20
21install:
22- |
23 set -e
24 make install_ci
25
26script:
27- |
28 set -e
29 make lint
30 make test_ci
31
32after_success:
33- bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md
new file mode 100644
index 0000000..898445d
--- /dev/null
+++ b/vendor/go.uber.org/multierr/CHANGELOG.md
@@ -0,0 +1,28 @@
1Releases
2========
3
4v1.1.0 (2017-06-30)
5===================
6
7- Added an `Errors(error) []error` function to extract the underlying list of
8 errors for a multierr error.
9
10
11v1.0.0 (2017-05-31)
12===================
13
14No changes since v0.2.0. This release is committing to making no breaking
15changes to the current API in the 1.X series.
16
17
18v0.2.0 (2017-04-11)
19===================
20
21- Repeatedly appending to the same error is now faster due to fewer
22 allocations.
23
24
25v0.1.0 (2017-03-31)
26===================
27
28- Initial release
diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt
new file mode 100644
index 0000000..858e024
--- /dev/null
+++ b/vendor/go.uber.org/multierr/LICENSE.txt
@@ -0,0 +1,19 @@
1Copyright (c) 2017 Uber Technologies, Inc.
2
3Permission is hereby granted, free of charge, to any person obtaining a copy
4of this software and associated documentation files (the "Software"), to deal
5in the Software without restriction, including without limitation the rights
6to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7copies of the Software, and to permit persons to whom the Software is
8furnished to do so, subject to the following conditions:
9
10The above copyright notice and this permission notice shall be included in
11all copies or substantial portions of the Software.
12
13THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19THE SOFTWARE.
diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile
new file mode 100644
index 0000000..a7437d0
--- /dev/null
+++ b/vendor/go.uber.org/multierr/Makefile
@@ -0,0 +1,74 @@
1export GO15VENDOREXPERIMENT=1
2
3PACKAGES := $(shell glide nv)
4
5GO_FILES := $(shell \
6 find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
7 -o -name '*.go' -print | cut -b3-)
8
9.PHONY: install
10install:
11 glide --version || go get github.com/Masterminds/glide
12 glide install
13
14.PHONY: build
15build:
16 go build -i $(PACKAGES)
17
18.PHONY: test
19test:
20 go test -cover -race $(PACKAGES)
21
22.PHONY: gofmt
23gofmt:
24 $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
25 @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
26 @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false)
27
28.PHONY: govet
29govet:
30 $(eval VET_LOG := $(shell mktemp -t govet.XXXXX))
31 @go vet $(PACKAGES) 2>&1 \
32 | grep -v '^exit status' > $(VET_LOG) || true
33 @[ ! -s "$(VET_LOG)" ] || (echo "govet failed:" | cat - $(VET_LOG) && false)
34
35.PHONY: golint
36golint:
37 @go get github.com/golang/lint/golint
38 $(eval LINT_LOG := $(shell mktemp -t golint.XXXXX))
39 @cat /dev/null > $(LINT_LOG)
40 @$(foreach pkg, $(PACKAGES), golint $(pkg) >> $(LINT_LOG) || true;)
41 @[ ! -s "$(LINT_LOG)" ] || (echo "golint failed:" | cat - $(LINT_LOG) && false)
42
43.PHONY: staticcheck
44staticcheck:
45 @go get honnef.co/go/tools/cmd/staticcheck
46 $(eval STATICCHECK_LOG := $(shell mktemp -t staticcheck.XXXXX))
47 @staticcheck $(PACKAGES) 2>&1 > $(STATICCHECK_LOG) || true
48 @[ ! -s "$(STATICCHECK_LOG)" ] || (echo "staticcheck failed:" | cat - $(STATICCHECK_LOG) && false)
49
50.PHONY: lint
51lint: gofmt govet golint staticcheck
52
53.PHONY: cover
54cover:
55 ./scripts/cover.sh $(shell go list $(PACKAGES))
56 go tool cover -html=cover.out -o cover.html
57
58update-license:
59 @go get go.uber.org/tools/update-license
60 @update-license \
61 $(shell go list -json $(PACKAGES) | \
62 jq -r '.Dir + "/" + (.GoFiles | .[])')
63
64##############################################################################
65
66.PHONY: install_ci
67install_ci: install
68 go get github.com/wadey/gocovmerge
69 go get github.com/mattn/goveralls
70 go get golang.org/x/tools/cmd/cover
71
72.PHONY: test_ci
73test_ci: install_ci
74 ./scripts/cover.sh $(shell go list $(PACKAGES))
diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md
new file mode 100644
index 0000000..065088f
--- /dev/null
+++ b/vendor/go.uber.org/multierr/README.md
@@ -0,0 +1,23 @@
1# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
2
3`multierr` allows combining one or more Go `error`s together.
4
5## Installation
6
7 go get -u go.uber.org/multierr
8
9## Status
10
11Stable: No breaking changes will be made before 2.0.
12
13-------------------------------------------------------------------------------
14
15Released under the [MIT License].
16
17[MIT License]: LICENSE.txt
18[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg
19[doc]: https://godoc.org/go.uber.org/multierr
20[ci-img]: https://travis-ci.org/uber-go/multierr.svg?branch=master
21[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg
22[ci]: https://travis-ci.org/uber-go/multierr
23[cov]: https://codecov.io/gh/uber-go/multierr
diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go
new file mode 100644
index 0000000..de6ce47
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error.go
@@ -0,0 +1,401 @@
1// Copyright (c) 2017 Uber Technologies, Inc.
2//
3// Permission is hereby granted, free of charge, to any person obtaining a copy
4// of this software and associated documentation files (the "Software"), to deal
5// in the Software without restriction, including without limitation the rights
6// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7// copies of the Software, and to permit persons to whom the Software is
8// furnished to do so, subject to the following conditions:
9//
10// The above copyright notice and this permission notice shall be included in
11// all copies or substantial portions of the Software.
12//
13// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19// THE SOFTWARE.
20
21// Package multierr allows combining one or more errors together.
22//
23// Overview
24//
25// Errors can be combined with the use of the Combine function.
26//
27// multierr.Combine(
28// reader.Close(),
29// writer.Close(),
30// conn.Close(),
31// )
32//
33// If only two errors are being combined, the Append function may be used
34// instead.
35//
36// err = multierr.Combine(reader.Close(), writer.Close())
37//
38// This makes it possible to record resource cleanup failures from deferred
39// blocks with the help of named return values.
40//
41// func sendRequest(req Request) (err error) {
42// conn, err := openConnection()
43// if err != nil {
44// return err
45// }
46// defer func() {
47// err = multierr.Append(err, conn.Close())
48// }()
49// // ...
50// }
51//
52// The underlying list of errors for a returned error object may be retrieved
53// with the Errors function.
54//
55// errors := multierr.Errors(err)
56// if len(errors) > 0 {
57// fmt.Println("The following errors occurred:")
58// }
59//
60// Advanced Usage
61//
62// Errors returned by Combine and Append MAY implement the following
63// interface.
64//
65// type errorGroup interface {
66// // Returns a slice containing the underlying list of errors.
67// //
68// // This slice MUST NOT be modified by the caller.
69// Errors() []error
70// }
71//
72// Note that if you need access to list of errors behind a multierr error, you
73// should prefer using the Errors function. That said, if you need cheap
74// read-only access to the underlying errors slice, you can attempt to cast
75// the error to this interface. You MUST handle the failure case gracefully
76// because errors returned by Combine and Append are not guaranteed to
77// implement this interface.
78//
79// var errors []error
80// group, ok := err.(errorGroup)
81// if ok {
82// errors = group.Errors()
83// } else {
84// errors = []error{err}
85// }
86package multierr // import "go.uber.org/multierr"
87
88import (
89 "bytes"
90 "fmt"
91 "io"
92 "strings"
93 "sync"
94
95 "go.uber.org/atomic"
96)
97
var (
	// Separator placed between error messages when rendering on a single
	// line.
	_singlelineSeparator = []byte("; ")

	_newline = []byte("\n")

	// Prefix for multi-line messages
	_multilinePrefix = []byte("the following errors occurred:")

	// Prefix for the first and following lines of an item in a list of
	// multi-line error messages.
	//
	// For example, if a single item is:
	//
	// 	foo
	// 	bar
	//
	// It will become,
	//
	// 	 - foo
	// 	   bar
	_multilineSeparator = []byte("\n - ")
	_multilineIndent    = []byte(" ")
)

// _bufferPool recycles bytes.Buffers used to render error messages, keeping
// Error() allocation-light on hot paths.
var _bufferPool = sync.Pool{
	New: func() interface{} {
		return &bytes.Buffer{}
	},
}
129
130type errorGroup interface {
131 Errors() []error
132}
133
134// Errors returns a slice containing zero or more errors that the supplied
135// error is composed of. If the error is nil, the returned slice is empty.
136//
137// err := multierr.Append(r.Close(), w.Close())
138// errors := multierr.Errors(err)
139//
140// If the error is not composed of other errors, the returned slice contains
141// just the error that was passed in.
142//
143// Callers of this function are free to modify the returned slice.
144func Errors(err error) []error {
145 if err == nil {
146 return nil
147 }
148
149 // Note that we're casting to multiError, not errorGroup. Our contract is
150 // that returned errors MAY implement errorGroup. Errors, however, only
151 // has special behavior for multierr-specific error objects.
152 //
153 // This behavior can be expanded in the future but I think it's prudent to
154 // start with as little as possible in terms of contract and possibility
155 // of misuse.
156 eg, ok := err.(*multiError)
157 if !ok {
158 return []error{err}
159 }
160
161 errors := eg.Errors()
162 result := make([]error, len(errors))
163 copy(result, errors)
164 return result
165}
166
167// multiError is an error that holds one or more errors.
168//
169// An instance of this is guaranteed to be non-empty and flattened. That is,
170// none of the errors inside multiError are other multiErrors.
171//
172// multiError formats to a semi-colon delimited list of error messages with
173// %v and with a more readable multi-line format with %+v.
174type multiError struct {
175 copyNeeded atomic.Bool
176 errors []error
177}
178
179var _ errorGroup = (*multiError)(nil)
180
181// Errors returns the list of underlying errors.
182//
183// This slice MUST NOT be modified.
184func (merr *multiError) Errors() []error {
185 if merr == nil {
186 return nil
187 }
188 return merr.errors
189}
190
191func (merr *multiError) Error() string {
192 if merr == nil {
193 return ""
194 }
195
196 buff := _bufferPool.Get().(*bytes.Buffer)
197 buff.Reset()
198
199 merr.writeSingleline(buff)
200
201 result := buff.String()
202 _bufferPool.Put(buff)
203 return result
204}
205
206func (merr *multiError) Format(f fmt.State, c rune) {
207 if c == 'v' && f.Flag('+') {
208 merr.writeMultiline(f)
209 } else {
210 merr.writeSingleline(f)
211 }
212}
213
214func (merr *multiError) writeSingleline(w io.Writer) {
215 first := true
216 for _, item := range merr.errors {
217 if first {
218 first = false
219 } else {
220 w.Write(_singlelineSeparator)
221 }
222 io.WriteString(w, item.Error())
223 }
224}
225
226func (merr *multiError) writeMultiline(w io.Writer) {
227 w.Write(_multilinePrefix)
228 for _, item := range merr.errors {
229 w.Write(_multilineSeparator)
230 writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item))
231 }
232}
233
234// Writes s to the writer with the given prefix added before each line after
235// the first.
236func writePrefixLine(w io.Writer, prefix []byte, s string) {
237 first := true
238 for len(s) > 0 {
239 if first {
240 first = false
241 } else {
242 w.Write(prefix)
243 }
244
245 idx := strings.IndexByte(s, '\n')
246 if idx < 0 {
247 idx = len(s) - 1
248 }
249
250 io.WriteString(w, s[:idx+1])
251 s = s[idx+1:]
252 }
253}
254
// inspectResult summarizes a slice of errors so that fromSlice can allocate
// exactly the space it needs.
type inspectResult struct {
	// Number of top-level non-nil errors
	Count int

	// Total number of errors including multiErrors
	Capacity int

	// Index of the first non-nil error in the list. Value is meaningless if
	// Count is zero.
	FirstErrorIdx int

	// Whether the list contains at least one multiError
	ContainsMultiError bool
}
269
270// Inspects the given slice of errors so that we can efficiently allocate
271// space for it.
272func inspect(errors []error) (res inspectResult) {
273 first := true
274 for i, err := range errors {
275 if err == nil {
276 continue
277 }
278
279 res.Count++
280 if first {
281 first = false
282 res.FirstErrorIdx = i
283 }
284
285 if merr, ok := err.(*multiError); ok {
286 res.Capacity += len(merr.errors)
287 res.ContainsMultiError = true
288 } else {
289 res.Capacity++
290 }
291 }
292 return
293}
294
295// fromSlice converts the given list of errors into a single error.
296func fromSlice(errors []error) error {
297 res := inspect(errors)
298 switch res.Count {
299 case 0:
300 return nil
301 case 1:
302 // only one non-nil entry
303 return errors[res.FirstErrorIdx]
304 case len(errors):
305 if !res.ContainsMultiError {
306 // already flat
307 return &multiError{errors: errors}
308 }
309 }
310
311 nonNilErrs := make([]error, 0, res.Capacity)
312 for _, err := range errors[res.FirstErrorIdx:] {
313 if err == nil {
314 continue
315 }
316
317 if nested, ok := err.(*multiError); ok {
318 nonNilErrs = append(nonNilErrs, nested.errors...)
319 } else {
320 nonNilErrs = append(nonNilErrs, err)
321 }
322 }
323
324 return &multiError{errors: nonNilErrs}
325}
326
327// Combine combines the passed errors into a single error.
328//
329// If zero arguments were passed or if all items are nil, a nil error is
330// returned.
331//
332// Combine(nil, nil) // == nil
333//
334// If only a single error was passed, it is returned as-is.
335//
336// Combine(err) // == err
337//
338// Combine skips over nil arguments so this function may be used to combine
339// together errors from operations that fail independently of each other.
340//
341// multierr.Combine(
342// reader.Close(),
343// writer.Close(),
344// pipe.Close(),
345// )
346//
347// If any of the passed errors is a multierr error, it will be flattened along
348// with the other errors.
349//
350// multierr.Combine(multierr.Combine(err1, err2), err3)
351// // is the same as
352// multierr.Combine(err1, err2, err3)
353//
354// The returned error formats into a readable multi-line error message if
355// formatted with %+v.
356//
357// fmt.Sprintf("%+v", multierr.Combine(err1, err2))
358func Combine(errors ...error) error {
359 return fromSlice(errors)
360}
361
362// Append appends the given errors together. Either value may be nil.
363//
364// This function is a specialization of Combine for the common case where
365// there are only two errors.
366//
367// err = multierr.Append(reader.Close(), writer.Close())
368//
369// The following pattern may also be used to record failure of deferred
370// operations without losing information about the original error.
371//
372// func doSomething(..) (err error) {
373// f := acquireResource()
374// defer func() {
375// err = multierr.Append(err, f.Close())
376// }()
377func Append(left error, right error) error {
378 switch {
379 case left == nil:
380 return right
381 case right == nil:
382 return left
383 }
384
385 if _, ok := right.(*multiError); !ok {
386 if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) {
387 // Common case where the error on the left is constantly being
388 // appended to.
389 errs := append(l.errors, right)
390 return &multiError{errors: errs}
391 } else if !ok {
392 // Both errors are single errors.
393 return &multiError{errors: []error{left, right}}
394 }
395 }
396
397 // Either right or both, left and right, are multiErrors. Rely on usual
398 // expensive logic.
399 errors := [2]error{left, right}
400 return fromSlice(errors[0:])
401}
diff --git a/vendor/go.uber.org/multierr/glide.lock b/vendor/go.uber.org/multierr/glide.lock
new file mode 100644
index 0000000..f9ea94c
--- /dev/null
+++ b/vendor/go.uber.org/multierr/glide.lock
@@ -0,0 +1,19 @@
1hash: b53b5e9a84b9cb3cc4b2d0499e23da2feca1eec318ce9bb717ecf35bf24bf221
2updated: 2017-04-10T13:34:45.671678062-07:00
3imports:
4- name: go.uber.org/atomic
5 version: 3b8db5e93c4c02efbc313e17b2e796b0914a01fb
6testImports:
7- name: github.com/davecgh/go-spew
8 version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9
9 subpackages:
10 - spew
11- name: github.com/pmezard/go-difflib
12 version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
13 subpackages:
14 - difflib
15- name: github.com/stretchr/testify
16 version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0
17 subpackages:
18 - assert
19 - require
diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml
new file mode 100644
index 0000000..6ef084e
--- /dev/null
+++ b/vendor/go.uber.org/multierr/glide.yaml
@@ -0,0 +1,8 @@
1package: go.uber.org/multierr
2import:
3- package: go.uber.org/atomic
4 version: ^1
5testImport:
6- package: github.com/stretchr/testify
7 subpackages:
8 - assert
diff --git a/vendor/modules.txt b/vendor/modules.txt
index a2612b1..67b2611 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -15,6 +15,8 @@ github.com/ema/qdisc
15github.com/godbus/dbus 15github.com/godbus/dbus
16# github.com/golang/protobuf v1.3.1 16# github.com/golang/protobuf v1.3.1
17github.com/golang/protobuf/proto 17github.com/golang/protobuf/proto
18# github.com/hodgesds/perf-utils v0.0.6
19github.com/hodgesds/perf-utils
18# github.com/konsorten/go-windows-terminal-sequences v1.0.2 20# github.com/konsorten/go-windows-terminal-sequences v1.0.2
19github.com/konsorten/go-windows-terminal-sequences 21github.com/konsorten/go-windows-terminal-sequences
20# github.com/lufia/iostat v0.0.0-20170605150913-9f7362b77ad3 22# github.com/lufia/iostat v0.0.0-20170605150913-9f7362b77ad3
@@ -57,6 +59,10 @@ github.com/siebenmann/go-kstat
57github.com/sirupsen/logrus 59github.com/sirupsen/logrus
58# github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a 60# github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a
59github.com/soundcloud/go-runit/runit 61github.com/soundcloud/go-runit/runit
62# go.uber.org/atomic v1.3.2
63go.uber.org/atomic
64# go.uber.org/multierr v1.1.0
65go.uber.org/multierr
60# golang.org/x/net v0.0.0-20190328230028-74de082e2cca 66# golang.org/x/net v0.0.0-20190328230028-74de082e2cca
61golang.org/x/net/ipv4 67golang.org/x/net/ipv4
62golang.org/x/net/bpf 68golang.org/x/net/bpf