Browse code

Vendor Prometheus dependencies:

Bump github.com/prometheus/client_golang to v1.6.0
Bump github.com/prometheus/client_model to v0.2.0
Bump github.com/prometheus/common to v0.9.1
Bump github.com/prometheus/procfs to v0.0.11

Signed-off-by: Jintao Zhang <zhangjintao9020@gmail.com>

Jintao Zhang authored on 2020/07/31 02:02:57
Showing 31 changed files
... ...
@@ -149,11 +149,11 @@ github.com/coreos/pkg                               3ac0863d7acf3bc44daf49afef89
149 149
 code.cloudfoundry.org/clock                         02e53af36e6c978af692887ed449b74026d76fec # v1.0.0
150 150
 
151 151
 # prometheus
152
-github.com/prometheus/client_golang                 c42bebe5a5cddfc6b28cd639103369d8a75dfa89 # v1.3.0
152
+github.com/prometheus/client_golang                 6edbbd9e560190e318cdc5b4d3e630b442858380 # v1.6.0
153 153
 github.com/beorn7/perks                             37c8de3658fcb183f997c4e13e8337516ab753e6 # v1.0.1
154
-github.com/prometheus/client_model                  d1d2010b5beead3fa1c5f271a5cf626e40b3ad6e # v0.1.0
155
-github.com/prometheus/common                        287d3e634a1e550c9e463dd7e5a75a422c614505 # v0.7.0
156
-github.com/prometheus/procfs                        6d489fc7f1d9cd890a250f3ea3431b1744b9623f # v0.0.8
154
+github.com/prometheus/client_model                  7bc5445566f0fe75b15de23e6b93886e982d7bf9 # v0.2.0
155
+github.com/prometheus/common                        d978bcb1309602d68bb4ba69cf3f8ed900e07308 # v0.9.1
156
+github.com/prometheus/procfs                        46159f73e74d1cb8dc223deef9b2d049286f46b1 # v0.0.11
157 157
 github.com/matttproud/golang_protobuf_extensions    c12348ce28de40eed0136aa2b644d0ee0650e56c # v1.0.1
158 158
 github.com/pkg/errors                               614d223910a179a466c1767a985424175c39b465 # v0.9.1
159 159
 github.com/grpc-ecosystem/go-grpc-prometheus        c225b8c3b01faf2899099b768856a9e916e5087b # v1.2.0
... ...
@@ -3,12 +3,16 @@ module github.com/prometheus/client_golang
3 3
 require (
4 4
 	github.com/beorn7/perks v1.0.1
5 5
 	github.com/cespare/xxhash/v2 v2.1.1
6
-	github.com/golang/protobuf v1.3.2
7
-	github.com/json-iterator/go v1.1.8
8
-	github.com/prometheus/client_model v0.1.0
9
-	github.com/prometheus/common v0.7.0
10
-	github.com/prometheus/procfs v0.0.8
11
-	golang.org/x/sys v0.0.0-20191220142924-d4481acd189f
6
+	github.com/golang/protobuf v1.4.0
7
+	github.com/json-iterator/go v1.1.9
8
+	github.com/kr/pretty v0.1.0 // indirect
9
+	github.com/prometheus/client_model v0.2.0
10
+	github.com/prometheus/common v0.9.1
11
+	github.com/prometheus/procfs v0.0.11
12
+	github.com/stretchr/testify v1.4.0 // indirect
13
+	golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f
14
+	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
15
+	gopkg.in/yaml.v2 v2.2.5 // indirect
12 16
 )
13 17
 
14 18
 go 1.11
... ...
@@ -17,6 +17,7 @@ import (
17 17
 	"errors"
18 18
 	"math"
19 19
 	"sync/atomic"
20
+	"time"
20 21
 
21 22
 	dto "github.com/prometheus/client_model/go"
22 23
 )
... ...
@@ -42,11 +43,27 @@ type Counter interface {
42 42
 	Add(float64)
43 43
 }
44 44
 
45
+// ExemplarAdder is implemented by Counters that offer the option of adding a
46
+// value to the Counter together with an exemplar. Its AddWithExemplar method
47
+// works like the Add method of the Counter interface but also replaces the
48
+// currently saved exemplar (if any) with a new one, created from the provided
49
+// value, the current time as timestamp, and the provided labels. Empty Labels
50
+// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
51
+// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
52
+// of the provided labels are invalid, or if the provided labels contain more
53
+// than 64 runes in total.
54
+type ExemplarAdder interface {
55
+	AddWithExemplar(value float64, exemplar Labels)
56
+}
57
+
45 58
 // CounterOpts is an alias for Opts. See there for doc comments.
46 59
 type CounterOpts Opts
47 60
 
48 61
 // NewCounter creates a new Counter based on the provided CounterOpts.
49 62
 //
63
+// The returned implementation also implements ExemplarAdder. It is safe to
64
+// perform the corresponding type assertion.
65
+//
50 66
 // The returned implementation tracks the counter value in two separate
51 67
 // variables, a float64 and a uint64. The latter is used to track calls of the
52 68
 // Inc method and calls of the Add method with a value that can be represented
... ...
@@ -61,7 +78,7 @@ func NewCounter(opts CounterOpts) Counter {
61 61
 		nil,
62 62
 		opts.ConstLabels,
63 63
 	)
64
-	result := &counter{desc: desc, labelPairs: desc.constLabelPairs}
64
+	result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
65 65
 	result.init(result) // Init self-collection.
66 66
 	return result
67 67
 }
... ...
@@ -78,6 +95,9 @@ type counter struct {
78 78
 	desc *Desc
79 79
 
80 80
 	labelPairs []*dto.LabelPair
81
+	exemplar   atomic.Value // Containing nil or a *dto.Exemplar.
82
+
83
+	now func() time.Time // To mock out time.Now() for testing.
81 84
 }
82 85
 
83 86
 func (c *counter) Desc() *Desc {
... ...
@@ -88,6 +108,7 @@ func (c *counter) Add(v float64) {
88 88
 	if v < 0 {
89 89
 		panic(errors.New("counter cannot decrease in value"))
90 90
 	}
91
+
91 92
 	ival := uint64(v)
92 93
 	if float64(ival) == v {
93 94
 		atomic.AddUint64(&c.valInt, ival)
... ...
@@ -103,6 +124,11 @@ func (c *counter) Add(v float64) {
103 103
 	}
104 104
 }
105 105
 
106
+func (c *counter) AddWithExemplar(v float64, e Labels) {
107
+	c.Add(v)
108
+	c.updateExemplar(v, e)
109
+}
110
+
106 111
 func (c *counter) Inc() {
107 112
 	atomic.AddUint64(&c.valInt, 1)
108 113
 }
... ...
@@ -112,7 +138,23 @@ func (c *counter) Write(out *dto.Metric) error {
112 112
 	ival := atomic.LoadUint64(&c.valInt)
113 113
 	val := fval + float64(ival)
114 114
 
115
-	return populateMetric(CounterValue, val, c.labelPairs, out)
115
+	var exemplar *dto.Exemplar
116
+	if e := c.exemplar.Load(); e != nil {
117
+		exemplar = e.(*dto.Exemplar)
118
+	}
119
+
120
+	return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
121
+}
122
+
123
+func (c *counter) updateExemplar(v float64, l Labels) {
124
+	if l == nil {
125
+		return
126
+	}
127
+	e, err := newExemplar(v, c.now(), l)
128
+	if err != nil {
129
+		panic(err)
130
+	}
131
+	c.exemplar.Store(e)
116 132
 }
117 133
 
118 134
 // CounterVec is a Collector that bundles a set of Counters that all share the
... ...
@@ -138,7 +180,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
138 138
 			if len(lvs) != len(desc.variableLabels) {
139 139
 				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
140 140
 			}
141
-			result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
141
+			result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs), now: time.Now}
142 142
 			result.init(result) // Init self-collection.
143 143
 			return result
144 144
 		}),
... ...
@@ -267,6 +309,8 @@ type CounterFunc interface {
267 267
 // provided function must be concurrency-safe. The function should also honor
268 268
 // the contract for a Counter (values only go up, not down), but compliance will
269 269
 // not be checked.
270
+//
271
+// Check out the ExampleGaugeFunc examples for the similar GaugeFunc.
270 272
 func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
271 273
 	return newValueFunc(NewDesc(
272 274
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
... ...
@@ -84,25 +84,21 @@
84 84
 // of those four metric types can be found in the Prometheus docs:
85 85
 // https://prometheus.io/docs/concepts/metric_types/
86 86
 //
87
-// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
88
-// Prometheus server not to assume anything about its type.
89
-//
90
-// In addition to the fundamental metric types Gauge, Counter, Summary,
91
-// Histogram, and Untyped, a very important part of the Prometheus data model is
92
-// the partitioning of samples along dimensions called labels, which results in
87
+// In addition to the fundamental metric types Gauge, Counter, Summary, and
88
+// Histogram, a very important part of the Prometheus data model is the
89
+// partitioning of samples along dimensions called labels, which results in
93 90
 // metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
94
-// HistogramVec, and UntypedVec.
91
+// and HistogramVec.
95 92
 //
96 93
 // While only the fundamental metric types implement the Metric interface, both
97 94
 // the metrics and their vector versions implement the Collector interface. A
98 95
 // Collector manages the collection of a number of Metrics, but for convenience,
99
-// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
100
-// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
101
-// SummaryVec, HistogramVec, and UntypedVec are not.
96
+// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and
97
+// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec,
98
+// and HistogramVec are not.
102 99
 //
103 100
 // To create instances of Metrics and their vector versions, you need a suitable
104
-// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or
105
-// UntypedOpts.
101
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
106 102
 //
107 103
 // Custom Collectors and constant Metrics
108 104
 //
... ...
@@ -118,13 +114,16 @@
118 118
 // existing numbers into Prometheus Metrics during collection. An own
119 119
 // implementation of the Collector interface is perfect for that. You can create
120 120
 // Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
121
-// NewConstSummary (and their respective Must… versions). That will happen in
122
-// the Collect method. The Describe method has to return separate Desc
123
-// instances, representative of the “throw-away” metrics to be created later.
124
-// NewDesc comes in handy to create those Desc instances. Alternatively, you
125
-// could return no Desc at all, which will mark the Collector “unchecked”.  No
126
-// checks are performed at registration time, but metric consistency will still
127
-// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
121
+// NewConstSummary (and their respective Must… versions). NewConstMetric is used
122
+// for all metric types with just a float64 as their value: Counter, Gauge, and
123
+// a special “type” called Untyped. Use the latter if you are not sure if the
124
+// mirrored metric is a Counter or a Gauge. Creation of the Metric instance
125
+// happens in the Collect method. The Describe method has to return separate
126
+// Desc instances, representative of the “throw-away” metrics to be created
127
+// later.  NewDesc comes in handy to create those Desc instances. Alternatively,
128
+// you could return no Desc at all, which will mark the Collector “unchecked”.
129
+// No checks are performed at registration time, but metric consistency will
130
+// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape
128 131
 // errors. Thus, with unchecked Collectors, the responsibility to not collect
129 132
 // metrics that lead to inconsistencies in the total scrape result lies with the
130 133
 // implementer of the Collector. While this is not a desirable state, it is
... ...
@@ -123,7 +123,7 @@ func (g *gauge) Sub(val float64) {
123 123
 
124 124
 func (g *gauge) Write(out *dto.Metric) error {
125 125
 	val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
126
-	return populateMetric(GaugeValue, val, g.labelPairs, out)
126
+	return populateMetric(GaugeValue, val, g.labelPairs, nil, out)
127 127
 }
128 128
 
129 129
 // GaugeVec is a Collector that bundles a set of Gauges that all share the same
... ...
@@ -73,7 +73,7 @@ func NewGoCollector() Collector {
73 73
 			nil, nil),
74 74
 		gcDesc: NewDesc(
75 75
 			"go_gc_duration_seconds",
76
-			"A summary of the GC invocation durations.",
76
+			"A summary of the pause duration of garbage collection cycles.",
77 77
 			nil, nil),
78 78
 		goInfoDesc: NewDesc(
79 79
 			"go_info",
... ...
@@ -20,6 +20,7 @@ import (
20 20
 	"sort"
21 21
 	"sync"
22 22
 	"sync/atomic"
23
+	"time"
23 24
 
24 25
 	"github.com/golang/protobuf/proto"
25 26
 
... ...
@@ -151,6 +152,10 @@ type HistogramOpts struct {
151 151
 
152 152
 // NewHistogram creates a new Histogram based on the provided HistogramOpts. It
153 153
 // panics if the buckets in HistogramOpts are not in strictly increasing order.
154
+//
155
+// The returned implementation also implements ExemplarObserver. It is safe to
156
+// perform the corresponding type assertion. Exemplars are tracked separately
157
+// for each bucket.
154 158
 func NewHistogram(opts HistogramOpts) Histogram {
155 159
 	return newHistogram(
156 160
 		NewDesc(
... ...
@@ -188,6 +193,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
188 188
 		upperBounds: opts.Buckets,
189 189
 		labelPairs:  makeLabelPairs(desc, labelValues),
190 190
 		counts:      [2]*histogramCounts{{}, {}},
191
+		now:         time.Now,
191 192
 	}
192 193
 	for i, upperBound := range h.upperBounds {
193 194
 		if i < len(h.upperBounds)-1 {
... ...
@@ -205,9 +211,10 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
205 205
 		}
206 206
 	}
207 207
 	// Finally we know the final length of h.upperBounds and can make buckets
208
-	// for both counts:
208
+	// for both counts as well as exemplars:
209 209
 	h.counts[0].buckets = make([]uint64, len(h.upperBounds))
210 210
 	h.counts[1].buckets = make([]uint64, len(h.upperBounds))
211
+	h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
211 212
 
212 213
 	h.init(h) // Init self-collection.
213 214
 	return h
... ...
@@ -254,6 +261,9 @@ type histogram struct {
254 254
 
255 255
 	upperBounds []float64
256 256
 	labelPairs  []*dto.LabelPair
257
+	exemplars   []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
258
+
259
+	now func() time.Time // To mock out time.Now() for testing.
257 260
 }
258 261
 
259 262
 func (h *histogram) Desc() *Desc {
... ...
@@ -261,36 +271,13 @@ func (h *histogram) Desc() *Desc {
261 261
 }
262 262
 
263 263
 func (h *histogram) Observe(v float64) {
264
-	// TODO(beorn7): For small numbers of buckets (<30), a linear search is
265
-	// slightly faster than the binary search. If we really care, we could
266
-	// switch from one search strategy to the other depending on the number
267
-	// of buckets.
268
-	//
269
-	// Microbenchmarks (BenchmarkHistogramNoLabels):
270
-	// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
271
-	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
272
-	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
273
-	i := sort.SearchFloat64s(h.upperBounds, v)
274
-
275
-	// We increment h.countAndHotIdx so that the counter in the lower
276
-	// 63 bits gets incremented. At the same time, we get the new value
277
-	// back, which we can use to find the currently-hot counts.
278
-	n := atomic.AddUint64(&h.countAndHotIdx, 1)
279
-	hotCounts := h.counts[n>>63]
264
+	h.observe(v, h.findBucket(v))
265
+}
280 266
 
281
-	if i < len(h.upperBounds) {
282
-		atomic.AddUint64(&hotCounts.buckets[i], 1)
283
-	}
284
-	for {
285
-		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
286
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
287
-		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
288
-			break
289
-		}
290
-	}
291
-	// Increment count last as we take it as a signal that the observation
292
-	// is complete.
293
-	atomic.AddUint64(&hotCounts.count, 1)
267
+func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
268
+	i := h.findBucket(v)
269
+	h.observe(v, i)
270
+	h.updateExemplar(v, i, e)
294 271
 }
295 272
 
296 273
 func (h *histogram) Write(out *dto.Metric) error {
... ...
@@ -329,6 +316,18 @@ func (h *histogram) Write(out *dto.Metric) error {
329 329
 			CumulativeCount: proto.Uint64(cumCount),
330 330
 			UpperBound:      proto.Float64(upperBound),
331 331
 		}
332
+		if e := h.exemplars[i].Load(); e != nil {
333
+			his.Bucket[i].Exemplar = e.(*dto.Exemplar)
334
+		}
335
+	}
336
+	// If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly.
337
+	if e := h.exemplars[len(h.upperBounds)].Load(); e != nil {
338
+		b := &dto.Bucket{
339
+			CumulativeCount: proto.Uint64(count),
340
+			UpperBound:      proto.Float64(math.Inf(1)),
341
+			Exemplar:        e.(*dto.Exemplar),
342
+		}
343
+		his.Bucket = append(his.Bucket, b)
332 344
 	}
333 345
 
334 346
 	out.Histogram = his
... ...
@@ -352,6 +351,57 @@ func (h *histogram) Write(out *dto.Metric) error {
352 352
 	return nil
353 353
 }
354 354
 
355
+// findBucket returns the index of the bucket for the provided value, or
356
+// len(h.upperBounds) for the +Inf bucket.
357
+func (h *histogram) findBucket(v float64) int {
358
+	// TODO(beorn7): For small numbers of buckets (<30), a linear search is
359
+	// slightly faster than the binary search. If we really care, we could
360
+	// switch from one search strategy to the other depending on the number
361
+	// of buckets.
362
+	//
363
+	// Microbenchmarks (BenchmarkHistogramNoLabels):
364
+	// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
365
+	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
366
+	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
367
+	return sort.SearchFloat64s(h.upperBounds, v)
368
+}
369
+
370
+// observe is the implementation for Observe without the findBucket part.
371
+func (h *histogram) observe(v float64, bucket int) {
372
+	// We increment h.countAndHotIdx so that the counter in the lower
373
+	// 63 bits gets incremented. At the same time, we get the new value
374
+	// back, which we can use to find the currently-hot counts.
375
+	n := atomic.AddUint64(&h.countAndHotIdx, 1)
376
+	hotCounts := h.counts[n>>63]
377
+
378
+	if bucket < len(h.upperBounds) {
379
+		atomic.AddUint64(&hotCounts.buckets[bucket], 1)
380
+	}
381
+	for {
382
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
383
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
384
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
385
+			break
386
+		}
387
+	}
388
+	// Increment count last as we take it as a signal that the observation
389
+	// is complete.
390
+	atomic.AddUint64(&hotCounts.count, 1)
391
+}
392
+
393
+// updateExemplar replaces the exemplar for the provided bucket. With empty
394
+// labels, it's a no-op. It panics if any of the labels is invalid.
395
+func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
396
+	if l == nil {
397
+		return
398
+	}
399
+	e, err := newExemplar(v, h.now(), l)
400
+	if err != nil {
401
+		panic(err)
402
+	}
403
+	h.exemplars[bucket].Store(e)
404
+}
405
+
355 406
 // HistogramVec is a Collector that bundles a set of Histograms that all share the
356 407
 // same Desc, but have different values for their variable labels. This is used
357 408
 // if you want to count the same thing partitioned by various dimensions
... ...
@@ -50,3 +50,15 @@ type ObserverVec interface {
50 50
 
51 51
 	Collector
52 52
 }
53
+
54
+// ExemplarObserver is implemented by Observers that offer the option of
55
+// observing a value together with an exemplar. Its ObserveWithExemplar method
56
+// works like the Observe method of an Observer but also replaces the currently
57
+// saved exemplar (if any) with a new one, created from the provided value, the
58
+// current time as timestamp, and the provided Labels. Empty Labels will lead to
59
+// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
60
+// left in place. ObserveWithExemplar panics if any of the provided labels are
61
+// invalid or if the provided labels contain more than 64 runes in total.
62
+type ExemplarObserver interface {
63
+	ObserveWithExemplar(value float64, exemplar Labels)
64
+}
... ...
@@ -33,18 +33,22 @@ var (
33 33
 )
34 34
 
35 35
 type processMemoryCounters struct {
36
-	// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex
36
+	// System interface description
37
+	// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex
38
+
39
+	// Refer to the Golang internal implementation
40
+	// https://golang.org/src/internal/syscall/windows/psapi_windows.go
37 41
 	_                          uint32
38 42
 	PageFaultCount             uint32
39
-	PeakWorkingSetSize         uint64
40
-	WorkingSetSize             uint64
41
-	QuotaPeakPagedPoolUsage    uint64
42
-	QuotaPagedPoolUsage        uint64
43
-	QuotaPeakNonPagedPoolUsage uint64
44
-	QuotaNonPagedPoolUsage     uint64
45
-	PagefileUsage              uint64
46
-	PeakPagefileUsage          uint64
47
-	PrivateUsage               uint64
43
+	PeakWorkingSetSize         uintptr
44
+	WorkingSetSize             uintptr
45
+	QuotaPeakPagedPoolUsage    uintptr
46
+	QuotaPagedPoolUsage        uintptr
47
+	QuotaPeakNonPagedPoolUsage uintptr
48
+	QuotaNonPagedPoolUsage     uintptr
49
+	PagefileUsage              uintptr
50
+	PeakPagefileUsage          uintptr
51
+	PrivateUsage               uintptr
48 52
 }
49 53
 
50 54
 func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
... ...
@@ -53,12 +53,16 @@ func (r *responseWriterDelegator) Written() int64 {
53 53
 }
54 54
 
55 55
 func (r *responseWriterDelegator) WriteHeader(code int) {
56
+	if r.observeWriteHeader != nil && !r.wroteHeader {
57
+		// Only call observeWriteHeader for the 1st time. It's a bug if
58
+		// WriteHeader is called more than once, but we want to protect
59
+		// against it here. Note that we still delegate the WriteHeader
60
+		// to the original ResponseWriter to not mask the bug from it.
61
+		r.observeWriteHeader(code)
62
+	}
56 63
 	r.status = code
57 64
 	r.wroteHeader = true
58 65
 	r.ResponseWriter.WriteHeader(code)
59
-	if r.observeWriteHeader != nil {
60
-		r.observeWriteHeader(code)
61
-	}
62 66
 }
63 67
 
64 68
 func (r *responseWriterDelegator) Write(b []byte) (int, error) {
... ...
@@ -144,7 +144,12 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
144 144
 			}
145 145
 		}
146 146
 
147
-		contentType := expfmt.Negotiate(req.Header)
147
+		var contentType expfmt.Format
148
+		if opts.EnableOpenMetrics {
149
+			contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header)
150
+		} else {
151
+			contentType = expfmt.Negotiate(req.Header)
152
+		}
148 153
 		header := rsp.Header()
149 154
 		header.Set(contentTypeHeader, string(contentType))
150 155
 
... ...
@@ -162,28 +167,40 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
162 162
 
163 163
 		enc := expfmt.NewEncoder(w, contentType)
164 164
 
165
-		var lastErr error
166
-		for _, mf := range mfs {
167
-			if err := enc.Encode(mf); err != nil {
168
-				lastErr = err
169
-				if opts.ErrorLog != nil {
170
-					opts.ErrorLog.Println("error encoding and sending metric family:", err)
171
-				}
172
-				errCnt.WithLabelValues("encoding").Inc()
173
-				switch opts.ErrorHandling {
174
-				case PanicOnError:
175
-					panic(err)
176
-				case ContinueOnError:
177
-					// Handled later.
178
-				case HTTPErrorOnError:
179
-					httpError(rsp, err)
180
-					return
181
-				}
165
+		// handleError handles the error according to opts.ErrorHandling
166
+		// and returns true if we have to abort after the handling.
167
+		handleError := func(err error) bool {
168
+			if err == nil {
169
+				return false
170
+			}
171
+			if opts.ErrorLog != nil {
172
+				opts.ErrorLog.Println("error encoding and sending metric family:", err)
173
+			}
174
+			errCnt.WithLabelValues("encoding").Inc()
175
+			switch opts.ErrorHandling {
176
+			case PanicOnError:
177
+				panic(err)
178
+			case HTTPErrorOnError:
179
+				// We cannot really send an HTTP error at this
180
+				// point because we most likely have written
181
+				// something to rsp already. But at least we can
182
+				// stop sending.
183
+				return true
182 184
 			}
185
+			// Do nothing in all other cases, including ContinueOnError.
186
+			return false
183 187
 		}
184 188
 
185
-		if lastErr != nil {
186
-			httpError(rsp, lastErr)
189
+		for _, mf := range mfs {
190
+			if handleError(enc.Encode(mf)) {
191
+				return
192
+			}
193
+		}
194
+		if closer, ok := enc.(expfmt.Closer); ok {
195
+			// This in particular takes care of the final "# EOF\n" line for OpenMetrics.
196
+			if handleError(closer.Close()) {
197
+				return
198
+			}
187 199
 		}
188 200
 	})
189 201
 
... ...
@@ -255,7 +272,12 @@ type HandlerErrorHandling int
255 255
 // errors are encountered.
256 256
 const (
257 257
 	// Serve an HTTP status code 500 upon the first error
258
-	// encountered. Report the error message in the body.
258
+	// encountered. Report the error message in the body. Note that HTTP
259
+	// errors cannot be served anymore once the beginning of a regular
260
+	// payload has been sent. Thus, in the (unlikely) case that encoding the
261
+	// payload into the negotiated wire format fails, serving the response
262
+	// will simply be aborted. Set an ErrorLog in HandlerOpts to detect
263
+	// those errors.
259 264
 	HTTPErrorOnError HandlerErrorHandling = iota
260 265
 	// Ignore errors and try to serve as many metrics as possible.  However,
261 266
 	// if no metrics can be served, serve an HTTP status code 500 and the
... ...
@@ -318,6 +340,16 @@ type HandlerOpts struct {
318 318
 	// away). Until the implementation is improved, it is recommended to
319 319
 	// implement a separate timeout in potentially slow Collectors.
320 320
 	Timeout time.Duration
321
+	// If true, the experimental OpenMetrics encoding is added to the
322
+	// possible options during content negotiation. Note that Prometheus
323
+	// 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is
324
+	// the only way to transmit exemplars. However, the move to OpenMetrics
325
+	// is not completely transparent. Most notably, the values of "quantile"
326
+	// labels of Summaries and "le" labels of Histograms are formatted with
327
+	// a trailing ".0" if they would otherwise look like integer numbers
328
+	// (which changes the identity of the resulting series on the Prometheus
329
+	// server).
330
+	EnableOpenMetrics bool
321 331
 }
322 332
 
323 333
 // gzipAccepted returns whether the client will accept gzip-encoded content.
... ...
@@ -334,11 +366,9 @@ func gzipAccepted(header http.Header) bool {
334 334
 }
335 335
 
336 336
 // httpError removes any content-encoding header and then calls http.Error with
337
-// the provided error and http.StatusInternalServerErrer. Error contents is
338
-// supposed to be uncompressed plain text. However, same as with a plain
339
-// http.Error, any header settings will be void if the header has already been
340
-// sent. The error message will still be written to the writer, but it will
341
-// probably be of limited use.
337
+// the provided error and http.StatusInternalServerError. Error contents is
338
+// supposed to be uncompressed plain text. Same as with a plain http.Error, this
339
+// must not be called if the header or any payload has already been sent.
342 340
 func httpError(rsp http.ResponseWriter, err error) {
343 341
 	rsp.Header().Del(contentEncodingHeader)
344 342
 	http.Error(
... ...
@@ -16,8 +16,11 @@ package prometheus
16 16
 import (
17 17
 	"fmt"
18 18
 	"sort"
19
+	"time"
20
+	"unicode/utf8"
19 21
 
20 22
 	"github.com/golang/protobuf/proto"
23
+	"github.com/golang/protobuf/ptypes"
21 24
 
22 25
 	dto "github.com/prometheus/client_model/go"
23 26
 )
... ...
@@ -25,7 +28,8 @@ import (
25 25
 // ValueType is an enumeration of metric types that represent a simple value.
26 26
 type ValueType int
27 27
 
28
-// Possible values for the ValueType enum.
28
+// Possible values for the ValueType enum. Use UntypedValue to mark a metric
29
+// with an unknown type.
29 30
 const (
30 31
 	_ ValueType = iota
31 32
 	CounterValue
... ...
@@ -69,7 +73,7 @@ func (v *valueFunc) Desc() *Desc {
69 69
 }
70 70
 
71 71
 func (v *valueFunc) Write(out *dto.Metric) error {
72
-	return populateMetric(v.valType, v.function(), v.labelPairs, out)
72
+	return populateMetric(v.valType, v.function(), v.labelPairs, nil, out)
73 73
 }
74 74
 
75 75
 // NewConstMetric returns a metric with one fixed value that cannot be
... ...
@@ -116,19 +120,20 @@ func (m *constMetric) Desc() *Desc {
116 116
 }
117 117
 
118 118
 func (m *constMetric) Write(out *dto.Metric) error {
119
-	return populateMetric(m.valType, m.val, m.labelPairs, out)
119
+	return populateMetric(m.valType, m.val, m.labelPairs, nil, out)
120 120
 }
121 121
 
122 122
 func populateMetric(
123 123
 	t ValueType,
124 124
 	v float64,
125 125
 	labelPairs []*dto.LabelPair,
126
+	e *dto.Exemplar,
126 127
 	m *dto.Metric,
127 128
 ) error {
128 129
 	m.Label = labelPairs
129 130
 	switch t {
130 131
 	case CounterValue:
131
-		m.Counter = &dto.Counter{Value: proto.Float64(v)}
132
+		m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e}
132 133
 	case GaugeValue:
133 134
 		m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
134 135
 	case UntypedValue:
... ...
@@ -160,3 +165,40 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
160 160
 	sort.Sort(labelPairSorter(labelPairs))
161 161
 	return labelPairs
162 162
 }
163
+
164
+// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
165
+const ExemplarMaxRunes = 64
166
+
167
+// newExemplar creates a new dto.Exemplar from the provided values. An error is
168
+// returned if any of the label names or values are invalid or if the total
169
+// number of runes in the label names and values exceeds ExemplarMaxRunes.
170
+func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
171
+	e := &dto.Exemplar{}
172
+	e.Value = proto.Float64(value)
173
+	tsProto, err := ptypes.TimestampProto(ts)
174
+	if err != nil {
175
+		return nil, err
176
+	}
177
+	e.Timestamp = tsProto
178
+	labelPairs := make([]*dto.LabelPair, 0, len(l))
179
+	var runes int
180
+	for name, value := range l {
181
+		if !checkLabelName(name) {
182
+			return nil, fmt.Errorf("exemplar label name %q is invalid", name)
183
+		}
184
+		runes += utf8.RuneCountInString(name)
185
+		if !utf8.ValidString(value) {
186
+			return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value)
187
+		}
188
+		runes += utf8.RuneCountInString(value)
189
+		labelPairs = append(labelPairs, &dto.LabelPair{
190
+			Name:  proto.String(name),
191
+			Value: proto.String(value),
192
+		})
193
+	}
194
+	if runes > ExemplarMaxRunes {
195
+		return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes)
196
+	}
197
+	e.Label = labelPairs
198
+	return e, nil
199
+}
... ...
@@ -91,6 +91,18 @@ func (m *metricVec) Delete(labels Labels) bool {
91 91
 	return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
92 92
 }
93 93
 
94
+// Without explicit forwarding of Describe, Collect, Reset, those methods won't
95
+// show up in GoDoc.
96
+
97
+// Describe implements Collector.
98
+func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
99
+
100
+// Collect implements Collector.
101
+func (m *metricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
102
+
103
+// Reset deletes all metrics in this vector.
104
+func (m *metricVec) Reset() { m.metricMap.Reset() }
105
+
94 106
 func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
95 107
 	var (
96 108
 		newCurry []curriedLabelValue
... ...
@@ -1,11 +1,14 @@
1 1
 // Code generated by protoc-gen-go. DO NOT EDIT.
2 2
 // source: metrics.proto
3 3
 
4
-package io_prometheus_client // import "github.com/prometheus/client_model/go"
4
+package io_prometheus_client
5 5
 
6
-import proto "github.com/golang/protobuf/proto"
7
-import fmt "fmt"
8
-import math "math"
6
+import (
7
+	fmt "fmt"
8
+	proto "github.com/golang/protobuf/proto"
9
+	timestamp "github.com/golang/protobuf/ptypes/timestamp"
10
+	math "math"
11
+)
9 12
 
10 13
 // Reference imports to suppress errors if they are not otherwise used.
11 14
 var _ = proto.Marshal
... ...
@@ -16,7 +19,7 @@ var _ = math.Inf
16 16
 // is compatible with the proto package it is being compiled against.
17 17
 // A compilation error at this line likely means your copy of the
18 18
 // proto package needs to be updated.
19
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
19
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
20 20
 
21 21
 type MetricType int32
22 22
 
... ...
@@ -35,6 +38,7 @@ var MetricType_name = map[int32]string{
35 35
 	3: "UNTYPED",
36 36
 	4: "HISTOGRAM",
37 37
 }
38
+
38 39
 var MetricType_value = map[string]int32{
39 40
 	"COUNTER":   0,
40 41
 	"GAUGE":     1,
... ...
@@ -48,9 +52,11 @@ func (x MetricType) Enum() *MetricType {
48 48
 	*p = x
49 49
 	return p
50 50
 }
51
+
51 52
 func (x MetricType) String() string {
52 53
 	return proto.EnumName(MetricType_name, int32(x))
53 54
 }
55
+
54 56
 func (x *MetricType) UnmarshalJSON(data []byte) error {
55 57
 	value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
56 58
 	if err != nil {
... ...
@@ -59,8 +65,9 @@ func (x *MetricType) UnmarshalJSON(data []byte) error {
59 59
 	*x = MetricType(value)
60 60
 	return nil
61 61
 }
62
+
62 63
 func (MetricType) EnumDescriptor() ([]byte, []int) {
63
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
64
+	return fileDescriptor_6039342a2ba47b72, []int{0}
64 65
 }
65 66
 
66 67
 type LabelPair struct {
... ...
@@ -75,16 +82,17 @@ func (m *LabelPair) Reset()         { *m = LabelPair{} }
75 75
 func (m *LabelPair) String() string { return proto.CompactTextString(m) }
76 76
 func (*LabelPair) ProtoMessage()    {}
77 77
 func (*LabelPair) Descriptor() ([]byte, []int) {
78
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
78
+	return fileDescriptor_6039342a2ba47b72, []int{0}
79 79
 }
80
+
80 81
 func (m *LabelPair) XXX_Unmarshal(b []byte) error {
81 82
 	return xxx_messageInfo_LabelPair.Unmarshal(m, b)
82 83
 }
83 84
 func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
84 85
 	return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
85 86
 }
86
-func (dst *LabelPair) XXX_Merge(src proto.Message) {
87
-	xxx_messageInfo_LabelPair.Merge(dst, src)
87
+func (m *LabelPair) XXX_Merge(src proto.Message) {
88
+	xxx_messageInfo_LabelPair.Merge(m, src)
88 89
 }
89 90
 func (m *LabelPair) XXX_Size() int {
90 91
 	return xxx_messageInfo_LabelPair.Size(m)
... ...
@@ -120,16 +128,17 @@ func (m *Gauge) Reset()         { *m = Gauge{} }
120 120
 func (m *Gauge) String() string { return proto.CompactTextString(m) }
121 121
 func (*Gauge) ProtoMessage()    {}
122 122
 func (*Gauge) Descriptor() ([]byte, []int) {
123
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1}
123
+	return fileDescriptor_6039342a2ba47b72, []int{1}
124 124
 }
125
+
125 126
 func (m *Gauge) XXX_Unmarshal(b []byte) error {
126 127
 	return xxx_messageInfo_Gauge.Unmarshal(m, b)
127 128
 }
128 129
 func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
129 130
 	return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
130 131
 }
131
-func (dst *Gauge) XXX_Merge(src proto.Message) {
132
-	xxx_messageInfo_Gauge.Merge(dst, src)
132
+func (m *Gauge) XXX_Merge(src proto.Message) {
133
+	xxx_messageInfo_Gauge.Merge(m, src)
133 134
 }
134 135
 func (m *Gauge) XXX_Size() int {
135 136
 	return xxx_messageInfo_Gauge.Size(m)
... ...
@@ -148,26 +157,28 @@ func (m *Gauge) GetValue() float64 {
148 148
 }
149 149
 
150 150
 type Counter struct {
151
-	Value                *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
152
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
153
-	XXX_unrecognized     []byte   `json:"-"`
154
-	XXX_sizecache        int32    `json:"-"`
151
+	Value                *float64  `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
152
+	Exemplar             *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
153
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
154
+	XXX_unrecognized     []byte    `json:"-"`
155
+	XXX_sizecache        int32     `json:"-"`
155 156
 }
156 157
 
157 158
 func (m *Counter) Reset()         { *m = Counter{} }
158 159
 func (m *Counter) String() string { return proto.CompactTextString(m) }
159 160
 func (*Counter) ProtoMessage()    {}
160 161
 func (*Counter) Descriptor() ([]byte, []int) {
161
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2}
162
+	return fileDescriptor_6039342a2ba47b72, []int{2}
162 163
 }
164
+
163 165
 func (m *Counter) XXX_Unmarshal(b []byte) error {
164 166
 	return xxx_messageInfo_Counter.Unmarshal(m, b)
165 167
 }
166 168
 func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
167 169
 	return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
168 170
 }
169
-func (dst *Counter) XXX_Merge(src proto.Message) {
170
-	xxx_messageInfo_Counter.Merge(dst, src)
171
+func (m *Counter) XXX_Merge(src proto.Message) {
172
+	xxx_messageInfo_Counter.Merge(m, src)
171 173
 }
172 174
 func (m *Counter) XXX_Size() int {
173 175
 	return xxx_messageInfo_Counter.Size(m)
... ...
@@ -185,6 +196,13 @@ func (m *Counter) GetValue() float64 {
185 185
 	return 0
186 186
 }
187 187
 
188
+func (m *Counter) GetExemplar() *Exemplar {
189
+	if m != nil {
190
+		return m.Exemplar
191
+	}
192
+	return nil
193
+}
194
+
188 195
 type Quantile struct {
189 196
 	Quantile             *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
190 197
 	Value                *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
... ...
@@ -197,16 +215,17 @@ func (m *Quantile) Reset()         { *m = Quantile{} }
197 197
 func (m *Quantile) String() string { return proto.CompactTextString(m) }
198 198
 func (*Quantile) ProtoMessage()    {}
199 199
 func (*Quantile) Descriptor() ([]byte, []int) {
200
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3}
200
+	return fileDescriptor_6039342a2ba47b72, []int{3}
201 201
 }
202
+
202 203
 func (m *Quantile) XXX_Unmarshal(b []byte) error {
203 204
 	return xxx_messageInfo_Quantile.Unmarshal(m, b)
204 205
 }
205 206
 func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
206 207
 	return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
207 208
 }
208
-func (dst *Quantile) XXX_Merge(src proto.Message) {
209
-	xxx_messageInfo_Quantile.Merge(dst, src)
209
+func (m *Quantile) XXX_Merge(src proto.Message) {
210
+	xxx_messageInfo_Quantile.Merge(m, src)
210 211
 }
211 212
 func (m *Quantile) XXX_Size() int {
212 213
 	return xxx_messageInfo_Quantile.Size(m)
... ...
@@ -244,16 +263,17 @@ func (m *Summary) Reset()         { *m = Summary{} }
244 244
 func (m *Summary) String() string { return proto.CompactTextString(m) }
245 245
 func (*Summary) ProtoMessage()    {}
246 246
 func (*Summary) Descriptor() ([]byte, []int) {
247
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4}
247
+	return fileDescriptor_6039342a2ba47b72, []int{4}
248 248
 }
249
+
249 250
 func (m *Summary) XXX_Unmarshal(b []byte) error {
250 251
 	return xxx_messageInfo_Summary.Unmarshal(m, b)
251 252
 }
252 253
 func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
253 254
 	return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
254 255
 }
255
-func (dst *Summary) XXX_Merge(src proto.Message) {
256
-	xxx_messageInfo_Summary.Merge(dst, src)
256
+func (m *Summary) XXX_Merge(src proto.Message) {
257
+	xxx_messageInfo_Summary.Merge(m, src)
257 258
 }
258 259
 func (m *Summary) XXX_Size() int {
259 260
 	return xxx_messageInfo_Summary.Size(m)
... ...
@@ -296,16 +316,17 @@ func (m *Untyped) Reset()         { *m = Untyped{} }
296 296
 func (m *Untyped) String() string { return proto.CompactTextString(m) }
297 297
 func (*Untyped) ProtoMessage()    {}
298 298
 func (*Untyped) Descriptor() ([]byte, []int) {
299
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5}
299
+	return fileDescriptor_6039342a2ba47b72, []int{5}
300 300
 }
301
+
301 302
 func (m *Untyped) XXX_Unmarshal(b []byte) error {
302 303
 	return xxx_messageInfo_Untyped.Unmarshal(m, b)
303 304
 }
304 305
 func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
305 306
 	return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
306 307
 }
307
-func (dst *Untyped) XXX_Merge(src proto.Message) {
308
-	xxx_messageInfo_Untyped.Merge(dst, src)
308
+func (m *Untyped) XXX_Merge(src proto.Message) {
309
+	xxx_messageInfo_Untyped.Merge(m, src)
309 310
 }
310 311
 func (m *Untyped) XXX_Size() int {
311 312
 	return xxx_messageInfo_Untyped.Size(m)
... ...
@@ -336,16 +357,17 @@ func (m *Histogram) Reset()         { *m = Histogram{} }
336 336
 func (m *Histogram) String() string { return proto.CompactTextString(m) }
337 337
 func (*Histogram) ProtoMessage()    {}
338 338
 func (*Histogram) Descriptor() ([]byte, []int) {
339
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6}
339
+	return fileDescriptor_6039342a2ba47b72, []int{6}
340 340
 }
341
+
341 342
 func (m *Histogram) XXX_Unmarshal(b []byte) error {
342 343
 	return xxx_messageInfo_Histogram.Unmarshal(m, b)
343 344
 }
344 345
 func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
345 346
 	return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
346 347
 }
347
-func (dst *Histogram) XXX_Merge(src proto.Message) {
348
-	xxx_messageInfo_Histogram.Merge(dst, src)
348
+func (m *Histogram) XXX_Merge(src proto.Message) {
349
+	xxx_messageInfo_Histogram.Merge(m, src)
349 350
 }
350 351
 func (m *Histogram) XXX_Size() int {
351 352
 	return xxx_messageInfo_Histogram.Size(m)
... ...
@@ -378,27 +400,29 @@ func (m *Histogram) GetBucket() []*Bucket {
378 378
 }
379 379
 
380 380
 type Bucket struct {
381
-	CumulativeCount      *uint64  `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
382
-	UpperBound           *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
383
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
384
-	XXX_unrecognized     []byte   `json:"-"`
385
-	XXX_sizecache        int32    `json:"-"`
381
+	CumulativeCount      *uint64   `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
382
+	UpperBound           *float64  `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
383
+	Exemplar             *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
384
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
385
+	XXX_unrecognized     []byte    `json:"-"`
386
+	XXX_sizecache        int32     `json:"-"`
386 387
 }
387 388
 
388 389
 func (m *Bucket) Reset()         { *m = Bucket{} }
389 390
 func (m *Bucket) String() string { return proto.CompactTextString(m) }
390 391
 func (*Bucket) ProtoMessage()    {}
391 392
 func (*Bucket) Descriptor() ([]byte, []int) {
392
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7}
393
+	return fileDescriptor_6039342a2ba47b72, []int{7}
393 394
 }
395
+
394 396
 func (m *Bucket) XXX_Unmarshal(b []byte) error {
395 397
 	return xxx_messageInfo_Bucket.Unmarshal(m, b)
396 398
 }
397 399
 func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
398 400
 	return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
399 401
 }
400
-func (dst *Bucket) XXX_Merge(src proto.Message) {
401
-	xxx_messageInfo_Bucket.Merge(dst, src)
402
+func (m *Bucket) XXX_Merge(src proto.Message) {
403
+	xxx_messageInfo_Bucket.Merge(m, src)
402 404
 }
403 405
 func (m *Bucket) XXX_Size() int {
404 406
 	return xxx_messageInfo_Bucket.Size(m)
... ...
@@ -423,6 +447,68 @@ func (m *Bucket) GetUpperBound() float64 {
423 423
 	return 0
424 424
 }
425 425
 
426
+func (m *Bucket) GetExemplar() *Exemplar {
427
+	if m != nil {
428
+		return m.Exemplar
429
+	}
430
+	return nil
431
+}
432
+
433
+type Exemplar struct {
434
+	Label                []*LabelPair         `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
435
+	Value                *float64             `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
436
+	Timestamp            *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"`
437
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
438
+	XXX_unrecognized     []byte               `json:"-"`
439
+	XXX_sizecache        int32                `json:"-"`
440
+}
441
+
442
+func (m *Exemplar) Reset()         { *m = Exemplar{} }
443
+func (m *Exemplar) String() string { return proto.CompactTextString(m) }
444
+func (*Exemplar) ProtoMessage()    {}
445
+func (*Exemplar) Descriptor() ([]byte, []int) {
446
+	return fileDescriptor_6039342a2ba47b72, []int{8}
447
+}
448
+
449
+func (m *Exemplar) XXX_Unmarshal(b []byte) error {
450
+	return xxx_messageInfo_Exemplar.Unmarshal(m, b)
451
+}
452
+func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
453
+	return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
454
+}
455
+func (m *Exemplar) XXX_Merge(src proto.Message) {
456
+	xxx_messageInfo_Exemplar.Merge(m, src)
457
+}
458
+func (m *Exemplar) XXX_Size() int {
459
+	return xxx_messageInfo_Exemplar.Size(m)
460
+}
461
+func (m *Exemplar) XXX_DiscardUnknown() {
462
+	xxx_messageInfo_Exemplar.DiscardUnknown(m)
463
+}
464
+
465
+var xxx_messageInfo_Exemplar proto.InternalMessageInfo
466
+
467
+func (m *Exemplar) GetLabel() []*LabelPair {
468
+	if m != nil {
469
+		return m.Label
470
+	}
471
+	return nil
472
+}
473
+
474
+func (m *Exemplar) GetValue() float64 {
475
+	if m != nil && m.Value != nil {
476
+		return *m.Value
477
+	}
478
+	return 0
479
+}
480
+
481
+func (m *Exemplar) GetTimestamp() *timestamp.Timestamp {
482
+	if m != nil {
483
+		return m.Timestamp
484
+	}
485
+	return nil
486
+}
487
+
426 488
 type Metric struct {
427 489
 	Label                []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
428 490
 	Gauge                *Gauge       `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
... ...
@@ -440,16 +526,17 @@ func (m *Metric) Reset()         { *m = Metric{} }
440 440
 func (m *Metric) String() string { return proto.CompactTextString(m) }
441 441
 func (*Metric) ProtoMessage()    {}
442 442
 func (*Metric) Descriptor() ([]byte, []int) {
443
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8}
443
+	return fileDescriptor_6039342a2ba47b72, []int{9}
444 444
 }
445
+
445 446
 func (m *Metric) XXX_Unmarshal(b []byte) error {
446 447
 	return xxx_messageInfo_Metric.Unmarshal(m, b)
447 448
 }
448 449
 func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
449 450
 	return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
450 451
 }
451
-func (dst *Metric) XXX_Merge(src proto.Message) {
452
-	xxx_messageInfo_Metric.Merge(dst, src)
452
+func (m *Metric) XXX_Merge(src proto.Message) {
453
+	xxx_messageInfo_Metric.Merge(m, src)
453 454
 }
454 455
 func (m *Metric) XXX_Size() int {
455 456
 	return xxx_messageInfo_Metric.Size(m)
... ...
@@ -523,16 +610,17 @@ func (m *MetricFamily) Reset()         { *m = MetricFamily{} }
523 523
 func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
524 524
 func (*MetricFamily) ProtoMessage()    {}
525 525
 func (*MetricFamily) Descriptor() ([]byte, []int) {
526
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9}
526
+	return fileDescriptor_6039342a2ba47b72, []int{10}
527 527
 }
528
+
528 529
 func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
529 530
 	return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
530 531
 }
531 532
 func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
532 533
 	return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
533 534
 }
534
-func (dst *MetricFamily) XXX_Merge(src proto.Message) {
535
-	xxx_messageInfo_MetricFamily.Merge(dst, src)
535
+func (m *MetricFamily) XXX_Merge(src proto.Message) {
536
+	xxx_messageInfo_MetricFamily.Merge(m, src)
536 537
 }
537 538
 func (m *MetricFamily) XXX_Size() int {
538 539
 	return xxx_messageInfo_MetricFamily.Size(m)
... ...
@@ -572,6 +660,7 @@ func (m *MetricFamily) GetMetric() []*Metric {
572 572
 }
573 573
 
574 574
 func init() {
575
+	proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
575 576
 	proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
576 577
 	proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
577 578
 	proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
... ...
@@ -580,50 +669,55 @@ func init() {
580 580
 	proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
581 581
 	proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
582 582
 	proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
583
+	proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
583 584
 	proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
584 585
 	proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
585
-	proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
586 586
 }
587 587
 
588
-func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) }
589
-
590
-var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{
591
-	// 591 bytes of a gzipped FileDescriptorProto
592
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e,
593
-	0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89,
594
-	0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81,
595
-	0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47,
596
-	0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77,
597
-	0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e,
598
-	0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64,
599
-	0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58,
600
-	0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c,
601
-	0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2,
602
-	0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4,
603
-	0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12,
604
-	0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c,
605
-	0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee,
606
-	0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f,
607
-	0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54,
608
-	0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea,
609
-	0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63,
610
-	0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45,
611
-	0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d,
612
-	0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5,
613
-	0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d,
614
-	0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d,
615
-	0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7,
616
-	0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8,
617
-	0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2,
618
-	0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58,
619
-	0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11,
620
-	0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff,
621
-	0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02,
622
-	0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd,
623
-	0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25,
624
-	0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9,
625
-	0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27,
626
-	0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9,
627
-	0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48,
628
-	0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00,
588
+func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) }
589
+
590
+var fileDescriptor_6039342a2ba47b72 = []byte{
591
+	// 665 bytes of a gzipped FileDescriptorProto
592
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c,
593
+	0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55,
594
+	0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2,
595
+	0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e,
596
+	0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa,
597
+	0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66,
598
+	0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4,
599
+	0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45,
600
+	0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a,
601
+	0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d,
602
+	0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b,
603
+	0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22,
604
+	0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79,
605
+	0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0,
606
+	0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00,
607
+	0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01,
608
+	0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe,
609
+	0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55,
610
+	0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f,
611
+	0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31,
612
+	0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16,
613
+	0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e,
614
+	0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c,
615
+	0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f,
616
+	0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57,
617
+	0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64,
618
+	0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76,
619
+	0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7,
620
+	0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95,
621
+	0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed,
622
+	0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33,
623
+	0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07,
624
+	0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72,
625
+	0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56,
626
+	0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6,
627
+	0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f,
628
+	0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f,
629
+	0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27,
630
+	0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83,
631
+	0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24,
632
+	0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff,
633
+	0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00,
629 634
 }
... ...
@@ -30,17 +30,38 @@ type Encoder interface {
30 30
 	Encode(*dto.MetricFamily) error
31 31
 }
32 32
 
33
-type encoder func(*dto.MetricFamily) error
33
+// Closer is implemented by Encoders that need to be closed to finalize
34
+// encoding. (For example, OpenMetrics needs a final `# EOF` line.)
35
+//
36
+// Note that all Encoder implementations returned from this package implement
37
+// Closer, too, even if the Close call is a no-op. This happens in preparation
38
+// for adding a Close method to the Encoder interface directly in a (mildly
39
+// breaking) release in the future.
40
+type Closer interface {
41
+	Close() error
42
+}
43
+
44
+type encoderCloser struct {
45
+	encode func(*dto.MetricFamily) error
46
+	close  func() error
47
+}
34 48
 
35
-func (e encoder) Encode(v *dto.MetricFamily) error {
36
-	return e(v)
49
+func (ec encoderCloser) Encode(v *dto.MetricFamily) error {
50
+	return ec.encode(v)
37 51
 }
38 52
 
39
-// Negotiate returns the Content-Type based on the given Accept header.
40
-// If no appropriate accepted type is found, FmtText is returned.
53
+func (ec encoderCloser) Close() error {
54
+	return ec.close()
55
+}
56
+
57
+// Negotiate returns the Content-Type based on the given Accept header. If no
58
+// appropriate accepted type is found, FmtText is returned (which is the
59
+// Prometheus text format). This function will never negotiate FmtOpenMetrics,
60
+// as the support is still experimental. To include the option to negotiate
61
+// FmtOpenMetrics, use NegotiateOpenMetrics.
41 62
 func Negotiate(h http.Header) Format {
42 63
 	for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
43
-		// Check for protocol buffer
64
+		ver := ac.Params["version"]
44 65
 		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
45 66
 			switch ac.Params["encoding"] {
46 67
 			case "delimited":
... ...
@@ -51,38 +72,91 @@ func Negotiate(h http.Header) Format {
51 51
 				return FmtProtoCompact
52 52
 			}
53 53
 		}
54
-		// Check for text format.
54
+		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
55
+			return FmtText
56
+		}
57
+	}
58
+	return FmtText
59
+}
60
+
61
+// NegotiateIncludingOpenMetrics works like Negotiate but includes
62
+// FmtOpenMetrics as an option for the result. Note that this function is
63
+// temporary and will disappear once FmtOpenMetrics is fully supported and as
64
+// such may be negotiated by the normal Negotiate function.
65
+func NegotiateIncludingOpenMetrics(h http.Header) Format {
66
+	for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
55 67
 		ver := ac.Params["version"]
68
+		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
69
+			switch ac.Params["encoding"] {
70
+			case "delimited":
71
+				return FmtProtoDelim
72
+			case "text":
73
+				return FmtProtoText
74
+			case "compact-text":
75
+				return FmtProtoCompact
76
+			}
77
+		}
56 78
 		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
57 79
 			return FmtText
58 80
 		}
81
+		if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") {
82
+			return FmtOpenMetrics
83
+		}
59 84
 	}
60 85
 	return FmtText
61 86
 }
62 87
 
63
-// NewEncoder returns a new encoder based on content type negotiation.
88
+// NewEncoder returns a new encoder based on content type negotiation. All
89
+// Encoder implementations returned by NewEncoder also implement Closer, and
90
+// callers should always call the Close method. It is currently only required
91
+// for FmtOpenMetrics, but a future (breaking) release will add the Close method
92
+// to the Encoder interface directly. The current version of the Encoder
93
+// interface is kept for backwards compatibility.
64 94
 func NewEncoder(w io.Writer, format Format) Encoder {
65 95
 	switch format {
66 96
 	case FmtProtoDelim:
67
-		return encoder(func(v *dto.MetricFamily) error {
68
-			_, err := pbutil.WriteDelimited(w, v)
69
-			return err
70
-		})
97
+		return encoderCloser{
98
+			encode: func(v *dto.MetricFamily) error {
99
+				_, err := pbutil.WriteDelimited(w, v)
100
+				return err
101
+			},
102
+			close: func() error { return nil },
103
+		}
71 104
 	case FmtProtoCompact:
72
-		return encoder(func(v *dto.MetricFamily) error {
73
-			_, err := fmt.Fprintln(w, v.String())
74
-			return err
75
-		})
105
+		return encoderCloser{
106
+			encode: func(v *dto.MetricFamily) error {
107
+				_, err := fmt.Fprintln(w, v.String())
108
+				return err
109
+			},
110
+			close: func() error { return nil },
111
+		}
76 112
 	case FmtProtoText:
77
-		return encoder(func(v *dto.MetricFamily) error {
78
-			_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
79
-			return err
80
-		})
113
+		return encoderCloser{
114
+			encode: func(v *dto.MetricFamily) error {
115
+				_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
116
+				return err
117
+			},
118
+			close: func() error { return nil },
119
+		}
81 120
 	case FmtText:
82
-		return encoder(func(v *dto.MetricFamily) error {
83
-			_, err := MetricFamilyToText(w, v)
84
-			return err
85
-		})
121
+		return encoderCloser{
122
+			encode: func(v *dto.MetricFamily) error {
123
+				_, err := MetricFamilyToText(w, v)
124
+				return err
125
+			},
126
+			close: func() error { return nil },
127
+		}
128
+	case FmtOpenMetrics:
129
+		return encoderCloser{
130
+			encode: func(v *dto.MetricFamily) error {
131
+				_, err := MetricFamilyToOpenMetrics(w, v)
132
+				return err
133
+			},
134
+			close: func() error {
135
+				_, err := FinalizeOpenMetrics(w)
136
+				return err
137
+			},
138
+		}
86 139
 	}
87
-	panic("expfmt.NewEncoder: unknown format")
140
+	panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format))
88 141
 }
... ...
@@ -19,10 +19,12 @@ type Format string
19 19
 
20 20
 // Constants to assemble the Content-Type values for the different wire protocols.
21 21
 const (
22
-	TextVersion   = "0.0.4"
23
-	ProtoType     = `application/vnd.google.protobuf`
24
-	ProtoProtocol = `io.prometheus.client.MetricFamily`
25
-	ProtoFmt      = ProtoType + "; proto=" + ProtoProtocol + ";"
22
+	TextVersion        = "0.0.4"
23
+	ProtoType          = `application/vnd.google.protobuf`
24
+	ProtoProtocol      = `io.prometheus.client.MetricFamily`
25
+	ProtoFmt           = ProtoType + "; proto=" + ProtoProtocol + ";"
26
+	OpenMetricsType    = `application/openmetrics-text`
27
+	OpenMetricsVersion = "0.0.1"
26 28
 
27 29
 	// The Content-Type values for the different wire protocols.
28 30
 	FmtUnknown      Format = `<unknown>`
... ...
@@ -30,6 +32,7 @@ const (
30 30
 	FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited`
31 31
 	FmtProtoText    Format = ProtoFmt + ` encoding=text`
32 32
 	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
33
+	FmtOpenMetrics  Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8`
33 34
 )
34 35
 
35 36
 const (
36 37
new file mode 100644
... ...
@@ -0,0 +1,527 @@
0
+// Copyright 2020 The Prometheus Authors
1
+// Licensed under the Apache License, Version 2.0 (the "License");
2
+// you may not use this file except in compliance with the License.
3
+// You may obtain a copy of the License at
4
+//
5
+// http://www.apache.org/licenses/LICENSE-2.0
6
+//
7
+// Unless required by applicable law or agreed to in writing, software
8
+// distributed under the License is distributed on an "AS IS" BASIS,
9
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+// See the License for the specific language governing permissions and
11
+// limitations under the License.
12
+
13
+package expfmt
14
+
15
+import (
16
+	"bufio"
17
+	"bytes"
18
+	"fmt"
19
+	"io"
20
+	"math"
21
+	"strconv"
22
+	"strings"
23
+
24
+	"github.com/golang/protobuf/ptypes"
25
+	"github.com/prometheus/common/model"
26
+
27
+	dto "github.com/prometheus/client_model/go"
28
+)
29
+
30
+// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
31
+// OpenMetrics text format and writes the resulting lines to 'out'. It returns
32
+// the number of bytes written and any error encountered. The output will have
33
+// the same order as the input, no further sorting is performed. Furthermore,
34
+// this function assumes the input is already sanitized and does not perform any
35
+// sanity checks. If the input contains duplicate metrics or invalid metric or
36
+// label names, the conversion will result in invalid text format output.
37
+//
38
+// This function fulfills the type 'expfmt.encoder'.
39
+//
40
+// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
41
+// on individual metric families, it is the responsibility of the caller to
42
+// append this line to 'out' once all metric families have been written.
43
+// Conveniently, this can be done by calling FinalizeOpenMetrics.
44
+//
45
+// The output should be fully OpenMetrics compliant. However, there are a few
46
+// missing features and peculiarities to avoid complications when switching from
47
+// Prometheus to OpenMetrics or vice versa:
48
+//
49
+// - Counters are expected to have the `_total` suffix in their metric name. In
50
+//   the output, the suffix will be truncated from the `# TYPE` and `# HELP`
51
+//   line. A counter with a missing `_total` suffix is not an error. However,
52
+//   its type will be set to `unknown` in that case to avoid invalid OpenMetrics
53
+//   output.
54
+//
55
+// - No support for the following (optional) features: `# UNIT` line, `_created`
56
+//   line, info type, stateset type, gaugehistogram type.
57
+//
58
+// - The size of exemplar labels is not checked (i.e. it's possible to create
59
+//   exemplars that are larger than allowed by the OpenMetrics specification).
60
+//
61
+// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
62
+//   with a `NaN` value.)
63
+func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
64
+	name := in.GetName()
65
+	if name == "" {
66
+		return 0, fmt.Errorf("MetricFamily has no name: %s", in)
67
+	}
68
+
69
+	// Try the interface upgrade. If it doesn't work, we'll use a
70
+	// bufio.Writer from the sync.Pool.
71
+	w, ok := out.(enhancedWriter)
72
+	if !ok {
73
+		b := bufPool.Get().(*bufio.Writer)
74
+		b.Reset(out)
75
+		w = b
76
+		defer func() {
77
+			bErr := b.Flush()
78
+			if err == nil {
79
+				err = bErr
80
+			}
81
+			bufPool.Put(b)
82
+		}()
83
+	}
84
+
85
+	var (
86
+		n          int
87
+		metricType = in.GetType()
88
+		shortName  = name
89
+	)
90
+	if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
91
+		shortName = name[:len(name)-6]
92
+	}
93
+
94
+	// Comments, first HELP, then TYPE.
95
+	if in.Help != nil {
96
+		n, err = w.WriteString("# HELP ")
97
+		written += n
98
+		if err != nil {
99
+			return
100
+		}
101
+		n, err = w.WriteString(shortName)
102
+		written += n
103
+		if err != nil {
104
+			return
105
+		}
106
+		err = w.WriteByte(' ')
107
+		written++
108
+		if err != nil {
109
+			return
110
+		}
111
+		n, err = writeEscapedString(w, *in.Help, true)
112
+		written += n
113
+		if err != nil {
114
+			return
115
+		}
116
+		err = w.WriteByte('\n')
117
+		written++
118
+		if err != nil {
119
+			return
120
+		}
121
+	}
122
+	n, err = w.WriteString("# TYPE ")
123
+	written += n
124
+	if err != nil {
125
+		return
126
+	}
127
+	n, err = w.WriteString(shortName)
128
+	written += n
129
+	if err != nil {
130
+		return
131
+	}
132
+	switch metricType {
133
+	case dto.MetricType_COUNTER:
134
+		if strings.HasSuffix(name, "_total") {
135
+			n, err = w.WriteString(" counter\n")
136
+		} else {
137
+			n, err = w.WriteString(" unknown\n")
138
+		}
139
+	case dto.MetricType_GAUGE:
140
+		n, err = w.WriteString(" gauge\n")
141
+	case dto.MetricType_SUMMARY:
142
+		n, err = w.WriteString(" summary\n")
143
+	case dto.MetricType_UNTYPED:
144
+		n, err = w.WriteString(" unknown\n")
145
+	case dto.MetricType_HISTOGRAM:
146
+		n, err = w.WriteString(" histogram\n")
147
+	default:
148
+		return written, fmt.Errorf("unknown metric type %s", metricType.String())
149
+	}
150
+	written += n
151
+	if err != nil {
152
+		return
153
+	}
154
+
155
+	// Finally the samples, one line for each.
156
+	for _, metric := range in.Metric {
157
+		switch metricType {
158
+		case dto.MetricType_COUNTER:
159
+			if metric.Counter == nil {
160
+				return written, fmt.Errorf(
161
+					"expected counter in metric %s %s", name, metric,
162
+				)
163
+			}
164
+			// Note that we have ensured above that either the name
165
+			// ends on `_total` or that the rendered type is
166
+			// `unknown`. Therefore, no `_total` must be added here.
167
+			n, err = writeOpenMetricsSample(
168
+				w, name, "", metric, "", 0,
169
+				metric.Counter.GetValue(), 0, false,
170
+				metric.Counter.Exemplar,
171
+			)
172
+		case dto.MetricType_GAUGE:
173
+			if metric.Gauge == nil {
174
+				return written, fmt.Errorf(
175
+					"expected gauge in metric %s %s", name, metric,
176
+				)
177
+			}
178
+			n, err = writeOpenMetricsSample(
179
+				w, name, "", metric, "", 0,
180
+				metric.Gauge.GetValue(), 0, false,
181
+				nil,
182
+			)
183
+		case dto.MetricType_UNTYPED:
184
+			if metric.Untyped == nil {
185
+				return written, fmt.Errorf(
186
+					"expected untyped in metric %s %s", name, metric,
187
+				)
188
+			}
189
+			n, err = writeOpenMetricsSample(
190
+				w, name, "", metric, "", 0,
191
+				metric.Untyped.GetValue(), 0, false,
192
+				nil,
193
+			)
194
+		case dto.MetricType_SUMMARY:
195
+			if metric.Summary == nil {
196
+				return written, fmt.Errorf(
197
+					"expected summary in metric %s %s", name, metric,
198
+				)
199
+			}
200
+			for _, q := range metric.Summary.Quantile {
201
+				n, err = writeOpenMetricsSample(
202
+					w, name, "", metric,
203
+					model.QuantileLabel, q.GetQuantile(),
204
+					q.GetValue(), 0, false,
205
+					nil,
206
+				)
207
+				written += n
208
+				if err != nil {
209
+					return
210
+				}
211
+			}
212
+			n, err = writeOpenMetricsSample(
213
+				w, name, "_sum", metric, "", 0,
214
+				metric.Summary.GetSampleSum(), 0, false,
215
+				nil,
216
+			)
217
+			written += n
218
+			if err != nil {
219
+				return
220
+			}
221
+			n, err = writeOpenMetricsSample(
222
+				w, name, "_count", metric, "", 0,
223
+				0, metric.Summary.GetSampleCount(), true,
224
+				nil,
225
+			)
226
+		case dto.MetricType_HISTOGRAM:
227
+			if metric.Histogram == nil {
228
+				return written, fmt.Errorf(
229
+					"expected histogram in metric %s %s", name, metric,
230
+				)
231
+			}
232
+			infSeen := false
233
+			for _, b := range metric.Histogram.Bucket {
234
+				n, err = writeOpenMetricsSample(
235
+					w, name, "_bucket", metric,
236
+					model.BucketLabel, b.GetUpperBound(),
237
+					0, b.GetCumulativeCount(), true,
238
+					b.Exemplar,
239
+				)
240
+				written += n
241
+				if err != nil {
242
+					return
243
+				}
244
+				if math.IsInf(b.GetUpperBound(), +1) {
245
+					infSeen = true
246
+				}
247
+			}
248
+			if !infSeen {
249
+				n, err = writeOpenMetricsSample(
250
+					w, name, "_bucket", metric,
251
+					model.BucketLabel, math.Inf(+1),
252
+					0, metric.Histogram.GetSampleCount(), true,
253
+					nil,
254
+				)
255
+				written += n
256
+				if err != nil {
257
+					return
258
+				}
259
+			}
260
+			n, err = writeOpenMetricsSample(
261
+				w, name, "_sum", metric, "", 0,
262
+				metric.Histogram.GetSampleSum(), 0, false,
263
+				nil,
264
+			)
265
+			written += n
266
+			if err != nil {
267
+				return
268
+			}
269
+			n, err = writeOpenMetricsSample(
270
+				w, name, "_count", metric, "", 0,
271
+				0, metric.Histogram.GetSampleCount(), true,
272
+				nil,
273
+			)
274
+		default:
275
+			return written, fmt.Errorf(
276
+				"unexpected type in metric %s %s", name, metric,
277
+			)
278
+		}
279
+		written += n
280
+		if err != nil {
281
+			return
282
+		}
283
+	}
284
+	return
285
+}
286
+
287
+// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics.
288
+func FinalizeOpenMetrics(w io.Writer) (written int, err error) {
289
+	return w.Write([]byte("# EOF\n"))
290
+}
291
+
292
+// writeOpenMetricsSample writes a single sample in OpenMetrics text format to
293
+// w, given the metric name, the metric proto message itself, optionally an
294
+// additional label name with a float64 value (use empty string as label name if
295
+// not required), the value (optionally as float64 or uint64, determined by
296
+// useIntValue), and optionally an exemplar (use nil if not required). The
297
+// function returns the number of bytes written and any error encountered.
298
+func writeOpenMetricsSample(
299
+	w enhancedWriter,
300
+	name, suffix string,
301
+	metric *dto.Metric,
302
+	additionalLabelName string, additionalLabelValue float64,
303
+	floatValue float64, intValue uint64, useIntValue bool,
304
+	exemplar *dto.Exemplar,
305
+) (int, error) {
306
+	var written int
307
+	n, err := w.WriteString(name)
308
+	written += n
309
+	if err != nil {
310
+		return written, err
311
+	}
312
+	if suffix != "" {
313
+		n, err = w.WriteString(suffix)
314
+		written += n
315
+		if err != nil {
316
+			return written, err
317
+		}
318
+	}
319
+	n, err = writeOpenMetricsLabelPairs(
320
+		w, metric.Label, additionalLabelName, additionalLabelValue,
321
+	)
322
+	written += n
323
+	if err != nil {
324
+		return written, err
325
+	}
326
+	err = w.WriteByte(' ')
327
+	written++
328
+	if err != nil {
329
+		return written, err
330
+	}
331
+	if useIntValue {
332
+		n, err = writeUint(w, intValue)
333
+	} else {
334
+		n, err = writeOpenMetricsFloat(w, floatValue)
335
+	}
336
+	written += n
337
+	if err != nil {
338
+		return written, err
339
+	}
340
+	if metric.TimestampMs != nil {
341
+		err = w.WriteByte(' ')
342
+		written++
343
+		if err != nil {
344
+			return written, err
345
+		}
346
+		// TODO(beorn7): Format this directly without converting to a float first.
347
+		n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000)
348
+		written += n
349
+		if err != nil {
350
+			return written, err
351
+		}
352
+	}
353
+	if exemplar != nil {
354
+		n, err = writeExemplar(w, exemplar)
355
+		written += n
356
+		if err != nil {
357
+			return written, err
358
+		}
359
+	}
360
+	err = w.WriteByte('\n')
361
+	written++
362
+	if err != nil {
363
+		return written, err
364
+	}
365
+	return written, nil
366
+}
367
+
368
+// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float
369
+// in OpenMetrics style.
370
+func writeOpenMetricsLabelPairs(
371
+	w enhancedWriter,
372
+	in []*dto.LabelPair,
373
+	additionalLabelName string, additionalLabelValue float64,
374
+) (int, error) {
375
+	if len(in) == 0 && additionalLabelName == "" {
376
+		return 0, nil
377
+	}
378
+	var (
379
+		written   int
380
+		separator byte = '{'
381
+	)
382
+	for _, lp := range in {
383
+		err := w.WriteByte(separator)
384
+		written++
385
+		if err != nil {
386
+			return written, err
387
+		}
388
+		n, err := w.WriteString(lp.GetName())
389
+		written += n
390
+		if err != nil {
391
+			return written, err
392
+		}
393
+		n, err = w.WriteString(`="`)
394
+		written += n
395
+		if err != nil {
396
+			return written, err
397
+		}
398
+		n, err = writeEscapedString(w, lp.GetValue(), true)
399
+		written += n
400
+		if err != nil {
401
+			return written, err
402
+		}
403
+		err = w.WriteByte('"')
404
+		written++
405
+		if err != nil {
406
+			return written, err
407
+		}
408
+		separator = ','
409
+	}
410
+	if additionalLabelName != "" {
411
+		err := w.WriteByte(separator)
412
+		written++
413
+		if err != nil {
414
+			return written, err
415
+		}
416
+		n, err := w.WriteString(additionalLabelName)
417
+		written += n
418
+		if err != nil {
419
+			return written, err
420
+		}
421
+		n, err = w.WriteString(`="`)
422
+		written += n
423
+		if err != nil {
424
+			return written, err
425
+		}
426
+		n, err = writeOpenMetricsFloat(w, additionalLabelValue)
427
+		written += n
428
+		if err != nil {
429
+			return written, err
430
+		}
431
+		err = w.WriteByte('"')
432
+		written++
433
+		if err != nil {
434
+			return written, err
435
+		}
436
+	}
437
+	err := w.WriteByte('}')
438
+	written++
439
+	if err != nil {
440
+		return written, err
441
+	}
442
+	return written, nil
443
+}
444
+
445
+// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
446
+// function returns the number of bytes written and any error encountered.
447
+func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
448
+	written := 0
449
+	n, err := w.WriteString(" # ")
450
+	written += n
451
+	if err != nil {
452
+		return written, err
453
+	}
454
+	n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0)
455
+	written += n
456
+	if err != nil {
457
+		return written, err
458
+	}
459
+	err = w.WriteByte(' ')
460
+	written++
461
+	if err != nil {
462
+		return written, err
463
+	}
464
+	n, err = writeOpenMetricsFloat(w, e.GetValue())
465
+	written += n
466
+	if err != nil {
467
+		return written, err
468
+	}
469
+	if e.Timestamp != nil {
470
+		err = w.WriteByte(' ')
471
+		written++
472
+		if err != nil {
473
+			return written, err
474
+		}
475
+		ts, err := ptypes.Timestamp((*e).Timestamp)
476
+		if err != nil {
477
+			return written, err
478
+		}
479
+		// TODO(beorn7): Format this directly from components of ts to
480
+		// avoid overflow/underflow and precision issues of the float
481
+		// conversion.
482
+		n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9)
483
+		written += n
484
+		if err != nil {
485
+			return written, err
486
+		}
487
+	}
488
+	return written, nil
489
+}
490
+
491
+// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting
492
+// number would otherwise contain neither a "." nor an "e".
493
+func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) {
494
+	switch {
495
+	case f == 1:
496
+		return w.WriteString("1.0")
497
+	case f == 0:
498
+		return w.WriteString("0.0")
499
+	case f == -1:
500
+		return w.WriteString("-1.0")
501
+	case math.IsNaN(f):
502
+		return w.WriteString("NaN")
503
+	case math.IsInf(f, +1):
504
+		return w.WriteString("+Inf")
505
+	case math.IsInf(f, -1):
506
+		return w.WriteString("-Inf")
507
+	default:
508
+		bp := numBufPool.Get().(*[]byte)
509
+		*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
510
+		if !bytes.ContainsAny(*bp, "e.") {
511
+			*bp = append(*bp, '.', '0')
512
+		}
513
+		written, err := w.Write(*bp)
514
+		numBufPool.Put(bp)
515
+		return written, err
516
+	}
517
+}
518
+
519
+// writeUint is like writeInt just for uint64.
520
+func writeUint(w enhancedWriter, u uint64) (int, error) {
521
+	bp := numBufPool.Get().(*[]byte)
522
+	*bp = strconv.AppendUint((*bp)[:0], u, 10)
523
+	written, err := w.Write(*bp)
524
+	numBufPool.Put(bp)
525
+	return written, err
526
+}
... ...
@@ -423,9 +423,8 @@ var (
423 423
 func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
424 424
 	if includeDoubleQuote {
425 425
 		return quotedEscaper.WriteString(w, v)
426
-	} else {
427
-		return escaper.WriteString(w, v)
428 426
 	}
427
+	return escaper.WriteString(w, v)
429 428
 }
430 429
 
431 430
 // writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
... ...
@@ -11,12 +11,12 @@ require (
11 11
 	github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223
12 12
 	github.com/pkg/errors v0.8.1
13 13
 	github.com/prometheus/client_golang v1.0.0
14
-	github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90
14
+	github.com/prometheus/client_model v0.2.0
15 15
 	github.com/sirupsen/logrus v1.4.2
16 16
 	golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 // indirect
17 17
 	golang.org/x/sys v0.0.0-20190422165155-953cdadca894
18 18
 	gopkg.in/alecthomas/kingpin.v2 v2.2.6
19
-	gopkg.in/yaml.v2 v2.2.2
19
+	gopkg.in/yaml.v2 v2.2.4
20 20
 )
21 21
 
22 22
 go 1.11
... ...
@@ -14,10 +14,10 @@
14 14
 package procfs
15 15
 
16 16
 import (
17
+	"bufio"
17 18
 	"bytes"
18 19
 	"fmt"
19
-	"io/ioutil"
20
-	"strconv"
20
+	"io"
21 21
 	"strings"
22 22
 
23 23
 	"github.com/prometheus/procfs/internal/util"
... ...
@@ -52,80 +52,102 @@ type Crypto struct {
52 52
 // structs containing the relevant info.  More information available here:
53 53
 // https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
54 54
 func (fs FS) Crypto() ([]Crypto, error) {
55
-	data, err := ioutil.ReadFile(fs.proc.Path("crypto"))
55
+	path := fs.proc.Path("crypto")
56
+	b, err := util.ReadFileNoStat(path)
56 57
 	if err != nil {
57
-		return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
58
+		return nil, fmt.Errorf("error reading crypto %s: %s", path, err)
58 59
 	}
59
-	crypto, err := parseCrypto(data)
60
+
61
+	crypto, err := parseCrypto(bytes.NewReader(b))
60 62
 	if err != nil {
61
-		return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
63
+		return nil, fmt.Errorf("error parsing crypto %s: %s", path, err)
62 64
 	}
65
+
63 66
 	return crypto, nil
64 67
 }
65 68
 
66
-func parseCrypto(cryptoData []byte) ([]Crypto, error) {
67
-	crypto := []Crypto{}
68
-
69
-	cryptoBlocks := bytes.Split(cryptoData, []byte("\n\n"))
70
-
71
-	for _, block := range cryptoBlocks {
72
-		var newCryptoElem Crypto
73
-
74
-		lines := strings.Split(string(block), "\n")
75
-		for _, line := range lines {
76
-			if strings.TrimSpace(line) == "" || line[0] == ' ' {
77
-				continue
78
-			}
79
-			fields := strings.Split(line, ":")
80
-			key := strings.TrimSpace(fields[0])
81
-			value := strings.TrimSpace(fields[1])
82
-			vp := util.NewValueParser(value)
83
-
84
-			switch strings.TrimSpace(key) {
85
-			case "async":
86
-				b, err := strconv.ParseBool(value)
87
-				if err == nil {
88
-					newCryptoElem.Async = b
89
-				}
90
-			case "blocksize":
91
-				newCryptoElem.Blocksize = vp.PUInt64()
92
-			case "chunksize":
93
-				newCryptoElem.Chunksize = vp.PUInt64()
94
-			case "digestsize":
95
-				newCryptoElem.Digestsize = vp.PUInt64()
96
-			case "driver":
97
-				newCryptoElem.Driver = value
98
-			case "geniv":
99
-				newCryptoElem.Geniv = value
100
-			case "internal":
101
-				newCryptoElem.Internal = value
102
-			case "ivsize":
103
-				newCryptoElem.Ivsize = vp.PUInt64()
104
-			case "maxauthsize":
105
-				newCryptoElem.Maxauthsize = vp.PUInt64()
106
-			case "max keysize":
107
-				newCryptoElem.MaxKeysize = vp.PUInt64()
108
-			case "min keysize":
109
-				newCryptoElem.MinKeysize = vp.PUInt64()
110
-			case "module":
111
-				newCryptoElem.Module = value
112
-			case "name":
113
-				newCryptoElem.Name = value
114
-			case "priority":
115
-				newCryptoElem.Priority = vp.PInt64()
116
-			case "refcnt":
117
-				newCryptoElem.Refcnt = vp.PInt64()
118
-			case "seedsize":
119
-				newCryptoElem.Seedsize = vp.PUInt64()
120
-			case "selftest":
121
-				newCryptoElem.Selftest = value
122
-			case "type":
123
-				newCryptoElem.Type = value
124
-			case "walksize":
125
-				newCryptoElem.Walksize = vp.PUInt64()
126
-			}
69
+// parseCrypto parses a /proc/crypto stream into Crypto elements.
70
+func parseCrypto(r io.Reader) ([]Crypto, error) {
71
+	var out []Crypto
72
+
73
+	s := bufio.NewScanner(r)
74
+	for s.Scan() {
75
+		text := s.Text()
76
+		switch {
77
+		case strings.HasPrefix(text, "name"):
78
+			// Each crypto element begins with its name.
79
+			out = append(out, Crypto{})
80
+		case text == "":
81
+			continue
82
+		}
83
+
84
+		kv := strings.Split(text, ":")
85
+		if len(kv) != 2 {
86
+			return nil, fmt.Errorf("malformed crypto line: %q", text)
87
+		}
88
+
89
+		k := strings.TrimSpace(kv[0])
90
+		v := strings.TrimSpace(kv[1])
91
+
92
+		// Parse the key/value pair into the currently focused element.
93
+		c := &out[len(out)-1]
94
+		if err := c.parseKV(k, v); err != nil {
95
+			return nil, err
127 96
 		}
128
-		crypto = append(crypto, newCryptoElem)
129 97
 	}
130
-	return crypto, nil
98
+
99
+	if err := s.Err(); err != nil {
100
+		return nil, err
101
+	}
102
+
103
+	return out, nil
104
+}
105
+
106
+// parseKV parses a key/value pair into the appropriate field of c.
107
+func (c *Crypto) parseKV(k, v string) error {
108
+	vp := util.NewValueParser(v)
109
+
110
+	switch k {
111
+	case "async":
112
+		// Interpret literal yes as true.
113
+		c.Async = v == "yes"
114
+	case "blocksize":
115
+		c.Blocksize = vp.PUInt64()
116
+	case "chunksize":
117
+		c.Chunksize = vp.PUInt64()
118
+	case "digestsize":
119
+		c.Digestsize = vp.PUInt64()
120
+	case "driver":
121
+		c.Driver = v
122
+	case "geniv":
123
+		c.Geniv = v
124
+	case "internal":
125
+		c.Internal = v
126
+	case "ivsize":
127
+		c.Ivsize = vp.PUInt64()
128
+	case "maxauthsize":
129
+		c.Maxauthsize = vp.PUInt64()
130
+	case "max keysize":
131
+		c.MaxKeysize = vp.PUInt64()
132
+	case "min keysize":
133
+		c.MinKeysize = vp.PUInt64()
134
+	case "module":
135
+		c.Module = v
136
+	case "name":
137
+		c.Name = v
138
+	case "priority":
139
+		c.Priority = vp.PInt64()
140
+	case "refcnt":
141
+		c.Refcnt = vp.PInt64()
142
+	case "seedsize":
143
+		c.Seedsize = vp.PUInt64()
144
+	case "selftest":
145
+		c.Selftest = v
146
+	case "type":
147
+		c.Type = v
148
+	case "walksize":
149
+		c.Walksize = vp.PUInt64()
150
+	}
151
+
152
+	return vp.Err()
131 153
 }
... ...
@@ -5,4 +5,5 @@ go 1.12
5 5
 require (
6 6
 	github.com/google/go-cmp v0.3.1
7 7
 	golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
8
+	golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e
8 9
 )
9 10
new file mode 100644
... ...
@@ -0,0 +1,62 @@
0
+// Copyright 2019 The Prometheus Authors
1
+// Licensed under the Apache License, Version 2.0 (the "License");
2
+// you may not use this file except in compliance with the License.
3
+// You may obtain a copy of the License at
4
+//
5
+// http://www.apache.org/licenses/LICENSE-2.0
6
+//
7
+// Unless required by applicable law or agreed to in writing, software
8
+// distributed under the License is distributed on an "AS IS" BASIS,
9
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+// See the License for the specific language governing permissions and
11
+// limitations under the License.
12
+
13
+package procfs
14
+
15
+import (
16
+	"fmt"
17
+	"strconv"
18
+	"strings"
19
+
20
+	"github.com/prometheus/procfs/internal/util"
21
+)
22
+
23
+// LoadAvg represents an entry in /proc/loadavg
24
+type LoadAvg struct {
25
+	Load1  float64
26
+	Load5  float64
27
+	Load15 float64
28
+}
29
+
30
+// LoadAvg returns loadavg from /proc.
31
+func (fs FS) LoadAvg() (*LoadAvg, error) {
32
+	path := fs.proc.Path("loadavg")
33
+
34
+	data, err := util.ReadFileNoStat(path)
35
+	if err != nil {
36
+		return nil, err
37
+	}
38
+	return parseLoad(data)
39
+}
40
+
41
+// Parse /proc loadavg and return 1m, 5m and 15m.
42
+func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
43
+	loads := make([]float64, 3)
44
+	parts := strings.Fields(string(loadavgBytes))
45
+	if len(parts) < 3 {
46
+		return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %s", string(loadavgBytes))
47
+	}
48
+
49
+	var err error
50
+	for i, load := range parts[0:3] {
51
+		loads[i], err = strconv.ParseFloat(load, 64)
52
+		if err != nil {
53
+			return nil, fmt.Errorf("could not parse load '%s': %s", load, err)
54
+		}
55
+	}
56
+	return &LoadAvg{
57
+		Load1:  loads[0],
58
+		Load5:  loads[1],
59
+		Load15: loads[2],
60
+	}, nil
61
+}
... ...
@@ -29,10 +29,10 @@ import (
29 29
 // is described in the following man page.
30 30
 // http://man7.org/linux/man-pages/man5/proc.5.html
31 31
 type MountInfo struct {
32
-	// Unique Id for the mount
33
-	MountId int
34
-	// The Id of the parent mount
35
-	ParentId int
32
+	// Unique ID for the mount
33
+	MountID int
34
+	// The ID of the parent mount
35
+	ParentID int
36 36
 	// The value of `st_dev` for the files on this FS
37 37
 	MajorMinorVer string
38 38
 	// The pathname of the directory in the FS that forms
... ...
@@ -96,11 +96,11 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
96 96
 		SuperOptions:   mountOptionsParser(mountInfo[mountInfoLength-1]),
97 97
 	}
98 98
 
99
-	mount.MountId, err = strconv.Atoi(mountInfo[0])
99
+	mount.MountID, err = strconv.Atoi(mountInfo[0])
100 100
 	if err != nil {
101 101
 		return nil, fmt.Errorf("failed to parse mount ID")
102 102
 	}
103
-	mount.ParentId, err = strconv.Atoi(mountInfo[1])
103
+	mount.ParentID, err = strconv.Atoi(mountInfo[1])
104 104
 	if err != nil {
105 105
 		return nil, fmt.Errorf("failed to parse parent ID")
106 106
 	}
107 107
new file mode 100644
... ...
@@ -0,0 +1,153 @@
0
+// Copyright 2020 The Prometheus Authors
1
+// Licensed under the Apache License, Version 2.0 (the "License");
2
+// you may not use this file except in compliance with the License.
3
+// You may obtain a copy of the License at
4
+//
5
+// http://www.apache.org/licenses/LICENSE-2.0
6
+//
7
+// Unless required by applicable law or agreed to in writing, software
8
+// distributed under the License is distributed on an "AS IS" BASIS,
9
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+// See the License for the specific language governing permissions and
11
+// limitations under the License.
12
+
13
+package procfs
14
+
15
+import (
16
+	"bufio"
17
+	"bytes"
18
+	"fmt"
19
+	"io"
20
+	"strconv"
21
+	"strings"
22
+
23
+	"github.com/prometheus/procfs/internal/util"
24
+)
25
+
26
+// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
27
+// and contains netfilter conntrack statistics at one CPU core
28
+type ConntrackStatEntry struct {
29
+	Entries       uint64
30
+	Found         uint64
31
+	Invalid       uint64
32
+	Ignore        uint64
33
+	Insert        uint64
34
+	InsertFailed  uint64
35
+	Drop          uint64
36
+	EarlyDrop     uint64
37
+	SearchRestart uint64
38
+}
39
+
40
+// Retrieves netfilter's conntrack statistics, split by CPU cores
41
+func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
42
+	return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
43
+}
44
+
45
+// Parses a slice of ConntrackStatEntries from the given filepath
46
+func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
47
+	// This file is small and can be read with one syscall.
48
+	b, err := util.ReadFileNoStat(path)
49
+	if err != nil {
50
+		// Do not wrap this error so the caller can detect os.IsNotExist and
51
+		// similar conditions.
52
+		return nil, err
53
+	}
54
+
55
+	stat, err := parseConntrackStat(bytes.NewReader(b))
56
+	if err != nil {
57
+		return nil, fmt.Errorf("failed to read conntrack stats from %q: %v", path, err)
58
+	}
59
+
60
+	return stat, nil
61
+}
62
+
63
+// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries
64
+func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
65
+	var entries []ConntrackStatEntry
66
+
67
+	scanner := bufio.NewScanner(r)
68
+	scanner.Scan()
69
+	for scanner.Scan() {
70
+		fields := strings.Fields(scanner.Text())
71
+		conntrackEntry, err := parseConntrackStatEntry(fields)
72
+		if err != nil {
73
+			return nil, err
74
+		}
75
+		entries = append(entries, *conntrackEntry)
76
+	}
77
+
78
+	return entries, nil
79
+}
80
+
81
+// Parses a ConntrackStatEntry from given array of fields
82
+func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
83
+	if len(fields) != 17 {
84
+		return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
85
+	}
86
+	entry := &ConntrackStatEntry{}
87
+
88
+	entries, err := parseConntrackStatField(fields[0])
89
+	if err != nil {
90
+		return nil, err
91
+	}
92
+	entry.Entries = entries
93
+
94
+	found, err := parseConntrackStatField(fields[2])
95
+	if err != nil {
96
+		return nil, err
97
+	}
98
+	entry.Found = found
99
+
100
+	invalid, err := parseConntrackStatField(fields[4])
101
+	if err != nil {
102
+		return nil, err
103
+	}
104
+	entry.Invalid = invalid
105
+
106
+	ignore, err := parseConntrackStatField(fields[5])
107
+	if err != nil {
108
+		return nil, err
109
+	}
110
+	entry.Ignore = ignore
111
+
112
+	insert, err := parseConntrackStatField(fields[8])
113
+	if err != nil {
114
+		return nil, err
115
+	}
116
+	entry.Insert = insert
117
+
118
+	insertFailed, err := parseConntrackStatField(fields[9])
119
+	if err != nil {
120
+		return nil, err
121
+	}
122
+	entry.InsertFailed = insertFailed
123
+
124
+	drop, err := parseConntrackStatField(fields[10])
125
+	if err != nil {
126
+		return nil, err
127
+	}
128
+	entry.Drop = drop
129
+
130
+	earlyDrop, err := parseConntrackStatField(fields[11])
131
+	if err != nil {
132
+		return nil, err
133
+	}
134
+	entry.EarlyDrop = earlyDrop
135
+
136
+	searchRestart, err := parseConntrackStatField(fields[16])
137
+	if err != nil {
138
+		return nil, err
139
+	}
140
+	entry.SearchRestart = searchRestart
141
+
142
+	return entry, nil
143
+}
144
+
145
+// Parses a uint64 from given hex in string
146
+func parseConntrackStatField(field string) (uint64, error) {
147
+	val, err := strconv.ParseUint(field, 16, 64)
148
+	if err != nil {
149
+		return 0, fmt.Errorf("couldn't parse \"%s\" field: %s", field, err)
150
+	}
151
+	return val, err
152
+}
... ...
@@ -14,78 +14,89 @@
14 14
 package procfs
15 15
 
16 16
 import (
17
+	"bufio"
18
+	"bytes"
17 19
 	"fmt"
18
-	"io/ioutil"
20
+	"io"
19 21
 	"strconv"
20 22
 	"strings"
23
+
24
+	"github.com/prometheus/procfs/internal/util"
21 25
 )
22 26
 
23 27
 // For the proc file format details,
24
-// see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
28
+// See:
29
+// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
30
+// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
25 31
 // and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
26 32
 
27
-// SoftnetEntry contains a single row of data from /proc/net/softnet_stat
28
-type SoftnetEntry struct {
33
+// SoftnetStat contains a single row of data from /proc/net/softnet_stat
34
+type SoftnetStat struct {
29 35
 	// Number of processed packets
30
-	Processed uint
36
+	Processed uint32
31 37
 	// Number of dropped packets
32
-	Dropped uint
38
+	Dropped uint32
33 39
 	// Number of times processing packets ran out of quota
34
-	TimeSqueezed uint
40
+	TimeSqueezed uint32
35 41
 }
36 42
 
37
-// GatherSoftnetStats reads /proc/net/softnet_stat, parse the relevant columns,
38
-// and then return a slice of SoftnetEntry's.
39
-func (fs FS) GatherSoftnetStats() ([]SoftnetEntry, error) {
40
-	data, err := ioutil.ReadFile(fs.proc.Path("net/softnet_stat"))
43
+var softNetProcFile = "net/softnet_stat"
44
+
45
+// NetSoftnetStat reads data from /proc/net/softnet_stat.
46
+func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
47
+	b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile))
48
+	if err != nil {
49
+		return nil, err
50
+	}
51
+
52
+	entries, err := parseSoftnet(bytes.NewReader(b))
41 53
 	if err != nil {
42
-		return nil, fmt.Errorf("error reading softnet %s: %s", fs.proc.Path("net/softnet_stat"), err)
54
+		return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %v", err)
43 55
 	}
44 56
 
45
-	return parseSoftnetEntries(data)
57
+	return entries, nil
46 58
 }
47 59
 
48
-func parseSoftnetEntries(data []byte) ([]SoftnetEntry, error) {
49
-	lines := strings.Split(string(data), "\n")
50
-	entries := make([]SoftnetEntry, 0)
51
-	var err error
52
-	const (
53
-		expectedColumns = 11
54
-	)
55
-	for _, line := range lines {
56
-		columns := strings.Fields(line)
60
+func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
61
+	const minColumns = 9
62
+
63
+	s := bufio.NewScanner(r)
64
+
65
+	var stats []SoftnetStat
66
+	for s.Scan() {
67
+		columns := strings.Fields(s.Text())
57 68
 		width := len(columns)
58
-		if width == 0 {
59
-			continue
60
-		}
61
-		if width != expectedColumns {
62
-			return []SoftnetEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns)
69
+
70
+		if width < minColumns {
71
+			return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns)
63 72
 		}
64
-		var entry SoftnetEntry
65
-		if entry, err = parseSoftnetEntry(columns); err != nil {
66
-			return []SoftnetEntry{}, err
73
+
74
+		// We only parse the first three columns at the moment.
75
+		us, err := parseHexUint32s(columns[0:3])
76
+		if err != nil {
77
+			return nil, err
67 78
 		}
68
-		entries = append(entries, entry)
79
+
80
+		stats = append(stats, SoftnetStat{
81
+			Processed:    us[0],
82
+			Dropped:      us[1],
83
+			TimeSqueezed: us[2],
84
+		})
69 85
 	}
70 86
 
71
-	return entries, nil
87
+	return stats, nil
72 88
 }
73 89
 
74
-func parseSoftnetEntry(columns []string) (SoftnetEntry, error) {
75
-	var err error
76
-	var processed, dropped, timeSqueezed uint64
77
-	if processed, err = strconv.ParseUint(columns[0], 16, 32); err != nil {
78
-		return SoftnetEntry{}, fmt.Errorf("Unable to parse column 0: %s", err)
79
-	}
80
-	if dropped, err = strconv.ParseUint(columns[1], 16, 32); err != nil {
81
-		return SoftnetEntry{}, fmt.Errorf("Unable to parse column 1: %s", err)
82
-	}
83
-	if timeSqueezed, err = strconv.ParseUint(columns[2], 16, 32); err != nil {
84
-		return SoftnetEntry{}, fmt.Errorf("Unable to parse column 2: %s", err)
90
+func parseHexUint32s(ss []string) ([]uint32, error) {
91
+	us := make([]uint32, 0, len(ss))
92
+	for _, s := range ss {
93
+		u, err := strconv.ParseUint(s, 16, 32)
94
+		if err != nil {
95
+			return nil, err
96
+		}
97
+
98
+		us = append(us, uint32(u))
85 99
 	}
86
-	return SoftnetEntry{
87
-		Processed:    uint(processed),
88
-		Dropped:      uint(dropped),
89
-		TimeSqueezed: uint(timeSqueezed),
90
-	}, nil
100
+
101
+	return us, nil
91 102
 }
92 103
new file mode 100644
... ...
@@ -0,0 +1,229 @@
0
+// Copyright 2020 The Prometheus Authors
1
+// Licensed under the Apache License, Version 2.0 (the "License");
2
+// you may not use this file except in compliance with the License.
3
+// You may obtain a copy of the License at
4
+//
5
+// http://www.apache.org/licenses/LICENSE-2.0
6
+//
7
+// Unless required by applicable law or agreed to in writing, software
8
+// distributed under the License is distributed on an "AS IS" BASIS,
9
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+// See the License for the specific language governing permissions and
11
+// limitations under the License.
12
+
13
+package procfs
14
+
15
+import (
16
+	"bufio"
17
+	"encoding/hex"
18
+	"fmt"
19
+	"io"
20
+	"net"
21
+	"os"
22
+	"strconv"
23
+	"strings"
24
+)
25
+
26
+const (
27
+	// readLimit is used by io.LimitReader while reading the content of the
28
+	// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
29
+	// as each line represents a single used socket.
30
+	// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
31
+	// With e.g. 150 Byte per line and the maximum number of 65535,
32
+	// the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP.
33
+	readLimit = 4294967296 // Byte -> 4 GiB
34
+)
35
+
36
+type (
37
+	// NetUDP represents the contents of /proc/net/udp{,6} file without the header.
38
+	NetUDP []*netUDPLine
39
+
40
+	// NetUDPSummary provides already computed values like the total queue lengths or
41
+	// the total number of used sockets. In contrast to NetUDP it does not collect
42
+	// the parsed lines into a slice.
43
+	NetUDPSummary struct {
44
+		// TxQueueLength shows the total queue length of all parsed tx_queue lengths.
45
+		TxQueueLength uint64
46
+		// RxQueueLength shows the total queue length of all parsed rx_queue lengths.
47
+		RxQueueLength uint64
48
+		// UsedSockets shows the total number of parsed lines representing the
49
+		// number of used sockets.
50
+		UsedSockets uint64
51
+	}
52
+
53
+	// netUDPLine represents the fields parsed from a single line
54
+	// in /proc/net/udp{,6}. Fields which are not used by UDP are skipped.
55
+	// For the proc file format details, see https://linux.die.net/man/5/proc.
56
+	netUDPLine struct {
57
+		Sl        uint64
58
+		LocalAddr net.IP
59
+		LocalPort uint64
60
+		RemAddr   net.IP
61
+		RemPort   uint64
62
+		St        uint64
63
+		TxQueue   uint64
64
+		RxQueue   uint64
65
+		UID       uint64
66
+	}
67
+)
68
+
69
+// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams
70
+// read from /proc/net/udp.
71
+func (fs FS) NetUDP() (NetUDP, error) {
72
+	return newNetUDP(fs.proc.Path("net/udp"))
73
+}
74
+
75
+// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams
76
+// read from /proc/net/udp6.
77
+func (fs FS) NetUDP6() (NetUDP, error) {
78
+	return newNetUDP(fs.proc.Path("net/udp6"))
79
+}
80
+
81
+// NetUDPSummary returns already computed statistics like the total queue lengths
82
+// for UDP datagrams read from /proc/net/udp.
83
+func (fs FS) NetUDPSummary() (*NetUDPSummary, error) {
84
+	return newNetUDPSummary(fs.proc.Path("net/udp"))
85
+}
86
+
87
+// NetUDP6Summary returns already computed statistics like the total queue lengths
88
+// for UDP datagrams read from /proc/net/udp6.
89
+func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) {
90
+	return newNetUDPSummary(fs.proc.Path("net/udp6"))
91
+}
92
+
93
+// newNetUDP creates a new NetUDP{,6} from the contents of the given file.
94
+func newNetUDP(file string) (NetUDP, error) {
95
+	f, err := os.Open(file)
96
+	if err != nil {
97
+		return nil, err
98
+	}
99
+	defer f.Close()
100
+
101
+	netUDP := NetUDP{}
102
+
103
+	lr := io.LimitReader(f, readLimit)
104
+	s := bufio.NewScanner(lr)
105
+	s.Scan() // skip first line with headers
106
+	for s.Scan() {
107
+		fields := strings.Fields(s.Text())
108
+		line, err := parseNetUDPLine(fields)
109
+		if err != nil {
110
+			return nil, err
111
+		}
112
+		netUDP = append(netUDP, line)
113
+	}
114
+	if err := s.Err(); err != nil {
115
+		return nil, err
116
+	}
117
+	return netUDP, nil
118
+}
119
+
120
+// newNetUDPSummary creates a new NetUDP{,6} from the contents of the given file.
121
+func newNetUDPSummary(file string) (*NetUDPSummary, error) {
122
+	f, err := os.Open(file)
123
+	if err != nil {
124
+		return nil, err
125
+	}
126
+	defer f.Close()
127
+
128
+	netUDPSummary := &NetUDPSummary{}
129
+
130
+	lr := io.LimitReader(f, readLimit)
131
+	s := bufio.NewScanner(lr)
132
+	s.Scan() // skip first line with headers
133
+	for s.Scan() {
134
+		fields := strings.Fields(s.Text())
135
+		line, err := parseNetUDPLine(fields)
136
+		if err != nil {
137
+			return nil, err
138
+		}
139
+		netUDPSummary.TxQueueLength += line.TxQueue
140
+		netUDPSummary.RxQueueLength += line.RxQueue
141
+		netUDPSummary.UsedSockets++
142
+	}
143
+	if err := s.Err(); err != nil {
144
+		return nil, err
145
+	}
146
+	return netUDPSummary, nil
147
+}
148
+
149
+// parseNetUDPLine parses a single line, represented by a list of fields.
150
+func parseNetUDPLine(fields []string) (*netUDPLine, error) {
151
+	line := &netUDPLine{}
152
+	if len(fields) < 8 {
153
+		return nil, fmt.Errorf(
154
+			"cannot parse net udp socket line as it has less then 8 columns: %s",
155
+			strings.Join(fields, " "),
156
+		)
157
+	}
158
+	var err error // parse error
159
+
160
+	// sl
161
+	s := strings.Split(fields[0], ":")
162
+	if len(s) != 2 {
163
+		return nil, fmt.Errorf(
164
+			"cannot parse sl field in udp socket line: %s", fields[0])
165
+	}
166
+
167
+	if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
168
+		return nil, fmt.Errorf("cannot parse sl value in udp socket line: %s", err)
169
+	}
170
+	// local_address
171
+	l := strings.Split(fields[1], ":")
172
+	if len(l) != 2 {
173
+		return nil, fmt.Errorf(
174
+			"cannot parse local_address field in udp socket line: %s", fields[1])
175
+	}
176
+	if line.LocalAddr, err = hex.DecodeString(l[0]); err != nil {
177
+		return nil, fmt.Errorf(
178
+			"cannot parse local_address value in udp socket line: %s", err)
179
+	}
180
+	if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
181
+		return nil, fmt.Errorf(
182
+			"cannot parse local_address port value in udp socket line: %s", err)
183
+	}
184
+
185
+	// remote_address
186
+	r := strings.Split(fields[2], ":")
187
+	if len(r) != 2 {
188
+		return nil, fmt.Errorf(
189
+			"cannot parse rem_address field in udp socket line: %s", fields[1])
190
+	}
191
+	if line.RemAddr, err = hex.DecodeString(r[0]); err != nil {
192
+		return nil, fmt.Errorf(
193
+			"cannot parse rem_address value in udp socket line: %s", err)
194
+	}
195
+	if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
196
+		return nil, fmt.Errorf(
197
+			"cannot parse rem_address port value in udp socket line: %s", err)
198
+	}
199
+
200
+	// st
201
+	if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
202
+		return nil, fmt.Errorf(
203
+			"cannot parse st value in udp socket line: %s", err)
204
+	}
205
+
206
+	// tx_queue and rx_queue
207
+	q := strings.Split(fields[4], ":")
208
+	if len(q) != 2 {
209
+		return nil, fmt.Errorf(
210
+			"cannot parse tx/rx queues in udp socket line as it has a missing colon: %s",
211
+			fields[4],
212
+		)
213
+	}
214
+	if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
215
+		return nil, fmt.Errorf("cannot parse tx_queue value in udp socket line: %s", err)
216
+	}
217
+	if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
218
+		return nil, fmt.Errorf("cannot parse rx_queue value in udp socket line: %s", err)
219
+	}
220
+
221
+	// uid
222
+	if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
223
+		return nil, fmt.Errorf(
224
+			"cannot parse uid value in udp socket line: %s", err)
225
+	}
226
+
227
+	return line, nil
228
+}
... ...
@@ -15,7 +15,6 @@ package procfs
15 15
 
16 16
 import (
17 17
 	"bufio"
18
-	"errors"
19 18
 	"fmt"
20 19
 	"io"
21 20
 	"os"
... ...
@@ -27,25 +26,15 @@ import (
27 27
 // see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
28 28
 // and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
29 29
 
30
-const (
31
-	netUnixKernelPtrIdx = iota
32
-	netUnixRefCountIdx
33
-	_
34
-	netUnixFlagsIdx
35
-	netUnixTypeIdx
36
-	netUnixStateIdx
37
-	netUnixInodeIdx
38
-
39
-	// Inode and Path are optional.
40
-	netUnixStaticFieldsCnt = 6
41
-)
42
-
30
+// Constants for the various /proc/net/unix enumerations.
31
+// TODO: match against x/sys/unix or similar?
43 32
 const (
44 33
 	netUnixTypeStream    = 1
45 34
 	netUnixTypeDgram     = 2
46 35
 	netUnixTypeSeqpacket = 5
47 36
 
48
-	netUnixFlagListen = 1 << 16
37
+	netUnixFlagDefault = 0
38
+	netUnixFlagListen  = 1 << 16
49 39
 
50 40
 	netUnixStateUnconnected  = 1
51 41
 	netUnixStateConnecting   = 2
... ...
@@ -53,129 +42,127 @@ const (
53 53
 	netUnixStateDisconnected = 4
54 54
 )
55 55
 
56
-var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format")
56
+// NetUNIXType is the type of the type field.
57
+type NetUNIXType uint64
57 58
 
58
-// NetUnixType is the type of the type field.
59
-type NetUnixType uint64
59
+// NetUNIXFlags is the type of the flags field.
60
+type NetUNIXFlags uint64
60 61
 
61
-// NetUnixFlags is the type of the flags field.
62
-type NetUnixFlags uint64
62
+// NetUNIXState is the type of the state field.
63
+type NetUNIXState uint64
63 64
 
64
-// NetUnixState is the type of the state field.
65
-type NetUnixState uint64
66
-
67
-// NetUnixLine represents a line of /proc/net/unix.
68
-type NetUnixLine struct {
65
+// NetUNIXLine represents a line of /proc/net/unix.
66
+type NetUNIXLine struct {
69 67
 	KernelPtr string
70 68
 	RefCount  uint64
71 69
 	Protocol  uint64
72
-	Flags     NetUnixFlags
73
-	Type      NetUnixType
74
-	State     NetUnixState
70
+	Flags     NetUNIXFlags
71
+	Type      NetUNIXType
72
+	State     NetUNIXState
75 73
 	Inode     uint64
76 74
 	Path      string
77 75
 }
78 76
 
79
-// NetUnix holds the data read from /proc/net/unix.
80
-type NetUnix struct {
81
-	Rows []*NetUnixLine
77
+// NetUNIX holds the data read from /proc/net/unix.
78
+type NetUNIX struct {
79
+	Rows []*NetUNIXLine
82 80
 }
83 81
 
84
-// NewNetUnix returns data read from /proc/net/unix.
85
-func NewNetUnix() (*NetUnix, error) {
86
-	fs, err := NewFS(DefaultMountPoint)
87
-	if err != nil {
88
-		return nil, err
89
-	}
90
-
91
-	return fs.NewNetUnix()
82
+// NetUNIX returns data read from /proc/net/unix.
83
+func (fs FS) NetUNIX() (*NetUNIX, error) {
84
+	return readNetUNIX(fs.proc.Path("net/unix"))
92 85
 }
93 86
 
94
-// NewNetUnix returns data read from /proc/net/unix.
95
-func (fs FS) NewNetUnix() (*NetUnix, error) {
96
-	return NewNetUnixByPath(fs.proc.Path("net/unix"))
97
-}
98
-
99
-// NewNetUnixByPath returns data read from /proc/net/unix by file path.
100
-// It might returns an error with partial parsed data, if an error occur after some data parsed.
101
-func NewNetUnixByPath(path string) (*NetUnix, error) {
102
-	f, err := os.Open(path)
87
+// readNetUNIX reads data in /proc/net/unix format from the specified file.
88
+func readNetUNIX(file string) (*NetUNIX, error) {
89
+	// This file could be quite large and a streaming read is desirable versus
90
+	// reading the entire contents at once.
91
+	f, err := os.Open(file)
103 92
 	if err != nil {
104 93
 		return nil, err
105 94
 	}
106 95
 	defer f.Close()
107
-	return NewNetUnixByReader(f)
96
+
97
+	return parseNetUNIX(f)
108 98
 }
109 99
 
110
-// NewNetUnixByReader returns data read from /proc/net/unix by a reader.
111
-// It might returns an error with partial parsed data, if an error occur after some data parsed.
112
-func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) {
113
-	nu := &NetUnix{
114
-		Rows: make([]*NetUnixLine, 0, 32),
115
-	}
116
-	scanner := bufio.NewScanner(reader)
117
-	// Omit the header line.
118
-	scanner.Scan()
119
-	header := scanner.Text()
100
+// parseNetUNIX creates a NetUnix structure from the incoming stream.
101
+func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
102
+	// Begin scanning by checking for the existence of Inode.
103
+	s := bufio.NewScanner(r)
104
+	s.Scan()
105
+
120 106
 	// From the man page of proc(5), it does not contain an Inode field,
121
-	// but in actually it exists.
122
-	// This code works for both cases.
123
-	hasInode := strings.Contains(header, "Inode")
107
+	// but in actually it exists. This code works for both cases.
108
+	hasInode := strings.Contains(s.Text(), "Inode")
124 109
 
125
-	minFieldsCnt := netUnixStaticFieldsCnt
110
+	// Expect a minimum number of fields, but Inode and Path are optional:
111
+	// Num       RefCount Protocol Flags    Type St Inode Path
112
+	minFields := 6
126 113
 	if hasInode {
127
-		minFieldsCnt++
114
+		minFields++
128 115
 	}
129
-	for scanner.Scan() {
130
-		line := scanner.Text()
131
-		item, err := nu.parseLine(line, hasInode, minFieldsCnt)
116
+
117
+	var nu NetUNIX
118
+	for s.Scan() {
119
+		line := s.Text()
120
+		item, err := nu.parseLine(line, hasInode, minFields)
132 121
 		if err != nil {
133
-			return nu, err
122
+			return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %v", line, err)
134 123
 		}
124
+
135 125
 		nu.Rows = append(nu.Rows, item)
136 126
 	}
137 127
 
138
-	return nu, scanner.Err()
128
+	if err := s.Err(); err != nil {
129
+		return nil, fmt.Errorf("failed to scan /proc/net/unix data: %v", err)
130
+	}
131
+
132
+	return &nu, nil
139 133
 }
140 134
 
141
-func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) {
135
+func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
142 136
 	fields := strings.Fields(line)
143
-	fieldsLen := len(fields)
144
-	if fieldsLen < minFieldsCnt {
145
-		return nil, fmt.Errorf(
146
-			"Parse Unix domain failed: expect at least %d fields but got %d",
147
-			minFieldsCnt, fieldsLen)
148
-	}
149
-	kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx])
150
-	if err != nil {
151
-		return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err)
137
+
138
+	l := len(fields)
139
+	if l < min {
140
+		return nil, fmt.Errorf("expected at least %d fields but got %d", min, l)
152 141
 	}
153
-	users, err := u.parseUsers(fields[netUnixRefCountIdx])
142
+
143
+	// Field offsets are as follows:
144
+	// Num       RefCount Protocol Flags    Type St Inode Path
145
+
146
+	kernelPtr := strings.TrimSuffix(fields[0], ":")
147
+
148
+	users, err := u.parseUsers(fields[1])
154 149
 	if err != nil {
155
-		return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err)
150
+		return nil, fmt.Errorf("failed to parse ref count(%s): %v", fields[1], err)
156 151
 	}
157
-	flags, err := u.parseFlags(fields[netUnixFlagsIdx])
152
+
153
+	flags, err := u.parseFlags(fields[3])
158 154
 	if err != nil {
159
-		return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err)
155
+		return nil, fmt.Errorf("failed to parse flags(%s): %v", fields[3], err)
160 156
 	}
161
-	typ, err := u.parseType(fields[netUnixTypeIdx])
157
+
158
+	typ, err := u.parseType(fields[4])
162 159
 	if err != nil {
163
-		return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err)
160
+		return nil, fmt.Errorf("failed to parse type(%s): %v", fields[4], err)
164 161
 	}
165
-	state, err := u.parseState(fields[netUnixStateIdx])
162
+
163
+	state, err := u.parseState(fields[5])
166 164
 	if err != nil {
167
-		return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err)
165
+		return nil, fmt.Errorf("failed to parse state(%s): %v", fields[5], err)
168 166
 	}
167
+
169 168
 	var inode uint64
170 169
 	if hasInode {
171
-		inodeStr := fields[netUnixInodeIdx]
172
-		inode, err = u.parseInode(inodeStr)
170
+		inode, err = u.parseInode(fields[6])
173 171
 		if err != nil {
174
-			return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err)
172
+			return nil, fmt.Errorf("failed to parse inode(%s): %v", fields[6], err)
175 173
 		}
176 174
 	}
177 175
 
178
-	nuLine := &NetUnixLine{
176
+	n := &NetUNIXLine{
179 177
 		KernelPtr: kernelPtr,
180 178
 		RefCount:  users,
181 179
 		Type:      typ,
... ...
@@ -185,57 +172,56 @@ func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetU
185 185
 	}
186 186
 
187 187
 	// Path field is optional.
188
-	if fieldsLen > minFieldsCnt {
189
-		pathIdx := netUnixInodeIdx + 1
188
+	if l > min {
189
+		// Path occurs at either index 6 or 7 depending on whether inode is
190
+		// already present.
191
+		pathIdx := 7
190 192
 		if !hasInode {
191 193
 			pathIdx--
192 194
 		}
193
-		nuLine.Path = fields[pathIdx]
194
-	}
195
-
196
-	return nuLine, nil
197
-}
198 195
 
199
-func (u NetUnix) parseKernelPtr(str string) (string, error) {
200
-	if !strings.HasSuffix(str, ":") {
201
-		return "", errInvalidKernelPtrFmt
196
+		n.Path = fields[pathIdx]
202 197
 	}
203
-	return str[:len(str)-1], nil
198
+
199
+	return n, nil
204 200
 }
205 201
 
206
-func (u NetUnix) parseUsers(hexStr string) (uint64, error) {
207
-	return strconv.ParseUint(hexStr, 16, 32)
202
+func (u NetUNIX) parseUsers(s string) (uint64, error) {
203
+	return strconv.ParseUint(s, 16, 32)
208 204
 }
209 205
 
210
-func (u NetUnix) parseType(hexStr string) (NetUnixType, error) {
211
-	typ, err := strconv.ParseUint(hexStr, 16, 16)
206
+func (u NetUNIX) parseType(s string) (NetUNIXType, error) {
207
+	typ, err := strconv.ParseUint(s, 16, 16)
212 208
 	if err != nil {
213 209
 		return 0, err
214 210
 	}
215
-	return NetUnixType(typ), nil
211
+
212
+	return NetUNIXType(typ), nil
216 213
 }
217 214
 
218
-func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) {
219
-	flags, err := strconv.ParseUint(hexStr, 16, 32)
215
+func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) {
216
+	flags, err := strconv.ParseUint(s, 16, 32)
220 217
 	if err != nil {
221 218
 		return 0, err
222 219
 	}
223
-	return NetUnixFlags(flags), nil
220
+
221
+	return NetUNIXFlags(flags), nil
224 222
 }
225 223
 
226
-func (u NetUnix) parseState(hexStr string) (NetUnixState, error) {
227
-	st, err := strconv.ParseInt(hexStr, 16, 8)
224
+func (u NetUNIX) parseState(s string) (NetUNIXState, error) {
225
+	st, err := strconv.ParseInt(s, 16, 8)
228 226
 	if err != nil {
229 227
 		return 0, err
230 228
 	}
231
-	return NetUnixState(st), nil
229
+
230
+	return NetUNIXState(st), nil
232 231
 }
233 232
 
234
-func (u NetUnix) parseInode(inodeStr string) (uint64, error) {
235
-	return strconv.ParseUint(inodeStr, 10, 64)
233
+func (u NetUNIX) parseInode(s string) (uint64, error) {
234
+	return strconv.ParseUint(s, 10, 64)
236 235
 }
237 236
 
238
-func (t NetUnixType) String() string {
237
+func (t NetUNIXType) String() string {
239 238
 	switch t {
240 239
 	case netUnixTypeStream:
241 240
 		return "stream"
... ...
@@ -247,7 +233,7 @@ func (t NetUnixType) String() string {
247 247
 	return "unknown"
248 248
 }
249 249
 
250
-func (f NetUnixFlags) String() string {
250
+func (f NetUNIXFlags) String() string {
251 251
 	switch f {
252 252
 	case netUnixFlagListen:
253 253
 		return "listen"
... ...
@@ -256,7 +242,7 @@ func (f NetUnixFlags) String() string {
256 256
 	}
257 257
 }
258 258
 
259
-func (s NetUnixState) String() string {
259
+func (s NetUNIXState) String() string {
260 260
 	switch s {
261 261
 	case netUnixStateUnconnected:
262 262
 		return "unconnected"
... ...
@@ -16,6 +16,7 @@ package procfs
16 16
 import (
17 17
 	"bufio"
18 18
 	"bytes"
19
+	"errors"
19 20
 	"regexp"
20 21
 
21 22
 	"github.com/prometheus/procfs/internal/util"
... ...
@@ -23,10 +24,11 @@ import (
23 23
 
24 24
 // Regexp variables
25 25
 var (
26
-	rPos     = regexp.MustCompile(`^pos:\s+(\d+)$`)
27
-	rFlags   = regexp.MustCompile(`^flags:\s+(\d+)$`)
28
-	rMntID   = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
29
-	rInotify = regexp.MustCompile(`^inotify`)
26
+	rPos          = regexp.MustCompile(`^pos:\s+(\d+)$`)
27
+	rFlags        = regexp.MustCompile(`^flags:\s+(\d+)$`)
28
+	rMntID        = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
29
+	rInotify      = regexp.MustCompile(`^inotify`)
30
+	rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
30 31
 )
31 32
 
32 33
 // ProcFDInfo contains represents file descriptor information.
... ...
@@ -96,15 +98,21 @@ type InotifyInfo struct {
96 96
 
97 97
 // InotifyInfo constructor. Only available on kernel 3.8+.
98 98
 func parseInotifyInfo(line string) (*InotifyInfo, error) {
99
-	r := regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)\s+mask:([0-9a-f]+)`)
100
-	m := r.FindStringSubmatch(line)
101
-	i := &InotifyInfo{
102
-		WD:   m[1],
103
-		Ino:  m[2],
104
-		Sdev: m[3],
105
-		Mask: m[4],
99
+	m := rInotifyParts.FindStringSubmatch(line)
100
+	if len(m) >= 4 {
101
+		var mask string
102
+		if len(m) == 5 {
103
+			mask = m[4]
104
+		}
105
+		i := &InotifyInfo{
106
+			WD:   m[1],
107
+			Ino:  m[2],
108
+			Sdev: m[3],
109
+			Mask: mask,
110
+		}
111
+		return i, nil
106 112
 	}
107
-	return i, nil
113
+	return nil, errors.New("invalid inode entry: " + line)
108 114
 }
109 115
 
110 116
 // ProcFDInfos represents a list of ProcFDInfo structs.
111 117
new file mode 100644
... ...
@@ -0,0 +1,208 @@
0
+// Copyright 2019 The Prometheus Authors
1
+// Licensed under the Apache License, Version 2.0 (the "License");
2
+// you may not use this file except in compliance with the License.
3
+// You may obtain a copy of the License at
4
+//
5
+// http://www.apache.org/licenses/LICENSE-2.0
6
+//
7
+// Unless required by applicable law or agreed to in writing, software
8
+// distributed under the License is distributed on an "AS IS" BASIS,
9
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+// See the License for the specific language governing permissions and
11
+// limitations under the License.
12
+
13
+// +build !windows
14
+
15
+package procfs
16
+
17
+import (
18
+	"bufio"
19
+	"fmt"
20
+	"os"
21
+	"strconv"
22
+	"strings"
23
+
24
+	"golang.org/x/sys/unix"
25
+)
26
+
27
+type ProcMapPermissions struct {
28
+	// mapping has the [R]ead flag set
29
+	Read bool
30
+	// mapping has the [W]rite flag set
31
+	Write bool
32
+	// mapping has the [X]ecutable flag set
33
+	Execute bool
34
+	// mapping has the [S]hared flag set
35
+	Shared bool
36
+	// mapping is marked as [P]rivate (copy on write)
37
+	Private bool
38
+}
39
+
40
+// ProcMap contains the process memory-mappings of the process,
41
+// read from /proc/[pid]/maps
42
+type ProcMap struct {
43
+	// The start address of current mapping.
44
+	StartAddr uintptr
45
+	// The end address of the current mapping
46
+	EndAddr uintptr
47
+	// The permissions for this mapping
48
+	Perms *ProcMapPermissions
49
+	// The current offset into the file/fd (e.g., shared libs)
50
+	Offset int64
51
+	// Device owner of this mapping (major:minor) in Mkdev format.
52
+	Dev uint64
53
+	// The inode of the device above
54
+	Inode uint64
55
+	// The file or psuedofile (or empty==anonymous)
56
+	Pathname string
57
+}
58
+
59
+// parseDevice parses the device token of a line and converts it to a dev_t
60
+// (mkdev) like structure.
61
+func parseDevice(s string) (uint64, error) {
62
+	toks := strings.Split(s, ":")
63
+	if len(toks) < 2 {
64
+		return 0, fmt.Errorf("unexpected number of fields")
65
+	}
66
+
67
+	major, err := strconv.ParseUint(toks[0], 16, 0)
68
+	if err != nil {
69
+		return 0, err
70
+	}
71
+
72
+	minor, err := strconv.ParseUint(toks[1], 16, 0)
73
+	if err != nil {
74
+		return 0, err
75
+	}
76
+
77
+	return unix.Mkdev(uint32(major), uint32(minor)), nil
78
+}
79
+
80
+// parseAddress just converts a hex-string to a uintptr
81
+func parseAddress(s string) (uintptr, error) {
82
+	a, err := strconv.ParseUint(s, 16, 0)
83
+	if err != nil {
84
+		return 0, err
85
+	}
86
+
87
+	return uintptr(a), nil
88
+}
89
+
90
+// parseAddresses parses the start-end address
91
+func parseAddresses(s string) (uintptr, uintptr, error) {
92
+	toks := strings.Split(s, "-")
93
+	if len(toks) < 2 {
94
+		return 0, 0, fmt.Errorf("invalid address")
95
+	}
96
+
97
+	saddr, err := parseAddress(toks[0])
98
+	if err != nil {
99
+		return 0, 0, err
100
+	}
101
+
102
+	eaddr, err := parseAddress(toks[1])
103
+	if err != nil {
104
+		return 0, 0, err
105
+	}
106
+
107
+	return saddr, eaddr, nil
108
+}
109
+
110
+// parsePermissions parses a token and returns any that are set.
111
+func parsePermissions(s string) (*ProcMapPermissions, error) {
112
+	if len(s) < 4 {
113
+		return nil, fmt.Errorf("invalid permissions token")
114
+	}
115
+
116
+	perms := ProcMapPermissions{}
117
+	for _, ch := range s {
118
+		switch ch {
119
+		case 'r':
120
+			perms.Read = true
121
+		case 'w':
122
+			perms.Write = true
123
+		case 'x':
124
+			perms.Execute = true
125
+		case 'p':
126
+			perms.Private = true
127
+		case 's':
128
+			perms.Shared = true
129
+		}
130
+	}
131
+
132
+	return &perms, nil
133
+}
134
+
135
+// parseProcMap will attempt to parse a single line within a proc/[pid]/maps
136
+// buffer.
137
+func parseProcMap(text string) (*ProcMap, error) {
138
+	fields := strings.Fields(text)
139
+	if len(fields) < 5 {
140
+		return nil, fmt.Errorf("truncated procmap entry")
141
+	}
142
+
143
+	saddr, eaddr, err := parseAddresses(fields[0])
144
+	if err != nil {
145
+		return nil, err
146
+	}
147
+
148
+	perms, err := parsePermissions(fields[1])
149
+	if err != nil {
150
+		return nil, err
151
+	}
152
+
153
+	offset, err := strconv.ParseInt(fields[2], 16, 0)
154
+	if err != nil {
155
+		return nil, err
156
+	}
157
+
158
+	device, err := parseDevice(fields[3])
159
+	if err != nil {
160
+		return nil, err
161
+	}
162
+
163
+	inode, err := strconv.ParseUint(fields[4], 10, 0)
164
+	if err != nil {
165
+		return nil, err
166
+	}
167
+
168
+	pathname := ""
169
+
170
+	if len(fields) >= 5 {
171
+		pathname = strings.Join(fields[5:], " ")
172
+	}
173
+
174
+	return &ProcMap{
175
+		StartAddr: saddr,
176
+		EndAddr:   eaddr,
177
+		Perms:     perms,
178
+		Offset:    offset,
179
+		Dev:       device,
180
+		Inode:     inode,
181
+		Pathname:  pathname,
182
+	}, nil
183
+}
184
+
185
+// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the
186
+// process.
187
+func (p Proc) ProcMaps() ([]*ProcMap, error) {
188
+	file, err := os.Open(p.path("maps"))
189
+	if err != nil {
190
+		return nil, err
191
+	}
192
+	defer file.Close()
193
+
194
+	maps := []*ProcMap{}
195
+	scan := bufio.NewScanner(file)
196
+
197
+	for scan.Scan() {
198
+		m, err := parseProcMap(scan.Text())
199
+		if err != nil {
200
+			return nil, err
201
+		}
202
+
203
+		maps = append(maps, m)
204
+	}
205
+
206
+	return maps, nil
207
+}
... ...
@@ -33,37 +33,37 @@ type ProcStatus struct {
33 33
 	TGID int
34 34
 
35 35
 	// Peak virtual memory size.
36
-	VmPeak uint64
36
+	VmPeak uint64 // nolint:golint
37 37
 	// Virtual memory size.
38
-	VmSize uint64
38
+	VmSize uint64 // nolint:golint
39 39
 	// Locked memory size.
40
-	VmLck uint64
40
+	VmLck uint64 // nolint:golint
41 41
 	// Pinned memory size.
42
-	VmPin uint64
42
+	VmPin uint64 // nolint:golint
43 43
 	// Peak resident set size.
44
-	VmHWM uint64
44
+	VmHWM uint64 // nolint:golint
45 45
 	// Resident set size (sum of RssAnnon RssFile and RssShmem).
46
-	VmRSS uint64
46
+	VmRSS uint64 // nolint:golint
47 47
 	// Size of resident anonymous memory.
48
-	RssAnon uint64
48
+	RssAnon uint64 // nolint:golint
49 49
 	// Size of resident file mappings.
50
-	RssFile uint64
50
+	RssFile uint64 // nolint:golint
51 51
 	// Size of resident shared memory.
52
-	RssShmem uint64
52
+	RssShmem uint64 // nolint:golint
53 53
 	// Size of data segments.
54
-	VmData uint64
54
+	VmData uint64 // nolint:golint
55 55
 	// Size of stack segments.
56
-	VmStk uint64
56
+	VmStk uint64 // nolint:golint
57 57
 	// Size of text segments.
58
-	VmExe uint64
58
+	VmExe uint64 // nolint:golint
59 59
 	// Shared library code size.
60
-	VmLib uint64
60
+	VmLib uint64 // nolint:golint
61 61
 	// Page table entries size.
62
-	VmPTE uint64
62
+	VmPTE uint64 // nolint:golint
63 63
 	// Size of second-level page tables.
64
-	VmPMD uint64
64
+	VmPMD uint64 // nolint:golint
65 65
 	// Swapped-out virtual memory size by anonymous private.
66
-	VmSwap uint64
66
+	VmSwap uint64 // nolint:golint
67 67
 	// Size of hugetlb memory portions
68 68
 	HugetlbPages uint64
69 69
 
... ...
@@ -71,6 +71,9 @@ type ProcStatus struct {
71 71
 	VoluntaryCtxtSwitches uint64
72 72
 	// Number of involuntary context switches.
73 73
 	NonVoluntaryCtxtSwitches uint64
74
+
75
+	// UIDs of the process (Real, effective, saved set, and filesystem UIDs (GIDs))
76
+	UIDs [4]string
74 77
 }
75 78
 
76 79
 // NewStatus returns the current status information of the process.
... ...
@@ -114,6 +117,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
114 114
 		s.TGID = int(vUint)
115 115
 	case "Name":
116 116
 		s.Name = vString
117
+	case "Uid":
118
+		copy(s.UIDs[:], strings.Split(vString, "\t"))
117 119
 	case "VmPeak":
118 120
 		s.VmPeak = vUintBytes
119 121
 	case "VmSize":
120 122
new file mode 100644
... ...
@@ -0,0 +1,89 @@
0
+// Copyright 2019 The Prometheus Authors
1
+// Licensed under the Apache License, Version 2.0 (the "License");
2
+// you may not use this file except in compliance with the License.
3
+// You may obtain a copy of the License at
4
+//
5
+// http://www.apache.org/licenses/LICENSE-2.0
6
+//
7
+// Unless required by applicable law or agreed to in writing, software
8
+// distributed under the License is distributed on an "AS IS" BASIS,
9
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+// See the License for the specific language governing permissions and
11
+// limitations under the License.
12
+
13
+package procfs
14
+
15
+import (
16
+	"bufio"
17
+	"bytes"
18
+	"fmt"
19
+	"strconv"
20
+	"strings"
21
+
22
+	"github.com/prometheus/procfs/internal/util"
23
+)
24
+
25
// Swap represents an entry in /proc/swaps.
type Swap struct {
	// Filename is the path of the swap file or device.
	Filename string
	// Type is the swap area type as reported by the kernel
	// (e.g. "file" or "partition" — confirm against proc(5)).
	Type     string
	// Size is the total size of the swap area (presumably KiB,
	// as printed by the kernel — confirm against proc(5)).
	Used     int
	// Used is the amount of the swap area in use, in the same
	// unit as Size.
	Size     int
	// Priority is the swap priority of this area.
	Priority int
}
33
+
34
+// Swaps returns a slice of all configured swap devices on the system.
35
+func (fs FS) Swaps() ([]*Swap, error) {
36
+	data, err := util.ReadFileNoStat(fs.proc.Path("swaps"))
37
+	if err != nil {
38
+		return nil, err
39
+	}
40
+	return parseSwaps(data)
41
+}
42
+
43
+func parseSwaps(info []byte) ([]*Swap, error) {
44
+	swaps := []*Swap{}
45
+	scanner := bufio.NewScanner(bytes.NewReader(info))
46
+	scanner.Scan() // ignore header line
47
+	for scanner.Scan() {
48
+		swapString := scanner.Text()
49
+		parsedSwap, err := parseSwapString(swapString)
50
+		if err != nil {
51
+			return nil, err
52
+		}
53
+		swaps = append(swaps, parsedSwap)
54
+	}
55
+
56
+	err := scanner.Err()
57
+	return swaps, err
58
+}
59
+
60
+func parseSwapString(swapString string) (*Swap, error) {
61
+	var err error
62
+
63
+	swapFields := strings.Fields(swapString)
64
+	swapLength := len(swapFields)
65
+	if swapLength < 5 {
66
+		return nil, fmt.Errorf("too few fields in swap string: %s", swapString)
67
+	}
68
+
69
+	swap := &Swap{
70
+		Filename: swapFields[0],
71
+		Type:     swapFields[1],
72
+	}
73
+
74
+	swap.Size, err = strconv.Atoi(swapFields[2])
75
+	if err != nil {
76
+		return nil, fmt.Errorf("invalid swap size: %s", swapFields[2])
77
+	}
78
+	swap.Used, err = strconv.Atoi(swapFields[3])
79
+	if err != nil {
80
+		return nil, fmt.Errorf("invalid swap used: %s", swapFields[3])
81
+	}
82
+	swap.Priority, err = strconv.Atoi(swapFields[4])
83
+	if err != nil {
84
+		return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4])
85
+	}
86
+
87
+	return swap, nil
88
+}