full diff: https://github.com/prometheus/client_golang/compare/v1.1.0...v1.3.0
This requires LK4D4/vndr v0.1.0 or newer for vendoring; also adds a new
dependency: github.com/cespare/xxhash
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
... | ... |
@@ -143,10 +143,7 @@ github.com/coreos/pkg 3ac0863d7acf3bc44daf49afef89 |
143 | 143 |
code.cloudfoundry.org/clock 02e53af36e6c978af692887ed449b74026d76fec |
144 | 144 |
|
145 | 145 |
# prometheus |
146 |
-# Using v1.1.0, because version v1.2.0 and up use versioned import paths for the |
|
147 |
-# github.com/cespare/xxhash/v2 dependency (prometheus/client_golang#657), which |
|
148 |
-# causes vendoring with vndr to break due to the v2 in the import-path. |
|
149 |
-github.com/prometheus/client_golang 170205fb58decfd011f1550d4cfb737230d7ae4f # v1.1.0 |
|
146 |
+github.com/prometheus/client_golang c42bebe5a5cddfc6b28cd639103369d8a75dfa89 # v1.3.0 |
|
150 | 147 |
github.com/beorn7/perks 37c8de3658fcb183f997c4e13e8337516ab753e6 # v1.0.1 |
151 | 148 |
github.com/prometheus/client_model d1d2010b5beead3fa1c5f271a5cf626e40b3ad6e # v0.1.0 |
152 | 149 |
github.com/prometheus/common 287d3e634a1e550c9e463dd7e5a75a422c614505 # v0.7.0 |
... | ... |
@@ -154,6 +151,7 @@ github.com/prometheus/procfs 6d489fc7f1d9cd890a250f3ea343 |
154 | 154 |
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c # v1.0.1 |
155 | 155 |
github.com/pkg/errors ba968bfe8b2f7e042a574c888954fccecfa385b4 # v0.8.1 |
156 | 156 |
github.com/grpc-ecosystem/go-grpc-prometheus c225b8c3b01faf2899099b768856a9e916e5087b # v1.2.0 |
157 |
+github.com/cespare/xxhash/v2 d7df74196a9e781ede915320c11c378c1b2f3a1f # v2.1.1 |
|
157 | 158 |
|
158 | 159 |
# cli |
159 | 160 |
github.com/spf13/cobra ef82de70bb3f60c65fb8eebacbb2d122ef517385 # v0.0.3 |
160 | 161 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,22 @@ |
0 |
+Copyright (c) 2016 Caleb Spare |
|
1 |
+ |
|
2 |
+MIT License |
|
3 |
+ |
|
4 |
+Permission is hereby granted, free of charge, to any person obtaining |
|
5 |
+a copy of this software and associated documentation files (the |
|
6 |
+"Software"), to deal in the Software without restriction, including |
|
7 |
+without limitation the rights to use, copy, modify, merge, publish, |
|
8 |
+distribute, sublicense, and/or sell copies of the Software, and to |
|
9 |
+permit persons to whom the Software is furnished to do so, subject to |
|
10 |
+the following conditions: |
|
11 |
+ |
|
12 |
+The above copyright notice and this permission notice shall be |
|
13 |
+included in all copies or substantial portions of the Software. |
|
14 |
+ |
|
15 |
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
|
16 |
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
|
17 |
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
|
18 |
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE |
|
19 |
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION |
|
20 |
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION |
|
21 |
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
0 | 22 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,67 @@ |
0 |
+# xxhash |
|
1 |
+ |
|
2 |
+[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) |
|
3 |
+[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) |
|
4 |
+ |
|
5 |
+xxhash is a Go implementation of the 64-bit |
|
6 |
+[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a |
|
7 |
+high-quality hashing algorithm that is much faster than anything in the Go |
|
8 |
+standard library. |
|
9 |
+ |
|
10 |
+This package provides a straightforward API: |
|
11 |
+ |
|
12 |
+``` |
|
13 |
+func Sum64(b []byte) uint64 |
|
14 |
+func Sum64String(s string) uint64 |
|
15 |
+type Digest struct{ ... } |
|
16 |
+ func New() *Digest |
|
17 |
+``` |
|
18 |
+ |
|
19 |
+The `Digest` type implements hash.Hash64. Its key methods are: |
|
20 |
+ |
|
21 |
+``` |
|
22 |
+func (*Digest) Write([]byte) (int, error) |
|
23 |
+func (*Digest) WriteString(string) (int, error) |
|
24 |
+func (*Digest) Sum64() uint64 |
|
25 |
+``` |
|
26 |
+ |
|
27 |
+This implementation provides a fast pure-Go implementation and an even faster |
|
28 |
+assembly implementation for amd64. |
|
29 |
+ |
|
30 |
+## Compatibility |
|
31 |
+ |
|
32 |
+This package is in a module and the latest code is in version 2 of the module. |
|
33 |
+You need a version of Go with at least "minimal module compatibility" to use |
|
34 |
+github.com/cespare/xxhash/v2: |
|
35 |
+ |
|
36 |
+* 1.9.7+ for Go 1.9 |
|
37 |
+* 1.10.3+ for Go 1.10 |
|
38 |
+* Go 1.11 or later |
|
39 |
+ |
|
40 |
+I recommend using the latest release of Go. |
|
41 |
+ |
|
42 |
+## Benchmarks |
|
43 |
+ |
|
44 |
+Here are some quick benchmarks comparing the pure-Go and assembly |
|
45 |
+implementations of Sum64. |
|
46 |
+ |
|
47 |
+| input size | purego | asm | |
|
48 |
+| --- | --- | --- | |
|
49 |
+| 5 B | 979.66 MB/s | 1291.17 MB/s | |
|
50 |
+| 100 B | 7475.26 MB/s | 7973.40 MB/s | |
|
51 |
+| 4 KB | 17573.46 MB/s | 17602.65 MB/s | |
|
52 |
+| 10 MB | 17131.46 MB/s | 17142.16 MB/s | |
|
53 |
+ |
|
54 |
+These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using |
|
55 |
+the following commands under Go 1.11.2: |
|
56 |
+ |
|
57 |
+``` |
|
58 |
+$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' |
|
59 |
+$ go test -benchtime 10s -bench '/xxhash,direct,bytes' |
|
60 |
+``` |
|
61 |
+ |
|
62 |
+## Projects using this package |
|
63 |
+ |
|
64 |
+- [InfluxDB](https://github.com/influxdata/influxdb) |
|
65 |
+- [Prometheus](https://github.com/prometheus/prometheus) |
|
66 |
+- [FreeCache](https://github.com/coocood/freecache) |
0 | 3 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,236 @@ |
0 |
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described |
|
1 |
+// at http://cyan4973.github.io/xxHash/. |
|
2 |
+package xxhash |
|
3 |
+ |
|
4 |
+import ( |
|
5 |
+ "encoding/binary" |
|
6 |
+ "errors" |
|
7 |
+ "math/bits" |
|
8 |
+) |
|
9 |
+ |
|
10 |
+const ( |
|
11 |
+ prime1 uint64 = 11400714785074694791 |
|
12 |
+ prime2 uint64 = 14029467366897019727 |
|
13 |
+ prime3 uint64 = 1609587929392839161 |
|
14 |
+ prime4 uint64 = 9650029242287828579 |
|
15 |
+ prime5 uint64 = 2870177450012600261 |
|
16 |
+) |
|
17 |
+ |
|
18 |
+// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where |
|
19 |
+// possible in the Go code is worth a small (but measurable) performance boost |
|
20 |
+// by avoiding some MOVQs. Vars are needed for the asm and also are useful for |
|
21 |
+// convenience in the Go code in a few places where we need to intentionally |
|
22 |
+// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the |
|
23 |
+// result overflows a uint64). |
|
24 |
+var ( |
|
25 |
+ prime1v = prime1 |
|
26 |
+ prime2v = prime2 |
|
27 |
+ prime3v = prime3 |
|
28 |
+ prime4v = prime4 |
|
29 |
+ prime5v = prime5 |
|
30 |
+) |
|
31 |
+ |
|
32 |
+// Digest implements hash.Hash64. |
|
33 |
+type Digest struct { |
|
34 |
+ v1 uint64 |
|
35 |
+ v2 uint64 |
|
36 |
+ v3 uint64 |
|
37 |
+ v4 uint64 |
|
38 |
+ total uint64 |
|
39 |
+ mem [32]byte |
|
40 |
+ n int // how much of mem is used |
|
41 |
+} |
|
42 |
+ |
|
43 |
+// New creates a new Digest that computes the 64-bit xxHash algorithm. |
|
44 |
+func New() *Digest { |
|
45 |
+ var d Digest |
|
46 |
+ d.Reset() |
|
47 |
+ return &d |
|
48 |
+} |
|
49 |
+ |
|
50 |
+// Reset clears the Digest's state so that it can be reused. |
|
51 |
+func (d *Digest) Reset() { |
|
52 |
+ d.v1 = prime1v + prime2 |
|
53 |
+ d.v2 = prime2 |
|
54 |
+ d.v3 = 0 |
|
55 |
+ d.v4 = -prime1v |
|
56 |
+ d.total = 0 |
|
57 |
+ d.n = 0 |
|
58 |
+} |
|
59 |
+ |
|
60 |
+// Size always returns 8 bytes. |
|
61 |
+func (d *Digest) Size() int { return 8 } |
|
62 |
+ |
|
63 |
+// BlockSize always returns 32 bytes. |
|
64 |
+func (d *Digest) BlockSize() int { return 32 } |
|
65 |
+ |
|
66 |
+// Write adds more data to d. It always returns len(b), nil. |
|
67 |
+func (d *Digest) Write(b []byte) (n int, err error) { |
|
68 |
+ n = len(b) |
|
69 |
+ d.total += uint64(n) |
|
70 |
+ |
|
71 |
+ if d.n+n < 32 { |
|
72 |
+ // This new data doesn't even fill the current block. |
|
73 |
+ copy(d.mem[d.n:], b) |
|
74 |
+ d.n += n |
|
75 |
+ return |
|
76 |
+ } |
|
77 |
+ |
|
78 |
+ if d.n > 0 { |
|
79 |
+ // Finish off the partial block. |
|
80 |
+ copy(d.mem[d.n:], b) |
|
81 |
+ d.v1 = round(d.v1, u64(d.mem[0:8])) |
|
82 |
+ d.v2 = round(d.v2, u64(d.mem[8:16])) |
|
83 |
+ d.v3 = round(d.v3, u64(d.mem[16:24])) |
|
84 |
+ d.v4 = round(d.v4, u64(d.mem[24:32])) |
|
85 |
+ b = b[32-d.n:] |
|
86 |
+ d.n = 0 |
|
87 |
+ } |
|
88 |
+ |
|
89 |
+ if len(b) >= 32 { |
|
90 |
+ // One or more full blocks left. |
|
91 |
+ nw := writeBlocks(d, b) |
|
92 |
+ b = b[nw:] |
|
93 |
+ } |
|
94 |
+ |
|
95 |
+ // Store any remaining partial block. |
|
96 |
+ copy(d.mem[:], b) |
|
97 |
+ d.n = len(b) |
|
98 |
+ |
|
99 |
+ return |
|
100 |
+} |
|
101 |
+ |
|
102 |
+// Sum appends the current hash to b and returns the resulting slice. |
|
103 |
+func (d *Digest) Sum(b []byte) []byte { |
|
104 |
+ s := d.Sum64() |
|
105 |
+ return append( |
|
106 |
+ b, |
|
107 |
+ byte(s>>56), |
|
108 |
+ byte(s>>48), |
|
109 |
+ byte(s>>40), |
|
110 |
+ byte(s>>32), |
|
111 |
+ byte(s>>24), |
|
112 |
+ byte(s>>16), |
|
113 |
+ byte(s>>8), |
|
114 |
+ byte(s), |
|
115 |
+ ) |
|
116 |
+} |
|
117 |
+ |
|
118 |
+// Sum64 returns the current hash. |
|
119 |
+func (d *Digest) Sum64() uint64 { |
|
120 |
+ var h uint64 |
|
121 |
+ |
|
122 |
+ if d.total >= 32 { |
|
123 |
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 |
|
124 |
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) |
|
125 |
+ h = mergeRound(h, v1) |
|
126 |
+ h = mergeRound(h, v2) |
|
127 |
+ h = mergeRound(h, v3) |
|
128 |
+ h = mergeRound(h, v4) |
|
129 |
+ } else { |
|
130 |
+ h = d.v3 + prime5 |
|
131 |
+ } |
|
132 |
+ |
|
133 |
+ h += d.total |
|
134 |
+ |
|
135 |
+ i, end := 0, d.n |
|
136 |
+ for ; i+8 <= end; i += 8 { |
|
137 |
+ k1 := round(0, u64(d.mem[i:i+8])) |
|
138 |
+ h ^= k1 |
|
139 |
+ h = rol27(h)*prime1 + prime4 |
|
140 |
+ } |
|
141 |
+ if i+4 <= end { |
|
142 |
+ h ^= uint64(u32(d.mem[i:i+4])) * prime1 |
|
143 |
+ h = rol23(h)*prime2 + prime3 |
|
144 |
+ i += 4 |
|
145 |
+ } |
|
146 |
+ for i < end { |
|
147 |
+ h ^= uint64(d.mem[i]) * prime5 |
|
148 |
+ h = rol11(h) * prime1 |
|
149 |
+ i++ |
|
150 |
+ } |
|
151 |
+ |
|
152 |
+ h ^= h >> 33 |
|
153 |
+ h *= prime2 |
|
154 |
+ h ^= h >> 29 |
|
155 |
+ h *= prime3 |
|
156 |
+ h ^= h >> 32 |
|
157 |
+ |
|
158 |
+ return h |
|
159 |
+} |
|
160 |
+ |
|
161 |
+const ( |
|
162 |
+ magic = "xxh\x06" |
|
163 |
+ marshaledSize = len(magic) + 8*5 + 32 |
|
164 |
+) |
|
165 |
+ |
|
166 |
+// MarshalBinary implements the encoding.BinaryMarshaler interface. |
|
167 |
+func (d *Digest) MarshalBinary() ([]byte, error) { |
|
168 |
+ b := make([]byte, 0, marshaledSize) |
|
169 |
+ b = append(b, magic...) |
|
170 |
+ b = appendUint64(b, d.v1) |
|
171 |
+ b = appendUint64(b, d.v2) |
|
172 |
+ b = appendUint64(b, d.v3) |
|
173 |
+ b = appendUint64(b, d.v4) |
|
174 |
+ b = appendUint64(b, d.total) |
|
175 |
+ b = append(b, d.mem[:d.n]...) |
|
176 |
+ b = b[:len(b)+len(d.mem)-d.n] |
|
177 |
+ return b, nil |
|
178 |
+} |
|
179 |
+ |
|
180 |
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. |
|
181 |
+func (d *Digest) UnmarshalBinary(b []byte) error { |
|
182 |
+ if len(b) < len(magic) || string(b[:len(magic)]) != magic { |
|
183 |
+ return errors.New("xxhash: invalid hash state identifier") |
|
184 |
+ } |
|
185 |
+ if len(b) != marshaledSize { |
|
186 |
+ return errors.New("xxhash: invalid hash state size") |
|
187 |
+ } |
|
188 |
+ b = b[len(magic):] |
|
189 |
+ b, d.v1 = consumeUint64(b) |
|
190 |
+ b, d.v2 = consumeUint64(b) |
|
191 |
+ b, d.v3 = consumeUint64(b) |
|
192 |
+ b, d.v4 = consumeUint64(b) |
|
193 |
+ b, d.total = consumeUint64(b) |
|
194 |
+ copy(d.mem[:], b) |
|
195 |
+ b = b[len(d.mem):] |
|
196 |
+ d.n = int(d.total % uint64(len(d.mem))) |
|
197 |
+ return nil |
|
198 |
+} |
|
199 |
+ |
|
200 |
+func appendUint64(b []byte, x uint64) []byte { |
|
201 |
+ var a [8]byte |
|
202 |
+ binary.LittleEndian.PutUint64(a[:], x) |
|
203 |
+ return append(b, a[:]...) |
|
204 |
+} |
|
205 |
+ |
|
206 |
+func consumeUint64(b []byte) ([]byte, uint64) { |
|
207 |
+ x := u64(b) |
|
208 |
+ return b[8:], x |
|
209 |
+} |
|
210 |
+ |
|
211 |
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } |
|
212 |
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } |
|
213 |
+ |
|
214 |
+func round(acc, input uint64) uint64 { |
|
215 |
+ acc += input * prime2 |
|
216 |
+ acc = rol31(acc) |
|
217 |
+ acc *= prime1 |
|
218 |
+ return acc |
|
219 |
+} |
|
220 |
+ |
|
221 |
+func mergeRound(acc, val uint64) uint64 { |
|
222 |
+ val = round(0, val) |
|
223 |
+ acc ^= val |
|
224 |
+ acc = acc*prime1 + prime4 |
|
225 |
+ return acc |
|
226 |
+} |
|
227 |
+ |
|
228 |
+func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } |
|
229 |
+func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } |
|
230 |
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } |
|
231 |
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } |
|
232 |
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } |
|
233 |
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } |
|
234 |
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } |
|
235 |
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } |
0 | 236 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,13 @@ |
0 |
+// +build !appengine |
|
1 |
+// +build gc |
|
2 |
+// +build !purego |
|
3 |
+ |
|
4 |
+package xxhash |
|
5 |
+ |
|
6 |
+// Sum64 computes the 64-bit xxHash digest of b. |
|
7 |
+// |
|
8 |
+//go:noescape |
|
9 |
+func Sum64(b []byte) uint64 |
|
10 |
+ |
|
11 |
+//go:noescape |
|
12 |
+func writeBlocks(d *Digest, b []byte) int |
0 | 13 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,215 @@ |
0 |
+// +build !appengine |
|
1 |
+// +build gc |
|
2 |
+// +build !purego |
|
3 |
+ |
|
4 |
+#include "textflag.h" |
|
5 |
+ |
|
6 |
+// Register allocation: |
|
7 |
+// AX h |
|
8 |
+// CX pointer to advance through b |
|
9 |
+// DX n |
|
10 |
+// BX loop end |
|
11 |
+// R8 v1, k1 |
|
12 |
+// R9 v2 |
|
13 |
+// R10 v3 |
|
14 |
+// R11 v4 |
|
15 |
+// R12 tmp |
|
16 |
+// R13 prime1v |
|
17 |
+// R14 prime2v |
|
18 |
+// R15 prime4v |
|
19 |
+ |
|
20 |
+// round reads from and advances the buffer pointer in CX. |
|
21 |
+// It assumes that R13 has prime1v and R14 has prime2v. |
|
22 |
+#define round(r) \ |
|
23 |
+ MOVQ (CX), R12 \ |
|
24 |
+ ADDQ $8, CX \ |
|
25 |
+ IMULQ R14, R12 \ |
|
26 |
+ ADDQ R12, r \ |
|
27 |
+ ROLQ $31, r \ |
|
28 |
+ IMULQ R13, r |
|
29 |
+ |
|
30 |
+// mergeRound applies a merge round on the two registers acc and val. |
|
31 |
+// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. |
|
32 |
+#define mergeRound(acc, val) \ |
|
33 |
+ IMULQ R14, val \ |
|
34 |
+ ROLQ $31, val \ |
|
35 |
+ IMULQ R13, val \ |
|
36 |
+ XORQ val, acc \ |
|
37 |
+ IMULQ R13, acc \ |
|
38 |
+ ADDQ R15, acc |
|
39 |
+ |
|
40 |
+// func Sum64(b []byte) uint64 |
|
41 |
+TEXT ·Sum64(SB), NOSPLIT, $0-32 |
|
42 |
+ // Load fixed primes. |
|
43 |
+ MOVQ ·prime1v(SB), R13 |
|
44 |
+ MOVQ ·prime2v(SB), R14 |
|
45 |
+ MOVQ ·prime4v(SB), R15 |
|
46 |
+ |
|
47 |
+ // Load slice. |
|
48 |
+ MOVQ b_base+0(FP), CX |
|
49 |
+ MOVQ b_len+8(FP), DX |
|
50 |
+ LEAQ (CX)(DX*1), BX |
|
51 |
+ |
|
52 |
+ // The first loop limit will be len(b)-32. |
|
53 |
+ SUBQ $32, BX |
|
54 |
+ |
|
55 |
+ // Check whether we have at least one block. |
|
56 |
+ CMPQ DX, $32 |
|
57 |
+ JLT noBlocks |
|
58 |
+ |
|
59 |
+ // Set up initial state (v1, v2, v3, v4). |
|
60 |
+ MOVQ R13, R8 |
|
61 |
+ ADDQ R14, R8 |
|
62 |
+ MOVQ R14, R9 |
|
63 |
+ XORQ R10, R10 |
|
64 |
+ XORQ R11, R11 |
|
65 |
+ SUBQ R13, R11 |
|
66 |
+ |
|
67 |
+ // Loop until CX > BX. |
|
68 |
+blockLoop: |
|
69 |
+ round(R8) |
|
70 |
+ round(R9) |
|
71 |
+ round(R10) |
|
72 |
+ round(R11) |
|
73 |
+ |
|
74 |
+ CMPQ CX, BX |
|
75 |
+ JLE blockLoop |
|
76 |
+ |
|
77 |
+ MOVQ R8, AX |
|
78 |
+ ROLQ $1, AX |
|
79 |
+ MOVQ R9, R12 |
|
80 |
+ ROLQ $7, R12 |
|
81 |
+ ADDQ R12, AX |
|
82 |
+ MOVQ R10, R12 |
|
83 |
+ ROLQ $12, R12 |
|
84 |
+ ADDQ R12, AX |
|
85 |
+ MOVQ R11, R12 |
|
86 |
+ ROLQ $18, R12 |
|
87 |
+ ADDQ R12, AX |
|
88 |
+ |
|
89 |
+ mergeRound(AX, R8) |
|
90 |
+ mergeRound(AX, R9) |
|
91 |
+ mergeRound(AX, R10) |
|
92 |
+ mergeRound(AX, R11) |
|
93 |
+ |
|
94 |
+ JMP afterBlocks |
|
95 |
+ |
|
96 |
+noBlocks: |
|
97 |
+ MOVQ ·prime5v(SB), AX |
|
98 |
+ |
|
99 |
+afterBlocks: |
|
100 |
+ ADDQ DX, AX |
|
101 |
+ |
|
102 |
+ // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. |
|
103 |
+ ADDQ $24, BX |
|
104 |
+ |
|
105 |
+ CMPQ CX, BX |
|
106 |
+ JG fourByte |
|
107 |
+ |
|
108 |
+wordLoop: |
|
109 |
+ // Calculate k1. |
|
110 |
+ MOVQ (CX), R8 |
|
111 |
+ ADDQ $8, CX |
|
112 |
+ IMULQ R14, R8 |
|
113 |
+ ROLQ $31, R8 |
|
114 |
+ IMULQ R13, R8 |
|
115 |
+ |
|
116 |
+ XORQ R8, AX |
|
117 |
+ ROLQ $27, AX |
|
118 |
+ IMULQ R13, AX |
|
119 |
+ ADDQ R15, AX |
|
120 |
+ |
|
121 |
+ CMPQ CX, BX |
|
122 |
+ JLE wordLoop |
|
123 |
+ |
|
124 |
+fourByte: |
|
125 |
+ ADDQ $4, BX |
|
126 |
+ CMPQ CX, BX |
|
127 |
+ JG singles |
|
128 |
+ |
|
129 |
+ MOVL (CX), R8 |
|
130 |
+ ADDQ $4, CX |
|
131 |
+ IMULQ R13, R8 |
|
132 |
+ XORQ R8, AX |
|
133 |
+ |
|
134 |
+ ROLQ $23, AX |
|
135 |
+ IMULQ R14, AX |
|
136 |
+ ADDQ ·prime3v(SB), AX |
|
137 |
+ |
|
138 |
+singles: |
|
139 |
+ ADDQ $4, BX |
|
140 |
+ CMPQ CX, BX |
|
141 |
+ JGE finalize |
|
142 |
+ |
|
143 |
+singlesLoop: |
|
144 |
+ MOVBQZX (CX), R12 |
|
145 |
+ ADDQ $1, CX |
|
146 |
+ IMULQ ·prime5v(SB), R12 |
|
147 |
+ XORQ R12, AX |
|
148 |
+ |
|
149 |
+ ROLQ $11, AX |
|
150 |
+ IMULQ R13, AX |
|
151 |
+ |
|
152 |
+ CMPQ CX, BX |
|
153 |
+ JL singlesLoop |
|
154 |
+ |
|
155 |
+finalize: |
|
156 |
+ MOVQ AX, R12 |
|
157 |
+ SHRQ $33, R12 |
|
158 |
+ XORQ R12, AX |
|
159 |
+ IMULQ R14, AX |
|
160 |
+ MOVQ AX, R12 |
|
161 |
+ SHRQ $29, R12 |
|
162 |
+ XORQ R12, AX |
|
163 |
+ IMULQ ·prime3v(SB), AX |
|
164 |
+ MOVQ AX, R12 |
|
165 |
+ SHRQ $32, R12 |
|
166 |
+ XORQ R12, AX |
|
167 |
+ |
|
168 |
+ MOVQ AX, ret+24(FP) |
|
169 |
+ RET |
|
170 |
+ |
|
171 |
+// writeBlocks uses the same registers as above except that it uses AX to store |
|
172 |
+// the d pointer. |
|
173 |
+ |
|
174 |
+// func writeBlocks(d *Digest, b []byte) int |
|
175 |
+TEXT ·writeBlocks(SB), NOSPLIT, $0-40 |
|
176 |
+ // Load fixed primes needed for round. |
|
177 |
+ MOVQ ·prime1v(SB), R13 |
|
178 |
+ MOVQ ·prime2v(SB), R14 |
|
179 |
+ |
|
180 |
+ // Load slice. |
|
181 |
+ MOVQ b_base+8(FP), CX |
|
182 |
+ MOVQ b_len+16(FP), DX |
|
183 |
+ LEAQ (CX)(DX*1), BX |
|
184 |
+ SUBQ $32, BX |
|
185 |
+ |
|
186 |
+ // Load vN from d. |
|
187 |
+ MOVQ d+0(FP), AX |
|
188 |
+ MOVQ 0(AX), R8 // v1 |
|
189 |
+ MOVQ 8(AX), R9 // v2 |
|
190 |
+ MOVQ 16(AX), R10 // v3 |
|
191 |
+ MOVQ 24(AX), R11 // v4 |
|
192 |
+ |
|
193 |
+ // We don't need to check the loop condition here; this function is |
|
194 |
+ // always called with at least one block of data to process. |
|
195 |
+blockLoop: |
|
196 |
+ round(R8) |
|
197 |
+ round(R9) |
|
198 |
+ round(R10) |
|
199 |
+ round(R11) |
|
200 |
+ |
|
201 |
+ CMPQ CX, BX |
|
202 |
+ JLE blockLoop |
|
203 |
+ |
|
204 |
+ // Copy vN back to d. |
|
205 |
+ MOVQ R8, 0(AX) |
|
206 |
+ MOVQ R9, 8(AX) |
|
207 |
+ MOVQ R10, 16(AX) |
|
208 |
+ MOVQ R11, 24(AX) |
|
209 |
+ |
|
210 |
+ // The number of bytes written is CX minus the old base pointer. |
|
211 |
+ SUBQ b_base+8(FP), CX |
|
212 |
+ MOVQ CX, ret+32(FP) |
|
213 |
+ |
|
214 |
+ RET |
0 | 215 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,76 @@ |
0 |
+// +build !amd64 appengine !gc purego |
|
1 |
+ |
|
2 |
+package xxhash |
|
3 |
+ |
|
4 |
+// Sum64 computes the 64-bit xxHash digest of b. |
|
5 |
+func Sum64(b []byte) uint64 { |
|
6 |
+ // A simpler version would be |
|
7 |
+ // d := New() |
|
8 |
+ // d.Write(b) |
|
9 |
+ // return d.Sum64() |
|
10 |
+ // but this is faster, particularly for small inputs. |
|
11 |
+ |
|
12 |
+ n := len(b) |
|
13 |
+ var h uint64 |
|
14 |
+ |
|
15 |
+ if n >= 32 { |
|
16 |
+ v1 := prime1v + prime2 |
|
17 |
+ v2 := prime2 |
|
18 |
+ v3 := uint64(0) |
|
19 |
+ v4 := -prime1v |
|
20 |
+ for len(b) >= 32 { |
|
21 |
+ v1 = round(v1, u64(b[0:8:len(b)])) |
|
22 |
+ v2 = round(v2, u64(b[8:16:len(b)])) |
|
23 |
+ v3 = round(v3, u64(b[16:24:len(b)])) |
|
24 |
+ v4 = round(v4, u64(b[24:32:len(b)])) |
|
25 |
+ b = b[32:len(b):len(b)] |
|
26 |
+ } |
|
27 |
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) |
|
28 |
+ h = mergeRound(h, v1) |
|
29 |
+ h = mergeRound(h, v2) |
|
30 |
+ h = mergeRound(h, v3) |
|
31 |
+ h = mergeRound(h, v4) |
|
32 |
+ } else { |
|
33 |
+ h = prime5 |
|
34 |
+ } |
|
35 |
+ |
|
36 |
+ h += uint64(n) |
|
37 |
+ |
|
38 |
+ i, end := 0, len(b) |
|
39 |
+ for ; i+8 <= end; i += 8 { |
|
40 |
+ k1 := round(0, u64(b[i:i+8:len(b)])) |
|
41 |
+ h ^= k1 |
|
42 |
+ h = rol27(h)*prime1 + prime4 |
|
43 |
+ } |
|
44 |
+ if i+4 <= end { |
|
45 |
+ h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 |
|
46 |
+ h = rol23(h)*prime2 + prime3 |
|
47 |
+ i += 4 |
|
48 |
+ } |
|
49 |
+ for ; i < end; i++ { |
|
50 |
+ h ^= uint64(b[i]) * prime5 |
|
51 |
+ h = rol11(h) * prime1 |
|
52 |
+ } |
|
53 |
+ |
|
54 |
+ h ^= h >> 33 |
|
55 |
+ h *= prime2 |
|
56 |
+ h ^= h >> 29 |
|
57 |
+ h *= prime3 |
|
58 |
+ h ^= h >> 32 |
|
59 |
+ |
|
60 |
+ return h |
|
61 |
+} |
|
62 |
+ |
|
63 |
+func writeBlocks(d *Digest, b []byte) int { |
|
64 |
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 |
|
65 |
+ n := len(b) |
|
66 |
+ for len(b) >= 32 { |
|
67 |
+ v1 = round(v1, u64(b[0:8:len(b)])) |
|
68 |
+ v2 = round(v2, u64(b[8:16:len(b)])) |
|
69 |
+ v3 = round(v3, u64(b[16:24:len(b)])) |
|
70 |
+ v4 = round(v4, u64(b[24:32:len(b)])) |
|
71 |
+ b = b[32:len(b):len(b)] |
|
72 |
+ } |
|
73 |
+ d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 |
|
74 |
+ return n - len(b) |
|
75 |
+} |
0 | 76 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,15 @@ |
0 |
+// +build appengine |
|
1 |
+ |
|
2 |
+// This file contains the safe implementations of otherwise unsafe-using code. |
|
3 |
+ |
|
4 |
+package xxhash |
|
5 |
+ |
|
6 |
+// Sum64String computes the 64-bit xxHash digest of s. |
|
7 |
+func Sum64String(s string) uint64 { |
|
8 |
+ return Sum64([]byte(s)) |
|
9 |
+} |
|
10 |
+ |
|
11 |
+// WriteString adds more data to d. It always returns len(s), nil. |
|
12 |
+func (d *Digest) WriteString(s string) (n int, err error) { |
|
13 |
+ return d.Write([]byte(s)) |
|
14 |
+} |
0 | 15 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,46 @@ |
0 |
+// +build !appengine |
|
1 |
+ |
|
2 |
+// This file encapsulates usage of unsafe. |
|
3 |
+// xxhash_safe.go contains the safe implementations. |
|
4 |
+ |
|
5 |
+package xxhash |
|
6 |
+ |
|
7 |
+import ( |
|
8 |
+ "reflect" |
|
9 |
+ "unsafe" |
|
10 |
+) |
|
11 |
+ |
|
12 |
+// Notes: |
|
13 |
+// |
|
14 |
+// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ |
|
15 |
+// for some discussion about these unsafe conversions. |
|
16 |
+// |
|
17 |
+// In the future it's possible that compiler optimizations will make these |
|
18 |
+// unsafe operations unnecessary: https://golang.org/issue/2205. |
|
19 |
+// |
|
20 |
+// Both of these wrapper functions still incur function call overhead since they |
|
21 |
+// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write |
|
22 |
+// for strings to squeeze out a bit more speed. Mid-stack inlining should |
|
23 |
+// eventually fix this. |
|
24 |
+ |
|
25 |
+// Sum64String computes the 64-bit xxHash digest of s. |
|
26 |
+// It may be faster than Sum64([]byte(s)) by avoiding a copy. |
|
27 |
+func Sum64String(s string) uint64 { |
|
28 |
+ var b []byte |
|
29 |
+ bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) |
|
30 |
+ bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data |
|
31 |
+ bh.Len = len(s) |
|
32 |
+ bh.Cap = len(s) |
|
33 |
+ return Sum64(b) |
|
34 |
+} |
|
35 |
+ |
|
36 |
+// WriteString adds more data to d. It always returns len(s), nil. |
|
37 |
+// It may be faster than Write([]byte(s)) by avoiding a copy. |
|
38 |
+func (d *Digest) WriteString(s string) (n int, err error) { |
|
39 |
+ var b []byte |
|
40 |
+ bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) |
|
41 |
+ bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data |
|
42 |
+ bh.Len = len(s) |
|
43 |
+ bh.Cap = len(s) |
|
44 |
+ return d.Write(b) |
|
45 |
+} |
... | ... |
@@ -9,7 +9,7 @@ This is the [Go](http://golang.org) client library for |
9 | 9 |
instrumenting application code, and one for creating clients that talk to the |
10 | 10 |
Prometheus HTTP API. |
11 | 11 |
|
12 |
-__This library requires Go1.9 or later.__ |
|
12 |
+__This library requires Go1.9 or later.__ The minimum required patch releases for older Go versions are Go1.9.7 and Go1.10.3. |
|
13 | 13 |
|
14 | 14 |
## Important note about releases and stability |
15 | 15 |
|
... | ... |
@@ -2,10 +2,13 @@ module github.com/prometheus/client_golang |
2 | 2 |
|
3 | 3 |
require ( |
4 | 4 |
github.com/beorn7/perks v1.0.1 |
5 |
+ github.com/cespare/xxhash/v2 v2.1.1 |
|
5 | 6 |
github.com/golang/protobuf v1.3.2 |
6 |
- github.com/json-iterator/go v1.1.7 |
|
7 |
- github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 |
|
8 |
- github.com/prometheus/common v0.6.0 |
|
9 |
- github.com/prometheus/procfs v0.0.3 |
|
10 |
- golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 |
|
7 |
+ github.com/json-iterator/go v1.1.8 |
|
8 |
+ github.com/prometheus/client_model v0.1.0 |
|
9 |
+ github.com/prometheus/common v0.7.0 |
|
10 |
+ github.com/prometheus/procfs v0.0.8 |
|
11 |
+ golang.org/x/sys v0.0.0-20191220142924-d4481acd189f |
|
11 | 12 |
) |
13 |
+ |
|
14 |
+go 1.11 |
... | ... |
@@ -19,6 +19,7 @@ import ( |
19 | 19 |
"sort" |
20 | 20 |
"strings" |
21 | 21 |
|
22 |
+ "github.com/cespare/xxhash/v2" |
|
22 | 23 |
"github.com/golang/protobuf/proto" |
23 | 24 |
"github.com/prometheus/common/model" |
24 | 25 |
|
... | ... |
@@ -126,24 +127,24 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * |
126 | 126 |
return d |
127 | 127 |
} |
128 | 128 |
|
129 |
- vh := hashNew() |
|
129 |
+ xxh := xxhash.New() |
|
130 | 130 |
for _, val := range labelValues { |
131 |
- vh = hashAdd(vh, val) |
|
132 |
- vh = hashAddByte(vh, separatorByte) |
|
131 |
+ xxh.WriteString(val) |
|
132 |
+ xxh.Write(separatorByteSlice) |
|
133 | 133 |
} |
134 |
- d.id = vh |
|
134 |
+ d.id = xxh.Sum64() |
|
135 | 135 |
// Sort labelNames so that order doesn't matter for the hash. |
136 | 136 |
sort.Strings(labelNames) |
137 | 137 |
// Now hash together (in this order) the help string and the sorted |
138 | 138 |
// label names. |
139 |
- lh := hashNew() |
|
140 |
- lh = hashAdd(lh, help) |
|
141 |
- lh = hashAddByte(lh, separatorByte) |
|
139 |
+ xxh.Reset() |
|
140 |
+ xxh.WriteString(help) |
|
141 |
+ xxh.Write(separatorByteSlice) |
|
142 | 142 |
for _, labelName := range labelNames { |
143 |
- lh = hashAdd(lh, labelName) |
|
144 |
- lh = hashAddByte(lh, separatorByte) |
|
143 |
+ xxh.WriteString(labelName) |
|
144 |
+ xxh.Write(separatorByteSlice) |
|
145 | 145 |
} |
146 |
- d.dimHash = lh |
|
146 |
+ d.dimHash = xxh.Sum64() |
|
147 | 147 |
|
148 | 148 |
d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) |
149 | 149 |
for n, v := range constLabels { |
... | ... |
@@ -273,9 +273,12 @@ type GaugeFunc interface { |
273 | 273 |
// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The |
274 | 274 |
// value reported is determined by calling the given function from within the |
275 | 275 |
// Write method. Take into account that metric collection may happen |
276 |
-// concurrently. If that results in concurrent calls to Write, like in the case |
|
277 |
-// where a GaugeFunc is directly registered with Prometheus, the provided |
|
278 |
-// function must be concurrency-safe. |
|
276 |
+// concurrently. Therefore, it must be safe to call the provided function |
|
277 |
+// concurrently. |
|
278 |
+// |
|
279 |
+// NewGaugeFunc is a good way to create an “info” style metric with a constant |
|
280 |
+// value of 1. Example: |
|
281 |
+// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56 |
|
279 | 282 |
func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { |
280 | 283 |
return newValueFunc(NewDesc( |
281 | 284 |
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), |
... | ... |
@@ -138,7 +138,7 @@ type HistogramOpts struct { |
138 | 138 |
// better covered by target labels set by the scraping Prometheus |
139 | 139 |
// server, or by one specific metric (e.g. a build_info or a |
140 | 140 |
// machine_role metric). See also |
141 |
- // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels |
|
141 |
+ // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels |
|
142 | 142 |
ConstLabels Labels |
143 | 143 |
|
144 | 144 |
// Buckets defines the buckets into which observations are counted. Each |
... | ... |
@@ -187,7 +187,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr |
187 | 187 |
desc: desc, |
188 | 188 |
upperBounds: opts.Buckets, |
189 | 189 |
labelPairs: makeLabelPairs(desc, labelValues), |
190 |
- counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}}, |
|
190 |
+ counts: [2]*histogramCounts{{}, {}}, |
|
191 | 191 |
} |
192 | 192 |
for i, upperBound := range h.upperBounds { |
193 | 193 |
if i < len(h.upperBounds)-1 { |
... | ... |
@@ -18,11 +18,12 @@ import ( |
18 | 18 |
"time" |
19 | 19 |
|
20 | 20 |
"github.com/golang/protobuf/proto" |
21 |
+ "github.com/prometheus/common/model" |
|
21 | 22 |
|
22 | 23 |
dto "github.com/prometheus/client_model/go" |
23 | 24 |
) |
24 | 25 |
|
25 |
-const separatorByte byte = 255 |
|
26 |
+var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. |
|
26 | 27 |
|
27 | 28 |
// A Metric models a single sample value with its meta data being exported to |
28 | 29 |
// Prometheus. Implementations of Metric in this package are Gauge, Counter, |
... | ... |
@@ -62,6 +62,8 @@ func (r *responseWriterDelegator) WriteHeader(code int) { |
62 | 62 |
} |
63 | 63 |
|
64 | 64 |
func (r *responseWriterDelegator) Write(b []byte) (int, error) { |
65 |
+ // If applicable, call WriteHeader here so that observeWriteHeader is |
|
66 |
+ // handled appropriately. |
|
65 | 67 |
if !r.wroteHeader { |
66 | 68 |
r.WriteHeader(http.StatusOK) |
67 | 69 |
} |
... | ... |
@@ -82,12 +84,19 @@ func (d closeNotifierDelegator) CloseNotify() <-chan bool { |
82 | 82 |
return d.ResponseWriter.(http.CloseNotifier).CloseNotify() |
83 | 83 |
} |
84 | 84 |
func (d flusherDelegator) Flush() { |
85 |
+ // If applicable, call WriteHeader here so that observeWriteHeader is |
|
86 |
+ // handled appropriately. |
|
87 |
+ if !d.wroteHeader { |
|
88 |
+ d.WriteHeader(http.StatusOK) |
|
89 |
+ } |
|
85 | 90 |
d.ResponseWriter.(http.Flusher).Flush() |
86 | 91 |
} |
87 | 92 |
func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { |
88 | 93 |
return d.ResponseWriter.(http.Hijacker).Hijack() |
89 | 94 |
} |
90 | 95 |
func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { |
96 |
+ // If applicable, call WriteHeader here so that observeWriteHeader is |
|
97 |
+ // handled appropriately. |
|
91 | 98 |
if !d.wroteHeader { |
92 | 99 |
d.WriteHeader(http.StatusOK) |
93 | 100 |
} |
... | ... |
@@ -25,6 +25,7 @@ import ( |
25 | 25 |
"sync" |
26 | 26 |
"unicode/utf8" |
27 | 27 |
|
28 |
+ "github.com/cespare/xxhash/v2" |
|
28 | 29 |
"github.com/golang/protobuf/proto" |
29 | 30 |
"github.com/prometheus/common/expfmt" |
30 | 31 |
|
... | ... |
@@ -74,7 +75,7 @@ func NewRegistry() *Registry { |
74 | 74 |
// NewPedanticRegistry returns a registry that checks during collection if each |
75 | 75 |
// collected Metric is consistent with its reported Desc, and if the Desc has |
76 | 76 |
// actually been registered with the registry. Unchecked Collectors (those whose |
77 |
-// Describe methed does not yield any descriptors) are excluded from the check. |
|
77 |
+// Describe method does not yield any descriptors) are excluded from the check. |
|
78 | 78 |
// |
79 | 79 |
// Usually, a Registry will be happy as long as the union of all collected |
80 | 80 |
// Metrics is consistent and valid even if some metrics are not consistent with |
... | ... |
@@ -266,7 +267,7 @@ func (r *Registry) Register(c Collector) error { |
266 | 266 |
descChan = make(chan *Desc, capDescChan) |
267 | 267 |
newDescIDs = map[uint64]struct{}{} |
268 | 268 |
newDimHashesByName = map[string]uint64{} |
269 |
- collectorID uint64 // Just a sum of all desc IDs. |
|
269 |
+ collectorID uint64 // All desc IDs XOR'd together. |
|
270 | 270 |
duplicateDescErr error |
271 | 271 |
) |
272 | 272 |
go func() { |
... | ... |
@@ -293,12 +294,12 @@ func (r *Registry) Register(c Collector) error { |
293 | 293 |
if _, exists := r.descIDs[desc.id]; exists { |
294 | 294 |
duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) |
295 | 295 |
} |
296 |
- // If it is not a duplicate desc in this collector, add it to |
|
296 |
+ // If it is not a duplicate desc in this collector, XOR it to |
|
297 | 297 |
// the collectorID. (We allow duplicate descs within the same |
298 | 298 |
// collector, but their existence must be a no-op.) |
299 | 299 |
if _, exists := newDescIDs[desc.id]; !exists { |
300 | 300 |
newDescIDs[desc.id] = struct{}{} |
301 |
- collectorID += desc.id |
|
301 |
+ collectorID ^= desc.id |
|
302 | 302 |
} |
303 | 303 |
|
304 | 304 |
// Are all the label names and the help string consistent with |
... | ... |
@@ -360,7 +361,7 @@ func (r *Registry) Unregister(c Collector) bool { |
360 | 360 |
var ( |
361 | 361 |
descChan = make(chan *Desc, capDescChan) |
362 | 362 |
descIDs = map[uint64]struct{}{} |
363 |
- collectorID uint64 // Just a sum of the desc IDs. |
|
363 |
+ collectorID uint64 // All desc IDs XOR'd together. |
|
364 | 364 |
) |
365 | 365 |
go func() { |
366 | 366 |
c.Describe(descChan) |
... | ... |
@@ -368,7 +369,7 @@ func (r *Registry) Unregister(c Collector) bool { |
368 | 368 |
}() |
369 | 369 |
for desc := range descChan { |
370 | 370 |
if _, exists := descIDs[desc.id]; !exists { |
371 |
- collectorID += desc.id |
|
371 |
+ collectorID ^= desc.id |
|
372 | 372 |
descIDs[desc.id] = struct{}{} |
373 | 373 |
} |
374 | 374 |
} |
... | ... |
@@ -875,9 +876,9 @@ func checkMetricConsistency( |
875 | 875 |
} |
876 | 876 |
|
877 | 877 |
// Is the metric unique (i.e. no other metric with the same name and the same labels)? |
878 |
- h := hashNew() |
|
879 |
- h = hashAdd(h, name) |
|
880 |
- h = hashAddByte(h, separatorByte) |
|
878 |
+ h := xxhash.New() |
|
879 |
+ h.WriteString(name) |
|
880 |
+ h.Write(separatorByteSlice) |
|
881 | 881 |
// Make sure label pairs are sorted. We depend on it for the consistency |
882 | 882 |
// check. |
883 | 883 |
if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { |
... | ... |
@@ -888,18 +889,19 @@ func checkMetricConsistency( |
888 | 888 |
dtoMetric.Label = copiedLabels |
889 | 889 |
} |
890 | 890 |
for _, lp := range dtoMetric.Label { |
891 |
- h = hashAdd(h, lp.GetName()) |
|
892 |
- h = hashAddByte(h, separatorByte) |
|
893 |
- h = hashAdd(h, lp.GetValue()) |
|
894 |
- h = hashAddByte(h, separatorByte) |
|
891 |
+ h.WriteString(lp.GetName()) |
|
892 |
+ h.Write(separatorByteSlice) |
|
893 |
+ h.WriteString(lp.GetValue()) |
|
894 |
+ h.Write(separatorByteSlice) |
|
895 | 895 |
} |
896 |
- if _, exists := metricHashes[h]; exists { |
|
896 |
+ hSum := h.Sum64() |
|
897 |
+ if _, exists := metricHashes[hSum]; exists { |
|
897 | 898 |
return fmt.Errorf( |
898 | 899 |
"collected metric %q { %s} was collected before with the same name and label values", |
899 | 900 |
name, dtoMetric, |
900 | 901 |
) |
901 | 902 |
} |
902 |
- metricHashes[h] = struct{}{} |
|
903 |
+ metricHashes[hSum] = struct{}{} |
|
903 | 904 |
return nil |
904 | 905 |
} |
905 | 906 |
|
... | ... |
@@ -208,7 +208,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { |
208 | 208 |
s := &noObjectivesSummary{ |
209 | 209 |
desc: desc, |
210 | 210 |
labelPairs: makeLabelPairs(desc, labelValues), |
211 |
- counts: [2]*summaryCounts{&summaryCounts{}, &summaryCounts{}}, |
|
211 |
+ counts: [2]*summaryCounts{{}, {}}, |
|
212 | 212 |
} |
213 | 213 |
s.init(s) // Init self-collection. |
214 | 214 |
return s |
... | ... |
@@ -24,7 +24,7 @@ import ( |
24 | 24 |
// their label values. metricVec is not used directly (and therefore |
25 | 25 |
// unexported). It is used as a building block for implementations of vectors of |
26 | 26 |
// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec. |
27 |
-// It also handles label currying. It uses basicMetricVec internally. |
|
27 |
+// It also handles label currying. |
|
28 | 28 |
type metricVec struct { |
29 | 29 |
*metricMap |
30 | 30 |
|