Browse code

Re-vendor notary, as well as change jfrazelle/go to docker/go.

Signed-off-by: cyli <cyli@twistedmatrix.com>

cyli authored on 2016/01/27 07:21:07
Showing 47 changed files
... ...
@@ -168,7 +168,7 @@ RUN set -x \
168 168
 	&& rm -rf "$GOPATH"
169 169
 
170 170
 # Install notary server
171
-ENV NOTARY_VERSION docker-v1.10-3
171
+ENV NOTARY_VERSION docker-v1.10-4
172 172
 RUN set -x \
173 173
 	&& export GOPATH="$(mktemp -d)" \
174 174
 	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
... ...
@@ -145,7 +145,7 @@ RUN set -x \
145 145
 	&& rm -rf "$GOPATH"
146 146
 
147 147
 # Install notary server
148
-ENV NOTARY_VERSION docker-v1.10-3
148
+ENV NOTARY_VERSION docker-v1.10-4
149 149
 RUN set -x \
150 150
 	&& export GOPATH="$(mktemp -d)" \
151 151
 	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
... ...
@@ -116,7 +116,7 @@ RUN set -x \
116 116
 	&& rm -rf "$GOPATH"
117 117
 
118 118
 # Install notary server
119
-#ENV NOTARY_VERSION docker-v1.10-3
119
+#ENV NOTARY_VERSION docker-v1.10-4
120 120
 #RUN set -x \
121 121
 #	&& export GOPATH="$(mktemp -d)" \
122 122
 #	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
... ...
@@ -116,7 +116,7 @@ RUN set -x \
116 116
 	&& rm -rf "$GOPATH"
117 117
 
118 118
 # Install notary server
119
-ENV NOTARY_VERSION docker-v1.10-3
119
+ENV NOTARY_VERSION docker-v1.10-4
120 120
 RUN set -x \
121 121
 	&& export GOPATH="$(mktemp -d)" \
122 122
 	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
... ...
@@ -50,11 +50,11 @@ clone git github.com/docker/distribution c301f8ab27f4913c968b8d73a38e5dda79b9d3d
50 50
 clone git github.com/vbatts/tar-split v0.9.11
51 51
 
52 52
 # get desired notary commit, might also need to be updated in Dockerfile
53
-clone git github.com/docker/notary docker-v1.10-3
53
+clone git github.com/docker/notary docker-v1.10-4
54 54
 
55 55
 clone git google.golang.org/grpc 174192fc93efcb188fc8f46ca447f0da606b6885 https://github.com/grpc/grpc-go.git
56 56
 clone git github.com/miekg/pkcs11 80f102b5cac759de406949c47f0928b99bd64cdf
57
-clone git github.com/jfrazelle/go v1.5.1-1
57
+clone git github.com/docker/go v1.5.1-1-1-gbaf439e
58 58
 clone git github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c
59 59
 
60 60
 clone git github.com/opencontainers/runc 3d8a20bb772defc28c355534d83486416d1719b4 # libcontainer
61 61
new file mode 100644
... ...
@@ -0,0 +1,27 @@
0
+Copyright (c) 2012 The Go Authors. All rights reserved.
1
+
2
+Redistribution and use in source and binary forms, with or without
3
+modification, are permitted provided that the following conditions are
4
+met:
5
+
6
+   * Redistributions of source code must retain the above copyright
7
+notice, this list of conditions and the following disclaimer.
8
+   * Redistributions in binary form must reproduce the above
9
+copyright notice, this list of conditions and the following disclaimer
10
+in the documentation and/or other materials provided with the
11
+distribution.
12
+   * Neither the name of Google Inc. nor the names of its
13
+contributors may be used to endorse or promote products derived from
14
+this software without specific prior written permission.
15
+
16
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
0 27
new file mode 100644
... ...
@@ -0,0 +1,1094 @@
0
+// Copyright 2010 The Go Authors. All rights reserved.
1
+// Use of this source code is governed by a BSD-style
2
+// license that can be found in the LICENSE file.
3
+
4
+// Represents JSON data structure using native Go types: booleans, floats,
5
+// strings, arrays, and maps.
6
+
7
+package json
8
+
9
+import (
10
+	"bytes"
11
+	"encoding"
12
+	"encoding/base64"
13
+	"errors"
14
+	"fmt"
15
+	"reflect"
16
+	"runtime"
17
+	"strconv"
18
+	"unicode"
19
+	"unicode/utf16"
20
+	"unicode/utf8"
21
+)
22
+
23
+// Unmarshal parses the JSON-encoded data and stores the result
24
+// in the value pointed to by v.
25
+//
26
+// Unmarshal uses the inverse of the encodings that
27
+// Marshal uses, allocating maps, slices, and pointers as necessary,
28
+// with the following additional rules:
29
+//
30
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
31
+// the JSON being the JSON literal null.  In that case, Unmarshal sets
32
+// the pointer to nil.  Otherwise, Unmarshal unmarshals the JSON into
33
+// the value pointed at by the pointer.  If the pointer is nil, Unmarshal
34
+// allocates a new value for it to point to.
35
+//
36
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
37
+// keys to the keys used by Marshal (either the struct field name or its tag),
38
+// preferring an exact match but also accepting a case-insensitive match.
39
+//
40
+// To unmarshal JSON into an interface value,
41
+// Unmarshal stores one of these in the interface value:
42
+//
43
+//	bool, for JSON booleans
44
+//	float64, for JSON numbers
45
+//	string, for JSON strings
46
+//	[]interface{}, for JSON arrays
47
+//	map[string]interface{}, for JSON objects
48
+//	nil for JSON null
49
+//
50
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice to nil
51
+// and then appends each element to the slice.
52
+//
53
+// To unmarshal a JSON object into a map, Unmarshal replaces the map
54
+// with an empty map and then adds key-value pairs from the object to
55
+// the map.
56
+//
57
+// If a JSON value is not appropriate for a given target type,
58
+// or if a JSON number overflows the target type, Unmarshal
59
+// skips that field and completes the unmarshalling as best it can.
60
+// If no more serious errors are encountered, Unmarshal returns
61
+// an UnmarshalTypeError describing the earliest such error.
62
+//
63
+// The JSON null value unmarshals into an interface, map, pointer, or slice
64
+// by setting that Go value to nil. Because null is often used in JSON to mean
65
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
66
+// on the value and produces no error.
67
+//
68
+// When unmarshaling quoted strings, invalid UTF-8 or
69
+// invalid UTF-16 surrogate pairs are not treated as an error.
70
+// Instead, they are replaced by the Unicode replacement
71
+// character U+FFFD.
72
+//
73
+func Unmarshal(data []byte, v interface{}) error {
74
+	// Check for well-formedness.
75
+	// Avoids filling out half a data structure
76
+	// before discovering a JSON syntax error.
77
+	var d decodeState
78
+	err := checkValid(data, &d.scan)
79
+	if err != nil {
80
+		return err
81
+	}
82
+
83
+	d.init(data)
84
+	return d.unmarshal(v)
85
+}
86
+
87
+// Unmarshaler is the interface implemented by objects
88
+// that can unmarshal a JSON description of themselves.
89
+// The input can be assumed to be a valid encoding of
90
+// a JSON value. UnmarshalJSON must copy the JSON data
91
+// if it wishes to retain the data after returning.
92
+type Unmarshaler interface {
93
+	UnmarshalJSON([]byte) error
94
+}
95
+
96
+// An UnmarshalTypeError describes a JSON value that was
97
+// not appropriate for a value of a specific Go type.
98
+type UnmarshalTypeError struct {
99
+	Value  string       // description of JSON value - "bool", "array", "number -5"
100
+	Type   reflect.Type // type of Go value it could not be assigned to
101
+	Offset int64        // error occurred after reading Offset bytes
102
+}
103
+
104
+func (e *UnmarshalTypeError) Error() string {
105
+	return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
106
+}
107
+
108
+// An UnmarshalFieldError describes a JSON object key that
109
+// led to an unexported (and therefore unwritable) struct field.
110
+// (No longer used; kept for compatibility.)
111
+type UnmarshalFieldError struct {
112
+	Key   string
113
+	Type  reflect.Type
114
+	Field reflect.StructField
115
+}
116
+
117
+func (e *UnmarshalFieldError) Error() string {
118
+	return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
119
+}
120
+
121
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
122
+// (The argument to Unmarshal must be a non-nil pointer.)
123
+type InvalidUnmarshalError struct {
124
+	Type reflect.Type
125
+}
126
+
127
+func (e *InvalidUnmarshalError) Error() string {
128
+	if e.Type == nil {
129
+		return "json: Unmarshal(nil)"
130
+	}
131
+
132
+	if e.Type.Kind() != reflect.Ptr {
133
+		return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
134
+	}
135
+	return "json: Unmarshal(nil " + e.Type.String() + ")"
136
+}
137
+
138
+func (d *decodeState) unmarshal(v interface{}) (err error) {
139
+	defer func() {
140
+		if r := recover(); r != nil {
141
+			if _, ok := r.(runtime.Error); ok {
142
+				panic(r)
143
+			}
144
+			err = r.(error)
145
+		}
146
+	}()
147
+
148
+	rv := reflect.ValueOf(v)
149
+	if rv.Kind() != reflect.Ptr || rv.IsNil() {
150
+		return &InvalidUnmarshalError{reflect.TypeOf(v)}
151
+	}
152
+
153
+	d.scan.reset()
154
+	// We decode rv not rv.Elem because the Unmarshaler interface
155
+	// test must be applied at the top level of the value.
156
+	d.value(rv)
157
+	return d.savedError
158
+}
159
+
160
+// A Number represents a JSON number literal.
161
+type Number string
162
+
163
+// String returns the literal text of the number.
164
+func (n Number) String() string { return string(n) }
165
+
166
+// Float64 returns the number as a float64.
167
+func (n Number) Float64() (float64, error) {
168
+	return strconv.ParseFloat(string(n), 64)
169
+}
170
+
171
+// Int64 returns the number as an int64.
172
+func (n Number) Int64() (int64, error) {
173
+	return strconv.ParseInt(string(n), 10, 64)
174
+}
175
+
176
+// decodeState represents the state while decoding a JSON value.
177
+type decodeState struct {
178
+	data       []byte
179
+	off        int // read offset in data
180
+	scan       scanner
181
+	nextscan   scanner // for calls to nextValue
182
+	savedError error
183
+	useNumber  bool
184
+	canonical  bool
185
+}
186
+
187
+// errPhase is used for errors that should not happen unless
188
+// there is a bug in the JSON decoder or something is editing
189
+// the data slice while the decoder executes.
190
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
191
+
192
+func (d *decodeState) init(data []byte) *decodeState {
193
+	d.data = data
194
+	d.off = 0
195
+	d.savedError = nil
196
+	return d
197
+}
198
+
199
+// error aborts the decoding by panicking with err.
200
+func (d *decodeState) error(err error) {
201
+	panic(err)
202
+}
203
+
204
+// saveError saves the first err it is called with,
205
+// for reporting at the end of the unmarshal.
206
+func (d *decodeState) saveError(err error) {
207
+	if d.savedError == nil {
208
+		d.savedError = err
209
+	}
210
+}
211
+
212
+// next cuts off and returns the next full JSON value in d.data[d.off:].
213
+// The next value is known to be an object or array, not a literal.
214
+func (d *decodeState) next() []byte {
215
+	c := d.data[d.off]
216
+	item, rest, err := nextValue(d.data[d.off:], &d.nextscan)
217
+	if err != nil {
218
+		d.error(err)
219
+	}
220
+	d.off = len(d.data) - len(rest)
221
+
222
+	// Our scanner has seen the opening brace/bracket
223
+	// and thinks we're still in the middle of the object.
224
+	// invent a closing brace/bracket to get it out.
225
+	if c == '{' {
226
+		d.scan.step(&d.scan, '}')
227
+	} else {
228
+		d.scan.step(&d.scan, ']')
229
+	}
230
+
231
+	return item
232
+}
233
+
234
+// scanWhile processes bytes in d.data[d.off:] until it
235
+// receives a scan code not equal to op.
236
+// It updates d.off and returns the new scan code.
237
+func (d *decodeState) scanWhile(op int) int {
238
+	var newOp int
239
+	for {
240
+		if d.off >= len(d.data) {
241
+			newOp = d.scan.eof()
242
+			d.off = len(d.data) + 1 // mark processed EOF with len+1
243
+		} else {
244
+			c := int(d.data[d.off])
245
+			d.off++
246
+			newOp = d.scan.step(&d.scan, c)
247
+		}
248
+		if newOp != op {
249
+			break
250
+		}
251
+	}
252
+	return newOp
253
+}
254
+
255
+// value decodes a JSON value from d.data[d.off:] into the value.
256
+// it updates d.off to point past the decoded value.
257
+func (d *decodeState) value(v reflect.Value) {
258
+	if !v.IsValid() {
259
+		_, rest, err := nextValue(d.data[d.off:], &d.nextscan)
260
+		if err != nil {
261
+			d.error(err)
262
+		}
263
+		d.off = len(d.data) - len(rest)
264
+
265
+		// d.scan thinks we're still at the beginning of the item.
266
+		// Feed in an empty string - the shortest, simplest value -
267
+		// so that it knows we got to the end of the value.
268
+		if d.scan.redo {
269
+			// rewind.
270
+			d.scan.redo = false
271
+			d.scan.step = stateBeginValue
272
+		}
273
+		d.scan.step(&d.scan, '"')
274
+		d.scan.step(&d.scan, '"')
275
+
276
+		n := len(d.scan.parseState)
277
+		if n > 0 && d.scan.parseState[n-1] == parseObjectKey {
278
+			// d.scan thinks we just read an object key; finish the object
279
+			d.scan.step(&d.scan, ':')
280
+			d.scan.step(&d.scan, '"')
281
+			d.scan.step(&d.scan, '"')
282
+			d.scan.step(&d.scan, '}')
283
+		}
284
+
285
+		return
286
+	}
287
+
288
+	switch op := d.scanWhile(scanSkipSpace); op {
289
+	default:
290
+		d.error(errPhase)
291
+
292
+	case scanBeginArray:
293
+		d.array(v)
294
+
295
+	case scanBeginObject:
296
+		d.object(v)
297
+
298
+	case scanBeginLiteral:
299
+		d.literal(v)
300
+	}
301
+}
302
+
303
+type unquotedValue struct{}
304
+
305
+// valueQuoted is like value but decodes a
306
+// quoted string literal or literal null into an interface value.
307
+// If it finds anything other than a quoted string literal or null,
308
+// valueQuoted returns unquotedValue{}.
309
+func (d *decodeState) valueQuoted() interface{} {
310
+	switch op := d.scanWhile(scanSkipSpace); op {
311
+	default:
312
+		d.error(errPhase)
313
+
314
+	case scanBeginArray:
315
+		d.array(reflect.Value{})
316
+
317
+	case scanBeginObject:
318
+		d.object(reflect.Value{})
319
+
320
+	case scanBeginLiteral:
321
+		switch v := d.literalInterface().(type) {
322
+		case nil, string:
323
+			return v
324
+		}
325
+	}
326
+	return unquotedValue{}
327
+}
328
+
329
+// indirect walks down v allocating pointers as needed,
330
+// until it gets to a non-pointer.
331
+// if it encounters an Unmarshaler, indirect stops and returns that.
332
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
333
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
334
+	// If v is a named type and is addressable,
335
+	// start with its address, so that if the type has pointer methods,
336
+	// we find them.
337
+	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
338
+		v = v.Addr()
339
+	}
340
+	for {
341
+		// Load value from interface, but only if the result will be
342
+		// usefully addressable.
343
+		if v.Kind() == reflect.Interface && !v.IsNil() {
344
+			e := v.Elem()
345
+			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
346
+				v = e
347
+				continue
348
+			}
349
+		}
350
+
351
+		if v.Kind() != reflect.Ptr {
352
+			break
353
+		}
354
+
355
+		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
356
+			break
357
+		}
358
+		if v.IsNil() {
359
+			v.Set(reflect.New(v.Type().Elem()))
360
+		}
361
+		if v.Type().NumMethod() > 0 {
362
+			if u, ok := v.Interface().(Unmarshaler); ok {
363
+				return u, nil, reflect.Value{}
364
+			}
365
+			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
366
+				return nil, u, reflect.Value{}
367
+			}
368
+		}
369
+		v = v.Elem()
370
+	}
371
+	return nil, nil, v
372
+}
373
+
374
+// array consumes an array from d.data[d.off-1:], decoding into the value v.
375
+// the first byte of the array ('[') has been read already.
376
+func (d *decodeState) array(v reflect.Value) {
377
+	// Check for unmarshaler.
378
+	u, ut, pv := d.indirect(v, false)
379
+	if u != nil {
380
+		d.off--
381
+		err := u.UnmarshalJSON(d.next())
382
+		if err != nil {
383
+			d.error(err)
384
+		}
385
+		return
386
+	}
387
+	if ut != nil {
388
+		d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
389
+		d.off--
390
+		d.next()
391
+		return
392
+	}
393
+
394
+	v = pv
395
+
396
+	// Check type of target.
397
+	switch v.Kind() {
398
+	case reflect.Interface:
399
+		if v.NumMethod() == 0 {
400
+			// Decoding into nil interface?  Switch to non-reflect code.
401
+			v.Set(reflect.ValueOf(d.arrayInterface()))
402
+			return
403
+		}
404
+		// Otherwise it's invalid.
405
+		fallthrough
406
+	default:
407
+		d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
408
+		d.off--
409
+		d.next()
410
+		return
411
+	case reflect.Array:
412
+	case reflect.Slice:
413
+		break
414
+	}
415
+
416
+	i := 0
417
+	for {
418
+		// Look ahead for ] - can only happen on first iteration.
419
+		op := d.scanWhile(scanSkipSpace)
420
+		if op == scanEndArray {
421
+			break
422
+		}
423
+
424
+		// Back up so d.value can have the byte we just read.
425
+		d.off--
426
+		d.scan.undo(op)
427
+
428
+		// Get element of array, growing if necessary.
429
+		if v.Kind() == reflect.Slice {
430
+			// Grow slice if necessary
431
+			if i >= v.Cap() {
432
+				newcap := v.Cap() + v.Cap()/2
433
+				if newcap < 4 {
434
+					newcap = 4
435
+				}
436
+				newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
437
+				reflect.Copy(newv, v)
438
+				v.Set(newv)
439
+			}
440
+			if i >= v.Len() {
441
+				v.SetLen(i + 1)
442
+			}
443
+		}
444
+
445
+		if i < v.Len() {
446
+			// Decode into element.
447
+			d.value(v.Index(i))
448
+		} else {
449
+			// Ran out of fixed array: skip.
450
+			d.value(reflect.Value{})
451
+		}
452
+		i++
453
+
454
+		// Next token must be , or ].
455
+		op = d.scanWhile(scanSkipSpace)
456
+		if op == scanEndArray {
457
+			break
458
+		}
459
+		if op != scanArrayValue {
460
+			d.error(errPhase)
461
+		}
462
+	}
463
+
464
+	if i < v.Len() {
465
+		if v.Kind() == reflect.Array {
466
+			// Array.  Zero the rest.
467
+			z := reflect.Zero(v.Type().Elem())
468
+			for ; i < v.Len(); i++ {
469
+				v.Index(i).Set(z)
470
+			}
471
+		} else {
472
+			v.SetLen(i)
473
+		}
474
+	}
475
+	if i == 0 && v.Kind() == reflect.Slice {
476
+		v.Set(reflect.MakeSlice(v.Type(), 0, 0))
477
+	}
478
+}
479
+
480
+var nullLiteral = []byte("null")
481
+
482
+// object consumes an object from d.data[d.off-1:], decoding into the value v.
483
+// the first byte ('{') of the object has been read already.
484
+func (d *decodeState) object(v reflect.Value) {
485
+	// Check for unmarshaler.
486
+	u, ut, pv := d.indirect(v, false)
487
+	if u != nil {
488
+		d.off--
489
+		err := u.UnmarshalJSON(d.next())
490
+		if err != nil {
491
+			d.error(err)
492
+		}
493
+		return
494
+	}
495
+	if ut != nil {
496
+		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
497
+		d.off--
498
+		d.next() // skip over { } in input
499
+		return
500
+	}
501
+	v = pv
502
+
503
+	// Decoding into nil interface?  Switch to non-reflect code.
504
+	if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
505
+		v.Set(reflect.ValueOf(d.objectInterface()))
506
+		return
507
+	}
508
+
509
+	// Check type of target: struct or map[string]T
510
+	switch v.Kind() {
511
+	case reflect.Map:
512
+		// map must have string kind
513
+		t := v.Type()
514
+		if t.Key().Kind() != reflect.String {
515
+			d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
516
+			d.off--
517
+			d.next() // skip over { } in input
518
+			return
519
+		}
520
+		if v.IsNil() {
521
+			v.Set(reflect.MakeMap(t))
522
+		}
523
+	case reflect.Struct:
524
+
525
+	default:
526
+		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
527
+		d.off--
528
+		d.next() // skip over { } in input
529
+		return
530
+	}
531
+
532
+	var mapElem reflect.Value
533
+
534
+	for {
535
+		// Read opening " of string key or closing }.
536
+		op := d.scanWhile(scanSkipSpace)
537
+		if op == scanEndObject {
538
+			// closing } - can only happen on first iteration.
539
+			break
540
+		}
541
+		if op != scanBeginLiteral {
542
+			d.error(errPhase)
543
+		}
544
+
545
+		// Read key.
546
+		start := d.off - 1
547
+		op = d.scanWhile(scanContinue)
548
+		item := d.data[start : d.off-1]
549
+		key, ok := unquoteBytes(item)
550
+		if !ok {
551
+			d.error(errPhase)
552
+		}
553
+
554
+		// Figure out field corresponding to key.
555
+		var subv reflect.Value
556
+		destring := false // whether the value is wrapped in a string to be decoded first
557
+
558
+		if v.Kind() == reflect.Map {
559
+			elemType := v.Type().Elem()
560
+			if !mapElem.IsValid() {
561
+				mapElem = reflect.New(elemType).Elem()
562
+			} else {
563
+				mapElem.Set(reflect.Zero(elemType))
564
+			}
565
+			subv = mapElem
566
+		} else {
567
+			var f *field
568
+			fields := cachedTypeFields(v.Type(), false)
569
+			for i := range fields {
570
+				ff := &fields[i]
571
+				if bytes.Equal(ff.nameBytes, key) {
572
+					f = ff
573
+					break
574
+				}
575
+				if f == nil && ff.equalFold(ff.nameBytes, key) {
576
+					f = ff
577
+				}
578
+			}
579
+			if f != nil {
580
+				subv = v
581
+				destring = f.quoted
582
+				for _, i := range f.index {
583
+					if subv.Kind() == reflect.Ptr {
584
+						if subv.IsNil() {
585
+							subv.Set(reflect.New(subv.Type().Elem()))
586
+						}
587
+						subv = subv.Elem()
588
+					}
589
+					subv = subv.Field(i)
590
+				}
591
+			}
592
+		}
593
+
594
+		// Read : before value.
595
+		if op == scanSkipSpace {
596
+			op = d.scanWhile(scanSkipSpace)
597
+		}
598
+		if op != scanObjectKey {
599
+			d.error(errPhase)
600
+		}
601
+
602
+		// Read value.
603
+		if destring {
604
+			switch qv := d.valueQuoted().(type) {
605
+			case nil:
606
+				d.literalStore(nullLiteral, subv, false)
607
+			case string:
608
+				d.literalStore([]byte(qv), subv, true)
609
+			default:
610
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
611
+			}
612
+		} else {
613
+			d.value(subv)
614
+		}
615
+
616
+		// Write value back to map;
617
+		// if using struct, subv points into struct already.
618
+		if v.Kind() == reflect.Map {
619
+			kv := reflect.ValueOf(key).Convert(v.Type().Key())
620
+			v.SetMapIndex(kv, subv)
621
+		}
622
+
623
+		// Next token must be , or }.
624
+		op = d.scanWhile(scanSkipSpace)
625
+		if op == scanEndObject {
626
+			break
627
+		}
628
+		if op != scanObjectValue {
629
+			d.error(errPhase)
630
+		}
631
+	}
632
+}
633
+
634
+// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
635
+// The first byte of the literal has been read already
636
+// (that's how the caller knows it's a literal).
637
+func (d *decodeState) literal(v reflect.Value) {
638
+	// All bytes inside literal return scanContinue op code.
639
+	start := d.off - 1
640
+	op := d.scanWhile(scanContinue)
641
+
642
+	// Scan read one byte too far; back up.
643
+	d.off--
644
+	d.scan.undo(op)
645
+
646
+	d.literalStore(d.data[start:d.off], v, false)
647
+}
648
+
649
+// convertNumber converts the number literal s to a float64 or a Number
650
+// depending on the setting of d.useNumber.
651
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
652
+	if d.useNumber {
653
+		return Number(s), nil
654
+	}
655
+	f, err := strconv.ParseFloat(s, 64)
656
+	if err != nil {
657
+		return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
658
+	}
659
+	return f, nil
660
+}
661
+
662
+var numberType = reflect.TypeOf(Number(""))
663
+
664
+// literalStore decodes a literal stored in item into v.
665
+//
666
+// fromQuoted indicates whether this literal came from unwrapping a
667
+// string from the ",string" struct tag option. this is used only to
668
+// produce more helpful error messages.
669
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
670
+	// Check for unmarshaler.
671
+	if len(item) == 0 {
672
+		//Empty string given
673
+		d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
674
+		return
675
+	}
676
+	wantptr := item[0] == 'n' // null
677
+	u, ut, pv := d.indirect(v, wantptr)
678
+	if u != nil {
679
+		err := u.UnmarshalJSON(item)
680
+		if err != nil {
681
+			d.error(err)
682
+		}
683
+		return
684
+	}
685
+	if ut != nil {
686
+		if item[0] != '"' {
687
+			if fromQuoted {
688
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
689
+			} else {
690
+				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
691
+			}
692
+			return
693
+		}
694
+		s, ok := unquoteBytes(item)
695
+		if !ok {
696
+			if fromQuoted {
697
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
698
+			} else {
699
+				d.error(errPhase)
700
+			}
701
+		}
702
+		err := ut.UnmarshalText(s)
703
+		if err != nil {
704
+			d.error(err)
705
+		}
706
+		return
707
+	}
708
+
709
+	v = pv
710
+
711
+	switch c := item[0]; c {
712
+	case 'n': // null
713
+		switch v.Kind() {
714
+		case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
715
+			v.Set(reflect.Zero(v.Type()))
716
+			// otherwise, ignore null for primitives/string
717
+		}
718
+	case 't', 'f': // true, false
719
+		value := c == 't'
720
+		switch v.Kind() {
721
+		default:
722
+			if fromQuoted {
723
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
724
+			} else {
725
+				d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
726
+			}
727
+		case reflect.Bool:
728
+			v.SetBool(value)
729
+		case reflect.Interface:
730
+			if v.NumMethod() == 0 {
731
+				v.Set(reflect.ValueOf(value))
732
+			} else {
733
+				d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
734
+			}
735
+		}
736
+
737
+	case '"': // string
738
+		s, ok := unquoteBytes(item)
739
+		if !ok {
740
+			if fromQuoted {
741
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
742
+			} else {
743
+				d.error(errPhase)
744
+			}
745
+		}
746
+		switch v.Kind() {
747
+		default:
748
+			d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
749
+		case reflect.Slice:
750
+			if v.Type().Elem().Kind() != reflect.Uint8 {
751
+				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
752
+				break
753
+			}
754
+			b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
755
+			n, err := base64.StdEncoding.Decode(b, s)
756
+			if err != nil {
757
+				d.saveError(err)
758
+				break
759
+			}
760
+			v.Set(reflect.ValueOf(b[0:n]))
761
+		case reflect.String:
762
+			v.SetString(string(s))
763
+		case reflect.Interface:
764
+			if v.NumMethod() == 0 {
765
+				v.Set(reflect.ValueOf(string(s)))
766
+			} else {
767
+				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
768
+			}
769
+		}
770
+
771
+	default: // number
772
+		if c != '-' && (c < '0' || c > '9') {
773
+			if fromQuoted {
774
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
775
+			} else {
776
+				d.error(errPhase)
777
+			}
778
+		}
779
+		s := string(item)
780
+		switch v.Kind() {
781
+		default:
782
+			if v.Kind() == reflect.String && v.Type() == numberType {
783
+				v.SetString(s)
784
+				break
785
+			}
786
+			if fromQuoted {
787
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
788
+			} else {
789
+				d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
790
+			}
791
+		case reflect.Interface:
792
+			n, err := d.convertNumber(s)
793
+			if err != nil {
794
+				d.saveError(err)
795
+				break
796
+			}
797
+			if v.NumMethod() != 0 {
798
+				d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
799
+				break
800
+			}
801
+			v.Set(reflect.ValueOf(n))
802
+
803
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
804
+			n, err := strconv.ParseInt(s, 10, 64)
805
+			if err != nil || v.OverflowInt(n) {
806
+				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
807
+				break
808
+			}
809
+			v.SetInt(n)
810
+
811
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
812
+			n, err := strconv.ParseUint(s, 10, 64)
813
+			if err != nil || v.OverflowUint(n) {
814
+				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
815
+				break
816
+			}
817
+			v.SetUint(n)
818
+
819
+		case reflect.Float32, reflect.Float64:
820
+			n, err := strconv.ParseFloat(s, v.Type().Bits())
821
+			if err != nil || v.OverflowFloat(n) {
822
+				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
823
+				break
824
+			}
825
+			v.SetFloat(n)
826
+		}
827
+	}
828
+}
829
+
830
+// The xxxInterface routines build up a value to be stored
831
+// in an empty interface.  They are not strictly necessary,
832
+// but they avoid the weight of reflection in this common case.
833
+
834
+// valueInterface is like value but returns interface{}
835
+func (d *decodeState) valueInterface() interface{} {
836
+	switch d.scanWhile(scanSkipSpace) {
837
+	default:
838
+		d.error(errPhase)
839
+		panic("unreachable")
840
+	case scanBeginArray:
841
+		return d.arrayInterface()
842
+	case scanBeginObject:
843
+		return d.objectInterface()
844
+	case scanBeginLiteral:
845
+		return d.literalInterface()
846
+	}
847
+}
848
+
849
+// arrayInterface is like array but returns []interface{}.
850
+func (d *decodeState) arrayInterface() []interface{} {
851
+	var v = make([]interface{}, 0)
852
+	for {
853
+		// Look ahead for ] - can only happen on first iteration.
854
+		op := d.scanWhile(scanSkipSpace)
855
+		if op == scanEndArray {
856
+			break
857
+		}
858
+
859
+		// Back up so d.value can have the byte we just read.
860
+		d.off--
861
+		d.scan.undo(op)
862
+
863
+		v = append(v, d.valueInterface())
864
+
865
+		// Next token must be , or ].
866
+		op = d.scanWhile(scanSkipSpace)
867
+		if op == scanEndArray {
868
+			break
869
+		}
870
+		if op != scanArrayValue {
871
+			d.error(errPhase)
872
+		}
873
+	}
874
+	return v
875
+}
876
+
877
// objectInterface is like object but returns map[string]interface{}.
func (d *decodeState) objectInterface() map[string]interface{} {
	m := make(map[string]interface{})
	for {
		// Read opening " of string key or closing }.
		op := d.scanWhile(scanSkipSpace)
		if op == scanEndObject {
			// closing } - can only happen on first iteration.
			break
		}
		if op != scanBeginLiteral {
			d.error(errPhase)
		}

		// Read string key.
		// d.off is one past the byte that produced op, so the key's
		// opening quote sits at d.off-1; the same is true for the
		// byte that terminated the literal below.
		start := d.off - 1
		op = d.scanWhile(scanContinue)
		item := d.data[start : d.off-1]
		key, ok := unquote(item)
		if !ok {
			d.error(errPhase)
		}

		// Read : before value.
		if op == scanSkipSpace {
			op = d.scanWhile(scanSkipSpace)
		}
		if op != scanObjectKey {
			d.error(errPhase)
		}

		// Read value.
		m[key] = d.valueInterface()

		// Next token must be , or }.
		op = d.scanWhile(scanSkipSpace)
		if op == scanEndObject {
			break
		}
		if op != scanObjectValue {
			d.error(errPhase)
		}
	}
	return m
}
922
+
923
// literalInterface is like literal but returns an interface value.
// It handles null, true/false, strings, and numbers.
func (d *decodeState) literalInterface() interface{} {
	// All bytes inside literal return scanContinue op code.
	start := d.off - 1
	op := d.scanWhile(scanContinue)

	// Scan read one byte too far; back up.
	d.off--
	d.scan.undo(op)
	item := d.data[start:d.off]

	// The scanner has already validated the literal's shape, so the
	// first byte is enough to classify it.
	switch c := item[0]; c {
	case 'n': // null
		return nil

	case 't', 'f': // true, false
		return c == 't'

	case '"': // string
		s, ok := unquote(item)
		if !ok {
			d.error(errPhase)
		}
		return s

	default: // number
		if c != '-' && (c < '0' || c > '9') {
			d.error(errPhase)
		}
		// convertNumber is defined elsewhere in this file; presumably it
		// yields float64 or Number per decoder settings — confirm there.
		n, err := d.convertNumber(string(item))
		if err != nil {
			d.saveError(err)
		}
		return n
	}
}
959
+
960
// getu4 decodes a \uXXXX escape at the beginning of s and returns the
// UTF-16 code unit's value, or -1 when s does not start with a
// well-formed six-byte escape.
func getu4(s []byte) rune {
	if len(s) < 6 {
		return -1
	}
	if s[0] != '\\' || s[1] != 'u' {
		return -1
	}
	v, err := strconv.ParseUint(string(s[2:6]), 16, 64)
	if err != nil {
		return -1
	}
	return rune(v)
}
972
+
973
+// unquote converts a quoted JSON string literal s into an actual string t.
974
+// The rules are different than for Go, so cannot use strconv.Unquote.
975
+func unquote(s []byte) (t string, ok bool) {
976
+	s, ok = unquoteBytes(s)
977
+	t = string(s)
978
+	return
979
+}
980
+
981
// unquoteBytes converts the quoted JSON string literal s (including the
// surrounding double quotes) into the byte sequence it denotes.
// ok is false when s is not a well-formed JSON string literal.
// When the content needs no unquoting, the result aliases s's backing
// array instead of copying.
func unquoteBytes(s []byte) (t []byte, ok bool) {
	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
		return
	}
	s = s[1 : len(s)-1]

	// Check for unusual characters. If there are none,
	// then no unquoting is needed, so return a slice of the
	// original bytes.
	r := 0
	for r < len(s) {
		c := s[r]
		if c == '\\' || c == '"' || c < ' ' {
			break
		}
		if c < utf8.RuneSelf {
			r++
			continue
		}
		rr, size := utf8.DecodeRune(s[r:])
		if rr == utf8.RuneError && size == 1 {
			break
		}
		r += size
	}
	if r == len(s) {
		return s, true
	}

	// r: read cursor into s; w: write cursor into b. The extra
	// 2*utf8.UTFMax headroom leaves room for one decoded rune plus one
	// replacement rune without a bounds check per byte.
	b := make([]byte, len(s)+2*utf8.UTFMax)
	w := copy(b, s[0:r])
	for r < len(s) {
		// Out of room?  Can only happen if s is full of
		// malformed UTF-8 and we're replacing each
		// byte with RuneError.
		if w >= len(b)-2*utf8.UTFMax {
			nb := make([]byte, (len(b)+utf8.UTFMax)*2)
			copy(nb, b[0:w])
			b = nb
		}
		switch c := s[r]; {
		case c == '\\':
			r++
			if r >= len(s) {
				// Dangling backslash at end of input.
				return
			}
			switch s[r] {
			default:
				// Unrecognized escape sequence.
				return
			case '"', '\\', '/', '\'':
				b[w] = s[r]
				r++
				w++
			case 'b':
				b[w] = '\b'
				r++
				w++
			case 'f':
				b[w] = '\f'
				r++
				w++
			case 'n':
				b[w] = '\n'
				r++
				w++
			case 'r':
				b[w] = '\r'
				r++
				w++
			case 't':
				b[w] = '\t'
				r++
				w++
			case 'u':
				// Rewind to the backslash so getu4 sees the full \uXXXX.
				r--
				rr := getu4(s[r:])
				if rr < 0 {
					return
				}
				r += 6
				if utf16.IsSurrogate(rr) {
					rr1 := getu4(s[r:])
					if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
						// A valid pair; consume.
						r += 6
						w += utf8.EncodeRune(b[w:], dec)
						break
					}
					// Invalid surrogate; fall back to replacement rune.
					rr = unicode.ReplacementChar
				}
				w += utf8.EncodeRune(b[w:], rr)
			}

		// Quote, control characters are invalid.
		case c == '"', c < ' ':
			return

		// ASCII
		case c < utf8.RuneSelf:
			b[w] = c
			r++
			w++

		// Coerce to well-formed UTF-8.
		default:
			rr, size := utf8.DecodeRune(s[r:])
			r += size
			w += utf8.EncodeRune(b[w:], rr)
		}
	}
	return b[0:w], true
}
0 1094
new file mode 100644
... ...
@@ -0,0 +1,1245 @@
0
+// Copyright 2010 The Go Authors.  All rights reserved.
1
+// Use of this source code is governed by a BSD-style
2
+// license that can be found in the LICENSE file.
3
+
4
+// Package json implements encoding and decoding of JSON objects as defined in
5
+// RFC 4627. The mapping between JSON objects and Go values is described
6
+// in the documentation for the Marshal and Unmarshal functions.
7
+//
8
+// See "JSON and Go" for an introduction to this package:
9
+// https://golang.org/doc/articles/json_and_go.html
10
+package json
11
+
12
+import (
13
+	"bytes"
14
+	"encoding"
15
+	"encoding/base64"
16
+	"math"
17
+	"reflect"
18
+	"runtime"
19
+	"sort"
20
+	"strconv"
21
+	"strings"
22
+	"sync"
23
+	"unicode"
24
+	"unicode/utf8"
25
+)
26
+
27
+// Marshal returns the JSON encoding of v.
28
+//
29
+// Marshal traverses the value v recursively.
30
+// If an encountered value implements the Marshaler interface
31
+// and is not a nil pointer, Marshal calls its MarshalJSON method
32
+// to produce JSON.  The nil pointer exception is not strictly necessary
33
+// but mimics a similar, necessary exception in the behavior of
34
+// UnmarshalJSON.
35
+//
36
+// Otherwise, Marshal uses the following type-dependent default encodings:
37
+//
38
+// Boolean values encode as JSON booleans.
39
+//
40
+// Floating point, integer, and Number values encode as JSON numbers.
41
+//
42
+// String values encode as JSON strings coerced to valid UTF-8,
43
+// replacing invalid bytes with the Unicode replacement rune.
44
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
45
+// to keep some browsers from misinterpreting JSON output as HTML.
46
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
47
+//
48
+// Array and slice values encode as JSON arrays, except that
49
+// []byte encodes as a base64-encoded string, and a nil slice
50
+// encodes as the null JSON object.
51
+//
52
+// Struct values encode as JSON objects. Each exported struct field
53
+// becomes a member of the object unless
54
+//   - the field's tag is "-", or
55
+//   - the field is empty and its tag specifies the "omitempty" option.
56
+// The empty values are false, 0, any
57
+// nil pointer or interface value, and any array, slice, map, or string of
58
+// length zero. The object's default key string is the struct field name
59
+// but can be specified in the struct field's tag value. The "json" key in
60
+// the struct field's tag value is the key name, followed by an optional comma
61
+// and options. Examples:
62
+//
63
+//   // Field is ignored by this package.
64
+//   Field int `json:"-"`
65
+//
66
+//   // Field appears in JSON as key "myName".
67
+//   Field int `json:"myName"`
68
+//
69
+//   // Field appears in JSON as key "myName" and
70
+//   // the field is omitted from the object if its value is empty,
71
+//   // as defined above.
72
+//   Field int `json:"myName,omitempty"`
73
+//
74
+//   // Field appears in JSON as key "Field" (the default), but
75
+//   // the field is skipped if empty.
76
+//   // Note the leading comma.
77
+//   Field int `json:",omitempty"`
78
+//
79
+// The "string" option signals that a field is stored as JSON inside a
80
+// JSON-encoded string. It applies only to fields of string, floating point,
81
+// integer, or boolean types. This extra level of encoding is sometimes used
82
+// when communicating with JavaScript programs:
83
+//
84
+//    Int64String int64 `json:",string"`
85
+//
86
+// The key name will be used if it's a non-empty string consisting of
87
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
88
+// underscores and slashes.
89
+//
90
+// Anonymous struct fields are usually marshaled as if their inner exported fields
91
+// were fields in the outer struct, subject to the usual Go visibility rules amended
92
+// as described in the next paragraph.
93
+// An anonymous struct field with a name given in its JSON tag is treated as
94
+// having that name, rather than being anonymous.
95
+// An anonymous struct field of interface type is treated the same as having
96
+// that type as its name, rather than being anonymous.
97
+//
98
+// The Go visibility rules for struct fields are amended for JSON when
99
+// deciding which field to marshal or unmarshal. If there are
100
+// multiple fields at the same level, and that level is the least
101
+// nested (and would therefore be the nesting level selected by the
102
+// usual Go rules), the following extra rules apply:
103
+//
104
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
105
+// even if there are multiple untagged fields that would otherwise conflict.
106
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
107
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
108
+//
109
+// Handling of anonymous struct fields is new in Go 1.1.
110
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
111
+// an anonymous struct field in both current and earlier versions, give the field
112
+// a JSON tag of "-".
113
+//
114
+// Map values encode as JSON objects.
115
+// The map's key type must be string; the map keys are used as JSON object
116
+// keys, subject to the UTF-8 coercion described for string values above.
117
+//
118
+// Pointer values encode as the value pointed to.
119
+// A nil pointer encodes as the null JSON object.
120
+//
121
+// Interface values encode as the value contained in the interface.
122
+// A nil interface value encodes as the null JSON object.
123
+//
124
+// Channel, complex, and function values cannot be encoded in JSON.
125
+// Attempting to encode such a value causes Marshal to return
126
+// an UnsupportedTypeError.
127
+//
128
+// JSON cannot represent cyclic data structures and Marshal does not
129
+// handle them.  Passing cyclic structures to Marshal will result in
130
+// an infinite recursion.
131
+//
132
func Marshal(v interface{}) ([]byte, error) {
	// Standard (non-canonical) mode; see MarshalCanonical for the
	// Canonical JSON variant.
	return marshal(v, false)
}
135
+
136
+// MarshalIndent is like Marshal but applies Indent to format the output.
137
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
138
+	b, err := Marshal(v)
139
+	if err != nil {
140
+		return nil, err
141
+	}
142
+	var buf bytes.Buffer
143
+	err = Indent(&buf, b, prefix, indent)
144
+	if err != nil {
145
+		return nil, err
146
+	}
147
+	return buf.Bytes(), nil
148
+}
149
+
150
// MarshalCanonical is like Marshal but encodes into Canonical JSON.
// Read more at: http://wiki.laptop.org/go/Canonical_JSON
//
// In canonical mode strings get minimal escaping (see encodeState.string)
// and floats with a fractional part are rejected (see floatEncoder.encode).
func MarshalCanonical(v interface{}) ([]byte, error) {
	return marshal(v, true)
}
155
+
156
+func marshal(v interface{}, canonical bool) ([]byte, error) {
157
+	e := &encodeState{canonical: canonical}
158
+	err := e.marshal(v)
159
+	if err != nil {
160
+		return nil, err
161
+	}
162
+	return e.Bytes(), nil
163
+}
164
+
165
// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
// so that the JSON will be safe to embed inside HTML <script> tags.
// For historical reasons, web browsers don't honor standard HTML
// escaping within <script> tags, so an alternative JSON encoding must
// be used.
func HTMLEscape(dst *bytes.Buffer, src []byte) {
	// The characters can only appear in string literals,
	// so just scan the string one byte at a time.
	start := 0
	for i, c := range src {
		if c == '<' || c == '>' || c == '&' {
			if start < i {
				dst.Write(src[start:i])
			}
			dst.WriteString(`\u00`)
			dst.WriteByte(hex[c>>4])
			dst.WriteByte(hex[c&0xF])
			start = i + 1
		}
		// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
		// src[i+2]&^1 == 0xA8 matches both 0xA8 and 0xA9 with one test.
		if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
			if start < i {
				dst.Write(src[start:i])
			}
			dst.WriteString(`\u202`)
			dst.WriteByte(hex[src[i+2]&0xF])
			start = i + 3
		}
	}
	if start < len(src) {
		dst.Write(src[start:])
	}
}
199
+
200
// Marshaler is the interface implemented by objects that
// can marshal themselves into valid JSON.
type Marshaler interface {
	MarshalJSON() ([]byte, error)
}

// An UnsupportedTypeError is returned by Marshal when attempting
// to encode an unsupported value type.
type UnsupportedTypeError struct {
	Type reflect.Type
}

func (e *UnsupportedTypeError) Error() string {
	return "json: unsupported type: " + e.Type.String()
}

// An UnsupportedValueError is returned by Marshal when attempting to
// encode an unsupported value, e.g. an infinite or NaN float, or — in
// canonical mode — a float with a fractional part.
type UnsupportedValueError struct {
	Value reflect.Value
	Str   string
}

func (e *UnsupportedValueError) Error() string {
	return "json: unsupported value: " + e.Str
}

// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
// attempting to encode a string value with invalid UTF-8 sequences.
// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
// replacing invalid bytes with the Unicode replacement rune U+FFFD.
// This error is no longer generated but is kept for backwards compatibility
// with programs that might mention it.
type InvalidUTF8Error struct {
	S string // the whole string value that caused the error
}

func (e *InvalidUTF8Error) Error() string {
	return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
}

// A MarshalerError wraps an error returned by a value's MarshalJSON or
// MarshalText method, recording the type that failed.
type MarshalerError struct {
	Type reflect.Type
	Err  error
}

func (e *MarshalerError) Error() string {
	return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
}
247
+
248
// hex holds the digits used when emitting \u00XX-style escapes.
var hex = "0123456789abcdef"

// An encodeState encodes JSON into a bytes.Buffer.
type encodeState struct {
	bytes.Buffer // accumulated output
	scratch      [64]byte // scratch space for strconv Append* formatting
	canonical    bool     // emit Canonical JSON; see MarshalCanonical
}

// encodeStatePool recycles encodeStates to cut per-call allocation.
var encodeStatePool sync.Pool

// newEncodeState returns a reset encodeState in the requested mode,
// reusing a pooled instance when one is available.
func newEncodeState(canonical bool) *encodeState {
	if v := encodeStatePool.Get(); v != nil {
		e := v.(*encodeState)
		e.Reset()
		e.canonical = canonical
		return e
	}
	return &encodeState{canonical: canonical}
}
268
+
269
// marshal encodes v into e's buffer. Encoder functions report failures
// by panicking with an error value; marshal recovers that panic and
// returns it as an ordinary error.
func (e *encodeState) marshal(v interface{}) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(runtime.Error); ok {
				// Genuine runtime faults are not encoding errors; re-panic.
				panic(r)
			}
			if s, ok := r.(string); ok {
				// Plain string panics did not come from e.error; re-panic.
				panic(s)
			}
			err = r.(error)
		}
	}()
	e.reflectValue(reflect.ValueOf(v))
	return nil
}

// error aborts encoding by panicking with err; the panic is recovered
// in marshal above and surfaced as its return value.
func (e *encodeState) error(err error) {
	panic(err)
}
288
+
289
+func isEmptyValue(v reflect.Value) bool {
290
+	switch v.Kind() {
291
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
292
+		return v.Len() == 0
293
+	case reflect.Bool:
294
+		return !v.Bool()
295
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
296
+		return v.Int() == 0
297
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
298
+		return v.Uint() == 0
299
+	case reflect.Float32, reflect.Float64:
300
+		return v.Float() == 0
301
+	case reflect.Interface, reflect.Ptr:
302
+		return v.IsNil()
303
+	}
304
+	return false
305
+}
306
+
307
// reflectValue encodes v into e using the encoder for v's type.
func (e *encodeState) reflectValue(v reflect.Value) {
	e.valueEncoder(v)(e, v, false)
}

// encoderFunc writes the JSON encoding of v to e. quoted asks for the
// output to be wrapped in quotes (the ",string" struct-tag option).
type encoderFunc func(e *encodeState, v reflect.Value, quoted bool)

// encoderCache memoizes the encoderFunc built for each type.
//
// NOTE(review): the cache is keyed by reflect.Type only, but
// newStructEncoder consults e.canonical when building field lists —
// confirm whether canonical and non-canonical marshaling of the same
// struct type can share a stale cache entry.
var encoderCache struct {
	sync.RWMutex
	m map[reflect.Type]encoderFunc
}

// valueEncoder returns the encoder for v's type, or a null-writing
// encoder for the invalid (zero) reflect.Value.
func (e *encodeState) valueEncoder(v reflect.Value) encoderFunc {
	if !v.IsValid() {
		return invalidValueEncoder
	}
	return e.typeEncoder(v.Type())
}

// typeEncoder returns the encoder for t, building and caching it on
// first use. Safe for concurrent use.
func (e *encodeState) typeEncoder(t reflect.Type) encoderFunc {
	encoderCache.RLock()
	f := encoderCache.m[t]
	encoderCache.RUnlock()
	if f != nil {
		return f
	}

	// To deal with recursive types, populate the map with an
	// indirect func before we build it. This type waits on the
	// real func (f) to be ready and then calls it.  This indirect
	// func is only used for recursive types.
	encoderCache.Lock()
	if encoderCache.m == nil {
		encoderCache.m = make(map[reflect.Type]encoderFunc)
	}
	var wg sync.WaitGroup
	wg.Add(1)
	encoderCache.m[t] = func(e *encodeState, v reflect.Value, quoted bool) {
		wg.Wait()
		f(e, v, quoted)
	}
	encoderCache.Unlock()

	// Compute fields without lock.
	// Might duplicate effort but won't hold other computations back.
	f = e.newTypeEncoder(t, true)
	wg.Done()
	encoderCache.Lock()
	encoderCache.m[t] = f
	encoderCache.Unlock()
	return f
}
358
+
359
// Interface types checked for custom marshaling support.
var (
	marshalerType     = reflect.TypeOf(new(Marshaler)).Elem()
	textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
)

// newTypeEncoder constructs an encoderFunc for a type.
// The returned encoder only checks CanAddr when allowAddr is true.
// Marshaler takes precedence over TextMarshaler; for non-pointer types
// whose pointer implements the interface, the addr* variant is used
// when the value turns out to be addressable.
func (e *encodeState) newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
	if t.Implements(marshalerType) {
		return marshalerEncoder
	}
	if t.Kind() != reflect.Ptr && allowAddr {
		if reflect.PtrTo(t).Implements(marshalerType) {
			return newCondAddrEncoder(addrMarshalerEncoder, e.newTypeEncoder(t, false))
		}
	}

	if t.Implements(textMarshalerType) {
		return textMarshalerEncoder
	}
	if t.Kind() != reflect.Ptr && allowAddr {
		if reflect.PtrTo(t).Implements(textMarshalerType) {
			return newCondAddrEncoder(addrTextMarshalerEncoder, e.newTypeEncoder(t, false))
		}
	}

	switch t.Kind() {
	case reflect.Bool:
		return boolEncoder
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return intEncoder
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return uintEncoder
	case reflect.Float32:
		return float32Encoder
	case reflect.Float64:
		return float64Encoder
	case reflect.String:
		return stringEncoder
	case reflect.Interface:
		return interfaceEncoder
	case reflect.Struct:
		return e.newStructEncoder(t)
	case reflect.Map:
		return e.newMapEncoder(t)
	case reflect.Slice:
		return e.newSliceEncoder(t)
	case reflect.Array:
		return e.newArrayEncoder(t)
	case reflect.Ptr:
		return e.newPtrEncoder(t)
	default:
		// Chan, Func, Complex, UnsafePointer cannot be encoded.
		return unsupportedTypeEncoder
	}
}
414
+
415
// invalidValueEncoder handles the invalid reflect.Value (e.g. Marshal(nil)).
func invalidValueEncoder(e *encodeState, v reflect.Value, quoted bool) {
	e.WriteString("null")
}

// marshalerEncoder calls the value's own MarshalJSON and copies its
// output — validity-checked and compacted — into the buffer.
func marshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
	if v.Kind() == reflect.Ptr && v.IsNil() {
		e.WriteString("null")
		return
	}
	m := v.Interface().(Marshaler)
	b, err := m.MarshalJSON()
	if err == nil {
		// copy JSON into buffer, checking validity.
		err = compact(&e.Buffer, b, true)
	}
	if err != nil {
		e.error(&MarshalerError{v.Type(), err})
	}
}

// addrMarshalerEncoder is marshalerEncoder for values whose *address*
// implements Marshaler; only installed for addressable values.
func addrMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
	va := v.Addr()
	if va.IsNil() {
		e.WriteString("null")
		return
	}
	m := va.Interface().(Marshaler)
	b, err := m.MarshalJSON()
	if err == nil {
		// copy JSON into buffer, checking validity.
		err = compact(&e.Buffer, b, true)
	}
	if err != nil {
		e.error(&MarshalerError{v.Type(), err})
	}
}

// textMarshalerEncoder encodes the value's MarshalText output as a
// JSON string.
func textMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
	if v.Kind() == reflect.Ptr && v.IsNil() {
		e.WriteString("null")
		return
	}
	m := v.Interface().(encoding.TextMarshaler)
	b, err := m.MarshalText()
	if err == nil {
		_, err = e.stringBytes(b)
	}
	if err != nil {
		e.error(&MarshalerError{v.Type(), err})
	}
}

// addrTextMarshalerEncoder is textMarshalerEncoder for values whose
// address implements encoding.TextMarshaler.
func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
	va := v.Addr()
	if va.IsNil() {
		e.WriteString("null")
		return
	}
	m := va.Interface().(encoding.TextMarshaler)
	b, err := m.MarshalText()
	if err == nil {
		_, err = e.stringBytes(b)
	}
	if err != nil {
		e.error(&MarshalerError{v.Type(), err})
	}
}
482
+
483
+func boolEncoder(e *encodeState, v reflect.Value, quoted bool) {
484
+	if quoted {
485
+		e.WriteByte('"')
486
+	}
487
+	if v.Bool() {
488
+		e.WriteString("true")
489
+	} else {
490
+		e.WriteString("false")
491
+	}
492
+	if quoted {
493
+		e.WriteByte('"')
494
+	}
495
+}
496
+
497
// intEncoder writes a signed integer, optionally quoted.
func intEncoder(e *encodeState, v reflect.Value, quoted bool) {
	b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
	if quoted {
		e.WriteByte('"')
	}
	e.Write(b)
	if quoted {
		e.WriteByte('"')
	}
}

// uintEncoder writes an unsigned integer, optionally quoted.
func uintEncoder(e *encodeState, v reflect.Value, quoted bool) {
	b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
	if quoted {
		e.WriteByte('"')
	}
	e.Write(b)
	if quoted {
		e.WriteByte('"')
	}
}

// floatEncoder's integer value is the bit width used for formatting.
type floatEncoder int // number of bits

// encode writes a float value. Infinities and NaN are always rejected.
// In canonical mode a float with any fractional part is rejected too,
// and the (integral) value is written as a plain decimal integer.
func (bits floatEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
	f := v.Float()
	if math.IsInf(f, 0) || math.IsNaN(f) || (e.canonical && math.Floor(f) != f) {
		e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))})
	}

	var b []byte
	if e.canonical {
		b = strconv.AppendInt(e.scratch[:0], int64(f), 10)
	} else {
		b = strconv.AppendFloat(e.scratch[:0], f, 'g', -1, int(bits))
	}
	if quoted {
		e.WriteByte('"')
	}
	e.Write(b)
	if quoted {
		e.WriteByte('"')
	}
}

var (
	float32Encoder = (floatEncoder(32)).encode
	float64Encoder = (floatEncoder(64)).encode
)
546
+
547
// stringEncoder writes a string value. Number values are passed through
// verbatim as JSON numbers; the quoted flag (",string") causes the
// string to be JSON-encoded twice.
func stringEncoder(e *encodeState, v reflect.Value, quoted bool) {
	if v.Type() == numberType {
		numStr := v.String()
		if numStr == "" {
			numStr = "0" // Number's zero-val
		}
		e.WriteString(numStr)
		return
	}
	if quoted {
		sb, err := Marshal(v.String())
		if err != nil {
			e.error(err)
		}
		e.string(string(sb))
	} else {
		e.string(v.String())
	}
}

// interfaceEncoder encodes the dynamic value held in an interface;
// a nil interface encodes as null.
func interfaceEncoder(e *encodeState, v reflect.Value, quoted bool) {
	if v.IsNil() {
		e.WriteString("null")
		return
	}
	e.reflectValue(v.Elem())
}

// unsupportedTypeEncoder aborts encoding for types JSON cannot express.
func unsupportedTypeEncoder(e *encodeState, v reflect.Value, quoted bool) {
	e.error(&UnsupportedTypeError{v.Type()})
}
578
+
579
// structEncoder encodes a struct as a JSON object using a precomputed
// field list; fieldEncs[i] is the encoder for fields[i].
type structEncoder struct {
	fields    []field
	fieldEncs []encoderFunc
}

// encode writes v as an object, skipping fields that are unreachable
// through a nil embedded pointer and omitempty-tagged empty fields.
func (se *structEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
	e.WriteByte('{')
	first := true
	for i, f := range se.fields {
		fv := fieldByIndex(v, f.index)
		if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
			continue
		}
		if first {
			first = false
		} else {
			e.WriteByte(',')
		}
		e.string(f.name)
		e.WriteByte(':')
		se.fieldEncs[i](e, fv, f.quoted)
	}
	e.WriteByte('}')
}

// newStructEncoder builds the struct encoder for t. Field selection and
// ordering come from cachedTypeFields (defined elsewhere in this file),
// which is told whether canonical output is wanted.
func (e *encodeState) newStructEncoder(t reflect.Type) encoderFunc {
	fields := cachedTypeFields(t, e.canonical)
	se := &structEncoder{
		fields:    fields,
		fieldEncs: make([]encoderFunc, len(fields)),
	}
	for i, f := range fields {
		se.fieldEncs[i] = e.typeEncoder(typeByIndex(t, f.index))
	}
	return se.encode
}
615
+
616
// mapEncoder encodes a string-keyed map; elemEnc handles the values.
type mapEncoder struct {
	elemEnc encoderFunc
}

// encode writes v as a JSON object. Keys are always emitted in sorted
// order, which makes output deterministic and satisfies Canonical JSON.
// A nil map encodes as null.
func (me *mapEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
	if v.IsNil() {
		e.WriteString("null")
		return
	}
	e.WriteByte('{')
	var sv stringValues = v.MapKeys()
	sort.Sort(sv)
	for i, k := range sv {
		if i > 0 {
			e.WriteByte(',')
		}
		e.string(k.String())
		e.WriteByte(':')
		me.elemEnc(e, v.MapIndex(k), false)
	}
	e.WriteByte('}')
}

// newMapEncoder builds a map encoder for t; non-string keys are
// unsupported.
func (e *encodeState) newMapEncoder(t reflect.Type) encoderFunc {
	if t.Key().Kind() != reflect.String {
		return unsupportedTypeEncoder
	}
	me := &mapEncoder{e.typeEncoder(t.Elem())}
	return me.encode
}
646
+
647
// encodeByteSlice writes a []byte as a base64-encoded JSON string;
// a nil slice encodes as null.
func encodeByteSlice(e *encodeState, v reflect.Value, _ bool) {
	if v.IsNil() {
		e.WriteString("null")
		return
	}
	s := v.Bytes()
	e.WriteByte('"')
	if len(s) < 1024 {
		// for small buffers, using Encode directly is much faster.
		dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
		base64.StdEncoding.Encode(dst, s)
		e.Write(dst)
	} else {
		// for large buffers, avoid unnecessary extra temporary
		// buffer space.
		enc := base64.NewEncoder(base64.StdEncoding, e)
		enc.Write(s)
		enc.Close()
	}
	e.WriteByte('"')
}

// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil.
type sliceEncoder struct {
	arrayEnc encoderFunc
}

// encode writes a nil slice as null and otherwise delegates to the
// wrapped array encoder.
func (se *sliceEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
	if v.IsNil() {
		e.WriteString("null")
		return
	}
	se.arrayEnc(e, v, false)
}

// newSliceEncoder builds a slice encoder for t.
func (e *encodeState) newSliceEncoder(t reflect.Type) encoderFunc {
	// Byte slices get special treatment; arrays don't.
	if t.Elem().Kind() == reflect.Uint8 {
		return encodeByteSlice
	}
	enc := &sliceEncoder{e.newArrayEncoder(t)}
	return enc.encode
}
690
+
691
// arrayEncoder encodes a fixed-length array element by element.
type arrayEncoder struct {
	elemEnc encoderFunc
}

// encode writes v as a JSON array.
func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
	e.WriteByte('[')
	n := v.Len()
	for i := 0; i < n; i++ {
		if i > 0 {
			e.WriteByte(',')
		}
		ae.elemEnc(e, v.Index(i), false)
	}
	e.WriteByte(']')
}

// newArrayEncoder builds an array encoder for t.
func (e *encodeState) newArrayEncoder(t reflect.Type) encoderFunc {
	enc := &arrayEncoder{e.typeEncoder(t.Elem())}
	return enc.encode
}

// ptrEncoder dereferences a pointer before encoding; nil pointers
// encode as null.
type ptrEncoder struct {
	elemEnc encoderFunc
}

func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
	if v.IsNil() {
		e.WriteString("null")
		return
	}
	pe.elemEnc(e, v.Elem(), quoted)
}

// newPtrEncoder builds a pointer encoder for t.
func (e *encodeState) newPtrEncoder(t reflect.Type) encoderFunc {
	enc := &ptrEncoder{e.typeEncoder(t.Elem())}
	return enc.encode
}

// condAddrEncoder picks between two encoders depending on whether the
// value is addressable at encode time.
type condAddrEncoder struct {
	canAddrEnc, elseEnc encoderFunc
}

func (ce *condAddrEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
	if v.CanAddr() {
		ce.canAddrEnc(e, v, quoted)
	} else {
		ce.elseEnc(e, v, quoted)
	}
}

// newCondAddrEncoder returns an encoder that checks whether its value
// CanAddr and delegates to canAddrEnc if so, else to elseEnc.
func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc {
	enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
	return enc.encode
}
747
+
748
// isValidTag reports whether s is usable as a JSON object key taken
// from a struct tag: non-empty and made of Unicode letters, digits,
// and a fixed set of punctuation. Backslash and quote characters are
// reserved and therefore rejected.
func isValidTag(s string) bool {
	if len(s) == 0 {
		return false
	}
	const punct = "!#$%&()*+-./:<=>?@[]^_{|}~ "
	for _, r := range s {
		if unicode.IsLetter(r) || unicode.IsDigit(r) {
			continue
		}
		if !strings.ContainsRune(punct, r) {
			return false
		}
	}
	return true
}
766
+
767
+func fieldByIndex(v reflect.Value, index []int) reflect.Value {
768
+	for _, i := range index {
769
+		if v.Kind() == reflect.Ptr {
770
+			if v.IsNil() {
771
+				return reflect.Value{}
772
+			}
773
+			v = v.Elem()
774
+		}
775
+		v = v.Field(i)
776
+	}
777
+	return v
778
+}
779
+
780
+func typeByIndex(t reflect.Type, index []int) reflect.Type {
781
+	for _, i := range index {
782
+		if t.Kind() == reflect.Ptr {
783
+			t = t.Elem()
784
+		}
785
+		t = t.Field(i).Type
786
+	}
787
+	return t
788
+}
789
+
790
// stringValues is a slice of reflect.Value holding *reflect.StringValue.
// It implements sort.Interface to order map keys by their string value,
// as required for deterministic (and canonical) object output.
type stringValues []reflect.Value

func (sv stringValues) Len() int           { return len(sv) }
func (sv stringValues) Swap(i, j int)      { sv[i], sv[j] = sv[j], sv[i] }
func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
func (sv stringValues) get(i int) string   { return sv[i].String() }
798
+
799
// NOTE: keep in sync with stringBytes below.
// string writes s as a quoted JSON string literal and returns the
// number of bytes written (the error is always nil). In the default
// mode it escapes '"', '\\', control characters, the HTML-sensitive
// <, >, &, and U+2028/U+2029, and replaces invalid UTF-8 with U+FFFD.
// In canonical mode only '"' and '\\' are escaped; every other byte —
// including control characters and unvalidated multi-byte sequences —
// is copied through verbatim.
func (e *encodeState) string(s string) (int, error) {
	len0 := e.Len()
	e.WriteByte('"')
	// start marks the beginning of the current run of bytes that can be
	// copied without escaping; runs are flushed before each escape.
	start := 0
	for i := 0; i < len(s); {
		if b := s[i]; b < utf8.RuneSelf {
			if b != '\\' && b != '"' {
				if e.canonical || (0x20 <= b && b != '<' && b != '>' && b != '&') {
					i++
					continue
				}
			}
			if start < i {
				e.WriteString(s[start:i])
			}
			switch b {
			case '\\', '"':
				e.WriteByte('\\')
				e.WriteByte(b)
			case '\n':
				e.WriteByte('\\')
				e.WriteByte('n')
			case '\r':
				e.WriteByte('\\')
				e.WriteByte('r')
			case '\t':
				e.WriteByte('\\')
				e.WriteByte('t')
			default:
				// This encodes bytes < 0x20 except for \n and \r,
				// as well as <, > and &. The latter are escaped because they
				// can lead to security holes when user-controlled strings
				// are rendered into JSON and served to some browsers.
				e.WriteString(`\u00`)
				e.WriteByte(hex[b>>4])
				e.WriteByte(hex[b&0xF])
			}
			i++
			start = i
			continue
		}
		if e.canonical {
			// Canonical JSON copies multi-byte sequences through
			// untouched, with no UTF-8 validation.
			i++
			continue
		}
		c, size := utf8.DecodeRuneInString(s[i:])
		if c == utf8.RuneError && size == 1 {
			if start < i {
				e.WriteString(s[start:i])
			}
			e.WriteString(`\ufffd`)
			i += size
			start = i
			continue
		}
		// U+2028 is LINE SEPARATOR.
		// U+2029 is PARAGRAPH SEPARATOR.
		// They are both technically valid characters in JSON strings,
		// but don't work in JSONP, which has to be evaluated as JavaScript,
		// and can lead to security holes there. It is valid JSON to
		// escape them, so we do so unconditionally.
		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
		if c == '\u2028' || c == '\u2029' {
			if start < i {
				e.WriteString(s[start:i])
			}
			e.WriteString(`\u202`)
			e.WriteByte(hex[c&0xF])
			i += size
			start = i
			continue
		}
		i += size
	}
	if start < len(s) {
		e.WriteString(s[start:])
	}
	e.WriteByte('"')
	return e.Len() - len0, nil
}
880
+
881
+// NOTE: keep in sync with string above.
882
+func (e *encodeState) stringBytes(s []byte) (int, error) {
883
+	len0 := e.Len()
884
+	e.WriteByte('"')
885
+	start := 0
886
+	for i := 0; i < len(s); {
887
+		if b := s[i]; b < utf8.RuneSelf {
888
+			if b != '\\' && b != '"' {
889
+				if e.canonical || (0x20 <= b && b != '<' && b != '>' && b != '&') {
890
+					i++
891
+					continue
892
+				}
893
+			}
894
+			if start < i {
895
+				e.Write(s[start:i])
896
+			}
897
+			switch b {
898
+			case '\\', '"':
899
+				e.WriteByte('\\')
900
+				e.WriteByte(b)
901
+			case '\n':
902
+				e.WriteByte('\\')
903
+				e.WriteByte('n')
904
+			case '\r':
905
+				e.WriteByte('\\')
906
+				e.WriteByte('r')
907
+			case '\t':
908
+				e.WriteByte('\\')
909
+				e.WriteByte('t')
910
+			default:
911
+				// This encodes bytes < 0x20 except for \n and \r,
912
+				// as well as <, >, and &. The latter are escaped because they
913
+				// can lead to security holes when user-controlled strings
914
+				// are rendered into JSON and served to some browsers.
915
+				e.WriteString(`\u00`)
916
+				e.WriteByte(hex[b>>4])
917
+				e.WriteByte(hex[b&0xF])
918
+			}
919
+			i++
920
+			start = i
921
+			continue
922
+		}
923
+		if e.canonical {
924
+			i++
925
+			continue
926
+		}
927
+		c, size := utf8.DecodeRune(s[i:])
928
+		if c == utf8.RuneError && size == 1 {
929
+			if start < i {
930
+				e.Write(s[start:i])
931
+			}
932
+			e.WriteString(`\ufffd`)
933
+			i += size
934
+			start = i
935
+			continue
936
+		}
937
+		// U+2028 is LINE SEPARATOR.
938
+		// U+2029 is PARAGRAPH SEPARATOR.
939
+		// They are both technically valid characters in JSON strings,
940
+		// but don't work in JSONP, which has to be evaluated as JavaScript,
941
+		// and can lead to security holes there. It is valid JSON to
942
+		// escape them, so we do so unconditionally.
943
+		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
944
+		if c == '\u2028' || c == '\u2029' {
945
+			if start < i {
946
+				e.Write(s[start:i])
947
+			}
948
+			e.WriteString(`\u202`)
949
+			e.WriteByte(hex[c&0xF])
950
+			i += size
951
+			start = i
952
+			continue
953
+		}
954
+		i += size
955
+	}
956
+	if start < len(s) {
957
+		e.Write(s[start:])
958
+	}
959
+	e.WriteByte('"')
960
+	return e.Len() - len0, nil
961
+}
962
+
963
+// A field represents a single field found in a struct.
964
+type field struct {
965
+	name      string
966
+	nameBytes []byte                 // []byte(name)
967
+	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
968
+
969
+	tag       bool
970
+	index     []int
971
+	typ       reflect.Type
972
+	omitEmpty bool
973
+	quoted    bool
974
+}
975
+
976
+func fillField(f field) field {
977
+	f.nameBytes = []byte(f.name)
978
+	f.equalFold = foldFunc(f.nameBytes)
979
+	return f
980
+}
981
+
982
+// byName sorts field by name, breaking ties with depth,
983
+// then breaking ties with "name came from json tag", then
984
+// breaking ties with index sequence.
985
+type byName []field
986
+
987
+func (x byName) Len() int { return len(x) }
988
+
989
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
990
+
991
+func (x byName) Less(i, j int) bool {
992
+	if x[i].name != x[j].name {
993
+		return x[i].name < x[j].name
994
+	}
995
+	if len(x[i].index) != len(x[j].index) {
996
+		return len(x[i].index) < len(x[j].index)
997
+	}
998
+	if x[i].tag != x[j].tag {
999
+		return x[i].tag
1000
+	}
1001
+	return byIndex(x).Less(i, j)
1002
+}
1003
+
1004
+// byIndex sorts field by index sequence.
1005
+type byIndex []field
1006
+
1007
+func (x byIndex) Len() int { return len(x) }
1008
+
1009
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
1010
+
1011
+func (x byIndex) Less(i, j int) bool {
1012
+	for k, xik := range x[i].index {
1013
+		if k >= len(x[j].index) {
1014
+			return false
1015
+		}
1016
+		if xik != x[j].index[k] {
1017
+			return xik < x[j].index[k]
1018
+		}
1019
+	}
1020
+	return len(x[i].index) < len(x[j].index)
1021
+}
1022
+
1023
+// typeFields returns a list of fields that JSON should recognize for the given type.
1024
+// The algorithm is breadth-first search over the set of structs to include - the top struct
1025
+// and then any reachable anonymous structs.
1026
+func typeFields(t reflect.Type) []field {
1027
+	// Anonymous fields to explore at the current level and the next.
1028
+	current := []field{}
1029
+	next := []field{{typ: t}}
1030
+
1031
+	// Count of queued names for current level and the next.
1032
+	count := map[reflect.Type]int{}
1033
+	nextCount := map[reflect.Type]int{}
1034
+
1035
+	// Types already visited at an earlier level.
1036
+	visited := map[reflect.Type]bool{}
1037
+
1038
+	// Fields found.
1039
+	var fields []field
1040
+
1041
+	for len(next) > 0 {
1042
+		current, next = next, current[:0]
1043
+		count, nextCount = nextCount, map[reflect.Type]int{}
1044
+
1045
+		for _, f := range current {
1046
+			if visited[f.typ] {
1047
+				continue
1048
+			}
1049
+			visited[f.typ] = true
1050
+
1051
+			// Scan f.typ for fields to include.
1052
+			for i := 0; i < f.typ.NumField(); i++ {
1053
+				sf := f.typ.Field(i)
1054
+				if sf.PkgPath != "" { // unexported
1055
+					continue
1056
+				}
1057
+				tag := sf.Tag.Get("json")
1058
+				if tag == "-" {
1059
+					continue
1060
+				}
1061
+				name, opts := parseTag(tag)
1062
+				if !isValidTag(name) {
1063
+					name = ""
1064
+				}
1065
+				index := make([]int, len(f.index)+1)
1066
+				copy(index, f.index)
1067
+				index[len(f.index)] = i
1068
+
1069
+				ft := sf.Type
1070
+				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
1071
+					// Follow pointer.
1072
+					ft = ft.Elem()
1073
+				}
1074
+
1075
+				// Only strings, floats, integers, and booleans can be quoted.
1076
+				quoted := false
1077
+				if opts.Contains("string") {
1078
+					switch ft.Kind() {
1079
+					case reflect.Bool,
1080
+						reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
1081
+						reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
1082
+						reflect.Float32, reflect.Float64,
1083
+						reflect.String:
1084
+						quoted = true
1085
+					}
1086
+				}
1087
+
1088
+				// Record found field and index sequence.
1089
+				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
1090
+					tagged := name != ""
1091
+					if name == "" {
1092
+						name = sf.Name
1093
+					}
1094
+					fields = append(fields, fillField(field{
1095
+						name:      name,
1096
+						tag:       tagged,
1097
+						index:     index,
1098
+						typ:       ft,
1099
+						omitEmpty: opts.Contains("omitempty"),
1100
+						quoted:    quoted,
1101
+					}))
1102
+					if count[f.typ] > 1 {
1103
+						// If there were multiple instances, add a second,
1104
+						// so that the annihilation code will see a duplicate.
1105
+						// It only cares about the distinction between 1 or 2,
1106
+						// so don't bother generating any more copies.
1107
+						fields = append(fields, fields[len(fields)-1])
1108
+					}
1109
+					continue
1110
+				}
1111
+
1112
+				// Record new anonymous struct to explore in next round.
1113
+				nextCount[ft]++
1114
+				if nextCount[ft] == 1 {
1115
+					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
1116
+				}
1117
+			}
1118
+		}
1119
+	}
1120
+
1121
+	sort.Sort(byName(fields))
1122
+
1123
+	// Delete all fields that are hidden by the Go rules for embedded fields,
1124
+	// except that fields with JSON tags are promoted.
1125
+
1126
+	// The fields are sorted in primary order of name, secondary order
1127
+	// of field index length. Loop over names; for each name, delete
1128
+	// hidden fields by choosing the one dominant field that survives.
1129
+	out := fields[:0]
1130
+	for advance, i := 0, 0; i < len(fields); i += advance {
1131
+		// One iteration per name.
1132
+		// Find the sequence of fields with the name of this first field.
1133
+		fi := fields[i]
1134
+		name := fi.name
1135
+		for advance = 1; i+advance < len(fields); advance++ {
1136
+			fj := fields[i+advance]
1137
+			if fj.name != name {
1138
+				break
1139
+			}
1140
+		}
1141
+		if advance == 1 { // Only one field with this name
1142
+			out = append(out, fi)
1143
+			continue
1144
+		}
1145
+		dominant, ok := dominantField(fields[i : i+advance])
1146
+		if ok {
1147
+			out = append(out, dominant)
1148
+		}
1149
+	}
1150
+
1151
+	return out
1152
+}
1153
+
1154
+// dominantField looks through the fields, all of which are known to
1155
+// have the same name, to find the single field that dominates the
1156
+// others using Go's embedding rules, modified by the presence of
1157
+// JSON tags. If there are multiple top-level fields, the boolean
1158
+// will be false: This condition is an error in Go and we skip all
1159
+// the fields.
1160
+func dominantField(fields []field) (field, bool) {
1161
+	// The fields are sorted in increasing index-length order. The winner
1162
+	// must therefore be one with the shortest index length. Drop all
1163
+	// longer entries, which is easy: just truncate the slice.
1164
+	length := len(fields[0].index)
1165
+	tagged := -1 // Index of first tagged field.
1166
+	for i, f := range fields {
1167
+		if len(f.index) > length {
1168
+			fields = fields[:i]
1169
+			break
1170
+		}
1171
+		if f.tag {
1172
+			if tagged >= 0 {
1173
+				// Multiple tagged fields at the same level: conflict.
1174
+				// Return no field.
1175
+				return field{}, false
1176
+			}
1177
+			tagged = i
1178
+		}
1179
+	}
1180
+	if tagged >= 0 {
1181
+		return fields[tagged], true
1182
+	}
1183
+	// All remaining fields have the same length. If there's more than one,
1184
+	// we have a conflict (two fields named "X" at the same level) and we
1185
+	// return no field.
1186
+	if len(fields) > 1 {
1187
+		return field{}, false
1188
+	}
1189
+	return fields[0], true
1190
+}
1191
+
1192
+type fields struct {
1193
+	byName  []field
1194
+	byIndex []field
1195
+}
1196
+
1197
+var fieldCache struct {
1198
+	sync.RWMutex
1199
+	m map[reflect.Type]*fields
1200
+}
1201
+
1202
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
1203
+func cachedTypeFields(t reflect.Type, canonical bool) []field {
1204
+	fieldCache.RLock()
1205
+	x := fieldCache.m[t]
1206
+	fieldCache.RUnlock()
1207
+
1208
+	var f []field
1209
+	if x != nil {
1210
+		if canonical {
1211
+			f = x.byName
1212
+		}
1213
+		f = x.byIndex
1214
+	}
1215
+	if f != nil {
1216
+		return f
1217
+	}
1218
+
1219
+	// Compute fields without lock.
1220
+	// Might duplicate effort but won't hold other computations back.
1221
+	f = typeFields(t)
1222
+	if f == nil {
1223
+		f = []field{}
1224
+	}
1225
+	if !canonical {
1226
+		sort.Sort(byIndex(f))
1227
+	}
1228
+
1229
+	fieldCache.Lock()
1230
+	if fieldCache.m == nil {
1231
+		fieldCache.m = map[reflect.Type]*fields{}
1232
+	}
1233
+	x = fieldCache.m[t]
1234
+	fieldCache.Unlock()
1235
+	if x == nil {
1236
+		x = new(fields)
1237
+	}
1238
+	if canonical {
1239
+		x.byName = f
1240
+	} else {
1241
+		x.byIndex = f
1242
+	}
1243
+	return f
1244
+}
0 1245
new file mode 100644
... ...
@@ -0,0 +1,143 @@
0
+// Copyright 2013 The Go Authors. All rights reserved.
1
+// Use of this source code is governed by a BSD-style
2
+// license that can be found in the LICENSE file.
3
+
4
+package json
5
+
6
+import (
7
+	"bytes"
8
+	"unicode/utf8"
9
+)
10
+
11
+const (
12
+	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
13
+	kelvin       = '\u212a'
14
+	smallLongEss = '\u017f'
15
+)
16
+
17
+// foldFunc returns one of four different case folding equivalence
18
+// functions, from most general (and slow) to fastest:
19
+//
20
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
21
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
22
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
23
+// 4) simpleLetterEqualFold, no specials, no non-letters.
24
+//
25
+// The letters S and K are special because they map to 3 runes, not just 2:
26
+//  * S maps to s and to U+017F 'Å¿' Latin small letter long s
27
+//  * k maps to K and to U+212A 'K' Kelvin sign
28
+// See https://play.golang.org/p/tTxjOc0OGo
29
+//
30
+// The returned function is specialized for matching against s and
31
+// should only be given s. It's not curried for performance reasons.
32
+func foldFunc(s []byte) func(s, t []byte) bool {
33
+	nonLetter := false
34
+	special := false // special letter
35
+	for _, b := range s {
36
+		if b >= utf8.RuneSelf {
37
+			return bytes.EqualFold
38
+		}
39
+		upper := b & caseMask
40
+		if upper < 'A' || upper > 'Z' {
41
+			nonLetter = true
42
+		} else if upper == 'K' || upper == 'S' {
43
+			// See above for why these letters are special.
44
+			special = true
45
+		}
46
+	}
47
+	if special {
48
+		return equalFoldRight
49
+	}
50
+	if nonLetter {
51
+		return asciiEqualFold
52
+	}
53
+	return simpleLetterEqualFold
54
+}
55
+
56
+// equalFoldRight is a specialization of bytes.EqualFold when s is
57
+// known to be all ASCII (including punctuation), but contains an 's',
58
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
59
+// See comments on foldFunc.
60
+func equalFoldRight(s, t []byte) bool {
61
+	for _, sb := range s {
62
+		if len(t) == 0 {
63
+			return false
64
+		}
65
+		tb := t[0]
66
+		if tb < utf8.RuneSelf {
67
+			if sb != tb {
68
+				sbUpper := sb & caseMask
69
+				if 'A' <= sbUpper && sbUpper <= 'Z' {
70
+					if sbUpper != tb&caseMask {
71
+						return false
72
+					}
73
+				} else {
74
+					return false
75
+				}
76
+			}
77
+			t = t[1:]
78
+			continue
79
+		}
80
+		// sb is ASCII and t is not. t must be either kelvin
81
+		// sign or long s; sb must be s, S, k, or K.
82
+		tr, size := utf8.DecodeRune(t)
83
+		switch sb {
84
+		case 's', 'S':
85
+			if tr != smallLongEss {
86
+				return false
87
+			}
88
+		case 'k', 'K':
89
+			if tr != kelvin {
90
+				return false
91
+			}
92
+		default:
93
+			return false
94
+		}
95
+		t = t[size:]
96
+
97
+	}
98
+	if len(t) > 0 {
99
+		return false
100
+	}
101
+	return true
102
+}
103
+
104
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
105
+// s is all ASCII (but may contain non-letters) and contains no
106
+// special-folding letters.
107
+// See comments on foldFunc.
108
+func asciiEqualFold(s, t []byte) bool {
109
+	if len(s) != len(t) {
110
+		return false
111
+	}
112
+	for i, sb := range s {
113
+		tb := t[i]
114
+		if sb == tb {
115
+			continue
116
+		}
117
+		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
118
+			if sb&caseMask != tb&caseMask {
119
+				return false
120
+			}
121
+		} else {
122
+			return false
123
+		}
124
+	}
125
+	return true
126
+}
127
+
128
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
129
+// use when s is all ASCII letters (no underscores, etc) and also
130
+// doesn't contain 'k', 'K', 's', or 'S'.
131
+// See comments on foldFunc.
132
+func simpleLetterEqualFold(s, t []byte) bool {
133
+	if len(s) != len(t) {
134
+		return false
135
+	}
136
+	for i, b := range s {
137
+		if b&caseMask != t[i]&caseMask {
138
+			return false
139
+		}
140
+	}
141
+	return true
142
+}
0 143
new file mode 100644
... ...
@@ -0,0 +1,137 @@
0
+// Copyright 2010 The Go Authors.  All rights reserved.
1
+// Use of this source code is governed by a BSD-style
2
+// license that can be found in the LICENSE file.
3
+
4
+package json
5
+
6
+import "bytes"
7
+
8
+// Compact appends to dst the JSON-encoded src with
9
+// insignificant space characters elided.
10
+func Compact(dst *bytes.Buffer, src []byte) error {
11
+	return compact(dst, src, false)
12
+}
13
+
14
+func compact(dst *bytes.Buffer, src []byte, escape bool) error {
15
+	origLen := dst.Len()
16
+	var scan scanner
17
+	scan.reset()
18
+	start := 0
19
+	for i, c := range src {
20
+		if escape && (c == '<' || c == '>' || c == '&') {
21
+			if start < i {
22
+				dst.Write(src[start:i])
23
+			}
24
+			dst.WriteString(`\u00`)
25
+			dst.WriteByte(hex[c>>4])
26
+			dst.WriteByte(hex[c&0xF])
27
+			start = i + 1
28
+		}
29
+		// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
30
+		if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
31
+			if start < i {
32
+				dst.Write(src[start:i])
33
+			}
34
+			dst.WriteString(`\u202`)
35
+			dst.WriteByte(hex[src[i+2]&0xF])
36
+			start = i + 3
37
+		}
38
+		v := scan.step(&scan, int(c))
39
+		if v >= scanSkipSpace {
40
+			if v == scanError {
41
+				break
42
+			}
43
+			if start < i {
44
+				dst.Write(src[start:i])
45
+			}
46
+			start = i + 1
47
+		}
48
+	}
49
+	if scan.eof() == scanError {
50
+		dst.Truncate(origLen)
51
+		return scan.err
52
+	}
53
+	if start < len(src) {
54
+		dst.Write(src[start:])
55
+	}
56
+	return nil
57
+}
58
+
59
+func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
60
+	dst.WriteByte('\n')
61
+	dst.WriteString(prefix)
62
+	for i := 0; i < depth; i++ {
63
+		dst.WriteString(indent)
64
+	}
65
+}
66
+
67
+// Indent appends to dst an indented form of the JSON-encoded src.
68
+// Each element in a JSON object or array begins on a new,
69
+// indented line beginning with prefix followed by one or more
70
+// copies of indent according to the indentation nesting.
71
+// The data appended to dst does not begin with the prefix nor
72
+// any indentation, and has no trailing newline, to make it
73
+// easier to embed inside other formatted JSON data.
74
+func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
75
+	origLen := dst.Len()
76
+	var scan scanner
77
+	scan.reset()
78
+	needIndent := false
79
+	depth := 0
80
+	for _, c := range src {
81
+		scan.bytes++
82
+		v := scan.step(&scan, int(c))
83
+		if v == scanSkipSpace {
84
+			continue
85
+		}
86
+		if v == scanError {
87
+			break
88
+		}
89
+		if needIndent && v != scanEndObject && v != scanEndArray {
90
+			needIndent = false
91
+			depth++
92
+			newline(dst, prefix, indent, depth)
93
+		}
94
+
95
+		// Emit semantically uninteresting bytes
96
+		// (in particular, punctuation in strings) unmodified.
97
+		if v == scanContinue {
98
+			dst.WriteByte(c)
99
+			continue
100
+		}
101
+
102
+		// Add spacing around real punctuation.
103
+		switch c {
104
+		case '{', '[':
105
+			// delay indent so that empty object and array are formatted as {} and [].
106
+			needIndent = true
107
+			dst.WriteByte(c)
108
+
109
+		case ',':
110
+			dst.WriteByte(c)
111
+			newline(dst, prefix, indent, depth)
112
+
113
+		case ':':
114
+			dst.WriteByte(c)
115
+			dst.WriteByte(' ')
116
+
117
+		case '}', ']':
118
+			if needIndent {
119
+				// suppress indent in empty object/array
120
+				needIndent = false
121
+			} else {
122
+				depth--
123
+				newline(dst, prefix, indent, depth)
124
+			}
125
+			dst.WriteByte(c)
126
+
127
+		default:
128
+			dst.WriteByte(c)
129
+		}
130
+	}
131
+	if scan.eof() == scanError {
132
+		dst.Truncate(origLen)
133
+		return scan.err
134
+	}
135
+	return nil
136
+}
0 137
new file mode 100644
... ...
@@ -0,0 +1,630 @@
0
+// Copyright 2010 The Go Authors.  All rights reserved.
1
+// Use of this source code is governed by a BSD-style
2
+// license that can be found in the LICENSE file.
3
+
4
+package json
5
+
6
+// JSON value parser state machine.
7
+// Just about at the limit of what is reasonable to write by hand.
8
+// Some parts are a bit tedious, but overall it nicely factors out the
9
+// otherwise common code from the multiple scanning functions
10
+// in this package (Compact, Indent, checkValid, nextValue, etc).
11
+//
12
+// This file starts with two simple examples using the scanner
13
+// before diving into the scanner itself.
14
+
15
+import "strconv"
16
+
17
+// checkValid verifies that data is valid JSON-encoded data.
18
+// scan is passed in for use by checkValid to avoid an allocation.
19
+func checkValid(data []byte, scan *scanner) error {
20
+	scan.reset()
21
+	for _, c := range data {
22
+		scan.bytes++
23
+		if scan.step(scan, int(c)) == scanError {
24
+			return scan.err
25
+		}
26
+	}
27
+	if scan.eof() == scanError {
28
+		return scan.err
29
+	}
30
+	return nil
31
+}
32
+
33
+// nextValue splits data after the next whole JSON value,
34
+// returning that value and the bytes that follow it as separate slices.
35
+// scan is passed in for use by nextValue to avoid an allocation.
36
+func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
37
+	scan.reset()
38
+	for i, c := range data {
39
+		v := scan.step(scan, int(c))
40
+		if v >= scanEndObject {
41
+			switch v {
42
+			// probe the scanner with a space to determine whether we will
43
+			// get scanEnd on the next character. Otherwise, if the next character
44
+			// is not a space, scanEndTop allocates a needless error.
45
+			case scanEndObject, scanEndArray:
46
+				if scan.step(scan, ' ') == scanEnd {
47
+					return data[:i+1], data[i+1:], nil
48
+				}
49
+			case scanError:
50
+				return nil, nil, scan.err
51
+			case scanEnd:
52
+				return data[0:i], data[i:], nil
53
+			}
54
+		}
55
+	}
56
+	if scan.eof() == scanError {
57
+		return nil, nil, scan.err
58
+	}
59
+	return data, nil, nil
60
+}
61
+
62
+// A SyntaxError is a description of a JSON syntax error.
63
+type SyntaxError struct {
64
+	msg    string // description of error
65
+	Offset int64  // error occurred after reading Offset bytes
66
+}
67
+
68
+func (e *SyntaxError) Error() string { return e.msg }
69
+
70
+// A scanner is a JSON scanning state machine.
71
+// Callers call scan.reset() and then pass bytes in one at a time
72
+// by calling scan.step(&scan, c) for each byte.
73
+// The return value, referred to as an opcode, tells the
74
+// caller about significant parsing events like beginning
75
+// and ending literals, objects, and arrays, so that the
76
+// caller can follow along if it wishes.
77
+// The return value scanEnd indicates that a single top-level
78
+// JSON value has been completed, *before* the byte that
79
+// just got passed in.  (The indication must be delayed in order
80
+// to recognize the end of numbers: is 123 a whole value or
81
+// the beginning of 12345e+6?).
82
+type scanner struct {
83
+	// The step is a func to be called to execute the next transition.
84
+	// Also tried using an integer constant and a single func
85
+	// with a switch, but using the func directly was 10% faster
86
+	// on a 64-bit Mac Mini, and it's nicer to read.
87
+	step func(*scanner, int) int
88
+
89
+	// Reached end of top-level value.
90
+	endTop bool
91
+
92
+	// Stack of what we're in the middle of - array values, object keys, object values.
93
+	parseState []int
94
+
95
+	// Error that happened, if any.
96
+	err error
97
+
98
+	// 1-byte redo (see undo method)
99
+	redo      bool
100
+	redoCode  int
101
+	redoState func(*scanner, int) int
102
+
103
+	// total bytes consumed, updated by decoder.Decode
104
+	bytes int64
105
+}
106
+
107
+// These values are returned by the state transition functions
108
+// assigned to scanner.state and the method scanner.eof.
109
+// They give details about the current state of the scan that
110
+// callers might be interested to know about.
111
+// It is okay to ignore the return value of any particular
112
+// call to scanner.state: if one call returns scanError,
113
+// every subsequent call will return scanError too.
114
+const (
115
+	// Continue.
116
+	scanContinue     = iota // uninteresting byte
117
+	scanBeginLiteral        // end implied by next result != scanContinue
118
+	scanBeginObject         // begin object
119
+	scanObjectKey           // just finished object key (string)
120
+	scanObjectValue         // just finished non-last object value
121
+	scanEndObject           // end object (implies scanObjectValue if possible)
122
+	scanBeginArray          // begin array
123
+	scanArrayValue          // just finished array value
124
+	scanEndArray            // end array (implies scanArrayValue if possible)
125
+	scanSkipSpace           // space byte; can skip; known to be last "continue" result
126
+
127
+	// Stop.
128
+	scanEnd   // top-level value ended *before* this byte; known to be first "stop" result
129
+	scanError // hit an error, scanner.err.
130
+)
131
+
132
+// These values are stored in the parseState stack.
133
+// They give the current state of a composite value
134
+// being scanned.  If the parser is inside a nested value
135
+// the parseState describes the nested state, outermost at entry 0.
136
+const (
137
+	parseObjectKey   = iota // parsing object key (before colon)
138
+	parseObjectValue        // parsing object value (after colon)
139
+	parseArrayValue         // parsing array value
140
+)
141
+
142
+// reset prepares the scanner for use.
143
+// It must be called before calling s.step.
144
+func (s *scanner) reset() {
145
+	s.step = stateBeginValue
146
+	s.parseState = s.parseState[0:0]
147
+	s.err = nil
148
+	s.redo = false
149
+	s.endTop = false
150
+}
151
+
152
+// eof tells the scanner that the end of input has been reached.
153
+// It returns a scan status just as s.step does.
154
+func (s *scanner) eof() int {
155
+	if s.err != nil {
156
+		return scanError
157
+	}
158
+	if s.endTop {
159
+		return scanEnd
160
+	}
161
+	s.step(s, ' ')
162
+	if s.endTop {
163
+		return scanEnd
164
+	}
165
+	if s.err == nil {
166
+		s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
167
+	}
168
+	return scanError
169
+}
170
+
171
+// pushParseState pushes a new parse state p onto the parse stack.
172
+func (s *scanner) pushParseState(p int) {
173
+	s.parseState = append(s.parseState, p)
174
+}
175
+
176
+// popParseState pops a parse state (already obtained) off the stack
177
+// and updates s.step accordingly.
178
+func (s *scanner) popParseState() {
179
+	n := len(s.parseState) - 1
180
+	s.parseState = s.parseState[0:n]
181
+	s.redo = false
182
+	if n == 0 {
183
+		s.step = stateEndTop
184
+		s.endTop = true
185
+	} else {
186
+		s.step = stateEndValue
187
+	}
188
+}
189
+
190
+func isSpace(c rune) bool {
191
+	return c == ' ' || c == '\t' || c == '\r' || c == '\n'
192
+}
193
+
194
+// stateBeginValueOrEmpty is the state after reading `[`.
195
+func stateBeginValueOrEmpty(s *scanner, c int) int {
196
+	if c <= ' ' && isSpace(rune(c)) {
197
+		return scanSkipSpace
198
+	}
199
+	if c == ']' {
200
+		return stateEndValue(s, c)
201
+	}
202
+	return stateBeginValue(s, c)
203
+}
204
+
205
+// stateBeginValue is the state at the beginning of the input.
206
+func stateBeginValue(s *scanner, c int) int {
207
+	if c <= ' ' && isSpace(rune(c)) {
208
+		return scanSkipSpace
209
+	}
210
+	switch c {
211
+	case '{':
212
+		s.step = stateBeginStringOrEmpty
213
+		s.pushParseState(parseObjectKey)
214
+		return scanBeginObject
215
+	case '[':
216
+		s.step = stateBeginValueOrEmpty
217
+		s.pushParseState(parseArrayValue)
218
+		return scanBeginArray
219
+	case '"':
220
+		s.step = stateInString
221
+		return scanBeginLiteral
222
+	case '-':
223
+		s.step = stateNeg
224
+		return scanBeginLiteral
225
+	case '0': // beginning of 0.123
226
+		s.step = state0
227
+		return scanBeginLiteral
228
+	case 't': // beginning of true
229
+		s.step = stateT
230
+		return scanBeginLiteral
231
+	case 'f': // beginning of false
232
+		s.step = stateF
233
+		return scanBeginLiteral
234
+	case 'n': // beginning of null
235
+		s.step = stateN
236
+		return scanBeginLiteral
237
+	}
238
+	if '1' <= c && c <= '9' { // beginning of 1234.5
239
+		s.step = state1
240
+		return scanBeginLiteral
241
+	}
242
+	return s.error(c, "looking for beginning of value")
243
+}
244
+
245
+// stateBeginStringOrEmpty is the state after reading `{`.
246
+func stateBeginStringOrEmpty(s *scanner, c int) int {
247
+	if c <= ' ' && isSpace(rune(c)) {
248
+		return scanSkipSpace
249
+	}
250
+	if c == '}' {
251
+		n := len(s.parseState)
252
+		s.parseState[n-1] = parseObjectValue
253
+		return stateEndValue(s, c)
254
+	}
255
+	return stateBeginString(s, c)
256
+}
257
+
258
+// stateBeginString is the state after reading `{"key": value,`.
259
+func stateBeginString(s *scanner, c int) int {
260
+	if c <= ' ' && isSpace(rune(c)) {
261
+		return scanSkipSpace
262
+	}
263
+	if c == '"' {
264
+		s.step = stateInString
265
+		return scanBeginLiteral
266
+	}
267
+	return s.error(c, "looking for beginning of object key string")
268
+}
269
+
270
+// stateEndValue is the state after completing a value,
271
+// such as after reading `{}` or `true` or `["x"`.
272
+func stateEndValue(s *scanner, c int) int {
273
+	n := len(s.parseState)
274
+	if n == 0 {
275
+		// Completed top-level before the current byte.
276
+		s.step = stateEndTop
277
+		s.endTop = true
278
+		return stateEndTop(s, c)
279
+	}
280
+	if c <= ' ' && isSpace(rune(c)) {
281
+		s.step = stateEndValue
282
+		return scanSkipSpace
283
+	}
284
+	ps := s.parseState[n-1]
285
+	switch ps {
286
+	case parseObjectKey:
287
+		if c == ':' {
288
+			s.parseState[n-1] = parseObjectValue
289
+			s.step = stateBeginValue
290
+			return scanObjectKey
291
+		}
292
+		return s.error(c, "after object key")
293
+	case parseObjectValue:
294
+		if c == ',' {
295
+			s.parseState[n-1] = parseObjectKey
296
+			s.step = stateBeginString
297
+			return scanObjectValue
298
+		}
299
+		if c == '}' {
300
+			s.popParseState()
301
+			return scanEndObject
302
+		}
303
+		return s.error(c, "after object key:value pair")
304
+	case parseArrayValue:
305
+		if c == ',' {
306
+			s.step = stateBeginValue
307
+			return scanArrayValue
308
+		}
309
+		if c == ']' {
310
+			s.popParseState()
311
+			return scanEndArray
312
+		}
313
+		return s.error(c, "after array element")
314
+	}
315
+	return s.error(c, "")
316
+}
317
+
318
+// stateEndTop is the state after finishing the top-level value,
319
+// such as after reading `{}` or `[1,2,3]`.
320
+// Only space characters should be seen now.
321
+func stateEndTop(s *scanner, c int) int {
322
+	if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
323
+		// Complain about non-space byte on next call.
324
+		s.error(c, "after top-level value")
325
+	}
326
+	return scanEnd
327
+}
328
+
329
+// stateInString is the state after reading `"`.
330
+func stateInString(s *scanner, c int) int {
331
+	if c == '"' {
332
+		s.step = stateEndValue
333
+		return scanContinue
334
+	}
335
+	if c == '\\' {
336
+		s.step = stateInStringEsc
337
+		return scanContinue
338
+	}
339
+	if c < 0x20 {
340
+		return s.error(c, "in string literal")
341
+	}
342
+	return scanContinue
343
+}
344
+
345
+// stateInStringEsc is the state after reading `"\` during a quoted string.
346
+func stateInStringEsc(s *scanner, c int) int {
347
+	switch c {
348
+	case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
349
+		s.step = stateInString
350
+		return scanContinue
351
+	}
352
+	if c == 'u' {
353
+		s.step = stateInStringEscU
354
+		return scanContinue
355
+	}
356
+	return s.error(c, "in string escape code")
357
+}
358
+
359
+// stateInStringEscU is the state after reading `"\u` during a quoted string.
360
+func stateInStringEscU(s *scanner, c int) int {
361
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
362
+		s.step = stateInStringEscU1
363
+		return scanContinue
364
+	}
365
+	// numbers
366
+	return s.error(c, "in \\u hexadecimal character escape")
367
+}
368
+
369
+// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
370
+func stateInStringEscU1(s *scanner, c int) int {
371
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
372
+		s.step = stateInStringEscU12
373
+		return scanContinue
374
+	}
375
+	// numbers
376
+	return s.error(c, "in \\u hexadecimal character escape")
377
+}
378
+
379
+// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
380
+func stateInStringEscU12(s *scanner, c int) int {
381
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
382
+		s.step = stateInStringEscU123
383
+		return scanContinue
384
+	}
385
+	// numbers
386
+	return s.error(c, "in \\u hexadecimal character escape")
387
+}
388
+
389
+// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
390
+func stateInStringEscU123(s *scanner, c int) int {
391
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
392
+		s.step = stateInString
393
+		return scanContinue
394
+	}
395
+	// numbers
396
+	return s.error(c, "in \\u hexadecimal character escape")
397
+}
398
+
399
+// stateNeg is the state after reading `-` during a number.
400
+func stateNeg(s *scanner, c int) int {
401
+	if c == '0' {
402
+		s.step = state0
403
+		return scanContinue
404
+	}
405
+	if '1' <= c && c <= '9' {
406
+		s.step = state1
407
+		return scanContinue
408
+	}
409
+	return s.error(c, "in numeric literal")
410
+}
411
+
412
+// state1 is the state after reading a non-zero integer during a number,
413
+// such as after reading `1` or `100` but not `0`.
414
+func state1(s *scanner, c int) int {
415
+	if '0' <= c && c <= '9' {
416
+		s.step = state1
417
+		return scanContinue
418
+	}
419
+	return state0(s, c)
420
+}
421
+
422
+// state0 is the state after reading `0` during a number.
423
+func state0(s *scanner, c int) int {
424
+	if c == '.' {
425
+		s.step = stateDot
426
+		return scanContinue
427
+	}
428
+	if c == 'e' || c == 'E' {
429
+		s.step = stateE
430
+		return scanContinue
431
+	}
432
+	return stateEndValue(s, c)
433
+}
434
+
435
+// stateDot is the state after reading the integer and decimal point in a number,
436
+// such as after reading `1.`.
437
+func stateDot(s *scanner, c int) int {
438
+	if '0' <= c && c <= '9' {
439
+		s.step = stateDot0
440
+		return scanContinue
441
+	}
442
+	return s.error(c, "after decimal point in numeric literal")
443
+}
444
+
445
+// stateDot0 is the state after reading the integer, decimal point, and subsequent
446
+// digits of a number, such as after reading `3.14`.
447
+func stateDot0(s *scanner, c int) int {
448
+	if '0' <= c && c <= '9' {
449
+		s.step = stateDot0
450
+		return scanContinue
451
+	}
452
+	if c == 'e' || c == 'E' {
453
+		s.step = stateE
454
+		return scanContinue
455
+	}
456
+	return stateEndValue(s, c)
457
+}
458
+
459
+// stateE is the state after reading the mantissa and e in a number,
460
+// such as after reading `314e` or `0.314e`.
461
+func stateE(s *scanner, c int) int {
462
+	if c == '+' {
463
+		s.step = stateESign
464
+		return scanContinue
465
+	}
466
+	if c == '-' {
467
+		s.step = stateESign
468
+		return scanContinue
469
+	}
470
+	return stateESign(s, c)
471
+}
472
+
473
+// stateESign is the state after reading the mantissa, e, and sign in a number,
474
+// such as after reading `314e-` or `0.314e+`.
475
+func stateESign(s *scanner, c int) int {
476
+	if '0' <= c && c <= '9' {
477
+		s.step = stateE0
478
+		return scanContinue
479
+	}
480
+	return s.error(c, "in exponent of numeric literal")
481
+}
482
+
483
+// stateE0 is the state after reading the mantissa, e, optional sign,
484
+// and at least one digit of the exponent in a number,
485
+// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
486
+func stateE0(s *scanner, c int) int {
487
+	if '0' <= c && c <= '9' {
488
+		s.step = stateE0
489
+		return scanContinue
490
+	}
491
+	return stateEndValue(s, c)
492
+}
493
+
494
+// stateT is the state after reading `t`.
495
+func stateT(s *scanner, c int) int {
496
+	if c == 'r' {
497
+		s.step = stateTr
498
+		return scanContinue
499
+	}
500
+	return s.error(c, "in literal true (expecting 'r')")
501
+}
502
+
503
+// stateTr is the state after reading `tr`.
504
+func stateTr(s *scanner, c int) int {
505
+	if c == 'u' {
506
+		s.step = stateTru
507
+		return scanContinue
508
+	}
509
+	return s.error(c, "in literal true (expecting 'u')")
510
+}
511
+
512
+// stateTru is the state after reading `tru`.
513
+func stateTru(s *scanner, c int) int {
514
+	if c == 'e' {
515
+		s.step = stateEndValue
516
+		return scanContinue
517
+	}
518
+	return s.error(c, "in literal true (expecting 'e')")
519
+}
520
+
521
+// stateF is the state after reading `f`.
522
+func stateF(s *scanner, c int) int {
523
+	if c == 'a' {
524
+		s.step = stateFa
525
+		return scanContinue
526
+	}
527
+	return s.error(c, "in literal false (expecting 'a')")
528
+}
529
+
530
+// stateFa is the state after reading `fa`.
531
+func stateFa(s *scanner, c int) int {
532
+	if c == 'l' {
533
+		s.step = stateFal
534
+		return scanContinue
535
+	}
536
+	return s.error(c, "in literal false (expecting 'l')")
537
+}
538
+
539
+// stateFal is the state after reading `fal`.
540
+func stateFal(s *scanner, c int) int {
541
+	if c == 's' {
542
+		s.step = stateFals
543
+		return scanContinue
544
+	}
545
+	return s.error(c, "in literal false (expecting 's')")
546
+}
547
+
548
+// stateFals is the state after reading `fals`.
549
+func stateFals(s *scanner, c int) int {
550
+	if c == 'e' {
551
+		s.step = stateEndValue
552
+		return scanContinue
553
+	}
554
+	return s.error(c, "in literal false (expecting 'e')")
555
+}
556
+
557
+// stateN is the state after reading `n`.
558
+func stateN(s *scanner, c int) int {
559
+	if c == 'u' {
560
+		s.step = stateNu
561
+		return scanContinue
562
+	}
563
+	return s.error(c, "in literal null (expecting 'u')")
564
+}
565
+
566
+// stateNu is the state after reading `nu`.
567
+func stateNu(s *scanner, c int) int {
568
+	if c == 'l' {
569
+		s.step = stateNul
570
+		return scanContinue
571
+	}
572
+	return s.error(c, "in literal null (expecting 'l')")
573
+}
574
+
575
+// stateNul is the state after reading `nul`.
576
+func stateNul(s *scanner, c int) int {
577
+	if c == 'l' {
578
+		s.step = stateEndValue
579
+		return scanContinue
580
+	}
581
+	return s.error(c, "in literal null (expecting 'l')")
582
+}
583
+
584
// stateError is the state after reaching a syntax error,
// such as after reading `[1}` or `5.1.2`.
// Once entered, the scanner stays in this state.
func stateError(s *scanner, c int) int {
	return scanError
}

// error records an error and switches to the error state.
// It returns scanError so state functions can `return s.error(...)`.
func (s *scanner) error(c int, context string) int {
	s.step = stateError
	// s.bytes is the running byte count, used as the error offset.
	s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
	return scanError
}
596
+
597
// quoteChar formats c as a quoted character literal for error
// messages, e.g. 'a', '\n', '"'.
func quoteChar(c int) string {
	// special cases - different from quoted strings
	if c == '\'' {
		return `'\''`
	}
	if c == '"' {
		return `'"'`
	}

	// Use a quoted string with different quotation marks.
	// The explicit rune conversion keeps the intended code-point
	// semantics and satisfies go vet's stringintconv check (enabled
	// by default under `go test`), which rejects a direct
	// int-to-string conversion.
	s := strconv.Quote(string(rune(c)))
	return "'" + s[1:len(s)-1] + "'"
}
611
+
612
// undo causes the scanner to return scanCode from the next state transition.
// This gives callers a simple 1-byte undo mechanism.
func (s *scanner) undo(scanCode int) {
	if s.redo {
		// Only one byte of undo is supported; a second undo before
		// the saved state is replayed is a programming error.
		panic("json: invalid use of scanner")
	}
	// Save the code and state to replay, then divert to stateRedo.
	s.redoCode = scanCode
	s.redoState = s.step
	s.step = stateRedo
	s.redo = true
}

// stateRedo helps implement the scanner's 1-byte undo: it emits the
// saved code once and restores the saved state function.
func stateRedo(s *scanner, c int) int {
	s.redo = false
	s.step = s.redoState
	return s.redoCode
}
0 630
new file mode 100644
... ...
@@ -0,0 +1,487 @@
0
+// Copyright 2010 The Go Authors.  All rights reserved.
1
+// Use of this source code is governed by a BSD-style
2
+// license that can be found in the LICENSE file.
3
+
4
+package json
5
+
6
+import (
7
+	"bytes"
8
+	"errors"
9
+	"io"
10
+)
11
+
12
// A Decoder reads and decodes JSON objects from an input stream.
type Decoder struct {
	r     io.Reader
	buf   []byte      // buffered input read from r but not yet consumed
	d     decodeState // reusable per-value decoder
	scanp int         // start of unread data in buf
	scan  scanner     // state machine used to find value boundaries
	err   error       // sticky read/syntax error; once set, Decode returns it

	// Token-API state: where we are in the value grammar, plus a stack
	// of the states of the enclosing arrays/objects.
	tokenState int
	tokenStack []int
}

// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may
// read data from r beyond the JSON values requested.
func NewDecoder(r io.Reader) *Decoder {
	return &Decoder{r: r}
}

// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
36
+
37
// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about
// the conversion of JSON into a Go value.
func (dec *Decoder) Decode(v interface{}) error {
	// A previous I/O or syntax error is sticky.
	if dec.err != nil {
		return dec.err
	}

	// If the Token API left us just before a ',' or ':', consume it so
	// the scanner sees a well-formed value next.
	if err := dec.tokenPrepareForDecode(); err != nil {
		return err
	}

	if !dec.tokenValueAllowed() {
		return &SyntaxError{msg: "not at beginning of value"}
	}

	// Read whole value into buffer.
	n, err := dec.readValue()
	if err != nil {
		return err
	}
	// Hand exactly the value's bytes to the decode state, then mark
	// them consumed.
	dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
	dec.scanp += n

	// Don't save err from unmarshal into dec.err:
	// the connection is still usable since we read a complete JSON
	// object from it before the error happened.
	err = dec.d.unmarshal(v)

	// fixup token streaming state
	dec.tokenValueEnd()

	return err
}
73
+
74
// Buffered returns a reader of the data remaining in the Decoder's
// buffer. The reader is valid until the next call to Decode.
func (dec *Decoder) Buffered() io.Reader {
	// The reader aliases dec.buf, so a later refill (which slides or
	// reallocates the buffer) invalidates it.
	return bytes.NewReader(dec.buf[dec.scanp:])
}
79
+
80
// readValue reads a JSON value into dec.buf.
// It returns the length of the encoding.
func (dec *Decoder) readValue() (int, error) {
	dec.scan.reset()

	scanp := dec.scanp
	var err error
Input:
	for {
		// Look in the buffer for a new value.
		for i, c := range dec.buf[scanp:] {
			dec.scan.bytes++
			v := dec.scan.step(&dec.scan, int(c))
			if v == scanEnd {
				scanp += i
				break Input
			}
			// scanEnd is delayed one byte.
			// We might block trying to get that byte from src,
			// so instead invent a space byte.
			if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
				scanp += i + 1
				break Input
			}
			if v == scanError {
				dec.err = dec.scan.err
				return 0, dec.scan.err
			}
		}
		// Entire buffer consumed without finding the value's end.
		scanp = len(dec.buf)

		// Did the last read have an error?
		// Delayed until now to allow buffer scan.
		if err != nil {
			if err == io.EOF {
				// A bare number at EOF still counts as a complete value.
				if dec.scan.step(&dec.scan, ' ') == scanEnd {
					break Input
				}
				// EOF mid-value is only an error if there was a value.
				if nonSpace(dec.buf) {
					err = io.ErrUnexpectedEOF
				}
			}
			dec.err = err
			return 0, err
		}

		// Remember how far we scanned relative to scanp, since refill
		// may slide the buffer and change dec.scanp.
		n := scanp - dec.scanp
		err = dec.refill()
		scanp = dec.scanp + n
	}
	return scanp - dec.scanp, nil
}
132
+
133
// refill reads more data from the underlying reader into dec.buf,
// first reclaiming already-consumed space and growing the buffer if
// needed. The read error is returned rather than stored, so the caller
// can finish scanning buffered data before reporting it.
func (dec *Decoder) refill() error {
	// Make room to read more into the buffer.
	// First slide down data already consumed.
	if dec.scanp > 0 {
		n := copy(dec.buf, dec.buf[dec.scanp:])
		dec.buf = dec.buf[:n]
		dec.scanp = 0
	}

	// Grow buffer if not large enough.
	const minRead = 512
	if cap(dec.buf)-len(dec.buf) < minRead {
		newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
		copy(newBuf, dec.buf)
		dec.buf = newBuf
	}

	// Read.  Delay error for next iteration (after scan).
	n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
	dec.buf = dec.buf[0 : len(dec.buf)+n]

	return err
}
156
+
157
+func nonSpace(b []byte) bool {
158
+	for _, c := range b {
159
+		if !isSpace(rune(c)) {
160
+			return true
161
+		}
162
+	}
163
+	return false
164
+}
165
+
166
// An Encoder writes JSON objects to an output stream.
type Encoder struct {
	w         io.Writer
	err       error // sticky write error; once set, Encode returns it
	canonical bool  // when true, Encode emits Canonical JSON and no trailing newline
}

// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
	return &Encoder{w: w}
}

// Canonical causes the encoder to switch to Canonical JSON mode.
// Read more at: http://wiki.laptop.org/go/Canonical_JSON
func (enc *Encoder) Canonical() { enc.canonical = true }
181
+
182
// Encode writes the JSON encoding of v to the stream,
// followed by a newline character (omitted in canonical mode).
//
// See the documentation for Marshal for details about the
// conversion of Go values to JSON.
func (enc *Encoder) Encode(v interface{}) error {
	if enc.err != nil {
		return enc.err
	}
	// The encode state carries the canonical flag for the duration of
	// this marshal.
	e := newEncodeState(enc.canonical)
	err := e.marshal(v)
	if err != nil {
		return err
	}

	if !enc.canonical {
		// Terminate each value with a newline.
		// This makes the output look a little nicer
		// when debugging, and some kind of space
		// is required if the encoded value was a number,
		// so that the reader knows there aren't more
		// digits coming.
		e.WriteByte('\n')
	}

	// Only a write failure is sticky; a marshal error above leaves the
	// encoder usable.
	if _, err = enc.w.Write(e.Bytes()); err != nil {
		enc.err = err
	}
	// Return the encode state to the pool for reuse.
	encodeStatePool.Put(e)
	return err
}
213
+
214
// RawMessage is a raw encoded JSON object.
// It implements Marshaler and Unmarshaler and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage []byte

// MarshalJSON returns *m as the JSON encoding of m.
// NOTE(review): a nil *m is returned as-is, which is not valid JSON —
// confirm callers never marshal an unset RawMessage.
func (m *RawMessage) MarshalJSON() ([]byte, error) {
	return *m, nil
}

// UnmarshalJSON sets *m to a copy of data.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
	if m == nil {
		return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
	}
	// Reuse m's backing array when it has capacity.
	*m = append((*m)[0:0], data...)
	return nil
}

// Compile-time checks that *RawMessage implements both interfaces.
var _ Marshaler = (*RawMessage)(nil)
var _ Unmarshaler = (*RawMessage)(nil)
235
+
236
// A Token holds a value of one of these types:
//
//	Delim, for the four JSON delimiters [ ] { }
//	bool, for JSON booleans
//	float64, for JSON numbers
//	Number, for JSON numbers
//	string, for JSON string literals
//	nil, for JSON null
//
type Token interface{}

// Token-API parser states, tracking position within the value grammar.
const (
	tokenTopValue   = iota // at top level, before a value
	tokenArrayStart        // just after '['
	tokenArrayValue        // before an array element
	tokenArrayComma        // after an array element, expecting ',' or ']'
	tokenObjectStart       // just after '{'
	tokenObjectKey         // before an object key string
	tokenObjectColon       // after an object key, expecting ':'
	tokenObjectValue       // before an object value
	tokenObjectComma       // after an object value, expecting ',' or '}'
)
258
+
259
// tokenPrepareForDecode advances the token state from a separator state
// to a value state, consuming the pending ',' or ':' so a subsequent
// Decode starts at the beginning of a value.
func (dec *Decoder) tokenPrepareForDecode() error {
	// Note: Not calling peek before switch, to avoid
	// putting peek into the standard Decode path.
	// peek is only called when using the Token API.
	switch dec.tokenState {
	case tokenArrayComma:
		c, err := dec.peek()
		if err != nil {
			return err
		}
		if c != ',' {
			return &SyntaxError{"expected comma after array element", 0}
		}
		dec.scanp++ // consume the ','
		dec.tokenState = tokenArrayValue
	case tokenObjectColon:
		c, err := dec.peek()
		if err != nil {
			return err
		}
		if c != ':' {
			return &SyntaxError{"expected colon after object key", 0}
		}
		dec.scanp++ // consume the ':'
		dec.tokenState = tokenObjectValue
	}
	return nil
}
288
+
289
+func (dec *Decoder) tokenValueAllowed() bool {
290
+	switch dec.tokenState {
291
+	case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
292
+		return true
293
+	}
294
+	return false
295
+}
296
+
297
+func (dec *Decoder) tokenValueEnd() {
298
+	switch dec.tokenState {
299
+	case tokenArrayStart, tokenArrayValue:
300
+		dec.tokenState = tokenArrayComma
301
+	case tokenObjectValue:
302
+		dec.tokenState = tokenObjectComma
303
+	}
304
+}
305
+
306
// A Delim is a JSON array or object delimiter, one of [ ] { or }.
type Delim rune

// String returns the delimiter as a one-character string.
func (d Delim) String() string {
	return string(d)
}
312
+
313
// Token returns the next JSON token in the input stream.
// At the end of the input stream, Token returns nil, io.EOF.
//
// Token guarantees that the delimiters [ ] { } it returns are
// properly nested and matched: if Token encounters an unexpected
// delimiter in the input, it will return an error.
//
// The input stream consists of basic JSON values—bool, string,
// number, and null—along with delimiters [ ] { } of type Delim
// to mark the start and end of arrays and objects.
// Commas and colons are elided.
func (dec *Decoder) Token() (Token, error) {
	// Loop until a reportable token is found; separators (',' ':')
	// only update state and continue.
	for {
		c, err := dec.peek()
		if err != nil {
			return nil, err
		}
		switch c {
		case '[':
			if !dec.tokenValueAllowed() {
				return dec.tokenError(c)
			}
			dec.scanp++
			// Push the enclosing state so ']' can restore it.
			dec.tokenStack = append(dec.tokenStack, dec.tokenState)
			dec.tokenState = tokenArrayStart
			return Delim('['), nil

		case ']':
			if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
				return dec.tokenError(c)
			}
			dec.scanp++
			// Pop the enclosing state and mark the array as a finished value.
			dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
			dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
			dec.tokenValueEnd()
			return Delim(']'), nil

		case '{':
			if !dec.tokenValueAllowed() {
				return dec.tokenError(c)
			}
			dec.scanp++
			dec.tokenStack = append(dec.tokenStack, dec.tokenState)
			dec.tokenState = tokenObjectStart
			return Delim('{'), nil

		case '}':
			if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
				return dec.tokenError(c)
			}
			dec.scanp++
			dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
			dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
			dec.tokenValueEnd()
			return Delim('}'), nil

		case ':':
			if dec.tokenState != tokenObjectColon {
				return dec.tokenError(c)
			}
			dec.scanp++
			dec.tokenState = tokenObjectValue
			continue

		case ',':
			if dec.tokenState == tokenArrayComma {
				dec.scanp++
				dec.tokenState = tokenArrayValue
				continue
			}
			if dec.tokenState == tokenObjectComma {
				dec.scanp++
				dec.tokenState = tokenObjectKey
				continue
			}
			return dec.tokenError(c)

		case '"':
			if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
				// Object key: decode the string via Decode, temporarily
				// pretending we're at top level so Decode allows a value.
				var x string
				old := dec.tokenState
				dec.tokenState = tokenTopValue
				err := dec.Decode(&x)
				dec.tokenState = old
				if err != nil {
					clearOffset(err)
					return nil, err
				}
				dec.tokenState = tokenObjectColon
				return x, nil
			}
			// A string in value position is handled like any other value.
			fallthrough

		default:
			if !dec.tokenValueAllowed() {
				return dec.tokenError(c)
			}
			var x interface{}
			if err := dec.Decode(&x); err != nil {
				clearOffset(err)
				return nil, err
			}
			return x, nil
		}
	}
}
419
+
420
+func clearOffset(err error) {
421
+	if s, ok := err.(*SyntaxError); ok {
422
+		s.Offset = 0
423
+	}
424
+}
425
+
426
+func (dec *Decoder) tokenError(c byte) (Token, error) {
427
+	var context string
428
+	switch dec.tokenState {
429
+	case tokenTopValue:
430
+		context = " looking for beginning of value"
431
+	case tokenArrayStart, tokenArrayValue, tokenObjectValue:
432
+		context = " looking for beginning of value"
433
+	case tokenArrayComma:
434
+		context = " after array element"
435
+	case tokenObjectKey:
436
+		context = " looking for beginning of object key string"
437
+	case tokenObjectColon:
438
+		context = " after object key"
439
+	case tokenObjectComma:
440
+		context = " after object key:value pair"
441
+	}
442
+	return nil, &SyntaxError{"invalid character " + quoteChar(int(c)) + " " + context, 0}
443
+}
444
+
445
// More reports whether there is another element in the
// current array or object being parsed.
func (dec *Decoder) More() bool {
	// Any non-closing, non-error byte means another element follows.
	c, err := dec.peek()
	return err == nil && c != ']' && c != '}'
}
451
+
452
// peek returns the next non-space byte of input without consuming it,
// refilling the buffer from the underlying reader as needed.
// dec.scanp is left pointing at the returned byte.
func (dec *Decoder) peek() (byte, error) {
	var err error
	for {
		for i := dec.scanp; i < len(dec.buf); i++ {
			c := dec.buf[i]
			if isSpace(rune(c)) {
				continue
			}
			dec.scanp = i
			return c, nil
		}
		// buffer has been scanned, now report any error
		if err != nil {
			return 0, err
		}
		// Read error (including io.EOF) is deferred one iteration so
		// buffered data is always scanned first.
		err = dec.refill()
	}
}
470
+
471
+/*
472
+TODO
473
+
474
+// EncodeToken writes the given JSON token to the stream.
475
+// It returns an error if the delimiters [ ] { } are not properly used.
476
+//
477
+// EncodeToken does not call Flush, because usually it is part of
478
+// a larger operation such as Encode, and those will call Flush when finished.
479
+// Callers that create an Encoder and then invoke EncodeToken directly,
480
+// without using Encode, need to call Flush when finished to ensure that
481
+// the JSON is written to the underlying writer.
482
+func (e *Encoder) EncodeToken(t Token) error  {
483
+	...
484
+}
485
+
486
+*/
0 487
new file mode 100644
... ...
@@ -0,0 +1,44 @@
0
+// Copyright 2011 The Go Authors. All rights reserved.
1
+// Use of this source code is governed by a BSD-style
2
+// license that can be found in the LICENSE file.
3
+
4
+package json
5
+
6
+import (
7
+	"strings"
8
+)
9
+
10
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
	name := tag
	var opts tagOptions
	if comma := strings.IndexByte(tag, ','); comma >= 0 {
		name, opts = tag[:comma], tagOptions(tag[comma+1:])
	}
	return name, opts
}

// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
	rest := string(o)
	for rest != "" {
		// Slice off the next comma-delimited option.
		option := rest
		if i := strings.Index(rest, ","); i >= 0 {
			option, rest = rest[:i], rest[i+1:]
		} else {
			rest = ""
		}
		if option == optionName {
			return true
		}
	}
	return false
}
... ...
@@ -118,12 +118,13 @@ protos:
118 118
 # be run first
119 119
 
120 120
 define gocover
121
-$(GO_EXC) test $(OPTS) $(TESTOPTS) -covermode="$(COVERMODE)" -coverprofile="$(COVERDIR)/$(subst /,-,$(1)).$(subst $(_space),.,$(NOTARY_BUILDTAGS)).cover" "$(1)" || exit 1;
121
+$(GO_EXC) test $(OPTS) $(TESTOPTS) -covermode="$(COVERMODE)" -coverprofile="$(COVERDIR)/$(subst /,-,$(1)).$(subst $(_space),.,$(NOTARY_BUILDTAGS)).coverage.txt" "$(1)" || exit 1;
122 122
 endef
123 123
 
124 124
 gen-cover: go_version
125 125
 	@mkdir -p "$(COVERDIR)"
126 126
 	$(foreach PKG,$(PKGS),$(call gocover,$(PKG)))
127
+	rm "$(COVERDIR)"/*testutils*.coverage.txt
127 128
 
128 129
 # Generates the cover binaries and runs them all in serial, so this can be used
129 130
 # run all tests with a yubikey without any problems
130 131
deleted file mode 100644
... ...
@@ -1,345 +0,0 @@
1
-package certs
2
-
3
-import (
4
-	"crypto/x509"
5
-	"errors"
6
-	"fmt"
7
-	"path/filepath"
8
-	"time"
9
-
10
-	"github.com/Sirupsen/logrus"
11
-	"github.com/docker/notary/trustmanager"
12
-	"github.com/docker/notary/tuf/data"
13
-	"github.com/docker/notary/tuf/signed"
14
-)
15
-
16
-// Manager is an abstraction around trusted root CA stores
17
-type Manager struct {
18
-	trustedCAStore          trustmanager.X509Store
19
-	trustedCertificateStore trustmanager.X509Store
20
-}
21
-
22
-const trustDir = "trusted_certificates"
23
-
24
-// ErrValidationFail is returned when there is no valid trusted certificates
25
-// being served inside of the roots.json
26
-type ErrValidationFail struct {
27
-	Reason string
28
-}
29
-
30
-// ErrValidationFail is returned when there is no valid trusted certificates
31
-// being served inside of the roots.json
32
-func (err ErrValidationFail) Error() string {
33
-	return fmt.Sprintf("could not validate the path to a trusted root: %s", err.Reason)
34
-}
35
-
36
-// ErrRootRotationFail is returned when we fail to do a full root key rotation
37
-// by either failing to add the new root certificate, or delete the old ones
38
-type ErrRootRotationFail struct {
39
-	Reason string
40
-}
41
-
42
-// ErrRootRotationFail is returned when we fail to do a full root key rotation
43
-// by either failing to add the new root certificate, or delete the old ones
44
-func (err ErrRootRotationFail) Error() string {
45
-	return fmt.Sprintf("could not rotate trust to a new trusted root: %s", err.Reason)
46
-}
47
-
48
-// NewManager returns an initialized Manager, or an error
49
-// if it fails to load certificates
50
-func NewManager(baseDir string) (*Manager, error) {
51
-	trustPath := filepath.Join(baseDir, trustDir)
52
-
53
-	// Load all CAs that aren't expired and don't use SHA1
54
-	trustedCAStore, err := trustmanager.NewX509FilteredFileStore(trustPath, func(cert *x509.Certificate) bool {
55
-		return cert.IsCA && cert.BasicConstraintsValid && cert.SubjectKeyId != nil &&
56
-			time.Now().Before(cert.NotAfter) &&
57
-			cert.SignatureAlgorithm != x509.SHA1WithRSA &&
58
-			cert.SignatureAlgorithm != x509.DSAWithSHA1 &&
59
-			cert.SignatureAlgorithm != x509.ECDSAWithSHA1
60
-	})
61
-	if err != nil {
62
-		return nil, err
63
-	}
64
-
65
-	// Load all individual (non-CA) certificates that aren't expired and don't use SHA1
66
-	trustedCertificateStore, err := trustmanager.NewX509FilteredFileStore(trustPath, func(cert *x509.Certificate) bool {
67
-		return !cert.IsCA &&
68
-			time.Now().Before(cert.NotAfter) &&
69
-			cert.SignatureAlgorithm != x509.SHA1WithRSA &&
70
-			cert.SignatureAlgorithm != x509.DSAWithSHA1 &&
71
-			cert.SignatureAlgorithm != x509.ECDSAWithSHA1
72
-	})
73
-	if err != nil {
74
-		return nil, err
75
-	}
76
-
77
-	return &Manager{
78
-		trustedCAStore:          trustedCAStore,
79
-		trustedCertificateStore: trustedCertificateStore,
80
-	}, nil
81
-}
82
-
83
-// TrustedCertificateStore returns the trusted certificate store being managed
84
-// by this Manager
85
-func (m *Manager) TrustedCertificateStore() trustmanager.X509Store {
86
-	return m.trustedCertificateStore
87
-}
88
-
89
-// TrustedCAStore returns the CA store being managed by this Manager
90
-func (m *Manager) TrustedCAStore() trustmanager.X509Store {
91
-	return m.trustedCAStore
92
-}
93
-
94
-// AddTrustedCert adds a cert to the trusted certificate store (not the CA
95
-// store)
96
-func (m *Manager) AddTrustedCert(cert *x509.Certificate) {
97
-	m.trustedCertificateStore.AddCert(cert)
98
-}
99
-
100
-// AddTrustedCACert adds a cert to the trusted CA certificate store
101
-func (m *Manager) AddTrustedCACert(cert *x509.Certificate) {
102
-	m.trustedCAStore.AddCert(cert)
103
-}
104
-
105
-/*
106
-ValidateRoot receives a new root, validates its correctness and attempts to
107
-do root key rotation if needed.
108
-
109
-First we list the current trusted certificates we have for a particular GUN. If
110
-that list is non-empty means that we've already seen this repository before, and
111
-have a list of trusted certificates for it. In this case, we use this list of
112
-certificates to attempt to validate this root file.
113
-
114
-If the previous validation suceeds, or in the case where we found no trusted
115
-certificates for this particular GUN, we check the integrity of the root by
116
-making sure that it is validated by itself. This means that we will attempt to
117
-validate the root data with the certificates that are included in the root keys
118
-themselves.
119
-
120
-If this last steps succeeds, we attempt to do root rotation, by ensuring that
121
-we only trust the certificates that are present in the new root.
122
-
123
-This mechanism of operation is essentially Trust On First Use (TOFU): if we
124
-have never seen a certificate for a particular CN, we trust it. If later we see
125
-a different certificate for that certificate, we return an ErrValidationFailed error.
126
-
127
-Note that since we only allow trust data to be downloaded over an HTTPS channel
128
-we are using the current public PKI to validate the first download of the certificate
129
-adding an extra layer of security over the normal (SSH style) trust model.
130
-We shall call this: TOFUS.
131
-*/
132
-func (m *Manager) ValidateRoot(root *data.Signed, gun string) error {
133
-	logrus.Debugf("entered ValidateRoot with dns: %s", gun)
134
-	signedRoot, err := data.RootFromSigned(root)
135
-	if err != nil {
136
-		return err
137
-	}
138
-
139
-	// Retrieve all the leaf certificates in root for which the CN matches the GUN
140
-	allValidCerts, err := validRootLeafCerts(signedRoot, gun)
141
-	if err != nil {
142
-		logrus.Debugf("error retrieving valid leaf certificates for: %s, %v", gun, err)
143
-		return &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"}
144
-	}
145
-
146
-	// Retrieve all the trusted certificates that match this gun
147
-	certsForCN, err := m.trustedCertificateStore.GetCertificatesByCN(gun)
148
-	if err != nil {
149
-		// If the error that we get back is different than ErrNoCertificatesFound
150
-		// we couldn't check if there are any certificates with this CN already
151
-		// trusted. Let's take the conservative approach and return a failed validation
152
-		if _, ok := err.(*trustmanager.ErrNoCertificatesFound); !ok {
153
-			logrus.Debugf("error retrieving trusted certificates for: %s, %v", gun, err)
154
-			return &ErrValidationFail{Reason: "unable to retrieve trusted certificates"}
155
-		}
156
-	}
157
-
158
-	// If we have certificates that match this specific GUN, let's make sure to
159
-	// use them first to validate that this new root is valid.
160
-	if len(certsForCN) != 0 {
161
-		logrus.Debugf("found %d valid root certificates for %s", len(certsForCN), gun)
162
-		err = signed.VerifyRoot(root, 0, trustmanager.CertsToKeys(certsForCN))
163
-		if err != nil {
164
-			logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
165
-			return &ErrValidationFail{Reason: "failed to validate data with current trusted certificates"}
166
-		}
167
-	} else {
168
-		logrus.Debugf("found no currently valid root certificates for %s", gun)
169
-	}
170
-
171
-	// Validate the integrity of the new root (does it have valid signatures)
172
-	err = signed.VerifyRoot(root, 0, trustmanager.CertsToKeys(allValidCerts))
173
-	if err != nil {
174
-		logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
175
-		return &ErrValidationFail{Reason: "failed to validate integrity of roots"}
176
-	}
177
-
178
-	// Getting here means A) we had trusted certificates and both the
179
-	// old and new validated this root; or B) we had no trusted certificates but
180
-	// the new set of certificates has integrity (self-signed)
181
-	logrus.Debugf("entering root certificate rotation for: %s", gun)
182
-
183
-	// Do root certificate rotation: we trust only the certs present in the new root
184
-	// First we add all the new certificates (even if they already exist)
185
-	for _, cert := range allValidCerts {
186
-		err := m.trustedCertificateStore.AddCert(cert)
187
-		if err != nil {
188
-			// If the error is already exists we don't fail the rotation
189
-			if _, ok := err.(*trustmanager.ErrCertExists); ok {
190
-				logrus.Debugf("ignoring certificate addition to: %s", gun)
191
-				continue
192
-			}
193
-			logrus.Debugf("error adding new trusted certificate for: %s, %v", gun, err)
194
-		}
195
-	}
196
-
197
-	// Now we delete old certificates that aren't present in the new root
198
-	for certID, cert := range certsToRemove(certsForCN, allValidCerts) {
199
-		logrus.Debugf("removing certificate with certID: %s", certID)
200
-		err = m.trustedCertificateStore.RemoveCert(cert)
201
-		if err != nil {
202
-			logrus.Debugf("failed to remove trusted certificate with keyID: %s, %v", certID, err)
203
-			return &ErrRootRotationFail{Reason: "failed to rotate root keys"}
204
-		}
205
-	}
206
-
207
-	logrus.Debugf("Root validation succeeded for %s", gun)
208
-	return nil
209
-}
210
-
211
-// validRootLeafCerts returns a list of non-exipired, non-sha1 certificates whoose
212
-// Common-Names match the provided GUN
213
-func validRootLeafCerts(root *data.SignedRoot, gun string) ([]*x509.Certificate, error) {
214
-	// Get a list of all of the leaf certificates present in root
215
-	allLeafCerts, _ := parseAllCerts(root)
216
-	var validLeafCerts []*x509.Certificate
217
-
218
-	// Go through every leaf certificate and check that the CN matches the gun
219
-	for _, cert := range allLeafCerts {
220
-		// Validate that this leaf certificate has a CN that matches the exact gun
221
-		if cert.Subject.CommonName != gun {
222
-			logrus.Debugf("error leaf certificate CN: %s doesn't match the given GUN: %s", cert.Subject.CommonName)
223
-			continue
224
-		}
225
-		// Make sure the certificate is not expired
226
-		if time.Now().After(cert.NotAfter) {
227
-			logrus.Debugf("error leaf certificate is expired")
228
-			continue
229
-		}
230
-
231
-		// We don't allow root certificates that use SHA1
232
-		if cert.SignatureAlgorithm == x509.SHA1WithRSA ||
233
-			cert.SignatureAlgorithm == x509.DSAWithSHA1 ||
234
-			cert.SignatureAlgorithm == x509.ECDSAWithSHA1 {
235
-
236
-			logrus.Debugf("error certificate uses deprecated hashing algorithm (SHA1)")
237
-			continue
238
-		}
239
-
240
-		validLeafCerts = append(validLeafCerts, cert)
241
-	}
242
-
243
-	if len(validLeafCerts) < 1 {
244
-		logrus.Debugf("didn't find any valid leaf certificates for %s", gun)
245
-		return nil, errors.New("no valid leaf certificates found in any of the root keys")
246
-	}
247
-
248
-	logrus.Debugf("found %d valid leaf certificates for %s", len(validLeafCerts), gun)
249
-	return validLeafCerts, nil
250
-}
251
-
252
-// parseAllCerts returns two maps, one with all of the leafCertificates and one
253
-// with all the intermediate certificates found in signedRoot
254
-func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, map[string][]*x509.Certificate) {
255
-	leafCerts := make(map[string]*x509.Certificate)
256
-	intCerts := make(map[string][]*x509.Certificate)
257
-
258
-	// Before we loop through all root keys available, make sure any exist
259
-	rootRoles, ok := signedRoot.Signed.Roles["root"]
260
-	if !ok {
261
-		logrus.Debugf("tried to parse certificates from invalid root signed data")
262
-		return nil, nil
263
-	}
264
-
265
-	logrus.Debugf("found the following root keys: %v", rootRoles.KeyIDs)
266
-	// Iterate over every keyID for the root role inside of roots.json
267
-	for _, keyID := range rootRoles.KeyIDs {
268
-		// check that the key exists in the signed root keys map
269
-		key, ok := signedRoot.Signed.Keys[keyID]
270
-		if !ok {
271
-			logrus.Debugf("error while getting data for keyID: %s", keyID)
272
-			continue
273
-		}
274
-
275
-		// Decode all the x509 certificates that were bundled with this
276
-		// Specific root key
277
-		decodedCerts, err := trustmanager.LoadCertBundleFromPEM(key.Public())
278
-		if err != nil {
279
-			logrus.Debugf("error while parsing root certificate with keyID: %s, %v", keyID, err)
280
-			continue
281
-		}
282
-
283
-		// Get all non-CA certificates in the decoded certificates
284
-		leafCertList := trustmanager.GetLeafCerts(decodedCerts)
285
-
286
-		// If we got no leaf certificates or we got more than one, fail
287
-		if len(leafCertList) != 1 {
288
-			logrus.Debugf("invalid chain due to leaf certificate missing or too many leaf certificates for keyID: %s", keyID)
289
-			continue
290
-		}
291
-
292
-		// Get the ID of the leaf certificate
293
-		leafCert := leafCertList[0]
294
-		leafID, err := trustmanager.FingerprintCert(leafCert)
295
-		if err != nil {
296
-			logrus.Debugf("error while fingerprinting root certificate with keyID: %s, %v", keyID, err)
297
-			continue
298
-		}
299
-
300
-		// Store the leaf cert in the map
301
-		leafCerts[leafID] = leafCert
302
-
303
-		// Get all the remainder certificates marked as a CA to be used as intermediates
304
-		intermediateCerts := trustmanager.GetIntermediateCerts(decodedCerts)
305
-		intCerts[leafID] = intermediateCerts
306
-	}
307
-
308
-	return leafCerts, intCerts
309
-}
310
-
311
-// certsToRemove returns all the certifificates from oldCerts that aren't present
312
-// in newCerts
313
-func certsToRemove(oldCerts, newCerts []*x509.Certificate) map[string]*x509.Certificate {
314
-	certsToRemove := make(map[string]*x509.Certificate)
315
-
316
-	// If no newCerts were provided
317
-	if len(newCerts) == 0 {
318
-		return certsToRemove
319
-	}
320
-
321
-	// Populate a map with all the IDs from newCert
322
-	var newCertMap = make(map[string]struct{})
323
-	for _, cert := range newCerts {
324
-		certID, err := trustmanager.FingerprintCert(cert)
325
-		if err != nil {
326
-			logrus.Debugf("error while fingerprinting root certificate with keyID: %s, %v", certID, err)
327
-			continue
328
-		}
329
-		newCertMap[certID] = struct{}{}
330
-	}
331
-
332
-	// Iterate over all the old certificates and check to see if we should remove them
333
-	for _, cert := range oldCerts {
334
-		certID, err := trustmanager.FingerprintCert(cert)
335
-		if err != nil {
336
-			logrus.Debugf("error while fingerprinting root certificate with certID: %s, %v", certID, err)
337
-			continue
338
-		}
339
-		if _, ok := newCertMap[certID]; !ok {
340
-			certsToRemove[certID] = cert
341
-		}
342
-	}
343
-
344
-	return certsToRemove
345
-}
346 1
new file mode 100644
... ...
@@ -0,0 +1,280 @@
0
+package certs
1
+
2
+import (
3
+	"crypto/x509"
4
+	"errors"
5
+	"fmt"
6
+	"time"
7
+
8
+	"github.com/Sirupsen/logrus"
9
+	"github.com/docker/notary/trustmanager"
10
+	"github.com/docker/notary/tuf/data"
11
+	"github.com/docker/notary/tuf/signed"
12
+)
13
+
14
+// ErrValidationFail is returned when there is no valid trusted certificates
15
+// being served inside of the roots.json
16
+type ErrValidationFail struct {
17
+	Reason string
18
+}
19
+
20
+// ErrValidationFail is returned when there is no valid trusted certificates
21
+// being served inside of the roots.json
22
+func (err ErrValidationFail) Error() string {
23
+	return fmt.Sprintf("could not validate the path to a trusted root: %s", err.Reason)
24
+}
25
+
26
+// ErrRootRotationFail is returned when we fail to do a full root key rotation
27
+// by either failing to add the new root certificate, or delete the old ones
28
+type ErrRootRotationFail struct {
29
+	Reason string
30
+}
31
+
32
+// ErrRootRotationFail is returned when we fail to do a full root key rotation
33
+// by either failing to add the new root certificate, or delete the old ones
34
+func (err ErrRootRotationFail) Error() string {
35
+	return fmt.Sprintf("could not rotate trust to a new trusted root: %s", err.Reason)
36
+}
37
+
38
+/*
39
+ValidateRoot receives a new root, validates its correctness and attempts to
40
+do root key rotation if needed.
41
+
42
+First we list the current trusted certificates we have for a particular GUN. If
43
+that list is non-empty means that we've already seen this repository before, and
44
+have a list of trusted certificates for it. In this case, we use this list of
45
+certificates to attempt to validate this root file.
46
+
47
+If the previous validation succeeds, or in the case where we found no trusted
48
+certificates for this particular GUN, we check the integrity of the root by
49
+making sure that it is validated by itself. This means that we will attempt to
50
+validate the root data with the certificates that are included in the root keys
51
+themselves.
52
+
53
+If this last steps succeeds, we attempt to do root rotation, by ensuring that
54
+we only trust the certificates that are present in the new root.
55
+
56
+This mechanism of operation is essentially Trust On First Use (TOFU): if we
57
+have never seen a certificate for a particular CN, we trust it. If later we see
58
+a different certificate for that certificate, we return an ErrValidationFailed error.
59
+
60
+Note that since we only allow trust data to be downloaded over an HTTPS channel
61
+we are using the current public PKI to validate the first download of the certificate
62
+adding an extra layer of security over the normal (SSH style) trust model.
63
+We shall call this: TOFUS.
64
+*/
65
+func ValidateRoot(certStore trustmanager.X509Store, root *data.Signed, gun string) error {
66
+	logrus.Debugf("entered ValidateRoot with dns: %s", gun)
67
+	signedRoot, err := data.RootFromSigned(root)
68
+	if err != nil {
69
+		return err
70
+	}
71
+
72
+	// Retrieve all the leaf certificates in root for which the CN matches the GUN
73
+	allValidCerts, err := validRootLeafCerts(signedRoot, gun)
74
+	if err != nil {
75
+		logrus.Debugf("error retrieving valid leaf certificates for: %s, %v", gun, err)
76
+		return &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"}
77
+	}
78
+
79
+	// Retrieve all the trusted certificates that match this gun
80
+	certsForCN, err := certStore.GetCertificatesByCN(gun)
81
+	if err != nil {
82
+		// If the error that we get back is different than ErrNoCertificatesFound
83
+		// we couldn't check if there are any certificates with this CN already
84
+		// trusted. Let's take the conservative approach and return a failed validation
85
+		if _, ok := err.(*trustmanager.ErrNoCertificatesFound); !ok {
86
+			logrus.Debugf("error retrieving trusted certificates for: %s, %v", gun, err)
87
+			return &ErrValidationFail{Reason: "unable to retrieve trusted certificates"}
88
+		}
89
+	}
90
+
91
+	// If we have certificates that match this specific GUN, let's make sure to
92
+	// use them first to validate that this new root is valid.
93
+	if len(certsForCN) != 0 {
94
+		logrus.Debugf("found %d valid root certificates for %s", len(certsForCN), gun)
95
+		err = signed.VerifyRoot(root, 0, trustmanager.CertsToKeys(certsForCN))
96
+		if err != nil {
97
+			logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
98
+			return &ErrValidationFail{Reason: "failed to validate data with current trusted certificates"}
99
+		}
100
+	} else {
101
+		logrus.Debugf("found no currently valid root certificates for %s", gun)
102
+	}
103
+
104
+	// Validate the integrity of the new root (does it have valid signatures)
105
+	err = signed.VerifyRoot(root, 0, trustmanager.CertsToKeys(allValidCerts))
106
+	if err != nil {
107
+		logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
108
+		return &ErrValidationFail{Reason: "failed to validate integrity of roots"}
109
+	}
110
+
111
+	// Getting here means A) we had trusted certificates and both the
112
+	// old and new validated this root; or B) we had no trusted certificates but
113
+	// the new set of certificates has integrity (self-signed)
114
+	logrus.Debugf("entering root certificate rotation for: %s", gun)
115
+
116
+	// Do root certificate rotation: we trust only the certs present in the new root
117
+	// First we add all the new certificates (even if they already exist)
118
+	for _, cert := range allValidCerts {
119
+		err := certStore.AddCert(cert)
120
+		if err != nil {
121
+			// If the error is already exists we don't fail the rotation
122
+			if _, ok := err.(*trustmanager.ErrCertExists); ok {
123
+				logrus.Debugf("ignoring certificate addition to: %s", gun)
124
+				continue
125
+			}
126
+			logrus.Debugf("error adding new trusted certificate for: %s, %v", gun, err)
127
+		}
128
+	}
129
+
130
+	// Now we delete old certificates that aren't present in the new root
131
+	for certID, cert := range certsToRemove(certsForCN, allValidCerts) {
132
+		logrus.Debugf("removing certificate with certID: %s", certID)
133
+		err = certStore.RemoveCert(cert)
134
+		if err != nil {
135
+			logrus.Debugf("failed to remove trusted certificate with keyID: %s, %v", certID, err)
136
+			return &ErrRootRotationFail{Reason: "failed to rotate root keys"}
137
+		}
138
+	}
139
+
140
+	logrus.Debugf("Root validation succeeded for %s", gun)
141
+	return nil
142
+}
143
+
144
+// validRootLeafCerts returns a list of non-exipired, non-sha1 certificates whose
145
+// Common-Names match the provided GUN
146
+func validRootLeafCerts(root *data.SignedRoot, gun string) ([]*x509.Certificate, error) {
147
+	// Get a list of all of the leaf certificates present in root
148
+	allLeafCerts, _ := parseAllCerts(root)
149
+	var validLeafCerts []*x509.Certificate
150
+
151
+	// Go through every leaf certificate and check that the CN matches the gun
152
+	for _, cert := range allLeafCerts {
153
+		// Validate that this leaf certificate has a CN that matches the exact gun
154
+		if cert.Subject.CommonName != gun {
155
+			logrus.Debugf("error leaf certificate CN: %s doesn't match the given GUN: %s",
156
+				cert.Subject.CommonName, gun)
157
+			continue
158
+		}
159
+		// Make sure the certificate is not expired
160
+		if time.Now().After(cert.NotAfter) {
161
+			logrus.Debugf("error leaf certificate is expired")
162
+			continue
163
+		}
164
+
165
+		// We don't allow root certificates that use SHA1
166
+		if cert.SignatureAlgorithm == x509.SHA1WithRSA ||
167
+			cert.SignatureAlgorithm == x509.DSAWithSHA1 ||
168
+			cert.SignatureAlgorithm == x509.ECDSAWithSHA1 {
169
+
170
+			logrus.Debugf("error certificate uses deprecated hashing algorithm (SHA1)")
171
+			continue
172
+		}
173
+
174
+		validLeafCerts = append(validLeafCerts, cert)
175
+	}
176
+
177
+	if len(validLeafCerts) < 1 {
178
+		logrus.Debugf("didn't find any valid leaf certificates for %s", gun)
179
+		return nil, errors.New("no valid leaf certificates found in any of the root keys")
180
+	}
181
+
182
+	logrus.Debugf("found %d valid leaf certificates for %s", len(validLeafCerts), gun)
183
+	return validLeafCerts, nil
184
+}
185
+
186
+// parseAllCerts returns two maps, one with all of the leafCertificates and one
187
+// with all the intermediate certificates found in signedRoot
188
+func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, map[string][]*x509.Certificate) {
189
+	leafCerts := make(map[string]*x509.Certificate)
190
+	intCerts := make(map[string][]*x509.Certificate)
191
+
192
+	// Before we loop through all root keys available, make sure any exist
193
+	rootRoles, ok := signedRoot.Signed.Roles["root"]
194
+	if !ok {
195
+		logrus.Debugf("tried to parse certificates from invalid root signed data")
196
+		return nil, nil
197
+	}
198
+
199
+	logrus.Debugf("found the following root keys: %v", rootRoles.KeyIDs)
200
+	// Iterate over every keyID for the root role inside of roots.json
201
+	for _, keyID := range rootRoles.KeyIDs {
202
+		// check that the key exists in the signed root keys map
203
+		key, ok := signedRoot.Signed.Keys[keyID]
204
+		if !ok {
205
+			logrus.Debugf("error while getting data for keyID: %s", keyID)
206
+			continue
207
+		}
208
+
209
+		// Decode all the x509 certificates that were bundled with this
210
+		// Specific root key
211
+		decodedCerts, err := trustmanager.LoadCertBundleFromPEM(key.Public())
212
+		if err != nil {
213
+			logrus.Debugf("error while parsing root certificate with keyID: %s, %v", keyID, err)
214
+			continue
215
+		}
216
+
217
+		// Get all non-CA certificates in the decoded certificates
218
+		leafCertList := trustmanager.GetLeafCerts(decodedCerts)
219
+
220
+		// If we got no leaf certificates or we got more than one, fail
221
+		if len(leafCertList) != 1 {
222
+			logrus.Debugf("invalid chain due to leaf certificate missing or too many leaf certificates for keyID: %s", keyID)
223
+			continue
224
+		}
225
+
226
+		// Get the ID of the leaf certificate
227
+		leafCert := leafCertList[0]
228
+		leafID, err := trustmanager.FingerprintCert(leafCert)
229
+		if err != nil {
230
+			logrus.Debugf("error while fingerprinting root certificate with keyID: %s, %v", keyID, err)
231
+			continue
232
+		}
233
+
234
+		// Store the leaf cert in the map
235
+		leafCerts[leafID] = leafCert
236
+
237
+		// Get all the remainder certificates marked as a CA to be used as intermediates
238
+		intermediateCerts := trustmanager.GetIntermediateCerts(decodedCerts)
239
+		intCerts[leafID] = intermediateCerts
240
+	}
241
+
242
+	return leafCerts, intCerts
243
+}
244
+
245
+// certsToRemove returns all the certifificates from oldCerts that aren't present
246
+// in newCerts
247
+func certsToRemove(oldCerts, newCerts []*x509.Certificate) map[string]*x509.Certificate {
248
+	certsToRemove := make(map[string]*x509.Certificate)
249
+
250
+	// If no newCerts were provided
251
+	if len(newCerts) == 0 {
252
+		return certsToRemove
253
+	}
254
+
255
+	// Populate a map with all the IDs from newCert
256
+	var newCertMap = make(map[string]struct{})
257
+	for _, cert := range newCerts {
258
+		certID, err := trustmanager.FingerprintCert(cert)
259
+		if err != nil {
260
+			logrus.Debugf("error while fingerprinting root certificate with keyID: %s, %v", certID, err)
261
+			continue
262
+		}
263
+		newCertMap[certID] = struct{}{}
264
+	}
265
+
266
+	// Iterate over all the old certificates and check to see if we should remove them
267
+	for _, cert := range oldCerts {
268
+		certID, err := trustmanager.FingerprintCert(cert)
269
+		if err != nil {
270
+			logrus.Debugf("error while fingerprinting root certificate with certID: %s, %v", certID, err)
271
+			continue
272
+		}
273
+		if _, ok := newCertMap[certID]; !ok {
274
+			certsToRemove[certID] = cert
275
+		}
276
+	}
277
+
278
+	return certsToRemove
279
+}
... ...
@@ -18,6 +18,8 @@ machine:
18 18
     CIRCLE_PAIN: "mode: set"
19 19
   # Put the coverage profile somewhere codecov's script can find it
20 20
     COVERPROFILE: coverage.out
21
+  # Set the pull request number so codecov can figure it out
22
+    PULL_REQUEST: ${CI_PULL_REQUEST##*/}
21 23
 
22 24
   hosts:
23 25
   # Not used yet
... ...
@@ -40,8 +42,7 @@ dependencies:
40 40
   # For the stable go version, additionally install linting tools
41 41
     - >
42 42
       gvm use stable &&
43
-      go get github.com/golang/lint/golint github.com/wadey/gocovmerge &&
44
-      go install github.com/wadey/gocovmerge
43
+      go get github.com/golang/lint/golint
45 44
 test:
46 45
   pre:
47 46
   # Output the go versions we are going to test
... ...
@@ -72,11 +73,6 @@ test:
72 72
         pwd: $BASE_STABLE
73 73
 
74 74
   post:
75
-    - gvm use stable && make covmerge:
76
-        timeout: 600
77
-        parallel: true
78
-        pwd: $BASE_STABLE
79
-
80 75
   # Report to codecov.io
81 76
     - bash <(curl -s https://codecov.io/bash):
82 77
         parallel: true
... ...
@@ -9,10 +9,10 @@ import (
9 9
 	"net/url"
10 10
 	"os"
11 11
 	"path/filepath"
12
-	"strings"
13 12
 	"time"
14 13
 
15 14
 	"github.com/Sirupsen/logrus"
15
+	"github.com/docker/notary"
16 16
 	"github.com/docker/notary/certs"
17 17
 	"github.com/docker/notary/client/changelist"
18 18
 	"github.com/docker/notary/cryptoservice"
... ...
@@ -53,9 +53,9 @@ type ErrInvalidRemoteRole struct {
53 53
 	Role string
54 54
 }
55 55
 
56
-func (e ErrInvalidRemoteRole) Error() string {
56
+func (err ErrInvalidRemoteRole) Error() string {
57 57
 	return fmt.Sprintf(
58
-		"notary does not support the server managing the %s key", e.Role)
58
+		"notary does not support the server managing the %s key", err.Role)
59 59
 }
60 60
 
61 61
 // ErrRepositoryNotExist is returned when an action is taken on a remote
... ...
@@ -84,7 +84,7 @@ type NotaryRepository struct {
84 84
 	CryptoService signed.CryptoService
85 85
 	tufRepo       *tuf.Repo
86 86
 	roundTrip     http.RoundTripper
87
-	CertManager   *certs.Manager
87
+	CertStore     trustmanager.X509Store
88 88
 }
89 89
 
90 90
 // repositoryFromKeystores is a helper function for NewNotaryRepository that
... ...
@@ -93,7 +93,11 @@ type NotaryRepository struct {
93 93
 func repositoryFromKeystores(baseDir, gun, baseURL string, rt http.RoundTripper,
94 94
 	keyStores []trustmanager.KeyStore) (*NotaryRepository, error) {
95 95
 
96
-	certManager, err := certs.NewManager(baseDir)
96
+	certPath := filepath.Join(baseDir, notary.TrustedCertsDir)
97
+	certStore, err := trustmanager.NewX509FilteredFileStore(
98
+		certPath,
99
+		trustmanager.FilterCertsExpiredSha1,
100
+	)
97 101
 	if err != nil {
98 102
 		return nil, err
99 103
 	}
... ...
@@ -107,7 +111,7 @@ func repositoryFromKeystores(baseDir, gun, baseURL string, rt http.RoundTripper,
107 107
 		tufRepoPath:   filepath.Join(baseDir, tufDir, filepath.FromSlash(gun)),
108 108
 		CryptoService: cryptoService,
109 109
 		roundTrip:     rt,
110
-		CertManager:   certManager,
110
+		CertStore:     certStore,
111 111
 	}
112 112
 
113 113
 	fileStore, err := store.NewFilesystemStore(
... ...
@@ -165,7 +169,7 @@ func (r *NotaryRepository) Initialize(rootKeyID string, serverManagedRoles ...st
165 165
 	// currently we only support server managing timestamps and snapshots, and
166 166
 	// nothing else - timestamps are always managed by the server, and implicit
167 167
 	// (do not have to be passed in as part of `serverManagedRoles`, so that
168
-	// the API of Initialize doens't change).
168
+	// the API of Initialize doesn't change).
169 169
 	var serverManagesSnapshot bool
170 170
 	locallyManagedKeys := []string{
171 171
 		data.CanonicalTargetsRole,
... ...
@@ -197,7 +201,7 @@ func (r *NotaryRepository) Initialize(rootKeyID string, serverManagedRoles ...st
197 197
 	if err != nil {
198 198
 		return err
199 199
 	}
200
-	r.CertManager.AddTrustedCert(rootCert)
200
+	r.CertStore.AddCert(rootCert)
201 201
 
202 202
 	// The root key gets stored in the TUF metadata X509 encoded, linking
203 203
 	// the tuf root.json to our X509 PKI.
... ...
@@ -275,8 +279,6 @@ func addChange(cl *changelist.FileChangelist, c changelist.Change, roles ...stri
275 275
 
276 276
 	var changes []changelist.Change
277 277
 	for _, role := range roles {
278
-		role = strings.ToLower(role)
279
-
280 278
 		// Ensure we can only add targets to the CanonicalTargetsRole,
281 279
 		// or a Delegation role (which is <CanonicalTargetsRole>/something else)
282 280
 		if role != data.CanonicalTargetsRole && !data.IsDelegation(role) {
... ...
@@ -347,7 +349,7 @@ func (r *NotaryRepository) AddDelegation(name string, threshold int,
347 347
 // the repository when the changelist gets applied at publish time.
348 348
 // This does not validate that the delegation exists, since one might exist
349 349
 // after applying all changes.
350
-func (r *NotaryRepository) RemoveDelegation(name string) error {
350
+func (r *NotaryRepository) RemoveDelegation(name string, keyIDs, paths []string, removeAll bool) error {
351 351
 
352 352
 	if !data.IsDelegation(name) {
353 353
 		return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
... ...
@@ -360,20 +362,41 @@ func (r *NotaryRepository) RemoveDelegation(name string) error {
360 360
 	defer cl.Close()
361 361
 
362 362
 	logrus.Debugf(`Removing delegation "%s"\n`, name)
363
+	var template *changelist.TufChange
364
+
365
+	// We use the Delete action only for force removal, Update is used for removing individual keys and paths
366
+	if removeAll {
367
+		template = changelist.NewTufChange(
368
+			changelist.ActionDelete,
369
+			name,
370
+			changelist.TypeTargetsDelegation,
371
+			"",  // no path
372
+			nil, // deleting role, no data needed
373
+		)
363 374
 
364
-	template := changelist.NewTufChange(
365
-		changelist.ActionDelete,
366
-		name,
367
-		changelist.TypeTargetsDelegation,
368
-		"", // no path
369
-		nil,
370
-	)
375
+	} else {
376
+		tdJSON, err := json.Marshal(&changelist.TufDelegation{
377
+			RemoveKeys:  keyIDs,
378
+			RemovePaths: paths,
379
+		})
380
+		if err != nil {
381
+			return err
382
+		}
383
+
384
+		template = changelist.NewTufChange(
385
+			changelist.ActionUpdate,
386
+			name,
387
+			changelist.TypeTargetsDelegation,
388
+			"", // no path
389
+			tdJSON,
390
+		)
391
+	}
371 392
 
372 393
 	return addChange(cl, template, name)
373 394
 }
374 395
 
375 396
 // AddTarget creates new changelist entries to add a target to the given roles
376
-// in the repository when the changelist gets appied at publish time.
397
+// in the repository when the changelist gets applied at publish time.
377 398
 // If roles are unspecified, the default role is "targets".
378 399
 func (r *NotaryRepository) AddTarget(target *Target, roles ...string) error {
379 400
 
... ...
@@ -431,7 +454,7 @@ func (r *NotaryRepository) ListTargets(roles ...string) ([]*TargetWithRole, erro
431 431
 	for _, role := range roles {
432 432
 		// we don't need to do anything special with removing role from
433 433
 		// roles because listSubtree always processes role and only excludes
434
-		// descendent delegations that appear in roles.
434
+		// descendant delegations that appear in roles.
435 435
 		r.listSubtree(targets, role, roles...)
436 436
 	}
437 437
 
... ...
@@ -509,6 +532,92 @@ func (r *NotaryRepository) GetChangelist() (changelist.Changelist, error) {
509 509
 	return cl, nil
510 510
 }
511 511
 
512
+// GetDelegationRoles returns the keys and roles of the repository's delegations
513
+func (r *NotaryRepository) GetDelegationRoles() ([]*data.Role, error) {
514
+	// Update state of the repo to latest
515
+	if _, err := r.Update(false); err != nil {
516
+		return nil, err
517
+	}
518
+
519
+	// All top level delegations (ex: targets/level1) are stored exclusively in targets.json
520
+	targets, ok := r.tufRepo.Targets[data.CanonicalTargetsRole]
521
+	if !ok {
522
+		return nil, store.ErrMetaNotFound{Resource: data.CanonicalTargetsRole}
523
+	}
524
+
525
+	allDelegations := targets.Signed.Delegations.Roles
526
+
527
+	// make a copy for traversing nested delegations
528
+	delegationsList := make([]*data.Role, len(allDelegations))
529
+	copy(delegationsList, allDelegations)
530
+
531
+	// Now traverse to lower level delegations (ex: targets/level1/level2)
532
+	for len(delegationsList) > 0 {
533
+		// Pop off first delegation to traverse
534
+		delegation := delegationsList[0]
535
+		delegationsList = delegationsList[1:]
536
+
537
+		// Get metadata
538
+		delegationMeta, ok := r.tufRepo.Targets[delegation.Name]
539
+		// If we get an error, don't try to traverse further into this subtree because it doesn't exist or is malformed
540
+		if !ok {
541
+			continue
542
+		}
543
+
544
+		// Add nested delegations to return list and exploration list
545
+		allDelegations = append(allDelegations, delegationMeta.Signed.Delegations.Roles...)
546
+		delegationsList = append(delegationsList, delegationMeta.Signed.Delegations.Roles...)
547
+	}
548
+	return allDelegations, nil
549
+}
550
+
551
+// RoleWithSignatures is a Role with its associated signatures
552
+type RoleWithSignatures struct {
553
+	Signatures []data.Signature
554
+	data.Role
555
+}
556
+
557
+// ListRoles returns a list of RoleWithSignatures objects for this repo
558
+// This represents the latest metadata for each role in this repo
559
+func (r *NotaryRepository) ListRoles() ([]RoleWithSignatures, error) {
560
+	// Update to latest repo state
561
+	_, err := r.Update(false)
562
+	if err != nil {
563
+		return nil, err
564
+	}
565
+
566
+	// Get all role info from our updated keysDB, can be empty
567
+	roles := r.tufRepo.GetAllLoadedRoles()
568
+
569
+	var roleWithSigs []RoleWithSignatures
570
+
571
+	// Populate RoleWithSignatures with Role from keysDB and signatures from TUF metadata
572
+	for _, role := range roles {
573
+		roleWithSig := RoleWithSignatures{Role: *role, Signatures: nil}
574
+		switch role.Name {
575
+		case data.CanonicalRootRole:
576
+			roleWithSig.Signatures = r.tufRepo.Root.Signatures
577
+		case data.CanonicalTargetsRole:
578
+			roleWithSig.Signatures = r.tufRepo.Targets[data.CanonicalTargetsRole].Signatures
579
+		case data.CanonicalSnapshotRole:
580
+			roleWithSig.Signatures = r.tufRepo.Snapshot.Signatures
581
+		case data.CanonicalTimestampRole:
582
+			roleWithSig.Signatures = r.tufRepo.Timestamp.Signatures
583
+		default:
584
+			// If the role isn't a delegation, we should error -- this is only possible if we have invalid keyDB state
585
+			if !data.IsDelegation(role.Name) {
586
+				return nil, data.ErrInvalidRole{Role: role.Name, Reason: "invalid role name"}
587
+			}
588
+			if _, ok := r.tufRepo.Targets[role.Name]; ok {
589
+				// We'll only find a signature if we've published any targets with this delegation
590
+				roleWithSig.Signatures = r.tufRepo.Targets[role.Name].Signatures
591
+			}
592
+		}
593
+		roleWithSigs = append(roleWithSigs, roleWithSig)
594
+	}
595
+	return roleWithSigs, nil
596
+}
597
+
512 598
 // Publish pushes the local changes in signed material to the remote notary-server
513 599
 // Conceptually it performs an operation similar to a `git rebase`
514 600
 func (r *NotaryRepository) Publish() error {
... ...
@@ -837,7 +946,7 @@ func (r *NotaryRepository) validateRoot(rootJSON []byte) (*data.SignedRoot, erro
837 837
 		return nil, err
838 838
 	}
839 839
 
840
-	err = r.CertManager.ValidateRoot(root, r.gun)
840
+	err = certs.ValidateRoot(r.CertStore, root, r.gun)
841 841
 	if err != nil {
842 842
 		return nil, err
843 843
 	}
... ...
@@ -904,3 +1013,27 @@ func (r *NotaryRepository) rootFileKeyChange(role, action string, key data.Publi
904 904
 	}
905 905
 	return nil
906 906
 }
907
+
908
+// DeleteTrustData removes the trust data stored for this repo in the TUF cache and certificate store on the client side
909
+func (r *NotaryRepository) DeleteTrustData() error {
910
+	// Clear TUF files and cache
911
+	if err := r.fileStore.RemoveAll(); err != nil {
912
+		return fmt.Errorf("error clearing TUF repo data: %v", err)
913
+	}
914
+	r.tufRepo = tuf.NewRepo(nil, nil)
915
+	// Clear certificates
916
+	certificates, err := r.CertStore.GetCertificatesByCN(r.gun)
917
+	if err != nil {
918
+		// If there were no certificates to delete, we're done
919
+		if _, ok := err.(*trustmanager.ErrNoCertificatesFound); ok {
920
+			return nil
921
+		}
922
+		return fmt.Errorf("error retrieving certificates for %s: %v", r.gun, err)
923
+	}
924
+	for _, cert := range certificates {
925
+		if err := r.CertStore.RemoveCert(cert); err != nil {
926
+			return fmt.Errorf("error removing certificate: %v: %v", cert, err)
927
+		}
928
+	}
929
+	return nil
930
+}
... ...
@@ -5,6 +5,7 @@ import (
5 5
 	"fmt"
6 6
 	"net/http"
7 7
 	"path"
8
+	"strings"
8 9
 	"time"
9 10
 
10 11
 	"github.com/Sirupsen/logrus"
... ...
@@ -85,13 +86,13 @@ func changeTargetsDelegation(repo *tuf.Repo, c changelist.Change) error {
85 85
 			return err
86 86
 		}
87 87
 		if err == nil {
88
-			// role existed
89
-			return data.ErrInvalidRole{
90
-				Role:   c.Scope(),
91
-				Reason: "cannot create a role that already exists",
88
+			// role existed, attempt to merge paths and keys
89
+			if err := r.AddPaths(td.AddPaths); err != nil {
90
+				return err
92 91
 			}
92
+			return repo.UpdateDelegations(r, td.AddKeys)
93 93
 		}
94
-		// role doesn't exist, create brand new
94
+		// create brand new role
95 95
 		r, err = td.ToNewRole(c.Scope())
96 96
 		if err != nil {
97 97
 			return err
... ...
@@ -107,7 +108,12 @@ func changeTargetsDelegation(repo *tuf.Repo, c changelist.Change) error {
107 107
 		if err != nil {
108 108
 			return err
109 109
 		}
110
-		// role exists, merge
110
+		// If we specify the only keys left delete the role, else just delete specified keys
111
+		if strings.Join(r.KeyIDs, ";") == strings.Join(td.RemoveKeys, ";") && len(td.AddKeys) == 0 {
112
+			r := data.Role{Name: c.Scope()}
113
+			return repo.DeleteDelegation(r)
114
+		}
115
+		// if we aren't deleting and the role exists, merge
111 116
 		if err := r.AddPaths(td.AddPaths); err != nil {
112 117
 			return err
113 118
 		}
... ...
@@ -2,6 +2,14 @@ package notary
2 2
 
3 3
 // application wide constants
4 4
 const (
5
+	// MinThreshold requires a minimum of one threshold for roles; currently we do not support a higher threshold
6
+	MinThreshold = 1
7
+	// PrivKeyPerms are the file permissions to use when writing private keys to disk
5 8
 	PrivKeyPerms = 0700
9
+	// PubCertPerms are the file permissions to use when writing public certificates to disk
6 10
 	PubCertPerms = 0755
11
+	// Sha256HexSize is how big a Sha256 hex is in number of characters
12
+	Sha256HexSize = 64
13
+	// TrustedCertsDir is the directory, under the notary repo base directory, where trusted certs are stored
14
+	TrustedCertsDir = "trusted_certificates"
7 15
 )
... ...
@@ -69,8 +69,8 @@ func (cs *CryptoService) Create(role, algorithm string) (data.PublicKey, error)
69 69
 	if err != nil {
70 70
 		return nil, fmt.Errorf("failed to add key to filestore: %v", err)
71 71
 	}
72
-	return nil, fmt.Errorf("keystores would not accept new private keys for unknown reasons")
73 72
 
73
+	return nil, fmt.Errorf("keystores would not accept new private keys for unknown reasons")
74 74
 }
75 75
 
76 76
 // GetPrivateKey returns a private key and role if present by ID.
... ...
@@ -205,7 +205,8 @@ func GetLeafCerts(certs []*x509.Certificate) []*x509.Certificate {
205 205
 
206 206
 // GetIntermediateCerts parses a list of x509 Certificates and returns all of the
207 207
 // ones marked as a CA, to be used as intermediates
208
-func GetIntermediateCerts(certs []*x509.Certificate) (intCerts []*x509.Certificate) {
208
+func GetIntermediateCerts(certs []*x509.Certificate) []*x509.Certificate {
209
+	var intCerts []*x509.Certificate
209 210
 	for _, cert := range certs {
210 211
 		if cert.IsCA {
211 212
 			intCerts = append(intCerts, cert)
... ...
@@ -299,6 +300,44 @@ func ParsePEMPrivateKey(pemBytes []byte, passphrase string) (data.PrivateKey, er
299 299
 	}
300 300
 }
301 301
 
302
+// ParsePEMPublicKey returns a data.PublicKey from a PEM encoded public key or certificate.
303
+func ParsePEMPublicKey(pubKeyBytes []byte) (data.PublicKey, error) {
304
+	pemBlock, _ := pem.Decode(pubKeyBytes)
305
+	if pemBlock == nil {
306
+		return nil, errors.New("no valid public key found")
307
+	}
308
+
309
+	switch pemBlock.Type {
310
+	case "CERTIFICATE":
311
+		cert, err := x509.ParseCertificate(pemBlock.Bytes)
312
+		if err != nil {
313
+			return nil, fmt.Errorf("could not parse provided certificate: %v", err)
314
+		}
315
+		err = ValidateCertificate(cert)
316
+		if err != nil {
317
+			return nil, fmt.Errorf("invalid certificate: %v", err)
318
+		}
319
+		return CertToKey(cert), nil
320
+	default:
321
+		return nil, fmt.Errorf("unsupported PEM block type %q, expected certificate", pemBlock.Type)
322
+	}
323
+}
324
+
325
+// ValidateCertificate returns an error if the certificate is not valid for notary
326
+// Currently, this is only a time expiry check
327
+func ValidateCertificate(c *x509.Certificate) error {
328
+	if (c.NotBefore).After(c.NotAfter) {
329
+		return fmt.Errorf("certificate validity window is invalid")
330
+	}
331
+	now := time.Now()
332
+	tomorrow := now.AddDate(0, 0, 1)
333
+	// Give one day leeway on creation "before" time, check "after" against today
334
+	if (tomorrow).Before(c.NotBefore) || now.After(c.NotAfter) {
335
+		return fmt.Errorf("certificate is expired")
336
+	}
337
+	return nil
338
+}
339
+
302 340
 // GenerateRSAKey generates an RSA private key and returns a TUF PrivateKey
303 341
 func GenerateRSAKey(random io.Reader, bits int) (data.PrivateKey, error) {
304 342
 	rsaPrivKey, err := rsa.GenerateKey(random, bits)
... ...
@@ -532,3 +571,14 @@ func X509PublicKeyID(certPubKey data.PublicKey) (string, error) {
532 532
 
533 533
 	return key.ID(), nil
534 534
 }
535
+
536
+// FilterCertsExpiredSha1 can be used as the filter function to cert store
537
+// initializers to filter out all expired or SHA-1 certificate that we
538
+// shouldn't load.
539
+func FilterCertsExpiredSha1(cert *x509.Certificate) bool {
540
+	return !cert.IsCA &&
541
+		time.Now().Before(cert.NotAfter) &&
542
+		cert.SignatureAlgorithm != x509.SHA1WithRSA &&
543
+		cert.SignatureAlgorithm != x509.DSAWithSHA1 &&
544
+		cert.SignatureAlgorithm != x509.ECDSAWithSHA1
545
+}
... ...
@@ -54,7 +54,7 @@ func (c *Client) Update() error {
54 54
 	if err != nil {
55 55
 		logrus.Debug("Error occurred. Root will be downloaded and another update attempted")
56 56
 		if err := c.downloadRoot(); err != nil {
57
-			logrus.Error("client Update (Root):", err)
57
+			logrus.Error("Client Update (Root):", err)
58 58
 			return err
59 59
 		}
60 60
 		// If we error again, we now have the latest root and just want to fail
... ...
@@ -247,28 +247,27 @@ func (c *Client) downloadTimestamp() error {
247 247
 	// We may not have a cached timestamp if this is the first time
248 248
 	// we're interacting with the repo. This will result in the
249 249
 	// version being 0
250
-	var download bool
251
-	old := &data.Signed{}
252
-	version := 0
250
+	var (
251
+		saveToCache bool
252
+		old         *data.Signed
253
+		version     = 0
254
+	)
253 255
 	cachedTS, err := c.cache.GetMeta(role, maxSize)
254 256
 	if err == nil {
255
-		err := json.Unmarshal(cachedTS, old)
257
+		cached := &data.Signed{}
258
+		err := json.Unmarshal(cachedTS, cached)
256 259
 		if err == nil {
257
-			ts, err := data.TimestampFromSigned(old)
260
+			ts, err := data.TimestampFromSigned(cached)
258 261
 			if err == nil {
259 262
 				version = ts.Signed.Version
260 263
 			}
261
-		} else {
262
-			old = nil
264
+			old = cached
263 265
 		}
264 266
 	}
265 267
 	// unlike root, targets and snapshot, always try and download timestamps
266 268
 	// from remote, only using the cache one if we couldn't reach remote.
267 269
 	raw, s, err := c.downloadSigned(role, maxSize, nil)
268 270
 	if err != nil || len(raw) == 0 {
269
-		if err, ok := err.(store.ErrMetaNotFound); ok {
270
-			return err
271
-		}
272 271
 		if old == nil {
273 272
 			if err == nil {
274 273
 				// couldn't retrieve data from server and don't have valid
... ...
@@ -277,17 +276,18 @@ func (c *Client) downloadTimestamp() error {
277 277
 			}
278 278
 			return err
279 279
 		}
280
-		logrus.Debug("using cached timestamp")
280
+		logrus.Debug(err.Error())
281
+		logrus.Warn("Error while downloading remote metadata, using cached timestamp - this might not be the latest version available remotely")
281 282
 		s = old
282 283
 	} else {
283
-		download = true
284
+		saveToCache = true
284 285
 	}
285 286
 	err = signed.Verify(s, role, version, c.keysDB)
286 287
 	if err != nil {
287 288
 		return err
288 289
 	}
289 290
 	logrus.Debug("successfully verified timestamp")
290
-	if download {
291
+	if saveToCache {
291 292
 		c.cache.SetMeta(role, raw)
292 293
 	}
293 294
 	ts, err := data.TimestampFromSigned(s)
... ...
@@ -327,7 +327,7 @@ func (c *Client) downloadSnapshot() error {
327 327
 		}
328 328
 		err := json.Unmarshal(raw, old)
329 329
 		if err == nil {
330
-			snap, err := data.TimestampFromSigned(old)
330
+			snap, err := data.SnapshotFromSigned(old)
331 331
 			if err == nil {
332 332
 				version = snap.Signed.Version
333 333
 			} else {
... ...
@@ -14,7 +14,7 @@ import (
14 14
 
15 15
 	"github.com/Sirupsen/logrus"
16 16
 	"github.com/agl/ed25519"
17
-	"github.com/jfrazelle/go/canonical/json"
17
+	"github.com/docker/go/canonical/json"
18 18
 )
19 19
 
20 20
 // PublicKey is the necessary interface for public keys
... ...
@@ -2,6 +2,7 @@ package data
2 2
 
3 3
 import (
4 4
 	"fmt"
5
+	"github.com/Sirupsen/logrus"
5 6
 	"path"
6 7
 	"regexp"
7 8
 	"strings"
... ...
@@ -109,10 +110,7 @@ func NewRole(name string, threshold int, keyIDs, paths, pathHashPrefixes []strin
109 109
 	}
110 110
 	if IsDelegation(name) {
111 111
 		if len(paths) == 0 && len(pathHashPrefixes) == 0 {
112
-			return nil, ErrInvalidRole{
113
-				Role:   name,
114
-				Reason: "roles with no Paths and no PathHashPrefixes will never be able to publish content",
115
-			}
112
+			logrus.Debugf("role %s with no Paths and no PathHashPrefixes will never be able to publish content until one or more are added", name)
116 113
 		}
117 114
 	}
118 115
 	if threshold < 1 {
... ...
@@ -3,7 +3,7 @@ package data
3 3
 import (
4 4
 	"time"
5 5
 
6
-	"github.com/jfrazelle/go/canonical/json"
6
+	"github.com/docker/go/canonical/json"
7 7
 )
8 8
 
9 9
 // SignedRoot is a fully unpacked root.json
... ...
@@ -1,6 +1,6 @@
1 1
 package data
2 2
 
3
-import "github.com/jfrazelle/go/canonical/json"
3
+import "github.com/docker/go/canonical/json"
4 4
 
5 5
 // Serializer is an interface that can marshal and unmarshal TUF data.  This
6 6
 // is expected to be a canonical JSON marshaller
... ...
@@ -5,7 +5,7 @@ import (
5 5
 	"time"
6 6
 
7 7
 	"github.com/Sirupsen/logrus"
8
-	"github.com/jfrazelle/go/canonical/json"
8
+	"github.com/docker/go/canonical/json"
9 9
 )
10 10
 
11 11
 // SignedSnapshot is a fully unpacked snapshot.json
... ...
@@ -5,7 +5,7 @@ import (
5 5
 	"encoding/hex"
6 6
 	"errors"
7 7
 
8
-	"github.com/jfrazelle/go/canonical/json"
8
+	"github.com/docker/go/canonical/json"
9 9
 )
10 10
 
11 11
 // SignedTargets is a fully unpacked targets.json, or target delegation
... ...
@@ -4,7 +4,7 @@ import (
4 4
 	"bytes"
5 5
 	"time"
6 6
 
7
-	"github.com/jfrazelle/go/canonical/json"
7
+	"github.com/docker/go/canonical/json"
8 8
 )
9 9
 
10 10
 // SignedTimestamp is a fully unpacked timestamp.json
... ...
@@ -11,7 +11,7 @@ import (
11 11
 	"time"
12 12
 
13 13
 	"github.com/Sirupsen/logrus"
14
-	"github.com/jfrazelle/go/canonical/json"
14
+	"github.com/docker/go/canonical/json"
15 15
 )
16 16
 
17 17
 // SigAlgorithm for types of signatures
... ...
@@ -58,6 +58,15 @@ func (db *KeyDB) AddRole(r *data.Role) error {
58 58
 	return nil
59 59
 }
60 60
 
61
+// GetAllRoles gets all roles from the database
62
+func (db *KeyDB) GetAllRoles() []*data.Role {
63
+	roles := []*data.Role{}
64
+	for _, role := range db.roles {
65
+		roles = append(roles, role)
66
+	}
67
+	return roles
68
+}
69
+
61 70
 // GetKey pulls a key out of the database by its ID
62 71
 func (db *KeyDB) GetKey(id string) data.PublicKey {
63 72
 	return db.keys[id]
... ...
@@ -6,9 +6,9 @@ import (
6 6
 	"time"
7 7
 
8 8
 	"github.com/Sirupsen/logrus"
9
+	"github.com/docker/go/canonical/json"
9 10
 	"github.com/docker/notary/tuf/data"
10 11
 	"github.com/docker/notary/tuf/keys"
11
-	"github.com/jfrazelle/go/canonical/json"
12 12
 )
13 13
 
14 14
 // Various basic signing errors
... ...
@@ -39,11 +39,14 @@ type FilesystemStore struct {
39 39
 	targetsDir    string
40 40
 }
41 41
 
42
+func (f *FilesystemStore) getPath(name string) string {
43
+	fileName := fmt.Sprintf("%s.%s", name, f.metaExtension)
44
+	return filepath.Join(f.metaDir, fileName)
45
+}
46
+
42 47
 // GetMeta returns the meta for the given name (a role)
43 48
 func (f *FilesystemStore) GetMeta(name string, size int64) ([]byte, error) {
44
-	fileName := fmt.Sprintf("%s.%s", name, f.metaExtension)
45
-	path := filepath.Join(f.metaDir, fileName)
46
-	meta, err := ioutil.ReadFile(path)
49
+	meta, err := ioutil.ReadFile(f.getPath(name))
47 50
 	if err != nil {
48 51
 		if os.IsNotExist(err) {
49 52
 			err = ErrMetaNotFound{Resource: name}
... ...
@@ -66,21 +69,31 @@ func (f *FilesystemStore) SetMultiMeta(metas map[string][]byte) error {
66 66
 
67 67
 // SetMeta sets the meta for a single role
68 68
 func (f *FilesystemStore) SetMeta(name string, meta []byte) error {
69
-	fileName := fmt.Sprintf("%s.%s", name, f.metaExtension)
70
-	path := filepath.Join(f.metaDir, fileName)
69
+	fp := f.getPath(name)
71 70
 
72 71
 	// Ensures the parent directories of the file we are about to write exist
73
-	err := os.MkdirAll(filepath.Dir(path), 0700)
72
+	err := os.MkdirAll(filepath.Dir(fp), 0700)
74 73
 	if err != nil {
75 74
 		return err
76 75
 	}
77 76
 
78 77
 	// if something already exists, just delete it and re-write it
79
-	os.RemoveAll(path)
78
+	os.RemoveAll(fp)
80 79
 
81 80
 	// Write the file to disk
82
-	if err = ioutil.WriteFile(path, meta, 0600); err != nil {
81
+	if err = ioutil.WriteFile(fp, meta, 0600); err != nil {
83 82
 		return err
84 83
 	}
85 84
 	return nil
86 85
 }
86
+
87
+// RemoveAll clears the existing filestore by removing its base directory
88
+func (f *FilesystemStore) RemoveAll() error {
89
+	return os.RemoveAll(f.baseDir)
90
+}
91
+
92
+// RemoveMeta removes the metadata for a single role - if the metadata doesn't
93
+// exist, no error is returned
94
+func (f *FilesystemStore) RemoveMeta(name string) error {
95
+	return os.RemoveAll(f.getPath(name)) // RemoveAll succeeds if path doesn't exist
96
+}
... ...
@@ -85,6 +85,9 @@ func NewHTTPStore(baseURL, metaPrefix, metaExtension, targetsPrefix, keyExtensio
85 85
 	if !base.IsAbs() {
86 86
 		return nil, errors.New("HTTPStore requires an absolute baseURL")
87 87
 	}
88
+	if roundTrip == nil {
89
+		return &OfflineStore{}, nil
90
+	}
88 91
 	return &HTTPStore{
89 92
 		baseURL:       *base,
90 93
 		metaPrefix:    metaPrefix,
... ...
@@ -182,6 +185,12 @@ func (s HTTPStore) SetMeta(name string, blob []byte) error {
182 182
 	return translateStatusToError(resp, "POST "+name)
183 183
 }
184 184
 
185
+// RemoveMeta always fails, because we should never be able to delete metadata
186
+// remotely
187
+func (s HTTPStore) RemoveMeta(name string) error {
188
+	return ErrInvalidOperation{msg: "cannot delete metadata"}
189
+}
190
+
185 191
 // NewMultiPartMetaRequest builds a request with the provided metadata updates
186 192
 // in multipart form
187 193
 func NewMultiPartMetaRequest(url string, metas map[string][]byte) (*http.Request, error) {
... ...
@@ -227,6 +236,11 @@ func (s HTTPStore) SetMultiMeta(metas map[string][]byte) error {
227 227
 	return translateStatusToError(resp, "POST metadata endpoint")
228 228
 }
229 229
 
230
+// RemoveAll in the interface is not supported, admins should use the DeleteHandler endpoint directly to delete remote data for a GUN
231
+func (s HTTPStore) RemoveAll() error {
232
+	return errors.New("remove all functionality not supported for HTTPStore")
233
+}
234
+
230 235
 func (s HTTPStore) buildMetaURL(name string) (*url.URL, error) {
231 236
 	var filename string
232 237
 	if name != "" {
... ...
@@ -14,6 +14,8 @@ type MetadataStore interface {
14 14
 	GetMeta(name string, size int64) ([]byte, error)
15 15
 	SetMeta(name string, blob []byte) error
16 16
 	SetMultiMeta(map[string][]byte) error
17
+	RemoveAll() error
18
+	RemoveMeta(name string) error
17 19
 }
18 20
 
19 21
 // PublicKeyStore must be implemented by a key service
... ...
@@ -54,6 +54,13 @@ func (m *memoryStore) SetMultiMeta(metas map[string][]byte) error {
54 54
 	return nil
55 55
 }
56 56
 
57
+// RemoveMeta removes the metadata for a single role - if the metadata doesn't
58
+// exist, no error is returned
59
+func (m *memoryStore) RemoveMeta(name string) error {
60
+	delete(m.meta, name)
61
+	return nil
62
+}
63
+
57 64
 func (m *memoryStore) GetTarget(path string) (io.ReadCloser, error) {
58 65
 	return &utils.NoopCloser{Reader: bytes.NewReader(m.files[path])}, nil
59 66
 }
... ...
@@ -95,3 +102,11 @@ func (m *memoryStore) Commit(map[string][]byte, bool, map[string]data.Hashes) er
95 95
 func (m *memoryStore) GetKey(role string) ([]byte, error) {
96 96
 	return nil, fmt.Errorf("GetKey is not implemented for the memoryStore")
97 97
 }
98
+
99
+// Clear this existing memory store by setting this store as new empty one
100
+func (m *memoryStore) RemoveAll() error {
101
+	m.meta = make(map[string][]byte)
102
+	m.files = make(map[string][]byte)
103
+	m.keys = make(map[string][]data.PrivateKey)
104
+	return nil
105
+}
... ...
@@ -14,30 +14,40 @@ func (e ErrOffline) Error() string {
14 14
 var err = ErrOffline{}
15 15
 
16 16
 // OfflineStore is to be used as a placeholder for a nil store. It simply
17
-// return ErrOffline for every operation
17
+// returns ErrOffline for every operation
18 18
 type OfflineStore struct{}
19 19
 
20
-// GetMeta return ErrOffline
20
+// GetMeta returns ErrOffline
21 21
 func (es OfflineStore) GetMeta(name string, size int64) ([]byte, error) {
22 22
 	return nil, err
23 23
 }
24 24
 
25
-// SetMeta return ErrOffline
25
+// SetMeta returns ErrOffline
26 26
 func (es OfflineStore) SetMeta(name string, blob []byte) error {
27 27
 	return err
28 28
 }
29 29
 
30
-// SetMultiMeta return ErrOffline
30
+// SetMultiMeta returns ErrOffline
31 31
 func (es OfflineStore) SetMultiMeta(map[string][]byte) error {
32 32
 	return err
33 33
 }
34 34
 
35
-// GetKey return ErrOffline
35
+// RemoveMeta returns ErrOffline
36
+func (es OfflineStore) RemoveMeta(name string) error {
37
+	return err
38
+}
39
+
40
+// GetKey returns ErrOffline
36 41
 func (es OfflineStore) GetKey(role string) ([]byte, error) {
37 42
 	return nil, err
38 43
 }
39 44
 
40
-// GetTarget return ErrOffline
45
+// GetTarget returns ErrOffline
41 46
 func (es OfflineStore) GetTarget(path string) (io.ReadCloser, error) {
42 47
 	return nil, err
43 48
 }
49
+
50
+// RemoveAll return ErrOffline
51
+func (es OfflineStore) RemoveAll() error {
52
+	return err
53
+}
... ...
@@ -173,6 +173,11 @@ func (tr *Repo) RemoveBaseKeys(role string, keyIDs ...string) error {
173 173
 	return nil
174 174
 }
175 175
 
176
+// GetAllLoadedRoles returns a list of all role entries loaded in this TUF repo, could be empty
177
+func (tr *Repo) GetAllLoadedRoles() []*data.Role {
178
+	return tr.keysDB.GetAllRoles()
179
+}
180
+
176 181
 // GetDelegation finds the role entry representing the provided
177 182
 // role name or ErrInvalidRole
178 183
 func (tr *Repo) GetDelegation(role string) (*data.Role, error) {
179 184
deleted file mode 100644
... ...
@@ -1,27 +0,0 @@
1
-Copyright (c) 2012 The Go Authors. All rights reserved.
2
-
3
-Redistribution and use in source and binary forms, with or without
4
-modification, are permitted provided that the following conditions are
5
-met:
6
-
7
-   * Redistributions of source code must retain the above copyright
8
-notice, this list of conditions and the following disclaimer.
9
-   * Redistributions in binary form must reproduce the above
10
-copyright notice, this list of conditions and the following disclaimer
11
-in the documentation and/or other materials provided with the
12
-distribution.
13
-   * Neither the name of Google Inc. nor the names of its
14
-contributors may be used to endorse or promote products derived from
15
-this software without specific prior written permission.
16
-
17
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 1
deleted file mode 100644
... ...
@@ -1,1094 +0,0 @@
1
-// Copyright 2010 The Go Authors. All rights reserved.
2
-// Use of this source code is governed by a BSD-style
3
-// license that can be found in the LICENSE file.
4
-
5
-// Represents JSON data structure using native Go types: booleans, floats,
6
-// strings, arrays, and maps.
7
-
8
-package json
9
-
10
-import (
11
-	"bytes"
12
-	"encoding"
13
-	"encoding/base64"
14
-	"errors"
15
-	"fmt"
16
-	"reflect"
17
-	"runtime"
18
-	"strconv"
19
-	"unicode"
20
-	"unicode/utf16"
21
-	"unicode/utf8"
22
-)
23
-
24
-// Unmarshal parses the JSON-encoded data and stores the result
25
-// in the value pointed to by v.
26
-//
27
-// Unmarshal uses the inverse of the encodings that
28
-// Marshal uses, allocating maps, slices, and pointers as necessary,
29
-// with the following additional rules:
30
-//
31
-// To unmarshal JSON into a pointer, Unmarshal first handles the case of
32
-// the JSON being the JSON literal null.  In that case, Unmarshal sets
33
-// the pointer to nil.  Otherwise, Unmarshal unmarshals the JSON into
34
-// the value pointed at by the pointer.  If the pointer is nil, Unmarshal
35
-// allocates a new value for it to point to.
36
-//
37
-// To unmarshal JSON into a struct, Unmarshal matches incoming object
38
-// keys to the keys used by Marshal (either the struct field name or its tag),
39
-// preferring an exact match but also accepting a case-insensitive match.
40
-//
41
-// To unmarshal JSON into an interface value,
42
-// Unmarshal stores one of these in the interface value:
43
-//
44
-//	bool, for JSON booleans
45
-//	float64, for JSON numbers
46
-//	string, for JSON strings
47
-//	[]interface{}, for JSON arrays
48
-//	map[string]interface{}, for JSON objects
49
-//	nil for JSON null
50
-//
51
-// To unmarshal a JSON array into a slice, Unmarshal resets the slice to nil
52
-// and then appends each element to the slice.
53
-//
54
-// To unmarshal a JSON object into a map, Unmarshal replaces the map
55
-// with an empty map and then adds key-value pairs from the object to
56
-// the map.
57
-//
58
-// If a JSON value is not appropriate for a given target type,
59
-// or if a JSON number overflows the target type, Unmarshal
60
-// skips that field and completes the unmarshalling as best it can.
61
-// If no more serious errors are encountered, Unmarshal returns
62
-// an UnmarshalTypeError describing the earliest such error.
63
-//
64
-// The JSON null value unmarshals into an interface, map, pointer, or slice
65
-// by setting that Go value to nil. Because null is often used in JSON to mean
66
-// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
67
-// on the value and produces no error.
68
-//
69
-// When unmarshaling quoted strings, invalid UTF-8 or
70
-// invalid UTF-16 surrogate pairs are not treated as an error.
71
-// Instead, they are replaced by the Unicode replacement
72
-// character U+FFFD.
73
-//
74
-func Unmarshal(data []byte, v interface{}) error {
75
-	// Check for well-formedness.
76
-	// Avoids filling out half a data structure
77
-	// before discovering a JSON syntax error.
78
-	var d decodeState
79
-	err := checkValid(data, &d.scan)
80
-	if err != nil {
81
-		return err
82
-	}
83
-
84
-	d.init(data)
85
-	return d.unmarshal(v)
86
-}
87
-
88
-// Unmarshaler is the interface implemented by objects
89
-// that can unmarshal a JSON description of themselves.
90
-// The input can be assumed to be a valid encoding of
91
-// a JSON value. UnmarshalJSON must copy the JSON data
92
-// if it wishes to retain the data after returning.
93
-type Unmarshaler interface {
94
-	UnmarshalJSON([]byte) error
95
-}
96
-
97
-// An UnmarshalTypeError describes a JSON value that was
98
-// not appropriate for a value of a specific Go type.
99
-type UnmarshalTypeError struct {
100
-	Value  string       // description of JSON value - "bool", "array", "number -5"
101
-	Type   reflect.Type // type of Go value it could not be assigned to
102
-	Offset int64        // error occurred after reading Offset bytes
103
-}
104
-
105
-func (e *UnmarshalTypeError) Error() string {
106
-	return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
107
-}
108
-
109
-// An UnmarshalFieldError describes a JSON object key that
110
-// led to an unexported (and therefore unwritable) struct field.
111
-// (No longer used; kept for compatibility.)
112
-type UnmarshalFieldError struct {
113
-	Key   string
114
-	Type  reflect.Type
115
-	Field reflect.StructField
116
-}
117
-
118
-func (e *UnmarshalFieldError) Error() string {
119
-	return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
120
-}
121
-
122
-// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
123
-// (The argument to Unmarshal must be a non-nil pointer.)
124
-type InvalidUnmarshalError struct {
125
-	Type reflect.Type
126
-}
127
-
128
-func (e *InvalidUnmarshalError) Error() string {
129
-	if e.Type == nil {
130
-		return "json: Unmarshal(nil)"
131
-	}
132
-
133
-	if e.Type.Kind() != reflect.Ptr {
134
-		return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
135
-	}
136
-	return "json: Unmarshal(nil " + e.Type.String() + ")"
137
-}
138
-
139
-func (d *decodeState) unmarshal(v interface{}) (err error) {
140
-	defer func() {
141
-		if r := recover(); r != nil {
142
-			if _, ok := r.(runtime.Error); ok {
143
-				panic(r)
144
-			}
145
-			err = r.(error)
146
-		}
147
-	}()
148
-
149
-	rv := reflect.ValueOf(v)
150
-	if rv.Kind() != reflect.Ptr || rv.IsNil() {
151
-		return &InvalidUnmarshalError{reflect.TypeOf(v)}
152
-	}
153
-
154
-	d.scan.reset()
155
-	// We decode rv not rv.Elem because the Unmarshaler interface
156
-	// test must be applied at the top level of the value.
157
-	d.value(rv)
158
-	return d.savedError
159
-}
160
-
161
-// A Number represents a JSON number literal.
162
-type Number string
163
-
164
-// String returns the literal text of the number.
165
-func (n Number) String() string { return string(n) }
166
-
167
-// Float64 returns the number as a float64.
168
-func (n Number) Float64() (float64, error) {
169
-	return strconv.ParseFloat(string(n), 64)
170
-}
171
-
172
-// Int64 returns the number as an int64.
173
-func (n Number) Int64() (int64, error) {
174
-	return strconv.ParseInt(string(n), 10, 64)
175
-}
176
-
177
-// decodeState represents the state while decoding a JSON value.
178
-type decodeState struct {
179
-	data       []byte
180
-	off        int // read offset in data
181
-	scan       scanner
182
-	nextscan   scanner // for calls to nextValue
183
-	savedError error
184
-	useNumber  bool
185
-	canonical  bool
186
-}
187
-
188
-// errPhase is used for errors that should not happen unless
189
-// there is a bug in the JSON decoder or something is editing
190
-// the data slice while the decoder executes.
191
-var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
192
-
193
-func (d *decodeState) init(data []byte) *decodeState {
194
-	d.data = data
195
-	d.off = 0
196
-	d.savedError = nil
197
-	return d
198
-}
199
-
200
-// error aborts the decoding by panicking with err.
201
-func (d *decodeState) error(err error) {
202
-	panic(err)
203
-}
204
-
205
-// saveError saves the first err it is called with,
206
-// for reporting at the end of the unmarshal.
207
-func (d *decodeState) saveError(err error) {
208
-	if d.savedError == nil {
209
-		d.savedError = err
210
-	}
211
-}
212
-
213
-// next cuts off and returns the next full JSON value in d.data[d.off:].
214
-// The next value is known to be an object or array, not a literal.
215
-func (d *decodeState) next() []byte {
216
-	c := d.data[d.off]
217
-	item, rest, err := nextValue(d.data[d.off:], &d.nextscan)
218
-	if err != nil {
219
-		d.error(err)
220
-	}
221
-	d.off = len(d.data) - len(rest)
222
-
223
-	// Our scanner has seen the opening brace/bracket
224
-	// and thinks we're still in the middle of the object.
225
-	// invent a closing brace/bracket to get it out.
226
-	if c == '{' {
227
-		d.scan.step(&d.scan, '}')
228
-	} else {
229
-		d.scan.step(&d.scan, ']')
230
-	}
231
-
232
-	return item
233
-}
234
-
235
-// scanWhile processes bytes in d.data[d.off:] until it
236
-// receives a scan code not equal to op.
237
-// It updates d.off and returns the new scan code.
238
-func (d *decodeState) scanWhile(op int) int {
239
-	var newOp int
240
-	for {
241
-		if d.off >= len(d.data) {
242
-			newOp = d.scan.eof()
243
-			d.off = len(d.data) + 1 // mark processed EOF with len+1
244
-		} else {
245
-			c := int(d.data[d.off])
246
-			d.off++
247
-			newOp = d.scan.step(&d.scan, c)
248
-		}
249
-		if newOp != op {
250
-			break
251
-		}
252
-	}
253
-	return newOp
254
-}
255
-
256
-// value decodes a JSON value from d.data[d.off:] into the value.
257
-// it updates d.off to point past the decoded value.
258
-func (d *decodeState) value(v reflect.Value) {
259
-	if !v.IsValid() {
260
-		_, rest, err := nextValue(d.data[d.off:], &d.nextscan)
261
-		if err != nil {
262
-			d.error(err)
263
-		}
264
-		d.off = len(d.data) - len(rest)
265
-
266
-		// d.scan thinks we're still at the beginning of the item.
267
-		// Feed in an empty string - the shortest, simplest value -
268
-		// so that it knows we got to the end of the value.
269
-		if d.scan.redo {
270
-			// rewind.
271
-			d.scan.redo = false
272
-			d.scan.step = stateBeginValue
273
-		}
274
-		d.scan.step(&d.scan, '"')
275
-		d.scan.step(&d.scan, '"')
276
-
277
-		n := len(d.scan.parseState)
278
-		if n > 0 && d.scan.parseState[n-1] == parseObjectKey {
279
-			// d.scan thinks we just read an object key; finish the object
280
-			d.scan.step(&d.scan, ':')
281
-			d.scan.step(&d.scan, '"')
282
-			d.scan.step(&d.scan, '"')
283
-			d.scan.step(&d.scan, '}')
284
-		}
285
-
286
-		return
287
-	}
288
-
289
-	switch op := d.scanWhile(scanSkipSpace); op {
290
-	default:
291
-		d.error(errPhase)
292
-
293
-	case scanBeginArray:
294
-		d.array(v)
295
-
296
-	case scanBeginObject:
297
-		d.object(v)
298
-
299
-	case scanBeginLiteral:
300
-		d.literal(v)
301
-	}
302
-}
303
-
304
-type unquotedValue struct{}
305
-
306
-// valueQuoted is like value but decodes a
307
-// quoted string literal or literal null into an interface value.
308
-// If it finds anything other than a quoted string literal or null,
309
-// valueQuoted returns unquotedValue{}.
310
-func (d *decodeState) valueQuoted() interface{} {
311
-	switch op := d.scanWhile(scanSkipSpace); op {
312
-	default:
313
-		d.error(errPhase)
314
-
315
-	case scanBeginArray:
316
-		d.array(reflect.Value{})
317
-
318
-	case scanBeginObject:
319
-		d.object(reflect.Value{})
320
-
321
-	case scanBeginLiteral:
322
-		switch v := d.literalInterface().(type) {
323
-		case nil, string:
324
-			return v
325
-		}
326
-	}
327
-	return unquotedValue{}
328
-}
329
-
330
-// indirect walks down v allocating pointers as needed,
331
-// until it gets to a non-pointer.
332
-// if it encounters an Unmarshaler, indirect stops and returns that.
333
-// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
334
-func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
335
-	// If v is a named type and is addressable,
336
-	// start with its address, so that if the type has pointer methods,
337
-	// we find them.
338
-	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
339
-		v = v.Addr()
340
-	}
341
-	for {
342
-		// Load value from interface, but only if the result will be
343
-		// usefully addressable.
344
-		if v.Kind() == reflect.Interface && !v.IsNil() {
345
-			e := v.Elem()
346
-			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
347
-				v = e
348
-				continue
349
-			}
350
-		}
351
-
352
-		if v.Kind() != reflect.Ptr {
353
-			break
354
-		}
355
-
356
-		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
357
-			break
358
-		}
359
-		if v.IsNil() {
360
-			v.Set(reflect.New(v.Type().Elem()))
361
-		}
362
-		if v.Type().NumMethod() > 0 {
363
-			if u, ok := v.Interface().(Unmarshaler); ok {
364
-				return u, nil, reflect.Value{}
365
-			}
366
-			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
367
-				return nil, u, reflect.Value{}
368
-			}
369
-		}
370
-		v = v.Elem()
371
-	}
372
-	return nil, nil, v
373
-}
374
-
375
-// array consumes an array from d.data[d.off-1:], decoding into the value v.
376
-// the first byte of the array ('[') has been read already.
377
-func (d *decodeState) array(v reflect.Value) {
378
-	// Check for unmarshaler.
379
-	u, ut, pv := d.indirect(v, false)
380
-	if u != nil {
381
-		d.off--
382
-		err := u.UnmarshalJSON(d.next())
383
-		if err != nil {
384
-			d.error(err)
385
-		}
386
-		return
387
-	}
388
-	if ut != nil {
389
-		d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
390
-		d.off--
391
-		d.next()
392
-		return
393
-	}
394
-
395
-	v = pv
396
-
397
-	// Check type of target.
398
-	switch v.Kind() {
399
-	case reflect.Interface:
400
-		if v.NumMethod() == 0 {
401
-			// Decoding into nil interface?  Switch to non-reflect code.
402
-			v.Set(reflect.ValueOf(d.arrayInterface()))
403
-			return
404
-		}
405
-		// Otherwise it's invalid.
406
-		fallthrough
407
-	default:
408
-		d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
409
-		d.off--
410
-		d.next()
411
-		return
412
-	case reflect.Array:
413
-	case reflect.Slice:
414
-		break
415
-	}
416
-
417
-	i := 0
418
-	for {
419
-		// Look ahead for ] - can only happen on first iteration.
420
-		op := d.scanWhile(scanSkipSpace)
421
-		if op == scanEndArray {
422
-			break
423
-		}
424
-
425
-		// Back up so d.value can have the byte we just read.
426
-		d.off--
427
-		d.scan.undo(op)
428
-
429
-		// Get element of array, growing if necessary.
430
-		if v.Kind() == reflect.Slice {
431
-			// Grow slice if necessary
432
-			if i >= v.Cap() {
433
-				newcap := v.Cap() + v.Cap()/2
434
-				if newcap < 4 {
435
-					newcap = 4
436
-				}
437
-				newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
438
-				reflect.Copy(newv, v)
439
-				v.Set(newv)
440
-			}
441
-			if i >= v.Len() {
442
-				v.SetLen(i + 1)
443
-			}
444
-		}
445
-
446
-		if i < v.Len() {
447
-			// Decode into element.
448
-			d.value(v.Index(i))
449
-		} else {
450
-			// Ran out of fixed array: skip.
451
-			d.value(reflect.Value{})
452
-		}
453
-		i++
454
-
455
-		// Next token must be , or ].
456
-		op = d.scanWhile(scanSkipSpace)
457
-		if op == scanEndArray {
458
-			break
459
-		}
460
-		if op != scanArrayValue {
461
-			d.error(errPhase)
462
-		}
463
-	}
464
-
465
-	if i < v.Len() {
466
-		if v.Kind() == reflect.Array {
467
-			// Array.  Zero the rest.
468
-			z := reflect.Zero(v.Type().Elem())
469
-			for ; i < v.Len(); i++ {
470
-				v.Index(i).Set(z)
471
-			}
472
-		} else {
473
-			v.SetLen(i)
474
-		}
475
-	}
476
-	if i == 0 && v.Kind() == reflect.Slice {
477
-		v.Set(reflect.MakeSlice(v.Type(), 0, 0))
478
-	}
479
-}
480
-
481
-var nullLiteral = []byte("null")
482
-
483
-// object consumes an object from d.data[d.off-1:], decoding into the value v.
484
-// the first byte ('{') of the object has been read already.
485
-func (d *decodeState) object(v reflect.Value) {
486
-	// Check for unmarshaler.
487
-	u, ut, pv := d.indirect(v, false)
488
-	if u != nil {
489
-		d.off--
490
-		err := u.UnmarshalJSON(d.next())
491
-		if err != nil {
492
-			d.error(err)
493
-		}
494
-		return
495
-	}
496
-	if ut != nil {
497
-		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
498
-		d.off--
499
-		d.next() // skip over { } in input
500
-		return
501
-	}
502
-	v = pv
503
-
504
-	// Decoding into nil interface?  Switch to non-reflect code.
505
-	if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
506
-		v.Set(reflect.ValueOf(d.objectInterface()))
507
-		return
508
-	}
509
-
510
-	// Check type of target: struct or map[string]T
511
-	switch v.Kind() {
512
-	case reflect.Map:
513
-		// map must have string kind
514
-		t := v.Type()
515
-		if t.Key().Kind() != reflect.String {
516
-			d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
517
-			d.off--
518
-			d.next() // skip over { } in input
519
-			return
520
-		}
521
-		if v.IsNil() {
522
-			v.Set(reflect.MakeMap(t))
523
-		}
524
-	case reflect.Struct:
525
-
526
-	default:
527
-		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
528
-		d.off--
529
-		d.next() // skip over { } in input
530
-		return
531
-	}
532
-
533
-	var mapElem reflect.Value
534
-
535
-	for {
536
-		// Read opening " of string key or closing }.
537
-		op := d.scanWhile(scanSkipSpace)
538
-		if op == scanEndObject {
539
-			// closing } - can only happen on first iteration.
540
-			break
541
-		}
542
-		if op != scanBeginLiteral {
543
-			d.error(errPhase)
544
-		}
545
-
546
-		// Read key.
547
-		start := d.off - 1
548
-		op = d.scanWhile(scanContinue)
549
-		item := d.data[start : d.off-1]
550
-		key, ok := unquoteBytes(item)
551
-		if !ok {
552
-			d.error(errPhase)
553
-		}
554
-
555
-		// Figure out field corresponding to key.
556
-		var subv reflect.Value
557
-		destring := false // whether the value is wrapped in a string to be decoded first
558
-
559
-		if v.Kind() == reflect.Map {
560
-			elemType := v.Type().Elem()
561
-			if !mapElem.IsValid() {
562
-				mapElem = reflect.New(elemType).Elem()
563
-			} else {
564
-				mapElem.Set(reflect.Zero(elemType))
565
-			}
566
-			subv = mapElem
567
-		} else {
568
-			var f *field
569
-			fields := cachedTypeFields(v.Type(), false)
570
-			for i := range fields {
571
-				ff := &fields[i]
572
-				if bytes.Equal(ff.nameBytes, key) {
573
-					f = ff
574
-					break
575
-				}
576
-				if f == nil && ff.equalFold(ff.nameBytes, key) {
577
-					f = ff
578
-				}
579
-			}
580
-			if f != nil {
581
-				subv = v
582
-				destring = f.quoted
583
-				for _, i := range f.index {
584
-					if subv.Kind() == reflect.Ptr {
585
-						if subv.IsNil() {
586
-							subv.Set(reflect.New(subv.Type().Elem()))
587
-						}
588
-						subv = subv.Elem()
589
-					}
590
-					subv = subv.Field(i)
591
-				}
592
-			}
593
-		}
594
-
595
-		// Read : before value.
596
-		if op == scanSkipSpace {
597
-			op = d.scanWhile(scanSkipSpace)
598
-		}
599
-		if op != scanObjectKey {
600
-			d.error(errPhase)
601
-		}
602
-
603
-		// Read value.
604
-		if destring {
605
-			switch qv := d.valueQuoted().(type) {
606
-			case nil:
607
-				d.literalStore(nullLiteral, subv, false)
608
-			case string:
609
-				d.literalStore([]byte(qv), subv, true)
610
-			default:
611
-				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
612
-			}
613
-		} else {
614
-			d.value(subv)
615
-		}
616
-
617
-		// Write value back to map;
618
-		// if using struct, subv points into struct already.
619
-		if v.Kind() == reflect.Map {
620
-			kv := reflect.ValueOf(key).Convert(v.Type().Key())
621
-			v.SetMapIndex(kv, subv)
622
-		}
623
-
624
-		// Next token must be , or }.
625
-		op = d.scanWhile(scanSkipSpace)
626
-		if op == scanEndObject {
627
-			break
628
-		}
629
-		if op != scanObjectValue {
630
-			d.error(errPhase)
631
-		}
632
-	}
633
-}
634
-
635
-// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
636
-// The first byte of the literal has been read already
637
-// (that's how the caller knows it's a literal).
638
-func (d *decodeState) literal(v reflect.Value) {
639
-	// All bytes inside literal return scanContinue op code.
640
-	start := d.off - 1
641
-	op := d.scanWhile(scanContinue)
642
-
643
-	// Scan read one byte too far; back up.
644
-	d.off--
645
-	d.scan.undo(op)
646
-
647
-	d.literalStore(d.data[start:d.off], v, false)
648
-}
649
-
650
-// convertNumber converts the number literal s to a float64 or a Number
651
-// depending on the setting of d.useNumber.
652
-func (d *decodeState) convertNumber(s string) (interface{}, error) {
653
-	if d.useNumber {
654
-		return Number(s), nil
655
-	}
656
-	f, err := strconv.ParseFloat(s, 64)
657
-	if err != nil {
658
-		return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
659
-	}
660
-	return f, nil
661
-}
662
-
663
-var numberType = reflect.TypeOf(Number(""))
664
-
665
-// literalStore decodes a literal stored in item into v.
666
-//
667
-// fromQuoted indicates whether this literal came from unwrapping a
668
-// string from the ",string" struct tag option. this is used only to
669
-// produce more helpful error messages.
670
-func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
671
-	// Check for unmarshaler.
672
-	if len(item) == 0 {
673
-		//Empty string given
674
-		d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
675
-		return
676
-	}
677
-	wantptr := item[0] == 'n' // null
678
-	u, ut, pv := d.indirect(v, wantptr)
679
-	if u != nil {
680
-		err := u.UnmarshalJSON(item)
681
-		if err != nil {
682
-			d.error(err)
683
-		}
684
-		return
685
-	}
686
-	if ut != nil {
687
-		if item[0] != '"' {
688
-			if fromQuoted {
689
-				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
690
-			} else {
691
-				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
692
-			}
693
-			return
694
-		}
695
-		s, ok := unquoteBytes(item)
696
-		if !ok {
697
-			if fromQuoted {
698
-				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
699
-			} else {
700
-				d.error(errPhase)
701
-			}
702
-		}
703
-		err := ut.UnmarshalText(s)
704
-		if err != nil {
705
-			d.error(err)
706
-		}
707
-		return
708
-	}
709
-
710
-	v = pv
711
-
712
-	switch c := item[0]; c {
713
-	case 'n': // null
714
-		switch v.Kind() {
715
-		case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
716
-			v.Set(reflect.Zero(v.Type()))
717
-			// otherwise, ignore null for primitives/string
718
-		}
719
-	case 't', 'f': // true, false
720
-		value := c == 't'
721
-		switch v.Kind() {
722
-		default:
723
-			if fromQuoted {
724
-				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
725
-			} else {
726
-				d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
727
-			}
728
-		case reflect.Bool:
729
-			v.SetBool(value)
730
-		case reflect.Interface:
731
-			if v.NumMethod() == 0 {
732
-				v.Set(reflect.ValueOf(value))
733
-			} else {
734
-				d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
735
-			}
736
-		}
737
-
738
-	case '"': // string
739
-		s, ok := unquoteBytes(item)
740
-		if !ok {
741
-			if fromQuoted {
742
-				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
743
-			} else {
744
-				d.error(errPhase)
745
-			}
746
-		}
747
-		switch v.Kind() {
748
-		default:
749
-			d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
750
-		case reflect.Slice:
751
-			if v.Type().Elem().Kind() != reflect.Uint8 {
752
-				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
753
-				break
754
-			}
755
-			b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
756
-			n, err := base64.StdEncoding.Decode(b, s)
757
-			if err != nil {
758
-				d.saveError(err)
759
-				break
760
-			}
761
-			v.Set(reflect.ValueOf(b[0:n]))
762
-		case reflect.String:
763
-			v.SetString(string(s))
764
-		case reflect.Interface:
765
-			if v.NumMethod() == 0 {
766
-				v.Set(reflect.ValueOf(string(s)))
767
-			} else {
768
-				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
769
-			}
770
-		}
771
-
772
-	default: // number
773
-		if c != '-' && (c < '0' || c > '9') {
774
-			if fromQuoted {
775
-				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
776
-			} else {
777
-				d.error(errPhase)
778
-			}
779
-		}
780
-		s := string(item)
781
-		switch v.Kind() {
782
-		default:
783
-			if v.Kind() == reflect.String && v.Type() == numberType {
784
-				v.SetString(s)
785
-				break
786
-			}
787
-			if fromQuoted {
788
-				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
789
-			} else {
790
-				d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
791
-			}
792
-		case reflect.Interface:
793
-			n, err := d.convertNumber(s)
794
-			if err != nil {
795
-				d.saveError(err)
796
-				break
797
-			}
798
-			if v.NumMethod() != 0 {
799
-				d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
800
-				break
801
-			}
802
-			v.Set(reflect.ValueOf(n))
803
-
804
-		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
805
-			n, err := strconv.ParseInt(s, 10, 64)
806
-			if err != nil || v.OverflowInt(n) {
807
-				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
808
-				break
809
-			}
810
-			v.SetInt(n)
811
-
812
-		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
813
-			n, err := strconv.ParseUint(s, 10, 64)
814
-			if err != nil || v.OverflowUint(n) {
815
-				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
816
-				break
817
-			}
818
-			v.SetUint(n)
819
-
820
-		case reflect.Float32, reflect.Float64:
821
-			n, err := strconv.ParseFloat(s, v.Type().Bits())
822
-			if err != nil || v.OverflowFloat(n) {
823
-				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
824
-				break
825
-			}
826
-			v.SetFloat(n)
827
-		}
828
-	}
829
-}
830
-
831
-// The xxxInterface routines build up a value to be stored
832
-// in an empty interface.  They are not strictly necessary,
833
-// but they avoid the weight of reflection in this common case.
834
-
835
-// valueInterface is like value but returns interface{}
836
-func (d *decodeState) valueInterface() interface{} {
837
-	switch d.scanWhile(scanSkipSpace) {
838
-	default:
839
-		d.error(errPhase)
840
-		panic("unreachable")
841
-	case scanBeginArray:
842
-		return d.arrayInterface()
843
-	case scanBeginObject:
844
-		return d.objectInterface()
845
-	case scanBeginLiteral:
846
-		return d.literalInterface()
847
-	}
848
-}
849
-
850
-// arrayInterface is like array but returns []interface{}.
851
-func (d *decodeState) arrayInterface() []interface{} {
852
-	var v = make([]interface{}, 0)
853
-	for {
854
-		// Look ahead for ] - can only happen on first iteration.
855
-		op := d.scanWhile(scanSkipSpace)
856
-		if op == scanEndArray {
857
-			break
858
-		}
859
-
860
-		// Back up so d.value can have the byte we just read.
861
-		d.off--
862
-		d.scan.undo(op)
863
-
864
-		v = append(v, d.valueInterface())
865
-
866
-		// Next token must be , or ].
867
-		op = d.scanWhile(scanSkipSpace)
868
-		if op == scanEndArray {
869
-			break
870
-		}
871
-		if op != scanArrayValue {
872
-			d.error(errPhase)
873
-		}
874
-	}
875
-	return v
876
-}
877
-
878
-// objectInterface is like object but returns map[string]interface{}.
879
-func (d *decodeState) objectInterface() map[string]interface{} {
880
-	m := make(map[string]interface{})
881
-	for {
882
-		// Read opening " of string key or closing }.
883
-		op := d.scanWhile(scanSkipSpace)
884
-		if op == scanEndObject {
885
-			// closing } - can only happen on first iteration.
886
-			break
887
-		}
888
-		if op != scanBeginLiteral {
889
-			d.error(errPhase)
890
-		}
891
-
892
-		// Read string key.
893
-		start := d.off - 1
894
-		op = d.scanWhile(scanContinue)
895
-		item := d.data[start : d.off-1]
896
-		key, ok := unquote(item)
897
-		if !ok {
898
-			d.error(errPhase)
899
-		}
900
-
901
-		// Read : before value.
902
-		if op == scanSkipSpace {
903
-			op = d.scanWhile(scanSkipSpace)
904
-		}
905
-		if op != scanObjectKey {
906
-			d.error(errPhase)
907
-		}
908
-
909
-		// Read value.
910
-		m[key] = d.valueInterface()
911
-
912
-		// Next token must be , or }.
913
-		op = d.scanWhile(scanSkipSpace)
914
-		if op == scanEndObject {
915
-			break
916
-		}
917
-		if op != scanObjectValue {
918
-			d.error(errPhase)
919
-		}
920
-	}
921
-	return m
922
-}
923
-
924
-// literalInterface is like literal but returns an interface value.
925
-func (d *decodeState) literalInterface() interface{} {
926
-	// All bytes inside literal return scanContinue op code.
927
-	start := d.off - 1
928
-	op := d.scanWhile(scanContinue)
929
-
930
-	// Scan read one byte too far; back up.
931
-	d.off--
932
-	d.scan.undo(op)
933
-	item := d.data[start:d.off]
934
-
935
-	switch c := item[0]; c {
936
-	case 'n': // null
937
-		return nil
938
-
939
-	case 't', 'f': // true, false
940
-		return c == 't'
941
-
942
-	case '"': // string
943
-		s, ok := unquote(item)
944
-		if !ok {
945
-			d.error(errPhase)
946
-		}
947
-		return s
948
-
949
-	default: // number
950
-		if c != '-' && (c < '0' || c > '9') {
951
-			d.error(errPhase)
952
-		}
953
-		n, err := d.convertNumber(string(item))
954
-		if err != nil {
955
-			d.saveError(err)
956
-		}
957
-		return n
958
-	}
959
-}
960
-
961
-// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
962
-// or it returns -1.
963
-func getu4(s []byte) rune {
964
-	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
965
-		return -1
966
-	}
967
-	r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
968
-	if err != nil {
969
-		return -1
970
-	}
971
-	return rune(r)
972
-}
973
-
974
-// unquote converts a quoted JSON string literal s into an actual string t.
975
-// The rules are different than for Go, so cannot use strconv.Unquote.
976
-func unquote(s []byte) (t string, ok bool) {
977
-	s, ok = unquoteBytes(s)
978
-	t = string(s)
979
-	return
980
-}
981
-
982
-func unquoteBytes(s []byte) (t []byte, ok bool) {
983
-	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
984
-		return
985
-	}
986
-	s = s[1 : len(s)-1]
987
-
988
-	// Check for unusual characters. If there are none,
989
-	// then no unquoting is needed, so return a slice of the
990
-	// original bytes.
991
-	r := 0
992
-	for r < len(s) {
993
-		c := s[r]
994
-		if c == '\\' || c == '"' || c < ' ' {
995
-			break
996
-		}
997
-		if c < utf8.RuneSelf {
998
-			r++
999
-			continue
1000
-		}
1001
-		rr, size := utf8.DecodeRune(s[r:])
1002
-		if rr == utf8.RuneError && size == 1 {
1003
-			break
1004
-		}
1005
-		r += size
1006
-	}
1007
-	if r == len(s) {
1008
-		return s, true
1009
-	}
1010
-
1011
-	b := make([]byte, len(s)+2*utf8.UTFMax)
1012
-	w := copy(b, s[0:r])
1013
-	for r < len(s) {
1014
-		// Out of room?  Can only happen if s is full of
1015
-		// malformed UTF-8 and we're replacing each
1016
-		// byte with RuneError.
1017
-		if w >= len(b)-2*utf8.UTFMax {
1018
-			nb := make([]byte, (len(b)+utf8.UTFMax)*2)
1019
-			copy(nb, b[0:w])
1020
-			b = nb
1021
-		}
1022
-		switch c := s[r]; {
1023
-		case c == '\\':
1024
-			r++
1025
-			if r >= len(s) {
1026
-				return
1027
-			}
1028
-			switch s[r] {
1029
-			default:
1030
-				return
1031
-			case '"', '\\', '/', '\'':
1032
-				b[w] = s[r]
1033
-				r++
1034
-				w++
1035
-			case 'b':
1036
-				b[w] = '\b'
1037
-				r++
1038
-				w++
1039
-			case 'f':
1040
-				b[w] = '\f'
1041
-				r++
1042
-				w++
1043
-			case 'n':
1044
-				b[w] = '\n'
1045
-				r++
1046
-				w++
1047
-			case 'r':
1048
-				b[w] = '\r'
1049
-				r++
1050
-				w++
1051
-			case 't':
1052
-				b[w] = '\t'
1053
-				r++
1054
-				w++
1055
-			case 'u':
1056
-				r--
1057
-				rr := getu4(s[r:])
1058
-				if rr < 0 {
1059
-					return
1060
-				}
1061
-				r += 6
1062
-				if utf16.IsSurrogate(rr) {
1063
-					rr1 := getu4(s[r:])
1064
-					if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
1065
-						// A valid pair; consume.
1066
-						r += 6
1067
-						w += utf8.EncodeRune(b[w:], dec)
1068
-						break
1069
-					}
1070
-					// Invalid surrogate; fall back to replacement rune.
1071
-					rr = unicode.ReplacementChar
1072
-				}
1073
-				w += utf8.EncodeRune(b[w:], rr)
1074
-			}
1075
-
1076
-		// Quote, control characters are invalid.
1077
-		case c == '"', c < ' ':
1078
-			return
1079
-
1080
-		// ASCII
1081
-		case c < utf8.RuneSelf:
1082
-			b[w] = c
1083
-			r++
1084
-			w++
1085
-
1086
-		// Coerce to well-formed UTF-8.
1087
-		default:
1088
-			rr, size := utf8.DecodeRune(s[r:])
1089
-			r += size
1090
-			w += utf8.EncodeRune(b[w:], rr)
1091
-		}
1092
-	}
1093
-	return b[0:w], true
1094
-}
1095 1
deleted file mode 100644
... ...
@@ -1,1245 +0,0 @@
1
-// Copyright 2010 The Go Authors.  All rights reserved.
2
-// Use of this source code is governed by a BSD-style
3
-// license that can be found in the LICENSE file.
4
-
5
-// Package json implements encoding and decoding of JSON objects as defined in
6
-// RFC 4627. The mapping between JSON objects and Go values is described
7
-// in the documentation for the Marshal and Unmarshal functions.
8
-//
9
-// See "JSON and Go" for an introduction to this package:
10
-// https://golang.org/doc/articles/json_and_go.html
11
-package json
12
-
13
-import (
14
-	"bytes"
15
-	"encoding"
16
-	"encoding/base64"
17
-	"math"
18
-	"reflect"
19
-	"runtime"
20
-	"sort"
21
-	"strconv"
22
-	"strings"
23
-	"sync"
24
-	"unicode"
25
-	"unicode/utf8"
26
-)
27
-
28
-// Marshal returns the JSON encoding of v.
29
-//
30
-// Marshal traverses the value v recursively.
31
-// If an encountered value implements the Marshaler interface
32
-// and is not a nil pointer, Marshal calls its MarshalJSON method
33
-// to produce JSON.  The nil pointer exception is not strictly necessary
34
-// but mimics a similar, necessary exception in the behavior of
35
-// UnmarshalJSON.
36
-//
37
-// Otherwise, Marshal uses the following type-dependent default encodings:
38
-//
39
-// Boolean values encode as JSON booleans.
40
-//
41
-// Floating point, integer, and Number values encode as JSON numbers.
42
-//
43
-// String values encode as JSON strings coerced to valid UTF-8,
44
-// replacing invalid bytes with the Unicode replacement rune.
45
-// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
46
-// to keep some browsers from misinterpreting JSON output as HTML.
47
-// Ampersand "&" is also escaped to "\u0026" for the same reason.
48
-//
49
-// Array and slice values encode as JSON arrays, except that
50
-// []byte encodes as a base64-encoded string, and a nil slice
51
-// encodes as the null JSON object.
52
-//
53
-// Struct values encode as JSON objects. Each exported struct field
54
-// becomes a member of the object unless
55
-//   - the field's tag is "-", or
56
-//   - the field is empty and its tag specifies the "omitempty" option.
57
-// The empty values are false, 0, any
58
-// nil pointer or interface value, and any array, slice, map, or string of
59
-// length zero. The object's default key string is the struct field name
60
-// but can be specified in the struct field's tag value. The "json" key in
61
-// the struct field's tag value is the key name, followed by an optional comma
62
-// and options. Examples:
63
-//
64
-//   // Field is ignored by this package.
65
-//   Field int `json:"-"`
66
-//
67
-//   // Field appears in JSON as key "myName".
68
-//   Field int `json:"myName"`
69
-//
70
-//   // Field appears in JSON as key "myName" and
71
-//   // the field is omitted from the object if its value is empty,
72
-//   // as defined above.
73
-//   Field int `json:"myName,omitempty"`
74
-//
75
-//   // Field appears in JSON as key "Field" (the default), but
76
-//   // the field is skipped if empty.
77
-//   // Note the leading comma.
78
-//   Field int `json:",omitempty"`
79
-//
80
-// The "string" option signals that a field is stored as JSON inside a
81
-// JSON-encoded string. It applies only to fields of string, floating point,
82
-// integer, or boolean types. This extra level of encoding is sometimes used
83
-// when communicating with JavaScript programs:
84
-//
85
-//    Int64String int64 `json:",string"`
86
-//
87
-// The key name will be used if it's a non-empty string consisting of
88
-// only Unicode letters, digits, dollar signs, percent signs, hyphens,
89
-// underscores and slashes.
90
-//
91
-// Anonymous struct fields are usually marshaled as if their inner exported fields
92
-// were fields in the outer struct, subject to the usual Go visibility rules amended
93
-// as described in the next paragraph.
94
-// An anonymous struct field with a name given in its JSON tag is treated as
95
-// having that name, rather than being anonymous.
96
-// An anonymous struct field of interface type is treated the same as having
97
-// that type as its name, rather than being anonymous.
98
-//
99
-// The Go visibility rules for struct fields are amended for JSON when
100
-// deciding which field to marshal or unmarshal. If there are
101
-// multiple fields at the same level, and that level is the least
102
-// nested (and would therefore be the nesting level selected by the
103
-// usual Go rules), the following extra rules apply:
104
-//
105
-// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
106
-// even if there are multiple untagged fields that would otherwise conflict.
107
-// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
108
-// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
109
-//
110
-// Handling of anonymous struct fields is new in Go 1.1.
111
-// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
112
-// an anonymous struct field in both current and earlier versions, give the field
113
-// a JSON tag of "-".
114
-//
115
-// Map values encode as JSON objects.
116
-// The map's key type must be string; the map keys are used as JSON object
117
-// keys, subject to the UTF-8 coercion described for string values above.
118
-//
119
-// Pointer values encode as the value pointed to.
120
-// A nil pointer encodes as the null JSON object.
121
-//
122
-// Interface values encode as the value contained in the interface.
123
-// A nil interface value encodes as the null JSON object.
124
-//
125
-// Channel, complex, and function values cannot be encoded in JSON.
126
-// Attempting to encode such a value causes Marshal to return
127
-// an UnsupportedTypeError.
128
-//
129
-// JSON cannot represent cyclic data structures and Marshal does not
130
-// handle them.  Passing cyclic structures to Marshal will result in
131
-// an infinite recursion.
132
-//
133
-func Marshal(v interface{}) ([]byte, error) {
134
-	return marshal(v, false)
135
-}
136
-
137
-// MarshalIndent is like Marshal but applies Indent to format the output.
138
-func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
139
-	b, err := Marshal(v)
140
-	if err != nil {
141
-		return nil, err
142
-	}
143
-	var buf bytes.Buffer
144
-	err = Indent(&buf, b, prefix, indent)
145
-	if err != nil {
146
-		return nil, err
147
-	}
148
-	return buf.Bytes(), nil
149
-}
150
-
151
-// MarshalCanonical is like Marshal but encodes into Canonical JSON.
152
-// Read more at: http://wiki.laptop.org/go/Canonical_JSON
153
-func MarshalCanonical(v interface{}) ([]byte, error) {
154
-	return marshal(v, true)
155
-}
156
-
157
-func marshal(v interface{}, canonical bool) ([]byte, error) {
158
-	e := &encodeState{canonical: canonical}
159
-	err := e.marshal(v)
160
-	if err != nil {
161
-		return nil, err
162
-	}
163
-	return e.Bytes(), nil
164
-}
165
-
166
-// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
167
-// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
168
-// so that the JSON will be safe to embed inside HTML <script> tags.
169
-// For historical reasons, web browsers don't honor standard HTML
170
-// escaping within <script> tags, so an alternative JSON encoding must
171
-// be used.
172
-func HTMLEscape(dst *bytes.Buffer, src []byte) {
173
-	// The characters can only appear in string literals,
174
-	// so just scan the string one byte at a time.
175
-	start := 0
176
-	for i, c := range src {
177
-		if c == '<' || c == '>' || c == '&' {
178
-			if start < i {
179
-				dst.Write(src[start:i])
180
-			}
181
-			dst.WriteString(`\u00`)
182
-			dst.WriteByte(hex[c>>4])
183
-			dst.WriteByte(hex[c&0xF])
184
-			start = i + 1
185
-		}
186
-		// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
187
-		if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
188
-			if start < i {
189
-				dst.Write(src[start:i])
190
-			}
191
-			dst.WriteString(`\u202`)
192
-			dst.WriteByte(hex[src[i+2]&0xF])
193
-			start = i + 3
194
-		}
195
-	}
196
-	if start < len(src) {
197
-		dst.Write(src[start:])
198
-	}
199
-}
200
-
201
-// Marshaler is the interface implemented by objects that
202
-// can marshal themselves into valid JSON.
203
-type Marshaler interface {
204
-	MarshalJSON() ([]byte, error)
205
-}
206
-
207
-// An UnsupportedTypeError is returned by Marshal when attempting
208
-// to encode an unsupported value type.
209
-type UnsupportedTypeError struct {
210
-	Type reflect.Type
211
-}
212
-
213
-func (e *UnsupportedTypeError) Error() string {
214
-	return "json: unsupported type: " + e.Type.String()
215
-}
216
-
217
-type UnsupportedValueError struct {
218
-	Value reflect.Value
219
-	Str   string
220
-}
221
-
222
-func (e *UnsupportedValueError) Error() string {
223
-	return "json: unsupported value: " + e.Str
224
-}
225
-
226
-// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
227
-// attempting to encode a string value with invalid UTF-8 sequences.
228
-// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
229
-// replacing invalid bytes with the Unicode replacement rune U+FFFD.
230
-// This error is no longer generated but is kept for backwards compatibility
231
-// with programs that might mention it.
232
-type InvalidUTF8Error struct {
233
-	S string // the whole string value that caused the error
234
-}
235
-
236
-func (e *InvalidUTF8Error) Error() string {
237
-	return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
238
-}
239
-
240
-type MarshalerError struct {
241
-	Type reflect.Type
242
-	Err  error
243
-}
244
-
245
-func (e *MarshalerError) Error() string {
246
-	return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
247
-}
248
-
249
-var hex = "0123456789abcdef"
250
-
251
-// An encodeState encodes JSON into a bytes.Buffer.
252
-type encodeState struct {
253
-	bytes.Buffer // accumulated output
254
-	scratch      [64]byte
255
-	canonical    bool
256
-}
257
-
258
-var encodeStatePool sync.Pool
259
-
260
-func newEncodeState(canonical bool) *encodeState {
261
-	if v := encodeStatePool.Get(); v != nil {
262
-		e := v.(*encodeState)
263
-		e.Reset()
264
-		e.canonical = canonical
265
-		return e
266
-	}
267
-	return &encodeState{canonical: canonical}
268
-}
269
-
270
-func (e *encodeState) marshal(v interface{}) (err error) {
271
-	defer func() {
272
-		if r := recover(); r != nil {
273
-			if _, ok := r.(runtime.Error); ok {
274
-				panic(r)
275
-			}
276
-			if s, ok := r.(string); ok {
277
-				panic(s)
278
-			}
279
-			err = r.(error)
280
-		}
281
-	}()
282
-	e.reflectValue(reflect.ValueOf(v))
283
-	return nil
284
-}
285
-
286
-func (e *encodeState) error(err error) {
287
-	panic(err)
288
-}
289
-
290
-func isEmptyValue(v reflect.Value) bool {
291
-	switch v.Kind() {
292
-	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
293
-		return v.Len() == 0
294
-	case reflect.Bool:
295
-		return !v.Bool()
296
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
297
-		return v.Int() == 0
298
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
299
-		return v.Uint() == 0
300
-	case reflect.Float32, reflect.Float64:
301
-		return v.Float() == 0
302
-	case reflect.Interface, reflect.Ptr:
303
-		return v.IsNil()
304
-	}
305
-	return false
306
-}
307
-
308
-func (e *encodeState) reflectValue(v reflect.Value) {
309
-	e.valueEncoder(v)(e, v, false)
310
-}
311
-
312
-type encoderFunc func(e *encodeState, v reflect.Value, quoted bool)
313
-
314
-var encoderCache struct {
315
-	sync.RWMutex
316
-	m map[reflect.Type]encoderFunc
317
-}
318
-
319
-func (e *encodeState) valueEncoder(v reflect.Value) encoderFunc {
320
-	if !v.IsValid() {
321
-		return invalidValueEncoder
322
-	}
323
-	return e.typeEncoder(v.Type())
324
-}
325
-
326
-func (e *encodeState) typeEncoder(t reflect.Type) encoderFunc {
327
-	encoderCache.RLock()
328
-	f := encoderCache.m[t]
329
-	encoderCache.RUnlock()
330
-	if f != nil {
331
-		return f
332
-	}
333
-
334
-	// To deal with recursive types, populate the map with an
335
-	// indirect func before we build it. This type waits on the
336
-	// real func (f) to be ready and then calls it.  This indirect
337
-	// func is only used for recursive types.
338
-	encoderCache.Lock()
339
-	if encoderCache.m == nil {
340
-		encoderCache.m = make(map[reflect.Type]encoderFunc)
341
-	}
342
-	var wg sync.WaitGroup
343
-	wg.Add(1)
344
-	encoderCache.m[t] = func(e *encodeState, v reflect.Value, quoted bool) {
345
-		wg.Wait()
346
-		f(e, v, quoted)
347
-	}
348
-	encoderCache.Unlock()
349
-
350
-	// Compute fields without lock.
351
-	// Might duplicate effort but won't hold other computations back.
352
-	f = e.newTypeEncoder(t, true)
353
-	wg.Done()
354
-	encoderCache.Lock()
355
-	encoderCache.m[t] = f
356
-	encoderCache.Unlock()
357
-	return f
358
-}
359
-
360
-var (
361
-	marshalerType     = reflect.TypeOf(new(Marshaler)).Elem()
362
-	textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
363
-)
364
-
365
-// newTypeEncoder constructs an encoderFunc for a type.
366
-// The returned encoder only checks CanAddr when allowAddr is true.
367
-func (e *encodeState) newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
368
-	if t.Implements(marshalerType) {
369
-		return marshalerEncoder
370
-	}
371
-	if t.Kind() != reflect.Ptr && allowAddr {
372
-		if reflect.PtrTo(t).Implements(marshalerType) {
373
-			return newCondAddrEncoder(addrMarshalerEncoder, e.newTypeEncoder(t, false))
374
-		}
375
-	}
376
-
377
-	if t.Implements(textMarshalerType) {
378
-		return textMarshalerEncoder
379
-	}
380
-	if t.Kind() != reflect.Ptr && allowAddr {
381
-		if reflect.PtrTo(t).Implements(textMarshalerType) {
382
-			return newCondAddrEncoder(addrTextMarshalerEncoder, e.newTypeEncoder(t, false))
383
-		}
384
-	}
385
-
386
-	switch t.Kind() {
387
-	case reflect.Bool:
388
-		return boolEncoder
389
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
390
-		return intEncoder
391
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
392
-		return uintEncoder
393
-	case reflect.Float32:
394
-		return float32Encoder
395
-	case reflect.Float64:
396
-		return float64Encoder
397
-	case reflect.String:
398
-		return stringEncoder
399
-	case reflect.Interface:
400
-		return interfaceEncoder
401
-	case reflect.Struct:
402
-		return e.newStructEncoder(t)
403
-	case reflect.Map:
404
-		return e.newMapEncoder(t)
405
-	case reflect.Slice:
406
-		return e.newSliceEncoder(t)
407
-	case reflect.Array:
408
-		return e.newArrayEncoder(t)
409
-	case reflect.Ptr:
410
-		return e.newPtrEncoder(t)
411
-	default:
412
-		return unsupportedTypeEncoder
413
-	}
414
-}
415
-
416
-func invalidValueEncoder(e *encodeState, v reflect.Value, quoted bool) {
417
-	e.WriteString("null")
418
-}
419
-
420
-func marshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
421
-	if v.Kind() == reflect.Ptr && v.IsNil() {
422
-		e.WriteString("null")
423
-		return
424
-	}
425
-	m := v.Interface().(Marshaler)
426
-	b, err := m.MarshalJSON()
427
-	if err == nil {
428
-		// copy JSON into buffer, checking validity.
429
-		err = compact(&e.Buffer, b, true)
430
-	}
431
-	if err != nil {
432
-		e.error(&MarshalerError{v.Type(), err})
433
-	}
434
-}
435
-
436
-func addrMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
437
-	va := v.Addr()
438
-	if va.IsNil() {
439
-		e.WriteString("null")
440
-		return
441
-	}
442
-	m := va.Interface().(Marshaler)
443
-	b, err := m.MarshalJSON()
444
-	if err == nil {
445
-		// copy JSON into buffer, checking validity.
446
-		err = compact(&e.Buffer, b, true)
447
-	}
448
-	if err != nil {
449
-		e.error(&MarshalerError{v.Type(), err})
450
-	}
451
-}
452
-
453
-func textMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
454
-	if v.Kind() == reflect.Ptr && v.IsNil() {
455
-		e.WriteString("null")
456
-		return
457
-	}
458
-	m := v.Interface().(encoding.TextMarshaler)
459
-	b, err := m.MarshalText()
460
-	if err == nil {
461
-		_, err = e.stringBytes(b)
462
-	}
463
-	if err != nil {
464
-		e.error(&MarshalerError{v.Type(), err})
465
-	}
466
-}
467
-
468
-func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
469
-	va := v.Addr()
470
-	if va.IsNil() {
471
-		e.WriteString("null")
472
-		return
473
-	}
474
-	m := va.Interface().(encoding.TextMarshaler)
475
-	b, err := m.MarshalText()
476
-	if err == nil {
477
-		_, err = e.stringBytes(b)
478
-	}
479
-	if err != nil {
480
-		e.error(&MarshalerError{v.Type(), err})
481
-	}
482
-}
483
-
484
-func boolEncoder(e *encodeState, v reflect.Value, quoted bool) {
485
-	if quoted {
486
-		e.WriteByte('"')
487
-	}
488
-	if v.Bool() {
489
-		e.WriteString("true")
490
-	} else {
491
-		e.WriteString("false")
492
-	}
493
-	if quoted {
494
-		e.WriteByte('"')
495
-	}
496
-}
497
-
498
-func intEncoder(e *encodeState, v reflect.Value, quoted bool) {
499
-	b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
500
-	if quoted {
501
-		e.WriteByte('"')
502
-	}
503
-	e.Write(b)
504
-	if quoted {
505
-		e.WriteByte('"')
506
-	}
507
-}
508
-
509
-func uintEncoder(e *encodeState, v reflect.Value, quoted bool) {
510
-	b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
511
-	if quoted {
512
-		e.WriteByte('"')
513
-	}
514
-	e.Write(b)
515
-	if quoted {
516
-		e.WriteByte('"')
517
-	}
518
-}
519
-
520
-type floatEncoder int // number of bits
521
-
522
-func (bits floatEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
523
-	f := v.Float()
524
-	if math.IsInf(f, 0) || math.IsNaN(f) || (e.canonical && math.Floor(f) != f) {
525
-		e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))})
526
-	}
527
-
528
-	var b []byte
529
-	if e.canonical {
530
-		b = strconv.AppendInt(e.scratch[:0], int64(f), 10)
531
-	} else {
532
-		b = strconv.AppendFloat(e.scratch[:0], f, 'g', -1, int(bits))
533
-	}
534
-	if quoted {
535
-		e.WriteByte('"')
536
-	}
537
-	e.Write(b)
538
-	if quoted {
539
-		e.WriteByte('"')
540
-	}
541
-}
542
-
543
-var (
544
-	float32Encoder = (floatEncoder(32)).encode
545
-	float64Encoder = (floatEncoder(64)).encode
546
-)
547
-
548
-func stringEncoder(e *encodeState, v reflect.Value, quoted bool) {
549
-	if v.Type() == numberType {
550
-		numStr := v.String()
551
-		if numStr == "" {
552
-			numStr = "0" // Number's zero-val
553
-		}
554
-		e.WriteString(numStr)
555
-		return
556
-	}
557
-	if quoted {
558
-		sb, err := Marshal(v.String())
559
-		if err != nil {
560
-			e.error(err)
561
-		}
562
-		e.string(string(sb))
563
-	} else {
564
-		e.string(v.String())
565
-	}
566
-}
567
-
568
-func interfaceEncoder(e *encodeState, v reflect.Value, quoted bool) {
569
-	if v.IsNil() {
570
-		e.WriteString("null")
571
-		return
572
-	}
573
-	e.reflectValue(v.Elem())
574
-}
575
-
576
-func unsupportedTypeEncoder(e *encodeState, v reflect.Value, quoted bool) {
577
-	e.error(&UnsupportedTypeError{v.Type()})
578
-}
579
-
580
-type structEncoder struct {
581
-	fields    []field
582
-	fieldEncs []encoderFunc
583
-}
584
-
585
-func (se *structEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
586
-	e.WriteByte('{')
587
-	first := true
588
-	for i, f := range se.fields {
589
-		fv := fieldByIndex(v, f.index)
590
-		if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
591
-			continue
592
-		}
593
-		if first {
594
-			first = false
595
-		} else {
596
-			e.WriteByte(',')
597
-		}
598
-		e.string(f.name)
599
-		e.WriteByte(':')
600
-		se.fieldEncs[i](e, fv, f.quoted)
601
-	}
602
-	e.WriteByte('}')
603
-}
604
-
605
-func (e *encodeState) newStructEncoder(t reflect.Type) encoderFunc {
606
-	fields := cachedTypeFields(t, e.canonical)
607
-	se := &structEncoder{
608
-		fields:    fields,
609
-		fieldEncs: make([]encoderFunc, len(fields)),
610
-	}
611
-	for i, f := range fields {
612
-		se.fieldEncs[i] = e.typeEncoder(typeByIndex(t, f.index))
613
-	}
614
-	return se.encode
615
-}
616
-
617
-type mapEncoder struct {
618
-	elemEnc encoderFunc
619
-}
620
-
621
-func (me *mapEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
622
-	if v.IsNil() {
623
-		e.WriteString("null")
624
-		return
625
-	}
626
-	e.WriteByte('{')
627
-	var sv stringValues = v.MapKeys()
628
-	sort.Sort(sv)
629
-	for i, k := range sv {
630
-		if i > 0 {
631
-			e.WriteByte(',')
632
-		}
633
-		e.string(k.String())
634
-		e.WriteByte(':')
635
-		me.elemEnc(e, v.MapIndex(k), false)
636
-	}
637
-	e.WriteByte('}')
638
-}
639
-
640
-func (e *encodeState) newMapEncoder(t reflect.Type) encoderFunc {
641
-	if t.Key().Kind() != reflect.String {
642
-		return unsupportedTypeEncoder
643
-	}
644
-	me := &mapEncoder{e.typeEncoder(t.Elem())}
645
-	return me.encode
646
-}
647
-
648
-func encodeByteSlice(e *encodeState, v reflect.Value, _ bool) {
649
-	if v.IsNil() {
650
-		e.WriteString("null")
651
-		return
652
-	}
653
-	s := v.Bytes()
654
-	e.WriteByte('"')
655
-	if len(s) < 1024 {
656
-		// for small buffers, using Encode directly is much faster.
657
-		dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
658
-		base64.StdEncoding.Encode(dst, s)
659
-		e.Write(dst)
660
-	} else {
661
-		// for large buffers, avoid unnecessary extra temporary
662
-		// buffer space.
663
-		enc := base64.NewEncoder(base64.StdEncoding, e)
664
-		enc.Write(s)
665
-		enc.Close()
666
-	}
667
-	e.WriteByte('"')
668
-}
669
-
670
-// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil.
671
-type sliceEncoder struct {
672
-	arrayEnc encoderFunc
673
-}
674
-
675
-func (se *sliceEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
676
-	if v.IsNil() {
677
-		e.WriteString("null")
678
-		return
679
-	}
680
-	se.arrayEnc(e, v, false)
681
-}
682
-
683
-func (e *encodeState) newSliceEncoder(t reflect.Type) encoderFunc {
684
-	// Byte slices get special treatment; arrays don't.
685
-	if t.Elem().Kind() == reflect.Uint8 {
686
-		return encodeByteSlice
687
-	}
688
-	enc := &sliceEncoder{e.newArrayEncoder(t)}
689
-	return enc.encode
690
-}
691
-
692
-type arrayEncoder struct {
693
-	elemEnc encoderFunc
694
-}
695
-
696
-func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
697
-	e.WriteByte('[')
698
-	n := v.Len()
699
-	for i := 0; i < n; i++ {
700
-		if i > 0 {
701
-			e.WriteByte(',')
702
-		}
703
-		ae.elemEnc(e, v.Index(i), false)
704
-	}
705
-	e.WriteByte(']')
706
-}
707
-
708
-func (e *encodeState) newArrayEncoder(t reflect.Type) encoderFunc {
709
-	enc := &arrayEncoder{e.typeEncoder(t.Elem())}
710
-	return enc.encode
711
-}
712
-
713
-type ptrEncoder struct {
714
-	elemEnc encoderFunc
715
-}
716
-
717
-func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
718
-	if v.IsNil() {
719
-		e.WriteString("null")
720
-		return
721
-	}
722
-	pe.elemEnc(e, v.Elem(), quoted)
723
-}
724
-
725
-func (e *encodeState) newPtrEncoder(t reflect.Type) encoderFunc {
726
-	enc := &ptrEncoder{e.typeEncoder(t.Elem())}
727
-	return enc.encode
728
-}
729
-
730
-type condAddrEncoder struct {
731
-	canAddrEnc, elseEnc encoderFunc
732
-}
733
-
734
-func (ce *condAddrEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
735
-	if v.CanAddr() {
736
-		ce.canAddrEnc(e, v, quoted)
737
-	} else {
738
-		ce.elseEnc(e, v, quoted)
739
-	}
740
-}
741
-
742
-// newCondAddrEncoder returns an encoder that checks whether its value
743
-// CanAddr and delegates to canAddrEnc if so, else to elseEnc.
744
-func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc {
745
-	enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
746
-	return enc.encode
747
-}
748
-
749
-func isValidTag(s string) bool {
750
-	if s == "" {
751
-		return false
752
-	}
753
-	for _, c := range s {
754
-		switch {
755
-		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
756
-			// Backslash and quote chars are reserved, but
757
-			// otherwise any punctuation chars are allowed
758
-			// in a tag name.
759
-		default:
760
-			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
761
-				return false
762
-			}
763
-		}
764
-	}
765
-	return true
766
-}
767
-
768
-func fieldByIndex(v reflect.Value, index []int) reflect.Value {
769
-	for _, i := range index {
770
-		if v.Kind() == reflect.Ptr {
771
-			if v.IsNil() {
772
-				return reflect.Value{}
773
-			}
774
-			v = v.Elem()
775
-		}
776
-		v = v.Field(i)
777
-	}
778
-	return v
779
-}
780
-
781
-func typeByIndex(t reflect.Type, index []int) reflect.Type {
782
-	for _, i := range index {
783
-		if t.Kind() == reflect.Ptr {
784
-			t = t.Elem()
785
-		}
786
-		t = t.Field(i).Type
787
-	}
788
-	return t
789
-}
790
-
791
-// stringValues is a slice of reflect.Value holding *reflect.StringValue.
792
-// It implements the methods to sort by string.
793
-type stringValues []reflect.Value
794
-
795
-func (sv stringValues) Len() int           { return len(sv) }
796
-func (sv stringValues) Swap(i, j int)      { sv[i], sv[j] = sv[j], sv[i] }
797
-func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
798
-func (sv stringValues) get(i int) string   { return sv[i].String() }
799
-
800
-// NOTE: keep in sync with stringBytes below.
801
-func (e *encodeState) string(s string) (int, error) {
802
-	len0 := e.Len()
803
-	e.WriteByte('"')
804
-	start := 0
805
-	for i := 0; i < len(s); {
806
-		if b := s[i]; b < utf8.RuneSelf {
807
-			if b != '\\' && b != '"' {
808
-				if e.canonical || (0x20 <= b && b != '<' && b != '>' && b != '&') {
809
-					i++
810
-					continue
811
-				}
812
-			}
813
-			if start < i {
814
-				e.WriteString(s[start:i])
815
-			}
816
-			switch b {
817
-			case '\\', '"':
818
-				e.WriteByte('\\')
819
-				e.WriteByte(b)
820
-			case '\n':
821
-				e.WriteByte('\\')
822
-				e.WriteByte('n')
823
-			case '\r':
824
-				e.WriteByte('\\')
825
-				e.WriteByte('r')
826
-			case '\t':
827
-				e.WriteByte('\\')
828
-				e.WriteByte('t')
829
-			default:
830
-				// This encodes bytes < 0x20 except for \n and \r,
831
-				// as well as <, > and &. The latter are escaped because they
832
-				// can lead to security holes when user-controlled strings
833
-				// are rendered into JSON and served to some browsers.
834
-				e.WriteString(`\u00`)
835
-				e.WriteByte(hex[b>>4])
836
-				e.WriteByte(hex[b&0xF])
837
-			}
838
-			i++
839
-			start = i
840
-			continue
841
-		}
842
-		if e.canonical {
843
-			i++
844
-			continue
845
-		}
846
-		c, size := utf8.DecodeRuneInString(s[i:])
847
-		if c == utf8.RuneError && size == 1 {
848
-			if start < i {
849
-				e.WriteString(s[start:i])
850
-			}
851
-			e.WriteString(`\ufffd`)
852
-			i += size
853
-			start = i
854
-			continue
855
-		}
856
-		// U+2028 is LINE SEPARATOR.
857
-		// U+2029 is PARAGRAPH SEPARATOR.
858
-		// They are both technically valid characters in JSON strings,
859
-		// but don't work in JSONP, which has to be evaluated as JavaScript,
860
-		// and can lead to security holes there. It is valid JSON to
861
-		// escape them, so we do so unconditionally.
862
-		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
863
-		if c == '\u2028' || c == '\u2029' {
864
-			if start < i {
865
-				e.WriteString(s[start:i])
866
-			}
867
-			e.WriteString(`\u202`)
868
-			e.WriteByte(hex[c&0xF])
869
-			i += size
870
-			start = i
871
-			continue
872
-		}
873
-		i += size
874
-	}
875
-	if start < len(s) {
876
-		e.WriteString(s[start:])
877
-	}
878
-	e.WriteByte('"')
879
-	return e.Len() - len0, nil
880
-}
881
-
882
-// NOTE: keep in sync with string above.
883
-func (e *encodeState) stringBytes(s []byte) (int, error) {
884
-	len0 := e.Len()
885
-	e.WriteByte('"')
886
-	start := 0
887
-	for i := 0; i < len(s); {
888
-		if b := s[i]; b < utf8.RuneSelf {
889
-			if b != '\\' && b != '"' {
890
-				if e.canonical || (0x20 <= b && b != '<' && b != '>' && b != '&') {
891
-					i++
892
-					continue
893
-				}
894
-			}
895
-			if start < i {
896
-				e.Write(s[start:i])
897
-			}
898
-			switch b {
899
-			case '\\', '"':
900
-				e.WriteByte('\\')
901
-				e.WriteByte(b)
902
-			case '\n':
903
-				e.WriteByte('\\')
904
-				e.WriteByte('n')
905
-			case '\r':
906
-				e.WriteByte('\\')
907
-				e.WriteByte('r')
908
-			case '\t':
909
-				e.WriteByte('\\')
910
-				e.WriteByte('t')
911
-			default:
912
-				// This encodes bytes < 0x20 except for \n and \r,
913
-				// as well as <, >, and &. The latter are escaped because they
914
-				// can lead to security holes when user-controlled strings
915
-				// are rendered into JSON and served to some browsers.
916
-				e.WriteString(`\u00`)
917
-				e.WriteByte(hex[b>>4])
918
-				e.WriteByte(hex[b&0xF])
919
-			}
920
-			i++
921
-			start = i
922
-			continue
923
-		}
924
-		if e.canonical {
925
-			i++
926
-			continue
927
-		}
928
-		c, size := utf8.DecodeRune(s[i:])
929
-		if c == utf8.RuneError && size == 1 {
930
-			if start < i {
931
-				e.Write(s[start:i])
932
-			}
933
-			e.WriteString(`\ufffd`)
934
-			i += size
935
-			start = i
936
-			continue
937
-		}
938
-		// U+2028 is LINE SEPARATOR.
939
-		// U+2029 is PARAGRAPH SEPARATOR.
940
-		// They are both technically valid characters in JSON strings,
941
-		// but don't work in JSONP, which has to be evaluated as JavaScript,
942
-		// and can lead to security holes there. It is valid JSON to
943
-		// escape them, so we do so unconditionally.
944
-		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
945
-		if c == '\u2028' || c == '\u2029' {
946
-			if start < i {
947
-				e.Write(s[start:i])
948
-			}
949
-			e.WriteString(`\u202`)
950
-			e.WriteByte(hex[c&0xF])
951
-			i += size
952
-			start = i
953
-			continue
954
-		}
955
-		i += size
956
-	}
957
-	if start < len(s) {
958
-		e.Write(s[start:])
959
-	}
960
-	e.WriteByte('"')
961
-	return e.Len() - len0, nil
962
-}
963
-
964
-// A field represents a single field found in a struct.
965
-type field struct {
966
-	name      string
967
-	nameBytes []byte                 // []byte(name)
968
-	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
969
-
970
-	tag       bool
971
-	index     []int
972
-	typ       reflect.Type
973
-	omitEmpty bool
974
-	quoted    bool
975
-}
976
-
977
-func fillField(f field) field {
978
-	f.nameBytes = []byte(f.name)
979
-	f.equalFold = foldFunc(f.nameBytes)
980
-	return f
981
-}
982
-
983
-// byName sorts field by name, breaking ties with depth,
984
-// then breaking ties with "name came from json tag", then
985
-// breaking ties with index sequence.
986
-type byName []field
987
-
988
-func (x byName) Len() int { return len(x) }
989
-
990
-func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
991
-
992
-func (x byName) Less(i, j int) bool {
993
-	if x[i].name != x[j].name {
994
-		return x[i].name < x[j].name
995
-	}
996
-	if len(x[i].index) != len(x[j].index) {
997
-		return len(x[i].index) < len(x[j].index)
998
-	}
999
-	if x[i].tag != x[j].tag {
1000
-		return x[i].tag
1001
-	}
1002
-	return byIndex(x).Less(i, j)
1003
-}
1004
-
1005
-// byIndex sorts field by index sequence.
1006
-type byIndex []field
1007
-
1008
-func (x byIndex) Len() int { return len(x) }
1009
-
1010
-func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
1011
-
1012
-func (x byIndex) Less(i, j int) bool {
1013
-	for k, xik := range x[i].index {
1014
-		if k >= len(x[j].index) {
1015
-			return false
1016
-		}
1017
-		if xik != x[j].index[k] {
1018
-			return xik < x[j].index[k]
1019
-		}
1020
-	}
1021
-	return len(x[i].index) < len(x[j].index)
1022
-}
1023
-
1024
-// typeFields returns a list of fields that JSON should recognize for the given type.
1025
-// The algorithm is breadth-first search over the set of structs to include - the top struct
1026
-// and then any reachable anonymous structs.
1027
-func typeFields(t reflect.Type) []field {
1028
-	// Anonymous fields to explore at the current level and the next.
1029
-	current := []field{}
1030
-	next := []field{{typ: t}}
1031
-
1032
-	// Count of queued names for current level and the next.
1033
-	count := map[reflect.Type]int{}
1034
-	nextCount := map[reflect.Type]int{}
1035
-
1036
-	// Types already visited at an earlier level.
1037
-	visited := map[reflect.Type]bool{}
1038
-
1039
-	// Fields found.
1040
-	var fields []field
1041
-
1042
-	for len(next) > 0 {
1043
-		current, next = next, current[:0]
1044
-		count, nextCount = nextCount, map[reflect.Type]int{}
1045
-
1046
-		for _, f := range current {
1047
-			if visited[f.typ] {
1048
-				continue
1049
-			}
1050
-			visited[f.typ] = true
1051
-
1052
-			// Scan f.typ for fields to include.
1053
-			for i := 0; i < f.typ.NumField(); i++ {
1054
-				sf := f.typ.Field(i)
1055
-				if sf.PkgPath != "" { // unexported
1056
-					continue
1057
-				}
1058
-				tag := sf.Tag.Get("json")
1059
-				if tag == "-" {
1060
-					continue
1061
-				}
1062
-				name, opts := parseTag(tag)
1063
-				if !isValidTag(name) {
1064
-					name = ""
1065
-				}
1066
-				index := make([]int, len(f.index)+1)
1067
-				copy(index, f.index)
1068
-				index[len(f.index)] = i
1069
-
1070
-				ft := sf.Type
1071
-				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
1072
-					// Follow pointer.
1073
-					ft = ft.Elem()
1074
-				}
1075
-
1076
-				// Only strings, floats, integers, and booleans can be quoted.
1077
-				quoted := false
1078
-				if opts.Contains("string") {
1079
-					switch ft.Kind() {
1080
-					case reflect.Bool,
1081
-						reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
1082
-						reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
1083
-						reflect.Float32, reflect.Float64,
1084
-						reflect.String:
1085
-						quoted = true
1086
-					}
1087
-				}
1088
-
1089
-				// Record found field and index sequence.
1090
-				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
1091
-					tagged := name != ""
1092
-					if name == "" {
1093
-						name = sf.Name
1094
-					}
1095
-					fields = append(fields, fillField(field{
1096
-						name:      name,
1097
-						tag:       tagged,
1098
-						index:     index,
1099
-						typ:       ft,
1100
-						omitEmpty: opts.Contains("omitempty"),
1101
-						quoted:    quoted,
1102
-					}))
1103
-					if count[f.typ] > 1 {
1104
-						// If there were multiple instances, add a second,
1105
-						// so that the annihilation code will see a duplicate.
1106
-						// It only cares about the distinction between 1 or 2,
1107
-						// so don't bother generating any more copies.
1108
-						fields = append(fields, fields[len(fields)-1])
1109
-					}
1110
-					continue
1111
-				}
1112
-
1113
-				// Record new anonymous struct to explore in next round.
1114
-				nextCount[ft]++
1115
-				if nextCount[ft] == 1 {
1116
-					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
1117
-				}
1118
-			}
1119
-		}
1120
-	}
1121
-
1122
-	sort.Sort(byName(fields))
1123
-
1124
-	// Delete all fields that are hidden by the Go rules for embedded fields,
1125
-	// except that fields with JSON tags are promoted.
1126
-
1127
-	// The fields are sorted in primary order of name, secondary order
1128
-	// of field index length. Loop over names; for each name, delete
1129
-	// hidden fields by choosing the one dominant field that survives.
1130
-	out := fields[:0]
1131
-	for advance, i := 0, 0; i < len(fields); i += advance {
1132
-		// One iteration per name.
1133
-		// Find the sequence of fields with the name of this first field.
1134
-		fi := fields[i]
1135
-		name := fi.name
1136
-		for advance = 1; i+advance < len(fields); advance++ {
1137
-			fj := fields[i+advance]
1138
-			if fj.name != name {
1139
-				break
1140
-			}
1141
-		}
1142
-		if advance == 1 { // Only one field with this name
1143
-			out = append(out, fi)
1144
-			continue
1145
-		}
1146
-		dominant, ok := dominantField(fields[i : i+advance])
1147
-		if ok {
1148
-			out = append(out, dominant)
1149
-		}
1150
-	}
1151
-
1152
-	return out
1153
-}
1154
-
1155
-// dominantField looks through the fields, all of which are known to
1156
-// have the same name, to find the single field that dominates the
1157
-// others using Go's embedding rules, modified by the presence of
1158
-// JSON tags. If there are multiple top-level fields, the boolean
1159
-// will be false: This condition is an error in Go and we skip all
1160
-// the fields.
1161
-func dominantField(fields []field) (field, bool) {
1162
-	// The fields are sorted in increasing index-length order. The winner
1163
-	// must therefore be one with the shortest index length. Drop all
1164
-	// longer entries, which is easy: just truncate the slice.
1165
-	length := len(fields[0].index)
1166
-	tagged := -1 // Index of first tagged field.
1167
-	for i, f := range fields {
1168
-		if len(f.index) > length {
1169
-			fields = fields[:i]
1170
-			break
1171
-		}
1172
-		if f.tag {
1173
-			if tagged >= 0 {
1174
-				// Multiple tagged fields at the same level: conflict.
1175
-				// Return no field.
1176
-				return field{}, false
1177
-			}
1178
-			tagged = i
1179
-		}
1180
-	}
1181
-	if tagged >= 0 {
1182
-		return fields[tagged], true
1183
-	}
1184
-	// All remaining fields have the same length. If there's more than one,
1185
-	// we have a conflict (two fields named "X" at the same level) and we
1186
-	// return no field.
1187
-	if len(fields) > 1 {
1188
-		return field{}, false
1189
-	}
1190
-	return fields[0], true
1191
-}
1192
-
1193
-type fields struct {
1194
-	byName  []field
1195
-	byIndex []field
1196
-}
1197
-
1198
-var fieldCache struct {
1199
-	sync.RWMutex
1200
-	m map[reflect.Type]*fields
1201
-}
1202
-
1203
-// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
1204
-func cachedTypeFields(t reflect.Type, canonical bool) []field {
1205
-	fieldCache.RLock()
1206
-	x := fieldCache.m[t]
1207
-	fieldCache.RUnlock()
1208
-
1209
-	var f []field
1210
-	if x != nil {
1211
-		if canonical {
1212
-			f = x.byName
1213
-		}
1214
-		f = x.byIndex
1215
-	}
1216
-	if f != nil {
1217
-		return f
1218
-	}
1219
-
1220
-	// Compute fields without lock.
1221
-	// Might duplicate effort but won't hold other computations back.
1222
-	f = typeFields(t)
1223
-	if f == nil {
1224
-		f = []field{}
1225
-	}
1226
-	if !canonical {
1227
-		sort.Sort(byIndex(f))
1228
-	}
1229
-
1230
-	fieldCache.Lock()
1231
-	if fieldCache.m == nil {
1232
-		fieldCache.m = map[reflect.Type]*fields{}
1233
-	}
1234
-	x = fieldCache.m[t]
1235
-	fieldCache.Unlock()
1236
-	if x == nil {
1237
-		x = new(fields)
1238
-	}
1239
-	if canonical {
1240
-		x.byName = f
1241
-	} else {
1242
-		x.byIndex = f
1243
-	}
1244
-	return f
1245
-}
1246 1
deleted file mode 100644
... ...
@@ -1,143 +0,0 @@
1
-// Copyright 2013 The Go Authors. All rights reserved.
2
-// Use of this source code is governed by a BSD-style
3
-// license that can be found in the LICENSE file.
4
-
5
-package json
6
-
7
-import (
8
-	"bytes"
9
-	"unicode/utf8"
10
-)
11
-
12
-const (
13
-	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
14
-	kelvin       = '\u212a'
15
-	smallLongEss = '\u017f'
16
-)
17
-
18
-// foldFunc returns one of four different case folding equivalence
19
-// functions, from most general (and slow) to fastest:
20
-//
21
-// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
22
-// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
23
-// 3) asciiEqualFold, no special, but includes non-letters (including _)
24
-// 4) simpleLetterEqualFold, no specials, no non-letters.
25
-//
26
-// The letters S and K are special because they map to 3 runes, not just 2:
27
-//  * S maps to s and to U+017F 'Å¿' Latin small letter long s
28
-//  * k maps to K and to U+212A 'K' Kelvin sign
29
-// See https://play.golang.org/p/tTxjOc0OGo
30
-//
31
-// The returned function is specialized for matching against s and
32
-// should only be given s. It's not curried for performance reasons.
33
-func foldFunc(s []byte) func(s, t []byte) bool {
34
-	nonLetter := false
35
-	special := false // special letter
36
-	for _, b := range s {
37
-		if b >= utf8.RuneSelf {
38
-			return bytes.EqualFold
39
-		}
40
-		upper := b & caseMask
41
-		if upper < 'A' || upper > 'Z' {
42
-			nonLetter = true
43
-		} else if upper == 'K' || upper == 'S' {
44
-			// See above for why these letters are special.
45
-			special = true
46
-		}
47
-	}
48
-	if special {
49
-		return equalFoldRight
50
-	}
51
-	if nonLetter {
52
-		return asciiEqualFold
53
-	}
54
-	return simpleLetterEqualFold
55
-}
56
-
57
-// equalFoldRight is a specialization of bytes.EqualFold when s is
58
-// known to be all ASCII (including punctuation), but contains an 's',
59
-// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
60
-// See comments on foldFunc.
61
-func equalFoldRight(s, t []byte) bool {
62
-	for _, sb := range s {
63
-		if len(t) == 0 {
64
-			return false
65
-		}
66
-		tb := t[0]
67
-		if tb < utf8.RuneSelf {
68
-			if sb != tb {
69
-				sbUpper := sb & caseMask
70
-				if 'A' <= sbUpper && sbUpper <= 'Z' {
71
-					if sbUpper != tb&caseMask {
72
-						return false
73
-					}
74
-				} else {
75
-					return false
76
-				}
77
-			}
78
-			t = t[1:]
79
-			continue
80
-		}
81
-		// sb is ASCII and t is not. t must be either kelvin
82
-		// sign or long s; sb must be s, S, k, or K.
83
-		tr, size := utf8.DecodeRune(t)
84
-		switch sb {
85
-		case 's', 'S':
86
-			if tr != smallLongEss {
87
-				return false
88
-			}
89
-		case 'k', 'K':
90
-			if tr != kelvin {
91
-				return false
92
-			}
93
-		default:
94
-			return false
95
-		}
96
-		t = t[size:]
97
-
98
-	}
99
-	if len(t) > 0 {
100
-		return false
101
-	}
102
-	return true
103
-}
104
-
105
-// asciiEqualFold is a specialization of bytes.EqualFold for use when
106
-// s is all ASCII (but may contain non-letters) and contains no
107
-// special-folding letters.
108
-// See comments on foldFunc.
109
-func asciiEqualFold(s, t []byte) bool {
110
-	if len(s) != len(t) {
111
-		return false
112
-	}
113
-	for i, sb := range s {
114
-		tb := t[i]
115
-		if sb == tb {
116
-			continue
117
-		}
118
-		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
119
-			if sb&caseMask != tb&caseMask {
120
-				return false
121
-			}
122
-		} else {
123
-			return false
124
-		}
125
-	}
126
-	return true
127
-}
128
-
129
-// simpleLetterEqualFold is a specialization of bytes.EqualFold for
130
-// use when s is all ASCII letters (no underscores, etc) and also
131
-// doesn't contain 'k', 'K', 's', or 'S'.
132
-// See comments on foldFunc.
133
-func simpleLetterEqualFold(s, t []byte) bool {
134
-	if len(s) != len(t) {
135
-		return false
136
-	}
137
-	for i, b := range s {
138
-		if b&caseMask != t[i]&caseMask {
139
-			return false
140
-		}
141
-	}
142
-	return true
143
-}
144 1
deleted file mode 100644
... ...
@@ -1,137 +0,0 @@
1
-// Copyright 2010 The Go Authors.  All rights reserved.
2
-// Use of this source code is governed by a BSD-style
3
-// license that can be found in the LICENSE file.
4
-
5
-package json
6
-
7
-import "bytes"
8
-
9
-// Compact appends to dst the JSON-encoded src with
10
-// insignificant space characters elided.
11
-func Compact(dst *bytes.Buffer, src []byte) error {
12
-	return compact(dst, src, false)
13
-}
14
-
15
-func compact(dst *bytes.Buffer, src []byte, escape bool) error {
16
-	origLen := dst.Len()
17
-	var scan scanner
18
-	scan.reset()
19
-	start := 0
20
-	for i, c := range src {
21
-		if escape && (c == '<' || c == '>' || c == '&') {
22
-			if start < i {
23
-				dst.Write(src[start:i])
24
-			}
25
-			dst.WriteString(`\u00`)
26
-			dst.WriteByte(hex[c>>4])
27
-			dst.WriteByte(hex[c&0xF])
28
-			start = i + 1
29
-		}
30
-		// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
31
-		if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
32
-			if start < i {
33
-				dst.Write(src[start:i])
34
-			}
35
-			dst.WriteString(`\u202`)
36
-			dst.WriteByte(hex[src[i+2]&0xF])
37
-			start = i + 3
38
-		}
39
-		v := scan.step(&scan, int(c))
40
-		if v >= scanSkipSpace {
41
-			if v == scanError {
42
-				break
43
-			}
44
-			if start < i {
45
-				dst.Write(src[start:i])
46
-			}
47
-			start = i + 1
48
-		}
49
-	}
50
-	if scan.eof() == scanError {
51
-		dst.Truncate(origLen)
52
-		return scan.err
53
-	}
54
-	if start < len(src) {
55
-		dst.Write(src[start:])
56
-	}
57
-	return nil
58
-}
59
-
60
-func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
61
-	dst.WriteByte('\n')
62
-	dst.WriteString(prefix)
63
-	for i := 0; i < depth; i++ {
64
-		dst.WriteString(indent)
65
-	}
66
-}
67
-
68
-// Indent appends to dst an indented form of the JSON-encoded src.
69
-// Each element in a JSON object or array begins on a new,
70
-// indented line beginning with prefix followed by one or more
71
-// copies of indent according to the indentation nesting.
72
-// The data appended to dst does not begin with the prefix nor
73
-// any indentation, and has no trailing newline, to make it
74
-// easier to embed inside other formatted JSON data.
75
-func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
76
-	origLen := dst.Len()
77
-	var scan scanner
78
-	scan.reset()
79
-	needIndent := false
80
-	depth := 0
81
-	for _, c := range src {
82
-		scan.bytes++
83
-		v := scan.step(&scan, int(c))
84
-		if v == scanSkipSpace {
85
-			continue
86
-		}
87
-		if v == scanError {
88
-			break
89
-		}
90
-		if needIndent && v != scanEndObject && v != scanEndArray {
91
-			needIndent = false
92
-			depth++
93
-			newline(dst, prefix, indent, depth)
94
-		}
95
-
96
-		// Emit semantically uninteresting bytes
97
-		// (in particular, punctuation in strings) unmodified.
98
-		if v == scanContinue {
99
-			dst.WriteByte(c)
100
-			continue
101
-		}
102
-
103
-		// Add spacing around real punctuation.
104
-		switch c {
105
-		case '{', '[':
106
-			// delay indent so that empty object and array are formatted as {} and [].
107
-			needIndent = true
108
-			dst.WriteByte(c)
109
-
110
-		case ',':
111
-			dst.WriteByte(c)
112
-			newline(dst, prefix, indent, depth)
113
-
114
-		case ':':
115
-			dst.WriteByte(c)
116
-			dst.WriteByte(' ')
117
-
118
-		case '}', ']':
119
-			if needIndent {
120
-				// suppress indent in empty object/array
121
-				needIndent = false
122
-			} else {
123
-				depth--
124
-				newline(dst, prefix, indent, depth)
125
-			}
126
-			dst.WriteByte(c)
127
-
128
-		default:
129
-			dst.WriteByte(c)
130
-		}
131
-	}
132
-	if scan.eof() == scanError {
133
-		dst.Truncate(origLen)
134
-		return scan.err
135
-	}
136
-	return nil
137
-}
138 1
deleted file mode 100644
... ...
@@ -1,630 +0,0 @@
1
-// Copyright 2010 The Go Authors.  All rights reserved.
2
-// Use of this source code is governed by a BSD-style
3
-// license that can be found in the LICENSE file.
4
-
5
-package json
6
-
7
-// JSON value parser state machine.
8
-// Just about at the limit of what is reasonable to write by hand.
9
-// Some parts are a bit tedious, but overall it nicely factors out the
10
-// otherwise common code from the multiple scanning functions
11
-// in this package (Compact, Indent, checkValid, nextValue, etc).
12
-//
13
-// This file starts with two simple examples using the scanner
14
-// before diving into the scanner itself.
15
-
16
-import "strconv"
17
-
18
-// checkValid verifies that data is valid JSON-encoded data.
19
-// scan is passed in for use by checkValid to avoid an allocation.
20
-func checkValid(data []byte, scan *scanner) error {
21
-	scan.reset()
22
-	for _, c := range data {
23
-		scan.bytes++
24
-		if scan.step(scan, int(c)) == scanError {
25
-			return scan.err
26
-		}
27
-	}
28
-	if scan.eof() == scanError {
29
-		return scan.err
30
-	}
31
-	return nil
32
-}
33
-
34
-// nextValue splits data after the next whole JSON value,
35
-// returning that value and the bytes that follow it as separate slices.
36
-// scan is passed in for use by nextValue to avoid an allocation.
37
-func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
38
-	scan.reset()
39
-	for i, c := range data {
40
-		v := scan.step(scan, int(c))
41
-		if v >= scanEndObject {
42
-			switch v {
43
-			// probe the scanner with a space to determine whether we will
44
-			// get scanEnd on the next character. Otherwise, if the next character
45
-			// is not a space, scanEndTop allocates a needless error.
46
-			case scanEndObject, scanEndArray:
47
-				if scan.step(scan, ' ') == scanEnd {
48
-					return data[:i+1], data[i+1:], nil
49
-				}
50
-			case scanError:
51
-				return nil, nil, scan.err
52
-			case scanEnd:
53
-				return data[0:i], data[i:], nil
54
-			}
55
-		}
56
-	}
57
-	if scan.eof() == scanError {
58
-		return nil, nil, scan.err
59
-	}
60
-	return data, nil, nil
61
-}
62
-
63
-// A SyntaxError is a description of a JSON syntax error.
64
-type SyntaxError struct {
65
-	msg    string // description of error
66
-	Offset int64  // error occurred after reading Offset bytes
67
-}
68
-
69
-func (e *SyntaxError) Error() string { return e.msg }
70
-
71
-// A scanner is a JSON scanning state machine.
72
-// Callers call scan.reset() and then pass bytes in one at a time
73
-// by calling scan.step(&scan, c) for each byte.
74
-// The return value, referred to as an opcode, tells the
75
-// caller about significant parsing events like beginning
76
-// and ending literals, objects, and arrays, so that the
77
-// caller can follow along if it wishes.
78
-// The return value scanEnd indicates that a single top-level
79
-// JSON value has been completed, *before* the byte that
80
-// just got passed in.  (The indication must be delayed in order
81
-// to recognize the end of numbers: is 123 a whole value or
82
-// the beginning of 12345e+6?).
83
-type scanner struct {
84
-	// The step is a func to be called to execute the next transition.
85
-	// Also tried using an integer constant and a single func
86
-	// with a switch, but using the func directly was 10% faster
87
-	// on a 64-bit Mac Mini, and it's nicer to read.
88
-	step func(*scanner, int) int
89
-
90
-	// Reached end of top-level value.
91
-	endTop bool
92
-
93
-	// Stack of what we're in the middle of - array values, object keys, object values.
94
-	parseState []int
95
-
96
-	// Error that happened, if any.
97
-	err error
98
-
99
-	// 1-byte redo (see undo method)
100
-	redo      bool
101
-	redoCode  int
102
-	redoState func(*scanner, int) int
103
-
104
-	// total bytes consumed, updated by decoder.Decode
105
-	bytes int64
106
-}
107
-
108
-// These values are returned by the state transition functions
109
-// assigned to scanner.state and the method scanner.eof.
110
-// They give details about the current state of the scan that
111
-// callers might be interested to know about.
112
-// It is okay to ignore the return value of any particular
113
-// call to scanner.state: if one call returns scanError,
114
-// every subsequent call will return scanError too.
115
-const (
116
-	// Continue.
117
-	scanContinue     = iota // uninteresting byte
118
-	scanBeginLiteral        // end implied by next result != scanContinue
119
-	scanBeginObject         // begin object
120
-	scanObjectKey           // just finished object key (string)
121
-	scanObjectValue         // just finished non-last object value
122
-	scanEndObject           // end object (implies scanObjectValue if possible)
123
-	scanBeginArray          // begin array
124
-	scanArrayValue          // just finished array value
125
-	scanEndArray            // end array (implies scanArrayValue if possible)
126
-	scanSkipSpace           // space byte; can skip; known to be last "continue" result
127
-
128
-	// Stop.
129
-	scanEnd   // top-level value ended *before* this byte; known to be first "stop" result
130
-	scanError // hit an error, scanner.err.
131
-)
132
-
133
-// These values are stored in the parseState stack.
134
-// They give the current state of a composite value
135
-// being scanned.  If the parser is inside a nested value
136
-// the parseState describes the nested state, outermost at entry 0.
137
-const (
138
-	parseObjectKey   = iota // parsing object key (before colon)
139
-	parseObjectValue        // parsing object value (after colon)
140
-	parseArrayValue         // parsing array value
141
-)
142
-
143
-// reset prepares the scanner for use.
144
-// It must be called before calling s.step.
145
-func (s *scanner) reset() {
146
-	s.step = stateBeginValue
147
-	s.parseState = s.parseState[0:0]
148
-	s.err = nil
149
-	s.redo = false
150
-	s.endTop = false
151
-}
152
-
153
-// eof tells the scanner that the end of input has been reached.
154
-// It returns a scan status just as s.step does.
155
-func (s *scanner) eof() int {
156
-	if s.err != nil {
157
-		return scanError
158
-	}
159
-	if s.endTop {
160
-		return scanEnd
161
-	}
162
-	s.step(s, ' ')
163
-	if s.endTop {
164
-		return scanEnd
165
-	}
166
-	if s.err == nil {
167
-		s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
168
-	}
169
-	return scanError
170
-}
171
-
172
-// pushParseState pushes a new parse state p onto the parse stack.
173
-func (s *scanner) pushParseState(p int) {
174
-	s.parseState = append(s.parseState, p)
175
-}
176
-
177
-// popParseState pops a parse state (already obtained) off the stack
178
-// and updates s.step accordingly.
179
-func (s *scanner) popParseState() {
180
-	n := len(s.parseState) - 1
181
-	s.parseState = s.parseState[0:n]
182
-	s.redo = false
183
-	if n == 0 {
184
-		s.step = stateEndTop
185
-		s.endTop = true
186
-	} else {
187
-		s.step = stateEndValue
188
-	}
189
-}
190
-
191
-func isSpace(c rune) bool {
192
-	return c == ' ' || c == '\t' || c == '\r' || c == '\n'
193
-}
194
-
195
-// stateBeginValueOrEmpty is the state after reading `[`.
196
-func stateBeginValueOrEmpty(s *scanner, c int) int {
197
-	if c <= ' ' && isSpace(rune(c)) {
198
-		return scanSkipSpace
199
-	}
200
-	if c == ']' {
201
-		return stateEndValue(s, c)
202
-	}
203
-	return stateBeginValue(s, c)
204
-}
205
-
206
-// stateBeginValue is the state at the beginning of the input.
207
-func stateBeginValue(s *scanner, c int) int {
208
-	if c <= ' ' && isSpace(rune(c)) {
209
-		return scanSkipSpace
210
-	}
211
-	switch c {
212
-	case '{':
213
-		s.step = stateBeginStringOrEmpty
214
-		s.pushParseState(parseObjectKey)
215
-		return scanBeginObject
216
-	case '[':
217
-		s.step = stateBeginValueOrEmpty
218
-		s.pushParseState(parseArrayValue)
219
-		return scanBeginArray
220
-	case '"':
221
-		s.step = stateInString
222
-		return scanBeginLiteral
223
-	case '-':
224
-		s.step = stateNeg
225
-		return scanBeginLiteral
226
-	case '0': // beginning of 0.123
227
-		s.step = state0
228
-		return scanBeginLiteral
229
-	case 't': // beginning of true
230
-		s.step = stateT
231
-		return scanBeginLiteral
232
-	case 'f': // beginning of false
233
-		s.step = stateF
234
-		return scanBeginLiteral
235
-	case 'n': // beginning of null
236
-		s.step = stateN
237
-		return scanBeginLiteral
238
-	}
239
-	if '1' <= c && c <= '9' { // beginning of 1234.5
240
-		s.step = state1
241
-		return scanBeginLiteral
242
-	}
243
-	return s.error(c, "looking for beginning of value")
244
-}
245
-
246
-// stateBeginStringOrEmpty is the state after reading `{`.
247
-func stateBeginStringOrEmpty(s *scanner, c int) int {
248
-	if c <= ' ' && isSpace(rune(c)) {
249
-		return scanSkipSpace
250
-	}
251
-	if c == '}' {
252
-		n := len(s.parseState)
253
-		s.parseState[n-1] = parseObjectValue
254
-		return stateEndValue(s, c)
255
-	}
256
-	return stateBeginString(s, c)
257
-}
258
-
259
-// stateBeginString is the state after reading `{"key": value,`.
260
-func stateBeginString(s *scanner, c int) int {
261
-	if c <= ' ' && isSpace(rune(c)) {
262
-		return scanSkipSpace
263
-	}
264
-	if c == '"' {
265
-		s.step = stateInString
266
-		return scanBeginLiteral
267
-	}
268
-	return s.error(c, "looking for beginning of object key string")
269
-}
270
-
271
-// stateEndValue is the state after completing a value,
272
-// such as after reading `{}` or `true` or `["x"`.
273
-func stateEndValue(s *scanner, c int) int {
274
-	n := len(s.parseState)
275
-	if n == 0 {
276
-		// Completed top-level before the current byte.
277
-		s.step = stateEndTop
278
-		s.endTop = true
279
-		return stateEndTop(s, c)
280
-	}
281
-	if c <= ' ' && isSpace(rune(c)) {
282
-		s.step = stateEndValue
283
-		return scanSkipSpace
284
-	}
285
-	ps := s.parseState[n-1]
286
-	switch ps {
287
-	case parseObjectKey:
288
-		if c == ':' {
289
-			s.parseState[n-1] = parseObjectValue
290
-			s.step = stateBeginValue
291
-			return scanObjectKey
292
-		}
293
-		return s.error(c, "after object key")
294
-	case parseObjectValue:
295
-		if c == ',' {
296
-			s.parseState[n-1] = parseObjectKey
297
-			s.step = stateBeginString
298
-			return scanObjectValue
299
-		}
300
-		if c == '}' {
301
-			s.popParseState()
302
-			return scanEndObject
303
-		}
304
-		return s.error(c, "after object key:value pair")
305
-	case parseArrayValue:
306
-		if c == ',' {
307
-			s.step = stateBeginValue
308
-			return scanArrayValue
309
-		}
310
-		if c == ']' {
311
-			s.popParseState()
312
-			return scanEndArray
313
-		}
314
-		return s.error(c, "after array element")
315
-	}
316
-	return s.error(c, "")
317
-}
318
-
319
-// stateEndTop is the state after finishing the top-level value,
320
-// such as after reading `{}` or `[1,2,3]`.
321
-// Only space characters should be seen now.
322
-func stateEndTop(s *scanner, c int) int {
323
-	if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
324
-		// Complain about non-space byte on next call.
325
-		s.error(c, "after top-level value")
326
-	}
327
-	return scanEnd
328
-}
329
-
330
-// stateInString is the state after reading `"`.
331
-func stateInString(s *scanner, c int) int {
332
-	if c == '"' {
333
-		s.step = stateEndValue
334
-		return scanContinue
335
-	}
336
-	if c == '\\' {
337
-		s.step = stateInStringEsc
338
-		return scanContinue
339
-	}
340
-	if c < 0x20 {
341
-		return s.error(c, "in string literal")
342
-	}
343
-	return scanContinue
344
-}
345
-
346
-// stateInStringEsc is the state after reading `"\` during a quoted string.
347
-func stateInStringEsc(s *scanner, c int) int {
348
-	switch c {
349
-	case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
350
-		s.step = stateInString
351
-		return scanContinue
352
-	}
353
-	if c == 'u' {
354
-		s.step = stateInStringEscU
355
-		return scanContinue
356
-	}
357
-	return s.error(c, "in string escape code")
358
-}
359
-
360
-// stateInStringEscU is the state after reading `"\u` during a quoted string.
361
-func stateInStringEscU(s *scanner, c int) int {
362
-	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
363
-		s.step = stateInStringEscU1
364
-		return scanContinue
365
-	}
366
-	// numbers
367
-	return s.error(c, "in \\u hexadecimal character escape")
368
-}
369
-
370
-// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
371
-func stateInStringEscU1(s *scanner, c int) int {
372
-	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
373
-		s.step = stateInStringEscU12
374
-		return scanContinue
375
-	}
376
-	// numbers
377
-	return s.error(c, "in \\u hexadecimal character escape")
378
-}
379
-
380
-// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
381
-func stateInStringEscU12(s *scanner, c int) int {
382
-	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
383
-		s.step = stateInStringEscU123
384
-		return scanContinue
385
-	}
386
-	// numbers
387
-	return s.error(c, "in \\u hexadecimal character escape")
388
-}
389
-
390
-// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
391
-func stateInStringEscU123(s *scanner, c int) int {
392
-	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
393
-		s.step = stateInString
394
-		return scanContinue
395
-	}
396
-	// numbers
397
-	return s.error(c, "in \\u hexadecimal character escape")
398
-}
399
-
400
-// stateNeg is the state after reading `-` during a number.
401
-func stateNeg(s *scanner, c int) int {
402
-	if c == '0' {
403
-		s.step = state0
404
-		return scanContinue
405
-	}
406
-	if '1' <= c && c <= '9' {
407
-		s.step = state1
408
-		return scanContinue
409
-	}
410
-	return s.error(c, "in numeric literal")
411
-}
412
-
413
-// state1 is the state after reading a non-zero integer during a number,
414
-// such as after reading `1` or `100` but not `0`.
415
-func state1(s *scanner, c int) int {
416
-	if '0' <= c && c <= '9' {
417
-		s.step = state1
418
-		return scanContinue
419
-	}
420
-	return state0(s, c)
421
-}
422
-
423
-// state0 is the state after reading `0` during a number.
424
-func state0(s *scanner, c int) int {
425
-	if c == '.' {
426
-		s.step = stateDot
427
-		return scanContinue
428
-	}
429
-	if c == 'e' || c == 'E' {
430
-		s.step = stateE
431
-		return scanContinue
432
-	}
433
-	return stateEndValue(s, c)
434
-}
435
-
436
-// stateDot is the state after reading the integer and decimal point in a number,
437
-// such as after reading `1.`.
438
-func stateDot(s *scanner, c int) int {
439
-	if '0' <= c && c <= '9' {
440
-		s.step = stateDot0
441
-		return scanContinue
442
-	}
443
-	return s.error(c, "after decimal point in numeric literal")
444
-}
445
-
446
-// stateDot0 is the state after reading the integer, decimal point, and subsequent
447
-// digits of a number, such as after reading `3.14`.
448
-func stateDot0(s *scanner, c int) int {
449
-	if '0' <= c && c <= '9' {
450
-		s.step = stateDot0
451
-		return scanContinue
452
-	}
453
-	if c == 'e' || c == 'E' {
454
-		s.step = stateE
455
-		return scanContinue
456
-	}
457
-	return stateEndValue(s, c)
458
-}
459
-
460
-// stateE is the state after reading the mantissa and e in a number,
461
-// such as after reading `314e` or `0.314e`.
462
-func stateE(s *scanner, c int) int {
463
-	if c == '+' {
464
-		s.step = stateESign
465
-		return scanContinue
466
-	}
467
-	if c == '-' {
468
-		s.step = stateESign
469
-		return scanContinue
470
-	}
471
-	return stateESign(s, c)
472
-}
473
-
474
-// stateESign is the state after reading the mantissa, e, and sign in a number,
475
-// such as after reading `314e-` or `0.314e+`.
476
-func stateESign(s *scanner, c int) int {
477
-	if '0' <= c && c <= '9' {
478
-		s.step = stateE0
479
-		return scanContinue
480
-	}
481
-	return s.error(c, "in exponent of numeric literal")
482
-}
483
-
484
-// stateE0 is the state after reading the mantissa, e, optional sign,
485
-// and at least one digit of the exponent in a number,
486
-// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
487
-func stateE0(s *scanner, c int) int {
488
-	if '0' <= c && c <= '9' {
489
-		s.step = stateE0
490
-		return scanContinue
491
-	}
492
-	return stateEndValue(s, c)
493
-}
494
-
495
-// stateT is the state after reading `t`.
496
-func stateT(s *scanner, c int) int {
497
-	if c == 'r' {
498
-		s.step = stateTr
499
-		return scanContinue
500
-	}
501
-	return s.error(c, "in literal true (expecting 'r')")
502
-}
503
-
504
-// stateTr is the state after reading `tr`.
505
-func stateTr(s *scanner, c int) int {
506
-	if c == 'u' {
507
-		s.step = stateTru
508
-		return scanContinue
509
-	}
510
-	return s.error(c, "in literal true (expecting 'u')")
511
-}
512
-
513
-// stateTru is the state after reading `tru`.
514
-func stateTru(s *scanner, c int) int {
515
-	if c == 'e' {
516
-		s.step = stateEndValue
517
-		return scanContinue
518
-	}
519
-	return s.error(c, "in literal true (expecting 'e')")
520
-}
521
-
522
-// stateF is the state after reading `f`.
523
-func stateF(s *scanner, c int) int {
524
-	if c == 'a' {
525
-		s.step = stateFa
526
-		return scanContinue
527
-	}
528
-	return s.error(c, "in literal false (expecting 'a')")
529
-}
530
-
531
-// stateFa is the state after reading `fa`.
532
-func stateFa(s *scanner, c int) int {
533
-	if c == 'l' {
534
-		s.step = stateFal
535
-		return scanContinue
536
-	}
537
-	return s.error(c, "in literal false (expecting 'l')")
538
-}
539
-
540
-// stateFal is the state after reading `fal`.
541
-func stateFal(s *scanner, c int) int {
542
-	if c == 's' {
543
-		s.step = stateFals
544
-		return scanContinue
545
-	}
546
-	return s.error(c, "in literal false (expecting 's')")
547
-}
548
-
549
-// stateFals is the state after reading `fals`.
550
-func stateFals(s *scanner, c int) int {
551
-	if c == 'e' {
552
-		s.step = stateEndValue
553
-		return scanContinue
554
-	}
555
-	return s.error(c, "in literal false (expecting 'e')")
556
-}
557
-
558
-// stateN is the state after reading `n`.
559
-func stateN(s *scanner, c int) int {
560
-	if c == 'u' {
561
-		s.step = stateNu
562
-		return scanContinue
563
-	}
564
-	return s.error(c, "in literal null (expecting 'u')")
565
-}
566
-
567
-// stateNu is the state after reading `nu`.
568
-func stateNu(s *scanner, c int) int {
569
-	if c == 'l' {
570
-		s.step = stateNul
571
-		return scanContinue
572
-	}
573
-	return s.error(c, "in literal null (expecting 'l')")
574
-}
575
-
576
-// stateNul is the state after reading `nul`.
577
-func stateNul(s *scanner, c int) int {
578
-	if c == 'l' {
579
-		s.step = stateEndValue
580
-		return scanContinue
581
-	}
582
-	return s.error(c, "in literal null (expecting 'l')")
583
-}
584
-
585
-// stateError is the state after reaching a syntax error,
586
-// such as after reading `[1}` or `5.1.2`.
587
-func stateError(s *scanner, c int) int {
588
-	return scanError
589
-}
590
-
591
-// error records an error and switches to the error state.
592
-func (s *scanner) error(c int, context string) int {
593
-	s.step = stateError
594
-	s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
595
-	return scanError
596
-}
597
-
598
-// quoteChar formats c as a quoted character literal
599
-func quoteChar(c int) string {
600
-	// special cases - different from quoted strings
601
-	if c == '\'' {
602
-		return `'\''`
603
-	}
604
-	if c == '"' {
605
-		return `'"'`
606
-	}
607
-
608
-	// use quoted string with different quotation marks
609
-	s := strconv.Quote(string(c))
610
-	return "'" + s[1:len(s)-1] + "'"
611
-}
612
-
613
-// undo causes the scanner to return scanCode from the next state transition.
614
-// This gives callers a simple 1-byte undo mechanism.
615
-func (s *scanner) undo(scanCode int) {
616
-	if s.redo {
617
-		panic("json: invalid use of scanner")
618
-	}
619
-	s.redoCode = scanCode
620
-	s.redoState = s.step
621
-	s.step = stateRedo
622
-	s.redo = true
623
-}
624
-
625
-// stateRedo helps implement the scanner's 1-byte undo.
626
-func stateRedo(s *scanner, c int) int {
627
-	s.redo = false
628
-	s.step = s.redoState
629
-	return s.redoCode
630
-}
631 1
deleted file mode 100644
... ...
@@ -1,487 +0,0 @@
1
-// Copyright 2010 The Go Authors.  All rights reserved.
2
-// Use of this source code is governed by a BSD-style
3
-// license that can be found in the LICENSE file.
4
-
5
-package json
6
-
7
-import (
8
-	"bytes"
9
-	"errors"
10
-	"io"
11
-)
12
-
13
-// A Decoder reads and decodes JSON objects from an input stream.
14
-type Decoder struct {
15
-	r     io.Reader
16
-	buf   []byte
17
-	d     decodeState
18
-	scanp int // start of unread data in buf
19
-	scan  scanner
20
-	err   error
21
-
22
-	tokenState int
23
-	tokenStack []int
24
-}
25
-
26
-// NewDecoder returns a new decoder that reads from r.
27
-//
28
-// The decoder introduces its own buffering and may
29
-// read data from r beyond the JSON values requested.
30
-func NewDecoder(r io.Reader) *Decoder {
31
-	return &Decoder{r: r}
32
-}
33
-
34
-// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
35
-// Number instead of as a float64.
36
-func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
37
-
38
-// Decode reads the next JSON-encoded value from its
39
-// input and stores it in the value pointed to by v.
40
-//
41
-// See the documentation for Unmarshal for details about
42
-// the conversion of JSON into a Go value.
43
-func (dec *Decoder) Decode(v interface{}) error {
44
-	if dec.err != nil {
45
-		return dec.err
46
-	}
47
-
48
-	if err := dec.tokenPrepareForDecode(); err != nil {
49
-		return err
50
-	}
51
-
52
-	if !dec.tokenValueAllowed() {
53
-		return &SyntaxError{msg: "not at beginning of value"}
54
-	}
55
-
56
-	// Read whole value into buffer.
57
-	n, err := dec.readValue()
58
-	if err != nil {
59
-		return err
60
-	}
61
-	dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
62
-	dec.scanp += n
63
-
64
-	// Don't save err from unmarshal into dec.err:
65
-	// the connection is still usable since we read a complete JSON
66
-	// object from it before the error happened.
67
-	err = dec.d.unmarshal(v)
68
-
69
-	// fixup token streaming state
70
-	dec.tokenValueEnd()
71
-
72
-	return err
73
-}
74
-
75
-// Buffered returns a reader of the data remaining in the Decoder's
76
-// buffer. The reader is valid until the next call to Decode.
77
-func (dec *Decoder) Buffered() io.Reader {
78
-	return bytes.NewReader(dec.buf[dec.scanp:])
79
-}
80
-
81
-// readValue reads a JSON value into dec.buf.
82
-// It returns the length of the encoding.
83
-func (dec *Decoder) readValue() (int, error) {
84
-	dec.scan.reset()
85
-
86
-	scanp := dec.scanp
87
-	var err error
88
-Input:
89
-	for {
90
-		// Look in the buffer for a new value.
91
-		for i, c := range dec.buf[scanp:] {
92
-			dec.scan.bytes++
93
-			v := dec.scan.step(&dec.scan, int(c))
94
-			if v == scanEnd {
95
-				scanp += i
96
-				break Input
97
-			}
98
-			// scanEnd is delayed one byte.
99
-			// We might block trying to get that byte from src,
100
-			// so instead invent a space byte.
101
-			if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
102
-				scanp += i + 1
103
-				break Input
104
-			}
105
-			if v == scanError {
106
-				dec.err = dec.scan.err
107
-				return 0, dec.scan.err
108
-			}
109
-		}
110
-		scanp = len(dec.buf)
111
-
112
-		// Did the last read have an error?
113
-		// Delayed until now to allow buffer scan.
114
-		if err != nil {
115
-			if err == io.EOF {
116
-				if dec.scan.step(&dec.scan, ' ') == scanEnd {
117
-					break Input
118
-				}
119
-				if nonSpace(dec.buf) {
120
-					err = io.ErrUnexpectedEOF
121
-				}
122
-			}
123
-			dec.err = err
124
-			return 0, err
125
-		}
126
-
127
-		n := scanp - dec.scanp
128
-		err = dec.refill()
129
-		scanp = dec.scanp + n
130
-	}
131
-	return scanp - dec.scanp, nil
132
-}
133
-
134
-func (dec *Decoder) refill() error {
135
-	// Make room to read more into the buffer.
136
-	// First slide down data already consumed.
137
-	if dec.scanp > 0 {
138
-		n := copy(dec.buf, dec.buf[dec.scanp:])
139
-		dec.buf = dec.buf[:n]
140
-		dec.scanp = 0
141
-	}
142
-
143
-	// Grow buffer if not large enough.
144
-	const minRead = 512
145
-	if cap(dec.buf)-len(dec.buf) < minRead {
146
-		newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
147
-		copy(newBuf, dec.buf)
148
-		dec.buf = newBuf
149
-	}
150
-
151
-	// Read.  Delay error for next iteration (after scan).
152
-	n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
153
-	dec.buf = dec.buf[0 : len(dec.buf)+n]
154
-
155
-	return err
156
-}
157
-
158
-func nonSpace(b []byte) bool {
159
-	for _, c := range b {
160
-		if !isSpace(rune(c)) {
161
-			return true
162
-		}
163
-	}
164
-	return false
165
-}
166
-
167
-// An Encoder writes JSON objects to an output stream.
168
-type Encoder struct {
169
-	w         io.Writer
170
-	err       error
171
-	canonical bool
172
-}
173
-
174
-// NewEncoder returns a new encoder that writes to w.
175
-func NewEncoder(w io.Writer) *Encoder {
176
-	return &Encoder{w: w}
177
-}
178
-
179
-// Canonical causes the encoder to switch to Canonical JSON mode.
180
-// Read more at: http://wiki.laptop.org/go/Canonical_JSON
181
-func (enc *Encoder) Canonical() { enc.canonical = true }
182
-
183
-// Encode writes the JSON encoding of v to the stream,
184
-// followed by a newline character.
185
-//
186
-// See the documentation for Marshal for details about the
187
-// conversion of Go values to JSON.
188
-func (enc *Encoder) Encode(v interface{}) error {
189
-	if enc.err != nil {
190
-		return enc.err
191
-	}
192
-	e := newEncodeState(enc.canonical)
193
-	err := e.marshal(v)
194
-	if err != nil {
195
-		return err
196
-	}
197
-
198
-	if !enc.canonical {
199
-		// Terminate each value with a newline.
200
-		// This makes the output look a little nicer
201
-		// when debugging, and some kind of space
202
-		// is required if the encoded value was a number,
203
-		// so that the reader knows there aren't more
204
-		// digits coming.
205
-		e.WriteByte('\n')
206
-	}
207
-
208
-	if _, err = enc.w.Write(e.Bytes()); err != nil {
209
-		enc.err = err
210
-	}
211
-	encodeStatePool.Put(e)
212
-	return err
213
-}
214
-
215
-// RawMessage is a raw encoded JSON object.
216
-// It implements Marshaler and Unmarshaler and can
217
-// be used to delay JSON decoding or precompute a JSON encoding.
218
-type RawMessage []byte
219
-
220
-// MarshalJSON returns *m as the JSON encoding of m.
221
-func (m *RawMessage) MarshalJSON() ([]byte, error) {
222
-	return *m, nil
223
-}
224
-
225
-// UnmarshalJSON sets *m to a copy of data.
226
-func (m *RawMessage) UnmarshalJSON(data []byte) error {
227
-	if m == nil {
228
-		return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
229
-	}
230
-	*m = append((*m)[0:0], data...)
231
-	return nil
232
-}
233
-
234
-var _ Marshaler = (*RawMessage)(nil)
235
-var _ Unmarshaler = (*RawMessage)(nil)
236
-
237
-// A Token holds a value of one of these types:
238
-//
239
-//	Delim, for the four JSON delimiters [ ] { }
240
-//	bool, for JSON booleans
241
-//	float64, for JSON numbers
242
-//	Number, for JSON numbers
243
-//	string, for JSON string literals
244
-//	nil, for JSON null
245
-//
246
-type Token interface{}
247
-
248
-const (
249
-	tokenTopValue = iota
250
-	tokenArrayStart
251
-	tokenArrayValue
252
-	tokenArrayComma
253
-	tokenObjectStart
254
-	tokenObjectKey
255
-	tokenObjectColon
256
-	tokenObjectValue
257
-	tokenObjectComma
258
-)
259
-
260
-// advance tokenstate from a separator state to a value state
261
-func (dec *Decoder) tokenPrepareForDecode() error {
262
-	// Note: Not calling peek before switch, to avoid
263
-	// putting peek into the standard Decode path.
264
-	// peek is only called when using the Token API.
265
-	switch dec.tokenState {
266
-	case tokenArrayComma:
267
-		c, err := dec.peek()
268
-		if err != nil {
269
-			return err
270
-		}
271
-		if c != ',' {
272
-			return &SyntaxError{"expected comma after array element", 0}
273
-		}
274
-		dec.scanp++
275
-		dec.tokenState = tokenArrayValue
276
-	case tokenObjectColon:
277
-		c, err := dec.peek()
278
-		if err != nil {
279
-			return err
280
-		}
281
-		if c != ':' {
282
-			return &SyntaxError{"expected colon after object key", 0}
283
-		}
284
-		dec.scanp++
285
-		dec.tokenState = tokenObjectValue
286
-	}
287
-	return nil
288
-}
289
-
290
-func (dec *Decoder) tokenValueAllowed() bool {
291
-	switch dec.tokenState {
292
-	case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
293
-		return true
294
-	}
295
-	return false
296
-}
297
-
298
-func (dec *Decoder) tokenValueEnd() {
299
-	switch dec.tokenState {
300
-	case tokenArrayStart, tokenArrayValue:
301
-		dec.tokenState = tokenArrayComma
302
-	case tokenObjectValue:
303
-		dec.tokenState = tokenObjectComma
304
-	}
305
-}
306
-
307
-// A Delim is a JSON array or object delimiter, one of [ ] { or }.
308
-type Delim rune
309
-
310
-func (d Delim) String() string {
311
-	return string(d)
312
-}
313
-
314
-// Token returns the next JSON token in the input stream.
315
-// At the end of the input stream, Token returns nil, io.EOF.
316
-//
317
-// Token guarantees that the delimiters [ ] { } it returns are
318
-// properly nested and matched: if Token encounters an unexpected
319
-// delimiter in the input, it will return an error.
320
-//
321
-// The input stream consists of basic JSON values—bool, string,
322
-// number, and null—along with delimiters [ ] { } of type Delim
323
-// to mark the start and end of arrays and objects.
324
-// Commas and colons are elided.
325
-func (dec *Decoder) Token() (Token, error) {
326
-	for {
327
-		c, err := dec.peek()
328
-		if err != nil {
329
-			return nil, err
330
-		}
331
-		switch c {
332
-		case '[':
333
-			if !dec.tokenValueAllowed() {
334
-				return dec.tokenError(c)
335
-			}
336
-			dec.scanp++
337
-			dec.tokenStack = append(dec.tokenStack, dec.tokenState)
338
-			dec.tokenState = tokenArrayStart
339
-			return Delim('['), nil
340
-
341
-		case ']':
342
-			if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
343
-				return dec.tokenError(c)
344
-			}
345
-			dec.scanp++
346
-			dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
347
-			dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
348
-			dec.tokenValueEnd()
349
-			return Delim(']'), nil
350
-
351
-		case '{':
352
-			if !dec.tokenValueAllowed() {
353
-				return dec.tokenError(c)
354
-			}
355
-			dec.scanp++
356
-			dec.tokenStack = append(dec.tokenStack, dec.tokenState)
357
-			dec.tokenState = tokenObjectStart
358
-			return Delim('{'), nil
359
-
360
-		case '}':
361
-			if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
362
-				return dec.tokenError(c)
363
-			}
364
-			dec.scanp++
365
-			dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
366
-			dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
367
-			dec.tokenValueEnd()
368
-			return Delim('}'), nil
369
-
370
-		case ':':
371
-			if dec.tokenState != tokenObjectColon {
372
-				return dec.tokenError(c)
373
-			}
374
-			dec.scanp++
375
-			dec.tokenState = tokenObjectValue
376
-			continue
377
-
378
-		case ',':
379
-			if dec.tokenState == tokenArrayComma {
380
-				dec.scanp++
381
-				dec.tokenState = tokenArrayValue
382
-				continue
383
-			}
384
-			if dec.tokenState == tokenObjectComma {
385
-				dec.scanp++
386
-				dec.tokenState = tokenObjectKey
387
-				continue
388
-			}
389
-			return dec.tokenError(c)
390
-
391
-		case '"':
392
-			if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
393
-				var x string
394
-				old := dec.tokenState
395
-				dec.tokenState = tokenTopValue
396
-				err := dec.Decode(&x)
397
-				dec.tokenState = old
398
-				if err != nil {
399
-					clearOffset(err)
400
-					return nil, err
401
-				}
402
-				dec.tokenState = tokenObjectColon
403
-				return x, nil
404
-			}
405
-			fallthrough
406
-
407
-		default:
408
-			if !dec.tokenValueAllowed() {
409
-				return dec.tokenError(c)
410
-			}
411
-			var x interface{}
412
-			if err := dec.Decode(&x); err != nil {
413
-				clearOffset(err)
414
-				return nil, err
415
-			}
416
-			return x, nil
417
-		}
418
-	}
419
-}
420
-
421
-func clearOffset(err error) {
422
-	if s, ok := err.(*SyntaxError); ok {
423
-		s.Offset = 0
424
-	}
425
-}
426
-
427
-func (dec *Decoder) tokenError(c byte) (Token, error) {
428
-	var context string
429
-	switch dec.tokenState {
430
-	case tokenTopValue:
431
-		context = " looking for beginning of value"
432
-	case tokenArrayStart, tokenArrayValue, tokenObjectValue:
433
-		context = " looking for beginning of value"
434
-	case tokenArrayComma:
435
-		context = " after array element"
436
-	case tokenObjectKey:
437
-		context = " looking for beginning of object key string"
438
-	case tokenObjectColon:
439
-		context = " after object key"
440
-	case tokenObjectComma:
441
-		context = " after object key:value pair"
442
-	}
443
-	return nil, &SyntaxError{"invalid character " + quoteChar(int(c)) + " " + context, 0}
444
-}
445
-
446
-// More reports whether there is another element in the
447
-// current array or object being parsed.
448
-func (dec *Decoder) More() bool {
449
-	c, err := dec.peek()
450
-	return err == nil && c != ']' && c != '}'
451
-}
452
-
453
-func (dec *Decoder) peek() (byte, error) {
454
-	var err error
455
-	for {
456
-		for i := dec.scanp; i < len(dec.buf); i++ {
457
-			c := dec.buf[i]
458
-			if isSpace(rune(c)) {
459
-				continue
460
-			}
461
-			dec.scanp = i
462
-			return c, nil
463
-		}
464
-		// buffer has been scanned, now report any error
465
-		if err != nil {
466
-			return 0, err
467
-		}
468
-		err = dec.refill()
469
-	}
470
-}
471
-
472
-/*
473
-TODO
474
-
475
-// EncodeToken writes the given JSON token to the stream.
476
-// It returns an error if the delimiters [ ] { } are not properly used.
477
-//
478
-// EncodeToken does not call Flush, because usually it is part of
479
-// a larger operation such as Encode, and those will call Flush when finished.
480
-// Callers that create an Encoder and then invoke EncodeToken directly,
481
-// without using Encode, need to call Flush when finished to ensure that
482
-// the JSON is written to the underlying writer.
483
-func (e *Encoder) EncodeToken(t Token) error  {
484
-	...
485
-}
486
-
487
-*/
488 1
deleted file mode 100644
... ...
@@ -1,44 +0,0 @@
1
-// Copyright 2011 The Go Authors. All rights reserved.
2
-// Use of this source code is governed by a BSD-style
3
-// license that can be found in the LICENSE file.
4
-
5
-package json
6
-
7
-import (
8
-	"strings"
9
-)
10
-
11
-// tagOptions is the string following a comma in a struct field's "json"
12
-// tag, or the empty string. It does not include the leading comma.
13
-type tagOptions string
14
-
15
-// parseTag splits a struct field's json tag into its name and
16
-// comma-separated options.
17
-func parseTag(tag string) (string, tagOptions) {
18
-	if idx := strings.Index(tag, ","); idx != -1 {
19
-		return tag[:idx], tagOptions(tag[idx+1:])
20
-	}
21
-	return tag, tagOptions("")
22
-}
23
-
24
-// Contains reports whether a comma-separated list of options
25
-// contains a particular substr flag. substr must be surrounded by a
26
-// string boundary or commas.
27
-func (o tagOptions) Contains(optionName string) bool {
28
-	if len(o) == 0 {
29
-		return false
30
-	}
31
-	s := string(o)
32
-	for s != "" {
33
-		var next string
34
-		i := strings.Index(s, ",")
35
-		if i >= 0 {
36
-			s, next = s[:i], s[i+1:]
37
-		}
38
-		if s == optionName {
39
-			return true
40
-		}
41
-		s = next
42
-	}
43
-	return false
44
-}