
bump cloud.google.com/go v0.44.3

bump cloud.google.com/go v0.44.3:

full diff: https://github.com/googleapis/google-cloud-go/compare/v0.23.0...v0.44.3

bump googleapis/gax-go v2.0.5

full diff: https://github.com/googleapis/gax-go/compare/v2.0.0...v2.0.5

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Signed-off-by: Tibor Vass <tibor@docker.com>

Sebastiaan van Stijn authored on 2019/08/26 22:47:02
Showing 49 changed files
... ...
@@ -118,8 +118,8 @@ github.com/bsphere/le_go                            7a984a84b5492ae539b79b62fb4a
118 118
 golang.org/x/oauth2                                 bf48bf16ab8d622ce64ec6ce98d2c98f916b6303
119 119
 google.golang.org/api                               de943baf05a022a8f921b544b7827bacaba1aed5
120 120
 go.opencensus.io                                    c3ed530f775d85e577ca652cb052a52c078aad26 # v0.11.0
121
-cloud.google.com/go                                 0fd7230b2a7505833d5f69b75cbd6c9582401479 # v0.23.0
122
-github.com/googleapis/gax-go                        317e0006254c44a0ac427cc52a0e083ff0b9622f # v2.0.0
121
+cloud.google.com/go                                 ceeb313ad77b789a7fa5287b36a1d127b69b7093 # v0.44.3
122
+github.com/googleapis/gax-go                        bd5b16380fd03dc758d11cef74ba2e3bc8b0e8c2 # v2.0.5
123 123
 google.golang.org/genproto                          3f1135a288c9a07e340ae8ba4cc6c7065a3160e8
124 124
 
125 125
 # containerd
... ...
@@ -8,7 +8,7 @@ Go packages for [Google Cloud Platform](https://cloud.google.com) services.
8 8
 import "cloud.google.com/go"
9 9
 ```
10 10
 
11
-To install the packages on your system,
11
+To install the packages on your system, *do not clone the repo*. Instead use
12 12
 
13 13
 ```
14 14
 $ go get -u cloud.google.com/go/...
... ...
@@ -19,263 +19,44 @@ make backwards-incompatible changes.
19 19
 
20 20
 **NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).
21 21
 
22
-  * [News](#news)
23
-  * [Supported APIs](#supported-apis)
24
-  * [Go Versions Supported](#go-versions-supported)
25
-  * [Authorization](#authorization)
26
-  * [Cloud Datastore](#cloud-datastore-)
27
-  * [Cloud Storage](#cloud-storage-)
28
-  * [Cloud Pub/Sub](#cloud-pub-sub-)
29
-  * [Cloud BigQuery](#cloud-bigquery-)
30
-  * [Stackdriver Logging](#stackdriver-logging-)
31
-  * [Cloud Spanner](#cloud-spanner-)
32
-
33
-
34
-## News
35
-
36
-_May 18, 2018_
37
-
38
-*v0.23.0*
39
-
40
-- bigquery: Add DDL stats to query statistics.
41
-- bigtable:
42
-  - cbt: Add cells-per-column limit for row lookup.
43
-  - cbt: Make it possible to combine read filters.
44
-- dlp: v2beta2 client removed. Use the v2 client instead.
45
-- firestore, spanner: Fix compilation errors due to protobuf changes.
46
-
47
-_May 8, 2018_
48
-
49
-*v0.22.0*
50
-
51
-- bigtable:
52
-  - cbt: Support cells per column limit for row read.
53
-  - bttest: Correctly handle empty RowSet.
54
-  - Fix ReadModifyWrite operation in emulator.
55
-  - Fix API path in GetCluster.
56
-
57
-- bigquery:
58
-  - BEHAVIOR CHANGE: Retry on 503 status code.
59
-  - Add dataset.DeleteWithContents.
60
-  - Add SchemaUpdateOptions for query jobs.
61
-  - Add Timeline to QueryStatistics.
62
-  - Add more stats to ExplainQueryStage.
63
-  - Support Parquet data format.
64
-
65
-- datastore:
66
-  - Support omitempty for times.
67
-
68
-- dlp:
69
-  - **BREAKING CHANGE:** Remove v1beta1 client. Please migrate to the v2 client,
70
-  which is now out of beta.
71
-  - Add v2 client.
72
-
73
-- firestore:
74
-  - BEHAVIOR CHANGE: Treat set({}, MergeAll) as valid.
75
-
76
-- iam:
77
-  - Support JWT signing via SignJwt callopt.
78
-
79
-- profiler:
80
-  - BEHAVIOR CHANGE: PollForSerialOutput returns an error when context.Done.
81
-  - BEHAVIOR CHANGE: Increase the initial backoff to 1 minute.
82
-  - Avoid returning empty serial port output.
83
-
84
-- pubsub:
85
-  - BEHAVIOR CHANGE: Don't backoff during next retryable error once stream is healthy.
86
-  - BEHAVIOR CHANGE: Don't backoff on EOF.
87
-  - pstest: Support Acknowledge and ModifyAckDeadline RPCs.
88
-
89
-- redis:
90
-  - Add v1 beta Redis client.
91
-
92
-- spanner:
93
-  - Support SessionLabels.
94
-
95
-- speech:
96
-  - Add api v1 beta1 client.
97
-
98
-- storage:
99
-  - BEHAVIOR CHANGE: Retry reads when retryable error occurs.
100
-  - Fix delete of object in requester-pays bucket.
101
-  - Support KMS integration.
102
-
103
-_April 9, 2018_
104
-
105
-*v0.21.0*
106
-
107
-- bigquery:
108
-  - Add OpenCensus tracing.
109
-
110
-- firestore:
111
-  - **BREAKING CHANGE:** If a document does not exist, return a DocumentSnapshot
112
-    whose Exists method returns false. DocumentRef.Get and Transaction.Get
113
-    return the non-nil DocumentSnapshot in addition to a NotFound error.
114
-    **DocumentRef.GetAll and Transaction.GetAll return a non-nil
115
-    DocumentSnapshot instead of nil.**
116
-  - Add DocumentIterator.Stop. **Call Stop whenever you are done with a
117
-    DocumentIterator.**
118
-  - Added Query.Snapshots and DocumentRef.Snapshots, which provide realtime
119
-    notification of updates. See https://cloud.google.com/firestore/docs/query-data/listen.
120
-  - Canceling an RPC now always returns a grpc.Status with codes.Canceled.
121
-
122
-- spanner:
123
-  - Add `CommitTimestamp`, which supports inserting the commit timestamp of a
124
-    transaction into a column.
125
-
126
-_March 22, 2018_
127
-
128
-*v0.20.0*
129
-
130
-- bigquery: Support SchemaUpdateOptions for load jobs.
131
-
132
-- bigtable:
133
-  - Add SampleRowKeys.
134
-  - cbt: Support union, intersection GCPolicy.
135
-  - Retry admin RPCS.
136
-  - Add trace spans to retries.
137
-
138
-- datastore: Add OpenCensus tracing.
139
-
140
-- firestore:
141
-  - Fix queries involving Null and NaN.
142
-  - Allow Timestamp protobuffers for time values.
143
-
144
-- logging: Add a WriteTimeout option.
145
-
146
-- spanner: Support Batch API.
147
-
148
-- storage: Add OpenCensus tracing.
149
-
150
-
151
-_February 26, 2018_
152
-
153
-*v0.19.0*
154
-
155
-- bigquery:
156
-  - Support customer-managed encryption keys.
157
-
158
-- bigtable:
159
-  - Improved emulator support.
160
-  - Support GetCluster.
161
-
162
-- datastore:
163
-  - Add general mutations.
164
-  - Support pointer struct fields.
165
-  - Support transaction options.
166
-
167
-- firestore:
168
-  - Add Transaction.GetAll.
169
-  - Support document cursors.
170
-
171
-- logging:
172
-  - Support concurrent RPCs to the service.
173
-  - Support per-entry resources.
174
-
175
-- profiler:
176
-  - Add config options to disable heap and thread profiling.
177
-  - Read the project ID from $GOOGLE_CLOUD_PROJECT when it's set.
178
-
179
-- pubsub:
180
-  - BEHAVIOR CHANGE: Release flow control after ack/nack (instead of after the
181
-    callback returns).
182
-  - Add SubscriptionInProject.
183
-  - Add OpenCensus instrumentation for streaming pull.
184
-
185
-- storage:
186
-  - Support CORS.
187
-
188
-
189
-_January 18, 2018_
190
-
191
-*v0.18.0*
192
-
193
-- bigquery:
194
-  - Marked stable.
195
-  - Schema inference of nullable fields supported.
196
-  - Added TimePartitioning to QueryConfig.
197
-
198
-- firestore: Data provided to DocumentRef.Set with a Merge option can contain
199
-  Delete sentinels.
200
-
201
-- logging: Clients can accept parent resources other than projects.
202
-
203
-- pubsub:
204
-  - pubsub/pstest: A lighweight fake for pubsub. Experimental; feedback welcome.
205
-  - Support updating more subscription metadata: AckDeadline,
206
-    RetainAckedMessages and RetentionDuration.
207
-
208
-- oslogin/apiv1beta: New client for the Cloud OS Login API.
209
-
210
-- rpcreplay: A package for recording and replaying gRPC traffic.
211
-
212
-- spanner:
213
-  - Add a ReadWithOptions that supports a row limit, as well as an index.
214
-  - Support query plan and execution statistics.
215
-  - Added [OpenCensus](http://opencensus.io) support.
216
-
217
-- storage: Clarify checksum validation for gzipped files (it is not validated
218
-  when the file is served uncompressed).
219
-
220
-
221
-_December 11, 2017_
222
-
223
-*v0.17.0*
224
-
225
-- firestore BREAKING CHANGES:
226
-  - Remove UpdateMap and UpdateStruct; rename UpdatePaths to Update.
227
-    Change
228
-        `docref.UpdateMap(ctx, map[string]interface{}{"a.b", 1})`
229
-    to
230
-        `docref.Update(ctx, []firestore.Update{{Path: "a.b", Value: 1}})`
231
-
232
-    Change
233
-        `docref.UpdateStruct(ctx, []string{"Field"}, aStruct)`
234
-    to
235
-        `docref.Update(ctx, []firestore.Update{{Path: "Field", Value: aStruct.Field}})`
236
-  - Rename MergePaths to Merge; require args to be FieldPaths
237
-  - A value stored as an integer can be read into a floating-point field, and vice versa.
238
-- bigtable/cmd/cbt:
239
-  - Support deleting a column.
240
-  - Add regex option for row read.
241
-- spanner: Mark stable.
242
-- storage:
243
-  - Add Reader.ContentEncoding method.
244
-  - Fix handling of SignedURL headers.
245
-- bigquery:
246
-  - If Uploader.Put is called with no rows, it returns nil without making a
247
-    call.
248
-  - Schema inference supports the "nullable" option in struct tags for
249
-    non-required fields.
250
-  - TimePartitioning supports "Field".
251
-
252
-
253
-[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md)
254
-
255 22
 ## Supported APIs
256 23
 
257
-Google API                       | Status       | Package
258
-[BigQuery][cloud-bigquery]       | stable       | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
259
-[Bigtable][cloud-bigtable]       | stable       | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
260
-[Container][cloud-container]     | alpha        | [`cloud.google.com/go/container/apiv1`][cloud-container-ref]
261
-[Data Loss Prevention][cloud-dlp]| alpha        | [`cloud.google.com/go/dlp/apiv2beta1`][cloud-dlp-ref]
262
-[Datastore][cloud-datastore]     | stable       | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
263
-[Debugger][cloud-debugger]       | alpha        | [`cloud.google.com/go/debugger/apiv2`][cloud-debugger-ref]
264
-[ErrorReporting][cloud-errors]   | alpha        | [`cloud.google.com/go/errorreporting`][cloud-errors-ref]
265
-[Firestore][cloud-firestore]     | beta         | [`cloud.google.com/go/firestore`][cloud-firestore-ref]
266
-[Language][cloud-language]       | stable       | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
267
-[Logging][cloud-logging]         | stable       | [`cloud.google.com/go/logging`][cloud-logging-ref]
268
-[Monitoring][cloud-monitoring]   | beta         | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
269
-[OS Login][cloud-oslogin]        | alpha        | [`cloud.google.com/compute/docs/oslogin/rest`][cloud-oslogin-ref]
270
-[Pub/Sub][cloud-pubsub]          | beta         | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
271
-[Spanner][cloud-spanner]         | stable       | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
272
-[Speech][cloud-speech]           | stable       | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
273
-[Storage][cloud-storage]         | stable       | [`cloud.google.com/go/storage`][cloud-storage-ref]
274
-[Translation][cloud-translation] | stable       | [`cloud.google.com/go/translate`][cloud-translation-ref]
275
-[Video Intelligence][cloud-video]| beta         | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
276
-[Vision][cloud-vision]           | stable       | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]
277
-
24
+Google API                                      | Status       | Package
25
+------------------------------------------------|--------------|-----------------------------------------------------------
26
+[Asset][cloud-asset]                            | alpha        | [`cloud.google.com/go/asset/v1beta`][cloud-asset-ref]
27
+[BigQuery][cloud-bigquery]                      | stable       | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
28
+[Bigtable][cloud-bigtable]                      | stable       | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
29
+[Cloudtasks][cloud-tasks]                       | stable       | [`cloud.google.com/go/cloudtasks/apiv2`][cloud-tasks-ref]
30
+[Container][cloud-container]                    | stable       | [`cloud.google.com/go/container/apiv1`][cloud-container-ref]
31
+[ContainerAnalysis][cloud-containeranalysis]    | beta         | [`cloud.google.com/go/containeranalysis/apiv1beta1`][cloud-containeranalysis-ref]
32
+[Dataproc][cloud-dataproc]                      | stable       | [`cloud.google.com/go/dataproc/apiv1`][cloud-dataproc-ref]
33
+[Datastore][cloud-datastore]                    | stable       | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
34
+[Debugger][cloud-debugger]                      | alpha        | [`cloud.google.com/go/debugger/apiv2`][cloud-debugger-ref]
35
+[Dialogflow][cloud-dialogflow]                  | alpha        | [`cloud.google.com/go/dialogflow/apiv2`][cloud-dialogflow-ref]
36
+[Data Loss Prevention][cloud-dlp]               | alpha        | [`cloud.google.com/go/dlp/apiv2`][cloud-dlp-ref]
37
+[ErrorReporting][cloud-errors]                  | alpha        | [`cloud.google.com/go/errorreporting`][cloud-errors-ref]
38
+[Firestore][cloud-firestore]                    | stable       | [`cloud.google.com/go/firestore`][cloud-firestore-ref]
39
+[IAM][cloud-iam]                                | stable       | [`cloud.google.com/go/iam`][cloud-iam-ref]
40
+[IoT][cloud-iot]                                | alpha        | [`cloud.google.com/iot/apiv1`][cloud-iot-ref]
41
+[KMS][cloud-kms]                                | stable       | [`cloud.google.com/go/kms`][cloud-kms-ref]
42
+[Natural Language][cloud-natural-language]      | stable       | [`cloud.google.com/go/language/apiv1`][cloud-natural-language-ref]
43
+[Logging][cloud-logging]                        | stable       | [`cloud.google.com/go/logging`][cloud-logging-ref]
44
+[Monitoring][cloud-monitoring]                  | alpha        | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
45
+[OS Login][cloud-oslogin]                       | alpha        | [`cloud.google.com/go/oslogin/apiv1`][cloud-oslogin-ref]
46
+[Pub/Sub][cloud-pubsub]                         | stable       | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
47
+[Phishing Protection][cloud-phishingprotection] | alpha        | [`cloud.google.com/go/phishingprotection/apiv1betad1`][cloud-phishingprotection-ref]
48
+[reCAPTCHA Enterprise][cloud-recaptcha]         | alpha        | [`cloud.google.com/go/recaptchaenterprise/apiv1betad1`][cloud-recaptcha-ref]
49
+[Memorystore][cloud-memorystore]                | alpha        | [`cloud.google.com/go/redis/apiv1`][cloud-memorystore-ref]
50
+[Scheduler][cloud-scheduler]                    | stable       | [`cloud.google.com/go/scheduler/apiv1`][cloud-scheduler-ref]
51
+[Spanner][cloud-spanner]                        | stable       | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
52
+[Speech][cloud-speech]                          | stable       | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
53
+[Storage][cloud-storage]                        | stable       | [`cloud.google.com/go/storage`][cloud-storage-ref]
54
+[Talent][cloud-talent]                          | alpha        | [`cloud.google.com/go/talent/apiv4beta1`][cloud-talent-ref]
55
+[Text To Speech][cloud-texttospeech]            | alpha        | [`cloud.google.com/go/texttospeech/apiv1`][cloud-texttospeech-ref]
56
+[Trace][cloud-trace]                            | alpha        | [`cloud.google.com/go/trace/apiv2`][cloud-trace-ref]
57
+[Translate][cloud-translate]                    | stable       | [`cloud.google.com/go/translate`][cloud-translate-ref]
58
+[Video Intelligence][cloud-video]               | alpha        | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
59
+[Vision][cloud-vision]                          | stable       | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]
278 60
 
279 61
 > **Alpha status**: the API is still being actively developed. As a
280 62
 > result, it might change in backward-incompatible ways and is not recommended
... ...
@@ -288,23 +69,16 @@ Google API                       | Status       | Package
288 288
 > **Stable status**: the API is mature and ready for production use. We will
289 289
 > continue addressing bugs and feature requests.
290 290
 
291
-Documentation and examples are available at
292
-https://godoc.org/cloud.google.com/go
293
-
294
-Visit or join the
295
-[google-api-go-announce group](https://groups.google.com/forum/#!forum/google-api-go-announce)
296
-for updates on these packages.
291
+Documentation and examples are available at [godoc.org/cloud.google.com/go](godoc.org/cloud.google.com/go)
297 292
 
298 293
 ## Go Versions Supported
299 294
 
300 295
 We support the two most recent major versions of Go. If Google App Engine uses
301
-an older version, we support that as well. You can see which versions are
302
-currently supported by looking at the lines following `go:` in
303
-[`.travis.yml`](.travis.yml).
296
+an older version, we support that as well.
304 297
 
305 298
 ## Authorization
306 299
 
307
-By default, each API will use [Google Application Default Credentials][default-creds]
300
+By default, each API will use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials)
308 301
 for authorization credentials used in calling the API endpoints. This will allow your
309 302
 application to run in many environments without requiring explicit configuration.
310 303
 
... ...
@@ -316,12 +90,12 @@ client, err := storage.NewClient(ctx)
316 316
 To authorize using a
317 317
 [JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys),
318 318
 pass
319
-[`option.WithServiceAccountFile`](https://godoc.org/google.golang.org/api/option#WithServiceAccountFile)
319
+[`option.WithCredentialsFile`](https://godoc.org/google.golang.org/api/option#WithCredentialsFile)
320 320
 to the `NewClient` function of the desired package. For example:
321 321
 
322 322
 [snip]:# (auth-JSON)
323 323
 ```go
324
-client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json"))
324
+client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json"))
325 325
 ```
326 326
 
327 327
 You can exert more control over authorization by using the
... ...
@@ -335,249 +109,6 @@ tokenSource := ...
335 335
 client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
336 336
 ```
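
The `tokenSource := ...` line in this hunk is intentionally elided in the upstream README. Purely as an illustration (not part of the diff), a token source could be built with the standard `golang.org/x/oauth2` package; the access token below is a placeholder:

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
	"golang.org/x/oauth2"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// Any oauth2.TokenSource works; a static token is the simplest illustration.
	// Real programs typically derive one from a service-account JSON key or
	// from the golang.org/x/oauth2/google helpers.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "placeholder-access-token"})

	client, err := storage.NewClient(ctx, option.WithTokenSource(ts))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```

Any `oauth2.TokenSource` implementation can be handed to `option.WithTokenSource`, which is what gives the caller full control over how credentials are obtained and refreshed.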
337 337
 
338
-## Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore)
339
-
340
-- [About Cloud Datastore][cloud-datastore]
341
-- [Activating the API for your project][cloud-datastore-activation]
342
-- [API documentation][cloud-datastore-docs]
343
-- [Go client documentation](https://godoc.org/cloud.google.com/go/datastore)
344
-- [Complete sample program](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/datastore/tasks)
345
-
346
-### Example Usage
347
-
348
-First create a `datastore.Client` to use throughout your application:
349
-
350
-[snip]:# (datastore-1)
351
-```go
352
-client, err := datastore.NewClient(ctx, "my-project-id")
353
-if err != nil {
354
-	log.Fatal(err)
355
-}
356
-```
357
-
358
-Then use that client to interact with the API:
359
-
360
-[snip]:# (datastore-2)
361
-```go
362
-type Post struct {
363
-	Title       string
364
-	Body        string `datastore:",noindex"`
365
-	PublishedAt time.Time
366
-}
367
-keys := []*datastore.Key{
368
-	datastore.NameKey("Post", "post1", nil),
369
-	datastore.NameKey("Post", "post2", nil),
370
-}
371
-posts := []*Post{
372
-	{Title: "Post 1", Body: "...", PublishedAt: time.Now()},
373
-	{Title: "Post 2", Body: "...", PublishedAt: time.Now()},
374
-}
375
-if _, err := client.PutMulti(ctx, keys, posts); err != nil {
376
-	log.Fatal(err)
377
-}
378
-```
379
-
380
-## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage)
381
-
382
-- [About Cloud Storage][cloud-storage]
383
-- [API documentation][cloud-storage-docs]
384
-- [Go client documentation](https://godoc.org/cloud.google.com/go/storage)
385
-- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage)
386
-
387
-### Example Usage
388
-
389
-First create a `storage.Client` to use throughout your application:
390
-
391
-[snip]:# (storage-1)
392
-```go
393
-client, err := storage.NewClient(ctx)
394
-if err != nil {
395
-	log.Fatal(err)
396
-}
397
-```
398
-
399
-[snip]:# (storage-2)
400
-```go
401
-// Read the object1 from bucket.
402
-rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
403
-if err != nil {
404
-	log.Fatal(err)
405
-}
406
-defer rc.Close()
407
-body, err := ioutil.ReadAll(rc)
408
-if err != nil {
409
-	log.Fatal(err)
410
-}
411
-```
412
-
413
-## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub)
414
-
415
-- [About Cloud Pubsub][cloud-pubsub]
416
-- [API documentation][cloud-pubsub-docs]
417
-- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub)
418
-- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub)
419
-
420
-### Example Usage
421
-
422
-First create a `pubsub.Client` to use throughout your application:
423
-
424
-[snip]:# (pubsub-1)
425
-```go
426
-client, err := pubsub.NewClient(ctx, "project-id")
427
-if err != nil {
428
-	log.Fatal(err)
429
-}
430
-```
431
-
432
-Then use the client to publish and subscribe:
433
-
434
-[snip]:# (pubsub-2)
435
-```go
436
-// Publish "hello world" on topic1.
437
-topic := client.Topic("topic1")
438
-res := topic.Publish(ctx, &pubsub.Message{
439
-	Data: []byte("hello world"),
440
-})
441
-// The publish happens asynchronously.
442
-// Later, you can get the result from res:
443
-...
444
-msgID, err := res.Get(ctx)
445
-if err != nil {
446
-	log.Fatal(err)
447
-}
448
-
449
-// Use a callback to receive messages via subscription1.
450
-sub := client.Subscription("subscription1")
451
-err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
452
-	fmt.Println(m.Data)
453
-	m.Ack() // Acknowledge that we've consumed the message.
454
-})
455
-if err != nil {
456
-	log.Println(err)
457
-}
458
-```
459
-
460
-## Cloud BigQuery [![GoDoc](https://godoc.org/cloud.google.com/go/bigquery?status.svg)](https://godoc.org/cloud.google.com/go/bigquery)
461
-
462
-- [About Cloud BigQuery][cloud-bigquery]
463
-- [API documentation][cloud-bigquery-docs]
464
-- [Go client documentation][cloud-bigquery-ref]
465
-- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/bigquery)
466
-
467
-### Example Usage
468
-
469
-First create a `bigquery.Client` to use throughout your application:
470
-[snip]:# (bq-1)
471
-```go
472
-c, err := bigquery.NewClient(ctx, "my-project-ID")
473
-if err != nil {
474
-	// TODO: Handle error.
475
-}
476
-```
477
-
478
-Then use that client to interact with the API:
479
-[snip]:# (bq-2)
480
-```go
481
-// Construct a query.
482
-q := c.Query(`
483
-    SELECT year, SUM(number)
484
-    FROM [bigquery-public-data:usa_names.usa_1910_2013]
485
-    WHERE name = "William"
486
-    GROUP BY year
487
-    ORDER BY year
488
-`)
489
-// Execute the query.
490
-it, err := q.Read(ctx)
491
-if err != nil {
492
-	// TODO: Handle error.
493
-}
494
-// Iterate through the results.
495
-for {
496
-	var values []bigquery.Value
497
-	err := it.Next(&values)
498
-	if err == iterator.Done {
499
-		break
500
-	}
501
-	if err != nil {
502
-		// TODO: Handle error.
503
-	}
504
-	fmt.Println(values)
505
-}
506
-```
507
-
508
-
509
-## Stackdriver Logging [![GoDoc](https://godoc.org/cloud.google.com/go/logging?status.svg)](https://godoc.org/cloud.google.com/go/logging)
510
-
511
-- [About Stackdriver Logging][cloud-logging]
512
-- [API documentation][cloud-logging-docs]
513
-- [Go client documentation][cloud-logging-ref]
514
-- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging)
515
-
516
-### Example Usage
517
-
518
-First create a `logging.Client` to use throughout your application:
519
-[snip]:# (logging-1)
520
-```go
521
-ctx := context.Background()
522
-client, err := logging.NewClient(ctx, "my-project")
523
-if err != nil {
524
-	// TODO: Handle error.
525
-}
526
-```
527
-
528
-Usually, you'll want to add log entries to a buffer to be periodically flushed
529
-(automatically and asynchronously) to the Stackdriver Logging service.
530
-[snip]:# (logging-2)
531
-```go
532
-logger := client.Logger("my-log")
533
-logger.Log(logging.Entry{Payload: "something happened!"})
534
-```
535
-
536
-Close your client before your program exits, to flush any buffered log entries.
537
-[snip]:# (logging-3)
538
-```go
539
-err = client.Close()
540
-if err != nil {
541
-	// TODO: Handle error.
542
-}
543
-```
544
-
545
-## Cloud Spanner [![GoDoc](https://godoc.org/cloud.google.com/go/spanner?status.svg)](https://godoc.org/cloud.google.com/go/spanner)
546
-
547
-- [About Cloud Spanner][cloud-spanner]
548
-- [API documentation][cloud-spanner-docs]
549
-- [Go client documentation](https://godoc.org/cloud.google.com/go/spanner)
550
-
551
-### Example Usage
552
-
553
-First create a `spanner.Client` to use throughout your application:
554
-
555
-[snip]:# (spanner-1)
556
-```go
557
-client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D")
558
-if err != nil {
559
-	log.Fatal(err)
560
-}
561
-```
562
-
563
-[snip]:# (spanner-2)
564
-```go
565
-// Simple Reads And Writes
566
-_, err = client.Apply(ctx, []*spanner.Mutation{
567
-	spanner.Insert("Users",
568
-		[]string{"name", "email"},
569
-		[]interface{}{"alice", "a@example.com"})})
570
-if err != nil {
571
-	log.Fatal(err)
572
-}
573
-row, err := client.Single().ReadRow(ctx, "Users",
574
-	spanner.Key{"alice"}, []string{"email"})
575
-if err != nil {
576
-	log.Fatal(err)
577
-}
578
-```
579
-
580
-
581 338
 ## Contributing
582 339
 
583 340
 Contributions are welcome. Please, see the
... ...
@@ -592,32 +123,23 @@ for more information.
592 592
 
593 593
 [cloud-datastore]: https://cloud.google.com/datastore/
594 594
 [cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore
595
-[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
596
-[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate
597 595
 
598 596
 [cloud-firestore]: https://cloud.google.com/firestore/
599 597
 [cloud-firestore-ref]: https://godoc.org/cloud.google.com/go/firestore
600
-[cloud-firestore-docs]: https://cloud.google.com/firestore/docs
601
-[cloud-firestore-activation]: https://cloud.google.com/firestore/docs/activate
602 598
 
603 599
 [cloud-pubsub]: https://cloud.google.com/pubsub/
604 600
 [cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub
605
-[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs
606 601
 
607 602
 [cloud-storage]: https://cloud.google.com/storage/
608 603
 [cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage
609
-[cloud-storage-docs]: https://cloud.google.com/storage/docs
610
-[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets
611 604
 
612 605
 [cloud-bigtable]: https://cloud.google.com/bigtable/
613 606
 [cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable
614 607
 
615 608
 [cloud-bigquery]: https://cloud.google.com/bigquery/
616
-[cloud-bigquery-docs]: https://cloud.google.com/bigquery/docs
617 609
 [cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery
618 610
 
619 611
 [cloud-logging]: https://cloud.google.com/logging/
620
-[cloud-logging-docs]: https://cloud.google.com/logging/docs
621 612
 [cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging
622 613
 
623 614
 [cloud-monitoring]: https://cloud.google.com/monitoring/
... ...
@@ -630,17 +152,16 @@ for more information.
630 630
 [cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1
631 631
 
632 632
 [cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest
633
-[cloud-oslogin-ref]: https://cloud.google.com/compute/docs/oslogin/rest
633
+[cloud-oslogin-ref]: https://cloud.google.com/go/oslogin/apiv1
634 634
 
635 635
 [cloud-speech]: https://cloud.google.com/speech
636 636
 [cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1
637 637
 
638 638
 [cloud-spanner]: https://cloud.google.com/spanner/
639 639
 [cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner
640
-[cloud-spanner-docs]: https://cloud.google.com/spanner/docs
641 640
 
642
-[cloud-translation]: https://cloud.google.com/translation
643
-[cloud-translation-ref]: https://godoc.org/cloud.google.com/go/translation
641
+[cloud-translate]: https://cloud.google.com/translate
642
+[cloud-translate-ref]: https://godoc.org/cloud.google.com/go/translate
644 643
 
645 644
 [cloud-video]: https://cloud.google.com/video-intelligence/
646 645
 [cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1
... ...
@@ -657,4 +178,50 @@ for more information.
657 657
 [cloud-dlp]: https://cloud.google.com/dlp/
658 658
 [cloud-dlp-ref]: https://godoc.org/cloud.google.com/go/dlp/apiv2beta1
659 659
 
660
-[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials
660
+[cloud-dataproc]: https://cloud.google.com/dataproc/
661
+[cloud-dataproc-ref]: https://godoc.org/cloud.google.com/go/dataproc/apiv1
662
+
663
+[cloud-iam]: https://cloud.google.com/iam/
664
+[cloud-iam-ref]: https://godoc.org/cloud.google.com/go/iam
665
+
666
+[cloud-kms]: https://cloud.google.com/kms/
667
+[cloud-kms-ref]: https://godoc.org/cloud.google.com/go/kms/apiv1
668
+
669
+[cloud-natural-language]: https://cloud.google.com/natural-language/
670
+[cloud-natural-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1
671
+
672
+[cloud-memorystore]: https://cloud.google.com/memorystore/
673
+[cloud-memorystore-ref]: https://godoc.org/cloud.google.com/go/redis/apiv1
674
+
675
+[cloud-texttospeech]: https://cloud.google.com/texttospeech/
676
+[cloud-texttospeech-ref]: https://godoc.org/cloud.google.com/go/texttospeech/apiv1
677
+
678
+[cloud-trace]: https://cloud.google.com/trace/
679
+[cloud-trace-ref]: https://godoc.org/cloud.google.com/go/trace/apiv2
680
+
681
+[cloud-dialogflow]: https://cloud.google.com/dialogflow-enterprise/
682
+[cloud-dialogflow-ref]: https://godoc.org/cloud.google.com/go/dialogflow/apiv2
683
+
684
+[cloud-containeranalysis]: https://cloud.google.com/container-registry/docs/container-analysis
685
+[cloud-containeranalysis-ref]: https://godoc.org/cloud.google.com/go/devtools/containeranalysis/apiv1beta1
686
+
687
+[cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory
688
+[cloud-asset-ref]: https://godoc.org/cloud.google.com/go/asset/apiv1
689
+
690
+[cloud-tasks]: https://cloud.google.com/tasks/
691
+[cloud-tasks-ref]: https://godoc.org/cloud.google.com/go/cloudtasks/apiv2
692
+
693
+[cloud-scheduler]: https://cloud.google.com/scheduler
694
+[cloud-scheduler-ref]: https://godoc.org/cloud.google.com/go/scheduler/apiv1
695
+
696
+[cloud-iot]: https://cloud.google.com/iot-core/
697
+[cloud-iot-ref]: https://godoc.org/cloud.google.com/go/iot/apiv1
698
+
699
+[cloud-phishingprotection]: https://cloud.google.com/phishing-protection/
700
+[cloud-phishingprotection-ref]: https://cloud.google.com/go/phishingprotection/apiv1beta1
701
+
702
+[cloud-recaptcha]: https://cloud.google.com/recaptcha-enterprise/
703
+[cloud-recaptcha-ref]: https://cloud.google.com/go/recaptchaenterprise/apiv1beta1
704
+
705
+[cloud-talent]: https://cloud.google.com/solutions/talent-solution/
706
+[cloud-talent-ref]: https://godoc.org/cloud.google.com/go/talent/apiv4beta1
661 707
new file mode 100644
... ...
@@ -0,0 +1,100 @@
0
+// Copyright 2014 Google LLC
1
+//
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+//      http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+/*
15
+Package cloud is the root of the packages used to access Google Cloud
16
+Services. See https://godoc.org/cloud.google.com/go for a full list
17
+of sub-packages.
18
+
19
+
20
+Client Options
21
+
22
+All clients in sub-packages are configurable via client options. These options are
23
+described here: https://godoc.org/google.golang.org/api/option.
24
+
25
+
26
+Authentication and Authorization
27
+
28
+All the clients in sub-packages support authentication via Google Application Default
29
+Credentials (see https://cloud.google.com/docs/authentication/production), or
30
+by providing a JSON key file for a Service Account. See the authentication examples
31
+in this package for details.
32
+
33
+
34
+Timeouts and Cancellation
35
+
36
+By default, all requests in sub-packages will run indefinitely, retrying on transient
37
+errors when correctness allows. To set timeouts or arrange for cancellation, use
38
+contexts. See the examples for details.
39
+
40
+Do not attempt to control the initial connection (dialing) of a service by setting a
41
+timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts
42
+would be ineffective and would only interfere with credential refreshing, which uses
43
+the same context.
44
+
45
+
46
+Connection Pooling
47
+
48
+Connection pooling differs in clients based on their transport. Cloud
49
+clients either rely on HTTP or gRPC transports to communicate
50
+with Google Cloud.
51
+
52
+Cloud clients that use HTTP (bigquery, compute, storage, and translate) rely on the
53
+underlying HTTP transport to cache connections for later re-use. These are cached to
54
+the default http.MaxIdleConns and http.MaxIdleConnsPerHost settings in
55
+http.DefaultTransport.
56
+
57
+For gRPC clients (all others in this repo), connection pooling is configurable. Users
58
+of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a client
59
+option to NewClient calls. This configures the underlying gRPC connections to be
60
+pooled and addressed in a round robin fashion.
61
+
62
+
63
+Using the Libraries with Docker
64
+
65
+Minimal docker images like Alpine lack CA certificates. This causes RPCs to appear to
66
+hang, because gRPC retries indefinitely. See https://github.com/googleapis/google-cloud-go/issues/928
67
+for more information.
68
+
69
+
70
+Debugging
71
+
72
+To see gRPC logs, set the environment variable GRPC_GO_LOG_SEVERITY_LEVEL. See
73
+https://godoc.org/google.golang.org/grpc/grpclog for more information.
74
+
75
+For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2".
76
+
77
+
78
+Client Stability
79
+
80
+Clients in this repository are considered alpha or beta unless otherwise
81
+marked as stable in the README.md. Semver is not used to communicate stability
82
+of clients.
83
+
84
+Alpha and beta clients may change or go away without notice.
85
+
86
+Clients marked stable will maintain compatibility with future versions for as
87
+long as we can reasonably sustain. Incompatible changes might be made in some
88
+situations, including:
89
+
90
+- Security bugs may prompt backwards-incompatible changes.
91
+
92
+- Situations in which components are no longer feasible to maintain without
93
+making breaking changes, including removal.
94
+
95
+- Parts of the client surface may be outright unstable and subject to change.
96
+These parts of the surface will be labeled with the note, "It is EXPERIMENTAL
97
+and subject to change or removal without notice."
98
+*/
99
+package cloud // import "cloud.google.com/go"
0 100
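
The new `doc.go` above documents per-call timeouts via contexts (explicitly warning against putting a timeout on the `NewClient` context), gRPC connection pooling through `option.WithGRPCConnectionPool`, and debugging via the `GRPC_GO_LOG_SEVERITY_LEVEL` and `GODEBUG` environment variables. A minimal sketch of how those options are typically combined, assuming a gRPC-based client such as `pubsub` and placeholder project/topic IDs:

```go
package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/pubsub"
	"google.golang.org/api/option"
)

func main() {
	// Dial with a plain background context, as doc.go advises; do not put a
	// timeout here. WithGRPCConnectionPool only applies to gRPC-based clients.
	client, err := pubsub.NewClient(context.Background(), "my-project-id",
		option.WithGRPCConnectionPool(4))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Bound individual RPCs with a per-call context instead.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// For gRPC wire logs, run with GRPC_GO_LOG_SEVERITY_LEVEL=info;
	// for HTTP-based clients, GODEBUG=http2debug=1 serves a similar purpose.
	ok, err := client.Topic("my-topic").Exists(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("topic exists:", ok)
}
```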
new file mode 100644
... ...
@@ -0,0 +1,9 @@
0
+// +build ignore
1
+
2
+// Empty include file to generate z symbols
3
+
4
+
5
+
6
+
7
+
8
+// EOF
0 9
new file mode 100644
... ...
@@ -0,0 +1,472 @@
0
+// Copyright 2018 Google Inc. All Rights Reserved.
1
+//
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+//      http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+/*
15
+ * Line tables
16
+ */
17
+
18
+package gosym
19
+
20
+import (
21
+	"encoding/binary"
22
+	"sync"
23
+)
24
+
25
+// A LineTable is a data structure mapping program counters to line numbers.
26
+//
27
+// In Go 1.1 and earlier, each function (represented by a Func) had its own LineTable,
28
+// and the line number corresponded to a numbering of all source lines in the
29
+// program, across all files. That absolute line number would then have to be
30
+// converted separately to a file name and line number within the file.
31
+//
32
+// In Go 1.2, the format of the data changed so that there is a single LineTable
33
+// for the entire program, shared by all Funcs, and there are no absolute line
34
+// numbers, just line numbers within specific files.
35
+//
36
+// For the most part, LineTable's methods should be treated as an internal
37
+// detail of the package; callers should use the methods on Table instead.
38
+type LineTable struct {
39
+	Data []byte
40
+	PC   uint64
41
+	Line int
42
+
43
+	// Go 1.2 state
44
+	mu       sync.Mutex
45
+	go12     int // is this in Go 1.2 format? -1 no, 0 unknown, 1 yes
46
+	binary   binary.ByteOrder
47
+	quantum  uint32
48
+	ptrsize  uint32
49
+	functab  []byte
50
+	nfunctab uint32
51
+	filetab  []byte
52
+	nfiletab uint32
53
+	fileMap  map[string]uint32
54
+}
55
+
56
+// NOTE(rsc): This is wrong for GOARCH=arm, which uses a quantum of 4,
57
+// but we have no idea whether we're using arm or not. This only
58
+// matters in the old (pre-Go 1.2) symbol table format, so it's not worth
59
+// fixing.
60
+const oldQuantum = 1
61
+
62
+func (t *LineTable) parse(targetPC uint64, targetLine int) (b []byte, pc uint64, line int) {
63
+	// The PC/line table can be thought of as a sequence of
64
+	//  <pc update>* <line update>
65
+	// batches.  Each update batch results in a (pc, line) pair,
66
+	// where line applies to every PC from pc up to but not
67
+	// including the pc of the next pair.
68
+	//
69
+	// Here we process each update individually, which simplifies
70
+	// the code, but makes the corner cases more confusing.
71
+	b, pc, line = t.Data, t.PC, t.Line
72
+	for pc <= targetPC && line != targetLine && len(b) > 0 {
73
+		code := b[0]
74
+		b = b[1:]
75
+		switch {
76
+		case code == 0:
77
+			if len(b) < 4 {
78
+				b = b[0:0]
79
+				break
80
+			}
81
+			val := binary.BigEndian.Uint32(b)
82
+			b = b[4:]
83
+			line += int(val)
84
+		case code <= 64:
85
+			line += int(code)
86
+		case code <= 128:
87
+			line -= int(code - 64)
88
+		default:
89
+			pc += oldQuantum * uint64(code-128)
90
+			continue
91
+		}
92
+		pc += oldQuantum
93
+	}
94
+	return b, pc, line
95
+}
96
+
97
+func (t *LineTable) slice(pc uint64) *LineTable {
98
+	data, pc, line := t.parse(pc, -1)
99
+	return &LineTable{Data: data, PC: pc, Line: line}
100
+}
101
+
102
+// PCToLine returns the line number for the given program counter.
103
+// Callers should use Table's PCToLine method instead.
104
+func (t *LineTable) PCToLine(pc uint64) int {
105
+	if t.isGo12() {
106
+		return t.go12PCToLine(pc)
107
+	}
108
+	_, _, line := t.parse(pc, -1)
109
+	return line
110
+}
111
+
112
+// LineToPC returns the program counter for the given line number,
113
+// considering only program counters before maxpc.
114
+// Callers should use Table's LineToPC method instead.
115
+func (t *LineTable) LineToPC(line int, maxpc uint64) uint64 {
116
+	if t.isGo12() {
117
+		return 0
118
+	}
119
+	_, pc, line1 := t.parse(maxpc, line)
120
+	if line1 != line {
121
+		return 0
122
+	}
123
+	// Subtract quantum from PC to account for post-line increment
124
+	return pc - oldQuantum
125
+}
126
+
127
+// NewLineTable returns a new PC/line table
128
+// corresponding to the encoded data.
129
+// Text must be the start address of the
130
+// corresponding text segment.
131
+func NewLineTable(data []byte, text uint64) *LineTable {
132
+	return &LineTable{Data: data, PC: text, Line: 0}
133
+}
134
+
135
+// Go 1.2 symbol table format.
136
+// See golang.org/s/go12symtab.
137
+//
138
+// A general note about the methods here: rather than try to avoid
139
+// index out of bounds errors, we trust Go to detect them, and then
140
+// we recover from the panics and treat them as indicative of a malformed
141
+// or incomplete table.
142
+//
143
+// The methods called by symtab.go, which begin with "go12" prefixes,
144
+// are expected to have that recovery logic.
145
+
146
+// isGo12 reports whether this is a Go 1.2 (or later) symbol table.
147
+func (t *LineTable) isGo12() bool {
148
+	t.go12Init()
149
+	return t.go12 == 1
150
+}
151
+
152
+const go12magic = 0xfffffffb
153
+
154
+// uintptr returns the pointer-sized value encoded at b.
155
+// The pointer size is dictated by the table being read.
156
+func (t *LineTable) uintptr(b []byte) uint64 {
157
+	if t.ptrsize == 4 {
158
+		return uint64(t.binary.Uint32(b))
159
+	}
160
+	return t.binary.Uint64(b)
161
+}
162
+
163
+// go12init initializes the Go 1.2 metadata if t is a Go 1.2 symbol table.
164
+func (t *LineTable) go12Init() {
165
+	t.mu.Lock()
166
+	defer t.mu.Unlock()
167
+	if t.go12 != 0 {
168
+		return
169
+	}
170
+
171
+	defer func() {
172
+		// If we panic parsing, assume it's not a Go 1.2 symbol table.
173
+		recover()
174
+	}()
175
+
176
+	// Check header: 4-byte magic, two zeros, pc quantum, pointer size.
177
+	t.go12 = -1 // not Go 1.2 until proven otherwise
178
+	if len(t.Data) < 16 || t.Data[4] != 0 || t.Data[5] != 0 ||
179
+		(t.Data[6] != 1 && t.Data[6] != 4) || // pc quantum
180
+		(t.Data[7] != 4 && t.Data[7] != 8) { // pointer size
181
+		return
182
+	}
183
+
184
+	switch uint32(go12magic) {
185
+	case binary.LittleEndian.Uint32(t.Data):
186
+		t.binary = binary.LittleEndian
187
+	case binary.BigEndian.Uint32(t.Data):
188
+		t.binary = binary.BigEndian
189
+	default:
190
+		return
191
+	}
192
+
193
+	t.quantum = uint32(t.Data[6])
194
+	t.ptrsize = uint32(t.Data[7])
195
+
196
+	t.nfunctab = uint32(t.uintptr(t.Data[8:]))
197
+	t.functab = t.Data[8+t.ptrsize:]
198
+	functabsize := t.nfunctab*2*t.ptrsize + t.ptrsize
199
+	fileoff := t.binary.Uint32(t.functab[functabsize:])
200
+	t.functab = t.functab[:functabsize]
201
+	t.filetab = t.Data[fileoff:]
202
+	t.nfiletab = t.binary.Uint32(t.filetab)
203
+	t.filetab = t.filetab[:t.nfiletab*4]
204
+
205
+	t.go12 = 1 // so far so good
206
+}
207
+
208
+// go12Funcs returns a slice of Funcs derived from the Go 1.2 pcln table.
209
+func (t *LineTable) go12Funcs() []Func {
210
+	// Assume it is malformed and return nil on error.
211
+	defer func() {
212
+		recover()
213
+	}()
214
+
215
+	n := len(t.functab) / int(t.ptrsize) / 2
216
+	funcs := make([]Func, n)
217
+	for i := range funcs {
218
+		f := &funcs[i]
219
+		f.Entry = uint64(t.uintptr(t.functab[2*i*int(t.ptrsize):]))
220
+		f.End = uint64(t.uintptr(t.functab[(2*i+2)*int(t.ptrsize):]))
221
+		info := t.Data[t.uintptr(t.functab[(2*i+1)*int(t.ptrsize):]):]
222
+		f.LineTable = t
223
+		f.FrameSize = int(t.binary.Uint32(info[t.ptrsize+2*4:]))
224
+		f.Sym = &Sym{
225
+			Value:  f.Entry,
226
+			Type:   'T',
227
+			Name:   t.string(t.binary.Uint32(info[t.ptrsize:])),
228
+			GoType: 0,
229
+			Func:   f,
230
+		}
231
+	}
232
+	return funcs
233
+}
234
+
235
+// findFunc returns the func corresponding to the given program counter.
236
+func (t *LineTable) findFunc(pc uint64) []byte {
237
+	if pc < t.uintptr(t.functab) || pc >= t.uintptr(t.functab[len(t.functab)-int(t.ptrsize):]) {
238
+		return nil
239
+	}
240
+
241
+	// The function table is a list of 2*nfunctab+1 uintptrs,
242
+	// alternating program counters and offsets to func structures.
243
+	f := t.functab
244
+	nf := t.nfunctab
245
+	for nf > 0 {
246
+		m := nf / 2
247
+		fm := f[2*t.ptrsize*m:]
248
+		if t.uintptr(fm) <= pc && pc < t.uintptr(fm[2*t.ptrsize:]) {
249
+			return t.Data[t.uintptr(fm[t.ptrsize:]):]
250
+		} else if pc < t.uintptr(fm) {
251
+			nf = m
252
+		} else {
253
+			f = f[(m+1)*2*t.ptrsize:]
254
+			nf -= m + 1
255
+		}
256
+	}
257
+	return nil
258
+}
259
+
260
+// readvarint reads, removes, and returns a varint from *pp.
261
+func (t *LineTable) readvarint(pp *[]byte) uint32 {
262
+	var v, shift uint32
263
+	p := *pp
264
+	for shift = 0; ; shift += 7 {
265
+		b := p[0]
266
+		p = p[1:]
267
+		v |= (uint32(b) & 0x7F) << shift
268
+		if b&0x80 == 0 {
269
+			break
270
+		}
271
+	}
272
+	*pp = p
273
+	return v
274
+}
275
+
276
+// string returns a Go string found at off.
277
+func (t *LineTable) string(off uint32) string {
278
+	for i := off; ; i++ {
279
+		if t.Data[i] == 0 {
280
+			return string(t.Data[off:i])
281
+		}
282
+	}
283
+}
284
+
285
+// step advances to the next pc, value pair in the encoded table.
286
+func (t *LineTable) step(p *[]byte, pc *uint64, val *int32, first bool) bool {
287
+	uvdelta := t.readvarint(p)
288
+	if uvdelta == 0 && !first {
289
+		return false
290
+	}
291
+	if uvdelta&1 != 0 {
292
+		uvdelta = ^(uvdelta >> 1)
293
+	} else {
294
+		uvdelta >>= 1
295
+	}
296
+	vdelta := int32(uvdelta)
297
+	pcdelta := t.readvarint(p) * t.quantum
298
+	*pc += uint64(pcdelta)
299
+	*val += vdelta
300
+	return true
301
+}
302
+
303
+// pcvalue reports the value associated with the target pc.
304
+// off is the offset to the beginning of the pc-value table,
305
+// and entry is the start PC for the corresponding function.
306
+func (t *LineTable) pcvalue(off uint32, entry, targetpc uint64) int32 {
307
+	if off == 0 {
308
+		return -1
309
+	}
310
+	p := t.Data[off:]
311
+
312
+	val := int32(-1)
313
+	pc := entry
314
+	for t.step(&p, &pc, &val, pc == entry) {
315
+		if targetpc < pc {
316
+			return val
317
+		}
318
+	}
319
+	return -1
320
+}
321
+
322
+// findFileLine scans one function in the binary looking for a
323
+// program counter in the given file on the given line.
324
+// It does so by running the pc-value tables mapping program counter
325
+// to file number. Since most functions come from a single file, these
326
+// are usually short and quick to scan. If a file match is found, then the
327
+// code goes to the expense of looking for a simultaneous line number match.
328
+func (t *LineTable) findFileLine(entry uint64, filetab, linetab uint32, filenum, line int32) uint64 {
329
+	if filetab == 0 || linetab == 0 {
330
+		return 0
331
+	}
332
+
333
+	fp := t.Data[filetab:]
334
+	fl := t.Data[linetab:]
335
+	fileVal := int32(-1)
336
+	filePC := entry
337
+	lineVal := int32(-1)
338
+	linePC := entry
339
+	fileStartPC := filePC
340
+	for t.step(&fp, &filePC, &fileVal, filePC == entry) {
341
+		if fileVal == filenum && fileStartPC < filePC {
342
+			// fileVal is in effect starting at fileStartPC up to
343
+			// but not including filePC, and it's the file we want.
344
+			// Run the PC table looking for a matching line number
345
+			// or until we reach filePC.
346
+			lineStartPC := linePC
347
+			for linePC < filePC && t.step(&fl, &linePC, &lineVal, linePC == entry) {
348
+				// lineVal is in effect until linePC, and lineStartPC < filePC.
349
+				if lineVal == line {
350
+					if fileStartPC <= lineStartPC {
351
+						return lineStartPC
352
+					}
353
+					if fileStartPC < linePC {
354
+						return fileStartPC
355
+					}
356
+				}
357
+				lineStartPC = linePC
358
+			}
359
+		}
360
+		fileStartPC = filePC
361
+	}
362
+	return 0
363
+}
364
+
365
+// go12PCToLine maps program counter to line number for the Go 1.2 pcln table.
366
+func (t *LineTable) go12PCToLine(pc uint64) (line int) {
367
+	return t.go12PCToVal(pc, t.ptrsize+5*4)
368
+}
369
+
370
+// go12PCToSPAdj maps program counter to Stack Pointer adjustment for the Go 1.2 pcln table.
371
+func (t *LineTable) go12PCToSPAdj(pc uint64) (spadj int) {
372
+	return t.go12PCToVal(pc, t.ptrsize+3*4)
373
+}
374
+
375
+func (t *LineTable) go12PCToVal(pc uint64, fOffset uint32) (val int) {
376
+	defer func() {
377
+		if recover() != nil {
378
+			val = -1
379
+		}
380
+	}()
381
+
382
+	f := t.findFunc(pc)
383
+	if f == nil {
384
+		return -1
385
+	}
386
+	entry := t.uintptr(f)
387
+	linetab := t.binary.Uint32(f[fOffset:])
388
+	return int(t.pcvalue(linetab, entry, pc))
389
+}
390
+
391
+// go12PCToFile maps program counter to file name for the Go 1.2 pcln table.
392
+func (t *LineTable) go12PCToFile(pc uint64) (file string) {
393
+	defer func() {
394
+		if recover() != nil {
395
+			file = ""
396
+		}
397
+	}()
398
+
399
+	f := t.findFunc(pc)
400
+	if f == nil {
401
+		return ""
402
+	}
403
+	entry := t.uintptr(f)
404
+	filetab := t.binary.Uint32(f[t.ptrsize+4*4:])
405
+	fno := t.pcvalue(filetab, entry, pc)
406
+	if fno <= 0 {
407
+		return ""
408
+	}
409
+	return t.string(t.binary.Uint32(t.filetab[4*fno:]))
410
+}
411
+
412
+// go12LineToPC maps a (file, line) pair to a program counter for the Go 1.2 pcln table.
413
+func (t *LineTable) go12LineToPC(file string, line int) (pc uint64) {
414
+	defer func() {
415
+		if recover() != nil {
416
+			pc = 0
417
+		}
418
+	}()
419
+
420
+	t.initFileMap()
421
+	filenum := t.fileMap[file]
422
+	if filenum == 0 {
423
+		return 0
424
+	}
425
+
426
+	// Scan all functions.
427
+	// If this turns out to be a bottleneck, we could build a map[int32][]int32
428
+	// mapping file number to a list of functions with code from that file.
429
+	for i := uint32(0); i < t.nfunctab; i++ {
430
+		f := t.Data[t.uintptr(t.functab[2*t.ptrsize*i+t.ptrsize:]):]
431
+		entry := t.uintptr(f)
432
+		filetab := t.binary.Uint32(f[t.ptrsize+4*4:])
433
+		linetab := t.binary.Uint32(f[t.ptrsize+5*4:])
434
+		pc := t.findFileLine(entry, filetab, linetab, int32(filenum), int32(line))
435
+		if pc != 0 {
436
+			return pc
437
+		}
438
+	}
439
+	return 0
440
+}
441
+
442
+// initFileMap initializes the map from file name to file number.
443
+func (t *LineTable) initFileMap() {
444
+	t.mu.Lock()
445
+	defer t.mu.Unlock()
446
+
447
+	if t.fileMap != nil {
448
+		return
449
+	}
450
+	m := make(map[string]uint32)
451
+
452
+	for i := uint32(1); i < t.nfiletab; i++ {
453
+		s := t.string(t.binary.Uint32(t.filetab[4*i:]))
454
+		m[s] = i
455
+	}
456
+	t.fileMap = m
457
+}
458
+
459
+// go12MapFiles adds to m a key for every file in the Go 1.2 LineTable.
460
+// Every key maps to obj. That's not a very interesting map, but it provides
461
+// a way for callers to obtain the list of files in the program.
462
+func (t *LineTable) go12MapFiles(m map[string]*Obj, obj *Obj) {
463
+	defer func() {
464
+		recover()
465
+	}()
466
+
467
+	t.initFileMap()
468
+	for file := range t.fileMap {
469
+		m[file] = obj
470
+	}
471
+}
0 472
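
The vendored `pclntab.go` above mirrors the standard library's `debug/gosym` line-table implementation. As a sketch of how such a table is normally consumed — using the stdlib import path and an assumed ELF binary path — a program counter can be mapped back to a file and line like this:

```go
package main

import (
	"debug/elf"
	"debug/gosym"
	"fmt"
	"log"
)

func main() {
	f, err := elf.Open("/path/to/go-binary") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// .gopclntab holds the PC/line table; .text gives its base address.
	pclnSec := f.Section(".gopclntab")
	if pclnSec == nil {
		log.Fatal("no .gopclntab section")
	}
	pclnData, err := pclnSec.Data()
	if err != nil {
		log.Fatal(err)
	}
	var symData []byte
	if s := f.Section(".gosymtab"); s != nil { // may be empty in modern binaries
		symData, _ = s.Data()
	}

	table, err := gosym.NewTable(symData, gosym.NewLineTable(pclnData, f.Section(".text").Addr))
	if err != nil {
		log.Fatal(err)
	}

	pc := f.Entry // just an example PC; any address inside a Go function works
	file, line, fn := table.PCToLine(pc)
	if fn != nil {
		fmt.Printf("%#x -> %s:%d (%s)\n", pc, file, line, fn.Name)
	}
}
```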
new file mode 100644
... ...
@@ -0,0 +1,731 @@
0
+// Copyright 2018 Google Inc. All Rights Reserved.
1
+//
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+//      http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+// Package gosym implements access to the Go symbol
15
+// and line number tables embedded in Go binaries generated
16
+// by the gc compilers.
17
+package gosym
18
+
19
+// The table format is a variant of the format used in Plan 9's a.out
20
+// format, documented at http://plan9.bell-labs.com/magic/man2html/6/a.out.
21
+// The best reference for the differences between the Plan 9 format
22
+// and the Go format is the runtime source, specifically ../../runtime/symtab.c.
23
+
24
+import (
25
+	"bytes"
26
+	"encoding/binary"
27
+	"fmt"
28
+	"strconv"
29
+	"strings"
30
+)
31
+
32
+/*
33
+ * Symbols
34
+ */
35
+
36
+// A Sym represents a single symbol table entry.
37
+type Sym struct {
38
+	Value  uint64
39
+	Type   byte
40
+	Name   string
41
+	GoType uint64
42
+	// If this symbol if a function symbol, the corresponding Func
43
+	Func *Func
44
+}
45
+
46
+// Static reports whether this symbol is static (not visible outside its file).
47
+func (s *Sym) Static() bool { return s.Type >= 'a' }
48
+
49
+// PackageName returns the package part of the symbol name,
50
+// or the empty string if there is none.
51
+func (s *Sym) PackageName() string {
52
+	if i := strings.Index(s.Name, "."); i != -1 {
53
+		return s.Name[0:i]
54
+	}
55
+	return ""
56
+}
57
+
58
+// ReceiverName returns the receiver type name of this symbol,
59
+// or the empty string if there is none.
60
+func (s *Sym) ReceiverName() string {
61
+	l := strings.Index(s.Name, ".")
62
+	r := strings.LastIndex(s.Name, ".")
63
+	if l == -1 || r == -1 || l == r {
64
+		return ""
65
+	}
66
+	return s.Name[l+1 : r]
67
+}
68
+
69
+// BaseName returns the symbol name without the package or receiver name.
70
+func (s *Sym) BaseName() string {
71
+	if i := strings.LastIndex(s.Name, "."); i != -1 {
72
+		return s.Name[i+1:]
73
+	}
74
+	return s.Name
75
+}
76
+
77
+// A Func collects information about a single function.
78
+type Func struct {
79
+	Entry uint64
80
+	*Sym
81
+	End       uint64
82
+	Params    []*Sym
83
+	Locals    []*Sym
84
+	FrameSize int
85
+	LineTable *LineTable
86
+	Obj       *Obj
87
+}
88
+
89
+// An Obj represents a collection of functions in a symbol table.
90
+//
91
+// The exact method of division of a binary into separate Objs is an internal detail
92
+// of the symbol table format.
93
+//
94
+// In early versions of Go each source file became a different Obj.
95
+//
96
+// In Go 1 and Go 1.1, each package produced one Obj for all Go sources
97
+// and one Obj per C source file.
98
+//
99
+// In Go 1.2, there is a single Obj for the entire program.
100
+type Obj struct {
101
+	// Funcs is a list of functions in the Obj.
102
+	Funcs []Func
103
+
104
+	// In Go 1.1 and earlier, Paths is a list of symbols corresponding
105
+	// to the source file names that produced the Obj.
106
+	// In Go 1.2, Paths is nil.
107
+	// Use the keys of Table.Files to obtain a list of source files.
108
+	Paths []Sym // meta
109
+}
110
+
111
+/*
112
+ * Symbol tables
113
+ */
114
+
115
+// Table represents a Go symbol table.  It stores all of the
116
+// symbols decoded from the program and provides methods to translate
117
+// between symbols, names, and addresses.
118
+type Table struct {
119
+	Syms  []Sym
120
+	Funcs []Func
121
+	Files map[string]*Obj // nil for Go 1.2 and later binaries
122
+	Objs  []Obj           // nil for Go 1.2 and later binaries
123
+
124
+	go12line *LineTable // Go 1.2 line number table
125
+}
126
+
127
+type sym struct {
128
+	value  uint64
129
+	gotype uint64
130
+	typ    byte
131
+	name   []byte
132
+}
133
+
134
+var (
135
+	littleEndianSymtab    = []byte{0xFD, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00}
136
+	bigEndianSymtab       = []byte{0xFF, 0xFF, 0xFF, 0xFD, 0x00, 0x00, 0x00}
137
+	oldLittleEndianSymtab = []byte{0xFE, 0xFF, 0xFF, 0xFF, 0x00, 0x00}
138
+)
139
+
140
+func walksymtab(data []byte, fn func(sym) error) error {
141
+	if len(data) == 0 { // missing symtab is okay
142
+		return nil
143
+	}
144
+	var order binary.ByteOrder = binary.BigEndian
145
+	newTable := false
146
+	switch {
147
+	case bytes.HasPrefix(data, oldLittleEndianSymtab):
148
+		// Same as Go 1.0, but little endian.
149
+		// Format was used during interim development between Go 1.0 and Go 1.1.
150
+		// Should not be widespread, but easy to support.
151
+		data = data[6:]
152
+		order = binary.LittleEndian
153
+	case bytes.HasPrefix(data, bigEndianSymtab):
154
+		newTable = true
155
+	case bytes.HasPrefix(data, littleEndianSymtab):
156
+		newTable = true
157
+		order = binary.LittleEndian
158
+	}
159
+	var ptrsz int
160
+	if newTable {
161
+		if len(data) < 8 {
162
+			return &DecodingError{len(data), "unexpected EOF", nil}
163
+		}
164
+		ptrsz = int(data[7])
165
+		if ptrsz != 4 && ptrsz != 8 {
166
+			return &DecodingError{7, "invalid pointer size", ptrsz}
167
+		}
168
+		data = data[8:]
169
+	}
170
+	var s sym
171
+	p := data
172
+	for len(p) >= 4 {
173
+		var typ byte
174
+		if newTable {
175
+			// Symbol type, value, Go type.
176
+			typ = p[0] & 0x3F
177
+			wideValue := p[0]&0x40 != 0
178
+			goType := p[0]&0x80 != 0
179
+			if typ < 26 {
180
+				typ += 'A'
181
+			} else {
182
+				typ += 'a' - 26
183
+			}
184
+			s.typ = typ
185
+			p = p[1:]
186
+			if wideValue {
187
+				if len(p) < ptrsz {
188
+					return &DecodingError{len(data), "unexpected EOF", nil}
189
+				}
190
+				// fixed-width value
191
+				if ptrsz == 8 {
192
+					s.value = order.Uint64(p[0:8])
193
+					p = p[8:]
194
+				} else {
195
+					s.value = uint64(order.Uint32(p[0:4]))
196
+					p = p[4:]
197
+				}
198
+			} else {
199
+				// varint value
200
+				s.value = 0
201
+				shift := uint(0)
202
+				for len(p) > 0 && p[0]&0x80 != 0 {
203
+					s.value |= uint64(p[0]&0x7F) << shift
204
+					shift += 7
205
+					p = p[1:]
206
+				}
207
+				if len(p) == 0 {
208
+					return &DecodingError{len(data), "unexpected EOF", nil}
209
+				}
210
+				s.value |= uint64(p[0]) << shift
211
+				p = p[1:]
212
+			}
213
+			if goType {
214
+				if len(p) < ptrsz {
215
+					return &DecodingError{len(data), "unexpected EOF", nil}
216
+				}
217
+				// fixed-width go type
218
+				if ptrsz == 8 {
219
+					s.gotype = order.Uint64(p[0:8])
220
+					p = p[8:]
221
+				} else {
222
+					s.gotype = uint64(order.Uint32(p[0:4]))
223
+					p = p[4:]
224
+				}
225
+			}
226
+		} else {
227
+			// Value, symbol type.
228
+			s.value = uint64(order.Uint32(p[0:4]))
229
+			if len(p) < 5 {
230
+				return &DecodingError{len(data), "unexpected EOF", nil}
231
+			}
232
+			typ = p[4]
233
+			if typ&0x80 == 0 {
234
+				return &DecodingError{len(data) - len(p) + 4, "bad symbol type", typ}
235
+			}
236
+			typ &^= 0x80
237
+			s.typ = typ
238
+			p = p[5:]
239
+		}
240
+
241
+		// Name.
242
+		var i int
243
+		var nnul int
244
+		for i = 0; i < len(p); i++ {
245
+			if p[i] == 0 {
246
+				nnul = 1
247
+				break
248
+			}
249
+		}
250
+		switch typ {
251
+		case 'z', 'Z':
252
+			p = p[i+nnul:]
253
+			for i = 0; i+2 <= len(p); i += 2 {
254
+				if p[i] == 0 && p[i+1] == 0 {
255
+					nnul = 2
256
+					break
257
+				}
258
+			}
259
+		}
260
+		if len(p) < i+nnul {
261
+			return &DecodingError{len(data), "unexpected EOF", nil}
262
+		}
263
+		s.name = p[0:i]
264
+		i += nnul
265
+		p = p[i:]
266
+
267
+		if !newTable {
268
+			if len(p) < 4 {
269
+				return &DecodingError{len(data), "unexpected EOF", nil}
270
+			}
271
+			// Go type.
272
+			s.gotype = uint64(order.Uint32(p[:4]))
273
+			p = p[4:]
274
+		}
275
+		fn(s)
276
+	}
277
+	return nil
278
+}
279
+
280
+// NewTable decodes the Go symbol table in data,
281
+// returning an in-memory representation.
282
+func NewTable(symtab []byte, pcln *LineTable) (*Table, error) {
283
+	var n int
284
+	err := walksymtab(symtab, func(s sym) error {
285
+		n++
286
+		return nil
287
+	})
288
+	if err != nil {
289
+		return nil, err
290
+	}
291
+
292
+	var t Table
293
+	if pcln.isGo12() {
294
+		t.go12line = pcln
295
+	}
296
+	fname := make(map[uint16]string)
297
+	t.Syms = make([]Sym, 0, n)
298
+	nf := 0
299
+	nz := 0
300
+	lasttyp := uint8(0)
301
+	err = walksymtab(symtab, func(s sym) error {
302
+		n := len(t.Syms)
303
+		t.Syms = t.Syms[0 : n+1]
304
+		ts := &t.Syms[n]
305
+		ts.Type = s.typ
306
+		ts.Value = uint64(s.value)
307
+		ts.GoType = uint64(s.gotype)
308
+		switch s.typ {
309
+		default:
310
+			// rewrite name to use . instead of · (c2 b7)
311
+			w := 0
312
+			b := s.name
313
+			for i := 0; i < len(b); i++ {
314
+				if b[i] == 0xc2 && i+1 < len(b) && b[i+1] == 0xb7 {
315
+					i++
316
+					b[i] = '.'
317
+				}
318
+				b[w] = b[i]
319
+				w++
320
+			}
321
+			ts.Name = string(s.name[0:w])
322
+		case 'z', 'Z':
323
+			if lasttyp != 'z' && lasttyp != 'Z' {
324
+				nz++
325
+			}
326
+			for i := 0; i < len(s.name); i += 2 {
327
+				eltIdx := binary.BigEndian.Uint16(s.name[i : i+2])
328
+				elt, ok := fname[eltIdx]
329
+				if !ok {
330
+					return &DecodingError{-1, "bad filename code", eltIdx}
331
+				}
332
+				if n := len(ts.Name); n > 0 && ts.Name[n-1] != '/' {
333
+					ts.Name += "/"
334
+				}
335
+				ts.Name += elt
336
+			}
337
+		}
338
+		switch s.typ {
339
+		case 'T', 't', 'L', 'l':
340
+			nf++
341
+		case 'f':
342
+			fname[uint16(s.value)] = ts.Name
343
+		}
344
+		lasttyp = s.typ
345
+		return nil
346
+	})
347
+	if err != nil {
348
+		return nil, err
349
+	}
350
+
351
+	t.Funcs = make([]Func, 0, nf)
352
+	t.Files = make(map[string]*Obj)
353
+
354
+	var obj *Obj
355
+	if t.go12line != nil {
356
+		// Put all functions into one Obj.
357
+		t.Objs = make([]Obj, 1)
358
+		obj = &t.Objs[0]
359
+		t.go12line.go12MapFiles(t.Files, obj)
360
+	} else {
361
+		t.Objs = make([]Obj, 0, nz)
362
+	}
363
+
364
+	// Count text symbols and attach frame sizes, parameters, and
365
+	// locals to them.  Also, find object file boundaries.
366
+	lastf := 0
367
+	for i := 0; i < len(t.Syms); i++ {
368
+		sym := &t.Syms[i]
369
+		switch sym.Type {
370
+		case 'Z', 'z': // path symbol
371
+			if t.go12line != nil {
372
+				// Go 1.2 binaries have the file information elsewhere. Ignore.
373
+				break
374
+			}
375
+			// Finish the current object
376
+			if obj != nil {
377
+				obj.Funcs = t.Funcs[lastf:]
378
+			}
379
+			lastf = len(t.Funcs)
380
+
381
+			// Start new object
382
+			n := len(t.Objs)
383
+			t.Objs = t.Objs[0 : n+1]
384
+			obj = &t.Objs[n]
385
+
386
+			// Count & copy path symbols
387
+			var end int
388
+			for end = i + 1; end < len(t.Syms); end++ {
389
+				if c := t.Syms[end].Type; c != 'Z' && c != 'z' {
390
+					break
391
+				}
392
+			}
393
+			obj.Paths = t.Syms[i:end]
394
+			i = end - 1 // loop will i++
395
+
396
+			// Record file names
397
+			depth := 0
398
+			for j := range obj.Paths {
399
+				s := &obj.Paths[j]
400
+				if s.Name == "" {
401
+					depth--
402
+				} else {
403
+					if depth == 0 {
404
+						t.Files[s.Name] = obj
405
+					}
406
+					depth++
407
+				}
408
+			}
409
+
410
+		case 'T', 't', 'L', 'l': // text symbol
411
+			if n := len(t.Funcs); n > 0 {
412
+				t.Funcs[n-1].End = sym.Value
413
+			}
414
+			if sym.Name == "etext" {
415
+				continue
416
+			}
417
+
418
+			// Count parameter and local (auto) syms
419
+			var np, na int
420
+			var end int
421
+		countloop:
422
+			for end = i + 1; end < len(t.Syms); end++ {
423
+				switch t.Syms[end].Type {
424
+				case 'T', 't', 'L', 'l', 'Z', 'z':
425
+					break countloop
426
+				case 'p':
427
+					np++
428
+				case 'a':
429
+					na++
430
+				}
431
+			}
432
+
433
+			// Fill in the function symbol
434
+			n := len(t.Funcs)
435
+			t.Funcs = t.Funcs[0 : n+1]
436
+			fn := &t.Funcs[n]
437
+			sym.Func = fn
438
+			fn.Params = make([]*Sym, 0, np)
439
+			fn.Locals = make([]*Sym, 0, na)
440
+			fn.Sym = sym
441
+			fn.Entry = sym.Value
442
+			fn.Obj = obj
443
+			if t.go12line != nil {
444
+				// All functions share the same line table.
445
+				// It knows how to narrow down to a specific
446
+				// function quickly.
447
+				fn.LineTable = t.go12line
448
+			} else if pcln != nil {
449
+				fn.LineTable = pcln.slice(fn.Entry)
450
+				pcln = fn.LineTable
451
+			}
452
+			for j := i; j < end; j++ {
453
+				s := &t.Syms[j]
454
+				switch s.Type {
455
+				case 'm':
456
+					fn.FrameSize = int(s.Value)
457
+				case 'p':
458
+					n := len(fn.Params)
459
+					fn.Params = fn.Params[0 : n+1]
460
+					fn.Params[n] = s
461
+				case 'a':
462
+					n := len(fn.Locals)
463
+					fn.Locals = fn.Locals[0 : n+1]
464
+					fn.Locals[n] = s
465
+				}
466
+			}
467
+			i = end - 1 // loop will i++
468
+		}
469
+	}
470
+
471
+	if t.go12line != nil && nf == 0 {
472
+		t.Funcs = t.go12line.go12Funcs()
473
+	}
474
+	if obj != nil {
475
+		obj.Funcs = t.Funcs[lastf:]
476
+	}
477
+	return &t, nil
478
+}
479
+
480
+// PCToFunc returns the function containing the program counter pc,
481
+// or nil if there is no such function.
482
+func (t *Table) PCToFunc(pc uint64) *Func {
483
+	funcs := t.Funcs
484
+	for len(funcs) > 0 {
485
+		m := len(funcs) / 2
486
+		fn := &funcs[m]
487
+		switch {
488
+		case pc < fn.Entry:
489
+			funcs = funcs[0:m]
490
+		case fn.Entry <= pc && pc < fn.End:
491
+			return fn
492
+		default:
493
+			funcs = funcs[m+1:]
494
+		}
495
+	}
496
+	return nil
497
+}
498
+
499
+// PCToLine looks up line number information for a program counter.
500
+// If there is no information, it returns fn == nil.
501
+func (t *Table) PCToLine(pc uint64) (file string, line int, fn *Func) {
502
+	if fn = t.PCToFunc(pc); fn == nil {
503
+		return
504
+	}
505
+	if t.go12line != nil {
506
+		file = t.go12line.go12PCToFile(pc)
507
+		line = t.go12line.go12PCToLine(pc)
508
+	} else {
509
+		file, line = fn.Obj.lineFromAline(fn.LineTable.PCToLine(pc))
510
+	}
511
+	return
512
+}
513
+
514
+// PCToSPAdj returns the stack pointer adjustment for a program counter.
515
+func (t *Table) PCToSPAdj(pc uint64) (spadj int) {
516
+	if fn := t.PCToFunc(pc); fn == nil {
517
+		return 0
518
+	}
519
+	if t.go12line != nil {
520
+		return t.go12line.go12PCToSPAdj(pc)
521
+	}
522
+	return 0
523
+}
524
+
525
+// LineToPC looks up the first program counter on the given line in
526
+// the named file.  It returns UnknownPathError or UnknownLineError if
527
+// there is an error looking up this line.
528
+func (t *Table) LineToPC(file string, line int) (pc uint64, fn *Func, err error) {
529
+	obj, ok := t.Files[file]
530
+	if !ok {
531
+		return 0, nil, UnknownFileError(file)
532
+	}
533
+
534
+	if t.go12line != nil {
535
+		pc := t.go12line.go12LineToPC(file, line)
536
+		if pc == 0 {
537
+			return 0, nil, &UnknownLineError{file, line}
538
+		}
539
+		return pc, t.PCToFunc(pc), nil
540
+	}
541
+
542
+	abs, err := obj.alineFromLine(file, line)
543
+	if err != nil {
544
+		return
545
+	}
546
+	for i := range obj.Funcs {
547
+		f := &obj.Funcs[i]
548
+		pc := f.LineTable.LineToPC(abs, f.End)
549
+		if pc != 0 {
550
+			return pc, f, nil
551
+		}
552
+	}
553
+	return 0, nil, &UnknownLineError{file, line}
554
+}
555
+
556
+// LookupSym returns the text, data, or bss symbol with the given name,
557
+// or nil if no such symbol is found.
558
+func (t *Table) LookupSym(name string) *Sym {
559
+	// TODO(austin) Maybe make a map
560
+	for i := range t.Syms {
561
+		s := &t.Syms[i]
562
+		switch s.Type {
563
+		case 'T', 't', 'L', 'l', 'D', 'd', 'B', 'b':
564
+			if s.Name == name {
565
+				return s
566
+			}
567
+		}
568
+	}
569
+	return nil
570
+}
571
+
572
+// LookupFunc returns the text, data, or bss symbol with the given name,
573
+// or nil if no such symbol is found.
574
+func (t *Table) LookupFunc(name string) *Func {
575
+	for i := range t.Funcs {
576
+		f := &t.Funcs[i]
577
+		if f.Sym.Name == name {
578
+			return f
579
+		}
580
+	}
581
+	return nil
582
+}
583
+
584
+// SymByAddr returns the text, data, or bss symbol starting at the given address.
585
+func (t *Table) SymByAddr(addr uint64) *Sym {
586
+	for i := range t.Syms {
587
+		s := &t.Syms[i]
588
+		switch s.Type {
589
+		case 'T', 't', 'L', 'l', 'D', 'd', 'B', 'b':
590
+			if s.Value == addr {
591
+				return s
592
+			}
593
+		}
594
+	}
595
+	return nil
596
+}
597
+
598
+/*
599
+ * Object files
600
+ */
601
+
602
+// This is legacy code for Go 1.1 and earlier, which used the
603
+// Plan 9 format for pc-line tables. This code was never quite
604
+// correct. It's probably very close, and it's usually correct, but
605
+// we never quite found all the corner cases.
606
+//
607
+// Go 1.2 and later use a simpler format, documented at golang.org/s/go12symtab.
608
+
609
+func (o *Obj) lineFromAline(aline int) (string, int) {
610
+	type stackEnt struct {
611
+		path   string
612
+		start  int
613
+		offset int
614
+		prev   *stackEnt
615
+	}
616
+
617
+	noPath := &stackEnt{"", 0, 0, nil}
618
+	tos := noPath
619
+
620
+pathloop:
621
+	for _, s := range o.Paths {
622
+		val := int(s.Value)
623
+		switch {
624
+		case val > aline:
625
+			break pathloop
626
+
627
+		case val == 1:
628
+			// Start a new stack
629
+			tos = &stackEnt{s.Name, val, 0, noPath}
630
+
631
+		case s.Name == "":
632
+			// Pop
633
+			if tos == noPath {
634
+				return "<malformed symbol table>", 0
635
+			}
636
+			tos.prev.offset += val - tos.start
637
+			tos = tos.prev
638
+
639
+		default:
640
+			// Push
641
+			tos = &stackEnt{s.Name, val, 0, tos}
642
+		}
643
+	}
644
+
645
+	if tos == noPath {
646
+		return "", 0
647
+	}
648
+	return tos.path, aline - tos.start - tos.offset + 1
649
+}
650
+
651
+func (o *Obj) alineFromLine(path string, line int) (int, error) {
652
+	if line < 1 {
653
+		return 0, &UnknownLineError{path, line}
654
+	}
655
+
656
+	for i, s := range o.Paths {
657
+		// Find this path
658
+		if s.Name != path {
659
+			continue
660
+		}
661
+
662
+		// Find this line at this stack level
663
+		depth := 0
664
+		var incstart int
665
+		line += int(s.Value)
666
+	pathloop:
667
+		for _, s := range o.Paths[i:] {
668
+			val := int(s.Value)
669
+			switch {
670
+			case depth == 1 && val >= line:
671
+				return line - 1, nil
672
+
673
+			case s.Name == "":
674
+				depth--
675
+				if depth == 0 {
676
+					break pathloop
677
+				} else if depth == 1 {
678
+					line += val - incstart
679
+				}
680
+
681
+			default:
682
+				if depth == 1 {
683
+					incstart = val
684
+				}
685
+				depth++
686
+			}
687
+		}
688
+		return 0, &UnknownLineError{path, line}
689
+	}
690
+	return 0, UnknownFileError(path)
691
+}
692
+
693
+/*
694
+ * Errors
695
+ */
696
+
697
+// UnknownFileError represents a failure to find the specific file in
698
+// the symbol table.
699
+type UnknownFileError string
700
+
701
+func (e UnknownFileError) Error() string { return "unknown file: " + string(e) }
702
+
703
+// UnknownLineError represents a failure to map a line to a program
704
+// counter, either because the line is beyond the bounds of the file
705
+// or because there is no code on the given line.
706
+type UnknownLineError struct {
707
+	File string
708
+	Line int
709
+}
710
+
711
+func (e *UnknownLineError) Error() string {
712
+	return "no code at " + e.File + ":" + strconv.Itoa(e.Line)
713
+}
714
+
715
+// DecodingError represents an error during the decoding of
716
+// the symbol table.
717
+type DecodingError struct {
718
+	off int
719
+	msg string
720
+	val interface{}
721
+}
722
+
723
+func (e *DecodingError) Error() string {
724
+	msg := e.msg
725
+	if e.val != nil {
726
+		msg += fmt.Sprintf(" '%v'", e.val)
727
+	}
728
+	msg += fmt.Sprintf(" at byte %#x", e.off)
729
+	return msg
730
+}
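
The symbol-table API in this file (NewTable, PCToFunc, PCToLine, LookupFunc) mirrors the standard library's debug/gosym. A minimal, hypothetical sketch of how a caller might wire it together follows; the import path is a placeholder since this hunk does not show where the vendored package lives, and the symtab bytes and line table are assumed to come from the binary being inspected:

```go
package example

import (
	"fmt"

	gosym "example.com/placeholder/gosym" // placeholder: wherever this vendored package lives
)

// DescribePC resolves a program counter to a function and source position,
// given the raw symtab bytes and an already-built line table for the binary.
func DescribePC(symtab []byte, pcln *gosym.LineTable, pc uint64) (string, error) {
	// NewTable decodes the symbol table shown in this diff.
	tab, err := gosym.NewTable(symtab, pcln)
	if err != nil {
		return "", err
	}
	file, line, fn := tab.PCToLine(pc)
	if fn == nil {
		return "", fmt.Errorf("no function contains %#x", pc)
	}
	return fmt.Sprintf("%s at %s:%d", fn.Sym.Name, file, line), nil
}
```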
... ...
@@ -1,4 +1,4 @@
1
-// Copyright 2014 Google Inc. All Rights Reserved.
1
+// Copyright 2014 Google LLC
2 2
 //
3 3
 // Licensed under the Apache License, Version 2.0 (the "License");
4 4
 // you may not use this file except in compliance with the License.
... ...
@@ -20,6 +20,7 @@
20 20
 package metadata // import "cloud.google.com/go/compute/metadata"
21 21
 
22 22
 import (
23
+	"context"
23 24
 	"encoding/json"
24 25
 	"fmt"
25 26
 	"io/ioutil"
... ...
@@ -31,9 +32,6 @@ import (
31 31
 	"strings"
32 32
 	"sync"
33 33
 	"time"
34
-
35
-	"golang.org/x/net/context"
36
-	"golang.org/x/net/context/ctxhttp"
37 34
 )
38 35
 
39 36
 const (
... ...
@@ -64,7 +62,7 @@ var (
64 64
 )
65 65
 
66 66
 var (
67
-	metaClient = &http.Client{
67
+	defaultClient = &Client{hc: &http.Client{
68 68
 		Transport: &http.Transport{
69 69
 			Dial: (&net.Dialer{
70 70
 				Timeout:   2 * time.Second,
... ...
@@ -72,15 +70,15 @@ var (
72 72
 			}).Dial,
73 73
 			ResponseHeaderTimeout: 2 * time.Second,
74 74
 		},
75
-	}
76
-	subscribeClient = &http.Client{
75
+	}}
76
+	subscribeClient = &Client{hc: &http.Client{
77 77
 		Transport: &http.Transport{
78 78
 			Dial: (&net.Dialer{
79 79
 				Timeout:   2 * time.Second,
80 80
 				KeepAlive: 30 * time.Second,
81 81
 			}).Dial,
82 82
 		},
83
-	}
83
+	}}
84 84
 )
85 85
 
86 86
 // NotDefinedError is returned when requested metadata is not defined.
... ...
@@ -95,74 +93,16 @@ func (suffix NotDefinedError) Error() string {
95 95
 	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
96 96
 }
97 97
 
98
-// Get returns a value from the metadata service.
99
-// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
100
-//
101
-// If the GCE_METADATA_HOST environment variable is not defined, a default of
102
-// 169.254.169.254 will be used instead.
103
-//
104
-// If the requested metadata is not defined, the returned error will
105
-// be of type NotDefinedError.
106
-func Get(suffix string) (string, error) {
107
-	val, _, err := getETag(metaClient, suffix)
108
-	return val, err
109
-}
110
-
111
-// getETag returns a value from the metadata service as well as the associated
112
-// ETag using the provided client. This func is otherwise equivalent to Get.
113
-func getETag(client *http.Client, suffix string) (value, etag string, err error) {
114
-	// Using a fixed IP makes it very difficult to spoof the metadata service in
115
-	// a container, which is an important use-case for local testing of cloud
116
-	// deployments. To enable spoofing of the metadata service, the environment
117
-	// variable GCE_METADATA_HOST is first inspected to decide where metadata
118
-	// requests shall go.
119
-	host := os.Getenv(metadataHostEnv)
120
-	if host == "" {
121
-		// Using 169.254.169.254 instead of "metadata" here because Go
122
-		// binaries built with the "netgo" tag and without cgo won't
123
-		// know the search suffix for "metadata" is
124
-		// ".google.internal", and this IP address is documented as
125
-		// being stable anyway.
126
-		host = metadataIP
127
-	}
128
-	url := "http://" + host + "/computeMetadata/v1/" + suffix
129
-	req, _ := http.NewRequest("GET", url, nil)
130
-	req.Header.Set("Metadata-Flavor", "Google")
131
-	req.Header.Set("User-Agent", userAgent)
132
-	res, err := client.Do(req)
133
-	if err != nil {
134
-		return "", "", err
135
-	}
136
-	defer res.Body.Close()
137
-	if res.StatusCode == http.StatusNotFound {
138
-		return "", "", NotDefinedError(suffix)
139
-	}
140
-	if res.StatusCode != 200 {
141
-		return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
142
-	}
143
-	all, err := ioutil.ReadAll(res.Body)
144
-	if err != nil {
145
-		return "", "", err
146
-	}
147
-	return string(all), res.Header.Get("Etag"), nil
148
-}
149
-
150
-func getTrimmed(suffix string) (s string, err error) {
151
-	s, err = Get(suffix)
152
-	s = strings.TrimSpace(s)
153
-	return
154
-}
155
-
156
-func (c *cachedValue) get() (v string, err error) {
98
+func (c *cachedValue) get(cl *Client) (v string, err error) {
157 99
 	defer c.mu.Unlock()
158 100
 	c.mu.Lock()
159 101
 	if c.v != "" {
160 102
 		return c.v, nil
161 103
 	}
162 104
 	if c.trim {
163
-		v, err = getTrimmed(c.k)
105
+		v, err = cl.getTrimmed(c.k)
164 106
 	} else {
165
-		v, err = Get(c.k)
107
+		v, err = cl.Get(c.k)
166 108
 	}
167 109
 	if err == nil {
168 110
 		c.v = v
... ...
@@ -197,11 +137,11 @@ func testOnGCE() bool {
197 197
 	resc := make(chan bool, 2)
198 198
 
199 199
 	// Try two strategies in parallel.
200
-	// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
200
+	// See https://github.com/googleapis/google-cloud-go/issues/194
201 201
 	go func() {
202 202
 		req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
203 203
 		req.Header.Set("User-Agent", userAgent)
204
-		res, err := ctxhttp.Do(ctx, metaClient, req)
204
+		res, err := defaultClient.hc.Do(req.WithContext(ctx))
205 205
 		if err != nil {
206 206
 			resc <- false
207 207
 			return
... ...
@@ -266,78 +206,183 @@ func systemInfoSuggestsGCE() bool {
266 266
 	return name == "Google" || name == "Google Compute Engine"
267 267
 }
268 268
 
269
-// Subscribe subscribes to a value from the metadata service.
270
-// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
271
-// The suffix may contain query parameters.
272
-//
273
-// Subscribe calls fn with the latest metadata value indicated by the provided
274
-// suffix. If the metadata value is deleted, fn is called with the empty string
275
-// and ok false. Subscribe blocks until fn returns a non-nil error or the value
276
-// is deleted. Subscribe returns the error value returned from the last call to
277
-// fn, which may be nil when ok == false.
269
+// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
270
+// ResponseHeaderTimeout).
278 271
 func Subscribe(suffix string, fn func(v string, ok bool) error) error {
279
-	const failedSubscribeSleep = time.Second * 5
272
+	return subscribeClient.Subscribe(suffix, fn)
273
+}
280 274
 
281
-	// First check to see if the metadata value exists at all.
282
-	val, lastETag, err := getETag(subscribeClient, suffix)
283
-	if err != nil {
284
-		return err
285
-	}
275
+// Get calls Client.Get on the default client.
276
+func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
286 277
 
287
-	if err := fn(val, true); err != nil {
288
-		return err
278
+// ProjectID returns the current instance's project ID string.
279
+func ProjectID() (string, error) { return defaultClient.ProjectID() }
280
+
281
+// NumericProjectID returns the current instance's numeric project ID.
282
+func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
283
+
284
+// InternalIP returns the instance's primary internal IP address.
285
+func InternalIP() (string, error) { return defaultClient.InternalIP() }
286
+
287
+// ExternalIP returns the instance's primary external (public) IP address.
288
+func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
289
+
290
+// Hostname returns the instance's hostname. This will be of the form
291
+// "<instanceID>.c.<projID>.internal".
292
+func Hostname() (string, error) { return defaultClient.Hostname() }
293
+
294
+// InstanceTags returns the list of user-defined instance tags,
295
+// assigned when initially creating a GCE instance.
296
+func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
297
+
298
+// InstanceID returns the current VM's numeric instance ID.
299
+func InstanceID() (string, error) { return defaultClient.InstanceID() }
300
+
301
+// InstanceName returns the current VM's instance ID string.
302
+func InstanceName() (string, error) { return defaultClient.InstanceName() }
303
+
304
+// Zone returns the current VM's zone, such as "us-central1-b".
305
+func Zone() (string, error) { return defaultClient.Zone() }
306
+
307
+// InstanceAttributes calls Client.InstanceAttributes on the default client.
308
+func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
309
+
310
+// ProjectAttributes calls Client.ProjectAttributes on the default client.
311
+func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
312
+
313
+// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
314
+func InstanceAttributeValue(attr string) (string, error) {
315
+	return defaultClient.InstanceAttributeValue(attr)
316
+}
317
+
318
+// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
319
+func ProjectAttributeValue(attr string) (string, error) {
320
+	return defaultClient.ProjectAttributeValue(attr)
321
+}
322
+
323
+// Scopes calls Client.Scopes on the default client.
324
+func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
325
+
326
+func strsContains(ss []string, s string) bool {
327
+	for _, v := range ss {
328
+		if v == s {
329
+			return true
330
+		}
289 331
 	}
332
+	return false
333
+}
290 334
 
291
-	ok := true
292
-	if strings.ContainsRune(suffix, '?') {
293
-		suffix += "&wait_for_change=true&last_etag="
294
-	} else {
295
-		suffix += "?wait_for_change=true&last_etag="
335
+// A Client provides metadata.
336
+type Client struct {
337
+	hc *http.Client
338
+}
339
+
340
+// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
341
+// will use the given http.Client instead of the default client.
342
+func NewClient(c *http.Client) *Client {
343
+	return &Client{hc: c}
344
+}
345
+
346
+// getETag returns a value from the metadata service as well as the associated ETag.
347
+// This func is otherwise equivalent to Get.
348
+func (c *Client) getETag(suffix string) (value, etag string, err error) {
349
+	// Using a fixed IP makes it very difficult to spoof the metadata service in
350
+	// a container, which is an important use-case for local testing of cloud
351
+	// deployments. To enable spoofing of the metadata service, the environment
352
+	// variable GCE_METADATA_HOST is first inspected to decide where metadata
353
+	// requests shall go.
354
+	host := os.Getenv(metadataHostEnv)
355
+	if host == "" {
356
+		// Using 169.254.169.254 instead of "metadata" here because Go
357
+		// binaries built with the "netgo" tag and without cgo won't
358
+		// know the search suffix for "metadata" is
359
+		// ".google.internal", and this IP address is documented as
360
+		// being stable anyway.
361
+		host = metadataIP
296 362
 	}
297
-	for {
298
-		val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
299
-		if err != nil {
300
-			if _, deleted := err.(NotDefinedError); !deleted {
301
-				time.Sleep(failedSubscribeSleep)
302
-				continue // Retry on other errors.
303
-			}
304
-			ok = false
305
-		}
306
-		lastETag = etag
363
+	u := "http://" + host + "/computeMetadata/v1/" + suffix
364
+	req, _ := http.NewRequest("GET", u, nil)
365
+	req.Header.Set("Metadata-Flavor", "Google")
366
+	req.Header.Set("User-Agent", userAgent)
367
+	res, err := c.hc.Do(req)
368
+	if err != nil {
369
+		return "", "", err
370
+	}
371
+	defer res.Body.Close()
372
+	if res.StatusCode == http.StatusNotFound {
373
+		return "", "", NotDefinedError(suffix)
374
+	}
375
+	all, err := ioutil.ReadAll(res.Body)
376
+	if err != nil {
377
+		return "", "", err
378
+	}
379
+	if res.StatusCode != 200 {
380
+		return "", "", &Error{Code: res.StatusCode, Message: string(all)}
381
+	}
382
+	return string(all), res.Header.Get("Etag"), nil
383
+}
307 384
 
308
-		if err := fn(val, ok); err != nil || !ok {
309
-			return err
310
-		}
385
+// Get returns a value from the metadata service.
386
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
387
+//
388
+// If the GCE_METADATA_HOST environment variable is not defined, a default of
389
+// 169.254.169.254 will be used instead.
390
+//
391
+// If the requested metadata is not defined, the returned error will
392
+// be of type NotDefinedError.
393
+func (c *Client) Get(suffix string) (string, error) {
394
+	val, _, err := c.getETag(suffix)
395
+	return val, err
396
+}
397
+
398
+func (c *Client) getTrimmed(suffix string) (s string, err error) {
399
+	s, err = c.Get(suffix)
400
+	s = strings.TrimSpace(s)
401
+	return
402
+}
403
+
404
+func (c *Client) lines(suffix string) ([]string, error) {
405
+	j, err := c.Get(suffix)
406
+	if err != nil {
407
+		return nil, err
311 408
 	}
409
+	s := strings.Split(strings.TrimSpace(j), "\n")
410
+	for i := range s {
411
+		s[i] = strings.TrimSpace(s[i])
412
+	}
413
+	return s, nil
312 414
 }
313 415
 
314 416
 // ProjectID returns the current instance's project ID string.
315
-func ProjectID() (string, error) { return projID.get() }
417
+func (c *Client) ProjectID() (string, error) { return projID.get(c) }
316 418
 
317 419
 // NumericProjectID returns the current instance's numeric project ID.
318
-func NumericProjectID() (string, error) { return projNum.get() }
420
+func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
421
+
422
+// InstanceID returns the current VM's numeric instance ID.
423
+func (c *Client) InstanceID() (string, error) { return instID.get(c) }
319 424
 
320 425
 // InternalIP returns the instance's primary internal IP address.
321
-func InternalIP() (string, error) {
322
-	return getTrimmed("instance/network-interfaces/0/ip")
426
+func (c *Client) InternalIP() (string, error) {
427
+	return c.getTrimmed("instance/network-interfaces/0/ip")
323 428
 }
324 429
 
325 430
 // ExternalIP returns the instance's primary external (public) IP address.
326
-func ExternalIP() (string, error) {
327
-	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
431
+func (c *Client) ExternalIP() (string, error) {
432
+	return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
328 433
 }
329 434
 
330 435
 // Hostname returns the instance's hostname. This will be of the form
331 436
 // "<instanceID>.c.<projID>.internal".
332
-func Hostname() (string, error) {
333
-	return getTrimmed("instance/hostname")
437
+func (c *Client) Hostname() (string, error) {
438
+	return c.getTrimmed("instance/hostname")
334 439
 }
335 440
 
336 441
 // InstanceTags returns the list of user-defined instance tags,
337 442
 // assigned when initially creating a GCE instance.
338
-func InstanceTags() ([]string, error) {
443
+func (c *Client) InstanceTags() ([]string, error) {
339 444
 	var s []string
340
-	j, err := Get("instance/tags")
445
+	j, err := c.Get("instance/tags")
341 446
 	if err != nil {
342 447
 		return nil, err
343 448
 	}
... ...
@@ -347,14 +392,9 @@ func InstanceTags() ([]string, error) {
347 347
 	return s, nil
348 348
 }
349 349
 
350
-// InstanceID returns the current VM's numeric instance ID.
351
-func InstanceID() (string, error) {
352
-	return instID.get()
353
-}
354
-
355 350
 // InstanceName returns the current VM's instance ID string.
356
-func InstanceName() (string, error) {
357
-	host, err := Hostname()
351
+func (c *Client) InstanceName() (string, error) {
352
+	host, err := c.Hostname()
358 353
 	if err != nil {
359 354
 		return "", err
360 355
 	}
... ...
@@ -362,8 +402,8 @@ func InstanceName() (string, error) {
362 362
 }
363 363
 
364 364
 // Zone returns the current VM's zone, such as "us-central1-b".
365
-func Zone() (string, error) {
366
-	zone, err := getTrimmed("instance/zone")
365
+func (c *Client) Zone() (string, error) {
366
+	zone, err := c.getTrimmed("instance/zone")
367 367
 	// zone is of the form "projects/<projNum>/zones/<zoneName>".
368 368
 	if err != nil {
369 369
 		return "", err
... ...
@@ -374,24 +414,12 @@ func Zone() (string, error) {
374 374
 // InstanceAttributes returns the list of user-defined attributes,
375 375
 // assigned when initially creating a GCE VM instance. The value of an
376 376
 // attribute can be obtained with InstanceAttributeValue.
377
-func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
377
+func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
378 378
 
379 379
 // ProjectAttributes returns the list of user-defined attributes
380 380
 // applying to the project as a whole, not just this VM.  The value of
381 381
 // an attribute can be obtained with ProjectAttributeValue.
382
-func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
383
-
384
-func lines(suffix string) ([]string, error) {
385
-	j, err := Get(suffix)
386
-	if err != nil {
387
-		return nil, err
388
-	}
389
-	s := strings.Split(strings.TrimSpace(j), "\n")
390
-	for i := range s {
391
-		s[i] = strings.TrimSpace(s[i])
392
-	}
393
-	return s, nil
394
-}
382
+func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
395 383
 
396 384
 // InstanceAttributeValue returns the value of the provided VM
397 385
 // instance attribute.
... ...
@@ -401,8 +429,8 @@ func lines(suffix string) ([]string, error) {
401 401
 //
402 402
 // InstanceAttributeValue may return ("", nil) if the attribute was
403 403
 // defined to be the empty string.
404
-func InstanceAttributeValue(attr string) (string, error) {
405
-	return Get("instance/attributes/" + attr)
404
+func (c *Client) InstanceAttributeValue(attr string) (string, error) {
405
+	return c.Get("instance/attributes/" + attr)
406 406
 }
407 407
 
408 408
 // ProjectAttributeValue returns the value of the provided
... ...
@@ -413,25 +441,73 @@ func InstanceAttributeValue(attr string) (string, error) {
413 413
 //
414 414
 // ProjectAttributeValue may return ("", nil) if the attribute was
415 415
 // defined to be the empty string.
416
-func ProjectAttributeValue(attr string) (string, error) {
417
-	return Get("project/attributes/" + attr)
416
+func (c *Client) ProjectAttributeValue(attr string) (string, error) {
417
+	return c.Get("project/attributes/" + attr)
418 418
 }
419 419
 
420 420
 // Scopes returns the service account scopes for the given account.
421 421
 // The account may be empty or the string "default" to use the instance's
422 422
 // main account.
423
-func Scopes(serviceAccount string) ([]string, error) {
423
+func (c *Client) Scopes(serviceAccount string) ([]string, error) {
424 424
 	if serviceAccount == "" {
425 425
 		serviceAccount = "default"
426 426
 	}
427
-	return lines("instance/service-accounts/" + serviceAccount + "/scopes")
427
+	return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
428 428
 }
429 429
 
430
-func strsContains(ss []string, s string) bool {
431
-	for _, v := range ss {
432
-		if v == s {
433
-			return true
430
+// Subscribe subscribes to a value from the metadata service.
431
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
432
+// The suffix may contain query parameters.
433
+//
434
+// Subscribe calls fn with the latest metadata value indicated by the provided
435
+// suffix. If the metadata value is deleted, fn is called with the empty string
436
+// and ok false. Subscribe blocks until fn returns a non-nil error or the value
437
+// is deleted. Subscribe returns the error value returned from the last call to
438
+// fn, which may be nil when ok == false.
439
+func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
440
+	const failedSubscribeSleep = time.Second * 5
441
+
442
+	// First check to see if the metadata value exists at all.
443
+	val, lastETag, err := c.getETag(suffix)
444
+	if err != nil {
445
+		return err
446
+	}
447
+
448
+	if err := fn(val, true); err != nil {
449
+		return err
450
+	}
451
+
452
+	ok := true
453
+	if strings.ContainsRune(suffix, '?') {
454
+		suffix += "&wait_for_change=true&last_etag="
455
+	} else {
456
+		suffix += "?wait_for_change=true&last_etag="
457
+	}
458
+	for {
459
+		val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
460
+		if err != nil {
461
+			if _, deleted := err.(NotDefinedError); !deleted {
462
+				time.Sleep(failedSubscribeSleep)
463
+				continue // Retry on other errors.
464
+			}
465
+			ok = false
466
+		}
467
+		lastETag = etag
468
+
469
+		if err := fn(val, ok); err != nil || !ok {
470
+			return err
434 471
 		}
435 472
 	}
436
-	return false
473
+}
474
+
475
+// Error contains an error response from the server.
476
+type Error struct {
477
+	// Code is the HTTP response status code.
478
+	Code int
479
+	// Message is the server response message.
480
+	Message string
481
+}
482
+
483
+func (e *Error) Error() string {
484
+	return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
437 485
 }
438 486
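
The rewrite above turns the package-level helpers into thin wrappers around a new Client type, so callers can inject their own http.Client. A brief sketch of both styles, assuming the canonical import path from the package clause (cloud.google.com/go/compute/metadata); on a machine that is not a GCE instance these calls simply return errors:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// Package-level helpers still work and delegate to the default client:
	//   zone, err := metadata.Zone()
	// The new Client type lets callers supply their own http.Client instead.
	c := metadata.NewClient(&http.Client{Timeout: 3 * time.Second})

	zone, err := c.Zone()
	if err != nil {
		log.Fatal(err) // e.g. when not running on GCE
	}
	project, err := c.ProjectID()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("instance is in zone %s, project %s\n", zone, project)
}
```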
new file mode 100644
... ...
@@ -0,0 +1,29 @@
0
+module cloud.google.com/go
1
+
2
+go 1.9
3
+
4
+require (
5
+	cloud.google.com/go/datastore v1.0.0
6
+	github.com/golang/mock v1.3.1
7
+	github.com/golang/protobuf v1.3.2
8
+	github.com/google/btree v1.0.0
9
+	github.com/google/go-cmp v0.3.0
10
+	github.com/google/martian v2.1.0+incompatible
11
+	github.com/google/pprof v0.0.0-20190515194954-54271f7e092f
12
+	github.com/googleapis/gax-go/v2 v2.0.5
13
+	github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024
14
+	go.opencensus.io v0.22.0
15
+	golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522
16
+	golang.org/x/lint v0.0.0-20190409202823-959b441ac422
17
+	golang.org/x/net v0.0.0-20190620200207-3b0461eec859
18
+	golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
19
+	golang.org/x/sync v0.0.0-20190423024810-112230192c58
20
+	golang.org/x/text v0.3.2
21
+	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
22
+	golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0
23
+	google.golang.org/api v0.8.0
24
+	google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64
25
+	google.golang.org/grpc v1.21.1
26
+	honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a
27
+	rsc.io/binaryregexp v0.2.0
28
+)
... ...
@@ -1,4 +1,4 @@
1
-// Copyright 2016 Google Inc. All Rights Reserved.
1
+// Copyright 2016 Google LLC
2 2
 //
3 3
 // Licensed under the Apache License, Version 2.0 (the "License");
4 4
 // you may not use this file except in compliance with the License.
... ...
@@ -26,7 +26,7 @@ import (
26 26
 
27 27
 // Repo is the current version of the client libraries in this
28 28
 // repo. It should be a date in YYYYMMDD format.
29
-const Repo = "20180226"
29
+const Repo = "20190802"
30 30
 
31 31
 // Go returns the Go runtime version. The returned string
32 32
 // has no whitespace.
... ...
@@ -67,5 +67,5 @@ func goVer(s string) string {
67 67
 }
68 68
 
69 69
 func notSemverRune(r rune) bool {
70
-	return strings.IndexRune("0123456789.", r) < 0
70
+	return !strings.ContainsRune("0123456789.", r)
71 71
 }
72 72
new file mode 100644
... ...
@@ -0,0 +1,35 @@
0
+## Stackdriver Logging [![GoDoc](https://godoc.org/cloud.google.com/go/logging?status.svg)](https://godoc.org/cloud.google.com/go/logging)
1
+
2
+- [About Stackdriver Logging](https://cloud.google.com/logging/)
3
+- [API documentation](https://cloud.google.com/logging/docs)
4
+- [Go client documentation](https://godoc.org/cloud.google.com/go/logging)
5
+- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging)
6
+
7
+### Example Usage
8
+
9
+First create a `logging.Client` to use throughout your application:
10
+[snip]:# (logging-1)
11
+```go
12
+ctx := context.Background()
13
+client, err := logging.NewClient(ctx, "my-project")
14
+if err != nil {
15
+	// TODO: Handle error.
16
+}
17
+```
18
+
19
+Usually, you'll want to add log entries to a buffer to be periodically flushed
20
+(automatically and asynchronously) to the Stackdriver Logging service.
21
+[snip]:# (logging-2)
22
+```go
23
+logger := client.Logger("my-log")
24
+logger.Log(logging.Entry{Payload: "something happened!"})
25
+```
26
+
27
+Close your client before your program exits, to flush any buffered log entries.
28
+[snip]:# (logging-3)
29
+```go
30
+err = client.Close()
31
+if err != nil {
32
+	// TODO: Handle error.
33
+}
34
+```
0 35
\ No newline at end of file
... ...
@@ -1,4 +1,4 @@
1
-// Copyright 2018 Google LLC
1
+// Copyright 2019 Google LLC
2 2
 //
3 3
 // Licensed under the Apache License, Version 2.0 (the "License");
4 4
 // you may not use this file except in compliance with the License.
... ...
@@ -12,17 +12,19 @@
12 12
 // See the License for the specific language governing permissions and
13 13
 // limitations under the License.
14 14
 
15
-// AUTO-GENERATED CODE. DO NOT EDIT.
15
+// Code generated by gapic-generator. DO NOT EDIT.
16 16
 
17 17
 package logging
18 18
 
19 19
 import (
20
+	"context"
21
+	"fmt"
20 22
 	"math"
23
+	"net/url"
21 24
 	"time"
22 25
 
23
-	"cloud.google.com/go/internal/version"
24
-	gax "github.com/googleapis/gax-go"
25
-	"golang.org/x/net/context"
26
+	"github.com/golang/protobuf/proto"
27
+	gax "github.com/googleapis/gax-go/v2"
26 28
 	"google.golang.org/api/iterator"
27 29
 	"google.golang.org/api/option"
28 30
 	"google.golang.org/api/transport"
... ...
@@ -63,8 +65,8 @@ func defaultConfigCallOptions() *ConfigCallOptions {
63 63
 					codes.Unavailable,
64 64
 				}, gax.Backoff{
65 65
 					Initial:    100 * time.Millisecond,
66
-					Max:        1000 * time.Millisecond,
67
-					Multiplier: 1.2,
66
+					Max:        60000 * time.Millisecond,
67
+					Multiplier: 1.3,
68 68
 				})
69 69
 			}),
70 70
 		},
... ...
@@ -73,7 +75,7 @@ func defaultConfigCallOptions() *ConfigCallOptions {
73 73
 		ListSinks:       retry[[2]string{"default", "idempotent"}],
74 74
 		GetSink:         retry[[2]string{"default", "idempotent"}],
75 75
 		CreateSink:      retry[[2]string{"default", "non_idempotent"}],
76
-		UpdateSink:      retry[[2]string{"default", "non_idempotent"}],
76
+		UpdateSink:      retry[[2]string{"default", "idempotent"}],
77 77
 		DeleteSink:      retry[[2]string{"default", "idempotent"}],
78 78
 		ListExclusions:  retry[[2]string{"default", "idempotent"}],
79 79
 		GetExclusion:    retry[[2]string{"default", "idempotent"}],
... ...
@@ -84,6 +86,8 @@ func defaultConfigCallOptions() *ConfigCallOptions {
84 84
 }
85 85
 
86 86
 // ConfigClient is a client for interacting with Stackdriver Logging API.
87
+//
88
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
87 89
 type ConfigClient struct {
88 90
 	// The connection to the service.
89 91
 	conn *grpc.ClientConn
... ...
@@ -100,8 +104,8 @@ type ConfigClient struct {
100 100
 
101 101
 // NewConfigClient creates a new config service v2 client.
102 102
 //
103
-// Service for configuring sinks used to export log entries outside of
104
-// Stackdriver Logging.
103
+// Service for configuring sinks used to export log entries out of
104
+// Logging.
105 105
 func NewConfigClient(ctx context.Context, opts ...option.ClientOption) (*ConfigClient, error) {
106 106
 	conn, err := transport.DialGRPC(ctx, append(defaultConfigClientOptions(), opts...)...)
107 107
 	if err != nil {
... ...
@@ -132,16 +136,18 @@ func (c *ConfigClient) Close() error {
132 132
 // the `x-goog-api-client` header passed on each request. Intended for
133 133
 // use by Google-written clients.
134 134
 func (c *ConfigClient) SetGoogleClientInfo(keyval ...string) {
135
-	kv := append([]string{"gl-go", version.Go()}, keyval...)
136
-	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
135
+	kv := append([]string{"gl-go", versionGo()}, keyval...)
136
+	kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
137 137
 	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
138 138
 }
139 139
 
140 140
 // ListSinks lists sinks.
141 141
 func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest, opts ...gax.CallOption) *LogSinkIterator {
142
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
142
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
143
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
143 144
 	opts = append(c.CallOptions.ListSinks[0:len(c.CallOptions.ListSinks):len(c.CallOptions.ListSinks)], opts...)
144 145
 	it := &LogSinkIterator{}
146
+	req = proto.Clone(req).(*loggingpb.ListSinksRequest)
145 147
 	it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogSink, string, error) {
146 148
 		var resp *loggingpb.ListSinksResponse
147 149
 		req.PageToken = pageToken
... ...
@@ -169,12 +175,15 @@ func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRe
169 169
 		return nextPageToken, nil
170 170
 	}
171 171
 	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
172
+	it.pageInfo.MaxSize = int(req.PageSize)
173
+	it.pageInfo.Token = req.PageToken
172 174
 	return it
173 175
 }
174 176
 
175 177
 // GetSink gets a sink.
176 178
 func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) {
177
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
179
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "sink_name", url.QueryEscape(req.GetSinkName())))
180
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
178 181
 	opts = append(c.CallOptions.GetSink[0:len(c.CallOptions.GetSink):len(c.CallOptions.GetSink)], opts...)
179 182
 	var resp *loggingpb.LogSink
180 183
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
... ...
@@ -193,7 +202,8 @@ func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkReques
193 193
 // writer_identity is not permitted to write to the destination.  A sink can
194 194
 // export log entries only from the resource owning the sink.
195 195
 func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) {
196
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
196
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
197
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
197 198
 	opts = append(c.CallOptions.CreateSink[0:len(c.CallOptions.CreateSink):len(c.CallOptions.CreateSink)], opts...)
198 199
 	var resp *loggingpb.LogSink
199 200
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
... ...
@@ -212,7 +222,8 @@ func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSink
212 212
 // The updated sink might also have a new writer_identity; see the
213 213
 // unique_writer_identity field.
214 214
 func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) {
215
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
215
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "sink_name", url.QueryEscape(req.GetSinkName())))
216
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
216 217
 	opts = append(c.CallOptions.UpdateSink[0:len(c.CallOptions.UpdateSink):len(c.CallOptions.UpdateSink)], opts...)
217 218
 	var resp *loggingpb.LogSink
218 219
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
... ...
@@ -229,7 +240,8 @@ func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSink
229 229
 // DeleteSink deletes a sink. If the sink has a unique writer_identity, then that
230 230
 // service account is also deleted.
231 231
 func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest, opts ...gax.CallOption) error {
232
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
232
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "sink_name", url.QueryEscape(req.GetSinkName())))
233
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
233 234
 	opts = append(c.CallOptions.DeleteSink[0:len(c.CallOptions.DeleteSink):len(c.CallOptions.DeleteSink)], opts...)
234 235
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
235 236
 		var err error
... ...
@@ -241,9 +253,11 @@ func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSink
241 241
 
242 242
 // ListExclusions lists all the exclusions in a parent resource.
243 243
 func (c *ConfigClient) ListExclusions(ctx context.Context, req *loggingpb.ListExclusionsRequest, opts ...gax.CallOption) *LogExclusionIterator {
244
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
244
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
245
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
245 246
 	opts = append(c.CallOptions.ListExclusions[0:len(c.CallOptions.ListExclusions):len(c.CallOptions.ListExclusions)], opts...)
246 247
 	it := &LogExclusionIterator{}
248
+	req = proto.Clone(req).(*loggingpb.ListExclusionsRequest)
247 249
 	it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogExclusion, string, error) {
248 250
 		var resp *loggingpb.ListExclusionsResponse
249 251
 		req.PageToken = pageToken
... ...
@@ -271,12 +285,15 @@ func (c *ConfigClient) ListExclusions(ctx context.Context, req *loggingpb.ListEx
271 271
 		return nextPageToken, nil
272 272
 	}
273 273
 	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
274
+	it.pageInfo.MaxSize = int(req.PageSize)
275
+	it.pageInfo.Token = req.PageToken
274 276
 	return it
275 277
 }
276 278
 
277 279
 // GetExclusion gets the description of an exclusion.
278 280
 func (c *ConfigClient) GetExclusion(ctx context.Context, req *loggingpb.GetExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) {
279
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
281
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
282
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
280 283
 	opts = append(c.CallOptions.GetExclusion[0:len(c.CallOptions.GetExclusion):len(c.CallOptions.GetExclusion)], opts...)
281 284
 	var resp *loggingpb.LogExclusion
282 285
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
... ...
@@ -294,7 +311,8 @@ func (c *ConfigClient) GetExclusion(ctx context.Context, req *loggingpb.GetExclu
294 294
 // Only log entries belonging to that resource can be excluded.
295 295
 // You can have up to 10 exclusions in a resource.
296 296
 func (c *ConfigClient) CreateExclusion(ctx context.Context, req *loggingpb.CreateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) {
297
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
297
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
298
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
298 299
 	opts = append(c.CallOptions.CreateExclusion[0:len(c.CallOptions.CreateExclusion):len(c.CallOptions.CreateExclusion)], opts...)
299 300
 	var resp *loggingpb.LogExclusion
300 301
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
... ...
@@ -310,7 +328,8 @@ func (c *ConfigClient) CreateExclusion(ctx context.Context, req *loggingpb.Creat
310 310
 
311 311
 // UpdateExclusion changes one or more properties of an existing exclusion.
312 312
 func (c *ConfigClient) UpdateExclusion(ctx context.Context, req *loggingpb.UpdateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) {
313
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
313
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
314
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
314 315
 	opts = append(c.CallOptions.UpdateExclusion[0:len(c.CallOptions.UpdateExclusion):len(c.CallOptions.UpdateExclusion)], opts...)
315 316
 	var resp *loggingpb.LogExclusion
316 317
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
... ...
@@ -326,7 +345,8 @@ func (c *ConfigClient) UpdateExclusion(ctx context.Context, req *loggingpb.Updat
326 326
 
327 327
 // DeleteExclusion deletes an exclusion.
328 328
 func (c *ConfigClient) DeleteExclusion(ctx context.Context, req *loggingpb.DeleteExclusionRequest, opts ...gax.CallOption) error {
329
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
329
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
330
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
330 331
 	opts = append(c.CallOptions.DeleteExclusion[0:len(c.CallOptions.DeleteExclusion):len(c.CallOptions.DeleteExclusion)], opts...)
331 332
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
332 333
 		var err error
... ...
@@ -1,4 +1,4 @@
1
-// Copyright 2018 Google LLC
1
+// Copyright 2019 Google LLC
2 2
 //
3 3
 // Licensed under the Apache License, Version 2.0 (the "License");
4 4
 // you may not use this file except in compliance with the License.
... ...
@@ -12,20 +12,35 @@
12 12
 // See the License for the specific language governing permissions and
13 13
 // limitations under the License.
14 14
 
15
-// AUTO-GENERATED CODE. DO NOT EDIT.
15
+// Code generated by gapic-generator. DO NOT EDIT.
16 16
 
17 17
 // Package logging is an auto-generated package for the
18 18
 // Stackdriver Logging API.
19 19
 //
20 20
 //   NOTE: This package is in alpha. It is not stable, and is likely to change.
21 21
 //
22
-// Writes log entries and manages your Stackdriver Logging configuration.
22
+// Writes log entries and manages your Logging configuration.
23
+//
24
+// Use of Context
25
+//
26
+// The ctx passed to NewClient is used for authentication requests and
27
+// for creating the underlying connection, but is not used for subsequent calls.
28
+// Individual methods on the client use the ctx given to them.
29
+//
30
+// To close the open connection, use the Close() method.
31
+//
32
+// For information about setting deadlines, reusing contexts, and more
33
+// please visit godoc.org/cloud.google.com/go.
23 34
 //
24 35
 // Use the client at cloud.google.com/go/logging in preference to this.
25 36
 package logging // import "cloud.google.com/go/logging/apiv2"
26 37
 
27 38
 import (
28
-	"golang.org/x/net/context"
39
+	"context"
40
+	"runtime"
41
+	"strings"
42
+	"unicode"
43
+
29 44
 	"google.golang.org/grpc/metadata"
30 45
 )
31 46
 
... ...
@@ -50,3 +65,42 @@ func DefaultAuthScopes() []string {
50 50
 		"https://www.googleapis.com/auth/logging.write",
51 51
 	}
52 52
 }
53
+
54
+// versionGo returns the Go runtime version. The returned string
55
+// has no whitespace, suitable for reporting in header.
56
+func versionGo() string {
57
+	const develPrefix = "devel +"
58
+
59
+	s := runtime.Version()
60
+	if strings.HasPrefix(s, develPrefix) {
61
+		s = s[len(develPrefix):]
62
+		if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
63
+			s = s[:p]
64
+		}
65
+		return s
66
+	}
67
+
68
+	notSemverRune := func(r rune) bool {
69
+		return strings.IndexRune("0123456789.", r) < 0
70
+	}
71
+
72
+	if strings.HasPrefix(s, "go1") {
73
+		s = s[2:]
74
+		var prerelease string
75
+		if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
76
+			s, prerelease = s[:p], s[p:]
77
+		}
78
+		if strings.HasSuffix(s, ".") {
79
+			s += "0"
80
+		} else if strings.Count(s, ".") < 2 {
81
+			s += ".0"
82
+		}
83
+		if prerelease != "" {
84
+			s += "-" + prerelease
85
+		}
86
+		return s
87
+	}
88
+	return "UNKNOWN"
89
+}
90
+
91
+const versionClient = "20190801"
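
As the "Use of Context" note in this package's documentation says, the ctx passed to the constructor is used only for authentication and for dialing the connection; each RPC takes its own context, and Close tears the connection down. A hypothetical sketch using NewConfigClient from this package (the generated loggingpb request types live in a separate package whose import path is not shown in this hunk):

```go
package main

import (
	"context"
	"log"

	logging "cloud.google.com/go/logging/apiv2"
)

func main() {
	ctx := context.Background()

	// ctx here is used for auth and for dialing the underlying gRPC connection.
	c, err := logging.NewConfigClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Close releases the open connection when the program exits.
	defer c.Close()

	// Individual RPCs take their own context and a request from the generated
	// loggingpb package (placeholder parent shown), e.g.:
	//   it := c.ListSinks(ctx, &loggingpb.ListSinksRequest{Parent: "projects/my-project"})
}
```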
... ...
@@ -1,4 +1,4 @@
1
-// Copyright 2018 Google LLC
1
+// Copyright 2019 Google LLC
2 2
 //
3 3
 // Licensed under the Apache License, Version 2.0 (the "License");
4 4
 // you may not use this file except in compliance with the License.
... ...
@@ -12,17 +12,19 @@
12 12
 // See the License for the specific language governing permissions and
13 13
 // limitations under the License.
14 14
 
15
-// AUTO-GENERATED CODE. DO NOT EDIT.
15
+// Code generated by gapic-generator. DO NOT EDIT.
16 16
 
17 17
 package logging
18 18
 
19 19
 import (
20
+	"context"
21
+	"fmt"
20 22
 	"math"
23
+	"net/url"
21 24
 	"time"
22 25
 
23
-	"cloud.google.com/go/internal/version"
24
-	gax "github.com/googleapis/gax-go"
25
-	"golang.org/x/net/context"
26
+	"github.com/golang/protobuf/proto"
27
+	gax "github.com/googleapis/gax-go/v2"
26 28
 	"google.golang.org/api/iterator"
27 29
 	"google.golang.org/api/option"
28 30
 	"google.golang.org/api/transport"
... ...
@@ -59,35 +61,24 @@ func defaultCallOptions() *CallOptions {
59 59
 					codes.Unavailable,
60 60
 				}, gax.Backoff{
61 61
 					Initial:    100 * time.Millisecond,
62
-					Max:        1000 * time.Millisecond,
63
-					Multiplier: 1.2,
64
-				})
65
-			}),
66
-		},
67
-		{"list", "idempotent"}: {
68
-			gax.WithRetry(func() gax.Retryer {
69
-				return gax.OnCodes([]codes.Code{
70
-					codes.DeadlineExceeded,
71
-					codes.Internal,
72
-					codes.Unavailable,
73
-				}, gax.Backoff{
74
-					Initial:    100 * time.Millisecond,
75
-					Max:        1000 * time.Millisecond,
76
-					Multiplier: 1.2,
62
+					Max:        60000 * time.Millisecond,
63
+					Multiplier: 1.3,
77 64
 				})
78 65
 			}),
79 66
 		},
80 67
 	}
81 68
 	return &CallOptions{
82 69
 		DeleteLog:                        retry[[2]string{"default", "idempotent"}],
83
-		WriteLogEntries:                  retry[[2]string{"default", "non_idempotent"}],
84
-		ListLogEntries:                   retry[[2]string{"list", "idempotent"}],
70
+		WriteLogEntries:                  retry[[2]string{"default", "idempotent"}],
71
+		ListLogEntries:                   retry[[2]string{"default", "idempotent"}],
85 72
 		ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}],
86
-		ListLogs: retry[[2]string{"default", "idempotent"}],
73
+		ListLogs:                         retry[[2]string{"default", "idempotent"}],
87 74
 	}
88 75
 }
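The retry settings above (initial 100 ms backoff, 60 s cap, 1.3 multiplier) are only package defaults; individual calls can still override them through gax call options. A minimal sketch under assumed names; the project/log path and the backoff values are hypothetical:

```
package main

import (
	"context"
	"log"
	"time"

	vkit "cloud.google.com/go/logging/apiv2"
	gax "github.com/googleapis/gax-go/v2"
	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
	"google.golang.org/grpc/codes"
)

func main() {
	ctx := context.Background()
	c, err := vkit.NewClient(ctx) // assumes application default credentials
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Override the default backoff for this one call (values are examples).
	retry := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    200 * time.Millisecond,
			Max:        30 * time.Second,
			Multiplier: 2,
		})
	})
	req := &loggingpb.DeleteLogRequest{
		LogName: "projects/my-project/logs/my-log", // hypothetical
	}
	if err := c.DeleteLog(ctx, req, retry); err != nil {
		log.Fatal(err)
	}
}
```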
89 76
 
90 77
 // Client is a client for interacting with Stackdriver Logging API.
78
+//
79
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
91 80
 type Client struct {
92 81
 	// The connection to the service.
93 82
 	conn *grpc.ClientConn
... ...
@@ -135,8 +126,8 @@ func (c *Client) Close() error {
135 135
 // the `x-goog-api-client` header passed on each request. Intended for
136 136
 // use by Google-written clients.
137 137
 func (c *Client) SetGoogleClientInfo(keyval ...string) {
138
-	kv := append([]string{"gl-go", version.Go()}, keyval...)
139
-	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
138
+	kv := append([]string{"gl-go", versionGo()}, keyval...)
139
+	kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
140 140
 	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
141 141
 }
142 142
 
... ...
@@ -145,7 +136,8 @@ func (c *Client) SetGoogleClientInfo(keyval ...string) {
145 145
 // Log entries written shortly before the delete operation might not be
146 146
 // deleted.
147 147
 func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest, opts ...gax.CallOption) error {
148
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
148
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "log_name", url.QueryEscape(req.GetLogName())))
149
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
149 150
 	opts = append(c.CallOptions.DeleteLog[0:len(c.CallOptions.DeleteLog):len(c.CallOptions.DeleteLog)], opts...)
150 151
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
151 152
 		var err error
... ...
@@ -155,13 +147,13 @@ func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest,
155 155
 	return err
156 156
 }
157 157
 
158
-// WriteLogEntries ## Log entry resources
159
-//
160
-// Writes log entries to Stackdriver Logging. This API method is the
161
-// only way to send log entries to Stackdriver Logging. This method
162
-// is used, directly or indirectly, by the Stackdriver Logging agent
163
-// (fluentd) and all logging libraries configured to use Stackdriver
164
-// Logging.
158
+// WriteLogEntries writes log entries to Logging. This API method is the
159
+// only way to send log entries to Logging. This method
160
+// is used, directly or indirectly, by the Logging agent
161
+// (fluentd) and all logging libraries configured to use Logging.
162
+// A single request may contain log entries for a maximum of 1000
163
+// different resources (projects, organizations, billing accounts or
164
+// folders).
165 165
 func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest, opts ...gax.CallOption) (*loggingpb.WriteLogEntriesResponse, error) {
166 166
 	ctx = insertMetadata(ctx, c.xGoogMetadata)
167 167
 	opts = append(c.CallOptions.WriteLogEntries[0:len(c.CallOptions.WriteLogEntries):len(c.CallOptions.WriteLogEntries)], opts...)
... ...
@@ -178,12 +170,13 @@ func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEnt
178 178
 }
179 179
 
180 180
 // ListLogEntries lists log entries.  Use this method to retrieve log entries from
181
-// Stackdriver Logging.  For ways to export log entries, see
181
+// Logging.  For ways to export log entries, see
182 182
 // Exporting Logs (at /logging/docs/export).
183 183
 func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest, opts ...gax.CallOption) *LogEntryIterator {
184 184
 	ctx = insertMetadata(ctx, c.xGoogMetadata)
185 185
 	opts = append(c.CallOptions.ListLogEntries[0:len(c.CallOptions.ListLogEntries):len(c.CallOptions.ListLogEntries)], opts...)
186 186
 	it := &LogEntryIterator{}
187
+	req = proto.Clone(req).(*loggingpb.ListLogEntriesRequest)
187 188
 	it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogEntry, string, error) {
188 189
 		var resp *loggingpb.ListLogEntriesResponse
189 190
 		req.PageToken = pageToken
... ...
@@ -211,15 +204,17 @@ func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntri
211 211
 		return nextPageToken, nil
212 212
 	}
213 213
 	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
214
+	it.pageInfo.MaxSize = int(req.PageSize)
215
+	it.pageInfo.Token = req.PageToken
214 216
 	return it
215 217
 }
216 218
 
217
-// ListMonitoredResourceDescriptors lists the descriptors for monitored resource types used by Stackdriver
218
-// Logging.
219
+// ListMonitoredResourceDescriptors lists the descriptors for monitored resource types used by Logging.
219 220
 func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator {
220 221
 	ctx = insertMetadata(ctx, c.xGoogMetadata)
221 222
 	opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...)
222 223
 	it := &MonitoredResourceDescriptorIterator{}
224
+	req = proto.Clone(req).(*loggingpb.ListMonitoredResourceDescriptorsRequest)
223 225
 	it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) {
224 226
 		var resp *loggingpb.ListMonitoredResourceDescriptorsResponse
225 227
 		req.PageToken = pageToken
... ...
@@ -247,15 +242,19 @@ func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *logg
247 247
 		return nextPageToken, nil
248 248
 	}
249 249
 	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
250
+	it.pageInfo.MaxSize = int(req.PageSize)
251
+	it.pageInfo.Token = req.PageToken
250 252
 	return it
251 253
 }
252 254
 
253 255
 // ListLogs lists the logs in projects, organizations, folders, or billing accounts.
254 256
 // Only logs that have entries are listed.
255 257
 func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest, opts ...gax.CallOption) *StringIterator {
256
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
258
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
259
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
257 260
 	opts = append(c.CallOptions.ListLogs[0:len(c.CallOptions.ListLogs):len(c.CallOptions.ListLogs)], opts...)
258 261
 	it := &StringIterator{}
262
+	req = proto.Clone(req).(*loggingpb.ListLogsRequest)
259 263
 	it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) {
260 264
 		var resp *loggingpb.ListLogsResponse
261 265
 		req.PageToken = pageToken
... ...
@@ -283,6 +282,8 @@ func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest, o
283 283
 		return nextPageToken, nil
284 284
 	}
285 285
 	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
286
+	it.pageInfo.MaxSize = int(req.PageSize)
287
+	it.pageInfo.Token = req.PageToken
286 288
 	return it
287 289
 }
288 290
 
... ...
@@ -1,4 +1,4 @@
1
-// Copyright 2018 Google LLC
1
+// Copyright 2019 Google LLC
2 2
 //
3 3
 // Licensed under the Apache License, Version 2.0 (the "License");
4 4
 // you may not use this file except in compliance with the License.
... ...
@@ -12,17 +12,19 @@
12 12
 // See the License for the specific language governing permissions and
13 13
 // limitations under the License.
14 14
 
15
-// AUTO-GENERATED CODE. DO NOT EDIT.
15
+// Code generated by gapic-generator. DO NOT EDIT.
16 16
 
17 17
 package logging
18 18
 
19 19
 import (
20
+	"context"
21
+	"fmt"
20 22
 	"math"
23
+	"net/url"
21 24
 	"time"
22 25
 
23
-	"cloud.google.com/go/internal/version"
24
-	gax "github.com/googleapis/gax-go"
25
-	"golang.org/x/net/context"
26
+	"github.com/golang/protobuf/proto"
27
+	gax "github.com/googleapis/gax-go/v2"
26 28
 	"google.golang.org/api/iterator"
27 29
 	"google.golang.org/api/option"
28 30
 	"google.golang.org/api/transport"
... ...
@@ -58,8 +60,8 @@ func defaultMetricsCallOptions() *MetricsCallOptions {
58 58
 					codes.Unavailable,
59 59
 				}, gax.Backoff{
60 60
 					Initial:    100 * time.Millisecond,
61
-					Max:        1000 * time.Millisecond,
62
-					Multiplier: 1.2,
61
+					Max:        60000 * time.Millisecond,
62
+					Multiplier: 1.3,
63 63
 				})
64 64
 			}),
65 65
 		},
... ...
@@ -68,12 +70,14 @@ func defaultMetricsCallOptions() *MetricsCallOptions {
68 68
 		ListLogMetrics:  retry[[2]string{"default", "idempotent"}],
69 69
 		GetLogMetric:    retry[[2]string{"default", "idempotent"}],
70 70
 		CreateLogMetric: retry[[2]string{"default", "non_idempotent"}],
71
-		UpdateLogMetric: retry[[2]string{"default", "non_idempotent"}],
71
+		UpdateLogMetric: retry[[2]string{"default", "idempotent"}],
72 72
 		DeleteLogMetric: retry[[2]string{"default", "idempotent"}],
73 73
 	}
74 74
 }
75 75
 
76 76
 // MetricsClient is a client for interacting with Stackdriver Logging API.
77
+//
78
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
77 79
 type MetricsClient struct {
78 80
 	// The connection to the service.
79 81
 	conn *grpc.ClientConn
... ...
@@ -121,16 +125,18 @@ func (c *MetricsClient) Close() error {
121 121
 // the `x-goog-api-client` header passed on each request. Intended for
122 122
 // use by Google-written clients.
123 123
 func (c *MetricsClient) SetGoogleClientInfo(keyval ...string) {
124
-	kv := append([]string{"gl-go", version.Go()}, keyval...)
125
-	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
124
+	kv := append([]string{"gl-go", versionGo()}, keyval...)
125
+	kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
126 126
 	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
127 127
 }
128 128
 
129 129
 // ListLogMetrics lists logs-based metrics.
130 130
 func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest, opts ...gax.CallOption) *LogMetricIterator {
131
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
131
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
132
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
132 133
 	opts = append(c.CallOptions.ListLogMetrics[0:len(c.CallOptions.ListLogMetrics):len(c.CallOptions.ListLogMetrics)], opts...)
133 134
 	it := &LogMetricIterator{}
135
+	req = proto.Clone(req).(*loggingpb.ListLogMetricsRequest)
134 136
 	it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogMetric, string, error) {
135 137
 		var resp *loggingpb.ListLogMetricsResponse
136 138
 		req.PageToken = pageToken
... ...
@@ -158,12 +164,15 @@ func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListL
158 158
 		return nextPageToken, nil
159 159
 	}
160 160
 	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
161
+	it.pageInfo.MaxSize = int(req.PageSize)
162
+	it.pageInfo.Token = req.PageToken
161 163
 	return it
162 164
 }
163 165
 
164 166
 // GetLogMetric gets a logs-based metric.
165 167
 func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) {
166
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
168
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "metric_name", url.QueryEscape(req.GetMetricName())))
169
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
167 170
 	opts = append(c.CallOptions.GetLogMetric[0:len(c.CallOptions.GetLogMetric):len(c.CallOptions.GetLogMetric)], opts...)
168 171
 	var resp *loggingpb.LogMetric
169 172
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
... ...
@@ -179,7 +188,8 @@ func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogM
179 179
 
180 180
 // CreateLogMetric creates a logs-based metric.
181 181
 func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) {
182
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
182
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
183
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
183 184
 	opts = append(c.CallOptions.CreateLogMetric[0:len(c.CallOptions.CreateLogMetric):len(c.CallOptions.CreateLogMetric)], opts...)
184 185
 	var resp *loggingpb.LogMetric
185 186
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
... ...
@@ -195,7 +205,8 @@ func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.Crea
195 195
 
196 196
 // UpdateLogMetric creates or updates a logs-based metric.
197 197
 func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) {
198
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
198
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "metric_name", url.QueryEscape(req.GetMetricName())))
199
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
199 200
 	opts = append(c.CallOptions.UpdateLogMetric[0:len(c.CallOptions.UpdateLogMetric):len(c.CallOptions.UpdateLogMetric)], opts...)
200 201
 	var resp *loggingpb.LogMetric
201 202
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
... ...
@@ -211,7 +222,8 @@ func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.Upda
211 211
 
212 212
 // DeleteLogMetric deletes a logs-based metric.
213 213
 func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest, opts ...gax.CallOption) error {
214
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
214
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "metric_name", url.QueryEscape(req.GetMetricName())))
215
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
215 216
 	opts = append(c.CallOptions.DeleteLogMetric[0:len(c.CallOptions.DeleteLogMetric):len(c.CallOptions.DeleteLogMetric)], opts...)
216 217
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
217 218
 		var err error
... ...
@@ -1,4 +1,4 @@
1
-// Copyright 2016 Google Inc. All Rights Reserved.
1
+// Copyright 2016 Google LLC
2 2
 //
3 3
 // Licensed under the Apache License, Version 2.0 (the "License");
4 4
 // you may not use this file except in compliance with the License.
... ...
@@ -21,9 +21,6 @@ This client uses Logging API v2.
21 21
 See https://cloud.google.com/logging/docs/api/v2/ for an introduction to the API.
22 22
 
23 23
 
24
-Note: This package is in beta.  Some backwards-incompatible changes may occur.
25
-
26
-
27 24
 Creating a Client
28 25
 
29 26
 Use a Client to interact with the Stackdriver Logging API.
... ...
@@ -65,7 +62,10 @@ For critical errors, you may want to send your log entries immediately.
65 65
 LogSync is slow and will block until the log entry has been sent, so it is
66 66
 not recommended for normal use.
67 67
 
68
-	lg.LogSync(ctx, logging.Entry{Payload: "ALERT! Something critical happened!"})
68
+	err = lg.LogSync(ctx, logging.Entry{Payload: "ALERT! Something critical happened!"})
69
+	if err != nil {
70
+		// TODO: Handle error.
71
+	}
69 72
 
70 73
 
71 74
 Payloads
... ...
@@ -85,11 +85,11 @@ If you have a []byte of JSON, wrap it in json.RawMessage:
85 85
 	lg.Log(logging.Entry{Payload: json.RawMessage(j)})
86 86
 
87 87
 
88
-The Standard Logger Interface
88
+The Standard Logger
89 89
 
90 90
 You may want to use a standard log.Logger in your program.
91 91
 
92
-	// stdlg implements log.Logger
92
+	// stdlg is an instance of *log.Logger.
93 93
 	stdlg := lg.StandardLogger(logging.Info)
94 94
 	stdlg.Println("some info")
95 95
 
... ...
@@ -113,5 +113,22 @@ running from a Google Cloud Platform VM, select "GCE VM Instance". Otherwise, se
113 113
 accounts can be viewed on the command line with the "gcloud logging read" command.
114 114
 
115 115
 
116
+Grouping Logs by Request
117
+
118
+To group all the log entries written during a single HTTP request, create two
119
+Loggers, a "parent" and a "child," with different log IDs. Both should be in the same
120
+project, and have the same MonitoredResource type and labels.
121
+
122
+- Parent entries must have HTTPRequest.Request populated. (Strictly speaking, only the URL is necessary.)
123
+
124
+- A child entry's timestamp must be within the time interval covered by the parent request (i.e., older
125
+than parent.Timestamp, and newer than parent.Timestamp - parent.HTTPRequest.Latency, assuming the
126
+parent timestamp marks the end of the request).
127
+
128
+- The trace field must be populated in all of the entries and match exactly.
129
+
130
+You should observe the child log entries grouped under the parent on the console. The
131
+parent entry will not inherit the severity of its children; you must update the
132
+parent severity yourself.
116 133
 */
117 134
 package logging // import "cloud.google.com/go/logging"
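A minimal sketch of the parent/child grouping described in the new doc section above, assuming a hypothetical project ID, log IDs, request URL and trace ID; it only illustrates the pattern, not a complete handler:

```
package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"cloud.google.com/go/logging"
)

func main() {
	ctx := context.Background()
	client, err := logging.NewClient(ctx, "my-project") // hypothetical project ID
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Two loggers in the same project: the parent carries the HTTPRequest,
	// the children carry the application messages.
	parent := client.Logger("request_log") // hypothetical log IDs
	child := client.Logger("app_log")

	req, err := http.NewRequest("GET", "https://example.com/some/path", nil)
	if err != nil {
		log.Fatal(err)
	}
	trace := "projects/my-project/traces/105445aa7843bc8bf206b120001000"

	start := time.Now()
	child.Log(logging.Entry{Payload: "handling request", Trace: trace})
	child.Log(logging.Entry{Payload: "done", Trace: trace})
	parent.Log(logging.Entry{
		Trace:       trace,
		HTTPRequest: &logging.HTTPRequest{Request: req, Latency: time.Since(start)},
	})
}
```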
118 135
new file mode 100644
... ...
@@ -0,0 +1,15 @@
0
+module cloud.google.com/go/logging
1
+
2
+go 1.9
3
+
4
+require (
5
+	cloud.google.com/go v0.43.0
6
+	github.com/golang/protobuf v1.3.1
7
+	github.com/google/go-cmp v0.3.0
8
+	github.com/googleapis/gax-go/v2 v2.0.5
9
+	go.opencensus.io v0.22.0
10
+	golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
11
+	google.golang.org/api v0.7.0
12
+	google.golang.org/genproto v0.0.0-20190708153700-3bdd9d9f5532
13
+	google.golang.org/grpc v1.21.1
14
+)
0 15
new file mode 100644
... ...
@@ -0,0 +1,22 @@
0
+// Copyright 2019 Google LLC
1
+//
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+//      http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+// This file, and the cloud.google.com/go import, won't actually become part of
15
+// the resultant binary.
16
+// +build modhack
17
+
18
+package logging
19
+
20
+// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
21
+import _ "cloud.google.com/go"
... ...
@@ -1,4 +1,4 @@
1
-// Copyright 2016 Google Inc. All Rights Reserved.
1
+// Copyright 2016 Google LLC
2 2
 //
3 3
 // Licensed under the Apache License, Version 2.0 (the "License");
4 4
 // you may not use this file except in compliance with the License.
... ...
@@ -20,15 +20,17 @@ import (
20 20
 )
21 21
 
22 22
 const (
23
+	// ProdAddr is the production address.
23 24
 	ProdAddr = "logging.googleapis.com:443"
24
-	Version  = "0.2.0"
25 25
 )
26 26
 
27
+// LogPath creates a formatted path from a parent and a logID.
27 28
 func LogPath(parent, logID string) string {
28 29
 	logID = strings.Replace(logID, "/", "%2F", -1)
29 30
 	return fmt.Sprintf("%s/logs/%s", parent, logID)
30 31
 }
31 32
 
33
+// LogIDFromPath parses and returns the ID from a log path.
32 34
 func LogIDFromPath(parent, path string) string {
33 35
 	start := len(parent) + len("/logs/")
34 36
 	if len(path) < start {
... ...
@@ -1,4 +1,4 @@
1
-// Copyright 2016 Google Inc. All Rights Reserved.
1
+// Copyright 2016 Google LLC
2 2
 //
3 3
 // Licensed under the Apache License, Version 2.0 (the "License");
4 4
 // you may not use this file except in compliance with the License.
... ...
@@ -25,16 +25,19 @@
25 25
 package logging
26 26
 
27 27
 import (
28
+	"bytes"
29
+	"context"
28 30
 	"encoding/json"
29 31
 	"errors"
30 32
 	"fmt"
31 33
 	"log"
32
-	"math"
33 34
 	"net/http"
35
+	"regexp"
34 36
 	"strconv"
35 37
 	"strings"
36 38
 	"sync"
37 39
 	"time"
40
+	"unicode/utf8"
38 41
 
39 42
 	"cloud.google.com/go/compute/metadata"
40 43
 	"cloud.google.com/go/internal/version"
... ...
@@ -44,7 +47,6 @@ import (
44 44
 	"github.com/golang/protobuf/ptypes"
45 45
 	structpb "github.com/golang/protobuf/ptypes/struct"
46 46
 	tspb "github.com/golang/protobuf/ptypes/timestamp"
47
-	"golang.org/x/net/context"
48 47
 	"google.golang.org/api/option"
49 48
 	"google.golang.org/api/support/bundler"
50 49
 	mrpb "google.golang.org/genproto/googleapis/api/monitoredres"
... ...
@@ -53,13 +55,13 @@ import (
53 53
 )
54 54
 
55 55
 const (
56
-	// Scope for reading from the logging service.
56
+	// ReadScope is the scope for reading from the logging service.
57 57
 	ReadScope = "https://www.googleapis.com/auth/logging.read"
58 58
 
59
-	// Scope for writing to the logging service.
59
+	// WriteScope is the scope for writing to the logging service.
60 60
 	WriteScope = "https://www.googleapis.com/auth/logging.write"
61 61
 
62
-	// Scope for administrative actions on the logging service.
62
+	// AdminScope is the scope for administrative actions on the logging service.
63 63
 	AdminScope = "https://www.googleapis.com/auth/logging.admin"
64 64
 )
65 65
 
... ...
@@ -234,7 +236,7 @@ type Logger struct {
234 234
 	// Options
235 235
 	commonResource *mrpb.MonitoredResource
236 236
 	commonLabels   map[string]string
237
-	writeTimeout   time.Duration
237
+	ctxFunc        func() (context.Context, func())
238 238
 }
239 239
 
240 240
 // A LoggerOption is a configuration option for a Logger.
... ...
@@ -274,12 +276,17 @@ func detectResource() *mrpb.MonitoredResource {
274 274
 		if err != nil {
275 275
 			return
276 276
 		}
277
+		name, err := metadata.InstanceName()
278
+		if err != nil {
279
+			return
280
+		}
277 281
 		detectedResource.pb = &mrpb.MonitoredResource{
278 282
 			Type: "gce_instance",
279 283
 			Labels: map[string]string{
280
-				"project_id":  projectID,
281
-				"instance_id": id,
282
-				"zone":        zone,
284
+				"project_id":    projectID,
285
+				"instance_id":   id,
286
+				"instance_name": name,
287
+				"zone":          zone,
283 288
 			},
284 289
 		}
285 290
 	})
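With instance_name now part of the detected gce_instance resource, callers that need different labels can still bypass detection entirely via the existing CommonResource option. A minimal sketch with hypothetical label values:

```
package main

import (
	"context"
	"log"

	"cloud.google.com/go/logging"
	mrpb "google.golang.org/genproto/googleapis/api/monitoredres"
)

func main() {
	ctx := context.Background()
	client, err := logging.NewClient(ctx, "my-project") // hypothetical
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	lg := client.Logger("my-log", logging.CommonResource(&mrpb.MonitoredResource{
		Type: "gce_instance",
		Labels: map[string]string{ // hypothetical values
			"project_id":    "my-project",
			"instance_id":   "1234567890123456789",
			"instance_name": "my-instance",
			"zone":          "us-central1-a",
		},
	}))
	lg.Log(logging.Entry{Payload: "hello"})
}
```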
... ...
@@ -398,6 +405,23 @@ type bufferedByteLimit int
398 398
 
399 399
 func (b bufferedByteLimit) set(l *Logger) { l.bundler.BufferedByteLimit = int(b) }
400 400
 
401
+// ContextFunc is a function that will be called to obtain a context.Context for the
402
+// WriteLogEntries RPC executed in the background for calls to Logger.Log. The
403
+// default is a function that always returns context.Background. The second return
404
+// value of the function is a function to call after the RPC completes.
405
+//
406
+// The function is not used for calls to Logger.LogSync, since the caller can pass
407
+// in the context directly.
408
+//
409
+// This option is EXPERIMENTAL. It may be changed or removed.
410
+func ContextFunc(f func() (ctx context.Context, afterCall func())) LoggerOption {
411
+	return contextFunc(f)
412
+}
413
+
414
+type contextFunc func() (ctx context.Context, afterCall func())
415
+
416
+func (c contextFunc) set(l *Logger) { l.ctxFunc = c }
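A minimal usage sketch for the experimental ContextFunc option above, using a sync.WaitGroup to track the background WriteLogEntries RPCs; the project and log IDs are hypothetical:

```
package main

import (
	"context"
	"log"
	"sync"

	"cloud.google.com/go/logging"
)

func main() {
	ctx := context.Background()
	client, err := logging.NewClient(ctx, "my-project") // hypothetical
	if err != nil {
		log.Fatal(err)
	}

	var wg sync.WaitGroup
	lg := client.Logger("my-log", logging.ContextFunc(func() (context.Context, func()) {
		wg.Add(1) // one background WriteLogEntries RPC is about to start
		return context.Background(), wg.Done
	}))

	lg.Log(logging.Entry{Payload: "hello"})

	if err := client.Close(); err != nil { // Close flushes buffered entries
		log.Fatal(err)
	}
	wg.Wait()
}
```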
417
+
401 418
 // Logger returns a Logger that will write entries with the given log ID, such as
402 419
 // "syslog". A log ID must be less than 512 characters long and can only
403 420
 // include the following characters: upper and lower case alphanumeric
... ...
@@ -412,6 +436,7 @@ func (c *Client) Logger(logID string, opts ...LoggerOption) *Logger {
412 412
 		client:         c,
413 413
 		logName:        internal.LogPath(c.parent, logID),
414 414
 		commonResource: r,
415
+		ctxFunc:        func() (context.Context, func()) { return context.Background(), nil },
415 416
 	}
416 417
 	l.bundler = bundler.NewBundler(&logpb.LogEntry{}, func(entries interface{}) {
417 418
 		l.writeLogEntries(entries.([]*logpb.LogEntry))
... ...
@@ -578,6 +603,17 @@ type Entry struct {
578 578
 	// if any. If it contains a relative resource name, the name is assumed to
579 579
 	// be relative to //tracing.googleapis.com.
580 580
 	Trace string
581
+
582
+	// ID of the span within the trace associated with the log entry.
583
+	// The ID is a 16-character hexadecimal encoding of an 8-byte array.
584
+	SpanID string
585
+
586
+	// If set, indicates that this request was sampled.
587
+	TraceSampled bool
588
+
589
+	// Optional. Source code location information associated with the log entry,
590
+	// if any.
591
+	SourceLocation *logpb.LogEntrySourceLocation
581 592
 }
582 593
 
583 594
 // HTTPRequest contains an http.Request as well as additional
... ...
@@ -631,7 +667,7 @@ func fromHTTPRequest(r *HTTPRequest) *logtypepb.HttpRequest {
631 631
 	u.Fragment = ""
632 632
 	pb := &logtypepb.HttpRequest{
633 633
 		RequestMethod:                  r.Request.Method,
634
-		RequestUrl:                     u.String(),
634
+		RequestUrl:                     fixUTF8(u.String()),
635 635
 		RequestSize:                    r.RequestSize,
636 636
 		Status:                         int32(r.Status),
637 637
 		ResponseSize:                   r.ResponseSize,
... ...
@@ -648,6 +684,27 @@ func fromHTTPRequest(r *HTTPRequest) *logtypepb.HttpRequest {
648 648
 	return pb
649 649
 }
650 650
 
651
+// fixUTF8 is a helper that fixes an invalid UTF-8 string by replacing
652
+// invalid UTF-8 runes with the Unicode replacement character (U+FFFD).
653
+// See Issue https://github.com/googleapis/google-cloud-go/issues/1383.
654
+func fixUTF8(s string) string {
655
+	if utf8.ValidString(s) {
656
+		return s
657
+	}
658
+
659
+	// Otherwise time to build the sequence.
660
+	buf := new(bytes.Buffer)
661
+	buf.Grow(len(s))
662
+	for _, r := range s {
663
+		if utf8.ValidRune(r) {
664
+			buf.WriteRune(r)
665
+		} else {
666
+			buf.WriteRune('\uFFFD')
667
+		}
668
+	}
669
+	return buf.String()
670
+}
671
+
651 672
 // toProtoStruct converts v, which must marshal into a JSON object,
652 673
 // into a Google Struct proto.
653 674
 func toProtoStruct(v interface{}) (*structpb.Struct, error) {
... ...
@@ -713,7 +770,7 @@ func jsonValueToStructValue(v interface{}) *structpb.Value {
713 713
 // Prefer Log for most uses.
714 714
 // TODO(jba): come up with a better name (LogNow?) or eliminate.
715 715
 func (l *Logger) LogSync(ctx context.Context, e Entry) error {
716
-	ent, err := toLogEntry(e)
716
+	ent, err := l.toLogEntry(e)
717 717
 	if err != nil {
718 718
 		return err
719 719
 	}
... ...
@@ -728,7 +785,7 @@ func (l *Logger) LogSync(ctx context.Context, e Entry) error {
728 728
 
729 729
 // Log buffers the Entry for output to the logging service. It never blocks.
730 730
 func (l *Logger) Log(e Entry) {
731
-	ent, err := toLogEntry(e)
731
+	ent, err := l.toLogEntry(e)
732 732
 	if err != nil {
733 733
 		l.client.error(err)
734 734
 		return
... ...
@@ -756,12 +813,16 @@ func (l *Logger) writeLogEntries(entries []*logpb.LogEntry) {
756 756
 		Labels:   l.commonLabels,
757 757
 		Entries:  entries,
758 758
 	}
759
-	ctx, cancel := context.WithTimeout(context.Background(), defaultWriteTimeout)
759
+	ctx, afterCall := l.ctxFunc()
760
+	ctx, cancel := context.WithTimeout(ctx, defaultWriteTimeout)
760 761
 	defer cancel()
761 762
 	_, err := l.client.client.WriteLogEntries(ctx, req)
762 763
 	if err != nil {
763 764
 		l.client.error(err)
764 765
 	}
766
+	if afterCall != nil {
767
+		afterCall()
768
+	}
765 769
 }
766 770
 
767 771
 // StandardLogger returns a *log.Logger for the provided severity.
... ...
@@ -771,14 +832,38 @@ func (l *Logger) writeLogEntries(entries []*logpb.LogEntry) {
771 771
 // (for example by calling SetFlags or SetPrefix).
772 772
 func (l *Logger) StandardLogger(s Severity) *log.Logger { return l.stdLoggers[s] }
773 773
 
774
-func trunc32(i int) int32 {
775
-	if i > math.MaxInt32 {
776
-		i = math.MaxInt32
774
+var reCloudTraceContext = regexp.MustCompile(`([a-f\d]+)/([a-f\d]+);o=(\d)`)
775
+
776
+func deconstructXCloudTraceContext(s string) (traceID, spanID string, traceSampled bool) {
777
+	// As per the format described at https://cloud.google.com/trace/docs/troubleshooting#force-trace
778
+	//    "X-Cloud-Trace-Context: TRACE_ID/SPAN_ID;o=TRACE_TRUE"
779
+	// for example:
780
+	//    "X-Cloud-Trace-Context: 105445aa7843bc8bf206b120001000/0;o=1"
781
+	//
782
+	// We expect:
783
+	//   * traceID:         "105445aa7843bc8bf206b120001000"
784
+	//   * spanID:          ""
785
+	//   * traceSampled:    true
786
+	matches := reCloudTraceContext.FindAllStringSubmatch(s, -1)
787
+	if len(matches) != 1 {
788
+		return
789
+	}
790
+
791
+	sub := matches[0]
792
+	if len(sub) != 4 {
793
+		return
777 794
 	}
778
-	return int32(i)
795
+
796
+	traceID, spanID = sub[1], sub[2]
797
+	if spanID == "0" {
798
+		spanID = ""
799
+	}
800
+	traceSampled = sub[3] == "1"
801
+
802
+	return
779 803
 }
780 804
 
781
-func toLogEntry(e Entry) (*logpb.LogEntry, error) {
805
+func (l *Logger) toLogEntry(e Entry) (*logpb.LogEntry, error) {
782 806
 	if e.LogName != "" {
783 807
 		return nil, errors.New("logging: Entry.LogName should be not be set when writing")
784 808
 	}
... ...
@@ -790,15 +875,37 @@ func toLogEntry(e Entry) (*logpb.LogEntry, error) {
790 790
 	if err != nil {
791 791
 		return nil, err
792 792
 	}
793
+	if e.Trace == "" && e.HTTPRequest != nil && e.HTTPRequest.Request != nil {
794
+		traceHeader := e.HTTPRequest.Request.Header.Get("X-Cloud-Trace-Context")
795
+		if traceHeader != "" {
796
+			// Set to a relative resource name, as described at
797
+			// https://cloud.google.com/appengine/docs/flexible/go/writing-application-logs.
798
+			traceID, spanID, traceSampled := deconstructXCloudTraceContext(traceHeader)
799
+			if traceID != "" {
800
+				e.Trace = fmt.Sprintf("%s/traces/%s", l.client.parent, traceID)
801
+			}
802
+			if e.SpanID == "" {
803
+				e.SpanID = spanID
804
+			}
805
+
806
+			// If we previously hadn't set TraceSampled, let's retrieve it
807
+			// from the HTTP request's header, as per:
808
+			//   https://cloud.google.com/trace/docs/troubleshooting#force-trace
809
+			e.TraceSampled = e.TraceSampled || traceSampled
810
+		}
811
+	}
793 812
 	ent := &logpb.LogEntry{
794
-		Timestamp:   ts,
795
-		Severity:    logtypepb.LogSeverity(e.Severity),
796
-		InsertId:    e.InsertID,
797
-		HttpRequest: fromHTTPRequest(e.HTTPRequest),
798
-		Operation:   e.Operation,
799
-		Labels:      e.Labels,
800
-		Trace:       e.Trace,
801
-		Resource:    e.Resource,
813
+		Timestamp:      ts,
814
+		Severity:       logtypepb.LogSeverity(e.Severity),
815
+		InsertId:       e.InsertID,
816
+		HttpRequest:    fromHTTPRequest(e.HTTPRequest),
817
+		Operation:      e.Operation,
818
+		Labels:         e.Labels,
819
+		Trace:          e.Trace,
820
+		SpanId:         e.SpanID,
821
+		Resource:       e.Resource,
822
+		SourceLocation: e.SourceLocation,
823
+		TraceSampled:   e.TraceSampled,
802 824
 	}
803 825
 	switch p := e.Payload.(type) {
804 826
 	case string:
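The trace handling added to toLogEntry above means callers no longer have to populate Trace, SpanID and TraceSampled by hand when the incoming request already carries an X-Cloud-Trace-Context header. A minimal sketch with hypothetical project, log and trace values:

```
package main

import (
	"context"
	"log"
	"net/http"

	"cloud.google.com/go/logging"
)

func main() {
	ctx := context.Background()
	client, err := logging.NewClient(ctx, "my-project") // hypothetical project ID
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// An incoming request that already carries a trace header (values are made up).
	req, err := http.NewRequest("GET", "https://example.com/some/path", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("X-Cloud-Trace-Context", "105445aa7843bc8bf206b120001000/1;o=1")

	// Trace, SpanID and TraceSampled are filled in from the header because
	// Entry.Trace is left empty here.
	client.Logger("my-log").Log(logging.Entry{
		Payload:     "traced entry",
		HTTPRequest: &logging.HTTPRequest{Request: req},
	})
}
```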
805 827
new file mode 100644
... ...
@@ -0,0 +1,33 @@
0
+// +build tools
1
+
2
+// Copyright 2018 Google LLC
3
+//
4
+// Licensed under the Apache License, Version 2.0 (the "License");
5
+// you may not use this file except in compliance with the License.
6
+// You may obtain a copy of the License at
7
+//
8
+//      http://www.apache.org/licenses/LICENSE-2.0
9
+//
10
+// Unless required by applicable law or agreed to in writing, software
11
+// distributed under the License is distributed on an "AS IS" BASIS,
12
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+// See the License for the specific language governing permissions and
14
+// limitations under the License.
15
+
16
+// This package exists to cause `go mod` and `go get` to believe these tools
17
+// are dependencies, even though they are not runtime dependencies of any
18
+// package (these are tools used by our CI builds). This means they will appear
19
+// in our `go.mod` file, but will not be a part of the build. Also, since the
20
+// build target is something non-existent, these should not be included in any
21
+// binaries.
22
+
23
+package cloud
24
+
25
+import (
26
+	_ "github.com/golang/protobuf/protoc-gen-go"
27
+	_ "github.com/jstemmer/go-junit-report"
28
+	_ "golang.org/x/exp/cmd/apidiff"
29
+	_ "golang.org/x/lint/golint"
30
+	_ "golang.org/x/tools/cmd/goimports"
31
+	_ "honnef.co/go/tools/cmd/staticcheck"
32
+)
0 33
new file mode 100644
... ...
@@ -0,0 +1,2808 @@
0
+// Go support for Protocol Buffers - Google's data interchange format
1
+//
2
+// Copyright 2010 The Go Authors.  All rights reserved.
3
+// https://github.com/golang/protobuf
4
+//
5
+// Redistribution and use in source and binary forms, with or without
6
+// modification, are permitted provided that the following conditions are
7
+// met:
8
+//
9
+//     * Redistributions of source code must retain the above copyright
10
+// notice, this list of conditions and the following disclaimer.
11
+//     * Redistributions in binary form must reproduce the above
12
+// copyright notice, this list of conditions and the following disclaimer
13
+// in the documentation and/or other materials provided with the
14
+// distribution.
15
+//     * Neither the name of Google Inc. nor the names of its
16
+// contributors may be used to endorse or promote products derived from
17
+// this software without specific prior written permission.
18
+//
19
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
+
31
+/*
32
+	The code generator for the plugin for the Google protocol buffer compiler.
33
+	It generates Go code from the protocol buffer description files read by the
34
+	main routine.
35
+*/
36
+package generator
37
+
38
+import (
39
+	"bufio"
40
+	"bytes"
41
+	"compress/gzip"
42
+	"crypto/sha256"
43
+	"encoding/hex"
44
+	"fmt"
45
+	"go/ast"
46
+	"go/build"
47
+	"go/parser"
48
+	"go/printer"
49
+	"go/token"
50
+	"log"
51
+	"os"
52
+	"path"
53
+	"sort"
54
+	"strconv"
55
+	"strings"
56
+	"unicode"
57
+	"unicode/utf8"
58
+
59
+	"github.com/golang/protobuf/proto"
60
+	"github.com/golang/protobuf/protoc-gen-go/generator/internal/remap"
61
+
62
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
63
+	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
64
+)
65
+
66
+// generatedCodeVersion indicates a version of the generated code.
67
+// It is incremented whenever an incompatibility between the generated code and
68
+// proto package is introduced; the generated code references
69
+// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion).
70
+const generatedCodeVersion = 3
71
+
72
+// A Plugin provides functionality to add to the output during Go code generation,
73
+// such as to produce RPC stubs.
74
+type Plugin interface {
75
+	// Name identifies the plugin.
76
+	Name() string
77
+	// Init is called once after data structures are built but before
78
+	// code generation begins.
79
+	Init(g *Generator)
80
+	// Generate produces the code generated by the plugin for this file,
81
+	// except for the imports, by calling the generator's methods P, In, and Out.
82
+	Generate(file *FileDescriptor)
83
+	// GenerateImports produces the import declarations for this file.
84
+	// It is called after Generate.
85
+	GenerateImports(file *FileDescriptor)
86
+}
87
+
88
+var plugins []Plugin
89
+
90
+// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated.
91
+// It is typically called during initialization.
92
+func RegisterPlugin(p Plugin) {
93
+	plugins = append(plugins, p)
94
+}
95
+
96
+// A GoImportPath is the import path of a Go package. e.g., "google.golang.org/genproto/protobuf".
97
+type GoImportPath string
98
+
99
+func (p GoImportPath) String() string { return strconv.Quote(string(p)) }
100
+
101
+// A GoPackageName is the name of a Go package. e.g., "protobuf".
102
+type GoPackageName string
103
+
104
+// Each type we import as a protocol buffer (other than FileDescriptorProto) needs
105
+// a pointer to the FileDescriptorProto that represents it.  These types achieve that
106
+// wrapping by placing each Proto inside a struct with the pointer to its File. The
107
+// structs have the same names as their contents, with "Proto" removed.
108
+// FileDescriptor is used to store the things that it points to.
109
+
110
+// The file and package name method are common to messages and enums.
111
+type common struct {
112
+	file *FileDescriptor // File this object comes from.
113
+}
114
+
115
+// GoImportPath is the import path of the Go package containing the type.
116
+func (c *common) GoImportPath() GoImportPath {
117
+	return c.file.importPath
118
+}
119
+
120
+func (c *common) File() *FileDescriptor { return c.file }
121
+
122
+func fileIsProto3(file *descriptor.FileDescriptorProto) bool {
123
+	return file.GetSyntax() == "proto3"
124
+}
125
+
126
+func (c *common) proto3() bool { return fileIsProto3(c.file.FileDescriptorProto) }
127
+
128
+// Descriptor represents a protocol buffer message.
129
+type Descriptor struct {
130
+	common
131
+	*descriptor.DescriptorProto
132
+	parent   *Descriptor            // The containing message, if any.
133
+	nested   []*Descriptor          // Inner messages, if any.
134
+	enums    []*EnumDescriptor      // Inner enums, if any.
135
+	ext      []*ExtensionDescriptor // Extensions, if any.
136
+	typename []string               // Cached typename vector.
137
+	index    int                    // The index into the container, whether the file or another message.
138
+	path     string                 // The SourceCodeInfo path as comma-separated integers.
139
+	group    bool
140
+}
141
+
142
+// TypeName returns the elements of the dotted type name.
143
+// The package name is not part of this name.
144
+func (d *Descriptor) TypeName() []string {
145
+	if d.typename != nil {
146
+		return d.typename
147
+	}
148
+	n := 0
149
+	for parent := d; parent != nil; parent = parent.parent {
150
+		n++
151
+	}
152
+	s := make([]string, n)
153
+	for parent := d; parent != nil; parent = parent.parent {
154
+		n--
155
+		s[n] = parent.GetName()
156
+	}
157
+	d.typename = s
158
+	return s
159
+}
160
+
161
+// EnumDescriptor describes an enum. If it's at top level, its parent will be nil.
162
+// Otherwise it will be the descriptor of the message in which it is defined.
163
+type EnumDescriptor struct {
164
+	common
165
+	*descriptor.EnumDescriptorProto
166
+	parent   *Descriptor // The containing message, if any.
167
+	typename []string    // Cached typename vector.
168
+	index    int         // The index into the container, whether the file or a message.
169
+	path     string      // The SourceCodeInfo path as comma-separated integers.
170
+}
171
+
172
+// TypeName returns the elements of the dotted type name.
173
+// The package name is not part of this name.
174
+func (e *EnumDescriptor) TypeName() (s []string) {
175
+	if e.typename != nil {
176
+		return e.typename
177
+	}
178
+	name := e.GetName()
179
+	if e.parent == nil {
180
+		s = make([]string, 1)
181
+	} else {
182
+		pname := e.parent.TypeName()
183
+		s = make([]string, len(pname)+1)
184
+		copy(s, pname)
185
+	}
186
+	s[len(s)-1] = name
187
+	e.typename = s
188
+	return s
189
+}
190
+
191
+// Everything but the last element of the full type name, CamelCased.
192
+// The values of type Foo.Bar are called Foo_value1... not Foo_Bar_value1... .
193
+func (e *EnumDescriptor) prefix() string {
194
+	if e.parent == nil {
195
+		// If the enum is not part of a message, the prefix is just the type name.
196
+		return CamelCase(*e.Name) + "_"
197
+	}
198
+	typeName := e.TypeName()
199
+	return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_"
200
+}
201
+
202
+// The integer value of the named constant in this enumerated type.
203
+func (e *EnumDescriptor) integerValueAsString(name string) string {
204
+	for _, c := range e.Value {
205
+		if c.GetName() == name {
206
+			return fmt.Sprint(c.GetNumber())
207
+		}
208
+	}
209
+	log.Fatal("cannot find value for enum constant")
210
+	return ""
211
+}
212
+
213
+// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil.
214
+// Otherwise it will be the descriptor of the message in which it is defined.
215
+type ExtensionDescriptor struct {
216
+	common
217
+	*descriptor.FieldDescriptorProto
218
+	parent *Descriptor // The containing message, if any.
219
+}
220
+
221
+// TypeName returns the elements of the dotted type name.
222
+// The package name is not part of this name.
223
+func (e *ExtensionDescriptor) TypeName() (s []string) {
224
+	name := e.GetName()
225
+	if e.parent == nil {
226
+		// top-level extension
227
+		s = make([]string, 1)
228
+	} else {
229
+		pname := e.parent.TypeName()
230
+		s = make([]string, len(pname)+1)
231
+		copy(s, pname)
232
+	}
233
+	s[len(s)-1] = name
234
+	return s
235
+}
236
+
237
+// DescName returns the variable name used for the generated descriptor.
238
+func (e *ExtensionDescriptor) DescName() string {
239
+	// The full type name.
240
+	typeName := e.TypeName()
241
+	// Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix.
242
+	for i, s := range typeName {
243
+		typeName[i] = CamelCase(s)
244
+	}
245
+	return "E_" + strings.Join(typeName, "_")
246
+}
247
+
248
+// ImportedDescriptor describes a type that has been publicly imported from another file.
249
+type ImportedDescriptor struct {
250
+	common
251
+	o Object
252
+}
253
+
254
+func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() }
255
+
256
+// FileDescriptor describes a protocol buffer descriptor file (.proto).
257
+// It includes slices of all the messages and enums defined within it.
258
+// Those slices are constructed by WrapTypes.
259
+type FileDescriptor struct {
260
+	*descriptor.FileDescriptorProto
261
+	desc []*Descriptor          // All the messages defined in this file.
262
+	enum []*EnumDescriptor      // All the enums defined in this file.
263
+	ext  []*ExtensionDescriptor // All the top-level extensions defined in this file.
264
+	imp  []*ImportedDescriptor  // All types defined in files publicly imported by this file.
265
+
266
+	// Comments, stored as a map of path (comma-separated integers) to the comment.
267
+	comments map[string]*descriptor.SourceCodeInfo_Location
268
+
269
+	// The full list of symbols that are exported,
270
+	// as a map from the exported object to its symbols.
271
+	// This is used for supporting public imports.
272
+	exported map[Object][]symbol
273
+
274
+	importPath  GoImportPath  // Import path of this file's package.
275
+	packageName GoPackageName // Name of this file's Go package.
276
+
277
+	proto3 bool // whether to generate proto3 code for this file
278
+}
279
+
280
+// VarName is the variable name we'll use in the generated code to refer
281
+// to the compressed bytes of this descriptor. It is not exported, so
282
+// it is only valid inside the generated package.
283
+func (d *FileDescriptor) VarName() string {
284
+	h := sha256.Sum256([]byte(d.GetName()))
285
+	return fmt.Sprintf("fileDescriptor_%s", hex.EncodeToString(h[:8]))
286
+}
287
+
288
+// goPackageOption interprets the file's go_package option.
289
+// If there is no go_package, it returns ("", "", false).
290
+// If there's a simple name, it returns ("", pkg, true).
291
+// If the option implies an import path, it returns (impPath, pkg, true).
292
+func (d *FileDescriptor) goPackageOption() (impPath GoImportPath, pkg GoPackageName, ok bool) {
293
+	opt := d.GetOptions().GetGoPackage()
294
+	if opt == "" {
295
+		return "", "", false
296
+	}
297
+	// A semicolon-delimited suffix delimits the import path and package name.
298
+	sc := strings.Index(opt, ";")
299
+	if sc >= 0 {
300
+		return GoImportPath(opt[:sc]), cleanPackageName(opt[sc+1:]), true
301
+	}
302
+	// The presence of a slash implies there's an import path.
303
+	slash := strings.LastIndex(opt, "/")
304
+	if slash >= 0 {
305
+		return GoImportPath(opt), cleanPackageName(opt[slash+1:]), true
306
+	}
307
+	return "", cleanPackageName(opt), true
308
+}
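The three go_package forms handled by goPackageOption are easier to see with concrete values. The standalone sketch below restates the parsing rules for illustration only; it is not the generator's own code, it omits the cleanPackageName sanitization applied by the real method, and the option strings are hypothetical:

```
package main

import (
	"fmt"
	"strings"
)

// splitGoPackage mirrors the go_package parsing rules documented above:
// "" -> not set; "path;name" -> explicit import path and package name;
// "x/y/z" -> both derived from the path; "bar" -> package name only.
func splitGoPackage(opt string) (impPath, pkg string, ok bool) {
	if opt == "" {
		return "", "", false
	}
	if sc := strings.Index(opt, ";"); sc >= 0 {
		return opt[:sc], opt[sc+1:], true
	}
	if slash := strings.LastIndex(opt, "/"); slash >= 0 {
		return opt, opt[slash+1:], true
	}
	return "", opt, true
}

func main() {
	for _, opt := range []string{"", "bar", "example.com/foo/bar", "example.com/foo/bar;baz"} {
		impPath, pkg, ok := splitGoPackage(opt)
		fmt.Printf("go_package=%q -> import %q, package %q, set=%v\n", opt, impPath, pkg, ok)
	}
}
```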
309
+
310
+// goFileName returns the output name for the generated Go file.
311
+func (d *FileDescriptor) goFileName(pathType pathType) string {
312
+	name := *d.Name
313
+	if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" {
314
+		name = name[:len(name)-len(ext)]
315
+	}
316
+	name += ".pb.go"
317
+
318
+	if pathType == pathTypeSourceRelative {
319
+		return name
320
+	}
321
+
322
+	// Does the file have a "go_package" option?
323
+	// If it does, it may override the filename.
324
+	if impPath, _, ok := d.goPackageOption(); ok && impPath != "" {
325
+		// Replace the existing dirname with the declared import path.
326
+		_, name = path.Split(name)
327
+		name = path.Join(string(impPath), name)
328
+		return name
329
+	}
330
+
331
+	return name
332
+}
333
+
334
+func (d *FileDescriptor) addExport(obj Object, sym symbol) {
335
+	d.exported[obj] = append(d.exported[obj], sym)
336
+}
337
+
338
+// symbol is an interface representing an exported Go symbol.
339
+type symbol interface {
340
+	// GenerateAlias should generate an appropriate alias
341
+	// for the symbol from the named package.
342
+	GenerateAlias(g *Generator, filename string, pkg GoPackageName)
343
+}
344
+
345
+type messageSymbol struct {
346
+	sym                         string
347
+	hasExtensions, isMessageSet bool
348
+	oneofTypes                  []string
349
+}
350
+
351
+type getterSymbol struct {
352
+	name     string
353
+	typ      string
354
+	typeName string // canonical name in proto world; empty for proto.Message and similar
355
+	genType  bool   // whether typ contains a generated type (message/group/enum)
356
+}
357
+
358
+func (ms *messageSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) {
359
+	g.P("// ", ms.sym, " from public import ", filename)
360
+	g.P("type ", ms.sym, " = ", pkg, ".", ms.sym)
361
+	for _, name := range ms.oneofTypes {
362
+		g.P("type ", name, " = ", pkg, ".", name)
363
+	}
364
+}
365
+
366
+type enumSymbol struct {
367
+	name   string
368
+	proto3 bool // Whether this came from a proto3 file.
369
+}
370
+
371
+func (es enumSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) {
372
+	s := es.name
373
+	g.P("// ", s, " from public import ", filename)
374
+	g.P("type ", s, " = ", pkg, ".", s)
375
+	g.P("var ", s, "_name = ", pkg, ".", s, "_name")
376
+	g.P("var ", s, "_value = ", pkg, ".", s, "_value")
377
+}
378
+
379
+type constOrVarSymbol struct {
380
+	sym  string
381
+	typ  string // either "const" or "var"
382
+	cast string // if non-empty, a type cast is required (used for enums)
383
+}
384
+
385
+func (cs constOrVarSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) {
386
+	v := string(pkg) + "." + cs.sym
387
+	if cs.cast != "" {
388
+		v = cs.cast + "(" + v + ")"
389
+	}
390
+	g.P(cs.typ, " ", cs.sym, " = ", v)
391
+}
392
+
393
+// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects.
394
+type Object interface {
395
+	GoImportPath() GoImportPath
396
+	TypeName() []string
397
+	File() *FileDescriptor
398
+}
399
+
400
+// Generator is the type whose methods generate the output, stored in the associated response structure.
401
+type Generator struct {
402
+	*bytes.Buffer
403
+
404
+	Request  *plugin.CodeGeneratorRequest  // The input.
405
+	Response *plugin.CodeGeneratorResponse // The output.
406
+
407
+	Param             map[string]string // Command-line parameters.
408
+	PackageImportPath string            // Go import path of the package we're generating code for
409
+	ImportPrefix      string            // String to prefix to imported package file names.
410
+	ImportMap         map[string]string // Mapping from .proto file name to import path
411
+
412
+	Pkg map[string]string // The names under which we import support packages
413
+
414
+	outputImportPath GoImportPath                   // Package we're generating code for.
415
+	allFiles         []*FileDescriptor              // All files in the tree
416
+	allFilesByName   map[string]*FileDescriptor     // All files by filename.
417
+	genFiles         []*FileDescriptor              // Those files we will generate output for.
418
+	file             *FileDescriptor                // The file we are compiling now.
419
+	packageNames     map[GoImportPath]GoPackageName // Imported package names in the current file.
420
+	usedPackages     map[GoImportPath]bool          // Packages used in current file.
421
+	usedPackageNames map[GoPackageName]bool         // Package names used in the current file.
422
+	addedImports     map[GoImportPath]bool          // Additional imports to emit.
423
+	typeNameToObject map[string]Object              // Key is a fully-qualified name in input syntax.
424
+	init             []string                       // Lines to emit in the init function.
425
+	indent           string
426
+	pathType         pathType // How to generate output filenames.
427
+	writeOutput      bool
428
+	annotateCode     bool                                       // whether to store annotations
429
+	annotations      []*descriptor.GeneratedCodeInfo_Annotation // annotations to store
430
+}
431
+
432
+type pathType int
433
+
434
+const (
435
+	pathTypeImport pathType = iota
436
+	pathTypeSourceRelative
437
+)
438
+
439
+// New creates a new generator and allocates the request and response protobufs.
440
+func New() *Generator {
441
+	g := new(Generator)
442
+	g.Buffer = new(bytes.Buffer)
443
+	g.Request = new(plugin.CodeGeneratorRequest)
444
+	g.Response = new(plugin.CodeGeneratorResponse)
445
+	return g
446
+}
447
+
448
+// Error reports a problem, including an error, and exits the program.
449
+func (g *Generator) Error(err error, msgs ...string) {
450
+	s := strings.Join(msgs, " ") + ":" + err.Error()
451
+	log.Print("protoc-gen-go: error:", s)
452
+	os.Exit(1)
453
+}
454
+
455
+// Fail reports a problem and exits the program.
456
+func (g *Generator) Fail(msgs ...string) {
457
+	s := strings.Join(msgs, " ")
458
+	log.Print("protoc-gen-go: error:", s)
459
+	os.Exit(1)
460
+}
461
+
462
+// CommandLineParameters breaks the comma-separated list of key=value pairs
463
+// in the parameter (a member of the request protobuf) into a key/value map.
464
+// It then sets file name mappings defined by those entries.
465
+func (g *Generator) CommandLineParameters(parameter string) {
466
+	g.Param = make(map[string]string)
467
+	for _, p := range strings.Split(parameter, ",") {
468
+		if i := strings.Index(p, "="); i < 0 {
469
+			g.Param[p] = ""
470
+		} else {
471
+			g.Param[p[0:i]] = p[i+1:]
472
+		}
473
+	}
474
+
475
+	g.ImportMap = make(map[string]string)
476
+	pluginList := "none" // Default list of plugin names to enable (empty means all).
477
+	for k, v := range g.Param {
478
+		switch k {
479
+		case "import_prefix":
480
+			g.ImportPrefix = v
481
+		case "import_path":
482
+			g.PackageImportPath = v
483
+		case "paths":
484
+			switch v {
485
+			case "import":
486
+				g.pathType = pathTypeImport
487
+			case "source_relative":
488
+				g.pathType = pathTypeSourceRelative
489
+			default:
490
+				g.Fail(fmt.Sprintf(`Unknown path type %q: want "import" or "source_relative".`, v))
491
+			}
492
+		case "plugins":
493
+			pluginList = v
494
+		case "annotate_code":
495
+			if v == "true" {
496
+				g.annotateCode = true
497
+			}
498
+		default:
499
+			if len(k) > 0 && k[0] == 'M' {
500
+				g.ImportMap[k[1:]] = v
501
+			}
502
+		}
503
+	}
504
+	if pluginList != "" {
505
+		// Amend the set of plugins.
506
+		enabled := make(map[string]bool)
507
+		for _, name := range strings.Split(pluginList, "+") {
508
+			enabled[name] = true
509
+		}
510
+		var nplugins []Plugin
511
+		for _, p := range plugins {
512
+			if enabled[p.Name()] {
513
+				nplugins = append(nplugins, p)
514
+			}
515
+		}
516
+		plugins = nplugins
517
+	}
518
+}
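For reference, a minimal sketch of how a driver would feed CommandLineParameters the kind of parameter string protoc builds from --go_out flags; the flags and file names are hypothetical:

```
package main

import (
	"fmt"

	"github.com/golang/protobuf/protoc-gen-go/generator"
)

func main() {
	g := generator.New()
	// Roughly what protoc passes for a hypothetical invocation like:
	//   protoc --go_out=plugins=grpc,paths=source_relative,Mfoo.proto=example.com/foo:. foo.proto
	g.CommandLineParameters("plugins=grpc,paths=source_relative,Mfoo.proto=example.com/foo")
	fmt.Println(g.ImportMap["foo.proto"]) // prints "example.com/foo"
}
```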
519
+
520
+// DefaultPackageName returns the package name printed for the object.
521
+// If its file is in a different package, it returns the package name we're using for this file, plus ".".
522
+// Otherwise it returns the empty string.
523
+func (g *Generator) DefaultPackageName(obj Object) string {
524
+	importPath := obj.GoImportPath()
525
+	if importPath == g.outputImportPath {
526
+		return ""
527
+	}
528
+	return string(g.GoPackageName(importPath)) + "."
529
+}
530
+
531
+// GoPackageName returns the name used for a package.
532
+func (g *Generator) GoPackageName(importPath GoImportPath) GoPackageName {
533
+	if name, ok := g.packageNames[importPath]; ok {
534
+		return name
535
+	}
536
+	name := cleanPackageName(baseName(string(importPath)))
537
+	for i, orig := 1, name; g.usedPackageNames[name] || isGoPredeclaredIdentifier[string(name)]; i++ {
538
+		name = orig + GoPackageName(strconv.Itoa(i))
539
+	}
540
+	g.packageNames[importPath] = name
541
+	g.usedPackageNames[name] = true
542
+	return name
543
+}
544
+
545
+// AddImport adds a package to the generated file's import section.
546
+// It returns the name used for the package.
547
+func (g *Generator) AddImport(importPath GoImportPath) GoPackageName {
548
+	g.addedImports[importPath] = true
549
+	return g.GoPackageName(importPath)
550
+}
551
+
552
+var globalPackageNames = map[GoPackageName]bool{
553
+	"fmt":   true,
554
+	"math":  true,
555
+	"proto": true,
556
+}
557
+
558
+// Create and remember a guaranteed unique package name. Pkg is the candidate name.
559
+// The FileDescriptor parameter is unused.
560
+func RegisterUniquePackageName(pkg string, f *FileDescriptor) string {
561
+	name := cleanPackageName(pkg)
562
+	for i, orig := 1, name; globalPackageNames[name]; i++ {
563
+		name = orig + GoPackageName(strconv.Itoa(i))
564
+	}
565
+	globalPackageNames[name] = true
566
+	return string(name)
567
+}
568
+
569
+var isGoKeyword = map[string]bool{
570
+	"break":       true,
571
+	"case":        true,
572
+	"chan":        true,
573
+	"const":       true,
574
+	"continue":    true,
575
+	"default":     true,
576
+	"else":        true,
577
+	"defer":       true,
578
+	"fallthrough": true,
579
+	"for":         true,
580
+	"func":        true,
581
+	"go":          true,
582
+	"goto":        true,
583
+	"if":          true,
584
+	"import":      true,
585
+	"interface":   true,
586
+	"map":         true,
587
+	"package":     true,
588
+	"range":       true,
589
+	"return":      true,
590
+	"select":      true,
591
+	"struct":      true,
592
+	"switch":      true,
593
+	"type":        true,
594
+	"var":         true,
595
+}
596
+
597
+var isGoPredeclaredIdentifier = map[string]bool{
598
+	"append":     true,
599
+	"bool":       true,
600
+	"byte":       true,
601
+	"cap":        true,
602
+	"close":      true,
603
+	"complex":    true,
604
+	"complex128": true,
605
+	"complex64":  true,
606
+	"copy":       true,
607
+	"delete":     true,
608
+	"error":      true,
609
+	"false":      true,
610
+	"float32":    true,
611
+	"float64":    true,
612
+	"imag":       true,
613
+	"int":        true,
614
+	"int16":      true,
615
+	"int32":      true,
616
+	"int64":      true,
617
+	"int8":       true,
618
+	"iota":       true,
619
+	"len":        true,
620
+	"make":       true,
621
+	"new":        true,
622
+	"nil":        true,
623
+	"panic":      true,
624
+	"print":      true,
625
+	"println":    true,
626
+	"real":       true,
627
+	"recover":    true,
628
+	"rune":       true,
629
+	"string":     true,
630
+	"true":       true,
631
+	"uint":       true,
632
+	"uint16":     true,
633
+	"uint32":     true,
634
+	"uint64":     true,
635
+	"uint8":      true,
636
+	"uintptr":    true,
637
+}
638
+
639
+func cleanPackageName(name string) GoPackageName {
640
+	name = strings.Map(badToUnderscore, name)
641
+	// Identifier must not be keyword or predeclared identifier: insert _.
642
+	if isGoKeyword[name] {
643
+		name = "_" + name
644
+	}
645
+	// Identifier must not begin with digit: insert _.
646
+	if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) {
647
+		name = "_" + name
648
+	}
649
+	return GoPackageName(name)
650
+}
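For reference, a standalone approximation of what cleanPackageName does. badToUnderscore is not shown in this diff, so the character rule below (keep letters, digits and underscores, map everything else to '_') is an assumption, and the keyword table is reduced to a single entry:

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
	"unicode/utf8"
)

// cleanName approximates cleanPackageName for illustration only.
func cleanName(name string) string {
	name = strings.Map(func(r rune) rune {
		if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' {
			return r
		}
		return '_'
	}, name)
	// Identifier must not be a keyword (full table elided here).
	if name == "type" {
		name = "_" + name
	}
	// Identifier must not begin with a digit.
	if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) {
		name = "_" + name
	}
	return name
}

func main() {
	fmt.Println(cleanName("my-pkg.v2")) // my_pkg_v2
	fmt.Println(cleanName("2fast"))     // _2fast
	fmt.Println(cleanName("type"))      // _type
}
```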
651
+
652
+// defaultGoPackage returns the package name to use,
653
+// derived from the import path of the package we're building code for.
654
+func (g *Generator) defaultGoPackage() GoPackageName {
655
+	p := g.PackageImportPath
656
+	if i := strings.LastIndex(p, "/"); i >= 0 {
657
+		p = p[i+1:]
658
+	}
659
+	return cleanPackageName(p)
660
+}
661
+
662
+// SetPackageNames sets the package name for this run.
663
+// The package name must agree across all files being generated.
664
+// It also defines unique package names for all imported files.
665
+func (g *Generator) SetPackageNames() {
666
+	g.outputImportPath = g.genFiles[0].importPath
667
+
668
+	defaultPackageNames := make(map[GoImportPath]GoPackageName)
669
+	for _, f := range g.genFiles {
670
+		if _, p, ok := f.goPackageOption(); ok {
671
+			defaultPackageNames[f.importPath] = p
672
+		}
673
+	}
674
+	for _, f := range g.genFiles {
675
+		if _, p, ok := f.goPackageOption(); ok {
676
+			// Source file: option go_package = "quux/bar";
677
+			f.packageName = p
678
+		} else if p, ok := defaultPackageNames[f.importPath]; ok {
679
+			// A go_package option in another file in the same package.
680
+			//
681
+			// This is a poor choice in general, since every source file should
682
+			// contain a go_package option. Supported mainly for historical
683
+			// compatibility.
684
+			f.packageName = p
685
+		} else if p := g.defaultGoPackage(); p != "" {
686
+			// Command-line: import_path=quux/bar.
687
+			//
688
+			// The import_path flag sets a package name for files which don't
689
+			// contain a go_package option.
690
+			f.packageName = p
691
+		} else if p := f.GetPackage(); p != "" {
692
+			// Source file: package quux.bar;
693
+			f.packageName = cleanPackageName(p)
694
+		} else {
695
+			// Source filename.
696
+			f.packageName = cleanPackageName(baseName(f.GetName()))
697
+		}
698
+	}
699
+
700
+	// Check that all files have a consistent package name and import path.
701
+	for _, f := range g.genFiles[1:] {
702
+		if a, b := g.genFiles[0].importPath, f.importPath; a != b {
703
+			g.Fail(fmt.Sprintf("inconsistent package import paths: %v, %v", a, b))
704
+		}
705
+		if a, b := g.genFiles[0].packageName, f.packageName; a != b {
706
+			g.Fail(fmt.Sprintf("inconsistent package names: %v, %v", a, b))
707
+		}
708
+	}
709
+
710
+	// Names of support packages. These never vary (if there are conflicts,
711
+	// we rename the conflicting package), so this could be removed someday.
712
+	g.Pkg = map[string]string{
713
+		"fmt":   "fmt",
714
+		"math":  "math",
715
+		"proto": "proto",
716
+	}
717
+}
718
+
719
+// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos
720
+// and FileDescriptorProtos into file-referenced objects within the Generator.
721
+// It also creates the list of files to generate and so should be called before GenerateAllFiles.
722
+func (g *Generator) WrapTypes() {
723
+	g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile))
724
+	g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles))
725
+	genFileNames := make(map[string]bool)
726
+	for _, n := range g.Request.FileToGenerate {
727
+		genFileNames[n] = true
728
+	}
729
+	for _, f := range g.Request.ProtoFile {
730
+		fd := &FileDescriptor{
731
+			FileDescriptorProto: f,
732
+			exported:            make(map[Object][]symbol),
733
+			proto3:              fileIsProto3(f),
734
+		}
735
+		// The import path may be set in a number of ways.
736
+		if substitution, ok := g.ImportMap[f.GetName()]; ok {
737
+			// Command-line: M=foo.proto=quux/bar.
738
+			//
739
+			// Explicit mapping of source file to import path.
740
+			fd.importPath = GoImportPath(substitution)
741
+		} else if genFileNames[f.GetName()] && g.PackageImportPath != "" {
742
+			// Command-line: import_path=quux/bar.
743
+			//
744
+			// The import_path flag sets the import path for every file that
745
+			// we generate code for.
746
+			fd.importPath = GoImportPath(g.PackageImportPath)
747
+		} else if p, _, _ := fd.goPackageOption(); p != "" {
748
+			// Source file: option go_package = "quux/bar";
749
+			//
750
+			// The go_package option sets the import path. Most users should use this.
751
+			fd.importPath = p
752
+		} else {
753
+			// Source filename.
754
+			//
755
+			// Last resort when nothing else is available.
756
+			fd.importPath = GoImportPath(path.Dir(f.GetName()))
757
+		}
758
+		// We must wrap the descriptors before we wrap the enums
759
+		fd.desc = wrapDescriptors(fd)
760
+		g.buildNestedDescriptors(fd.desc)
761
+		fd.enum = wrapEnumDescriptors(fd, fd.desc)
762
+		g.buildNestedEnums(fd.desc, fd.enum)
763
+		fd.ext = wrapExtensions(fd)
764
+		extractComments(fd)
765
+		g.allFiles = append(g.allFiles, fd)
766
+		g.allFilesByName[f.GetName()] = fd
767
+	}
768
+	for _, fd := range g.allFiles {
769
+		fd.imp = wrapImported(fd, g)
770
+	}
771
+
772
+	g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate))
773
+	for _, fileName := range g.Request.FileToGenerate {
774
+		fd := g.allFilesByName[fileName]
775
+		if fd == nil {
776
+			g.Fail("could not find file named", fileName)
777
+		}
778
+		g.genFiles = append(g.genFiles, fd)
779
+	}
780
+}
781
+
782
+// Scan the descriptors in this file.  For each one, build the slice of nested descriptors
783
+func (g *Generator) buildNestedDescriptors(descs []*Descriptor) {
784
+	for _, desc := range descs {
785
+		if len(desc.NestedType) != 0 {
786
+			for _, nest := range descs {
787
+				if nest.parent == desc {
788
+					desc.nested = append(desc.nested, nest)
789
+				}
790
+			}
791
+			if len(desc.nested) != len(desc.NestedType) {
792
+				g.Fail("internal error: nesting failure for", desc.GetName())
793
+			}
794
+		}
795
+	}
796
+}
797
+
798
+func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) {
799
+	for _, desc := range descs {
800
+		if len(desc.EnumType) != 0 {
801
+			for _, enum := range enums {
802
+				if enum.parent == desc {
803
+					desc.enums = append(desc.enums, enum)
804
+				}
805
+			}
806
+			if len(desc.enums) != len(desc.EnumType) {
807
+				g.Fail("internal error: enum nesting failure for", desc.GetName())
808
+			}
809
+		}
810
+	}
811
+}
812
+
813
+// Construct the Descriptor
814
+func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *Descriptor {
815
+	d := &Descriptor{
816
+		common:          common{file},
817
+		DescriptorProto: desc,
818
+		parent:          parent,
819
+		index:           index,
820
+	}
821
+	if parent == nil {
822
+		d.path = fmt.Sprintf("%d,%d", messagePath, index)
823
+	} else {
824
+		d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index)
825
+	}
826
+
827
+	// The only way to distinguish a group from a message is whether
828
+	// the containing message has a TYPE_GROUP field that matches.
829
+	if parent != nil {
830
+		parts := d.TypeName()
831
+		if file.Package != nil {
832
+			parts = append([]string{*file.Package}, parts...)
833
+		}
834
+		exp := "." + strings.Join(parts, ".")
835
+		for _, field := range parent.Field {
836
+			if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp {
837
+				d.group = true
838
+				break
839
+			}
840
+		}
841
+	}
842
+
843
+	for _, field := range desc.Extension {
844
+		d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d})
845
+	}
846
+
847
+	return d
848
+}
849
+
850
+// Return a slice of all the Descriptors defined within this file
851
+func wrapDescriptors(file *FileDescriptor) []*Descriptor {
852
+	sl := make([]*Descriptor, 0, len(file.MessageType)+10)
853
+	for i, desc := range file.MessageType {
854
+		sl = wrapThisDescriptor(sl, desc, nil, file, i)
855
+	}
856
+	return sl
857
+}
858
+
859
+// Wrap this Descriptor, recursively
860
+func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) []*Descriptor {
861
+	sl = append(sl, newDescriptor(desc, parent, file, index))
862
+	me := sl[len(sl)-1]
863
+	for i, nested := range desc.NestedType {
864
+		sl = wrapThisDescriptor(sl, nested, me, file, i)
865
+	}
866
+	return sl
867
+}
868
+
869
+// Construct the EnumDescriptor
870
+func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *EnumDescriptor {
871
+	ed := &EnumDescriptor{
872
+		common:              common{file},
873
+		EnumDescriptorProto: desc,
874
+		parent:              parent,
875
+		index:               index,
876
+	}
877
+	if parent == nil {
878
+		ed.path = fmt.Sprintf("%d,%d", enumPath, index)
879
+	} else {
880
+		ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index)
881
+	}
882
+	return ed
883
+}
884
+
885
+// Return a slice of all the EnumDescriptors defined within this file
886
+func wrapEnumDescriptors(file *FileDescriptor, descs []*Descriptor) []*EnumDescriptor {
887
+	sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10)
888
+	// Top-level enums.
889
+	for i, enum := range file.EnumType {
890
+		sl = append(sl, newEnumDescriptor(enum, nil, file, i))
891
+	}
892
+	// Enums within messages. Enums within embedded messages appear in the outer-most message.
893
+	for _, nested := range descs {
894
+		for i, enum := range nested.EnumType {
895
+			sl = append(sl, newEnumDescriptor(enum, nested, file, i))
896
+		}
897
+	}
898
+	return sl
899
+}
900
+
901
+// Return a slice of all the top-level ExtensionDescriptors defined within this file.
902
+func wrapExtensions(file *FileDescriptor) []*ExtensionDescriptor {
903
+	var sl []*ExtensionDescriptor
904
+	for _, field := range file.Extension {
905
+		sl = append(sl, &ExtensionDescriptor{common{file}, field, nil})
906
+	}
907
+	return sl
908
+}
909
+
910
+// Return a slice of all the types that are publicly imported into this file.
911
+func wrapImported(file *FileDescriptor, g *Generator) (sl []*ImportedDescriptor) {
912
+	for _, index := range file.PublicDependency {
913
+		df := g.fileByName(file.Dependency[index])
914
+		for _, d := range df.desc {
915
+			if d.GetOptions().GetMapEntry() {
916
+				continue
917
+			}
918
+			sl = append(sl, &ImportedDescriptor{common{file}, d})
919
+		}
920
+		for _, e := range df.enum {
921
+			sl = append(sl, &ImportedDescriptor{common{file}, e})
922
+		}
923
+		for _, ext := range df.ext {
924
+			sl = append(sl, &ImportedDescriptor{common{file}, ext})
925
+		}
926
+	}
927
+	return
928
+}
929
+
930
+func extractComments(file *FileDescriptor) {
931
+	file.comments = make(map[string]*descriptor.SourceCodeInfo_Location)
932
+	for _, loc := range file.GetSourceCodeInfo().GetLocation() {
933
+		if loc.LeadingComments == nil {
934
+			continue
935
+		}
936
+		var p []string
937
+		for _, n := range loc.Path {
938
+			p = append(p, strconv.Itoa(int(n)))
939
+		}
940
+		file.comments[strings.Join(p, ",")] = loc
941
+	}
942
+}
943
+
944
+// BuildTypeNameMap builds the map from fully qualified type names to objects.
945
+// The key names for the map come from the input data, which puts a period at the beginning.
946
+// It should be called after SetPackageNames and before GenerateAllFiles.
947
+func (g *Generator) BuildTypeNameMap() {
948
+	g.typeNameToObject = make(map[string]Object)
949
+	for _, f := range g.allFiles {
950
+		// The names in this loop are defined by the proto world, not us, so the
951
+		// package name may be empty.  If so, the dotted package name of X will
952
+		// be ".X"; otherwise it will be ".pkg.X".
953
+		dottedPkg := "." + f.GetPackage()
954
+		if dottedPkg != "." {
955
+			dottedPkg += "."
956
+		}
957
+		for _, enum := range f.enum {
958
+			name := dottedPkg + dottedSlice(enum.TypeName())
959
+			g.typeNameToObject[name] = enum
960
+		}
961
+		for _, desc := range f.desc {
962
+			name := dottedPkg + dottedSlice(desc.TypeName())
963
+			g.typeNameToObject[name] = desc
964
+		}
965
+	}
966
+}
967
+
968
+// ObjectNamed, given a fully-qualified input type name as it appears in the input data,
969
+// returns the descriptor for the message or enum with that name.
970
+func (g *Generator) ObjectNamed(typeName string) Object {
971
+	o, ok := g.typeNameToObject[typeName]
972
+	if !ok {
973
+		g.Fail("can't find object with type", typeName)
974
+	}
975
+	return o
976
+}
977
+
978
+// AnnotatedAtoms is a list of atoms (as consumed by P) that records the file name and proto AST path from which they originated.
979
+type AnnotatedAtoms struct {
980
+	source string
981
+	path   string
982
+	atoms  []interface{}
983
+}
984
+
985
+// Annotate records the file name and proto AST path of a list of atoms
986
+// so that a later call to P can emit a link from each atom to its origin.
987
+func Annotate(file *FileDescriptor, path string, atoms ...interface{}) *AnnotatedAtoms {
988
+	return &AnnotatedAtoms{source: *file.Name, path: path, atoms: atoms}
989
+}
990
+
991
+// printAtom prints the (atomic, non-annotation) argument to the generated output.
992
+func (g *Generator) printAtom(v interface{}) {
993
+	switch v := v.(type) {
994
+	case string:
995
+		g.WriteString(v)
996
+	case *string:
997
+		g.WriteString(*v)
998
+	case bool:
999
+		fmt.Fprint(g, v)
1000
+	case *bool:
1001
+		fmt.Fprint(g, *v)
1002
+	case int:
1003
+		fmt.Fprint(g, v)
1004
+	case *int32:
1005
+		fmt.Fprint(g, *v)
1006
+	case *int64:
1007
+		fmt.Fprint(g, *v)
1008
+	case float64:
1009
+		fmt.Fprint(g, v)
1010
+	case *float64:
1011
+		fmt.Fprint(g, *v)
1012
+	case GoPackageName:
1013
+		g.WriteString(string(v))
1014
+	case GoImportPath:
1015
+		g.WriteString(strconv.Quote(string(v)))
1016
+	default:
1017
+		g.Fail(fmt.Sprintf("unknown type in printer: %T", v))
1018
+	}
1019
+}
1020
+
1021
+// P prints the arguments to the generated output.  It handles strings and int32s, plus
1022
+// handling indirections because they may be *string, etc.  Any inputs of type AnnotatedAtoms may emit
1023
+// annotations in a .meta file in addition to outputting the atoms themselves (if g.annotateCode
1024
+// is true).
1025
+func (g *Generator) P(str ...interface{}) {
1026
+	if !g.writeOutput {
1027
+		return
1028
+	}
1029
+	g.WriteString(g.indent)
1030
+	for _, v := range str {
1031
+		switch v := v.(type) {
1032
+		case *AnnotatedAtoms:
1033
+			begin := int32(g.Len())
1034
+			for _, v := range v.atoms {
1035
+				g.printAtom(v)
1036
+			}
1037
+			if g.annotateCode {
1038
+				end := int32(g.Len())
1039
+				var path []int32
1040
+				for _, token := range strings.Split(v.path, ",") {
1041
+					val, err := strconv.ParseInt(token, 10, 32)
1042
+					if err != nil {
1043
+						g.Fail("could not parse proto AST path: ", err.Error())
1044
+					}
1045
+					path = append(path, int32(val))
1046
+				}
1047
+				g.annotations = append(g.annotations, &descriptor.GeneratedCodeInfo_Annotation{
1048
+					Path:       path,
1049
+					SourceFile: &v.source,
1050
+					Begin:      &begin,
1051
+					End:        &end,
1052
+				})
1053
+			}
1054
+		default:
1055
+			g.printAtom(v)
1056
+		}
1057
+	}
1058
+	g.WriteByte('\n')
1059
+}
1060
+
1061
+// addInitf stores the given statement to be printed inside the file's init function.
1062
+// The statement is given as a format specifier and arguments.
1063
+func (g *Generator) addInitf(stmt string, a ...interface{}) {
1064
+	g.init = append(g.init, fmt.Sprintf(stmt, a...))
1065
+}
1066
+
1067
+// In indents the output one tab stop.
1068
+func (g *Generator) In() { g.indent += "\t" }
1069
+
1070
+// Out unindents the output one tab stop.
1071
+func (g *Generator) Out() {
1072
+	if len(g.indent) > 0 {
1073
+		g.indent = g.indent[1:]
1074
+	}
1075
+}
1076
+
1077
+// GenerateAllFiles generates the output for all the files we're outputting.
1078
+func (g *Generator) GenerateAllFiles() {
1079
+	// Initialize the plugins
1080
+	for _, p := range plugins {
1081
+		p.Init(g)
1082
+	}
1083
+	// Generate the output. The generator runs for every file, even the files
1084
+	// that we don't generate output for, so that we can collate the full list
1085
+	// of exported symbols to support public imports.
1086
+	genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles))
1087
+	for _, file := range g.genFiles {
1088
+		genFileMap[file] = true
1089
+	}
1090
+	for _, file := range g.allFiles {
1091
+		g.Reset()
1092
+		g.annotations = nil
1093
+		g.writeOutput = genFileMap[file]
1094
+		g.generate(file)
1095
+		if !g.writeOutput {
1096
+			continue
1097
+		}
1098
+		fname := file.goFileName(g.pathType)
1099
+		g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
1100
+			Name:    proto.String(fname),
1101
+			Content: proto.String(g.String()),
1102
+		})
1103
+		if g.annotateCode {
1104
+			// Store the generated code annotations in text, as the protoc plugin protocol requires that
1105
+			// strings contain valid UTF-8.
1106
+			g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
1107
+				Name:    proto.String(file.goFileName(g.pathType) + ".meta"),
1108
+				Content: proto.String(proto.CompactTextString(&descriptor.GeneratedCodeInfo{Annotation: g.annotations})),
1109
+			})
1110
+		}
1111
+	}
1112
+}
1113
+
1114
+// Run all the plugins associated with the file.
1115
+func (g *Generator) runPlugins(file *FileDescriptor) {
1116
+	for _, p := range plugins {
1117
+		p.Generate(file)
1118
+	}
1119
+}
1120
+
1121
+// Fill the response protocol buffer with the generated output for all the files we're
1122
+// supposed to generate.
1123
+func (g *Generator) generate(file *FileDescriptor) {
1124
+	g.file = file
1125
+	g.usedPackages = make(map[GoImportPath]bool)
1126
+	g.packageNames = make(map[GoImportPath]GoPackageName)
1127
+	g.usedPackageNames = make(map[GoPackageName]bool)
1128
+	g.addedImports = make(map[GoImportPath]bool)
1129
+	for name := range globalPackageNames {
1130
+		g.usedPackageNames[name] = true
1131
+	}
1132
+
1133
+	g.P("// This is a compile-time assertion to ensure that this generated file")
1134
+	g.P("// is compatible with the proto package it is being compiled against.")
1135
+	g.P("// A compilation error at this line likely means your copy of the")
1136
+	g.P("// proto package needs to be updated.")
1137
+	g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package")
1138
+	g.P()
1139
+
1140
+	for _, td := range g.file.imp {
1141
+		g.generateImported(td)
1142
+	}
1143
+	for _, enum := range g.file.enum {
1144
+		g.generateEnum(enum)
1145
+	}
1146
+	for _, desc := range g.file.desc {
1147
+		// Don't generate virtual messages for maps.
1148
+		if desc.GetOptions().GetMapEntry() {
1149
+			continue
1150
+		}
1151
+		g.generateMessage(desc)
1152
+	}
1153
+	for _, ext := range g.file.ext {
1154
+		g.generateExtension(ext)
1155
+	}
1156
+	g.generateInitFunction()
1157
+	g.generateFileDescriptor(file)
1158
+
1159
+	// Run the plugins before the imports so we know which imports are necessary.
1160
+	g.runPlugins(file)
1161
+
1162
+	// Generate header and imports last, though they appear first in the output.
1163
+	rem := g.Buffer
1164
+	remAnno := g.annotations
1165
+	g.Buffer = new(bytes.Buffer)
1166
+	g.annotations = nil
1167
+	g.generateHeader()
1168
+	g.generateImports()
1169
+	if !g.writeOutput {
1170
+		return
1171
+	}
1172
+	// Adjust the offsets for annotations displaced by the header and imports.
1173
+	for _, anno := range remAnno {
1174
+		*anno.Begin += int32(g.Len())
1175
+		*anno.End += int32(g.Len())
1176
+		g.annotations = append(g.annotations, anno)
1177
+	}
1178
+	g.Write(rem.Bytes())
1179
+
1180
+	// Reformat generated code and patch annotation locations.
1181
+	fset := token.NewFileSet()
1182
+	original := g.Bytes()
1183
+	if g.annotateCode {
1184
+		// make a copy independent of g; we'll need it after Reset.
1185
+		original = append([]byte(nil), original...)
1186
+	}
1187
+	fileAST, err := parser.ParseFile(fset, "", original, parser.ParseComments)
1188
+	if err != nil {
1189
+		// Print out the bad code with line numbers.
1190
+		// This should never happen in practice, but it can while changing generated code,
1191
+		// so consider this a debugging aid.
1192
+		var src bytes.Buffer
1193
+		s := bufio.NewScanner(bytes.NewReader(original))
1194
+		for line := 1; s.Scan(); line++ {
1195
+			fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes())
1196
+		}
1197
+		g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String())
1198
+	}
1199
+	ast.SortImports(fset, fileAST)
1200
+	g.Reset()
1201
+	err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, fileAST)
1202
+	if err != nil {
1203
+		g.Fail("generated Go source code could not be reformatted:", err.Error())
1204
+	}
1205
+	if g.annotateCode {
1206
+		m, err := remap.Compute(original, g.Bytes())
1207
+		if err != nil {
1208
+			g.Fail("formatted generated Go source code could not be mapped back to the original code:", err.Error())
1209
+		}
1210
+		for _, anno := range g.annotations {
1211
+			new, ok := m.Find(int(*anno.Begin), int(*anno.End))
1212
+			if !ok {
1213
+				g.Fail("span in formatted generated Go source code could not be mapped back to the original code")
1214
+			}
1215
+			*anno.Begin = int32(new.Pos)
1216
+			*anno.End = int32(new.End)
1217
+		}
1218
+	}
1219
+}
1220
+
1221
+// Generate the header, including package definition
1222
+func (g *Generator) generateHeader() {
1223
+	g.P("// Code generated by protoc-gen-go. DO NOT EDIT.")
1224
+	if g.file.GetOptions().GetDeprecated() {
1225
+		g.P("// ", g.file.Name, " is a deprecated file.")
1226
+	} else {
1227
+		g.P("// source: ", g.file.Name)
1228
+	}
1229
+	g.P()
1230
+	g.PrintComments(strconv.Itoa(packagePath))
1231
+	g.P()
1232
+	g.P("package ", g.file.packageName)
1233
+	g.P()
1234
+}
1235
+
1236
+// deprecationComment is the standard comment added to deprecated
1237
+// messages, fields, enums, and enum values.
1238
+var deprecationComment = "// Deprecated: Do not use."
1239
+
1240
+// PrintComments prints any comments from the source .proto file.
1241
+// The path is a comma-separated list of integers.
1242
+// It returns an indication of whether any comments were printed.
1243
+// See descriptor.proto for its format.
1244
+func (g *Generator) PrintComments(path string) bool {
1245
+	if !g.writeOutput {
1246
+		return false
1247
+	}
1248
+	if c, ok := g.makeComments(path); ok {
1249
+		g.P(c)
1250
+		return true
1251
+	}
1252
+	return false
1253
+}
1254
+
1255
+// makeComments generates the comment string for the given path, with no trailing "\n".
1256
+func (g *Generator) makeComments(path string) (string, bool) {
1257
+	loc, ok := g.file.comments[path]
1258
+	if !ok {
1259
+		return "", false
1260
+	}
1261
+	w := new(bytes.Buffer)
1262
+	nl := ""
1263
+	for _, line := range strings.Split(strings.TrimSuffix(loc.GetLeadingComments(), "\n"), "\n") {
1264
+		fmt.Fprintf(w, "%s//%s", nl, line)
1265
+		nl = "\n"
1266
+	}
1267
+	return w.String(), true
1268
+}
1269
+
1270
+func (g *Generator) fileByName(filename string) *FileDescriptor {
1271
+	return g.allFilesByName[filename]
1272
+}
1273
+
1274
+// weak returns whether the ith import of the current file is a weak import.
1275
+func (g *Generator) weak(i int32) bool {
1276
+	for _, j := range g.file.WeakDependency {
1277
+		if j == i {
1278
+			return true
1279
+		}
1280
+	}
1281
+	return false
1282
+}
1283
+
1284
+// Generate the imports
1285
+func (g *Generator) generateImports() {
1286
+	imports := make(map[GoImportPath]GoPackageName)
1287
+	for i, s := range g.file.Dependency {
1288
+		fd := g.fileByName(s)
1289
+		importPath := fd.importPath
1290
+		// Do not import our own package.
1291
+		if importPath == g.file.importPath {
1292
+			continue
1293
+		}
1294
+		// Do not import weak imports.
1295
+		if g.weak(int32(i)) {
1296
+			continue
1297
+		}
1298
+		// Do not import a package twice.
1299
+		if _, ok := imports[importPath]; ok {
1300
+			continue
1301
+		}
1302
+		// We need to import all the dependencies, even if we don't reference them,
1303
+		// because other code and tools depend on having the full transitive closure
1304
+		// of protocol buffer types in the binary.
1305
+		packageName := g.GoPackageName(importPath)
1306
+		if _, ok := g.usedPackages[importPath]; !ok {
1307
+			packageName = "_"
1308
+		}
1309
+		imports[importPath] = packageName
1310
+	}
1311
+	for importPath := range g.addedImports {
1312
+		imports[importPath] = g.GoPackageName(importPath)
1313
+	}
1314
+	// We almost always need a proto import.  Rather than computing when we
1315
+	// do, which is tricky when there's a plugin, just import it and
1316
+	// reference it later. The same argument applies to the fmt and math packages.
1317
+	g.P("import (")
1318
+	g.P(g.Pkg["fmt"] + ` "fmt"`)
1319
+	g.P(g.Pkg["math"] + ` "math"`)
1320
+	g.P(g.Pkg["proto"]+" ", GoImportPath(g.ImportPrefix)+"github.com/golang/protobuf/proto")
1321
+	for importPath, packageName := range imports {
1322
+		g.P(packageName, " ", GoImportPath(g.ImportPrefix)+importPath)
1323
+	}
1324
+	g.P(")")
1325
+	g.P()
1326
+	// TODO: may need to worry about uniqueness across plugins
1327
+	for _, p := range plugins {
1328
+		p.GenerateImports(g.file)
1329
+		g.P()
1330
+	}
1331
+	g.P("// Reference imports to suppress errors if they are not otherwise used.")
1332
+	g.P("var _ = ", g.Pkg["proto"], ".Marshal")
1333
+	g.P("var _ = ", g.Pkg["fmt"], ".Errorf")
1334
+	g.P("var _ = ", g.Pkg["math"], ".Inf")
1335
+	g.P()
1336
+}
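The resulting import block in a generated file looks roughly like the following; the named fmt/math/proto imports and the reference vars come straight from the g.P calls above, and any dependency that is not referenced directly would additionally appear as a blank-identifier (`_`) import. The package name is assumed:

```go
// Illustrative output shape only.
package examplepb

import (
	fmt "fmt"
	math "math"
	proto "github.com/golang/protobuf/proto"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
```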
1337
+
1338
+func (g *Generator) generateImported(id *ImportedDescriptor) {
1339
+	df := id.o.File()
1340
+	filename := *df.Name
1341
+	if df.importPath == g.file.importPath {
1342
+		// Don't generate type aliases for files in the same Go package as this one.
1343
+		return
1344
+	}
1345
+	if !supportTypeAliases {
1346
+		g.Fail(fmt.Sprintf("%s: public imports require at least go1.9", filename))
1347
+	}
1348
+	g.usedPackages[df.importPath] = true
1349
+
1350
+	for _, sym := range df.exported[id.o] {
1351
+		sym.GenerateAlias(g, filename, g.GoPackageName(df.importPath))
1352
+	}
1353
+
1354
+	g.P()
1355
+}
1356
+
1357
+// Generate the enum definitions for this EnumDescriptor.
1358
+func (g *Generator) generateEnum(enum *EnumDescriptor) {
1359
+	// The full type name
1360
+	typeName := enum.TypeName()
1361
+	// The full type name, CamelCased.
1362
+	ccTypeName := CamelCaseSlice(typeName)
1363
+	ccPrefix := enum.prefix()
1364
+
1365
+	deprecatedEnum := ""
1366
+	if enum.GetOptions().GetDeprecated() {
1367
+		deprecatedEnum = deprecationComment
1368
+	}
1369
+	g.PrintComments(enum.path)
1370
+	g.P("type ", Annotate(enum.file, enum.path, ccTypeName), " int32", deprecatedEnum)
1371
+	g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()})
1372
+	g.P("const (")
1373
+	for i, e := range enum.Value {
1374
+		etorPath := fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i)
1375
+		g.PrintComments(etorPath)
1376
+
1377
+		deprecatedValue := ""
1378
+		if e.GetOptions().GetDeprecated() {
1379
+			deprecatedValue = deprecationComment
1380
+		}
1381
+
1382
+		name := ccPrefix + *e.Name
1383
+		g.P(Annotate(enum.file, etorPath, name), " ", ccTypeName, " = ", e.Number, " ", deprecatedValue)
1384
+		g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName})
1385
+	}
1386
+	g.P(")")
1387
+	g.P()
1388
+	g.P("var ", ccTypeName, "_name = map[int32]string{")
1389
+	generated := make(map[int32]bool) // avoid duplicate values
1390
+	for _, e := range enum.Value {
1391
+		duplicate := ""
1392
+		if _, present := generated[*e.Number]; present {
1393
+			duplicate = "// Duplicate value: "
1394
+		}
1395
+		g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",")
1396
+		generated[*e.Number] = true
1397
+	}
1398
+	g.P("}")
1399
+	g.P()
1400
+	g.P("var ", ccTypeName, "_value = map[string]int32{")
1401
+	for _, e := range enum.Value {
1402
+		g.P(strconv.Quote(*e.Name), ": ", e.Number, ",")
1403
+	}
1404
+	g.P("}")
1405
+	g.P()
1406
+
1407
+	if !enum.proto3() {
1408
+		g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {")
1409
+		g.P("p := new(", ccTypeName, ")")
1410
+		g.P("*p = x")
1411
+		g.P("return p")
1412
+		g.P("}")
1413
+		g.P()
1414
+	}
1415
+
1416
+	g.P("func (x ", ccTypeName, ") String() string {")
1417
+	g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))")
1418
+	g.P("}")
1419
+	g.P()
1420
+
1421
+	if !enum.proto3() {
1422
+		g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {")
1423
+		g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`)
1424
+		g.P("if err != nil {")
1425
+		g.P("return err")
1426
+		g.P("}")
1427
+		g.P("*x = ", ccTypeName, "(value)")
1428
+		g.P("return nil")
1429
+		g.P("}")
1430
+		g.P()
1431
+	}
1432
+
1433
+	var indexes []string
1434
+	for m := enum.parent; m != nil; m = m.parent {
1435
+		// XXX: skip groups?
1436
+		indexes = append([]string{strconv.Itoa(m.index)}, indexes...)
1437
+	}
1438
+	indexes = append(indexes, strconv.Itoa(enum.index))
1439
+	g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) {")
1440
+	g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}")
1441
+	g.P("}")
1442
+	g.P()
1443
+	if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" {
1444
+		g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`)
1445
+		g.P()
1446
+	}
1447
+
1448
+	g.generateEnumRegistration(enum)
1449
+}
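Put together, the enum output has this general shape. The example below is hand-written for a hypothetical `enum Color { RED = 0; BLUE = 1; }` and omits the Enum/UnmarshalJSON methods (proto2 only) and the EnumDescriptor method:

```go
package examplepb

import proto "github.com/golang/protobuf/proto"

type Color int32

const (
	Color_RED  Color = 0
	Color_BLUE Color = 1
)

var Color_name = map[int32]string{
	0: "RED",
	1: "BLUE",
}

var Color_value = map[string]int32{
	"RED":  0,
	"BLUE": 1,
}

func (x Color) String() string {
	return proto.EnumName(Color_name, int32(x))
}
```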
1450
+
1451
+// The tag is a string like "varint,2,opt,name=fieldname,def=7" that
1452
+// identifies details of the field for the protocol buffer marshaling and unmarshaling
1453
+// code.  The fields are:
1454
+//	wire encoding
1455
+//	protocol tag number
1456
+//	opt,req,rep for optional, required, or repeated
1457
+//	packed whether the encoding is "packed" (optional; repeated primitives only)
1458
+//	name= the original declared name
1459
+//	enum= the name of the enum type if it is an enum-typed field.
1460
+//	proto3 if this field is in a proto3 message
1461
+//	def= string representation of the default value, if any.
1462
+// The default value must be in a representation that can be used at run-time
1463
+// to generate the default value. Thus bools become 0 and 1, for instance.
1464
+func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string {
1465
+	optrepreq := ""
1466
+	switch {
1467
+	case isOptional(field):
1468
+		optrepreq = "opt"
1469
+	case isRequired(field):
1470
+		optrepreq = "req"
1471
+	case isRepeated(field):
1472
+		optrepreq = "rep"
1473
+	}
1474
+	var defaultValue string
1475
+	if dv := field.DefaultValue; dv != nil { // set means an explicit default
1476
+		defaultValue = *dv
1477
+		// Some types need tweaking.
1478
+		switch *field.Type {
1479
+		case descriptor.FieldDescriptorProto_TYPE_BOOL:
1480
+			if defaultValue == "true" {
1481
+				defaultValue = "1"
1482
+			} else {
1483
+				defaultValue = "0"
1484
+			}
1485
+		case descriptor.FieldDescriptorProto_TYPE_STRING,
1486
+			descriptor.FieldDescriptorProto_TYPE_BYTES:
1487
+			// Nothing to do. Quoting is done for the whole tag.
1488
+		case descriptor.FieldDescriptorProto_TYPE_ENUM:
1489
+			// For enums we need to provide the integer constant.
1490
+			obj := g.ObjectNamed(field.GetTypeName())
1491
+			if id, ok := obj.(*ImportedDescriptor); ok {
1492
+				// It is an enum that was publicly imported.
1493
+				// We need the underlying type.
1494
+				obj = id.o
1495
+			}
1496
+			enum, ok := obj.(*EnumDescriptor)
1497
+			if !ok {
1498
+				log.Printf("obj is a %T", obj)
1499
+				if id, ok := obj.(*ImportedDescriptor); ok {
1500
+					log.Printf("id.o is a %T", id.o)
1501
+				}
1502
+				g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName()))
1503
+			}
1504
+			defaultValue = enum.integerValueAsString(defaultValue)
1505
+		case descriptor.FieldDescriptorProto_TYPE_FLOAT:
1506
+			if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" {
1507
+				if f, err := strconv.ParseFloat(defaultValue, 32); err == nil {
1508
+					defaultValue = fmt.Sprint(float32(f))
1509
+				}
1510
+			}
1511
+		case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
1512
+			if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" {
1513
+				if f, err := strconv.ParseFloat(defaultValue, 64); err == nil {
1514
+					defaultValue = fmt.Sprint(f)
1515
+				}
1516
+			}
1517
+		}
1518
+		defaultValue = ",def=" + defaultValue
1519
+	}
1520
+	enum := ""
1521
+	if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM {
1522
+		// We avoid using obj.GoPackageName(), because we want to use the
1523
+		// original (proto-world) package name.
1524
+		obj := g.ObjectNamed(field.GetTypeName())
1525
+		if id, ok := obj.(*ImportedDescriptor); ok {
1526
+			obj = id.o
1527
+		}
1528
+		enum = ",enum="
1529
+		if pkg := obj.File().GetPackage(); pkg != "" {
1530
+			enum += pkg + "."
1531
+		}
1532
+		enum += CamelCaseSlice(obj.TypeName())
1533
+	}
1534
+	packed := ""
1535
+	if (field.Options != nil && field.Options.GetPacked()) ||
1536
+		// Per https://developers.google.com/protocol-buffers/docs/proto3#simple:
1537
+		// "In proto3, repeated fields of scalar numeric types use packed encoding by default."
1538
+		(message.proto3() && (field.Options == nil || field.Options.Packed == nil) &&
1539
+			isRepeated(field) && isScalar(field)) {
1540
+		packed = ",packed"
1541
+	}
1542
+	fieldName := field.GetName()
1543
+	name := fieldName
1544
+	if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP {
1545
+		// We must use the type name for groups instead of
1546
+		// the field name to preserve capitalization.
1547
+		// type_name in FieldDescriptorProto is fully-qualified,
1548
+		// but we only want the local part.
1549
+		name = *field.TypeName
1550
+		if i := strings.LastIndex(name, "."); i >= 0 {
1551
+			name = name[i+1:]
1552
+		}
1553
+	}
1554
+	if json := field.GetJsonName(); field.Extendee == nil && json != "" && json != name {
1555
+		// TODO: escaping might be needed, in which case
1556
+		// perhaps this should be in its own "json" tag.
1557
+		name += ",json=" + json
1558
+	}
1559
+	name = ",name=" + name
1560
+	if message.proto3() {
1561
+		name += ",proto3"
1562
+	}
1563
+	oneof := ""
1564
+	if field.OneofIndex != nil {
1565
+		oneof = ",oneof"
1566
+	}
1567
+	return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s",
1568
+		wiretype,
1569
+		field.GetNumber(),
1570
+		optrepreq,
1571
+		packed,
1572
+		name,
1573
+		enum,
1574
+		oneof,
1575
+		defaultValue))
1576
+}
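For a concrete sense of where these tags end up, a hand-written field matching the tag layout documented above might look like this. The message and field names are made up, and the generated code also carries json struct tags, omitted here:

```go
package examplepb

// Region is a hypothetical proto2 message with one optional int32 field:
//   optional int32 region_id = 8;
type Region struct {
	RegionId *int32 `protobuf:"varint,8,opt,name=region_id,json=regionId"`
}
```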
1577
+
1578
+func needsStar(typ descriptor.FieldDescriptorProto_Type) bool {
1579
+	switch typ {
1580
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
1581
+		return false
1582
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
1583
+		return false
1584
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
1585
+		return false
1586
+	}
1587
+	return true
1588
+}
1589
+
1590
+// TypeName is the printed name appropriate for an item. If the object is in the current file,
1591
+// TypeName drops the package name and underscores the rest.
1592
+// Otherwise the object is from another package; and the result is the underscored
1593
+// package name followed by the item name.
1594
+// The result always has an initial capital.
1595
+func (g *Generator) TypeName(obj Object) string {
1596
+	return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName())
1597
+}
1598
+
1599
+// GoType returns a string representing the type name, and the wire type
1600
+func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) {
1601
+	// TODO: Options.
1602
+	switch *field.Type {
1603
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
1604
+		typ, wire = "float64", "fixed64"
1605
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
1606
+		typ, wire = "float32", "fixed32"
1607
+	case descriptor.FieldDescriptorProto_TYPE_INT64:
1608
+		typ, wire = "int64", "varint"
1609
+	case descriptor.FieldDescriptorProto_TYPE_UINT64:
1610
+		typ, wire = "uint64", "varint"
1611
+	case descriptor.FieldDescriptorProto_TYPE_INT32:
1612
+		typ, wire = "int32", "varint"
1613
+	case descriptor.FieldDescriptorProto_TYPE_UINT32:
1614
+		typ, wire = "uint32", "varint"
1615
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64:
1616
+		typ, wire = "uint64", "fixed64"
1617
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32:
1618
+		typ, wire = "uint32", "fixed32"
1619
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
1620
+		typ, wire = "bool", "varint"
1621
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
1622
+		typ, wire = "string", "bytes"
1623
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
1624
+		desc := g.ObjectNamed(field.GetTypeName())
1625
+		typ, wire = "*"+g.TypeName(desc), "group"
1626
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
1627
+		desc := g.ObjectNamed(field.GetTypeName())
1628
+		typ, wire = "*"+g.TypeName(desc), "bytes"
1629
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
1630
+		typ, wire = "[]byte", "bytes"
1631
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
1632
+		desc := g.ObjectNamed(field.GetTypeName())
1633
+		typ, wire = g.TypeName(desc), "varint"
1634
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
1635
+		typ, wire = "int32", "fixed32"
1636
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
1637
+		typ, wire = "int64", "fixed64"
1638
+	case descriptor.FieldDescriptorProto_TYPE_SINT32:
1639
+		typ, wire = "int32", "zigzag32"
1640
+	case descriptor.FieldDescriptorProto_TYPE_SINT64:
1641
+		typ, wire = "int64", "zigzag64"
1642
+	default:
1643
+		g.Fail("unknown type for", field.GetName())
1644
+	}
1645
+	if isRepeated(field) {
1646
+		typ = "[]" + typ
1647
+	} else if message != nil && message.proto3() {
1648
+		return
1649
+	} else if field.OneofIndex != nil && message != nil {
1650
+		return
1651
+	} else if needsStar(*field.Type) {
1652
+		typ = "*" + typ
1653
+	}
1654
+	return
1655
+}
1656
+
1657
+func (g *Generator) RecordTypeUse(t string) {
1658
+	if _, ok := g.typeNameToObject[t]; !ok {
1659
+		return
1660
+	}
1661
+	importPath := g.ObjectNamed(t).GoImportPath()
1662
+	if importPath == g.outputImportPath {
1663
+		// Don't record use of objects in our package.
1664
+		return
1665
+	}
1666
+	g.AddImport(importPath)
1667
+	g.usedPackages[importPath] = true
1668
+}
1669
+
1670
+// Method names that may be generated.  Fields with these names get an
1671
+// underscore appended. Any change to this set is a potential incompatible
1672
+// API change because it changes generated field names.
1673
+var methodNames = [...]string{
1674
+	"Reset",
1675
+	"String",
1676
+	"ProtoMessage",
1677
+	"Marshal",
1678
+	"Unmarshal",
1679
+	"ExtensionRangeArray",
1680
+	"ExtensionMap",
1681
+	"Descriptor",
1682
+}
1683
+
1684
+// Names of messages in the `google.protobuf` package for which
1685
+// we will generate XXX_WellKnownType methods.
1686
+var wellKnownTypes = map[string]bool{
1687
+	"Any":       true,
1688
+	"Duration":  true,
1689
+	"Empty":     true,
1690
+	"Struct":    true,
1691
+	"Timestamp": true,
1692
+
1693
+	"Value":       true,
1694
+	"ListValue":   true,
1695
+	"DoubleValue": true,
1696
+	"FloatValue":  true,
1697
+	"Int64Value":  true,
1698
+	"UInt64Value": true,
1699
+	"Int32Value":  true,
1700
+	"UInt32Value": true,
1701
+	"BoolValue":   true,
1702
+	"StringValue": true,
1703
+	"BytesValue":  true,
1704
+}
1705
+
1706
+// getterDefault finds the default value for the field to return from a getter,
1707
+// regardless of whether it's a built-in default or an explicit one from the source. Returns e.g. "nil", `""`, "Default_MessageType_FieldName"
1708
+func (g *Generator) getterDefault(field *descriptor.FieldDescriptorProto, goMessageType string) string {
1709
+	if isRepeated(field) {
1710
+		return "nil"
1711
+	}
1712
+	if def := field.GetDefaultValue(); def != "" {
1713
+		defaultConstant := g.defaultConstantName(goMessageType, field.GetName())
1714
+		if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES {
1715
+			return defaultConstant
1716
+		}
1717
+		return "append([]byte(nil), " + defaultConstant + "...)"
1718
+	}
1719
+	switch *field.Type {
1720
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
1721
+		return "false"
1722
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
1723
+		return `""`
1724
+	case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_BYTES:
1725
+		return "nil"
1726
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
1727
+		obj := g.ObjectNamed(field.GetTypeName())
1728
+		var enum *EnumDescriptor
1729
+		if id, ok := obj.(*ImportedDescriptor); ok {
1730
+			// The enum type has been publicly imported.
1731
+			enum, _ = id.o.(*EnumDescriptor)
1732
+		} else {
1733
+			enum, _ = obj.(*EnumDescriptor)
1734
+		}
1735
+		if enum == nil {
1736
+			log.Printf("don't know how to generate getter for %s", field.GetName())
1737
+			return "nil"
1738
+		}
1739
+		if len(enum.Value) == 0 {
1740
+			return "0 // empty enum"
1741
+		}
1742
+		first := enum.Value[0].GetName()
1743
+		return g.DefaultPackageName(obj) + enum.prefix() + first
1744
+	default:
1745
+		return "0"
1746
+	}
1747
+}
1748
+
1749
+// defaultConstantName builds the name of the default constant from the message
1750
+// type name and the untouched field name, e.g. "Default_MessageType_FieldName"
1751
+func (g *Generator) defaultConstantName(goMessageType, protoFieldName string) string {
1752
+	return "Default_" + goMessageType + "_" + CamelCase(protoFieldName)
1753
+}
1754
+
1755
+// The different types of fields in a message and how to actually print them
1756
+// Most of the logic for generateMessage is in the methods of these types.
1757
+//
1758
+// Note that the content of the field is irrelevant, a simpleField can contain
1759
+// anything from a scalar to a group (which is just a message).
1760
+//
1761
+// Extension fields (and message sets) are however handled separately.
1762
+//
1763
+// simpleField - a field that is neither weak nor oneof, possibly repeated
1764
+// oneofField - field containing list of subfields:
1765
+// - oneofSubField - a field within the oneof
1766
+
1767
+// msgCtx contains the context for the generator functions.
1768
+type msgCtx struct {
1769
+	goName  string      // Go struct name of the message, e.g. MessageName
1770
+	message *Descriptor // The descriptor for the message
1771
+}
1772
+
1773
+// fieldCommon contains data common to all types of fields.
1774
+type fieldCommon struct {
1775
+	goName     string // Go name of field, e.g. "FieldName" or "Descriptor_"
1776
+	protoName  string // Name of field in proto language, e.g. "field_name" or "descriptor"
1777
+	getterName string // Name of the getter, e.g. "GetFieldName" or "GetDescriptor_"
1778
+	goType     string // The Go type as a string, e.g. "*int32" or "*OtherMessage"
1779
+	tags       string // The tag string/annotation for the type, e.g. `protobuf:"varint,8,opt,name=region_id,json=regionId"`
1780
+	fullPath   string // The full path of the field as used by Annotate etc, e.g. "4,0,2,0"
1781
+}
1782
+
1783
+// getProtoName gets the proto name of a field, e.g. "field_name" or "descriptor".
1784
+func (f *fieldCommon) getProtoName() string {
1785
+	return f.protoName
1786
+}
1787
+
1788
+// getGoType returns the Go type of the field as a string, e.g. "*int32".
1789
+func (f *fieldCommon) getGoType() string {
1790
+	return f.goType
1791
+}
1792
+
1793
+// simpleField is not weak, not a oneof, not an extension. Can be required, optional or repeated.
1794
+type simpleField struct {
1795
+	fieldCommon
1796
+	protoTypeName string                               // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration"
1797
+	protoType     descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
1798
+	deprecated    string                               // Deprecation comment, if any, e.g. "// Deprecated: Do not use."
1799
+	getterDef     string                               // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName"
1800
+	protoDef      string                               // Default value as defined in the proto file, e.g. "yoshi" or "5"
1801
+	comment       string                               // The full comment for the field, e.g. "// Useful information"
1802
+}
1803
+
1804
+// decl prints the declaration of the field in the struct (if any).
1805
+func (f *simpleField) decl(g *Generator, mc *msgCtx) {
1806
+	g.P(f.comment, Annotate(mc.message.file, f.fullPath, f.goName), "\t", f.goType, "\t`", f.tags, "`", f.deprecated)
1807
+}
1808
+
1809
+// getter prints the getter for the field.
1810
+func (f *simpleField) getter(g *Generator, mc *msgCtx) {
1811
+	star := ""
1812
+	tname := f.goType
1813
+	if needsStar(f.protoType) && tname[0] == '*' {
1814
+		tname = tname[1:]
1815
+		star = "*"
1816
+	}
1817
+	if f.deprecated != "" {
1818
+		g.P(f.deprecated)
1819
+	}
1820
+	g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() "+tname+" {")
1821
+	if f.getterDef == "nil" { // Simpler getter
1822
+		g.P("if m != nil {")
1823
+		g.P("return m." + f.goName)
1824
+		g.P("}")
1825
+		g.P("return nil")
1826
+		g.P("}")
1827
+		g.P()
1828
+		return
1829
+	}
1830
+	if mc.message.proto3() {
1831
+		g.P("if m != nil {")
1832
+	} else {
1833
+		g.P("if m != nil && m." + f.goName + " != nil {")
1834
+	}
1835
+	g.P("return " + star + "m." + f.goName)
1836
+	g.P("}")
1837
+	g.P("return ", f.getterDef)
1838
+	g.P("}")
1839
+	g.P()
1840
+}
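As a sketch of the output, the getter emitted for a proto2 optional string field with an explicit default would look roughly like this. The message, field and default value are invented; the default constant itself is emitted by generateDefaultConstants further down:

```go
package examplepb

// Greeting is a hypothetical proto2 message:
//   optional string text = 1 [default = "hello"];
type Greeting struct {
	Text *string `protobuf:"bytes,1,opt,name=text,def=hello"`
}

const Default_Greeting_Text string = "hello"

func (m *Greeting) GetText() string {
	if m != nil && m.Text != nil {
		return *m.Text
	}
	return Default_Greeting_Text
}
```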
1841
+
1842
+// setter prints the setter method of the field.
1843
+func (f *simpleField) setter(g *Generator, mc *msgCtx) {
1844
+	// No setter for regular fields yet
1845
+}
1846
+
1847
+// getProtoDef returns the default value explicitly stated in the proto file, e.g. "yoshi" or "5".
1848
+func (f *simpleField) getProtoDef() string {
1849
+	return f.protoDef
1850
+}
1851
+
1852
+// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
1853
+func (f *simpleField) getProtoTypeName() string {
1854
+	return f.protoTypeName
1855
+}
1856
+
1857
+// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
1858
+func (f *simpleField) getProtoType() descriptor.FieldDescriptorProto_Type {
1859
+	return f.protoType
1860
+}
1861
+
1862
+// oneofSubFields are kept in a slice held by each oneofField. They do not appear in the top-level slice of fields for the message.
1863
+type oneofSubField struct {
1864
+	fieldCommon
1865
+	protoTypeName string                               // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration"
1866
+	protoType     descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
1867
+	oneofTypeName string                               // Type name of the enclosing struct, e.g. "MessageName_FieldName"
1868
+	fieldNumber   int                                  // Actual field number, as defined in proto, e.g. 12
1869
+	getterDef     string                               // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName"
1870
+	protoDef      string                               // Default value as defined in the proto file, e.g. "yoshi" or "5"
1871
+	deprecated    string                               // Deprecation comment, if any.
1872
+}
1873
+
1874
+// typedNil prints a nil casted to the pointer to this field.
1875
+// - for XXX_OneofWrappers
1876
+func (f *oneofSubField) typedNil(g *Generator) {
1877
+	g.P("(*", f.oneofTypeName, ")(nil),")
1878
+}
1879
+
1880
+// getProtoDef returns the default value explicitly stated in the proto file, e.g. "yoshi" or "5".
1881
+func (f *oneofSubField) getProtoDef() string {
1882
+	return f.protoDef
1883
+}
1884
+
1885
+// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
1886
+func (f *oneofSubField) getProtoTypeName() string {
1887
+	return f.protoTypeName
1888
+}
1889
+
1890
+// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
1891
+func (f *oneofSubField) getProtoType() descriptor.FieldDescriptorProto_Type {
1892
+	return f.protoType
1893
+}
1894
+
1895
+// oneofField represents the oneof on top level.
1896
+// The alternative fields within the oneof are represented by oneofSubField.
1897
+type oneofField struct {
1898
+	fieldCommon
1899
+	subFields []*oneofSubField // All the possible oneof fields
1900
+	comment   string           // The full comment for the field, e.g. "// Types that are valid to be assigned to MyOneof:\n\\"
1901
+}
1902
+
1903
+// decl prints the declaration of the field in the struct (if any).
1904
+func (f *oneofField) decl(g *Generator, mc *msgCtx) {
1905
+	comment := f.comment
1906
+	for _, sf := range f.subFields {
1907
+		comment += "//\t*" + sf.oneofTypeName + "\n"
1908
+	}
1909
+	g.P(comment, Annotate(mc.message.file, f.fullPath, f.goName), " ", f.goType, " `", f.tags, "`")
1910
+}
1911
+
1912
+// getter for a oneof field will print additional discriminators and interfaces for the oneof,
1913
+// it also prints all the getters for the sub fields.
1914
+func (f *oneofField) getter(g *Generator, mc *msgCtx) {
1915
+	// The discriminator type
1916
+	g.P("type ", f.goType, " interface {")
1917
+	g.P(f.goType, "()")
1918
+	g.P("}")
1919
+	g.P()
1920
+	// The subField types, fulfilling the discriminator type contract
1921
+	for _, sf := range f.subFields {
1922
+		g.P("type ", Annotate(mc.message.file, sf.fullPath, sf.oneofTypeName), " struct {")
1923
+		g.P(Annotate(mc.message.file, sf.fullPath, sf.goName), " ", sf.goType, " `", sf.tags, "`")
1924
+		g.P("}")
1925
+		g.P()
1926
+	}
1927
+	for _, sf := range f.subFields {
1928
+		g.P("func (*", sf.oneofTypeName, ") ", f.goType, "() {}")
1929
+		g.P()
1930
+	}
1931
+	// Getter for the oneof field
1932
+	g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() ", f.goType, " {")
1933
+	g.P("if m != nil { return m.", f.goName, " }")
1934
+	g.P("return nil")
1935
+	g.P("}")
1936
+	g.P()
1937
+	// Getters for each oneof
1938
+	for _, sf := range f.subFields {
1939
+		if sf.deprecated != "" {
1940
+			g.P(sf.deprecated)
1941
+		}
1942
+		g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, sf.fullPath, sf.getterName), "() "+sf.goType+" {")
1943
+		g.P("if x, ok := m.", f.getterName, "().(*", sf.oneofTypeName, "); ok {")
1944
+		g.P("return x.", sf.goName)
1945
+		g.P("}")
1946
+		g.P("return ", sf.getterDef)
1947
+		g.P("}")
1948
+		g.P()
1949
+	}
1950
+}
1951
+
1952
+// setter prints the setter method of the field.
1953
+func (f *oneofField) setter(g *Generator, mc *msgCtx) {
1954
+	// No setters for oneof yet
1955
+}
1956
+
1957
+// topLevelField interface implemented by all types of fields on the top level (not oneofSubField).
1958
+type topLevelField interface {
1959
+	decl(g *Generator, mc *msgCtx)   // print declaration within the struct
1960
+	getter(g *Generator, mc *msgCtx) // print getter
1961
+	setter(g *Generator, mc *msgCtx) // print setter if applicable
1962
+}
1963
+
1964
+// defField interface implemented by all types of fields that can have defaults (not oneofField, but instead oneofSubField).
1965
+type defField interface {
1966
+	getProtoDef() string                                // default value explicitly stated in the proto file, e.g. "yoshi" or "5"
1967
+	getProtoName() string                               // proto name of a field, e.g. "field_name" or "descriptor"
1968
+	getGoType() string                                  // Go type of the field as a string, e.g. "*int32"
1969
+	getProtoTypeName() string                           // protobuf type name for the field, e.g. ".google.protobuf.Duration"
1970
+	getProtoType() descriptor.FieldDescriptorProto_Type // *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
1971
+}
1972
+
1973
+// generateDefaultConstants adds constants for default values if needed, which is only if the default value is
1974
+// explicit in the proto.
1975
+func (g *Generator) generateDefaultConstants(mc *msgCtx, topLevelFields []topLevelField) {
1976
+	// Collect fields that can have defaults
1977
+	dFields := []defField{}
1978
+	for _, pf := range topLevelFields {
1979
+		if f, ok := pf.(*oneofField); ok {
1980
+			for _, osf := range f.subFields {
1981
+				dFields = append(dFields, osf)
1982
+			}
1983
+			continue
1984
+		}
1985
+		dFields = append(dFields, pf.(defField))
1986
+	}
1987
+	for _, df := range dFields {
1988
+		def := df.getProtoDef()
1989
+		if def == "" {
1990
+			continue
1991
+		}
1992
+		fieldname := g.defaultConstantName(mc.goName, df.getProtoName())
1993
+		typename := df.getGoType()
1994
+		if typename[0] == '*' {
1995
+			typename = typename[1:]
1996
+		}
1997
+		kind := "const "
1998
+		switch {
1999
+		case typename == "bool":
2000
+		case typename == "string":
2001
+			def = strconv.Quote(def)
2002
+		case typename == "[]byte":
2003
+			def = "[]byte(" + strconv.Quote(unescape(def)) + ")"
2004
+			kind = "var "
2005
+		case def == "inf", def == "-inf", def == "nan":
2006
+			// These names are known to, and defined by, the protocol language.
2007
+			switch def {
2008
+			case "inf":
2009
+				def = "math.Inf(1)"
2010
+			case "-inf":
2011
+				def = "math.Inf(-1)"
2012
+			case "nan":
2013
+				def = "math.NaN()"
2014
+			}
2015
+			if df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT {
2016
+				def = "float32(" + def + ")"
2017
+			}
2018
+			kind = "var "
2019
+		case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT:
2020
+			if f, err := strconv.ParseFloat(def, 32); err == nil {
2021
+				def = fmt.Sprint(float32(f))
2022
+			}
2023
+		case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_DOUBLE:
2024
+			if f, err := strconv.ParseFloat(def, 64); err == nil {
2025
+				def = fmt.Sprint(f)
2026
+			}
2027
+		case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_ENUM:
2028
+			// Must be an enum.  Need to construct the prefixed name.
2029
+			obj := g.ObjectNamed(df.getProtoTypeName())
2030
+			var enum *EnumDescriptor
2031
+			if id, ok := obj.(*ImportedDescriptor); ok {
2032
+				// The enum type has been publicly imported.
2033
+				enum, _ = id.o.(*EnumDescriptor)
2034
+			} else {
2035
+				enum, _ = obj.(*EnumDescriptor)
2036
+			}
2037
+			if enum == nil {
2038
+				log.Printf("don't know how to generate constant for %s", fieldname)
2039
+				continue
2040
+			}
2041
+			def = g.DefaultPackageName(obj) + enum.prefix() + def
2042
+		}
2043
+		g.P(kind, fieldname, " ", typename, " = ", def)
2044
+		g.file.addExport(mc.message, constOrVarSymbol{fieldname, kind, ""})
2045
+	}
2046
+	g.P()
2047
+}
2048
+
2049
+// generateInternalStructFields just adds the XXX_<something> fields to the message struct.
2050
+func (g *Generator) generateInternalStructFields(mc *msgCtx, topLevelFields []topLevelField) {
2051
+	g.P("XXX_NoUnkeyedLiteral\tstruct{} `json:\"-\"`") // prevent unkeyed struct literals
2052
+	if len(mc.message.ExtensionRange) > 0 {
2053
+		messageset := ""
2054
+		if opts := mc.message.Options; opts != nil && opts.GetMessageSetWireFormat() {
2055
+			messageset = "protobuf_messageset:\"1\" "
2056
+		}
2057
+		g.P(g.Pkg["proto"], ".XXX_InternalExtensions `", messageset, "json:\"-\"`")
2058
+	}
2059
+	g.P("XXX_unrecognized\t[]byte `json:\"-\"`")
2060
+	g.P("XXX_sizecache\tint32 `json:\"-\"`")
2061
+
2062
+}
2063
+
2064
+// generateOneofFuncs adds the XXX_OneofWrappers method, which the proto package uses to marshal, unmarshal and size oneof fields.
2065
+func (g *Generator) generateOneofFuncs(mc *msgCtx, topLevelFields []topLevelField) {
2066
+	ofields := []*oneofField{}
2067
+	for _, f := range topLevelFields {
2068
+		if o, ok := f.(*oneofField); ok {
2069
+			ofields = append(ofields, o)
2070
+		}
2071
+	}
2072
+	if len(ofields) == 0 {
2073
+		return
2074
+	}
2075
+
2076
+	// OneofFuncs
2077
+	g.P("// XXX_OneofWrappers is for the internal use of the proto package.")
2078
+	g.P("func (*", mc.goName, ") XXX_OneofWrappers() []interface{} {")
2079
+	g.P("return []interface{}{")
2080
+	for _, of := range ofields {
2081
+		for _, sf := range of.subFields {
2082
+			sf.typedNil(g)
2083
+		}
2084
+	}
2085
+	g.P("}")
2086
+	g.P("}")
2087
+	g.P()
2088
+}
2089
+
2090
+// generateMessageStruct adds the actual struct with its members (but not methods) to the output.
2091
+func (g *Generator) generateMessageStruct(mc *msgCtx, topLevelFields []topLevelField) {
2092
+	comments := g.PrintComments(mc.message.path)
2093
+
2094
+	// Guarantee deprecation comments appear after user-provided comments.
2095
+	if mc.message.GetOptions().GetDeprecated() {
2096
+		if comments {
2097
+			// Convention: Separate deprecation comments from original
2098
+			// comments with an empty line.
2099
+			g.P("//")
2100
+		}
2101
+		g.P(deprecationComment)
2102
+	}
2103
+
2104
+	g.P("type ", Annotate(mc.message.file, mc.message.path, mc.goName), " struct {")
2105
+	for _, pf := range topLevelFields {
2106
+		pf.decl(g, mc)
2107
+	}
2108
+	g.generateInternalStructFields(mc, topLevelFields)
2109
+	g.P("}")
2110
+}
2111
+
2112
+// generateGetters adds getters for all fields, including oneofs and weak fields when applicable.
2113
+func (g *Generator) generateGetters(mc *msgCtx, topLevelFields []topLevelField) {
2114
+	for _, pf := range topLevelFields {
2115
+		pf.getter(g, mc)
2116
+	}
2117
+}
2118
+
2119
+// generateSetters adds setters for all fields, including oneofs and weak fields when applicable.
2120
+func (g *Generator) generateSetters(mc *msgCtx, topLevelFields []topLevelField) {
2121
+	for _, pf := range topLevelFields {
2122
+		pf.setter(g, mc)
2123
+	}
2124
+}
2125
+
2126
+// generateCommonMethods adds methods to the message that are not on a per field basis.
2127
+func (g *Generator) generateCommonMethods(mc *msgCtx) {
2128
+	// Reset, String and ProtoMessage methods.
2129
+	g.P("func (m *", mc.goName, ") Reset() { *m = ", mc.goName, "{} }")
2130
+	g.P("func (m *", mc.goName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }")
2131
+	g.P("func (*", mc.goName, ") ProtoMessage() {}")
2132
+	var indexes []string
2133
+	for m := mc.message; m != nil; m = m.parent {
2134
+		indexes = append([]string{strconv.Itoa(m.index)}, indexes...)
2135
+	}
2136
+	g.P("func (*", mc.goName, ") Descriptor() ([]byte, []int) {")
2137
+	g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}")
2138
+	g.P("}")
2139
+	g.P()
2140
+	// TODO: Revisit the decision to use a XXX_WellKnownType method
2141
+	// if we change proto.MessageName to work with multiple equivalents.
2142
+	if mc.message.file.GetPackage() == "google.protobuf" && wellKnownTypes[mc.message.GetName()] {
2143
+		g.P("func (*", mc.goName, `) XXX_WellKnownType() string { return "`, mc.message.GetName(), `" }`)
2144
+		g.P()
2145
+	}
2146
+
2147
+	// Extension support methods
2148
+	if len(mc.message.ExtensionRange) > 0 {
2149
+		g.P()
2150
+		g.P("var extRange_", mc.goName, " = []", g.Pkg["proto"], ".ExtensionRange{")
2151
+		for _, r := range mc.message.ExtensionRange {
2152
+			end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends
2153
+			g.P("{Start: ", r.Start, ", End: ", end, "},")
2154
+		}
2155
+		g.P("}")
2156
+		g.P("func (*", mc.goName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {")
2157
+		g.P("return extRange_", mc.goName)
2158
+		g.P("}")
2159
+		g.P()
2160
+	}
2161
+
2162
+	// TODO: It does not scale to keep adding another method for every
2163
+	// operation on protos that we want to switch over to using the
2164
+	// table-driven approach. Instead, we should only add a single method
2165
+	// that allows getting access to the *InternalMessageInfo struct and then
2166
+	// calling Unmarshal, Marshal, Merge, Size, and Discard directly on that.
2167
+
2168
+	// Wrapper for table-driven marshaling and unmarshaling.
2169
+	g.P("func (m *", mc.goName, ") XXX_Unmarshal(b []byte) error {")
2170
+	g.P("return xxx_messageInfo_", mc.goName, ".Unmarshal(m, b)")
2171
+	g.P("}")
2172
+
2173
+	g.P("func (m *", mc.goName, ") XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {")
2174
+	g.P("return xxx_messageInfo_", mc.goName, ".Marshal(b, m, deterministic)")
2175
+	g.P("}")
2176
+
2177
+	g.P("func (m *", mc.goName, ") XXX_Merge(src ", g.Pkg["proto"], ".Message) {")
2178
+	g.P("xxx_messageInfo_", mc.goName, ".Merge(m, src)")
2179
+	g.P("}")
2180
+
2181
+	g.P("func (m *", mc.goName, ") XXX_Size() int {") // avoid name clash with "Size" field in some message
2182
+	g.P("return xxx_messageInfo_", mc.goName, ".Size(m)")
2183
+	g.P("}")
2184
+
2185
+	g.P("func (m *", mc.goName, ") XXX_DiscardUnknown() {")
2186
+	g.P("xxx_messageInfo_", mc.goName, ".DiscardUnknown(m)")
2187
+	g.P("}")
2188
+
2189
+	g.P("var xxx_messageInfo_", mc.goName, " ", g.Pkg["proto"], ".InternalMessageInfo")
2190
+	g.P()
2191
+}
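
Assembled, the methods generated above take roughly the following shape for a hypothetical message `Foo` (a sketch, not actual generator output; the descriptor and registration pieces are omitted):

```go
package examplepb // hypothetical generated package

import "github.com/golang/protobuf/proto"

type Foo struct{} // stand-in for a generated message struct

// Every message gets one InternalMessageInfo that the XXX_* wrappers delegate to.
var xxx_messageInfo_Foo proto.InternalMessageInfo

func (m *Foo) Reset()         { *m = Foo{} }
func (m *Foo) String() string { return proto.CompactTextString(m) }
func (*Foo) ProtoMessage()    {}

func (m *Foo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Foo.Unmarshal(m, b) }
func (m *Foo) XXX_Size() int                { return xxx_messageInfo_Foo.Size(m) }
func (m *Foo) XXX_DiscardUnknown()          { xxx_messageInfo_Foo.DiscardUnknown(m) }
```
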
2192
+
2193
+// Generate the type, methods and default constant definitions for this Descriptor.
2194
+func (g *Generator) generateMessage(message *Descriptor) {
2195
+	topLevelFields := []topLevelField{}
2196
+	oFields := make(map[int32]*oneofField)
2197
+	// The full type name
2198
+	typeName := message.TypeName()
2199
+	// The full type name, CamelCased.
2200
+	goTypeName := CamelCaseSlice(typeName)
2201
+
2202
+	usedNames := make(map[string]bool)
2203
+	for _, n := range methodNames {
2204
+		usedNames[n] = true
2205
+	}
2206
+
2207
+	// allocNames finds a conflict-free variation of the given strings,
2208
+	// consistently mutating their suffixes.
2209
+	// It returns the same number of strings.
2210
+	allocNames := func(ns ...string) []string {
2211
+	Loop:
2212
+		for {
2213
+			for _, n := range ns {
2214
+				if usedNames[n] {
2215
+					for i := range ns {
2216
+						ns[i] += "_"
2217
+					}
2218
+					continue Loop
2219
+				}
2220
+			}
2221
+			for _, n := range ns {
2222
+				usedNames[n] = true
2223
+			}
2224
+			return ns
2225
+		}
2226
+	}
2227
+
2228
+	mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) // keep track of the map fields to be added later
2229
+
2230
+	// Build a structure more suitable for generating the text in one pass
2231
+	for i, field := range message.Field {
2232
+		// Allocate the getter and the field at the same time so name
2233
+		// collisions create field/method consistent names.
2234
+		// TODO: This allocation occurs based on the order of the fields
2235
+		// in the proto file, meaning that a change in the field
2236
+		// ordering can change generated Method/Field names.
2237
+		base := CamelCase(*field.Name)
2238
+		ns := allocNames(base, "Get"+base)
2239
+		fieldName, fieldGetterName := ns[0], ns[1]
2240
+		typename, wiretype := g.GoType(message, field)
2241
+		jsonName := *field.Name
2242
+		tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty")
2243
+
2244
+		oneof := field.OneofIndex != nil
2245
+		if oneof && oFields[*field.OneofIndex] == nil {
2246
+			odp := message.OneofDecl[int(*field.OneofIndex)]
2247
+			base := CamelCase(odp.GetName())
2248
+			fname := allocNames(base)[0]
2249
+
2250
+			// This is the first field of a oneof we haven't seen before.
2251
+			// Generate the union field.
2252
+			oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex)
2253
+			c, ok := g.makeComments(oneofFullPath)
2254
+			if ok {
2255
+				c += "\n//\n"
2256
+			}
2257
+			c += "// Types that are valid to be assigned to " + fname + ":\n"
2258
+			// Generate the rest of this comment later,
2259
+			// when we've computed any disambiguation.
2260
+
2261
+			dname := "is" + goTypeName + "_" + fname
2262
+			tag := `protobuf_oneof:"` + odp.GetName() + `"`
2263
+			of := oneofField{
2264
+				fieldCommon: fieldCommon{
2265
+					goName:     fname,
2266
+					getterName: "Get" + fname,
2267
+					goType:     dname,
2268
+					tags:       tag,
2269
+					protoName:  odp.GetName(),
2270
+					fullPath:   oneofFullPath,
2271
+				},
2272
+				comment: c,
2273
+			}
2274
+			topLevelFields = append(topLevelFields, &of)
2275
+			oFields[*field.OneofIndex] = &of
2276
+		}
2277
+
2278
+		if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
2279
+			desc := g.ObjectNamed(field.GetTypeName())
2280
+			if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() {
2281
+				// Figure out the Go types and tags for the key and value types.
2282
+				keyField, valField := d.Field[0], d.Field[1]
2283
+				keyType, keyWire := g.GoType(d, keyField)
2284
+				valType, valWire := g.GoType(d, valField)
2285
+				keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire)
2286
+
2287
+				// We don't use stars, except for message-typed values.
2288
+				// Message and enum types are the only two possibly foreign types used in maps,
2289
+				// so record their use. They are not permitted as map keys.
2290
+				keyType = strings.TrimPrefix(keyType, "*")
2291
+				switch *valField.Type {
2292
+				case descriptor.FieldDescriptorProto_TYPE_ENUM:
2293
+					valType = strings.TrimPrefix(valType, "*")
2294
+					g.RecordTypeUse(valField.GetTypeName())
2295
+				case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
2296
+					g.RecordTypeUse(valField.GetTypeName())
2297
+				default:
2298
+					valType = strings.TrimPrefix(valType, "*")
2299
+				}
2300
+
2301
+				typename = fmt.Sprintf("map[%s]%s", keyType, valType)
2302
+				mapFieldTypes[field] = typename // record for the getter generation
2303
+
2304
+				tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag)
2305
+			}
2306
+		}
2307
+
2308
+		fieldDeprecated := ""
2309
+		if field.GetOptions().GetDeprecated() {
2310
+			fieldDeprecated = deprecationComment
2311
+		}
2312
+
2313
+		dvalue := g.getterDefault(field, goTypeName)
2314
+		if oneof {
2315
+			tname := goTypeName + "_" + fieldName
2316
+			// It is possible for this to collide with a message or enum
2317
+			// nested in this message. Check for collisions.
2318
+			for {
2319
+				ok := true
2320
+				for _, desc := range message.nested {
2321
+					if CamelCaseSlice(desc.TypeName()) == tname {
2322
+						ok = false
2323
+						break
2324
+					}
2325
+				}
2326
+				for _, enum := range message.enums {
2327
+					if CamelCaseSlice(enum.TypeName()) == tname {
2328
+						ok = false
2329
+						break
2330
+					}
2331
+				}
2332
+				if !ok {
2333
+					tname += "_"
2334
+					continue
2335
+				}
2336
+				break
2337
+			}
2338
+
2339
+			oneofField := oFields[*field.OneofIndex]
2340
+			tag := "protobuf:" + g.goTag(message, field, wiretype)
2341
+			sf := oneofSubField{
2342
+				fieldCommon: fieldCommon{
2343
+					goName:     fieldName,
2344
+					getterName: fieldGetterName,
2345
+					goType:     typename,
2346
+					tags:       tag,
2347
+					protoName:  field.GetName(),
2348
+					fullPath:   fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i),
2349
+				},
2350
+				protoTypeName: field.GetTypeName(),
2351
+				fieldNumber:   int(*field.Number),
2352
+				protoType:     *field.Type,
2353
+				getterDef:     dvalue,
2354
+				protoDef:      field.GetDefaultValue(),
2355
+				oneofTypeName: tname,
2356
+				deprecated:    fieldDeprecated,
2357
+			}
2358
+			oneofField.subFields = append(oneofField.subFields, &sf)
2359
+			g.RecordTypeUse(field.GetTypeName())
2360
+			continue
2361
+		}
2362
+
2363
+		fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i)
2364
+		c, ok := g.makeComments(fieldFullPath)
2365
+		if ok {
2366
+			c += "\n"
2367
+		}
2368
+		rf := simpleField{
2369
+			fieldCommon: fieldCommon{
2370
+				goName:     fieldName,
2371
+				getterName: fieldGetterName,
2372
+				goType:     typename,
2373
+				tags:       tag,
2374
+				protoName:  field.GetName(),
2375
+				fullPath:   fieldFullPath,
2376
+			},
2377
+			protoTypeName: field.GetTypeName(),
2378
+			protoType:     *field.Type,
2379
+			deprecated:    fieldDeprecated,
2380
+			getterDef:     dvalue,
2381
+			protoDef:      field.GetDefaultValue(),
2382
+			comment:       c,
2383
+		}
2384
+		var pf topLevelField = &rf
2385
+
2386
+		topLevelFields = append(topLevelFields, pf)
2387
+		g.RecordTypeUse(field.GetTypeName())
2388
+	}
2389
+
2390
+	mc := &msgCtx{
2391
+		goName:  goTypeName,
2392
+		message: message,
2393
+	}
2394
+
2395
+	g.generateMessageStruct(mc, topLevelFields)
2396
+	g.P()
2397
+	g.generateCommonMethods(mc)
2398
+	g.P()
2399
+	g.generateDefaultConstants(mc, topLevelFields)
2400
+	g.P()
2401
+	g.generateGetters(mc, topLevelFields)
2402
+	g.P()
2403
+	g.generateSetters(mc, topLevelFields)
2404
+	g.P()
2405
+	g.generateOneofFuncs(mc, topLevelFields)
2406
+	g.P()
2407
+
2408
+	var oneofTypes []string
2409
+	for _, f := range topLevelFields {
2410
+		if of, ok := f.(*oneofField); ok {
2411
+			for _, osf := range of.subFields {
2412
+				oneofTypes = append(oneofTypes, osf.oneofTypeName)
2413
+			}
2414
+		}
2415
+	}
2416
+
2417
+	opts := message.Options
2418
+	ms := &messageSymbol{
2419
+		sym:           goTypeName,
2420
+		hasExtensions: len(message.ExtensionRange) > 0,
2421
+		isMessageSet:  opts != nil && opts.GetMessageSetWireFormat(),
2422
+		oneofTypes:    oneofTypes,
2423
+	}
2424
+	g.file.addExport(message, ms)
2425
+
2426
+	for _, ext := range message.ext {
2427
+		g.generateExtension(ext)
2428
+	}
2429
+
2430
+	fullName := strings.Join(message.TypeName(), ".")
2431
+	if g.file.Package != nil {
2432
+		fullName = *g.file.Package + "." + fullName
2433
+	}
2434
+
2435
+	g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], goTypeName, fullName)
2436
+	// Register types for native map types.
2437
+	for _, k := range mapFieldKeys(mapFieldTypes) {
2438
+		fullName := strings.TrimPrefix(*k.TypeName, ".")
2439
+		g.addInitf("%s.RegisterMapType((%s)(nil), %q)", g.Pkg["proto"], mapFieldTypes[k], fullName)
2440
+	}
2441
+
2442
+}
2443
+
2444
+type byTypeName []*descriptor.FieldDescriptorProto
2445
+
2446
+func (a byTypeName) Len() int           { return len(a) }
2447
+func (a byTypeName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
2448
+func (a byTypeName) Less(i, j int) bool { return *a[i].TypeName < *a[j].TypeName }
2449
+
2450
+// mapFieldKeys returns the keys of m in a consistent order.
2451
+func mapFieldKeys(m map[*descriptor.FieldDescriptorProto]string) []*descriptor.FieldDescriptorProto {
2452
+	keys := make([]*descriptor.FieldDescriptorProto, 0, len(m))
2453
+	for k := range m {
2454
+		keys = append(keys, k)
2455
+	}
2456
+	sort.Sort(byTypeName(keys))
2457
+	return keys
2458
+}
2459
+
2460
+var escapeChars = [256]byte{
2461
+	'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?',
2462
+}
2463
+
2464
+// unescape reverses the "C" escaping that protoc does for default values of bytes fields.
2465
+// It is best effort in that it effectively ignores malformed input. Seemingly invalid escape
2466
+// sequences are conveyed, unmodified, into the decoded result.
2467
+func unescape(s string) string {
2468
+	// NB: Sadly, we can't use strconv.Unquote because protoc will escape both
2469
+	// single and double quotes, but strconv.Unquote only allows one or the
2470
+	// other (based on actual surrounding quotes of its input argument).
2471
+
2472
+	var out []byte
2473
+	for len(s) > 0 {
2474
+		// regular character, or too short to be valid escape
2475
+		if s[0] != '\\' || len(s) < 2 {
2476
+			out = append(out, s[0])
2477
+			s = s[1:]
2478
+		} else if c := escapeChars[s[1]]; c != 0 {
2479
+			// escape sequence
2480
+			out = append(out, c)
2481
+			s = s[2:]
2482
+		} else if s[1] == 'x' || s[1] == 'X' {
2483
+			// hex escape, e.g. "\x80"
2484
+			if len(s) < 4 {
2485
+				// too short to be valid
2486
+				out = append(out, s[:2]...)
2487
+				s = s[2:]
2488
+				continue
2489
+			}
2490
+			v, err := strconv.ParseUint(s[2:4], 16, 8)
2491
+			if err != nil {
2492
+				out = append(out, s[:4]...)
2493
+			} else {
2494
+				out = append(out, byte(v))
2495
+			}
2496
+			s = s[4:]
2497
+		} else if '0' <= s[1] && s[1] <= '7' {
2498
+			// octal escape, can vary from 1 to 3 octal digits; e.g., "\0" "\40" or "\164"
2499
+			// so consume up to 2 more bytes or up to end-of-string
2500
+			n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567"))
2501
+			if n > 3 {
2502
+				n = 3
2503
+			}
2504
+			v, err := strconv.ParseUint(s[1:1+n], 8, 8)
2505
+			if err != nil {
2506
+				out = append(out, s[:1+n]...)
2507
+			} else {
2508
+				out = append(out, byte(v))
2509
+			}
2510
+			s = s[1+n:]
2511
+		} else {
2512
+			// bad escape, just propagate the slash as-is
2513
+			out = append(out, s[0])
2514
+			s = s[1:]
2515
+		}
2516
+	}
2517
+
2518
+	return string(out)
2519
+}
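
A minimal test-style sketch of the behaviour described above, assuming it lives in the same package so it can reach the unexported function (the inputs are invented):

```go
package generator

import "testing"

func TestUnescapeSketch(t *testing.T) {
	// Hex and octal escapes are decoded; named escapes come from escapeChars.
	if got := unescape(`\x41\102\n`); got != "AB\n" {
		t.Fatalf("unescape: got %q, want %q", got, "AB\n")
	}
	// An unrecognized escape is passed through unmodified, as documented.
	if got := unescape(`\q`); got != `\q` {
		t.Fatalf("unescape: got %q, want %q", got, `\q`)
	}
}
```
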
2520
+
2521
+func (g *Generator) generateExtension(ext *ExtensionDescriptor) {
2522
+	ccTypeName := ext.DescName()
2523
+
2524
+	extObj := g.ObjectNamed(*ext.Extendee)
2525
+	var extDesc *Descriptor
2526
+	if id, ok := extObj.(*ImportedDescriptor); ok {
2527
+		// This is extending a publicly imported message.
2528
+		// We need the underlying type for goTag.
2529
+		extDesc = id.o.(*Descriptor)
2530
+	} else {
2531
+		extDesc = extObj.(*Descriptor)
2532
+	}
2533
+	extendedType := "*" + g.TypeName(extObj) // always use the original
2534
+	field := ext.FieldDescriptorProto
2535
+	fieldType, wireType := g.GoType(ext.parent, field)
2536
+	tag := g.goTag(extDesc, field, wireType)
2537
+	g.RecordTypeUse(*ext.Extendee)
2538
+	if n := ext.FieldDescriptorProto.TypeName; n != nil {
2539
+		// foreign extension type
2540
+		g.RecordTypeUse(*n)
2541
+	}
2542
+
2543
+	typeName := ext.TypeName()
2544
+
2545
+	// Special case for proto2 message sets: If this extension is extending
2546
+	// proto2.bridge.MessageSet, and its final name component is "message_set_extension",
2547
+	// then drop that last component.
2548
+	//
2549
+	// TODO: This should be implemented in the text formatter rather than the generator.
2550
+	// In addition, the situation for when to apply this special case is implemented
2551
+	// differently in other languages:
2552
+	// https://github.com/google/protobuf/blob/aff10976/src/google/protobuf/text_format.cc#L1560
2553
+	if extDesc.GetOptions().GetMessageSetWireFormat() && typeName[len(typeName)-1] == "message_set_extension" {
2554
+		typeName = typeName[:len(typeName)-1]
2555
+	}
2556
+
2557
+	// For text formatting, the package must be exactly what the .proto file declares,
2558
+	// ignoring overrides such as the go_package option, and with no dot/underscore mapping.
2559
+	extName := strings.Join(typeName, ".")
2560
+	if g.file.Package != nil {
2561
+		extName = *g.file.Package + "." + extName
2562
+	}
2563
+
2564
+	g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{")
2565
+	g.P("ExtendedType: (", extendedType, ")(nil),")
2566
+	g.P("ExtensionType: (", fieldType, ")(nil),")
2567
+	g.P("Field: ", field.Number, ",")
2568
+	g.P(`Name: "`, extName, `",`)
2569
+	g.P("Tag: ", tag, ",")
2570
+	g.P(`Filename: "`, g.file.GetName(), `",`)
2571
+
2572
+	g.P("}")
2573
+	g.P()
2574
+
2575
+	g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName())
2576
+
2577
+	g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""})
2578
+}
2579
+
2580
+func (g *Generator) generateInitFunction() {
2581
+	if len(g.init) == 0 {
2582
+		return
2583
+	}
2584
+	g.P("func init() {")
2585
+	for _, l := range g.init {
2586
+		g.P(l)
2587
+	}
2588
+	g.P("}")
2589
+	g.init = nil
2590
+}
2591
+
2592
+func (g *Generator) generateFileDescriptor(file *FileDescriptor) {
2593
+	// Make a copy and trim source_code_info data.
2594
+	// TODO: Trim this more when we know exactly what we need.
2595
+	pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto)
2596
+	pb.SourceCodeInfo = nil
2597
+
2598
+	b, err := proto.Marshal(pb)
2599
+	if err != nil {
2600
+		g.Fail(err.Error())
2601
+	}
2602
+
2603
+	var buf bytes.Buffer
2604
+	w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression)
2605
+	w.Write(b)
2606
+	w.Close()
2607
+	b = buf.Bytes()
2608
+
2609
+	v := file.VarName()
2610
+	g.P()
2611
+	g.P("func init() {")
2612
+	g.P(g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ")")
2613
+	g.P("}")
2614
+	g.P("var ", v, " = []byte{")
2615
+	g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto")
2616
+	for len(b) > 0 {
2617
+		n := 16
2618
+		if n > len(b) {
2619
+			n = len(b)
2620
+		}
2621
+
2622
+		s := ""
2623
+		for _, c := range b[:n] {
2624
+			s += fmt.Sprintf("0x%02x,", c)
2625
+		}
2626
+		g.P(s)
2627
+
2628
+		b = b[n:]
2629
+	}
2630
+	g.P("}")
2631
+}
2632
+
2633
+func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) {
2634
+	// We always print the full (proto-world) package name here.
2635
+	pkg := enum.File().GetPackage()
2636
+	if pkg != "" {
2637
+		pkg += "."
2638
+	}
2639
+	// The full type name
2640
+	typeName := enum.TypeName()
2641
+	// The full type name, CamelCased.
2642
+	ccTypeName := CamelCaseSlice(typeName)
2643
+	g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName)
2644
+}
2645
+
2646
+// And now lots of helper functions.
2647
+
2648
+// Is c an ASCII lower-case letter?
2649
+func isASCIILower(c byte) bool {
2650
+	return 'a' <= c && c <= 'z'
2651
+}
2652
+
2653
+// Is c an ASCII digit?
2654
+func isASCIIDigit(c byte) bool {
2655
+	return '0' <= c && c <= '9'
2656
+}
2657
+
2658
+// CamelCase returns the CamelCased name.
2659
+// If there is an interior underscore followed by a lower case letter,
2660
+// drop the underscore and convert the letter to upper case.
2661
+// There is a remote possibility of this rewrite causing a name collision,
2662
+// but it's so remote we're prepared to pretend it's nonexistent - since the
2663
+// C++ generator lowercases names, it's extremely unlikely to have two fields
2664
+// with different capitalizations.
2665
+// In short, _my_field_name_2 becomes XMyFieldName_2.
2666
+func CamelCase(s string) string {
2667
+	if s == "" {
2668
+		return ""
2669
+	}
2670
+	t := make([]byte, 0, 32)
2671
+	i := 0
2672
+	if s[0] == '_' {
2673
+		// Need a capital letter; drop the '_'.
2674
+		t = append(t, 'X')
2675
+		i++
2676
+	}
2677
+	// Invariant: if the next letter is lower case, it must be converted
2678
+	// to upper case.
2679
+	// That is, we process a word at a time, where words are marked by _ or
2680
+	// upper case letter. Digits are treated as words.
2681
+	for ; i < len(s); i++ {
2682
+		c := s[i]
2683
+		if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) {
2684
+			continue // Skip the underscore in s.
2685
+		}
2686
+		if isASCIIDigit(c) {
2687
+			t = append(t, c)
2688
+			continue
2689
+		}
2690
+		// Assume we have a letter now - if not, it's a bogus identifier.
2691
+		// The next word is a sequence of characters that must start upper case.
2692
+		if isASCIILower(c) {
2693
+			c ^= ' ' // Make it a capital letter.
2694
+		}
2695
+		t = append(t, c) // Guaranteed not lower case.
2696
+		// Accept lower case sequence that follows.
2697
+		for i+1 < len(s) && isASCIILower(s[i+1]) {
2698
+			i++
2699
+			t = append(t, s[i])
2700
+		}
2701
+	}
2702
+	return string(t)
2703
+}
2704
+
2705
+// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to
2706
+// be joined with "_".
2707
+func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) }
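
For example, the transformation described in the doc comment can be checked directly against these exported helpers (a small sketch using the real package path of this file):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/protoc-gen-go/generator"
)

func main() {
	fmt.Println(generator.CamelCase("_my_field_name_2"))             // XMyFieldName_2
	fmt.Println(generator.CamelCaseSlice([]string{"foo", "bar_id"})) // FooBarId
}
```
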
2708
+
2709
+// dottedSlice turns a sliced name into a dotted name.
2710
+func dottedSlice(elem []string) string { return strings.Join(elem, ".") }
2711
+
2712
+// Is this field optional?
2713
+func isOptional(field *descriptor.FieldDescriptorProto) bool {
2714
+	return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL
2715
+}
2716
+
2717
+// Is this field required?
2718
+func isRequired(field *descriptor.FieldDescriptorProto) bool {
2719
+	return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED
2720
+}
2721
+
2722
+// Is this field repeated?
2723
+func isRepeated(field *descriptor.FieldDescriptorProto) bool {
2724
+	return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED
2725
+}
2726
+
2727
+// Is this field a scalar numeric type?
2728
+func isScalar(field *descriptor.FieldDescriptorProto) bool {
2729
+	if field.Type == nil {
2730
+		return false
2731
+	}
2732
+	switch *field.Type {
2733
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE,
2734
+		descriptor.FieldDescriptorProto_TYPE_FLOAT,
2735
+		descriptor.FieldDescriptorProto_TYPE_INT64,
2736
+		descriptor.FieldDescriptorProto_TYPE_UINT64,
2737
+		descriptor.FieldDescriptorProto_TYPE_INT32,
2738
+		descriptor.FieldDescriptorProto_TYPE_FIXED64,
2739
+		descriptor.FieldDescriptorProto_TYPE_FIXED32,
2740
+		descriptor.FieldDescriptorProto_TYPE_BOOL,
2741
+		descriptor.FieldDescriptorProto_TYPE_UINT32,
2742
+		descriptor.FieldDescriptorProto_TYPE_ENUM,
2743
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32,
2744
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64,
2745
+		descriptor.FieldDescriptorProto_TYPE_SINT32,
2746
+		descriptor.FieldDescriptorProto_TYPE_SINT64:
2747
+		return true
2748
+	default:
2749
+		return false
2750
+	}
2751
+}
2752
+
2753
+// badToUnderscore is the mapping function used to generate Go names from package names,
2754
+// which can be dotted in the input .proto file.  It replaces non-identifier characters such as
2755
+// dot or dash with underscore.
2756
+func badToUnderscore(r rune) rune {
2757
+	if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' {
2758
+		return r
2759
+	}
2760
+	return '_'
2761
+}
2762
+
2763
+// baseName returns the last path element of the name, with the last dotted suffix removed.
2764
+func baseName(name string) string {
2765
+	// First, find the last element
2766
+	if i := strings.LastIndex(name, "/"); i >= 0 {
2767
+		name = name[i+1:]
2768
+	}
2769
+	// Now drop the suffix
2770
+	if i := strings.LastIndex(name, "."); i >= 0 {
2771
+		name = name[0:i]
2772
+	}
2773
+	return name
2774
+}
2775
+
2776
+// The SourceCodeInfo message describes the location of elements of a parsed
2777
+// .proto file by way of a "path", which is a sequence of integers that
2778
+// describe the route from a FileDescriptorProto to the relevant submessage.
2779
+// The path alternates between a field number of a repeated field, and an index
2780
+// into that repeated field. The constants below define the field numbers that
2781
+// are used.
2782
+//
2783
+// See descriptor.proto for more information about this.
2784
+const (
2785
+	// tag numbers in FileDescriptorProto
2786
+	packagePath = 2 // package
2787
+	messagePath = 4 // message_type
2788
+	enumPath    = 5 // enum_type
2789
+	// tag numbers in DescriptorProto
2790
+	messageFieldPath   = 2 // field
2791
+	messageMessagePath = 3 // nested_type
2792
+	messageEnumPath    = 4 // enum_type
2793
+	messageOneofPath   = 8 // oneof_decl
2794
+	// tag numbers in EnumDescriptorProto
2795
+	enumValuePath = 2 // value
2796
+)
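
As a concrete example of how these tag numbers combine into a comment path, the generator builds strings like "4,0,2,1" (message_type, first message, field, second field), mirroring the fmt.Sprintf calls used earlier in this file. A self-contained sketch, with the two relevant constants copied locally so it runs on its own:

```go
package main

import "fmt"

const (
	messagePath      = 4 // FileDescriptorProto.message_type
	messageFieldPath = 2 // DescriptorProto.field
)

func main() {
	// Path to field index 1 of top-level message index 0.
	msgPath := fmt.Sprintf("%d,%d", messagePath, 0)
	fmt.Println(fmt.Sprintf("%s,%d,%d", msgPath, messageFieldPath, 1)) // 4,0,2,1
}
```
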
2797
+
2798
+var supportTypeAliases bool
2799
+
2800
+func init() {
2801
+	for _, tag := range build.Default.ReleaseTags {
2802
+		if tag == "go1.9" {
2803
+			supportTypeAliases = true
2804
+			return
2805
+		}
2806
+	}
2807
+}
0 2808
new file mode 100644
... ...
@@ -0,0 +1,117 @@
0
+// Go support for Protocol Buffers - Google's data interchange format
1
+//
2
+// Copyright 2017 The Go Authors.  All rights reserved.
3
+// https://github.com/golang/protobuf
4
+//
5
+// Redistribution and use in source and binary forms, with or without
6
+// modification, are permitted provided that the following conditions are
7
+// met:
8
+//
9
+//     * Redistributions of source code must retain the above copyright
10
+// notice, this list of conditions and the following disclaimer.
11
+//     * Redistributions in binary form must reproduce the above
12
+// copyright notice, this list of conditions and the following disclaimer
13
+// in the documentation and/or other materials provided with the
14
+// distribution.
15
+//     * Neither the name of Google Inc. nor the names of its
16
+// contributors may be used to endorse or promote products derived from
17
+// this software without specific prior written permission.
18
+//
19
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
+
31
+/*
32
+Package remap handles tracking the locations of Go tokens in a source text
33
+across a rewrite by the Go formatter.
34
+*/
35
+package remap
36
+
37
+import (
38
+	"fmt"
39
+	"go/scanner"
40
+	"go/token"
41
+)
42
+
43
+// A Location represents a span of byte offsets in the source text.
44
+type Location struct {
45
+	Pos, End int // End is exclusive
46
+}
47
+
48
+// A Map represents a mapping between token locations in an input source text
49
+// and locations in the corresponding output text.
50
+type Map map[Location]Location
51
+
52
+// Find reports whether the specified span is recorded by m, and if so returns
53
+// the new location it was mapped to. If the input span was not found, the
54
+// returned location is the same as the input.
55
+func (m Map) Find(pos, end int) (Location, bool) {
56
+	key := Location{
57
+		Pos: pos,
58
+		End: end,
59
+	}
60
+	if loc, ok := m[key]; ok {
61
+		return loc, true
62
+	}
63
+	return key, false
64
+}
65
+
66
+func (m Map) add(opos, oend, npos, nend int) {
67
+	m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend}
68
+}
69
+
70
+// Compute constructs a location mapping from input to output.  An error is
71
+// reported if any of the tokens of output cannot be mapped.
72
+func Compute(input, output []byte) (Map, error) {
73
+	itok := tokenize(input)
74
+	otok := tokenize(output)
75
+	if len(itok) != len(otok) {
76
+		return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok))
77
+	}
78
+	m := make(Map)
79
+	for i, ti := range itok {
80
+		to := otok[i]
81
+		if ti.Token != to.Token {
82
+			return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to)
83
+		}
84
+		m.add(ti.pos, ti.end, to.pos, to.end)
85
+	}
86
+	return m, nil
87
+}
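
A hedged usage sketch of Compute and Find, tracking where one identifier lands after a gofmt-style rewrite. The input and output bytes are invented, and the import path is the internal one this package lives under, so the snippet only builds inside the protoc-gen-go tree:

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/protoc-gen-go/generator/internal/remap"
)

func main() {
	input := []byte("package x\nvar   a  =  1\n") // pre-format source
	output := []byte("package x\n\nvar a = 1\n")  // what a formatter might emit

	m, err := remap.Compute(input, output)
	if err != nil {
		log.Fatal(err)
	}
	// The identifier "a" occupies bytes [16,17) of the input; Find reports
	// where that token moved to in the output (here, [15,16)).
	if loc, ok := m.Find(16, 17); ok {
		fmt.Println(loc.Pos, loc.End)
	}
}
```
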
88
+
89
+// tokinfo records the span and type of a source token.
90
+type tokinfo struct {
91
+	pos, end int
92
+	token.Token
93
+}
94
+
95
+func tokenize(src []byte) []tokinfo {
96
+	fs := token.NewFileSet()
97
+	var s scanner.Scanner
98
+	s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments)
99
+	var info []tokinfo
100
+	for {
101
+		pos, next, lit := s.Scan()
102
+		switch next {
103
+		case token.SEMICOLON:
104
+			continue
105
+		}
106
+		info = append(info, tokinfo{
107
+			pos:   int(pos - 1),
108
+			end:   int(pos + token.Pos(len(lit)) - 1),
109
+			Token: next,
110
+		})
111
+		if next == token.EOF {
112
+			break
113
+		}
114
+	}
115
+	return info
116
+}
0 117
new file mode 100644
... ...
@@ -0,0 +1,545 @@
0
+// Go support for Protocol Buffers - Google's data interchange format
1
+//
2
+// Copyright 2015 The Go Authors.  All rights reserved.
3
+// https://github.com/golang/protobuf
4
+//
5
+// Redistribution and use in source and binary forms, with or without
6
+// modification, are permitted provided that the following conditions are
7
+// met:
8
+//
9
+//     * Redistributions of source code must retain the above copyright
10
+// notice, this list of conditions and the following disclaimer.
11
+//     * Redistributions in binary form must reproduce the above
12
+// copyright notice, this list of conditions and the following disclaimer
13
+// in the documentation and/or other materials provided with the
14
+// distribution.
15
+//     * Neither the name of Google Inc. nor the names of its
16
+// contributors may be used to endorse or promote products derived from
17
+// this software without specific prior written permission.
18
+//
19
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
+
31
+// Package grpc outputs gRPC service descriptions in Go code.
32
+// It runs as a plugin for the Go protocol buffer compiler plugin.
33
+// It is linked in to protoc-gen-go.
34
+package grpc
35
+
36
+import (
37
+	"fmt"
38
+	"strconv"
39
+	"strings"
40
+
41
+	pb "github.com/golang/protobuf/protoc-gen-go/descriptor"
42
+	"github.com/golang/protobuf/protoc-gen-go/generator"
43
+)
44
+
45
+// generatedCodeVersion indicates a version of the generated code.
46
+// It is incremented whenever an incompatibility between the generated code and
47
+// the grpc package is introduced; the generated code references
48
+// a constant, grpc.SupportPackageIsVersionN (where N is generatedCodeVersion).
49
+const generatedCodeVersion = 6
50
+
51
+// Paths for packages used by code generated in this file,
52
+// relative to the import_prefix of the generator.Generator.
53
+const (
54
+	contextPkgPath = "context"
55
+	grpcPkgPath    = "google.golang.org/grpc"
56
+	codePkgPath    = "google.golang.org/grpc/codes"
57
+	statusPkgPath  = "google.golang.org/grpc/status"
58
+)
59
+
60
+func init() {
61
+	generator.RegisterPlugin(new(grpc))
62
+}
63
+
64
+// grpc is an implementation of the Go protocol buffer compiler's
65
+// plugin architecture.  It generates bindings for gRPC support.
66
+type grpc struct {
67
+	gen *generator.Generator
68
+}
69
+
70
+// Name returns the name of this plugin, "grpc".
71
+func (g *grpc) Name() string {
72
+	return "grpc"
73
+}
74
+
75
+// The names for packages imported in the generated code.
76
+// They may vary from the final path component of the import path
77
+// if the name is used by other packages.
78
+var (
79
+	contextPkg string
80
+	grpcPkg    string
81
+)
82
+
83
+// Init initializes the plugin.
84
+func (g *grpc) Init(gen *generator.Generator) {
85
+	g.gen = gen
86
+}
87
+
88
+// Given a type name defined in a .proto, return its object.
89
+// Also record that we're using it, to guarantee the associated import.
90
+func (g *grpc) objectNamed(name string) generator.Object {
91
+	g.gen.RecordTypeUse(name)
92
+	return g.gen.ObjectNamed(name)
93
+}
94
+
95
+// Given a type name defined in a .proto, return its name as we will print it.
96
+func (g *grpc) typeName(str string) string {
97
+	return g.gen.TypeName(g.objectNamed(str))
98
+}
99
+
100
+// P forwards to g.gen.P.
101
+func (g *grpc) P(args ...interface{}) { g.gen.P(args...) }
102
+
103
+// Generate generates code for the services in the given file.
104
+func (g *grpc) Generate(file *generator.FileDescriptor) {
105
+	if len(file.FileDescriptorProto.Service) == 0 {
106
+		return
107
+	}
108
+
109
+	contextPkg = string(g.gen.AddImport(contextPkgPath))
110
+	grpcPkg = string(g.gen.AddImport(grpcPkgPath))
111
+
112
+	g.P("// Reference imports to suppress errors if they are not otherwise used.")
113
+	g.P("var _ ", contextPkg, ".Context")
114
+	g.P("var _ ", grpcPkg, ".ClientConnInterface")
115
+	g.P()
116
+
117
+	// Assert version compatibility.
118
+	g.P("// This is a compile-time assertion to ensure that this generated file")
119
+	g.P("// is compatible with the grpc package it is being compiled against.")
120
+	g.P("const _ = ", grpcPkg, ".SupportPackageIsVersion", generatedCodeVersion)
121
+	g.P()
122
+
123
+	for i, service := range file.FileDescriptorProto.Service {
124
+		g.generateService(file, service, i)
125
+	}
126
+}
127
+
128
+// GenerateImports generates the import declaration for this file.
129
+func (g *grpc) GenerateImports(file *generator.FileDescriptor) {
130
+}
131
+
132
+// reservedClientName records whether a client name is reserved on the client side.
133
+var reservedClientName = map[string]bool{
134
+	// TODO: do we need any in gRPC?
135
+}
136
+
137
+func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] }
138
+
139
+// deprecationComment is the standard comment added to deprecated
140
+// messages, fields, enums, and enum values.
141
+var deprecationComment = "// Deprecated: Do not use."
142
+
143
+// generateService generates all the code for the named service.
144
+func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) {
145
+	path := fmt.Sprintf("6,%d", index) // 6 means service.
146
+
147
+	origServName := service.GetName()
148
+	fullServName := origServName
149
+	if pkg := file.GetPackage(); pkg != "" {
150
+		fullServName = pkg + "." + fullServName
151
+	}
152
+	servName := generator.CamelCase(origServName)
153
+	deprecated := service.GetOptions().GetDeprecated()
154
+
155
+	g.P()
156
+	g.P(fmt.Sprintf(`// %sClient is the client API for %s service.
157
+//
158
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.`, servName, servName))
159
+
160
+	// Client interface.
161
+	if deprecated {
162
+		g.P("//")
163
+		g.P(deprecationComment)
164
+	}
165
+	g.P("type ", servName, "Client interface {")
166
+	for i, method := range service.Method {
167
+		g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service.
168
+		if method.GetOptions().GetDeprecated() {
169
+			g.P("//")
170
+			g.P(deprecationComment)
171
+		}
172
+		g.P(g.generateClientSignature(servName, method))
173
+	}
174
+	g.P("}")
175
+	g.P()
176
+
177
+	// Client structure.
178
+	g.P("type ", unexport(servName), "Client struct {")
179
+	g.P("cc ", grpcPkg, ".ClientConnInterface")
180
+	g.P("}")
181
+	g.P()
182
+
183
+	// NewClient factory.
184
+	if deprecated {
185
+		g.P(deprecationComment)
186
+	}
187
+	g.P("func New", servName, "Client (cc ", grpcPkg, ".ClientConnInterface) ", servName, "Client {")
188
+	g.P("return &", unexport(servName), "Client{cc}")
189
+	g.P("}")
190
+	g.P()
191
+
192
+	var methodIndex, streamIndex int
193
+	serviceDescVar := "_" + servName + "_serviceDesc"
194
+	// Client method implementations.
195
+	for _, method := range service.Method {
196
+		var descExpr string
197
+		if !method.GetServerStreaming() && !method.GetClientStreaming() {
198
+			// Unary RPC method
199
+			descExpr = fmt.Sprintf("&%s.Methods[%d]", serviceDescVar, methodIndex)
200
+			methodIndex++
201
+		} else {
202
+			// Streaming RPC method
203
+			descExpr = fmt.Sprintf("&%s.Streams[%d]", serviceDescVar, streamIndex)
204
+			streamIndex++
205
+		}
206
+		g.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr)
207
+	}
208
+
209
+	// Server interface.
210
+	serverType := servName + "Server"
211
+	g.P("// ", serverType, " is the server API for ", servName, " service.")
212
+	if deprecated {
213
+		g.P("//")
214
+		g.P(deprecationComment)
215
+	}
216
+	g.P("type ", serverType, " interface {")
217
+	for i, method := range service.Method {
218
+		g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service.
219
+		if method.GetOptions().GetDeprecated() {
220
+			g.P("//")
221
+			g.P(deprecationComment)
222
+		}
223
+		g.P(g.generateServerSignature(servName, method))
224
+	}
225
+	g.P("}")
226
+	g.P()
227
+
228
+	// Server Unimplemented struct for forward compatibility.
229
+	if deprecated {
230
+		g.P(deprecationComment)
231
+	}
232
+	g.generateUnimplementedServer(servName, service)
233
+
234
+	// Server registration.
235
+	if deprecated {
236
+		g.P(deprecationComment)
237
+	}
238
+	g.P("func Register", servName, "Server(s *", grpcPkg, ".Server, srv ", serverType, ") {")
239
+	g.P("s.RegisterService(&", serviceDescVar, `, srv)`)
240
+	g.P("}")
241
+	g.P()
242
+
243
+	// Server handler implementations.
244
+	var handlerNames []string
245
+	for _, method := range service.Method {
246
+		hname := g.generateServerMethod(servName, fullServName, method)
247
+		handlerNames = append(handlerNames, hname)
248
+	}
249
+
250
+	// Service descriptor.
251
+	g.P("var ", serviceDescVar, " = ", grpcPkg, ".ServiceDesc {")
252
+	g.P("ServiceName: ", strconv.Quote(fullServName), ",")
253
+	g.P("HandlerType: (*", serverType, ")(nil),")
254
+	g.P("Methods: []", grpcPkg, ".MethodDesc{")
255
+	for i, method := range service.Method {
256
+		if method.GetServerStreaming() || method.GetClientStreaming() {
257
+			continue
258
+		}
259
+		g.P("{")
260
+		g.P("MethodName: ", strconv.Quote(method.GetName()), ",")
261
+		g.P("Handler: ", handlerNames[i], ",")
262
+		g.P("},")
263
+	}
264
+	g.P("},")
265
+	g.P("Streams: []", grpcPkg, ".StreamDesc{")
266
+	for i, method := range service.Method {
267
+		if !method.GetServerStreaming() && !method.GetClientStreaming() {
268
+			continue
269
+		}
270
+		g.P("{")
271
+		g.P("StreamName: ", strconv.Quote(method.GetName()), ",")
272
+		g.P("Handler: ", handlerNames[i], ",")
273
+		if method.GetServerStreaming() {
274
+			g.P("ServerStreams: true,")
275
+		}
276
+		if method.GetClientStreaming() {
277
+			g.P("ClientStreams: true,")
278
+		}
279
+		g.P("},")
280
+	}
281
+	g.P("},")
282
+	g.P("Metadata: \"", file.GetName(), "\",")
283
+	g.P("}")
284
+	g.P()
285
+}
286
+
287
+// generateUnimplementedServer creates the unimplemented server struct
288
+func (g *grpc) generateUnimplementedServer(servName string, service *pb.ServiceDescriptorProto) {
289
+	serverType := servName + "Server"
290
+	g.P("// Unimplemented", serverType, " can be embedded to have forward compatible implementations.")
291
+	g.P("type Unimplemented", serverType, " struct {")
292
+	g.P("}")
293
+	g.P()
294
+	// Unimplemented<service_name>Server's concrete methods
295
+	for _, method := range service.Method {
296
+		g.generateServerMethodConcrete(servName, method)
297
+	}
298
+	g.P()
299
+}
300
+
301
+// generateServerMethodConcrete emits an unimplemented method stub which ensures forward compatibility
302
+func (g *grpc) generateServerMethodConcrete(servName string, method *pb.MethodDescriptorProto) {
303
+	header := g.generateServerSignatureWithParamNames(servName, method)
304
+	g.P("func (*Unimplemented", servName, "Server) ", header, " {")
305
+	var nilArg string
306
+	if !method.GetServerStreaming() && !method.GetClientStreaming() {
307
+		nilArg = "nil, "
308
+	}
309
+	methName := generator.CamelCase(method.GetName())
310
+	statusPkg := string(g.gen.AddImport(statusPkgPath))
311
+	codePkg := string(g.gen.AddImport(codePkgPath))
312
+	g.P("return ", nilArg, statusPkg, `.Errorf(`, codePkg, `.Unimplemented, "method `, methName, ` not implemented")`)
313
+	g.P("}")
314
+}
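
The stubs emitted here end up looking roughly like the following for a hypothetical unary method SayHello on a Greeter service (all names are invented; the real output depends on the .proto input):

```go
package examplepb // hypothetical generated package

import (
	"context"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

type HelloRequest struct{}
type HelloReply struct{}

// UnimplementedGreeterServer can be embedded for forward-compatible servers.
type UnimplementedGreeterServer struct{}

func (*UnimplementedGreeterServer) SayHello(ctx context.Context, req *HelloRequest) (*HelloReply, error) {
	return nil, status.Errorf(codes.Unimplemented, "method SayHello not implemented")
}
```
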
315
+
316
+// generateClientSignature returns the client-side signature for a method.
317
+func (g *grpc) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string {
318
+	origMethName := method.GetName()
319
+	methName := generator.CamelCase(origMethName)
320
+	if reservedClientName[methName] {
321
+		methName += "_"
322
+	}
323
+	reqArg := ", in *" + g.typeName(method.GetInputType())
324
+	if method.GetClientStreaming() {
325
+		reqArg = ""
326
+	}
327
+	respName := "*" + g.typeName(method.GetOutputType())
328
+	if method.GetServerStreaming() || method.GetClientStreaming() {
329
+		respName = servName + "_" + generator.CamelCase(origMethName) + "Client"
330
+	}
331
+	return fmt.Sprintf("%s(ctx %s.Context%s, opts ...%s.CallOption) (%s, error)", methName, contextPkg, reqArg, grpcPkg, respName)
332
+}
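
Put together, the signatures this function returns become the <Service>Client interface. A sketch for a hypothetical Greeter service with one unary and one server-streaming method (the message, service, and stream type names are invented):

```go
package examplepb // hypothetical generated package

import (
	"context"

	"google.golang.org/grpc"
)

type HelloRequest struct{}
type HelloReply struct{}
type WatchRequest struct{}

// Greeter_WatchClient is the per-method stream interface the plugin also emits.
type Greeter_WatchClient interface {
	Recv() (*HelloReply, error)
	grpc.ClientStream
}

type GreeterClient interface {
	// Unary: request in, response out.
	SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error)
	// Server-streaming: the client receives a stream wrapper instead of a response.
	Watch(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (Greeter_WatchClient, error)
}
```
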
333
+
334
+func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar string, method *pb.MethodDescriptorProto, descExpr string) {
335
+	sname := fmt.Sprintf("/%s/%s", fullServName, method.GetName())
336
+	methName := generator.CamelCase(method.GetName())
337
+	inType := g.typeName(method.GetInputType())
338
+	outType := g.typeName(method.GetOutputType())
339
+
340
+	if method.GetOptions().GetDeprecated() {
341
+		g.P(deprecationComment)
342
+	}
343
+	g.P("func (c *", unexport(servName), "Client) ", g.generateClientSignature(servName, method), "{")
344
+	if !method.GetServerStreaming() && !method.GetClientStreaming() {
345
+		g.P("out := new(", outType, ")")
346
+		// TODO: Pass descExpr to Invoke.
347
+		g.P(`err := c.cc.Invoke(ctx, "`, sname, `", in, out, opts...)`)
348
+		g.P("if err != nil { return nil, err }")
349
+		g.P("return out, nil")
350
+		g.P("}")
351
+		g.P()
352
+		return
353
+	}
354
+	streamType := unexport(servName) + methName + "Client"
355
+	g.P("stream, err := c.cc.NewStream(ctx, ", descExpr, `, "`, sname, `", opts...)`)
356
+	g.P("if err != nil { return nil, err }")
357
+	g.P("x := &", streamType, "{stream}")
358
+	if !method.GetClientStreaming() {
359
+		g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }")
360
+		g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }")
361
+	}
362
+	g.P("return x, nil")
363
+	g.P("}")
364
+	g.P()
365
+
366
+	genSend := method.GetClientStreaming()
367
+	genRecv := method.GetServerStreaming()
368
+	genCloseAndRecv := !method.GetServerStreaming()
369
+
370
+	// Stream auxiliary types and methods.
371
+	g.P("type ", servName, "_", methName, "Client interface {")
372
+	if genSend {
373
+		g.P("Send(*", inType, ") error")
374
+	}
375
+	if genRecv {
376
+		g.P("Recv() (*", outType, ", error)")
377
+	}
378
+	if genCloseAndRecv {
379
+		g.P("CloseAndRecv() (*", outType, ", error)")
380
+	}
381
+	g.P(grpcPkg, ".ClientStream")
382
+	g.P("}")
383
+	g.P()
384
+
385
+	g.P("type ", streamType, " struct {")
386
+	g.P(grpcPkg, ".ClientStream")
387
+	g.P("}")
388
+	g.P()
389
+
390
+	if genSend {
391
+		g.P("func (x *", streamType, ") Send(m *", inType, ") error {")
392
+		g.P("return x.ClientStream.SendMsg(m)")
393
+		g.P("}")
394
+		g.P()
395
+	}
396
+	if genRecv {
397
+		g.P("func (x *", streamType, ") Recv() (*", outType, ", error) {")
398
+		g.P("m := new(", outType, ")")
399
+		g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }")
400
+		g.P("return m, nil")
401
+		g.P("}")
402
+		g.P()
403
+	}
404
+	if genCloseAndRecv {
405
+		g.P("func (x *", streamType, ") CloseAndRecv() (*", outType, ", error) {")
406
+		g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }")
407
+		g.P("m := new(", outType, ")")
408
+		g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }")
409
+		g.P("return m, nil")
410
+		g.P("}")
411
+		g.P()
412
+	}
413
+}
414
+
415
+// generateServerSignatureWithParamNames returns the server-side signature for a method with parameter names.
416
+func (g *grpc) generateServerSignatureWithParamNames(servName string, method *pb.MethodDescriptorProto) string {
417
+	origMethName := method.GetName()
418
+	methName := generator.CamelCase(origMethName)
419
+	if reservedClientName[methName] {
420
+		methName += "_"
421
+	}
422
+
423
+	var reqArgs []string
424
+	ret := "error"
425
+	if !method.GetServerStreaming() && !method.GetClientStreaming() {
426
+		reqArgs = append(reqArgs, "ctx "+contextPkg+".Context")
427
+		ret = "(*" + g.typeName(method.GetOutputType()) + ", error)"
428
+	}
429
+	if !method.GetClientStreaming() {
430
+		reqArgs = append(reqArgs, "req *"+g.typeName(method.GetInputType()))
431
+	}
432
+	if method.GetServerStreaming() || method.GetClientStreaming() {
433
+		reqArgs = append(reqArgs, "srv "+servName+"_"+generator.CamelCase(origMethName)+"Server")
434
+	}
435
+
436
+	return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret
437
+}
438
+
439
+// generateServerSignature returns the server-side signature for a method.
440
+func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string {
441
+	origMethName := method.GetName()
442
+	methName := generator.CamelCase(origMethName)
443
+	if reservedClientName[methName] {
444
+		methName += "_"
445
+	}
446
+
447
+	var reqArgs []string
448
+	ret := "error"
449
+	if !method.GetServerStreaming() && !method.GetClientStreaming() {
450
+		reqArgs = append(reqArgs, contextPkg+".Context")
451
+		ret = "(*" + g.typeName(method.GetOutputType()) + ", error)"
452
+	}
453
+	if !method.GetClientStreaming() {
454
+		reqArgs = append(reqArgs, "*"+g.typeName(method.GetInputType()))
455
+	}
456
+	if method.GetServerStreaming() || method.GetClientStreaming() {
457
+		reqArgs = append(reqArgs, servName+"_"+generator.CamelCase(origMethName)+"Server")
458
+	}
459
+
460
+	return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret
461
+}
462
+
463
+func (g *grpc) generateServerMethod(servName, fullServName string, method *pb.MethodDescriptorProto) string {
464
+	methName := generator.CamelCase(method.GetName())
465
+	hname := fmt.Sprintf("_%s_%s_Handler", servName, methName)
466
+	inType := g.typeName(method.GetInputType())
467
+	outType := g.typeName(method.GetOutputType())
468
+
469
+	if !method.GetServerStreaming() && !method.GetClientStreaming() {
470
+		g.P("func ", hname, "(srv interface{}, ctx ", contextPkg, ".Context, dec func(interface{}) error, interceptor ", grpcPkg, ".UnaryServerInterceptor) (interface{}, error) {")
471
+		g.P("in := new(", inType, ")")
472
+		g.P("if err := dec(in); err != nil { return nil, err }")
473
+		g.P("if interceptor == nil { return srv.(", servName, "Server).", methName, "(ctx, in) }")
474
+		g.P("info := &", grpcPkg, ".UnaryServerInfo{")
475
+		g.P("Server: srv,")
476
+		g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", fullServName, methName)), ",")
477
+		g.P("}")
478
+		g.P("handler := func(ctx ", contextPkg, ".Context, req interface{}) (interface{}, error) {")
479
+		g.P("return srv.(", servName, "Server).", methName, "(ctx, req.(*", inType, "))")
480
+		g.P("}")
481
+		g.P("return interceptor(ctx, in, info, handler)")
482
+		g.P("}")
483
+		g.P()
484
+		return hname
485
+	}
486
+	streamType := unexport(servName) + methName + "Server"
487
+	g.P("func ", hname, "(srv interface{}, stream ", grpcPkg, ".ServerStream) error {")
488
+	if !method.GetClientStreaming() {
489
+		g.P("m := new(", inType, ")")
490
+		g.P("if err := stream.RecvMsg(m); err != nil { return err }")
491
+		g.P("return srv.(", servName, "Server).", methName, "(m, &", streamType, "{stream})")
492
+	} else {
493
+		g.P("return srv.(", servName, "Server).", methName, "(&", streamType, "{stream})")
494
+	}
495
+	g.P("}")
496
+	g.P()
497
+
498
+	genSend := method.GetServerStreaming()
499
+	genSendAndClose := !method.GetServerStreaming()
500
+	genRecv := method.GetClientStreaming()
501
+
502
+	// Stream auxiliary types and methods.
503
+	g.P("type ", servName, "_", methName, "Server interface {")
504
+	if genSend {
505
+		g.P("Send(*", outType, ") error")
506
+	}
507
+	if genSendAndClose {
508
+		g.P("SendAndClose(*", outType, ") error")
509
+	}
510
+	if genRecv {
511
+		g.P("Recv() (*", inType, ", error)")
512
+	}
513
+	g.P(grpcPkg, ".ServerStream")
514
+	g.P("}")
515
+	g.P()
516
+
517
+	g.P("type ", streamType, " struct {")
518
+	g.P(grpcPkg, ".ServerStream")
519
+	g.P("}")
520
+	g.P()
521
+
522
+	if genSend {
523
+		g.P("func (x *", streamType, ") Send(m *", outType, ") error {")
524
+		g.P("return x.ServerStream.SendMsg(m)")
525
+		g.P("}")
526
+		g.P()
527
+	}
528
+	if genSendAndClose {
529
+		g.P("func (x *", streamType, ") SendAndClose(m *", outType, ") error {")
530
+		g.P("return x.ServerStream.SendMsg(m)")
531
+		g.P("}")
532
+		g.P()
533
+	}
534
+	if genRecv {
535
+		g.P("func (x *", streamType, ") Recv() (*", inType, ", error) {")
536
+		g.P("m := new(", inType, ")")
537
+		g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }")
538
+		g.P("return m, nil")
539
+		g.P("}")
540
+		g.P()
541
+	}
542
+
543
+	return hname
544
+}
0 545
new file mode 100644
... ...
@@ -0,0 +1,34 @@
0
+// Go support for Protocol Buffers - Google's data interchange format
1
+//
2
+// Copyright 2015 The Go Authors.  All rights reserved.
3
+// https://github.com/golang/protobuf
4
+//
5
+// Redistribution and use in source and binary forms, with or without
6
+// modification, are permitted provided that the following conditions are
7
+// met:
8
+//
9
+//     * Redistributions of source code must retain the above copyright
10
+// notice, this list of conditions and the following disclaimer.
11
+//     * Redistributions in binary form must reproduce the above
12
+// copyright notice, this list of conditions and the following disclaimer
13
+// in the documentation and/or other materials provided with the
14
+// distribution.
15
+//     * Neither the name of Google Inc. nor the names of its
16
+// contributors may be used to endorse or promote products derived from
17
+// this software without specific prior written permission.
18
+//
19
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
+
31
+package main
32
+
33
+import _ "github.com/golang/protobuf/protoc-gen-go/grpc"
0 34
new file mode 100644
... ...
@@ -0,0 +1,98 @@
0
+// Go support for Protocol Buffers - Google's data interchange format
1
+//
2
+// Copyright 2010 The Go Authors.  All rights reserved.
3
+// https://github.com/golang/protobuf
4
+//
5
+// Redistribution and use in source and binary forms, with or without
6
+// modification, are permitted provided that the following conditions are
7
+// met:
8
+//
9
+//     * Redistributions of source code must retain the above copyright
10
+// notice, this list of conditions and the following disclaimer.
11
+//     * Redistributions in binary form must reproduce the above
12
+// copyright notice, this list of conditions and the following disclaimer
13
+// in the documentation and/or other materials provided with the
14
+// distribution.
15
+//     * Neither the name of Google Inc. nor the names of its
16
+// contributors may be used to endorse or promote products derived from
17
+// this software without specific prior written permission.
18
+//
19
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
+
31
+// protoc-gen-go is a plugin for the Google protocol buffer compiler to generate
32
+// Go code.  Run it by building this program and putting it in your path with
33
+// the name
34
+// 	protoc-gen-go
35
+// That word 'go' at the end becomes part of the option string set for the
36
+// protocol compiler, so once the protocol compiler (protoc) is installed
37
+// you can run
38
+// 	protoc --go_out=output_directory input_directory/file.proto
39
+// to generate Go bindings for the protocol defined by file.proto.
40
+// With that input, the output will be written to
41
+// 	output_directory/file.pb.go
42
+//
43
+// The generated code is documented in the package comment for
44
+// the library.
45
+//
46
+// See the README and documentation for protocol buffers to learn more:
47
+// 	https://developers.google.com/protocol-buffers/
48
+package main
49
+
50
+import (
51
+	"io/ioutil"
52
+	"os"
53
+
54
+	"github.com/golang/protobuf/proto"
55
+	"github.com/golang/protobuf/protoc-gen-go/generator"
56
+)
57
+
58
+func main() {
59
+	// Begin by allocating a generator. The request and response structures are stored there
60
+	// so we can do error handling easily - the response structure contains the field to
61
+	// report failure.
62
+	g := generator.New()
63
+
64
+	data, err := ioutil.ReadAll(os.Stdin)
65
+	if err != nil {
66
+		g.Error(err, "reading input")
67
+	}
68
+
69
+	if err := proto.Unmarshal(data, g.Request); err != nil {
70
+		g.Error(err, "parsing input proto")
71
+	}
72
+
73
+	if len(g.Request.FileToGenerate) == 0 {
74
+		g.Fail("no files to generate")
75
+	}
76
+
77
+	g.CommandLineParameters(g.Request.GetParameter())
78
+
79
+	// Create a wrapped version of the Descriptors and EnumDescriptors that
80
+	// point to the file that defines them.
81
+	g.WrapTypes()
82
+
83
+	g.SetPackageNames()
84
+	g.BuildTypeNameMap()
85
+
86
+	g.GenerateAllFiles()
87
+
88
+	// Send back the results.
89
+	data, err = proto.Marshal(g.Response)
90
+	if err != nil {
91
+		g.Error(err, "failed to marshal output proto")
92
+	}
93
+	_, err = os.Stdout.Write(data)
94
+	if err != nil {
95
+		g.Error(err, "failed to write output proto")
96
+	}
97
+}
0 98
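For context on the file above: protoc-gen-go's main decodes a CodeGeneratorRequest from stdin and encodes a CodeGeneratorResponse to stdout via the generator package. A custom plugin can speak the same protocol directly; the sketch below is hypothetical (not part of this vendoring) and assumes the vendored import paths shown in this diff, with placeholder output names and content.

```
// Hypothetical minimal protoc plugin speaking the CodeGeneratorRequest/
// CodeGeneratorResponse protocol directly (a sketch, not vendored code).
package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/golang/protobuf/proto"
	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
)

func main() {
	data, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatal(err)
	}
	req := &plugin.CodeGeneratorRequest{}
	if err := proto.Unmarshal(data, req); err != nil {
		log.Fatal(err)
	}
	resp := &plugin.CodeGeneratorResponse{}
	// Emit one placeholder output file per .proto listed on the command line.
	for _, f := range req.GetFileToGenerate() {
		resp.File = append(resp.File, &plugin.CodeGeneratorResponse_File{
			Name:    proto.String(f + ".note.txt"),
			Content: proto.String("// generated from " + f + "\n"),
		})
	}
	out, err := proto.Marshal(resp)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := os.Stdout.Write(out); err != nil {
		log.Fatal(err)
	}
}
```

Built as `protoc-gen-note` and placed on the PATH, such a plugin would be driven by `protoc --note_out=. file.proto`, following the protoc-gen-$NAME / --${NAME}_out convention described in plugin.proto further down in this diff.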
new file mode 100644
... ...
@@ -0,0 +1,369 @@
0
+// Code generated by protoc-gen-go. DO NOT EDIT.
1
+// source: google/protobuf/compiler/plugin.proto
2
+
3
+/*
4
+Package plugin_go is a generated protocol buffer package.
5
+
6
+It is generated from these files:
7
+	google/protobuf/compiler/plugin.proto
8
+
9
+It has these top-level messages:
10
+	Version
11
+	CodeGeneratorRequest
12
+	CodeGeneratorResponse
13
+*/
14
+package plugin_go
15
+
16
+import proto "github.com/golang/protobuf/proto"
17
+import fmt "fmt"
18
+import math "math"
19
+import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
20
+
21
+// Reference imports to suppress errors if they are not otherwise used.
22
+var _ = proto.Marshal
23
+var _ = fmt.Errorf
24
+var _ = math.Inf
25
+
26
+// This is a compile-time assertion to ensure that this generated file
27
+// is compatible with the proto package it is being compiled against.
28
+// A compilation error at this line likely means your copy of the
29
+// proto package needs to be updated.
30
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
31
+
32
+// The version number of protocol compiler.
33
+type Version struct {
34
+	Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
35
+	Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
36
+	Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
37
+	// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
38
+	// be empty for mainline stable releases.
39
+	Suffix               *string  `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"`
40
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
41
+	XXX_unrecognized     []byte   `json:"-"`
42
+	XXX_sizecache        int32    `json:"-"`
43
+}
44
+
45
+func (m *Version) Reset()                    { *m = Version{} }
46
+func (m *Version) String() string            { return proto.CompactTextString(m) }
47
+func (*Version) ProtoMessage()               {}
48
+func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
49
+func (m *Version) Unmarshal(b []byte) error {
50
+	return xxx_messageInfo_Version.Unmarshal(m, b)
51
+}
52
+func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) {
53
+	return xxx_messageInfo_Version.Marshal(b, m, deterministic)
54
+}
55
+func (dst *Version) XXX_Merge(src proto.Message) {
56
+	xxx_messageInfo_Version.Merge(dst, src)
57
+}
58
+func (m *Version) XXX_Size() int {
59
+	return xxx_messageInfo_Version.Size(m)
60
+}
61
+func (m *Version) XXX_DiscardUnknown() {
62
+	xxx_messageInfo_Version.DiscardUnknown(m)
63
+}
64
+
65
+var xxx_messageInfo_Version proto.InternalMessageInfo
66
+
67
+func (m *Version) GetMajor() int32 {
68
+	if m != nil && m.Major != nil {
69
+		return *m.Major
70
+	}
71
+	return 0
72
+}
73
+
74
+func (m *Version) GetMinor() int32 {
75
+	if m != nil && m.Minor != nil {
76
+		return *m.Minor
77
+	}
78
+	return 0
79
+}
80
+
81
+func (m *Version) GetPatch() int32 {
82
+	if m != nil && m.Patch != nil {
83
+		return *m.Patch
84
+	}
85
+	return 0
86
+}
87
+
88
+func (m *Version) GetSuffix() string {
89
+	if m != nil && m.Suffix != nil {
90
+		return *m.Suffix
91
+	}
92
+	return ""
93
+}
94
+
95
+// An encoded CodeGeneratorRequest is written to the plugin's stdin.
96
+type CodeGeneratorRequest struct {
97
+	// The .proto files that were explicitly listed on the command-line.  The
98
+	// code generator should generate code only for these files.  Each file's
99
+	// descriptor will be included in proto_file, below.
100
+	FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"`
101
+	// The generator parameter passed on the command-line.
102
+	Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
103
+	// FileDescriptorProtos for all files in files_to_generate and everything
104
+	// they import.  The files will appear in topological order, so each file
105
+	// appears before any file that imports it.
106
+	//
107
+	// protoc guarantees that all proto_files will be written after
108
+	// the fields above, even though this is not technically guaranteed by the
109
+	// protobuf wire format.  This theoretically could allow a plugin to stream
110
+	// in the FileDescriptorProtos and handle them one by one rather than read
111
+	// the entire set into memory at once.  However, as of this writing, this
112
+	// is not similarly optimized on protoc's end -- it will store all fields in
113
+	// memory at once before sending them to the plugin.
114
+	//
115
+	// Type names of fields and extensions in the FileDescriptorProto are always
116
+	// fully qualified.
117
+	ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"`
118
+	// The version number of protocol compiler.
119
+	CompilerVersion      *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"`
120
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
121
+	XXX_unrecognized     []byte   `json:"-"`
122
+	XXX_sizecache        int32    `json:"-"`
123
+}
124
+
125
+func (m *CodeGeneratorRequest) Reset()                    { *m = CodeGeneratorRequest{} }
126
+func (m *CodeGeneratorRequest) String() string            { return proto.CompactTextString(m) }
127
+func (*CodeGeneratorRequest) ProtoMessage()               {}
128
+func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
129
+func (m *CodeGeneratorRequest) Unmarshal(b []byte) error {
130
+	return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b)
131
+}
132
+func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) {
133
+	return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic)
134
+}
135
+func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) {
136
+	xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src)
137
+}
138
+func (m *CodeGeneratorRequest) XXX_Size() int {
139
+	return xxx_messageInfo_CodeGeneratorRequest.Size(m)
140
+}
141
+func (m *CodeGeneratorRequest) XXX_DiscardUnknown() {
142
+	xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m)
143
+}
144
+
145
+var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo
146
+
147
+func (m *CodeGeneratorRequest) GetFileToGenerate() []string {
148
+	if m != nil {
149
+		return m.FileToGenerate
150
+	}
151
+	return nil
152
+}
153
+
154
+func (m *CodeGeneratorRequest) GetParameter() string {
155
+	if m != nil && m.Parameter != nil {
156
+		return *m.Parameter
157
+	}
158
+	return ""
159
+}
160
+
161
+func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto {
162
+	if m != nil {
163
+		return m.ProtoFile
164
+	}
165
+	return nil
166
+}
167
+
168
+func (m *CodeGeneratorRequest) GetCompilerVersion() *Version {
169
+	if m != nil {
170
+		return m.CompilerVersion
171
+	}
172
+	return nil
173
+}
174
+
175
+// The plugin writes an encoded CodeGeneratorResponse to stdout.
176
+type CodeGeneratorResponse struct {
177
+	// Error message.  If non-empty, code generation failed.  The plugin process
178
+	// should exit with status code zero even if it reports an error in this way.
179
+	//
180
+	// This should be used to indicate errors in .proto files which prevent the
181
+	// code generator from generating correct code.  Errors which indicate a
182
+	// problem in protoc itself -- such as the input CodeGeneratorRequest being
183
+	// unparseable -- should be reported by writing a message to stderr and
184
+	// exiting with a non-zero status code.
185
+	Error                *string                       `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
186
+	File                 []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
187
+	XXX_NoUnkeyedLiteral struct{}                      `json:"-"`
188
+	XXX_unrecognized     []byte                        `json:"-"`
189
+	XXX_sizecache        int32                         `json:"-"`
190
+}
191
+
192
+func (m *CodeGeneratorResponse) Reset()                    { *m = CodeGeneratorResponse{} }
193
+func (m *CodeGeneratorResponse) String() string            { return proto.CompactTextString(m) }
194
+func (*CodeGeneratorResponse) ProtoMessage()               {}
195
+func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
196
+func (m *CodeGeneratorResponse) Unmarshal(b []byte) error {
197
+	return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b)
198
+}
199
+func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) {
200
+	return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic)
201
+}
202
+func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) {
203
+	xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src)
204
+}
205
+func (m *CodeGeneratorResponse) XXX_Size() int {
206
+	return xxx_messageInfo_CodeGeneratorResponse.Size(m)
207
+}
208
+func (m *CodeGeneratorResponse) XXX_DiscardUnknown() {
209
+	xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m)
210
+}
211
+
212
+var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo
213
+
214
+func (m *CodeGeneratorResponse) GetError() string {
215
+	if m != nil && m.Error != nil {
216
+		return *m.Error
217
+	}
218
+	return ""
219
+}
220
+
221
+func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File {
222
+	if m != nil {
223
+		return m.File
224
+	}
225
+	return nil
226
+}
227
+
228
+// Represents a single generated file.
229
+type CodeGeneratorResponse_File struct {
230
+	// The file name, relative to the output directory.  The name must not
231
+	// contain "." or ".." components and must be relative, not be absolute (so,
232
+	// the file cannot lie outside the output directory).  "/" must be used as
233
+	// the path separator, not "\".
234
+	//
235
+	// If the name is omitted, the content will be appended to the previous
236
+	// file.  This allows the generator to break large files into small chunks,
237
+	// and allows the generated text to be streamed back to protoc so that large
238
+	// files need not reside completely in memory at one time.  Note that as of
239
+	// this writing protoc does not optimize for this -- it will read the entire
240
+	// CodeGeneratorResponse before writing files to disk.
241
+	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
242
+	// If non-empty, indicates that the named file should already exist, and the
243
+	// content here is to be inserted into that file at a defined insertion
244
+	// point.  This feature allows a code generator to extend the output
245
+	// produced by another code generator.  The original generator may provide
246
+	// insertion points by placing special annotations in the file that look
247
+	// like:
248
+	//   @@protoc_insertion_point(NAME)
249
+	// The annotation can have arbitrary text before and after it on the line,
250
+	// which allows it to be placed in a comment.  NAME should be replaced with
251
+	// an identifier naming the point -- this is what other generators will use
252
+	// as the insertion_point.  Code inserted at this point will be placed
253
+	// immediately above the line containing the insertion point (thus multiple
254
+	// insertions to the same point will come out in the order they were added).
255
+	// The double-@ is intended to make it unlikely that the generated code
256
+	// could contain things that look like insertion points by accident.
257
+	//
258
+	// For example, the C++ code generator places the following line in the
259
+	// .pb.h files that it generates:
260
+	//   // @@protoc_insertion_point(namespace_scope)
261
+	// This line appears within the scope of the file's package namespace, but
262
+	// outside of any particular class.  Another plugin can then specify the
263
+	// insertion_point "namespace_scope" to generate additional classes or
264
+	// other declarations that should be placed in this scope.
265
+	//
266
+	// Note that if the line containing the insertion point begins with
267
+	// whitespace, the same whitespace will be added to every line of the
268
+	// inserted text.  This is useful for languages like Python, where
269
+	// indentation matters.  In these languages, the insertion point comment
270
+	// should be indented the same amount as any inserted code will need to be
271
+	// in order to work correctly in that context.
272
+	//
273
+	// The code generator that generates the initial file and the one which
274
+	// inserts into it must both run as part of a single invocation of protoc.
275
+	// Code generators are executed in the order in which they appear on the
276
+	// command line.
277
+	//
278
+	// If |insertion_point| is present, |name| must also be present.
279
+	InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"`
280
+	// The file contents.
281
+	Content              *string  `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
282
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
283
+	XXX_unrecognized     []byte   `json:"-"`
284
+	XXX_sizecache        int32    `json:"-"`
285
+}
286
+
287
+func (m *CodeGeneratorResponse_File) Reset()                    { *m = CodeGeneratorResponse_File{} }
288
+func (m *CodeGeneratorResponse_File) String() string            { return proto.CompactTextString(m) }
289
+func (*CodeGeneratorResponse_File) ProtoMessage()               {}
290
+func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
291
+func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error {
292
+	return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b)
293
+}
294
+func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) {
295
+	return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic)
296
+}
297
+func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) {
298
+	xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src)
299
+}
300
+func (m *CodeGeneratorResponse_File) XXX_Size() int {
301
+	return xxx_messageInfo_CodeGeneratorResponse_File.Size(m)
302
+}
303
+func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() {
304
+	xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m)
305
+}
306
+
307
+var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo
308
+
309
+func (m *CodeGeneratorResponse_File) GetName() string {
310
+	if m != nil && m.Name != nil {
311
+		return *m.Name
312
+	}
313
+	return ""
314
+}
315
+
316
+func (m *CodeGeneratorResponse_File) GetInsertionPoint() string {
317
+	if m != nil && m.InsertionPoint != nil {
318
+		return *m.InsertionPoint
319
+	}
320
+	return ""
321
+}
322
+
323
+func (m *CodeGeneratorResponse_File) GetContent() string {
324
+	if m != nil && m.Content != nil {
325
+		return *m.Content
326
+	}
327
+	return ""
328
+}
329
+
330
+func init() {
331
+	proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version")
332
+	proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest")
333
+	proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse")
334
+	proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File")
335
+}
336
+
337
+func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", fileDescriptor0) }
338
+
339
+var fileDescriptor0 = []byte{
340
+	// 417 bytes of a gzipped FileDescriptorProto
341
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41,
342
+	0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2,
343
+	0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30,
344
+	0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa,
345
+	0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91,
346
+	0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63,
347
+	0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb,
348
+	0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55,
349
+	0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8,
350
+	0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1,
351
+	0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f,
352
+	0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d,
353
+	0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2,
354
+	0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a,
355
+	0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2,
356
+	0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d,
357
+	0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda,
358
+	0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed,
359
+	0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34,
360
+	0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79,
361
+	0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45,
362
+	0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4,
363
+	0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e,
364
+	0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92,
365
+	0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d,
366
+	0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00,
367
+	0x00,
368
+}
0 369
new file mode 100644
... ...
@@ -0,0 +1,167 @@
0
+// Protocol Buffers - Google's data interchange format
1
+// Copyright 2008 Google Inc.  All rights reserved.
2
+// https://developers.google.com/protocol-buffers/
3
+//
4
+// Redistribution and use in source and binary forms, with or without
5
+// modification, are permitted provided that the following conditions are
6
+// met:
7
+//
8
+//     * Redistributions of source code must retain the above copyright
9
+// notice, this list of conditions and the following disclaimer.
10
+//     * Redistributions in binary form must reproduce the above
11
+// copyright notice, this list of conditions and the following disclaimer
12
+// in the documentation and/or other materials provided with the
13
+// distribution.
14
+//     * Neither the name of Google Inc. nor the names of its
15
+// contributors may be used to endorse or promote products derived from
16
+// this software without specific prior written permission.
17
+//
18
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+
30
+// Author: kenton@google.com (Kenton Varda)
31
+//
32
+// WARNING:  The plugin interface is currently EXPERIMENTAL and is subject to
33
+//   change.
34
+//
35
+// protoc (aka the Protocol Compiler) can be extended via plugins.  A plugin is
36
+// just a program that reads a CodeGeneratorRequest from stdin and writes a
37
+// CodeGeneratorResponse to stdout.
38
+//
39
+// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
40
+// of dealing with the raw protocol defined here.
41
+//
42
+// A plugin executable needs only to be placed somewhere in the path.  The
43
+// plugin should be named "protoc-gen-$NAME", and will then be used when the
44
+// flag "--${NAME}_out" is passed to protoc.
45
+
46
+syntax = "proto2";
47
+package google.protobuf.compiler;
48
+option java_package = "com.google.protobuf.compiler";
49
+option java_outer_classname = "PluginProtos";
50
+
51
+option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go";
52
+
53
+import "google/protobuf/descriptor.proto";
54
+
55
+// The version number of protocol compiler.
56
+message Version {
57
+  optional int32 major = 1;
58
+  optional int32 minor = 2;
59
+  optional int32 patch = 3;
60
+  // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
61
+  // be empty for mainline stable releases.
62
+  optional string suffix = 4;
63
+}
64
+
65
+// An encoded CodeGeneratorRequest is written to the plugin's stdin.
66
+message CodeGeneratorRequest {
67
+  // The .proto files that were explicitly listed on the command-line.  The
68
+  // code generator should generate code only for these files.  Each file's
69
+  // descriptor will be included in proto_file, below.
70
+  repeated string file_to_generate = 1;
71
+
72
+  // The generator parameter passed on the command-line.
73
+  optional string parameter = 2;
74
+
75
+  // FileDescriptorProtos for all files in files_to_generate and everything
76
+  // they import.  The files will appear in topological order, so each file
77
+  // appears before any file that imports it.
78
+  //
79
+  // protoc guarantees that all proto_files will be written after
80
+  // the fields above, even though this is not technically guaranteed by the
81
+  // protobuf wire format.  This theoretically could allow a plugin to stream
82
+  // in the FileDescriptorProtos and handle them one by one rather than read
83
+  // the entire set into memory at once.  However, as of this writing, this
84
+  // is not similarly optimized on protoc's end -- it will store all fields in
85
+  // memory at once before sending them to the plugin.
86
+  //
87
+  // Type names of fields and extensions in the FileDescriptorProto are always
88
+  // fully qualified.
89
+  repeated FileDescriptorProto proto_file = 15;
90
+
91
+  // The version number of protocol compiler.
92
+  optional Version compiler_version = 3;
93
+
94
+}
95
+
96
+// The plugin writes an encoded CodeGeneratorResponse to stdout.
97
+message CodeGeneratorResponse {
98
+  // Error message.  If non-empty, code generation failed.  The plugin process
99
+  // should exit with status code zero even if it reports an error in this way.
100
+  //
101
+  // This should be used to indicate errors in .proto files which prevent the
102
+  // code generator from generating correct code.  Errors which indicate a
103
+  // problem in protoc itself -- such as the input CodeGeneratorRequest being
104
+  // unparseable -- should be reported by writing a message to stderr and
105
+  // exiting with a non-zero status code.
106
+  optional string error = 1;
107
+
108
+  // Represents a single generated file.
109
+  message File {
110
+    // The file name, relative to the output directory.  The name must not
111
+    // contain "." or ".." components and must be relative, not be absolute (so,
112
+    // the file cannot lie outside the output directory).  "/" must be used as
113
+    // the path separator, not "\".
114
+    //
115
+    // If the name is omitted, the content will be appended to the previous
116
+    // file.  This allows the generator to break large files into small chunks,
117
+    // and allows the generated text to be streamed back to protoc so that large
118
+    // files need not reside completely in memory at one time.  Note that as of
119
+    // this writing protoc does not optimize for this -- it will read the entire
120
+    // CodeGeneratorResponse before writing files to disk.
121
+    optional string name = 1;
122
+
123
+    // If non-empty, indicates that the named file should already exist, and the
124
+    // content here is to be inserted into that file at a defined insertion
125
+    // point.  This feature allows a code generator to extend the output
126
+    // produced by another code generator.  The original generator may provide
127
+    // insertion points by placing special annotations in the file that look
128
+    // like:
129
+    //   @@protoc_insertion_point(NAME)
130
+    // The annotation can have arbitrary text before and after it on the line,
131
+    // which allows it to be placed in a comment.  NAME should be replaced with
132
+    // an identifier naming the point -- this is what other generators will use
133
+    // as the insertion_point.  Code inserted at this point will be placed
134
+    // immediately above the line containing the insertion point (thus multiple
135
+    // insertions to the same point will come out in the order they were added).
136
+    // The double-@ is intended to make it unlikely that the generated code
137
+    // could contain things that look like insertion points by accident.
138
+    //
139
+    // For example, the C++ code generator places the following line in the
140
+    // .pb.h files that it generates:
141
+    //   // @@protoc_insertion_point(namespace_scope)
142
+    // This line appears within the scope of the file's package namespace, but
143
+    // outside of any particular class.  Another plugin can then specify the
144
+    // insertion_point "namespace_scope" to generate additional classes or
145
+    // other declarations that should be placed in this scope.
146
+    //
147
+    // Note that if the line containing the insertion point begins with
148
+    // whitespace, the same whitespace will be added to every line of the
149
+    // inserted text.  This is useful for languages like Python, where
150
+    // indentation matters.  In these languages, the insertion point comment
151
+    // should be indented the same amount as any inserted code will need to be
152
+    // in order to work correctly in that context.
153
+    //
154
+    // The code generator that generates the initial file and the one which
155
+    // inserts into it must both run as part of a single invocation of protoc.
156
+    // Code generators are executed in the order in which they appear on the
157
+    // command line.
158
+    //
159
+    // If |insertion_point| is present, |name| must also be present.
160
+    optional string insertion_point = 2;
161
+
162
+    // The file contents.
163
+    optional string content = 15;
164
+  }
165
+  repeated File file = 15;
166
+}
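The insertion-point mechanism documented above can be exercised by a second generator that returns a File whose insertion_point names an annotation emitted by an earlier one. A hedged sketch; the package, helper, and point name "custom_section" are hypothetical and must match a literal annotation in the target file.

```
// Sketch of a CodeGeneratorResponse.File that targets an insertion point.
// "custom_section" must correspond to a @@protoc_insertion_point(custom_section)
// line in a file produced earlier in the same protoc invocation.
package pluginext

import (
	"github.com/golang/protobuf/proto"
	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
)

func insertionFile(target, content string) *plugin.CodeGeneratorResponse_File {
	return &plugin.CodeGeneratorResponse_File{
		Name:           proto.String(target), // file generated earlier in this protoc run
		InsertionPoint: proto.String("custom_section"),
		Content:        proto.String(content), // spliced immediately above the annotation
	}
}
```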
... ...
@@ -1,19 +1,22 @@
1 1
 Google API Extensions for Go
2 2
 ============================
3 3
 
4
-[![Build Status](https://travis-ci.org/googleapis/gax-go.svg?branch=master)](https://travis-ci.org/googleapis/gax-go)
5
-[![Code Coverage](https://img.shields.io/codecov/c/github/googleapis/gax-go.svg)](https://codecov.io/github/googleapis/gax-go)
4
+[![GoDoc](https://godoc.org/github.com/googleapis/gax-go?status.svg)](https://godoc.org/github.com/googleapis/gax-go)
6 5
 
7 6
 Google API Extensions for Go (gax-go) is a set of modules which aids the
8 7
 development of APIs for clients and servers based on `gRPC` and Google API
9 8
 conventions.
10 9
 
11
-Application code will rarely need to use this library directly,
10
+To install the API extensions, use:
11
+
12
+```
13
+go get -u github.com/googleapis/gax-go
14
+```
15
+
16
+**Note:** Application code will rarely need to use this library directly,
12 17
 but the code generated automatically from API definition files can use it
13 18
 to simplify code generation and to provide more convenient and idiomatic API surface.
14 19
 
15
-**This project is currently experimental and not supported.**
16
-
17 20
 Go Versions
18 21
 ===========
19 22
 This library requires Go 1.6 or above.
20 23
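Although the README notes that application code rarely calls gax directly, the retry surface vendored below is what generated clients configure. A sketch under the assumption that the v2 module path `github.com/googleapis/gax-go/v2` is importable as laid out in this vendoring:

```
package main

import (
	"context"
	"fmt"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/grpc/codes"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Retry Unavailable responses with jittered exponential backoff,
	// mirroring what generated Google Cloud clients set up internally.
	retry := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    200 * time.Millisecond,
			Max:        10 * time.Second,
			Multiplier: 2,
		})
	})

	attempts := 0
	err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
		attempts++
		// A real APICall would issue the underlying gRPC request here.
		return nil
	}, retry)
	fmt.Println(attempts, err)
}
```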
deleted file mode 100644
... ...
@@ -1,157 +0,0 @@
1
-// Copyright 2016, Google Inc.
2
-// All rights reserved.
3
-//
4
-// Redistribution and use in source and binary forms, with or without
5
-// modification, are permitted provided that the following conditions are
6
-// met:
7
-//
8
-//     * Redistributions of source code must retain the above copyright
9
-// notice, this list of conditions and the following disclaimer.
10
-//     * Redistributions in binary form must reproduce the above
11
-// copyright notice, this list of conditions and the following disclaimer
12
-// in the documentation and/or other materials provided with the
13
-// distribution.
14
-//     * Neither the name of Google Inc. nor the names of its
15
-// contributors may be used to endorse or promote products derived from
16
-// this software without specific prior written permission.
17
-//
18
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
-
30
-package gax
31
-
32
-import (
33
-	"math/rand"
34
-	"time"
35
-
36
-	"google.golang.org/grpc"
37
-	"google.golang.org/grpc/codes"
38
-	"google.golang.org/grpc/status"
39
-)
40
-
41
-// CallOption is an option used by Invoke to control behaviors of RPC calls.
42
-// CallOption works by modifying relevant fields of CallSettings.
43
-type CallOption interface {
44
-	// Resolve applies the option by modifying cs.
45
-	Resolve(cs *CallSettings)
46
-}
47
-
48
-// Retryer is used by Invoke to determine retry behavior.
49
-type Retryer interface {
50
-	// Retry reports whether a request should be retriedand how long to pause before retrying
51
-	// if the previous attempt returned with err. Invoke never calls Retry with nil error.
52
-	Retry(err error) (pause time.Duration, shouldRetry bool)
53
-}
54
-
55
-type retryerOption func() Retryer
56
-
57
-func (o retryerOption) Resolve(s *CallSettings) {
58
-	s.Retry = o
59
-}
60
-
61
-// WithRetry sets CallSettings.Retry to fn.
62
-func WithRetry(fn func() Retryer) CallOption {
63
-	return retryerOption(fn)
64
-}
65
-
66
-// OnCodes returns a Retryer that retries if and only if
67
-// the previous attempt returns a GRPC error whose error code is stored in cc.
68
-// Pause times between retries are specified by bo.
69
-//
70
-// bo is only used for its parameters; each Retryer has its own copy.
71
-func OnCodes(cc []codes.Code, bo Backoff) Retryer {
72
-	return &boRetryer{
73
-		backoff: bo,
74
-		codes:   append([]codes.Code(nil), cc...),
75
-	}
76
-}
77
-
78
-type boRetryer struct {
79
-	backoff Backoff
80
-	codes   []codes.Code
81
-}
82
-
83
-func (r *boRetryer) Retry(err error) (time.Duration, bool) {
84
-	st, ok := status.FromError(err)
85
-	if !ok {
86
-		return 0, false
87
-	}
88
-	c := st.Code()
89
-	for _, rc := range r.codes {
90
-		if c == rc {
91
-			return r.backoff.Pause(), true
92
-		}
93
-	}
94
-	return 0, false
95
-}
96
-
97
-// Backoff implements exponential backoff.
98
-// The wait time between retries is a random value between 0 and the "retry envelope".
99
-// The envelope starts at Initial and increases by the factor of Multiplier every retry,
100
-// but is capped at Max.
101
-type Backoff struct {
102
-	// Initial is the initial value of the retry envelope, defaults to 1 second.
103
-	Initial time.Duration
104
-
105
-	// Max is the maximum value of the retry envelope, defaults to 30 seconds.
106
-	Max time.Duration
107
-
108
-	// Multiplier is the factor by which the retry envelope increases.
109
-	// It should be greater than 1 and defaults to 2.
110
-	Multiplier float64
111
-
112
-	// cur is the current retry envelope
113
-	cur time.Duration
114
-}
115
-
116
-func (bo *Backoff) Pause() time.Duration {
117
-	if bo.Initial == 0 {
118
-		bo.Initial = time.Second
119
-	}
120
-	if bo.cur == 0 {
121
-		bo.cur = bo.Initial
122
-	}
123
-	if bo.Max == 0 {
124
-		bo.Max = 30 * time.Second
125
-	}
126
-	if bo.Multiplier < 1 {
127
-		bo.Multiplier = 2
128
-	}
129
-	// Select a duration between zero and the current max. It might seem counterintuitive to
130
-	// have so much jitter, but https://www.awsarchitectureblog.com/2015/03/backoff.html
131
-	// argues that that is the best strategy.
132
-	d := time.Duration(rand.Int63n(int64(bo.cur)))
133
-	bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier)
134
-	if bo.cur > bo.Max {
135
-		bo.cur = bo.Max
136
-	}
137
-	return d
138
-}
139
-
140
-type grpcOpt []grpc.CallOption
141
-
142
-func (o grpcOpt) Resolve(s *CallSettings) {
143
-	s.GRPC = o
144
-}
145
-
146
-func WithGRPCOptions(opt ...grpc.CallOption) CallOption {
147
-	return grpcOpt(append([]grpc.CallOption(nil), opt...))
148
-}
149
-
150
-type CallSettings struct {
151
-	// Retry returns a Retryer to be used to control retry logic of a method call.
152
-	// If Retry is nil or the returned Retryer is nil, the call will not be retried.
153
-	Retry func() Retryer
154
-
155
-	// CallOptions to be forwarded to GRPC.
156
-	GRPC []grpc.CallOption
157
-}
158 1
deleted file mode 100644
... ...
@@ -1,40 +0,0 @@
1
-// Copyright 2016, Google Inc.
2
-// All rights reserved.
3
-//
4
-// Redistribution and use in source and binary forms, with or without
5
-// modification, are permitted provided that the following conditions are
6
-// met:
7
-//
8
-//     * Redistributions of source code must retain the above copyright
9
-// notice, this list of conditions and the following disclaimer.
10
-//     * Redistributions in binary form must reproduce the above
11
-// copyright notice, this list of conditions and the following disclaimer
12
-// in the documentation and/or other materials provided with the
13
-// distribution.
14
-//     * Neither the name of Google Inc. nor the names of its
15
-// contributors may be used to endorse or promote products derived from
16
-// this software without specific prior written permission.
17
-//
18
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
-
30
-// Package gax contains a set of modules which aid the development of APIs
31
-// for clients and servers based on gRPC and Google API conventions.
32
-//
33
-// Application code will rarely need to use this library directly.
34
-// However, code generated automatically from API definition files can use it
35
-// to simplify code generation and to provide more convenient and idiomatic API surfaces.
36
-//
37
-// This project is currently experimental and not supported.
38
-package gax
39
-
40
-const Version = "0.1.0"
41 1
new file mode 100644
... ...
@@ -0,0 +1,11 @@
0
+module github.com/googleapis/gax-go
1
+
2
+require (
3
+	github.com/golang/protobuf v1.3.1
4
+	github.com/googleapis/gax-go/v2 v2.0.2
5
+	golang.org/x/exp v0.0.0-20190221220918-438050ddec5e
6
+	golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3
7
+	golang.org/x/tools v0.0.0-20190114222345-bf090417da8b
8
+	google.golang.org/grpc v1.19.0
9
+	honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099
10
+)
0 11
deleted file mode 100644
... ...
@@ -1,24 +0,0 @@
1
-package gax
2
-
3
-import "bytes"
4
-
5
-// XGoogHeader is for use by the Google Cloud Libraries only.
6
-//
7
-// XGoogHeader formats key-value pairs.
8
-// The resulting string is suitable for x-goog-api-client header.
9
-func XGoogHeader(keyval ...string) string {
10
-	if len(keyval) == 0 {
11
-		return ""
12
-	}
13
-	if len(keyval)%2 != 0 {
14
-		panic("gax.Header: odd argument count")
15
-	}
16
-	var buf bytes.Buffer
17
-	for i := 0; i < len(keyval); i += 2 {
18
-		buf.WriteByte(' ')
19
-		buf.WriteString(keyval[i])
20
-		buf.WriteByte('/')
21
-		buf.WriteString(keyval[i+1])
22
-	}
23
-	return buf.String()[1:]
24
-}
25 1
deleted file mode 100644
... ...
@@ -1,90 +0,0 @@
1
-// Copyright 2016, Google Inc.
2
-// All rights reserved.
3
-//
4
-// Redistribution and use in source and binary forms, with or without
5
-// modification, are permitted provided that the following conditions are
6
-// met:
7
-//
8
-//     * Redistributions of source code must retain the above copyright
9
-// notice, this list of conditions and the following disclaimer.
10
-//     * Redistributions in binary form must reproduce the above
11
-// copyright notice, this list of conditions and the following disclaimer
12
-// in the documentation and/or other materials provided with the
13
-// distribution.
14
-//     * Neither the name of Google Inc. nor the names of its
15
-// contributors may be used to endorse or promote products derived from
16
-// this software without specific prior written permission.
17
-//
18
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
-
30
-package gax
31
-
32
-import (
33
-	"time"
34
-
35
-	"golang.org/x/net/context"
36
-)
37
-
38
-// A user defined call stub.
39
-type APICall func(context.Context, CallSettings) error
40
-
41
-// Invoke calls the given APICall,
42
-// performing retries as specified by opts, if any.
43
-func Invoke(ctx context.Context, call APICall, opts ...CallOption) error {
44
-	var settings CallSettings
45
-	for _, opt := range opts {
46
-		opt.Resolve(&settings)
47
-	}
48
-	return invoke(ctx, call, settings, Sleep)
49
-}
50
-
51
-// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing.
52
-// If interrupted, Sleep returns ctx.Err().
53
-func Sleep(ctx context.Context, d time.Duration) error {
54
-	t := time.NewTimer(d)
55
-	select {
56
-	case <-ctx.Done():
57
-		t.Stop()
58
-		return ctx.Err()
59
-	case <-t.C:
60
-		return nil
61
-	}
62
-}
63
-
64
-type sleeper func(ctx context.Context, d time.Duration) error
65
-
66
-// invoke implements Invoke, taking an additional sleeper argument for testing.
67
-func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error {
68
-	var retryer Retryer
69
-	for {
70
-		err := call(ctx, settings)
71
-		if err == nil {
72
-			return nil
73
-		}
74
-		if settings.Retry == nil {
75
-			return err
76
-		}
77
-		if retryer == nil {
78
-			if r := settings.Retry(); r != nil {
79
-				retryer = r
80
-			} else {
81
-				return err
82
-			}
83
-		}
84
-		if d, ok := retryer.Retry(err); !ok {
85
-			return err
86
-		} else if err = sp(ctx, d); err != nil {
87
-			return err
88
-		}
89
-	}
90
-}
91 1
new file mode 100644
... ...
@@ -0,0 +1,161 @@
0
+// Copyright 2016, Google Inc.
1
+// All rights reserved.
2
+//
3
+// Redistribution and use in source and binary forms, with or without
4
+// modification, are permitted provided that the following conditions are
5
+// met:
6
+//
7
+//     * Redistributions of source code must retain the above copyright
8
+// notice, this list of conditions and the following disclaimer.
9
+//     * Redistributions in binary form must reproduce the above
10
+// copyright notice, this list of conditions and the following disclaimer
11
+// in the documentation and/or other materials provided with the
12
+// distribution.
13
+//     * Neither the name of Google Inc. nor the names of its
14
+// contributors may be used to endorse or promote products derived from
15
+// this software without specific prior written permission.
16
+//
17
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
+
29
+package gax
30
+
31
+import (
32
+	"math/rand"
33
+	"time"
34
+
35
+	"google.golang.org/grpc"
36
+	"google.golang.org/grpc/codes"
37
+	"google.golang.org/grpc/status"
38
+)
39
+
40
+// CallOption is an option used by Invoke to control behaviors of RPC calls.
41
+// CallOption works by modifying relevant fields of CallSettings.
42
+type CallOption interface {
43
+	// Resolve applies the option by modifying cs.
44
+	Resolve(cs *CallSettings)
45
+}
46
+
47
+// Retryer is used by Invoke to determine retry behavior.
48
+type Retryer interface {
49
+	// Retry reports whether a request should be retried and how long to pause before retrying
50
+	// if the previous attempt returned with err. Invoke never calls Retry with nil error.
51
+	Retry(err error) (pause time.Duration, shouldRetry bool)
52
+}
53
+
54
+type retryerOption func() Retryer
55
+
56
+func (o retryerOption) Resolve(s *CallSettings) {
57
+	s.Retry = o
58
+}
59
+
60
+// WithRetry sets CallSettings.Retry to fn.
61
+func WithRetry(fn func() Retryer) CallOption {
62
+	return retryerOption(fn)
63
+}
64
+
65
+// OnCodes returns a Retryer that retries if and only if
66
+// the previous attempt returns a GRPC error whose error code is stored in cc.
67
+// Pause times between retries are specified by bo.
68
+//
69
+// bo is only used for its parameters; each Retryer has its own copy.
70
+func OnCodes(cc []codes.Code, bo Backoff) Retryer {
71
+	return &boRetryer{
72
+		backoff: bo,
73
+		codes:   append([]codes.Code(nil), cc...),
74
+	}
75
+}
76
+
77
+type boRetryer struct {
78
+	backoff Backoff
79
+	codes   []codes.Code
80
+}
81
+
82
+func (r *boRetryer) Retry(err error) (time.Duration, bool) {
83
+	st, ok := status.FromError(err)
84
+	if !ok {
85
+		return 0, false
86
+	}
87
+	c := st.Code()
88
+	for _, rc := range r.codes {
89
+		if c == rc {
90
+			return r.backoff.Pause(), true
91
+		}
92
+	}
93
+	return 0, false
94
+}
95
+
96
+// Backoff implements exponential backoff.
97
+// The wait time between retries is a random value between 0 and the "retry envelope".
98
+// The envelope starts at Initial and increases by the factor of Multiplier every retry,
99
+// but is capped at Max.
100
+type Backoff struct {
101
+	// Initial is the initial value of the retry envelope, defaults to 1 second.
102
+	Initial time.Duration
103
+
104
+	// Max is the maximum value of the retry envelope, defaults to 30 seconds.
105
+	Max time.Duration
106
+
107
+	// Multiplier is the factor by which the retry envelope increases.
108
+	// It should be greater than 1 and defaults to 2.
109
+	Multiplier float64
110
+
111
+	// cur is the current retry envelope
112
+	cur time.Duration
113
+}
114
+
115
+// Pause returns the next time.Duration that the caller should use to backoff.
116
+func (bo *Backoff) Pause() time.Duration {
117
+	if bo.Initial == 0 {
118
+		bo.Initial = time.Second
119
+	}
120
+	if bo.cur == 0 {
121
+		bo.cur = bo.Initial
122
+	}
123
+	if bo.Max == 0 {
124
+		bo.Max = 30 * time.Second
125
+	}
126
+	if bo.Multiplier < 1 {
127
+		bo.Multiplier = 2
128
+	}
129
+	// Select a duration between 1ns and the current max. It might seem
130
+	// counterintuitive to have so much jitter, but
131
+	// https://www.awsarchitectureblog.com/2015/03/backoff.html argues that
132
+	// that is the best strategy.
133
+	d := time.Duration(1 + rand.Int63n(int64(bo.cur)))
134
+	bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier)
135
+	if bo.cur > bo.Max {
136
+		bo.cur = bo.Max
137
+	}
138
+	return d
139
+}
140
+
141
+type grpcOpt []grpc.CallOption
142
+
143
+func (o grpcOpt) Resolve(s *CallSettings) {
144
+	s.GRPC = o
145
+}
146
+
147
+// WithGRPCOptions allows passing gRPC call options during client creation.
148
+func WithGRPCOptions(opt ...grpc.CallOption) CallOption {
149
+	return grpcOpt(append([]grpc.CallOption(nil), opt...))
150
+}
151
+
152
+// CallSettings allow fine-grained control over how calls are made.
153
+type CallSettings struct {
154
+	// Retry returns a Retryer to be used to control retry logic of a method call.
155
+	// If Retry is nil or the returned Retryer is nil, the call will not be retried.
156
+	Retry func() Retryer
157
+
158
+	// CallOptions to be forwarded to GRPC.
159
+	GRPC []grpc.CallOption
160
+}
0 161
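The Pause implementation above uses full jitter: each delay is drawn uniformly from (0, envelope], and the envelope is multiplied by Multiplier after every call until it reaches Max. A small illustration, assuming the same v2 import path as the sketch above:

```
package main

import (
	"fmt"
	"time"

	gax "github.com/googleapis/gax-go/v2"
)

func main() {
	bo := gax.Backoff{Initial: 100 * time.Millisecond, Max: time.Second, Multiplier: 2}
	// Each Pause() is drawn from (0, envelope]; the envelope itself grows
	// 100ms -> 200ms -> 400ms -> 800ms -> 1s and then stays capped at Max.
	for i := 0; i < 6; i++ {
		fmt.Println(bo.Pause())
	}
}
```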
new file mode 100644
... ...
@@ -0,0 +1,39 @@
0
+// Copyright 2016, Google Inc.
1
+// All rights reserved.
2
+//
3
+// Redistribution and use in source and binary forms, with or without
4
+// modification, are permitted provided that the following conditions are
5
+// met:
6
+//
7
+//     * Redistributions of source code must retain the above copyright
8
+// notice, this list of conditions and the following disclaimer.
9
+//     * Redistributions in binary form must reproduce the above
10
+// copyright notice, this list of conditions and the following disclaimer
11
+// in the documentation and/or other materials provided with the
12
+// distribution.
13
+//     * Neither the name of Google Inc. nor the names of its
14
+// contributors may be used to endorse or promote products derived from
15
+// this software without specific prior written permission.
16
+//
17
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
+
29
+// Package gax contains a set of modules which aid the development of APIs
30
+// for clients and servers based on gRPC and Google API conventions.
31
+//
32
+// Application code will rarely need to use this library directly.
33
+// However, code generated automatically from API definition files can use it
34
+// to simplify code generation and to provide more convenient and idiomatic API surfaces.
35
+package gax
36
+
37
+// Version specifies the gax-go version being used.
38
+const Version = "2.0.4"
0 39
new file mode 100644
... ...
@@ -0,0 +1,3 @@
0
+module github.com/googleapis/gax-go/v2
1
+
2
+require google.golang.org/grpc v1.19.0
0 3
new file mode 100644
... ...
@@ -0,0 +1,53 @@
0
+// Copyright 2018, Google Inc.
1
+// All rights reserved.
2
+//
3
+// Redistribution and use in source and binary forms, with or without
4
+// modification, are permitted provided that the following conditions are
5
+// met:
6
+//
7
+//     * Redistributions of source code must retain the above copyright
8
+// notice, this list of conditions and the following disclaimer.
9
+//     * Redistributions in binary form must reproduce the above
10
+// copyright notice, this list of conditions and the following disclaimer
11
+// in the documentation and/or other materials provided with the
12
+// distribution.
13
+//     * Neither the name of Google Inc. nor the names of its
14
+// contributors may be used to endorse or promote products derived from
15
+// this software without specific prior written permission.
16
+//
17
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
+
29
+package gax
30
+
31
+import "bytes"
32
+
33
+// XGoogHeader is for use by the Google Cloud Libraries only.
34
+//
35
+// XGoogHeader formats key-value pairs.
36
+// The resulting string is suitable for x-goog-api-client header.
37
+func XGoogHeader(keyval ...string) string {
38
+	if len(keyval) == 0 {
39
+		return ""
40
+	}
41
+	if len(keyval)%2 != 0 {
42
+		panic("gax.Header: odd argument count")
43
+	}
44
+	var buf bytes.Buffer
45
+	for i := 0; i < len(keyval); i += 2 {
46
+		buf.WriteByte(' ')
47
+		buf.WriteString(keyval[i])
48
+		buf.WriteByte('/')
49
+		buf.WriteString(keyval[i+1])
50
+	}
51
+	return buf.String()[1:]
52
+}
0 53
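XGoogHeader above joins its arguments into space-separated key/value pairs for the x-goog-api-client header. A usage sketch; the version strings other than gax.Version are illustrative:

```
package main

import (
	"fmt"

	gax "github.com/googleapis/gax-go/v2"
)

func main() {
	// Prints "gl-go/1.12.9 gax/2.0.4 grpc/1.19.0" given these inputs:
	// each key/value pair becomes "key/value", pairs separated by spaces.
	fmt.Println(gax.XGoogHeader("gl-go", "1.12.9", "gax", gax.Version, "grpc", "1.19.0"))
}
```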
new file mode 100644
... ...
@@ -0,0 +1,99 @@
0
+// Copyright 2016, Google Inc.
1
+// All rights reserved.
2
+//
3
+// Redistribution and use in source and binary forms, with or without
4
+// modification, are permitted provided that the following conditions are
5
+// met:
6
+//
7
+//     * Redistributions of source code must retain the above copyright
8
+// notice, this list of conditions and the following disclaimer.
9
+//     * Redistributions in binary form must reproduce the above
10
+// copyright notice, this list of conditions and the following disclaimer
11
+// in the documentation and/or other materials provided with the
12
+// distribution.
13
+//     * Neither the name of Google Inc. nor the names of its
14
+// contributors may be used to endorse or promote products derived from
15
+// this software without specific prior written permission.
16
+//
17
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
+
29
+package gax
30
+
31
+import (
32
+	"context"
33
+	"strings"
34
+	"time"
35
+)
36
+
37
+// APICall is a user defined call stub.
38
+type APICall func(context.Context, CallSettings) error
39
+
40
+// Invoke calls the given APICall,
41
+// performing retries as specified by opts, if any.
42
+func Invoke(ctx context.Context, call APICall, opts ...CallOption) error {
43
+	var settings CallSettings
44
+	for _, opt := range opts {
45
+		opt.Resolve(&settings)
46
+	}
47
+	return invoke(ctx, call, settings, Sleep)
48
+}
49
+
50
+// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing.
51
+// If interrupted, Sleep returns ctx.Err().
52
+func Sleep(ctx context.Context, d time.Duration) error {
53
+	t := time.NewTimer(d)
54
+	select {
55
+	case <-ctx.Done():
56
+		t.Stop()
57
+		return ctx.Err()
58
+	case <-t.C:
59
+		return nil
60
+	}
61
+}
62
+
63
+type sleeper func(ctx context.Context, d time.Duration) error
64
+
65
+// invoke implements Invoke, taking an additional sleeper argument for testing.
66
+func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error {
67
+	var retryer Retryer
68
+	for {
69
+		err := call(ctx, settings)
70
+		if err == nil {
71
+			return nil
72
+		}
73
+		if settings.Retry == nil {
74
+			return err
75
+		}
76
+		// Never retry permanent certificate errors (e.g. if ca-certificates
77
+		// are not installed). We should only make very few, targeted
78
+		// exceptions: many (other) status=Unavailable should be retried, such
79
+		// as if there's a network hiccup, or the internet goes out for a
80
+		// minute. This is also why here we are doing string parsing instead of
81
+		// simply making Unavailable a non-retried code elsewhere.
82
+		if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") {
83
+			return err
84
+		}
85
+		if retryer == nil {
86
+			if r := settings.Retry(); r != nil {
87
+				retryer = r
88
+			} else {
89
+				return err
90
+			}
91
+		}
92
+		if d, ok := retryer.Retry(err); !ok {
93
+			return err
94
+		} else if err = sp(ctx, d); err != nil {
95
+			return err
96
+		}
97
+	}
98
+}
0 99
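To make the retry loop above concrete, here is a minimal, illustrative driver; it is not part of the vendored diff. It assumes the `WithRetry`, `OnCodes`, and `Backoff` helpers that gax-go v2 defines in `call_option.go` (a file this hunk does not show), plus a hypothetical `doRPC` closure standing in for a real API call that fails twice with `codes.Unavailable` before succeeding.

```
package main

import (
	"context"
	"fmt"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	attempts := 0
	// doRPC is a stand-in APICall: it fails twice, then succeeds.
	doRPC := func(_ context.Context, _ gax.CallSettings) error {
		attempts++
		if attempts < 3 {
			return status.Error(codes.Unavailable, "transient outage")
		}
		return nil
	}

	err := gax.Invoke(context.Background(), doRPC,
		gax.WithRetry(func() gax.Retryer {
			return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
				Initial:    50 * time.Millisecond,
				Max:        time.Second,
				Multiplier: 2,
			})
		}))
	fmt.Println(attempts, err) // 3 <nil>
}
```

Invoke resolves the options into CallSettings, asks the Retryer whether (and how long) to wait after each failure, and waits via the context-aware Sleep above, so cancelling the context aborts the wait.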
new file mode 100644
... ...
@@ -0,0 +1,27 @@
0
+Copyright (c) 2009 The Go Authors. All rights reserved.
1
+
2
+Redistribution and use in source and binary forms, with or without
3
+modification, are permitted provided that the following conditions are
4
+met:
5
+
6
+   * Redistributions of source code must retain the above copyright
7
+notice, this list of conditions and the following disclaimer.
8
+   * Redistributions in binary form must reproduce the above
9
+copyright notice, this list of conditions and the following disclaimer
10
+in the documentation and/or other materials provided with the
11
+distribution.
12
+   * Neither the name of Google Inc. nor the names of its
13
+contributors may be used to endorse or promote products derived from
14
+this software without specific prior written permission.
15
+
16
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
0 27
new file mode 100644
... ...
@@ -0,0 +1,22 @@
0
+Additional IP Rights Grant (Patents)
1
+
2
+"This implementation" means the copyrightable works distributed by
3
+Google as part of the Go project.
4
+
5
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
6
+no-charge, royalty-free, irrevocable (except as stated in this section)
7
+patent license to make, have made, use, offer to sell, sell, import,
8
+transfer and otherwise run, modify and propagate the contents of this
9
+implementation of Go, where such license applies only to those patent
10
+claims, both currently owned or controlled by Google and acquired in
11
+the future, licensable by Google that are necessarily infringed by this
12
+implementation of Go.  This grant does not include claims that would be
13
+infringed only as a consequence of further modification of this
14
+implementation.  If you or your agent or exclusive licensee institute or
15
+order or agree to the institution of patent litigation against any
16
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
17
+that this implementation of Go or any code incorporated within this
18
+implementation of Go constitutes direct or contributory patent
19
+infringement, or inducement of patent infringement, then any patent
20
+rights granted to you under this License for this implementation of Go
21
+shall terminate as of the date such litigation is filed.
0 22
new file mode 100644
... ...
@@ -0,0 +1,27 @@
0
+# Go Tools
1
+
2
+This subrepository holds the source for various packages and tools that support
3
+the Go programming language.
4
+
5
+Some of the tools, `godoc` and `vet` for example, are included in binary Go
6
+distributions.
7
+
8
+Others, including the Go `guru` and the test coverage tool, can be fetched with
9
+`go get`.
10
+
11
+Packages include a type-checker for Go and an implementation of the
12
+Static Single Assignment form (SSA) representation for Go programs.
13
+
14
+## Download/Install
15
+
16
+The easiest way to install is to run `go get -u golang.org/x/tools/...`. You can
17
+also manually git clone the repository to `$GOPATH/src/golang.org/x/tools`.
18
+
19
+## Report Issues / Send Patches
20
+
21
+This repository uses Gerrit for code changes. To learn how to submit changes to
22
+this repository, see https://golang.org/doc/contribute.html.
23
+
24
+The main issue tracker for the tools repository is located at
25
+https://github.com/golang/go/issues. Prefix your issue with "x/tools/(your
26
+subdir):" in the subject line, so it is easy to find.
0 27
new file mode 100644
... ...
@@ -0,0 +1,20 @@
0
+// Copyright 2015 The Go Authors. All rights reserved.
1
+// Use of this source code is governed by a BSD-style
2
+// license that can be found in the LICENSE file.
3
+
4
+// +build amd64,!appengine,!gccgo
5
+
6
+package intsets
7
+
8
+func popcnt(x word) int
9
+func havePOPCNT() bool
10
+
11
+var hasPOPCNT = havePOPCNT()
12
+
13
+// popcount returns the population count (number of set bits) of x.
14
+func popcount(x word) int {
15
+	if hasPOPCNT {
16
+		return popcnt(x)
17
+	}
18
+	return popcountTable(x) // faster than Hacker's Delight
19
+}
0 20
new file mode 100644
... ...
@@ -0,0 +1,30 @@
0
+// Copyright 2015 The Go Authors. All rights reserved.
1
+// Use of this source code is governed by a BSD-style
2
+// license that can be found in the LICENSE file.
3
+
4
+// +build amd64,!appengine,!gccgo
5
+
6
+#include "textflag.h"
7
+
8
+// func havePOPCNT() bool
9
+TEXT ·havePOPCNT(SB),4,$0
10
+	MOVQ	$1, AX
11
+	CPUID
12
+	SHRQ	$23, CX
13
+	ANDQ	$1, CX
14
+	MOVB	CX, ret+0(FP)
15
+	RET
16
+
17
+// func popcnt(word) int
18
+TEXT ·popcnt(SB),NOSPLIT,$0-8
19
+	XORQ	AX, AX
20
+	MOVQ	x+0(FP), SI
21
+	// POPCNT (SI), AX is not recognized by Go assembler,
22
+	// so we assemble it ourselves.
23
+	BYTE	$0xf3
24
+	BYTE	$0x48
25
+	BYTE	$0x0f
26
+	BYTE	$0xb8
27
+	BYTE	$0xc6
28
+	MOVQ	AX, ret+8(FP)
29
+	RET
0 30
new file mode 100644
... ...
@@ -0,0 +1,9 @@
0
+// Copyright 2015 The Go Authors.  All rights reserved.
1
+// Use of this source code is governed by a BSD-style
2
+// license that can be found in the LICENSE file.
3
+
4
+// +build gccgo
5
+
6
+package intsets
7
+
8
+func popcount(x word) int
0 9
new file mode 100644
... ...
@@ -0,0 +1,19 @@
0
+// Copyright 2015 The Go Authors.  All rights reserved.
1
+// Use of this source code is governed by a BSD-style
2
+// license that can be found in the LICENSE file.
3
+
4
+// +build gccgo
5
+
6
+#include <errno.h>
7
+#include <stdint.h>
8
+#include <unistd.h>
9
+
10
+#define _STRINGIFY2_(x) #x
11
+#define _STRINGIFY_(x) _STRINGIFY2_(x)
12
+#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__)
13
+
14
+extern intptr_t popcount(uintptr_t x) __asm__(GOSYM_PREFIX GOPKGPATH ".popcount");
15
+
16
+intptr_t popcount(uintptr_t x) {
17
+	return __builtin_popcountl((unsigned long)(x));
18
+}
0 19
new file mode 100644
... ...
@@ -0,0 +1,33 @@
0
+// Copyright 2015 The Go Authors. All rights reserved.
1
+// Use of this source code is governed by a BSD-style
2
+// license that can be found in the LICENSE file.
3
+
4
+// +build !amd64 appengine
5
+// +build !gccgo
6
+
7
+package intsets
8
+
9
+import "runtime"
10
+
11
+// We compared three algorithms---Hacker's Delight, table lookup,
12
+// and AMD64's SSE4.1 hardware POPCNT---on a 2.67GHz Xeon X5550.
13
+//
14
+// % GOARCH=amd64 go test -run=NONE -bench=Popcount
15
+// POPCNT               5.12 ns/op
16
+// Table                8.53 ns/op
17
+// HackersDelight       9.96 ns/op
18
+//
19
+// % GOARCH=386 go test -run=NONE -bench=Popcount
20
+// Table               10.4  ns/op
21
+// HackersDelight       5.23 ns/op
22
+//
23
+// (AMD64's ABM1 hardware supports ntz and nlz too,
24
+// but they aren't critical.)
25
+
26
+// popcount returns the population count (number of set bits) of x.
27
+func popcount(x word) int {
28
+	if runtime.GOARCH == "386" {
29
+		return popcountHD(uint32(x))
30
+	}
31
+	return popcountTable(x)
32
+}
0 33
new file mode 100644
... ...
@@ -0,0 +1,1091 @@
0
+// Copyright 2014 The Go Authors. All rights reserved.
1
+// Use of this source code is governed by a BSD-style
2
+// license that can be found in the LICENSE file.
3
+
4
+// Package intsets provides Sparse, a compact and fast representation
5
+// for sparse sets of int values.
6
+//
7
+// The time complexity of the operations Len, Insert, Remove and Has
8
+// is in O(n) but in practice those methods are faster and more
9
+// space-efficient than equivalent operations on sets based on the Go
10
+// map type.  The IsEmpty, Min, Max, Clear and TakeMin operations
11
+// require constant time.
12
+//
13
+package intsets // import "golang.org/x/tools/container/intsets"
14
+
15
+// TODO(adonovan):
16
+// - Add InsertAll(...int), RemoveAll(...int)
17
+// - Add 'bool changed' results for {Intersection,Difference}With too.
18
+//
19
+// TODO(adonovan): implement Dense, a dense bit vector with a similar API.
20
+// The space usage would be proportional to Max(), not Len(), and the
21
+// implementation would be based upon big.Int.
22
+//
23
+// TODO(adonovan): opt: make UnionWith and Difference faster.
24
+// These are the hot-spots for go/pointer.
25
+
26
+import (
27
+	"bytes"
28
+	"fmt"
29
+)
30
+
31
+// A Sparse is a set of int values.
32
+// Sparse operations (even queries) are not concurrency-safe.
33
+//
34
+// The zero value for Sparse is a valid empty set.
35
+//
36
+// Sparse sets must be copied using the Copy method, not by assigning
37
+// a Sparse value.
38
+//
39
+type Sparse struct {
40
+	// An uninitialized Sparse represents an empty set.
41
+	// An empty set may also be represented by
42
+	//  root.next == root.prev == &root.
43
+	//
44
+	// The root is always the block with the smallest offset.
45
+	// It can be empty, but only if it is the only block; in that case, offset is
46
+	// MaxInt (which is not a valid offset).
47
+	root block
48
+}
49
+
50
+type word uintptr
51
+
52
+const (
53
+	_m            = ^word(0)
54
+	bitsPerWord   = 8 << (_m>>8&1 + _m>>16&1 + _m>>32&1)
55
+	bitsPerBlock  = 256 // optimal value for go/pointer solver performance
56
+	wordsPerBlock = bitsPerBlock / bitsPerWord
57
+)
58
+
59
+// Limit values of implementation-specific int type.
60
+const (
61
+	MaxInt = int(^uint(0) >> 1)
62
+	MinInt = -MaxInt - 1
63
+)
64
+
65
+// -- block ------------------------------------------------------------
66
+
67
+// A set is represented as a circular doubly-linked list of blocks,
68
+// each containing an offset and a bit array of fixed size
69
+// bitsPerBlock; the blocks are ordered by increasing offset.
70
+//
71
+// The set contains an element x iff the block whose offset is x - (x
72
+// mod bitsPerBlock) has the bit (x mod bitsPerBlock) set, where mod
73
+// is the Euclidean remainder.
74
+//
75
+// A block may only be empty transiently.
76
+//
77
+type block struct {
78
+	offset     int                 // offset mod bitsPerBlock == 0
79
+	bits       [wordsPerBlock]word // contains at least one set bit
80
+	next, prev *block              // doubly-linked list of blocks
81
+}
82
+
83
+// wordMask returns the word index (in block.bits)
84
+// and single-bit mask for the block's ith bit.
85
+func wordMask(i uint) (w uint, mask word) {
86
+	w = i / bitsPerWord
87
+	mask = 1 << (i % bitsPerWord)
88
+	return
89
+}
90
+
91
+// insert sets the block b's ith bit and
92
+// returns true if it was not already set.
93
+//
94
+func (b *block) insert(i uint) bool {
95
+	w, mask := wordMask(i)
96
+	if b.bits[w]&mask == 0 {
97
+		b.bits[w] |= mask
98
+		return true
99
+	}
100
+	return false
101
+}
102
+
103
+// remove clears the block's ith bit and
104
+// returns true if the bit was previously set.
105
+// NB: may leave the block empty.
106
+//
107
+func (b *block) remove(i uint) bool {
108
+	w, mask := wordMask(i)
109
+	if b.bits[w]&mask != 0 {
110
+		b.bits[w] &^= mask
111
+		return true
112
+	}
113
+	return false
114
+}
115
+
116
+// has reports whether the block's ith bit is set.
117
+func (b *block) has(i uint) bool {
118
+	w, mask := wordMask(i)
119
+	return b.bits[w]&mask != 0
120
+}
121
+
122
+// empty reports whether b.len()==0, but more efficiently.
123
+func (b *block) empty() bool {
124
+	for _, w := range b.bits {
125
+		if w != 0 {
126
+			return false
127
+		}
128
+	}
129
+	return true
130
+}
131
+
132
+// len returns the number of set bits in block b.
133
+func (b *block) len() int {
134
+	var l int
135
+	for _, w := range b.bits {
136
+		l += popcount(w)
137
+	}
138
+	return l
139
+}
140
+
141
+// max returns the maximum element of the block.
142
+// The block must not be empty.
143
+func (b *block) max() int {
144
+	bi := b.offset + bitsPerBlock
145
+	// Decrement bi by the number of high zeros in b.bits.
146
+	for i := len(b.bits) - 1; i >= 0; i-- {
147
+		if w := b.bits[i]; w != 0 {
148
+			return bi - nlz(w) - 1
149
+		}
150
+		bi -= bitsPerWord
151
+	}
152
+	panic("BUG: empty block")
153
+}
154
+
155
+// min returns the minimum element of the block,
156
+// and also removes it if take is set.
157
+// The block must not be initially empty.
158
+// NB: may leave the block empty.
159
+func (b *block) min(take bool) int {
160
+	for i, w := range b.bits {
161
+		if w != 0 {
162
+			tz := ntz(w)
163
+			if take {
164
+				b.bits[i] = w &^ (1 << uint(tz))
165
+			}
166
+			return b.offset + int(i*bitsPerWord) + tz
167
+		}
168
+	}
169
+	panic("BUG: empty block")
170
+}
171
+
172
+// lowerBound returns the smallest element of the block that is greater than or
173
+// equal to the element corresponding to the ith bit. If there is no such
174
+// element, the second return value is false.
175
+func (b *block) lowerBound(i uint) (int, bool) {
176
+	w := i / bitsPerWord
177
+	bit := i % bitsPerWord
178
+
179
+	if val := b.bits[w] >> bit; val != 0 {
180
+		return b.offset + int(i) + ntz(val), true
181
+	}
182
+
183
+	for w++; w < wordsPerBlock; w++ {
184
+		if val := b.bits[w]; val != 0 {
185
+			return b.offset + int(w*bitsPerWord) + ntz(val), true
186
+		}
187
+	}
188
+
189
+	return 0, false
190
+}
191
+
192
+// forEach calls f for each element of block b.
193
+// f must not mutate b's enclosing Sparse.
194
+func (b *block) forEach(f func(int)) {
195
+	for i, w := range b.bits {
196
+		offset := b.offset + i*bitsPerWord
197
+		for bi := 0; w != 0 && bi < bitsPerWord; bi++ {
198
+			if w&1 != 0 {
199
+				f(offset)
200
+			}
201
+			offset++
202
+			w >>= 1
203
+		}
204
+	}
205
+}
206
+
207
+// offsetAndBitIndex returns the offset of the block that would
208
+// contain x and the bit index of x within that block.
209
+//
210
+func offsetAndBitIndex(x int) (int, uint) {
211
+	mod := x % bitsPerBlock
212
+	if mod < 0 {
213
+		// Euclidean (non-negative) remainder
214
+		mod += bitsPerBlock
215
+	}
216
+	return x - mod, uint(mod)
217
+}
218
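+// Worked example (illustration only, not in the upstream source): with
+// bitsPerBlock == 256,
+//   offsetAndBitIndex(300) == (256, 44)
+//   offsetAndBitIndex(-1)  == (-256, 255)  // the Euclidean remainder keeps the bit index non-negative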
+
219
+// -- Sparse --------------------------------------------------------------
220
+
221
+// none is a shared, empty, sentinel block that indicates the end of a block
222
+// list.
223
+var none block
224
+
225
+// Dummy type used to generate an implicit panic. This must be defined at the
226
+// package level; if it is defined inside a function, it prevents the inlining
227
+// of that function.
228
+type to_copy_a_sparse_you_must_call_its_Copy_method struct{}
229
+
230
+// init ensures s is properly initialized.
231
+func (s *Sparse) init() {
232
+	root := &s.root
233
+	if root.next == nil {
234
+		root.offset = MaxInt
235
+		root.next = root
236
+		root.prev = root
237
+	} else if root.next.prev != root {
238
+		// Copying a Sparse x leads to pernicious corruption: the
239
+		// new Sparse y shares the old linked list, but iteration
240
+		// on y will never encounter &y.root so it goes into a
241
+		// loop.  Fail fast before this occurs.
242
+		// We don't want to call panic here because it prevents the
243
+		// inlining of this function.
244
+		_ = (interface{}(nil)).(to_copy_a_sparse_you_must_call_its_Copy_method)
245
+	}
246
+}
247
+
248
+func (s *Sparse) first() *block {
249
+	s.init()
250
+	if s.root.offset == MaxInt {
251
+		return &none
252
+	}
253
+	return &s.root
254
+}
255
+
256
+// next returns the next block in the list, or end if b is the last block.
257
+func (s *Sparse) next(b *block) *block {
258
+	if b.next == &s.root {
259
+		return &none
260
+	}
261
+	return b.next
262
+}
263
+
264
+// prev returns the previous block in the list, or end if b is the first block.
265
+func (s *Sparse) prev(b *block) *block {
266
+	if b.prev == &s.root {
267
+		return &none
268
+	}
269
+	return b.prev
270
+}
271
+
272
+// IsEmpty reports whether the set s is empty.
273
+func (s *Sparse) IsEmpty() bool {
274
+	return s.root.next == nil || s.root.offset == MaxInt
275
+}
276
+
277
+// Len returns the number of elements in the set s.
278
+func (s *Sparse) Len() int {
279
+	var l int
280
+	for b := s.first(); b != &none; b = s.next(b) {
281
+		l += b.len()
282
+	}
283
+	return l
284
+}
285
+
286
+// Max returns the maximum element of the set s, or MinInt if s is empty.
287
+func (s *Sparse) Max() int {
288
+	if s.IsEmpty() {
289
+		return MinInt
290
+	}
291
+	return s.root.prev.max()
292
+}
293
+
294
+// Min returns the minimum element of the set s, or MaxInt if s is empty.
295
+func (s *Sparse) Min() int {
296
+	if s.IsEmpty() {
297
+		return MaxInt
298
+	}
299
+	return s.root.min(false)
300
+}
301
+
302
+// LowerBound returns the smallest element >= x, or MaxInt if there is no such
303
+// element.
304
+func (s *Sparse) LowerBound(x int) int {
305
+	offset, i := offsetAndBitIndex(x)
306
+	for b := s.first(); b != &none; b = s.next(b) {
307
+		if b.offset > offset {
308
+			return b.min(false)
309
+		}
310
+		if b.offset == offset {
311
+			if y, ok := b.lowerBound(i); ok {
312
+				return y
313
+			}
314
+		}
315
+	}
316
+	return MaxInt
317
+}
318
+
319
+// block returns the block that would contain offset,
320
+// or nil if s contains no such block.
321
+// Precondition: offset is a multiple of bitsPerBlock.
322
+func (s *Sparse) block(offset int) *block {
323
+	for b := s.first(); b != &none && b.offset <= offset; b = s.next(b) {
324
+		if b.offset == offset {
325
+			return b
326
+		}
327
+	}
328
+	return nil
329
+}
330
+
331
+// Insert adds x to the set s, and reports whether the set grew.
332
+func (s *Sparse) Insert(x int) bool {
333
+	offset, i := offsetAndBitIndex(x)
334
+
335
+	b := s.first()
336
+	for ; b != &none && b.offset <= offset; b = s.next(b) {
337
+		if b.offset == offset {
338
+			return b.insert(i)
339
+		}
340
+	}
341
+
342
+	// Insert new block before b.
343
+	new := s.insertBlockBefore(b)
344
+	new.offset = offset
345
+	return new.insert(i)
346
+}
347
+
348
+// removeBlock removes a block and returns the block that followed it (or end if
349
+// it was the last block).
350
+func (s *Sparse) removeBlock(b *block) *block {
351
+	if b != &s.root {
352
+		b.prev.next = b.next
353
+		b.next.prev = b.prev
354
+		if b.next == &s.root {
355
+			return &none
356
+		}
357
+		return b.next
358
+	}
359
+
360
+	first := s.root.next
361
+	if first == &s.root {
362
+		// This was the only block.
363
+		s.Clear()
364
+		return &none
365
+	}
366
+	s.root.offset = first.offset
367
+	s.root.bits = first.bits
368
+	if first.next == &s.root {
369
+		// Single block remaining.
370
+		s.root.next = &s.root
371
+		s.root.prev = &s.root
372
+	} else {
373
+		s.root.next = first.next
374
+		first.next.prev = &s.root
375
+	}
376
+	return &s.root
377
+}
378
+
379
+// Remove removes x from the set s, and reports whether the set shrank.
380
+func (s *Sparse) Remove(x int) bool {
381
+	offset, i := offsetAndBitIndex(x)
382
+	if b := s.block(offset); b != nil {
383
+		if !b.remove(i) {
384
+			return false
385
+		}
386
+		if b.empty() {
387
+			s.removeBlock(b)
388
+		}
389
+		return true
390
+	}
391
+	return false
392
+}
393
+
394
+// Clear removes all elements from the set s.
395
+func (s *Sparse) Clear() {
396
+	s.root = block{
397
+		offset: MaxInt,
398
+		next:   &s.root,
399
+		prev:   &s.root,
400
+	}
401
+}
402
+
403
+// If set s is non-empty, TakeMin sets *p to the minimum element of
404
+// the set s, removes that element from the set and returns true.
405
+// Otherwise, it returns false and *p is undefined.
406
+//
407
+// This method may be used for iteration over a worklist like so:
408
+//
409
+// 	var x int
410
+// 	for worklist.TakeMin(&x) { use(x) }
411
+//
412
+func (s *Sparse) TakeMin(p *int) bool {
413
+	if s.IsEmpty() {
414
+		return false
415
+	}
416
+	*p = s.root.min(true)
417
+	if s.root.empty() {
418
+		s.removeBlock(&s.root)
419
+	}
420
+	return true
421
+}
422
+
423
+// Has reports whether x is an element of the set s.
424
+func (s *Sparse) Has(x int) bool {
425
+	offset, i := offsetAndBitIndex(x)
426
+	if b := s.block(offset); b != nil {
427
+		return b.has(i)
428
+	}
429
+	return false
430
+}
431
+
432
+// forEach applies function f to each element of the set s in order.
433
+//
434
+// f must not mutate s.  Consequently, forEach is not safe to expose
435
+// to clients.  In any case, using "range s.AppendTo()" allows more
436
+// natural control flow with continue/break/return.
437
+//
438
+func (s *Sparse) forEach(f func(int)) {
439
+	for b := s.first(); b != &none; b = s.next(b) {
440
+		b.forEach(f)
441
+	}
442
+}
443
+
444
+// Copy sets s to the value of x.
445
+func (s *Sparse) Copy(x *Sparse) {
446
+	if s == x {
447
+		return
448
+	}
449
+
450
+	xb := x.first()
451
+	sb := s.first()
452
+	for xb != &none {
453
+		if sb == &none {
454
+			sb = s.insertBlockBefore(sb)
455
+		}
456
+		sb.offset = xb.offset
457
+		sb.bits = xb.bits
458
+		xb = x.next(xb)
459
+		sb = s.next(sb)
460
+	}
461
+	s.discardTail(sb)
462
+}
463
+
464
+// insertBlockBefore returns a new block, inserting it before next.
465
+// If next is the root, the root is replaced. If next is end, the block is
466
+// inserted at the end.
467
+func (s *Sparse) insertBlockBefore(next *block) *block {
468
+	if s.IsEmpty() {
469
+		if next != &none {
470
+			panic("BUG: passed block with empty set")
471
+		}
472
+		return &s.root
473
+	}
474
+
475
+	if next == &s.root {
476
+		// Special case: we need to create a new block that will become the root
477
+		// block. The old root block becomes the second block.
478
+		second := s.root
479
+		s.root = block{
480
+			next: &second,
481
+		}
482
+		if second.next == &s.root {
483
+			s.root.prev = &second
484
+		} else {
485
+			s.root.prev = second.prev
486
+			second.next.prev = &second
487
+			second.prev = &s.root
488
+		}
489
+		return &s.root
490
+	}
491
+	if next == &none {
492
+		// Insert before root.
493
+		next = &s.root
494
+	}
495
+	b := new(block)
496
+	b.next = next
497
+	b.prev = next.prev
498
+	b.prev.next = b
499
+	next.prev = b
500
+	return b
501
+}
502
+
503
+// discardTail removes block b and all its successors from s.
504
+func (s *Sparse) discardTail(b *block) {
505
+	if b != &none {
506
+		if b == &s.root {
507
+			s.Clear()
508
+		} else {
509
+			b.prev.next = &s.root
510
+			s.root.prev = b.prev
511
+		}
512
+	}
513
+}
514
+
515
+// IntersectionWith sets s to the intersection s ∩ x.
516
+func (s *Sparse) IntersectionWith(x *Sparse) {
517
+	if s == x {
518
+		return
519
+	}
520
+
521
+	xb := x.first()
522
+	sb := s.first()
523
+	for xb != &none && sb != &none {
524
+		switch {
525
+		case xb.offset < sb.offset:
526
+			xb = x.next(xb)
527
+
528
+		case xb.offset > sb.offset:
529
+			sb = s.removeBlock(sb)
530
+
531
+		default:
532
+			var sum word
533
+			for i := range sb.bits {
534
+				r := xb.bits[i] & sb.bits[i]
535
+				sb.bits[i] = r
536
+				sum |= r
537
+			}
538
+			if sum != 0 {
539
+				sb = s.next(sb)
540
+			} else {
541
+				// sb will be overwritten or removed
542
+			}
543
+
544
+			xb = x.next(xb)
545
+		}
546
+	}
547
+
548
+	s.discardTail(sb)
549
+}
550
+
551
+// Intersection sets s to the intersection x ∩ y.
552
+func (s *Sparse) Intersection(x, y *Sparse) {
553
+	switch {
554
+	case s == x:
555
+		s.IntersectionWith(y)
556
+		return
557
+	case s == y:
558
+		s.IntersectionWith(x)
559
+		return
560
+	case x == y:
561
+		s.Copy(x)
562
+		return
563
+	}
564
+
565
+	xb := x.first()
566
+	yb := y.first()
567
+	sb := s.first()
568
+	for xb != &none && yb != &none {
569
+		switch {
570
+		case xb.offset < yb.offset:
571
+			xb = x.next(xb)
572
+			continue
573
+		case xb.offset > yb.offset:
574
+			yb = y.next(yb)
575
+			continue
576
+		}
577
+
578
+		if sb == &none {
579
+			sb = s.insertBlockBefore(sb)
580
+		}
581
+		sb.offset = xb.offset
582
+
583
+		var sum word
584
+		for i := range sb.bits {
585
+			r := xb.bits[i] & yb.bits[i]
586
+			sb.bits[i] = r
587
+			sum |= r
588
+		}
589
+		if sum != 0 {
590
+			sb = s.next(sb)
591
+		} else {
592
+			// sb will be overwritten or removed
593
+		}
594
+
595
+		xb = x.next(xb)
596
+		yb = y.next(yb)
597
+	}
598
+
599
+	s.discardTail(sb)
600
+}
601
+
602
+// Intersects reports whether s ∩ x ≠ ∅.
603
+func (s *Sparse) Intersects(x *Sparse) bool {
604
+	sb := s.first()
605
+	xb := x.first()
606
+	for sb != &none && xb != &none {
607
+		switch {
608
+		case xb.offset < sb.offset:
609
+			xb = x.next(xb)
610
+		case xb.offset > sb.offset:
611
+			sb = s.next(sb)
612
+		default:
613
+			for i := range sb.bits {
614
+				if sb.bits[i]&xb.bits[i] != 0 {
615
+					return true
616
+				}
617
+			}
618
+			sb = s.next(sb)
619
+			xb = x.next(xb)
620
+		}
621
+	}
622
+	return false
623
+}
624
+
625
+// UnionWith sets s to the union s ∪ x, and reports whether s grew.
626
+func (s *Sparse) UnionWith(x *Sparse) bool {
627
+	if s == x {
628
+		return false
629
+	}
630
+
631
+	var changed bool
632
+	xb := x.first()
633
+	sb := s.first()
634
+	for xb != &none {
635
+		if sb != &none && sb.offset == xb.offset {
636
+			for i := range xb.bits {
637
+				if sb.bits[i] != xb.bits[i] {
638
+					sb.bits[i] |= xb.bits[i]
639
+					changed = true
640
+				}
641
+			}
642
+			xb = x.next(xb)
643
+		} else if sb == &none || sb.offset > xb.offset {
644
+			sb = s.insertBlockBefore(sb)
645
+			sb.offset = xb.offset
646
+			sb.bits = xb.bits
647
+			changed = true
648
+
649
+			xb = x.next(xb)
650
+		}
651
+		sb = s.next(sb)
652
+	}
653
+	return changed
654
+}
655
+
656
+// Union sets s to the union x ∪ y.
657
+func (s *Sparse) Union(x, y *Sparse) {
658
+	switch {
659
+	case x == y:
660
+		s.Copy(x)
661
+		return
662
+	case s == x:
663
+		s.UnionWith(y)
664
+		return
665
+	case s == y:
666
+		s.UnionWith(x)
667
+		return
668
+	}
669
+
670
+	xb := x.first()
671
+	yb := y.first()
672
+	sb := s.first()
673
+	for xb != &none || yb != &none {
674
+		if sb == &none {
675
+			sb = s.insertBlockBefore(sb)
676
+		}
677
+		switch {
678
+		case yb == &none || (xb != &none && xb.offset < yb.offset):
679
+			sb.offset = xb.offset
680
+			sb.bits = xb.bits
681
+			xb = x.next(xb)
682
+
683
+		case xb == &none || (yb != &none && yb.offset < xb.offset):
684
+			sb.offset = yb.offset
685
+			sb.bits = yb.bits
686
+			yb = y.next(yb)
687
+
688
+		default:
689
+			sb.offset = xb.offset
690
+			for i := range xb.bits {
691
+				sb.bits[i] = xb.bits[i] | yb.bits[i]
692
+			}
693
+			xb = x.next(xb)
694
+			yb = y.next(yb)
695
+		}
696
+		sb = s.next(sb)
697
+	}
698
+
699
+	s.discardTail(sb)
700
+}
701
+
702
+// DifferenceWith sets s to the difference s ∖ x.
703
+func (s *Sparse) DifferenceWith(x *Sparse) {
704
+	if s == x {
705
+		s.Clear()
706
+		return
707
+	}
708
+
709
+	xb := x.first()
710
+	sb := s.first()
711
+	for xb != &none && sb != &none {
712
+		switch {
713
+		case xb.offset > sb.offset:
714
+			sb = s.next(sb)
715
+
716
+		case xb.offset < sb.offset:
717
+			xb = x.next(xb)
718
+
719
+		default:
720
+			var sum word
721
+			for i := range sb.bits {
722
+				r := sb.bits[i] & ^xb.bits[i]
723
+				sb.bits[i] = r
724
+				sum |= r
725
+			}
726
+			if sum == 0 {
727
+				sb = s.removeBlock(sb)
728
+			} else {
729
+				sb = s.next(sb)
730
+			}
731
+			xb = x.next(xb)
732
+		}
733
+	}
734
+}
735
+
736
+// Difference sets s to the difference x ∖ y.
737
+func (s *Sparse) Difference(x, y *Sparse) {
738
+	switch {
739
+	case x == y:
740
+		s.Clear()
741
+		return
742
+	case s == x:
743
+		s.DifferenceWith(y)
744
+		return
745
+	case s == y:
746
+		var y2 Sparse
747
+		y2.Copy(y)
748
+		s.Difference(x, &y2)
749
+		return
750
+	}
751
+
752
+	xb := x.first()
753
+	yb := y.first()
754
+	sb := s.first()
755
+	for xb != &none && yb != &none {
756
+		if xb.offset > yb.offset {
757
+			// y has block, x has &none
758
+			yb = y.next(yb)
759
+			continue
760
+		}
761
+
762
+		if sb == &none {
763
+			sb = s.insertBlockBefore(sb)
764
+		}
765
+		sb.offset = xb.offset
766
+
767
+		switch {
768
+		case xb.offset < yb.offset:
769
+			// x has block, y has &none
770
+			sb.bits = xb.bits
771
+
772
+			sb = s.next(sb)
773
+
774
+		default:
775
+			// x and y have corresponding blocks
776
+			var sum word
777
+			for i := range sb.bits {
778
+				r := xb.bits[i] & ^yb.bits[i]
779
+				sb.bits[i] = r
780
+				sum |= r
781
+			}
782
+			if sum != 0 {
783
+				sb = s.next(sb)
784
+			} else {
785
+				// sb will be overwritten or removed
786
+			}
787
+
788
+			yb = y.next(yb)
789
+		}
790
+		xb = x.next(xb)
791
+	}
792
+
793
+	for xb != &none {
794
+		if sb == &none {
795
+			sb = s.insertBlockBefore(sb)
796
+		}
797
+		sb.offset = xb.offset
798
+		sb.bits = xb.bits
799
+		sb = s.next(sb)
800
+
801
+		xb = x.next(xb)
802
+	}
803
+
804
+	s.discardTail(sb)
805
+}
806
+
807
+// SymmetricDifferenceWith sets s to the symmetric difference s ∆ x.
808
+func (s *Sparse) SymmetricDifferenceWith(x *Sparse) {
809
+	if s == x {
810
+		s.Clear()
811
+		return
812
+	}
813
+
814
+	sb := s.first()
815
+	xb := x.first()
816
+	for xb != &none && sb != &none {
817
+		switch {
818
+		case sb.offset < xb.offset:
819
+			sb = s.next(sb)
820
+		case xb.offset < sb.offset:
821
+			nb := s.insertBlockBefore(sb)
822
+			nb.offset = xb.offset
823
+			nb.bits = xb.bits
824
+			xb = x.next(xb)
825
+		default:
826
+			var sum word
827
+			for i := range sb.bits {
828
+				r := sb.bits[i] ^ xb.bits[i]
829
+				sb.bits[i] = r
830
+				sum |= r
831
+			}
832
+			if sum == 0 {
833
+				sb = s.removeBlock(sb)
834
+			} else {
835
+				sb = s.next(sb)
836
+			}
837
+			xb = x.next(xb)
838
+		}
839
+	}
840
+
841
+	for xb != &none { // append the tail of x to s
842
+		sb = s.insertBlockBefore(sb)
843
+		sb.offset = xb.offset
844
+		sb.bits = xb.bits
845
+		sb = s.next(sb)
846
+		xb = x.next(xb)
847
+	}
848
+}
849
+
850
+// SymmetricDifference sets s to the symmetric difference x ∆ y.
851
+func (s *Sparse) SymmetricDifference(x, y *Sparse) {
852
+	switch {
853
+	case x == y:
854
+		s.Clear()
855
+		return
856
+	case s == x:
857
+		s.SymmetricDifferenceWith(y)
858
+		return
859
+	case s == y:
860
+		s.SymmetricDifferenceWith(x)
861
+		return
862
+	}
863
+
864
+	sb := s.first()
865
+	xb := x.first()
866
+	yb := y.first()
867
+	for xb != &none && yb != &none {
868
+		if sb == &none {
869
+			sb = s.insertBlockBefore(sb)
870
+		}
871
+		switch {
872
+		case yb.offset < xb.offset:
873
+			sb.offset = yb.offset
874
+			sb.bits = yb.bits
875
+			sb = s.next(sb)
876
+			yb = y.next(yb)
877
+		case xb.offset < yb.offset:
878
+			sb.offset = xb.offset
879
+			sb.bits = xb.bits
880
+			sb = s.next(sb)
881
+			xb = x.next(xb)
882
+		default:
883
+			var sum word
884
+			for i := range sb.bits {
885
+				r := xb.bits[i] ^ yb.bits[i]
886
+				sb.bits[i] = r
887
+				sum |= r
888
+			}
889
+			if sum != 0 {
890
+				sb.offset = xb.offset
891
+				sb = s.next(sb)
892
+			}
893
+			xb = x.next(xb)
894
+			yb = y.next(yb)
895
+		}
896
+	}
897
+
898
+	for xb != &none { // append the tail of x to s
899
+		if sb == &none {
900
+			sb = s.insertBlockBefore(sb)
901
+		}
902
+		sb.offset = xb.offset
903
+		sb.bits = xb.bits
904
+		sb = s.next(sb)
905
+		xb = x.next(xb)
906
+	}
907
+
908
+	for yb != &none { // append the tail of y to s
909
+		if sb == &none {
910
+			sb = s.insertBlockBefore(sb)
911
+		}
912
+		sb.offset = yb.offset
913
+		sb.bits = yb.bits
914
+		sb = s.next(sb)
915
+		yb = y.next(yb)
916
+	}
917
+
918
+	s.discardTail(sb)
919
+}
920
+
921
+// SubsetOf reports whether s ∖ x = ∅.
922
+func (s *Sparse) SubsetOf(x *Sparse) bool {
923
+	if s == x {
924
+		return true
925
+	}
926
+
927
+	sb := s.first()
928
+	xb := x.first()
929
+	for sb != &none {
930
+		switch {
931
+		case xb == &none || xb.offset > sb.offset:
932
+			return false
933
+		case xb.offset < sb.offset:
934
+			xb = x.next(xb)
935
+		default:
936
+			for i := range sb.bits {
937
+				if sb.bits[i]&^xb.bits[i] != 0 {
938
+					return false
939
+				}
940
+			}
941
+			sb = s.next(sb)
942
+			xb = x.next(xb)
943
+		}
944
+	}
945
+	return true
946
+}
947
+
948
+// Equals reports whether the sets s and t have the same elements.
949
+func (s *Sparse) Equals(t *Sparse) bool {
950
+	if s == t {
951
+		return true
952
+	}
953
+	sb := s.first()
954
+	tb := t.first()
955
+	for {
956
+		switch {
957
+		case sb == &none && tb == &none:
958
+			return true
959
+		case sb == &none || tb == &none:
960
+			return false
961
+		case sb.offset != tb.offset:
962
+			return false
963
+		case sb.bits != tb.bits:
964
+			return false
965
+		}
966
+
967
+		sb = s.next(sb)
968
+		tb = t.next(tb)
969
+	}
970
+}
971
+
972
+// String returns a human-readable description of the set s.
973
+func (s *Sparse) String() string {
974
+	var buf bytes.Buffer
975
+	buf.WriteByte('{')
976
+	s.forEach(func(x int) {
977
+		if buf.Len() > 1 {
978
+			buf.WriteByte(' ')
979
+		}
980
+		fmt.Fprintf(&buf, "%d", x)
981
+	})
982
+	buf.WriteByte('}')
983
+	return buf.String()
984
+}
985
+
986
+// BitString returns the set as a string of 1s and 0s denoting the sum
987
+// of the i'th powers of 2, for each i in s.  A radix point, always
988
+// preceded by a digit, appears if the sum is non-integral.
989
+//
990
+// Examples:
991
+//              {}.BitString() =      "0"
992
+//           {4,5}.BitString() = "110000"
993
+//            {-3}.BitString() =      "0.001"
994
+//      {-3,0,4,5}.BitString() = "110001.001"
995
+//
996
+func (s *Sparse) BitString() string {
997
+	if s.IsEmpty() {
998
+		return "0"
999
+	}
1000
+
1001
+	min, max := s.Min(), s.Max()
1002
+	var nbytes int
1003
+	if max > 0 {
1004
+		nbytes = max
1005
+	}
1006
+	nbytes++ // zero bit
1007
+	radix := nbytes
1008
+	if min < 0 {
1009
+		nbytes += len(".") - min
1010
+	}
1011
+
1012
+	b := make([]byte, nbytes)
1013
+	for i := range b {
1014
+		b[i] = '0'
1015
+	}
1016
+	if radix < nbytes {
1017
+		b[radix] = '.'
1018
+	}
1019
+	s.forEach(func(x int) {
1020
+		if x >= 0 {
1021
+			x += len(".")
1022
+		}
1023
+		b[radix-x] = '1'
1024
+	})
1025
+	return string(b)
1026
+}
1027
+
1028
+// GoString returns a string showing the internal representation of
1029
+// the set s.
1030
+//
1031
+func (s *Sparse) GoString() string {
1032
+	var buf bytes.Buffer
1033
+	for b := s.first(); b != &none; b = s.next(b) {
1034
+		fmt.Fprintf(&buf, "block %p {offset=%d next=%p prev=%p",
1035
+			b, b.offset, b.next, b.prev)
1036
+		for _, w := range b.bits {
1037
+			fmt.Fprintf(&buf, " 0%016x", w)
1038
+		}
1039
+		fmt.Fprintf(&buf, "}\n")
1040
+	}
1041
+	return buf.String()
1042
+}
1043
+
1044
+// AppendTo returns the result of appending the elements of s to slice
1045
+// in order.
1046
+func (s *Sparse) AppendTo(slice []int) []int {
1047
+	s.forEach(func(x int) {
1048
+		slice = append(slice, x)
1049
+	})
1050
+	return slice
1051
+}
1052
+
1053
+// -- Testing/debugging ------------------------------------------------
1054
+
1055
+// check returns an error if the representation invariants of s are violated.
1056
+func (s *Sparse) check() error {
1057
+	s.init()
1058
+	if s.root.empty() {
1059
+		// An empty set must have only the root block with offset MaxInt.
1060
+		if s.root.next != &s.root {
1061
+			return fmt.Errorf("multiple blocks with empty root block")
1062
+		}
1063
+		if s.root.offset != MaxInt {
1064
+			return fmt.Errorf("empty set has offset %d, should be MaxInt", s.root.offset)
1065
+		}
1066
+		return nil
1067
+	}
1068
+	for b := s.first(); ; b = s.next(b) {
1069
+		if b.offset%bitsPerBlock != 0 {
1070
+			return fmt.Errorf("bad offset modulo: %d", b.offset)
1071
+		}
1072
+		if b.empty() {
1073
+			return fmt.Errorf("empty block")
1074
+		}
1075
+		if b.prev.next != b {
1076
+			return fmt.Errorf("bad prev.next link")
1077
+		}
1078
+		if b.next.prev != b {
1079
+			return fmt.Errorf("bad next.prev link")
1080
+		}
1081
+		if b.next == &s.root {
1082
+			break
1083
+		}
1084
+		if b.offset >= b.next.offset {
1085
+			return fmt.Errorf("bad offset order: b.offset=%d, b.next.offset=%d",
1086
+				b.offset, b.next.offset)
1087
+		}
1088
+	}
1089
+	return nil
1090
+}
0 1091
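Because sparse.go is the bulk of this vendored package, a short, self-contained sketch of how its exported API fits together may help while reviewing the hunk. It is illustrative only (not vendored code) and uses nothing beyond the methods defined above.

```
package main

import (
	"fmt"

	"golang.org/x/tools/container/intsets"
)

func main() {
	var a, b intsets.Sparse // zero values are valid empty sets

	a.Insert(1)
	a.Insert(300) // stored in a second block (offset 256)
	b.Insert(-3)  // negative elements are supported
	a.UnionWith(&b)

	fmt.Println(a.Has(300), a.Len(), a.String()) // true 3 {-3 1 300}

	// Worklist-style iteration, as described in the TakeMin doc comment;
	// it drains the set in increasing order.
	var x int
	for a.TakeMin(&x) {
		fmt.Println(x) // -3, then 1, then 300
	}
	fmt.Println(a.IsEmpty()) // true
}
```

Note the warning in the Sparse type's doc comment: values are passed by pointer and duplicated with Copy, never by assignment.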
new file mode 100644
... ...
@@ -0,0 +1,84 @@
0
+// Copyright 2013 The Go Authors. All rights reserved.
1
+// Use of this source code is governed by a BSD-style
2
+// license that can be found in the LICENSE file.
3
+
4
+package intsets
5
+
6
+// From Hacker's Delight, fig 5.2.
7
+func popcountHD(x uint32) int {
8
+	x -= (x >> 1) & 0x55555555
9
+	x = (x & 0x33333333) + ((x >> 2) & 0x33333333)
10
+	x = (x + (x >> 4)) & 0x0f0f0f0f
11
+	x = x + (x >> 8)
12
+	x = x + (x >> 16)
13
+	return int(x & 0x0000003f)
14
+}
15
+
16
+var a [1 << 8]byte
17
+
18
+func init() {
19
+	for i := range a {
20
+		var n byte
21
+		for x := i; x != 0; x >>= 1 {
22
+			if x&1 != 0 {
23
+				n++
24
+			}
25
+		}
26
+		a[i] = n
27
+	}
28
+}
29
+
30
+func popcountTable(x word) int {
31
+	return int(a[byte(x>>(0*8))] +
32
+		a[byte(x>>(1*8))] +
33
+		a[byte(x>>(2*8))] +
34
+		a[byte(x>>(3*8))] +
35
+		a[byte(x>>(4*8))] +
36
+		a[byte(x>>(5*8))] +
37
+		a[byte(x>>(6*8))] +
38
+		a[byte(x>>(7*8))])
39
+}
40
+
41
+// nlz returns the number of leading zeros of x.
42
+// From Hacker's Delight, fig 5.11.
43
+func nlz(x word) int {
44
+	x |= (x >> 1)
45
+	x |= (x >> 2)
46
+	x |= (x >> 4)
47
+	x |= (x >> 8)
48
+	x |= (x >> 16)
49
+	x |= (x >> 32)
50
+	return popcount(^x)
51
+}
52
+
53
+// ntz returns the number of trailing zeros of x.
54
+// From Hacker's Delight, fig 5.13.
55
+func ntz(x word) int {
56
+	if x == 0 {
57
+		return bitsPerWord
58
+	}
59
+	n := 1
60
+	if bitsPerWord == 64 {
61
+		if (x & 0xffffffff) == 0 {
62
+			n = n + 32
63
+			x = x >> 32
64
+		}
65
+	}
66
+	if (x & 0x0000ffff) == 0 {
67
+		n = n + 16
68
+		x = x >> 16
69
+	}
70
+	if (x & 0x000000ff) == 0 {
71
+		n = n + 8
72
+		x = x >> 8
73
+	}
74
+	if (x & 0x0000000f) == 0 {
75
+		n = n + 4
76
+		x = x >> 4
77
+	}
78
+	if (x & 0x00000003) == 0 {
79
+		n = n + 2
80
+		x = x >> 2
81
+	}
82
+	return n - int(x&1)
83
+}
0 84
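As a quick, illustrative cross-check of the Hacker's Delight routines above against the standard library (this is not part of the vendored code; it only shows what popcountHD, ntz, and nlz compute):

```
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x := uint32(0xF0F0F0F0)
	// popcountHD(x) agrees with bits.OnesCount32: 16 set bits.
	fmt.Println(bits.OnesCount32(x)) // 16
	// ntz/nlz correspond to trailing/leading zero counts: 4 and 0 here.
	fmt.Println(bits.TrailingZeros32(x), bits.LeadingZeros32(x)) // 4 0
}
```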
new file mode 100644
... ...
@@ -0,0 +1,8 @@
0
+module golang.org/x/tools
1
+
2
+go 1.11
3
+
4
+require (
5
+	golang.org/x/net v0.0.0-20190311183353-d8887717615a
6
+	golang.org/x/sync v0.0.0-20190423024810-112230192c58
7
+)