Merge pull request #11788 from reikani/pchoi

Changed snake case naming to camelCase.

Brian Goff authored on 2015/03/27 15:55:50
Showing 19 changed files
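The rename follows Go's MixedCaps convention (Effective Go advises against underscores in multi-word names). A minimal sketch of the pattern, using identifiers that appear in the diff below; the program itself is illustrative and not part of the PR:

package main

import "fmt"

func main() {
	// Before this PR these locals were spelled layers_downloaded and has_value;
	// the PR renames them to camelCase, e.g. layersDownloaded and hasValue.
	// Exported names lose the underscore too: Hack_SetGlobalVar -> HackSetGlobalVar.
	layersDownloaded := false
	hasValue := true
	fmt.Println(layersDownloaded, hasValue)
}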
... ...
@@ -276,18 +276,18 @@ func TestGetEvents(t *testing.T) {
 		t.Fatal("handler was not called")
 	}
 	assertContentType(r, "application/json", t)
-	var stdout_json struct {
+	var stdoutJSON struct {
 		Since int
 		Until int
 	}
-	if err := json.Unmarshal(r.Body.Bytes(), &stdout_json); err != nil {
+	if err := json.Unmarshal(r.Body.Bytes(), &stdoutJSON); err != nil {
 		t.Fatal(err)
 	}
-	if stdout_json.Since != 1 {
-		t.Errorf("since != 1: %#v", stdout_json.Since)
+	if stdoutJSON.Since != 1 {
+		t.Errorf("since != 1: %#v", stdoutJSON.Since)
 	}
-	if stdout_json.Until != 0 {
-		t.Errorf("until != 0: %#v", stdout_json.Until)
+	if stdoutJSON.Until != 0 {
+		t.Errorf("until != 0: %#v", stdoutJSON.Until)
 	}
 }
 
... ...
@@ -509,8 +509,8 @@ func toJson(data interface{}, t *testing.T) io.Reader {
 	return &buf
 }
 
-func assertContentType(recorder *httptest.ResponseRecorder, content_type string, t *testing.T) {
-	if recorder.HeaderMap.Get("Content-Type") != content_type {
+func assertContentType(recorder *httptest.ResponseRecorder, contentType string, t *testing.T) {
+	if recorder.HeaderMap.Get("Content-Type") != contentType {
 		t.Fatalf("%#v\n", recorder)
 	}
 }
... ...
@@ -154,7 +154,7 @@ func (daemon *Daemon) Install(eng *engine.Engine) error {
 	}
 	// FIXME: this hack is necessary for legacy integration tests to access
 	// the daemon object.
-	eng.Hack_SetGlobalVar("httpapi.daemon", daemon)
+	eng.HackSetGlobalVar("httpapi.daemon", daemon)
 	return nil
 }
 
... ...
@@ -24,7 +24,7 @@ type Driver struct {
 // InitLoopbacks ensures that the loopback devices are properly created within
 // the system running the device mapper tests.
 func InitLoopbacks() error {
-	stat_t, err := getBaseLoopStats()
+	statT, err := getBaseLoopStats()
 	if err != nil {
 		return err
 	}
... ...
@@ -34,10 +34,10 @@ func InitLoopbacks() error {
 		// only create new loopback files if they don't exist
 		if _, err := os.Stat(loopPath); err != nil {
 			if mkerr := syscall.Mknod(loopPath,
-				uint32(stat_t.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil {
+				uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil {
 				return mkerr
 			}
-			os.Chown(loopPath, int(stat_t.Uid), int(stat_t.Gid))
+			os.Chown(loopPath, int(statT.Uid), int(statT.Gid))
 		}
 	}
 	return nil
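A note on the Mknod argument in this hunk (my reading of the expression shown, not text from the PR): major number 7 is the Linux loop driver, and the integer packs major and minor the way the classic makedev macro does. A hypothetical helper showing the same packing:

package main

import "fmt"

// mkdev is illustrative only: minor's low byte in bits 0-7, the major in
// bits 8-15, and the remaining minor bits shifted up by 12, matching
// (7<<8)|(i&0xff)|((i&0xfff00)<<12) for the loop index i used above.
func mkdev(major, minor int) int {
	return (major << 8) | (minor & 0xff) | ((minor &^ 0xff) << 12)
}

func main() {
	fmt.Printf("0x%x\n", mkdev(7, 3)) // device number for /dev/loop3: 0x703
}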
... ...
@@ -89,14 +89,14 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) error {
 	v.SetInt("NCPU", runtime.NumCPU())
 	v.SetInt64("MemTotal", meminfo.MemTotal)
 	v.Set("DockerRootDir", daemon.Config().Root)
-	if http_proxy := os.Getenv("http_proxy"); http_proxy != "" {
-		v.Set("HttpProxy", http_proxy)
+	if httpProxy := os.Getenv("http_proxy"); httpProxy != "" {
+		v.Set("HttpProxy", httpProxy)
 	}
-	if https_proxy := os.Getenv("https_proxy"); https_proxy != "" {
-		v.Set("HttpsProxy", https_proxy)
+	if httpsProxy := os.Getenv("https_proxy"); httpsProxy != "" {
+		v.Set("HttpsProxy", httpsProxy)
 	}
-	if no_proxy := os.Getenv("no_proxy"); no_proxy != "" {
-		v.Set("NoProxy", no_proxy)
+	if noProxy := os.Getenv("no_proxy"); noProxy != "" {
+		v.Set("NoProxy", noProxy)
 	}
 
 	if hostname, err := os.Hostname(); err == nil {
... ...
@@ -30,7 +30,7 @@ func (daemon *Daemon) Containers(job *engine.Job) error {
 		n           = job.GetenvInt("limit")
 		size        = job.GetenvBool("size")
 		psFilters   filters.Args
-		filt_exited []int
+		filtExited  []int
 	)
 	outs := engine.NewTable("Created", 0)
 
... ...
@@ -44,7 +44,7 @@ func (daemon *Daemon) Containers(job *engine.Job) error {
 			if err != nil {
 				return err
 			}
-			filt_exited = append(filt_exited, code)
+			filtExited = append(filtExited, code)
 		}
 	}
 
... ...
@@ -109,15 +109,15 @@ func (daemon *Daemon) Containers(job *engine.Job) error {
 				return errLast
 			}
 		}
-		if len(filt_exited) > 0 {
-			should_skip := true
-			for _, code := range filt_exited {
+		if len(filtExited) > 0 {
+			shouldSkip := true
+			for _, code := range filtExited {
 				if code == container.ExitCode && !container.Running {
-					should_skip = false
+					shouldSkip = false
 					break
 				}
 			}
-			if should_skip {
+			if shouldSkip {
 				return nil
 			}
 		}
... ...
@@ -266,7 +266,7 @@ func InitDriver(job *engine.Job) error {
 	ipAllocator.RequestIP(bridgeIPv4Network, bridgeIPv4Network.IP)
 
 	// https://github.com/docker/docker/issues/2768
-	job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeIPv4Network.IP)
+	job.Eng.HackSetGlobalVar("httpapi.bridgeIP", bridgeIPv4Network.IP)
 
 	for name, f := range map[string]engine.Handler{
 		"allocate_interface": Allocate,
... ...
@@ -522,8 +522,8 @@ func Allocate(job *engine.Job) error {
 
 	if globalIPv6Network != nil {
 		// If globalIPv6Network Size is at least a /80 subnet generate IPv6 address from MAC address
-		netmask_ones, _ := globalIPv6Network.Mask.Size()
-		if requestedIPv6 == nil && netmask_ones <= 80 {
+		netmaskOnes, _ := globalIPv6Network.Mask.Size()
+		if requestedIPv6 == nil && netmaskOnes <= 80 {
 			requestedIPv6 = make(net.IP, len(globalIPv6Network.IP))
 			copy(requestedIPv6, globalIPv6Network.IP)
 			for i, h := range mac {
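The comment in this hunk describes deriving the global IPv6 address from the MAC when the prefix is /80 or shorter. A self-contained sketch of that idea (an assumption-based illustration, not the PR's code; the helper name and the MAC value, inferred from the test expectations below, are mine):

package main

import (
	"fmt"
	"net"
)

// deriveGlobalIPv6 copies the subnet's network address and then writes the
// six MAC bytes into bytes 10..15, i.e. the 48 bits left free by a /80 prefix.
func deriveGlobalIPv6(subnet *net.IPNet, mac net.HardwareAddr) net.IP {
	ip := make(net.IP, net.IPv6len)
	copy(ip, subnet.IP)
	copy(ip[10:], mac)
	return ip
}

func main() {
	_, subnet, _ := net.ParseCIDR("2001:db8:1234:1234:1234::/80")
	mac, _ := net.ParseMAC("ab:cd:ab:cd:ab:cd")
	fmt.Println(deriveGlobalIPv6(subnet, mac)) // 2001:db8:1234:1234:1234:abcd:abcd:abcd
}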
... ...
@@ -184,16 +184,16 @@ func TestIPv6InterfaceAllocationAutoNetmaskLe80(t *testing.T) {
 
 	// ensure global ip with mac
 	ip := net.ParseIP(output.Get("GlobalIPv6"))
-	expected_ip := net.ParseIP("2001:db8:1234:1234:1234:abcd:abcd:abcd")
-	if ip.String() != expected_ip.String() {
-		t.Fatalf("Error ip %s should be %s", ip.String(), expected_ip.String())
+	expectedIP := net.ParseIP("2001:db8:1234:1234:1234:abcd:abcd:abcd")
+	if ip.String() != expectedIP.String() {
+		t.Fatalf("Error ip %s should be %s", ip.String(), expectedIP.String())
 	}
 
 	// ensure link local format
 	ip = net.ParseIP(output.Get("LinkLocalIPv6"))
-	expected_ip = net.ParseIP("fe80::a9cd:abff:fecd:abcd")
-	if ip.String() != expected_ip.String() {
-		t.Fatalf("Error ip %s should be %s", ip.String(), expected_ip.String())
+	expectedIP = net.ParseIP("fe80::a9cd:abff:fecd:abcd")
+	if ip.String() != expectedIP.String() {
+		t.Fatalf("Error ip %s should be %s", ip.String(), expectedIP.String())
 	}
 
 }
... ...
@@ -203,18 +203,18 @@ func TestIPv6InterfaceAllocationRequest(t *testing.T) {
 	input := engine.Env{}
 
 	_, subnet, _ := net.ParseCIDR("2001:db8:1234:1234:1234::/80")
-	expected_ip := net.ParseIP("2001:db8:1234:1234:1234::1328")
+	expectedIP := net.ParseIP("2001:db8:1234:1234:1234::1328")
 
 	// set global ipv6
 	input.Set("globalIPv6Network", subnet.String())
-	input.Set("RequestedIPv6", expected_ip.String())
+	input.Set("RequestedIPv6", expectedIP.String())
 
 	output := newInterfaceAllocation(t, input)
 
 	// ensure global ip with mac
 	ip := net.ParseIP(output.Get("GlobalIPv6"))
-	if ip.String() != expected_ip.String() {
-		t.Fatalf("Error ip %s should be %s", ip.String(), expected_ip.String())
+	if ip.String() != expectedIP.String() {
+		t.Fatalf("Error ip %s should be %s", ip.String(), expectedIP.String())
 	}
 
 	// retry -> fails for duplicated address
... ...
@@ -2,7 +2,7 @@ package engine
 
 type Hack map[string]interface{}
 
-func (eng *Engine) Hack_GetGlobalVar(key string) interface{} {
+func (eng *Engine) HackGetGlobalVar(key string) interface{} {
 	if eng.hack == nil {
 		return nil
 	}
... ...
@@ -13,7 +13,7 @@ func (eng *Engine) Hack_GetGlobalVar(key string) interface{} {
 	return val
 }
 
-func (eng *Engine) Hack_SetGlobalVar(key string, val interface{}) {
+func (eng *Engine) HackSetGlobalVar(key string, val interface{}) {
 	if eng.hack == nil {
 		eng.hack = make(Hack)
 	}
... ...
@@ -19,10 +19,10 @@ var acceptedImageFilterTags = map[string]struct{}{
 
 func (s *TagStore) CmdImages(job *engine.Job) error {
 	var (
-		allImages   map[string]*image.Image
-		err         error
-		filt_tagged = true
-		filt_label  = false
+		allImages  map[string]*image.Image
+		err        error
+		filtTagged = true
+		filtLabel  = false
 	)
 
 	imageFilters, err := filters.FromParam(job.Getenv("filters"))
... ...
@@ -38,14 +38,14 @@ func (s *TagStore) CmdImages(job *engine.Job) error {
 	if i, ok := imageFilters["dangling"]; ok {
 		for _, value := range i {
 			if strings.ToLower(value) == "true" {
-				filt_tagged = false
+				filtTagged = false
 			}
 		}
 	}
 
-	_, filt_label = imageFilters["label"]
+	_, filtLabel = imageFilters["label"]
 
-	if job.GetenvBool("all") && filt_tagged {
+	if job.GetenvBool("all") && filtTagged {
 		allImages, err = s.graph.Map()
 	} else {
 		allImages, err = s.graph.Heads()
... ...
@@ -70,7 +70,7 @@ func (s *TagStore) CmdImages(job *engine.Job) error {
 			}
 
 			if out, exists := lookup[id]; exists {
-				if filt_tagged {
+				if filtTagged {
 					if utils.DigestReference(ref) {
 						out.SetList("RepoDigests", append(out.GetList("RepoDigests"), imgRef))
 					} else { // Tag Ref.
... ...
@@ -83,7 +83,7 @@ func (s *TagStore) CmdImages(job *engine.Job) error {
 				if !imageFilters.MatchKVList("label", image.ContainerConfig.Labels) {
 					continue
 				}
-				if filt_tagged {
+				if filtTagged {
 					out := &engine.Env{}
 					out.SetJson("ParentId", image.Parent)
 					out.SetJson("Id", image.ID)
... ...
@@ -114,7 +114,7 @@ func (s *TagStore) CmdImages(job *engine.Job) error {
 	}
 
 	// Display images which aren't part of a repository/tag
-	if job.Getenv("filter") == "" || filt_label {
+	if job.Getenv("filter") == "" || filtLabel {
 		for _, image := range allImages {
 			if !imageFilters.MatchKVList("label", image.ContainerConfig.Labels) {
 				continue
... ...
@@ -152,7 +152,7 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
 
 	errors := make(chan error)
 
-	layers_downloaded := false
+	layersDownloaded := false
 	for _, image := range repoData.ImgList {
 		downloadImage := func(img *registry.ImgData) {
 			if askedTag != "" && img.Tag != askedTag {
... ...
@@ -189,29 +189,29 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
 			out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, repoInfo.CanonicalName), nil))
 			success := false
 			var lastErr, err error
-			var is_downloaded bool
+			var isDownloaded bool
 			for _, ep := range repoInfo.Index.Mirrors {
 				out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, repoInfo.CanonicalName, ep), nil))
-				if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
+				if isDownloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
 					// Don't report errors when pulling from mirrors.
 					logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err)
 					continue
 				}
-				layers_downloaded = layers_downloaded || is_downloaded
+				layersDownloaded = layersDownloaded || isDownloaded
 				success = true
 				break
 			}
 			if !success {
 				for _, ep := range repoData.Endpoints {
 					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, repoInfo.CanonicalName, ep), nil))
-					if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
+					if isDownloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
 						// It's not ideal that only the last error is returned, it would be better to concatenate the errors.
 						// As the error is also given to the output stream the user will see the error.
 						lastErr = err
 						out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err), nil))
 						continue
 					}
-					layers_downloaded = layers_downloaded || is_downloaded
+					layersDownloaded = layersDownloaded || isDownloaded
 					success = true
 					break
 				}
... ...
@@ -262,7 +262,7 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
 	if len(askedTag) > 0 {
 		requestedTag = utils.ImageReference(repoInfo.CanonicalName, askedTag)
 	}
-	WriteStatus(requestedTag, out, sf, layers_downloaded)
+	WriteStatus(requestedTag, out, sf, layersDownloaded)
 	return nil
 }
 
... ...
@@ -275,7 +275,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
 	// FIXME: Try to stream the images?
 	// FIXME: Launch the getRemoteImage() in goroutines
 
-	layers_downloaded := false
+	layersDownloaded := false
 	for i := len(history) - 1; i >= 0; i-- {
 		id := history[i]
 
... ...
@@ -299,16 +299,16 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
 				imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
 				if err != nil && j == retries {
 					out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
-					return layers_downloaded, err
+					return layersDownloaded, err
 				} else if err != nil {
 					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
 					continue
 				}
 				img, err = image.NewImgJSON(imgJSON)
-				layers_downloaded = true
+				layersDownloaded = true
 				if err != nil && j == retries {
 					out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
-					return layers_downloaded, fmt.Errorf("Failed to parse json: %s", err)
+					return layersDownloaded, fmt.Errorf("Failed to parse json: %s", err)
 				} else if err != nil {
 					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
 					continue
... ...
@@ -333,9 +333,9 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
 					continue
 				} else if err != nil {
 					out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
-					return layers_downloaded, err
+					return layersDownloaded, err
 				}
-				layers_downloaded = true
+				layersDownloaded = true
 				defer layer.Close()
 
 				err = s.graph.Register(img,
... ...
@@ -353,7 +353,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
 					continue
 				} else if err != nil {
 					out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil))
-					return layers_downloaded, err
+					return layersDownloaded, err
 				} else {
 					break
 				}
... ...
@@ -361,11 +361,11 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
 		}
 		out.Write(sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil))
 	}
-	return layers_downloaded, nil
+	return layersDownloaded, nil
 }
 
-func WriteStatus(requestedTag string, out io.Writer, sf *streamformatter.StreamFormatter, layers_downloaded bool) {
-	if layers_downloaded {
+func WriteStatus(requestedTag string, out io.Writer, sf *streamformatter.StreamFormatter, layersDownloaded bool) {
+	if layersDownloaded {
 		out.Write(sf.FormatStatus("", "Status: Downloaded newer image for %s", requestedTag))
 	} else {
 		out.Write(sf.FormatStatus("", "Status: Image is up to date for %s", requestedTag))
... ...
@@ -251,11 +251,11 @@ func TestPsListContainersSize(t *testing.T) {
 	cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "hello")
 	runCommandWithOutput(cmd)
 	cmd = exec.Command(dockerBinary, "ps", "-s", "-n=1")
-	base_out, _, err := runCommandWithOutput(cmd)
-	base_lines := strings.Split(strings.Trim(base_out, "\n "), "\n")
-	base_sizeIndex := strings.Index(base_lines[0], "SIZE")
-	base_foundSize := base_lines[1][base_sizeIndex:]
-	base_bytes, err := strconv.Atoi(strings.Split(base_foundSize, " ")[0])
+	baseOut, _, err := runCommandWithOutput(cmd)
+	baseLines := strings.Split(strings.Trim(baseOut, "\n "), "\n")
+	baseSizeIndex := strings.Index(baseLines[0], "SIZE")
+	baseFoundsize := baseLines[1][baseSizeIndex:]
+	baseBytes, err := strconv.Atoi(strings.Split(baseFoundsize, " ")[0])
 	if err != nil {
 		t.Fatal(err)
 	}
... ...
@@ -292,7 +292,7 @@ func TestPsListContainersSize(t *testing.T) {
 	if foundID != id[:12] {
 		t.Fatalf("Expected id %s, got %s", id[:12], foundID)
 	}
-	expectedSize := fmt.Sprintf("%d B", (2 + base_bytes))
+	expectedSize := fmt.Sprintf("%d B", (2 + baseBytes))
 	foundSize := lines[1][sizeIndex:]
 	if foundSize != expectedSize {
 		t.Fatalf("Expected size %q, got %q", expectedSize, foundSize)
... ...
@@ -2186,15 +2186,15 @@ func eqToBaseDiff(out string, t *testing.T) bool {
 	out1, _, err := runCommandWithOutput(cmd)
 	cID := stripTrailingCharacters(out1)
 	cmd = exec.Command(dockerBinary, "diff", cID)
-	base_diff, _, err := runCommandWithOutput(cmd)
+	baseDiff, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, base_diff)
+		t.Fatal(err, baseDiff)
 	}
-	base_arr := strings.Split(base_diff, "\n")
-	sort.Strings(base_arr)
-	out_arr := strings.Split(out, "\n")
-	sort.Strings(out_arr)
-	return sliceEq(base_arr, out_arr)
+	baseArr := strings.Split(baseDiff, "\n")
+	sort.Strings(baseArr)
+	outArr := strings.Split(out, "\n")
+	sort.Strings(outArr)
+	return sliceEq(baseArr, outArr)
 }
 
 func sliceEq(a, b []string) bool {
... ...
@@ -158,9 +158,9 @@ func TestStartVolumesFromFailsCleanly(t *testing.T) {
 
 	// Check that we have the volumes we want
 	out, _, _ := dockerCmd(t, "inspect", "--format='{{ len .Volumes }}'", "consumer")
-	n_volumes := strings.Trim(out, " \r\n'")
-	if n_volumes != "2" {
-		t.Fatalf("Missing volumes: expected 2, got %s", n_volumes)
+	nVolumes := strings.Trim(out, " \r\n'")
+	if nVolumes != "2" {
+		t.Fatalf("Missing volumes: expected 2, got %s", nVolumes)
 	}
 
 	logDone("start - missing containers in --volumes-from did not affect subsequent runs")
... ...
@@ -61,9 +61,9 @@ func TestTagInvalidUnprefixedRepo(t *testing.T) {
 
 // ensure we don't allow the use of invalid tags; these tag operations should fail
 func TestTagInvalidPrefixedRepo(t *testing.T) {
-	long_tag := stringutils.GenerateRandomAlphaOnlyString(121)
+	longTag := stringutils.GenerateRandomAlphaOnlyString(121)
 
-	invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", long_tag}
+	invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", longTag}
 
 	for _, repotag := range invalidTags {
 		tagCmd := exec.Command(dockerBinary, "tag", "busybox", repotag)
... ...
@@ -150,7 +150,7 @@ func getContainer(eng *engine.Engine, id string, t Fataler) *daemon.Container {
 }
 
 func mkDaemonFromEngine(eng *engine.Engine, t Fataler) *daemon.Daemon {
-	iDaemon := eng.Hack_GetGlobalVar("httpapi.daemon")
+	iDaemon := eng.HackGetGlobalVar("httpapi.daemon")
 	if iDaemon == nil {
 		panic("Legacy daemon field not set in engine")
 	}
... ...
@@ -260,7 +260,7 @@ func Exists(table Table, chain string, rule ...string) bool {
 
 	// parse "iptables -S" for the rule (this checks rules in a specific chain
 	// in a specific table)
-	rule_string := strings.Join(rule, " ")
+	ruleString := strings.Join(rule, " ")
 	existingRules, _ := exec.Command("iptables", "-t", string(table), "-S", chain).Output()
 
 	// regex to replace ips in rule
... ...
@@ -269,7 +269,7 @@ func Exists(table Table, chain string, rule ...string) bool {
 
 	return strings.Contains(
 		re.ReplaceAllString(string(existingRules), "?"),
-		re.ReplaceAllString(rule_string, "?"),
+		re.ReplaceAllString(ruleString, "?"),
 	)
 }
 
... ...
@@ -941,11 +941,11 @@ func (f *FlagSet) parseOne() (bool, string, error) {
 
 	// it's a flag. does it have an argument?
 	f.args = f.args[1:]
-	has_value := false
+	hasValue := false
 	value := ""
 	if i := strings.Index(name, "="); i != -1 {
 		value = trimQuotes(name[i+1:])
-		has_value = true
+		hasValue = true
 		name = name[:i]
 	}
 
... ...
@@ -962,7 +962,7 @@ func (f *FlagSet) parseOne() (bool, string, error) {
 		return false, name, ErrRetry
 	}
 	if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg
-		if has_value {
+		if hasValue {
 			if err := fv.Set(value); err != nil {
 				return false, "", f.failf("invalid boolean value %q for  -%s: %v", value, name, err)
 			}
... ...
@@ -971,12 +971,12 @@ func (f *FlagSet) parseOne() (bool, string, error) {
 		}
 	} else {
 		// It must have a value, which might be the next argument.
-		if !has_value && len(f.args) > 0 {
+		if !hasValue && len(f.args) > 0 {
 			// value is the next arg
-			has_value = true
+			hasValue = true
 			value, f.args = f.args[0], f.args[1:]
 		}
-		if !has_value {
+		if !hasValue {
 			return false, "", f.failf("flag needs an argument: -%s", name)
 		}
 		if err := flag.Value.Set(value); err != nil {
... ...
@@ -60,10 +60,10 @@ func (ipnet *netIPNet) MarshalJSON() ([]byte, error) {
 }
 
 func (ipnet *netIPNet) UnmarshalJSON(b []byte) (err error) {
-	var ipnet_str string
-	if err = json.Unmarshal(b, &ipnet_str); err == nil {
+	var ipnetStr string
+	if err = json.Unmarshal(b, &ipnetStr); err == nil {
 		var cidr *net.IPNet
-		if _, cidr, err = net.ParseCIDR(ipnet_str); err == nil {
+		if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
 			*ipnet = netIPNet(*cidr)
 		}
 	}
... ...
@@ -171,7 +171,7 @@ func makePublicIndex() *IndexInfo {
 	return index
 }
 
-func makeServiceConfig(mirrors []string, insecure_registries []string) *ServiceConfig {
+func makeServiceConfig(mirrors []string, insecureRegistries []string) *ServiceConfig {
 	options := &Options{
 		Mirrors:            opts.NewListOpts(nil),
 		InsecureRegistries: opts.NewListOpts(nil),
... ...
@@ -181,9 +181,9 @@ func makeServiceConfig(mirrors []string, insecure_registries []string) *ServiceC
 			options.Mirrors.Set(mirror)
 		}
 	}
-	if insecure_registries != nil {
-		for _, insecure_registries := range insecure_registries {
-			options.InsecureRegistries.Set(insecure_registries)
+	if insecureRegistries != nil {
+		for _, insecureRegistries := range insecureRegistries {
+			options.InsecureRegistries.Set(insecureRegistries)
 		}
 	}