Revert "Merge pull request #16228 from duglin/ContextualizeEvents"

Although having a request ID available throughout the codebase is very
valuable, the impact of requiring a Context as an argument to every
function in the code path of an API request is too significant and was
not properly understood at the time of the review.

Furthermore, mixing API-layer code with non-API-layer code makes the
latter usable only by code that has a notion of Context, i.e. by the API layer.
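
To make the trade-off concrete, here is a minimal Go sketch (hypothetical names, standard-library context, not code from this diff) contrasting the two styles: threading a request-scoped Context through every daemon-level call versus keeping daemon functions Context-free and confining request-scoped data to the API layer.

    package main

    import (
    	"context"
    	"fmt"
    )

    // A typed key avoids collisions when storing request-scoped values in a Context.
    type ctxKey string

    const requestIDKey ctxKey = "request-id"

    // Style being reverted: every function on the request path must accept a
    // Context, so non-API callers have to fabricate one just to call the daemon.
    func containerStopWithCtx(ctx context.Context, name string, timeout int) error {
    	reqID, _ := ctx.Value(requestIDKey).(string)
    	fmt.Printf("[req %s] stopping %s (timeout %ds)\n", reqID, name, timeout)
    	return nil
    }

    // Style after the revert: the daemon API stays Context-free; request IDs and
    // other per-request data remain a concern of the HTTP handler layer only.
    func containerStop(name string, timeout int) error {
    	fmt.Printf("stopping %s (timeout %ds)\n", name, timeout)
    	return nil
    }

    func main() {
    	ctx := context.WithValue(context.Background(), requestIDKey, "abc123")
    	_ = containerStopWithCtx(ctx, "web", 10) // API layer threads ctx everywhere
    	_ = containerStop("web", 10)             // daemon callable without a Context
    }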

This reverts commit de4164043546d2b9ee3bf323dbc41f4979c84480, reversing
changes made to 7daeecd42d7bb112bfe01532c8c9a962bb0c7967.

Signed-off-by: Tibor Vass <tibor@docker.com>

Conflicts:
api/server/container.go
builder/internals.go
daemon/container_unix.go
daemon/create.go

Tibor Vass authored on 2015/09/30 02:51:40
Showing 68 changed files
... ...
@@ -45,7 +45,7 @@ func (s *Server) getContainersJSON(ctx context.Context, w http.ResponseWriter, r
 		config.Limit = limit
 	}

-	containers, err := s.daemon.Containers(ctx, config)
+	containers, err := s.daemon.Containers(config)
 	if err != nil {
 		return err
 	}
... ...
@@ -83,7 +83,7 @@ func (s *Server) getContainersStats(ctx context.Context, w http.ResponseWriter,
 		Version:   version,
 	}

-	return s.daemon.ContainerStats(ctx, vars["name"], config)
+	return s.daemon.ContainerStats(vars["name"], config)
 }

 func (s *Server) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
... ...
@@ -118,7 +118,7 @@ func (s *Server) getContainersLogs(ctx context.Context, w http.ResponseWriter, r
 		closeNotifier = notifier.CloseNotify()
 	}

-	c, err := s.daemon.Get(ctx, vars["name"])
+	c, err := s.daemon.Get(vars["name"])
 	if err != nil {
 		return err
 	}
... ...
@@ -140,7 +140,7 @@ func (s *Server) getContainersLogs(ctx context.Context, w http.ResponseWriter, r
 		Stop:       closeNotifier,
 	}

-	if err := s.daemon.ContainerLogs(ctx, c, logsConfig); err != nil {
+	if err := s.daemon.ContainerLogs(c, logsConfig); err != nil {
 		// The client may be expecting all of the data we're sending to
 		// be multiplexed, so send it through OutStream, which will
 		// have been set up to handle that if needed.
... ...
@@ -155,7 +155,7 @@ func (s *Server) getContainersExport(ctx context.Context, w http.ResponseWriter,
 		return fmt.Errorf("Missing parameter")
 	}

-	return s.daemon.ContainerExport(ctx, vars["name"], w)
+	return s.daemon.ContainerExport(vars["name"], w)
 }

 func (s *Server) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
... ...
@@ -183,7 +183,7 @@ func (s *Server) postContainersStart(ctx context.Context, w http.ResponseWriter,
 		hostConfig = c
 	}

-	if err := s.daemon.ContainerStart(ctx, vars["name"], hostConfig); err != nil {
+	if err := s.daemon.ContainerStart(vars["name"], hostConfig); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)
... ...
@@ -200,7 +200,7 @@ func (s *Server) postContainersStop(ctx context.Context, w http.ResponseWriter,

 	seconds, _ := strconv.Atoi(r.Form.Get("t"))

-	if err := s.daemon.ContainerStop(ctx, vars["name"], seconds); err != nil {
+	if err := s.daemon.ContainerStop(vars["name"], seconds); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)
... ...
@@ -227,7 +227,7 @@ func (s *Server) postContainersKill(ctx context.Context, w http.ResponseWriter,
 		}
 	}

-	if err := s.daemon.ContainerKill(ctx, name, uint64(sig)); err != nil {
+	if err := s.daemon.ContainerKill(name, uint64(sig)); err != nil {
 		theErr, isDerr := err.(errcode.ErrorCoder)
 		isStopped := isDerr && theErr.ErrorCode() == derr.ErrorCodeNotRunning

... ...
@@ -254,7 +254,7 @@ func (s *Server) postContainersRestart(ctx context.Context, w http.ResponseWrite

 	timeout, _ := strconv.Atoi(r.Form.Get("t"))

-	if err := s.daemon.ContainerRestart(ctx, vars["name"], timeout); err != nil {
+	if err := s.daemon.ContainerRestart(vars["name"], timeout); err != nil {
 		return err
 	}

... ...
@@ -271,7 +271,7 @@ func (s *Server) postContainersPause(ctx context.Context, w http.ResponseWriter,
 		return err
 	}

-	if err := s.daemon.ContainerPause(ctx, vars["name"]); err != nil {
+	if err := s.daemon.ContainerPause(vars["name"]); err != nil {
 		return err
 	}

... ...
@@ -288,7 +288,7 @@ func (s *Server) postContainersUnpause(ctx context.Context, w http.ResponseWrite
 		return err
 	}

-	if err := s.daemon.ContainerUnpause(ctx, vars["name"]); err != nil {
+	if err := s.daemon.ContainerUnpause(vars["name"]); err != nil {
 		return err
 	}

... ...
@@ -302,7 +302,7 @@ func (s *Server) postContainersWait(ctx context.Context, w http.ResponseWriter,
 		return fmt.Errorf("Missing parameter")
 	}

-	status, err := s.daemon.ContainerWait(ctx, vars["name"], -1*time.Second)
+	status, err := s.daemon.ContainerWait(vars["name"], -1*time.Second)
 	if err != nil {
 		return err
 	}
... ...
@@ -317,7 +317,7 @@ func (s *Server) getContainersChanges(ctx context.Context, w http.ResponseWriter
 		return fmt.Errorf("Missing parameter")
 	}

-	changes, err := s.daemon.ContainerChanges(ctx, vars["name"])
+	changes, err := s.daemon.ContainerChanges(vars["name"])
 	if err != nil {
 		return err
 	}
... ...
@@ -334,7 +334,7 @@ func (s *Server) getContainersTop(ctx context.Context, w http.ResponseWriter, r
 		return err
 	}

-	procList, err := s.daemon.ContainerTop(ctx, vars["name"], r.Form.Get("ps_args"))
+	procList, err := s.daemon.ContainerTop(vars["name"], r.Form.Get("ps_args"))
 	if err != nil {
 		return err
 	}
... ...
@@ -352,7 +352,7 @@ func (s *Server) postContainerRename(ctx context.Context, w http.ResponseWriter,

 	name := vars["name"]
 	newName := r.Form.Get("name")
-	if err := s.daemon.ContainerRename(ctx, name, newName); err != nil {
+	if err := s.daemon.ContainerRename(name, newName); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)
... ...
@@ -376,7 +376,7 @@ func (s *Server) postContainersCreate(ctx context.Context, w http.ResponseWriter
 	version := ctx.Version()
 	adjustCPUShares := version.LessThan("1.19")

-	ccr, err := s.daemon.ContainerCreate(ctx, name, config, hostConfig, adjustCPUShares)
+	ccr, err := s.daemon.ContainerCreate(name, config, hostConfig, adjustCPUShares)
 	if err != nil {
 		return err
 	}
... ...
@@ -399,7 +399,7 @@ func (s *Server) deleteContainers(ctx context.Context, w http.ResponseWriter, r
 		RemoveLink:   boolValue(r, "link"),
 	}

-	if err := s.daemon.ContainerRm(ctx, name, config); err != nil {
+	if err := s.daemon.ContainerRm(name, config); err != nil {
 		// Force a 404 for the empty string
 		if strings.Contains(strings.ToLower(err.Error()), "prefix can't be empty") {
 			return fmt.Errorf("no such id: \"\"")
... ...
@@ -429,7 +429,7 @@ func (s *Server) postContainersResize(ctx context.Context, w http.ResponseWriter
 		return err
 	}

-	return s.daemon.ContainerResize(ctx, vars["name"], height, width)
+	return s.daemon.ContainerResize(vars["name"], height, width)
 }

 func (s *Server) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
... ...
@@ -441,7 +441,7 @@ func (s *Server) postContainersAttach(ctx context.Context, w http.ResponseWriter
 	}
 	containerName := vars["name"]

-	if !s.daemon.Exists(ctx, containerName) {
+	if !s.daemon.Exists(containerName) {
 		return derr.ErrorCodeNoSuchContainer.WithArgs(containerName)
 	}

... ...
@@ -467,7 +467,7 @@ func (s *Server) postContainersAttach(ctx context.Context, w http.ResponseWriter
 		Stream:    boolValue(r, "stream"),
 	}

-	if err := s.daemon.ContainerAttachWithLogs(ctx, containerName, attachWithLogsConfig); err != nil {
+	if err := s.daemon.ContainerAttachWithLogs(containerName, attachWithLogsConfig); err != nil {
 		fmt.Fprintf(outStream, "Error attaching: %s\n", err)
 	}

... ...
@@ -483,7 +483,7 @@ func (s *Server) wsContainersAttach(ctx context.Context, w http.ResponseWriter,
 	}
 	containerName := vars["name"]

-	if !s.daemon.Exists(ctx, containerName) {
+	if !s.daemon.Exists(containerName) {
 		return derr.ErrorCodeNoSuchContainer.WithArgs(containerName)
 	}

... ...
@@ -498,7 +498,7 @@ func (s *Server) wsContainersAttach(ctx context.Context, w http.ResponseWriter,
 			Stream:    boolValue(r, "stream"),
 		}

-		if err := s.daemon.ContainerWsAttachWithLogs(ctx, containerName, wsAttachWithLogsConfig); err != nil {
+		if err := s.daemon.ContainerWsAttachWithLogs(containerName, wsAttachWithLogsConfig); err != nil {
 			logrus.Errorf("Error attaching websocket: %s", err)
 		}
 	})
... ...
@@ -32,7 +32,7 @@ func (s *Server) postContainersCopy(ctx context.Context, w http.ResponseWriter,
 		return fmt.Errorf("Path cannot be empty")
 	}

-	data, err := s.daemon.ContainerCopy(ctx, vars["name"], cfg.Resource)
+	data, err := s.daemon.ContainerCopy(vars["name"], cfg.Resource)
 	if err != nil {
 		if strings.Contains(strings.ToLower(err.Error()), "no such id") {
 			w.WriteHeader(http.StatusNotFound)
... ...
@@ -74,7 +74,7 @@ func (s *Server) headContainersArchive(ctx context.Context, w http.ResponseWrite
 		return err
 	}

-	stat, err := s.daemon.ContainerStatPath(ctx, v.name, v.path)
+	stat, err := s.daemon.ContainerStatPath(v.name, v.path)
 	if err != nil {
 		return err
 	}
... ...
@@ -88,7 +88,7 @@ func (s *Server) getContainersArchive(ctx context.Context, w http.ResponseWriter
 		return err
 	}

-	tarArchive, stat, err := s.daemon.ContainerArchivePath(ctx, v.name, v.path)
+	tarArchive, stat, err := s.daemon.ContainerArchivePath(v.name, v.path)
 	if err != nil {
 		return err
 	}
... ...
@@ -111,5 +111,5 @@ func (s *Server) putContainersArchive(ctx context.Context, w http.ResponseWriter
 	}

 	noOverwriteDirNonDir := boolValue(r, "noOverwriteDirNonDir")
-	return s.daemon.ContainerExtractToDir(ctx, v.name, v.path, noOverwriteDirNonDir, r.Body)
+	return s.daemon.ContainerExtractToDir(v.name, v.path, noOverwriteDirNonDir, r.Body)
 }
... ...
@@ -45,7 +45,7 @@ func (s *Server) getVersion(ctx context.Context, w http.ResponseWriter, r *http.
 }

 func (s *Server) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	info, err := s.daemon.SystemInfo(ctx)
+	info, err := s.daemon.SystemInfo()
 	if err != nil {
 		return err
 	}
... ...
@@ -120,7 +120,7 @@ func (s *Server) getEvents(ctx context.Context, w http.ResponseWriter, r *http.R
 	enc := json.NewEncoder(outStream)

 	getContainerID := func(cn string) string {
-		c, err := d.Get(ctx, cn)
+		c, err := d.Get(cn)
 		if err != nil {
 			return ""
 		}
... ...
@@ -19,7 +19,7 @@ func (s *Server) getExecByID(ctx context.Context, w http.ResponseWriter, r *http
 		return fmt.Errorf("Missing parameter 'id'")
 	}

-	eConfig, err := s.daemon.ContainerExecInspect(ctx, vars["id"])
+	eConfig, err := s.daemon.ContainerExecInspect(vars["id"])
 	if err != nil {
 		return err
 	}
... ...
@@ -47,7 +47,7 @@ func (s *Server) postContainerExecCreate(ctx context.Context, w http.ResponseWri
 	}

 	// Register an instance of Exec in container.
-	id, err := s.daemon.ContainerExecCreate(ctx, execConfig)
+	id, err := s.daemon.ContainerExecCreate(execConfig)
 	if err != nil {
 		logrus.Errorf("Error setting up exec command in container %s: %s", name, err)
 		return err
... ...
@@ -100,7 +100,7 @@ func (s *Server) postContainerExecStart(ctx context.Context, w http.ResponseWrit
 	}

 	// Now run the user process in container.
-	if err := s.daemon.ContainerExecStart(ctx, execName, stdin, stdout, stderr); err != nil {
+	if err := s.daemon.ContainerExecStart(execName, stdin, stdout, stderr); err != nil {
 		fmt.Fprintf(outStream, "Error running exec in container: %v\n", err)
 	}
 	return nil
... ...
@@ -123,5 +123,5 @@ func (s *Server) postContainerExecResize(ctx context.Context, w http.ResponseWri
 		return err
 	}

-	return s.daemon.ContainerExecResize(ctx, vars["name"], height, width)
+	return s.daemon.ContainerExecResize(vars["name"], height, width)
 }
... ...
@@ -55,7 +55,7 @@ func (s *Server) postCommit(ctx context.Context, w http.ResponseWriter, r *http.
 		Config:  c,
 	}

-	imgID, err := builder.Commit(ctx, cname, s.daemon, commitCfg)
+	imgID, err := builder.Commit(cname, s.daemon, commitCfg)
 	if err != nil {
 		return err
 	}
... ...
@@ -112,7 +112,7 @@ func (s *Server) postImagesCreate(ctx context.Context, w http.ResponseWriter, r
 			OutStream:   output,
 		}

-		err = s.daemon.Repositories(ctx).Pull(ctx, image, tag, imagePullConfig)
+		err = s.daemon.Repositories().Pull(image, tag, imagePullConfig)
 	} else { //import
 		if tag == "" {
 			repo, tag = parsers.ParseRepositoryTag(repo)
... ...
@@ -124,12 +124,12 @@ func (s *Server) postImagesCreate(ctx context.Context, w http.ResponseWriter, r
 		// generated from the download to be available to the output
 		// stream processing below
 		var newConfig *runconfig.Config
-		newConfig, err = builder.BuildFromConfig(ctx, s.daemon, &runconfig.Config{}, r.Form["changes"])
+		newConfig, err = builder.BuildFromConfig(s.daemon, &runconfig.Config{}, r.Form["changes"])
 		if err != nil {
 			return err
 		}

-		err = s.daemon.Repositories(ctx).Import(ctx, src, repo, tag, message, r.Body, output, newConfig)
+		err = s.daemon.Repositories().Import(src, repo, tag, message, r.Body, output, newConfig)
 	}
 	if err != nil {
 		if !output.Flushed() {
... ...
@@ -184,7 +184,7 @@ func (s *Server) postImagesPush(ctx context.Context, w http.ResponseWriter, r *h

 	w.Header().Set("Content-Type", "application/json")

-	if err := s.daemon.Repositories(ctx).Push(ctx, name, imagePushConfig); err != nil {
+	if err := s.daemon.Repositories().Push(name, imagePushConfig); err != nil {
 		if !output.Flushed() {
 			return err
 		}
... ...
@@ -212,7 +212,7 @@ func (s *Server) getImagesGet(ctx context.Context, w http.ResponseWriter, r *htt
 		names = r.Form["names"]
 	}

-	if err := s.daemon.Repositories(ctx).ImageExport(names, output); err != nil {
+	if err := s.daemon.Repositories().ImageExport(names, output); err != nil {
 		if !output.Flushed() {
 			return err
 		}
... ...
@@ -223,7 +223,7 @@ func (s *Server) getImagesGet(ctx context.Context, w http.ResponseWriter, r *htt
 }

 func (s *Server) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	return s.daemon.Repositories(ctx).Load(r.Body, w)
+	return s.daemon.Repositories().Load(r.Body, w)
 }

 func (s *Server) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
... ...
@@ -243,7 +243,7 @@ func (s *Server) deleteImages(ctx context.Context, w http.ResponseWriter, r *htt
 	force := boolValue(r, "force")
 	prune := !boolValue(r, "noprune")

-	list, err := s.daemon.ImageDelete(ctx, name, force, prune)
+	list, err := s.daemon.ImageDelete(name, force, prune)
 	if err != nil {
 		return err
 	}
... ...
@@ -256,7 +256,7 @@ func (s *Server) getImagesByName(ctx context.Context, w http.ResponseWriter, r *
 		return fmt.Errorf("Missing parameter")
 	}

-	imageInspect, err := s.daemon.Repositories(ctx).Lookup(vars["name"])
+	imageInspect, err := s.daemon.Repositories().Lookup(vars["name"])
 	if err != nil {
 		return err
 	}
... ...
@@ -346,7 +346,7 @@ func (s *Server) postBuild(ctx context.Context, w http.ResponseWriter, r *http.R
 		}()
 	}

-	if err := builder.Build(ctx, s.daemon, buildConfig); err != nil {
+	if err := builder.Build(s.daemon, buildConfig); err != nil {
 		// Do not write the error in the http output if it's still empty.
 		// This prevents from writing a 200(OK) when there is an interal error.
 		if !output.Flushed() {
... ...
@@ -364,7 +364,7 @@ func (s *Server) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *ht
 	}

 	// FIXME: The filter parameter could just be a match filter
-	images, err := s.daemon.Repositories(ctx).Images(r.Form.Get("filters"), r.Form.Get("filter"), boolValue(r, "all"))
+	images, err := s.daemon.Repositories().Images(r.Form.Get("filters"), r.Form.Get("filter"), boolValue(r, "all"))
 	if err != nil {
 		return err
 	}
... ...
@@ -378,7 +378,7 @@ func (s *Server) getImagesHistory(ctx context.Context, w http.ResponseWriter, r
 	}

 	name := vars["name"]
-	history, err := s.daemon.Repositories(ctx).History(name)
+	history, err := s.daemon.Repositories().History(name)
 	if err != nil {
 		return err
 	}
... ...
@@ -398,10 +398,10 @@ func (s *Server) postImagesTag(ctx context.Context, w http.ResponseWriter, r *ht
 	tag := r.Form.Get("tag")
 	force := boolValue(r, "force")
 	name := vars["name"]
-	if err := s.daemon.Repositories(ctx).Tag(repo, tag, name, force); err != nil {
+	if err := s.daemon.Repositories().Tag(repo, tag, name, force); err != nil {
 		return err
 	}
-	s.daemon.EventsService.Log(ctx, "tag", utils.ImageReference(repo, tag), "")
+	s.daemon.EventsService.Log("tag", utils.ImageReference(repo, tag), "")
 	w.WriteHeader(http.StatusCreated)
 	return nil
 }
... ...
@@ -20,11 +20,11 @@ func (s *Server) getContainersByName(ctx context.Context, w http.ResponseWriter,

 	switch {
 	case version.LessThan("1.20"):
-		json, err = s.daemon.ContainerInspectPre120(ctx, vars["name"])
+		json, err = s.daemon.ContainerInspectPre120(vars["name"])
 	case version.Equal("1.20"):
-		json, err = s.daemon.ContainerInspect120(ctx, vars["name"])
+		json, err = s.daemon.ContainerInspect120(vars["name"])
 	default:
-		json, err = s.daemon.ContainerInspect(ctx, vars["name"])
+		json, err = s.daemon.ContainerInspect(vars["name"])
 	}

 	if err != nil {
... ...
@@ -18,7 +18,6 @@ import (
 	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/pkg/sockets"
-	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/utils"
 )

... ...
@@ -42,12 +41,12 @@ type Server struct {
 }

 // New returns a new instance of the server based on the specified configuration.
-func New(ctx context.Context, cfg *Config) *Server {
+func New(cfg *Config) *Server {
 	srv := &Server{
 		cfg:   cfg,
 		start: make(chan struct{}),
 	}
-	srv.router = createRouter(ctx, srv)
+	srv.router = createRouter(srv)
 	return srv
 }

... ...
@@ -291,7 +290,7 @@ func (s *Server) initTCPSocket(addr string) (l net.Listener, err error) {
 	return
 }

-func (s *Server) makeHTTPHandler(ctx context.Context, localMethod string, localRoute string, localHandler HTTPAPIFunc) http.HandlerFunc {
+func (s *Server) makeHTTPHandler(localMethod string, localRoute string, localHandler HTTPAPIFunc) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
 		// log the handler generation
 		logrus.Debugf("Calling %s %s", localMethod, localRoute)
... ...
@@ -303,8 +302,7 @@ func (s *Server) makeHTTPHandler(ctx context.Context, localMethod string, localR
 		// apply to all requests. Data that is specific to the
 		// immediate function being called should still be passed
 		// as 'args' on the function call.
-		reqID := stringid.TruncateID(stringid.GenerateNonCryptoID())
-		ctx = context.WithValue(ctx, context.RequestID, reqID)
+		ctx := context.Background()
 		handlerFunc := s.handleWithGlobalMiddlewares(localHandler)

 		if err := handlerFunc(ctx, w, r, mux.Vars(r)); err != nil {
... ...
@@ -316,7 +314,7 @@ func (s *Server) makeHTTPHandler(ctx context.Context, localMethod string, localR

 // createRouter initializes the main router the server uses.
 // we keep enableCors just for legacy usage, need to be removed in the future
-func createRouter(ctx context.Context, s *Server) *mux.Router {
+func createRouter(s *Server) *mux.Router {
 	r := mux.NewRouter()
 	if os.Getenv("DEBUG") != "" {
 		profilerSetup(r, "/debug/")
... ...
@@ -396,7 +394,7 @@ func createRouter(ctx context.Context, s *Server) *mux.Router {
 			localMethod := method

 			// build the handler function
-			f := s.makeHTTPHandler(ctx, localMethod, localRoute, localFct)
+			f := s.makeHTTPHandler(localMethod, localRoute, localFct)

 			// add the new route
 			if localRoute == "" {
... ...
@@ -2,12 +2,8 @@

 package server

-import (
-	"github.com/docker/docker/context"
-)
-
-func (s *Server) registerSubRouter(ctx context.Context) {
-	httpHandler := s.daemon.NetworkAPIRouter(ctx)
+func (s *Server) registerSubRouter() {
+	httpHandler := s.daemon.NetworkAPIRouter()

 	subrouter := s.router.PathPrefix("/v{version:[0-9.]+}/networks").Subrouter()
 	subrouter.Methods("GET", "POST", "PUT", "DELETE").HandlerFunc(httpHandler)
... ...
@@ -2,9 +2,5 @@

 package server

-import (
-	"github.com/docker/docker/context"
-)
-
-func (s *Server) registerSubRouter(ctx context.Context) {
+func (s *Server) registerSubRouter() {
 }
... ...
@@ -8,7 +8,6 @@ import (
 	"net/http"
 	"strconv"

-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/pkg/sockets"
 	"github.com/docker/libnetwork/portallocator"
... ...
@@ -64,10 +63,10 @@ func (s *Server) newServer(proto, addr string) ([]serverCloser, error) {
 // AcceptConnections allows clients to connect to the API server.
 // Referenced Daemon is notified about this server, and waits for the
 // daemon acknowledgement before the incoming connections are accepted.
-func (s *Server) AcceptConnections(ctx context.Context, d *daemon.Daemon) {
+func (s *Server) AcceptConnections(d *daemon.Daemon) {
 	// Tell the init daemon we are accepting requests
 	s.daemon = d
-	s.registerSubRouter(ctx)
+	s.registerSubRouter()
 	go systemdDaemon.SdNotify("READY=1")
 	// close the lock so the listeners start accepting connections
 	select {
... ...
@@ -7,7 +7,6 @@ import (
 	"net"
 	"net/http"

-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon"
 )

... ...
@@ -43,9 +42,9 @@ func (s *Server) newServer(proto, addr string) ([]serverCloser, error) {
 }

 // AcceptConnections allows router to start listening for the incoming requests.
-func (s *Server) AcceptConnections(ctx context.Context, d *daemon.Daemon) {
+func (s *Server) AcceptConnections(d *daemon.Daemon) {
 	s.daemon = d
-	s.registerSubRouter(ctx)
+	s.registerSubRouter()
 	// close the lock so the listeners start accepting connections
 	select {
 	case <-s.start:
... ...
@@ -13,7 +13,7 @@ func (s *Server) getVolumesList(ctx context.Context, w http.ResponseWriter, r *h
 		return err
 	}

-	volumes, err := s.daemon.Volumes(ctx, r.Form.Get("filters"))
+	volumes, err := s.daemon.Volumes(r.Form.Get("filters"))
 	if err != nil {
 		return err
 	}
... ...
@@ -25,7 +25,7 @@ func (s *Server) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *
 		return err
 	}

-	v, err := s.daemon.VolumeInspect(ctx, vars["name"])
+	v, err := s.daemon.VolumeInspect(vars["name"])
 	if err != nil {
 		return err
 	}
... ...
@@ -46,7 +46,7 @@ func (s *Server) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r
 		return err
 	}

-	volume, err := s.daemon.VolumeCreate(ctx, req.Name, req.Driver, req.DriverOpts)
+	volume, err := s.daemon.VolumeCreate(req.Name, req.Driver, req.DriverOpts)
 	if err != nil {
 		return err
 	}
... ...
@@ -57,7 +57,7 @@ func (s *Server) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *ht
 	if err := parseForm(r); err != nil {
 		return err
 	}
-	if err := s.daemon.VolumeRm(ctx, vars["name"]); err != nil {
+	if err := s.daemon.VolumeRm(vars["name"]); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)
... ...
@@ -18,7 +18,6 @@ import (
 	"strings"

 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/context"
 	derr "github.com/docker/docker/errors"
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/pkg/nat"
... ...
@@ -44,7 +43,7 @@ func nullDispatch(b *builder, args []string, attributes map[string]bool, origina
 // Sets the environment variable foo to bar, also makes interpolation
 // in the dockerfile available from the next statement on via ${foo}.
 //
-func env(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func env(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) == 0 {
 		return derr.ErrorCodeAtLeastOneArg.WithArgs("ENV")
 	}
... ...
@@ -97,13 +96,13 @@ func env(ctx context.Context, b *builder, args []string, attributes map[string]b
 		j++
 	}

-	return b.commit(ctx, "", b.Config.Cmd, commitStr)
+	return b.commit("", b.Config.Cmd, commitStr)
 }

 // MAINTAINER some text <maybe@an.email.address>
 //
 // Sets the maintainer metadata.
-func maintainer(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func maintainer(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) != 1 {
 		return derr.ErrorCodeExactlyOneArg.WithArgs("MAINTAINER")
 	}
... ...
@@ -113,14 +112,14 @@ func maintainer(ctx context.Context, b *builder, args []string, attributes map[s
 	}

 	b.maintainer = args[0]
-	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
 }

 // LABEL some json data describing the image
 //
 // Sets the Label variable foo to bar,
 //
-func label(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func label(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) == 0 {
 		return derr.ErrorCodeAtLeastOneArg.WithArgs("LABEL")
 	}
... ...
@@ -148,7 +147,7 @@ func label(ctx context.Context, b *builder, args []string, attributes map[string
 		b.Config.Labels[args[j]] = args[j+1]
 		j++
 	}
-	return b.commit(ctx, "", b.Config.Cmd, commitStr)
+	return b.commit("", b.Config.Cmd, commitStr)
 }

 // ADD foo /path
... ...
@@ -156,7 +155,7 @@ func label(ctx context.Context, b *builder, args []string, attributes map[string
 // Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
 // exist here. If you do not wish to have this automatic handling, use COPY.
 //
-func add(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func add(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) < 2 {
 		return derr.ErrorCodeAtLeastTwoArgs.WithArgs("ADD")
 	}
... ...
@@ -165,14 +164,14 @@ func add(ctx context.Context, b *builder, args []string, attributes map[string]b
 		return err
 	}

-	return b.runContextCommand(ctx, args, true, true, "ADD")
+	return b.runContextCommand(args, true, true, "ADD")
 }

 // COPY foo /path
 //
 // Same as 'ADD' but without the tar and remote url handling.
 //
-func dispatchCopy(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func dispatchCopy(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) < 2 {
 		return derr.ErrorCodeAtLeastTwoArgs.WithArgs("COPY")
 	}
... ...
@@ -181,14 +180,14 @@ func dispatchCopy(ctx context.Context, b *builder, args []string, attributes map
 		return err
 	}

-	return b.runContextCommand(ctx, args, false, false, "COPY")
+	return b.runContextCommand(args, false, false, "COPY")
 }

 // FROM imagename
 //
 // This sets the image the dockerfile will build on top of.
 //
-func from(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func from(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) != 1 {
 		return derr.ErrorCodeExactlyOneArg.WithArgs("FROM")
 	}
... ...
@@ -209,16 +208,16 @@ func from(ctx context.Context, b *builder, args []string, attributes map[string]
 		return nil
 	}

-	image, err := b.Daemon.Repositories(ctx).LookupImage(name)
+	image, err := b.Daemon.Repositories().LookupImage(name)
 	if b.Pull {
-		image, err = b.pullImage(ctx, name)
+		image, err = b.pullImage(name)
 		if err != nil {
 			return err
 		}
 	}
 	if err != nil {
-		if b.Daemon.Graph(ctx).IsNotExist(err, name) {
-			image, err = b.pullImage(ctx, name)
+		if b.Daemon.Graph().IsNotExist(err, name) {
+			image, err = b.pullImage(name)
 		}

 		// note that the top level err will still be !nil here if IsNotExist is
... ...
@@ -228,7 +227,7 @@ func from(ctx context.Context, b *builder, args []string, attributes map[string]
 		}
 	}

-	return b.processImageFrom(ctx, image)
+	return b.processImageFrom(image)
 }

 // ONBUILD RUN echo yo
... ...
@@ -240,7 +239,7 @@ func from(ctx context.Context, b *builder, args []string, attributes map[string]
 // special cases. search for 'OnBuild' in internals.go for additional special
 // cases.
 //
-func onbuild(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func onbuild(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) == 0 {
 		return derr.ErrorCodeAtLeastOneArg.WithArgs("ONBUILD")
 	}
... ...
@@ -260,14 +259,14 @@ func onbuild(ctx context.Context, b *builder, args []string, attributes map[stri
 	original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "")

 	b.Config.OnBuild = append(b.Config.OnBuild, original)
-	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", original))
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", original))
 }

 // WORKDIR /tmp
 //
 // Set the working directory for future RUN/CMD/etc statements.
 //
-func workdir(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func workdir(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) != 1 {
 		return derr.ErrorCodeExactlyOneArg.WithArgs("WORKDIR")
 	}
... ...
@@ -287,7 +286,7 @@ func workdir(ctx context.Context, b *builder, args []string, attributes map[stri

 	b.Config.WorkingDir = workdir

-	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
 }

 // RUN some command yo
... ...
@@ -300,7 +299,7 @@ func workdir(ctx context.Context, b *builder, args []string, attributes map[stri
 // RUN echo hi          # cmd /S /C echo hi   (Windows)
 // RUN [ "echo", "hi" ] # echo hi
 //
-func run(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func run(b *builder, args []string, attributes map[string]bool, original string) error {
 	if b.image == "" && !b.noBaseImage {
 		return derr.ErrorCodeMissingFrom
 	}
... ...
@@ -381,7 +380,7 @@ func run(ctx context.Context, b *builder, args []string, attributes map[string]b
 	}

 	b.Config.Cmd = saveCmd
-	hit, err := b.probeCache(ctx)
+	hit, err := b.probeCache()
 	if err != nil {
 		return err
 	}
... ...
@@ -396,17 +395,17 @@ func run(ctx context.Context, b *builder, args []string, attributes map[string]b

 	logrus.Debugf("[BUILDER] Command to be executed: %v", b.Config.Cmd)

-	c, err := b.create(ctx)
+	c, err := b.create()
 	if err != nil {
 		return err
 	}

 	// Ensure that we keep the container mounted until the commit
 	// to avoid unmounting and then mounting directly again
-	c.Mount(ctx)
-	defer c.Unmount(ctx)
+	c.Mount()
+	defer c.Unmount()

-	err = b.run(ctx, c)
+	err = b.run(c)
 	if err != nil {
 		return err
 	}
... ...
@@ -416,7 +415,7 @@ func run(ctx context.Context, b *builder, args []string, attributes map[string]b
 	// properly match it.
 	b.Config.Env = env
 	b.Config.Cmd = saveCmd
-	if err := b.commit(ctx, c.ID, cmd, "run"); err != nil {
+	if err := b.commit(c.ID, cmd, "run"); err != nil {
 		return err
 	}

... ...
@@ -428,7 +427,7 @@ func run(ctx context.Context, b *builder, args []string, attributes map[string]b
 // Set the default command to run in the container (which may be empty).
 // Argument handling is the same as RUN.
 //
-func cmd(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func cmd(b *builder, args []string, attributes map[string]bool, original string) error {
 	if err := b.BuilderFlags.Parse(); err != nil {
 		return err
 	}
... ...
@@ -445,7 +444,7 @@ func cmd(ctx context.Context, b *builder, args []string, attributes map[string]b

 	b.Config.Cmd = stringutils.NewStrSlice(cmdSlice...)

-	if err := b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil {
+	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil {
 		return err
 	}

... ...
@@ -464,7 +463,7 @@ func cmd(ctx context.Context, b *builder, args []string, attributes map[string]b
 // Handles command processing similar to CMD and RUN, only b.Config.Entrypoint
 // is initialized at NewBuilder time instead of through argument parsing.
 //
-func entrypoint(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func entrypoint(b *builder, args []string, attributes map[string]bool, original string) error {
 	if err := b.BuilderFlags.Parse(); err != nil {
 		return err
 	}
... ...
@@ -493,7 +492,7 @@ func entrypoint(ctx context.Context, b *builder, args []string, attributes map[s
 		b.Config.Cmd = nil
 	}

-	if err := b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.Config.Entrypoint)); err != nil {
+	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.Config.Entrypoint)); err != nil {
 		return err
 	}

... ...
@@ -505,7 +504,7 @@ func entrypoint(ctx context.Context, b *builder, args []string, attributes map[s
 // Expose ports for links and port mappings. This all ends up in
 // b.Config.ExposedPorts for runconfig.
 //
-func expose(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func expose(b *builder, args []string, attributes map[string]bool, original string) error {
 	portsTab := args

 	if len(args) == 0 {
... ...
@@ -538,7 +537,7 @@ func expose(ctx context.Context, b *builder, args []string, attributes map[strin
 		i++
 	}
 	sort.Strings(portList)
-	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " ")))
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " ")))
 }

 // USER foo
... ...
@@ -546,7 +545,7 @@ func expose(ctx context.Context, b *builder, args []string, attributes map[strin
 // Set the user to 'foo' for future commands and when running the
 // ENTRYPOINT/CMD at container run time.
 //
-func user(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func user(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) != 1 {
 		return derr.ErrorCodeExactlyOneArg.WithArgs("USER")
 	}
... ...
@@ -556,14 +555,14 @@ func user(ctx context.Context, b *builder, args []string, attributes map[string]
 	}

 	b.Config.User = args[0]
-	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("USER %v", args))
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args))
 }

 // VOLUME /foo
 //
 // Expose the volume /foo for use. Will also accept the JSON array form.
 //
-func volume(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func volume(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) == 0 {
 		return derr.ErrorCodeAtLeastOneArg.WithArgs("VOLUME")
 	}
... ...
@@ -582,7 +581,7 @@ func volume(ctx context.Context, b *builder, args []string, attributes map[strin
 		}
 		b.Config.Volumes[v] = struct{}{}
 	}
-	if err := b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil {
+	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil {
 		return err
 	}
 	return nil
... ...
@@ -591,7 +590,7 @@ func volume(ctx context.Context, b *builder, args []string, attributes map[strin
 // STOPSIGNAL signal
 //
 // Set the signal that will be used to kill the container.
-func stopSignal(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func stopSignal(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) != 1 {
 		return fmt.Errorf("STOPSIGNAL requires exactly one argument")
 	}
... ...
@@ -603,7 +602,7 @@ func stopSignal(ctx context.Context, b *builder, args []string, attributes map[s
 	}

 	b.Config.StopSignal = sig
-	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("STOPSIGNAL %v", args))
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("STOPSIGNAL %v", args))
 }

 // ARG name[=value]
... ...
@@ -611,7 +610,7 @@ func stopSignal(ctx context.Context, b *builder, args []string, attributes map[s
 // Adds the variable foo to the trusted list of variables that can be passed
 // to builder using the --build-arg flag for expansion/subsitution or passing to 'run'.
 // Dockerfile author may optionally set a default value of this variable.
-func arg(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func arg(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) != 1 {
 		return fmt.Errorf("ARG requires exactly one argument definition")
 	}
... ...
@@ -647,5 +646,5 @@ func arg(ctx context.Context, b *builder, args []string, attributes map[string]b
 		b.buildArgs[name] = value
 	}

-	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("ARG %s", arg))
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("ARG %s", arg))
 }
... ...
@@ -32,7 +32,6 @@ import (
 	"github.com/docker/docker/builder/command"
 	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/cliconfig"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/streamformatter"
... ...
@@ -58,10 +57,10 @@ var replaceEnvAllowed = map[string]struct{}{
 	command.Arg:        {},
 }

-var evaluateTable map[string]func(context.Context, *builder, []string, map[string]bool, string) error
+var evaluateTable map[string]func(*builder, []string, map[string]bool, string) error

 func init() {
-	evaluateTable = map[string]func(context.Context, *builder, []string, map[string]bool, string) error{
+	evaluateTable = map[string]func(*builder, []string, map[string]bool, string) error{
 		command.Env:        env,
 		command.Label:      label,
 		command.Maintainer: maintainer,
... ...
@@ -159,7 +158,7 @@ type builder struct {
 //   processing.
 // * Print a happy message and return the image ID.
 //
-func (b *builder) Run(ctx context.Context, context io.Reader) (string, error) {
+func (b *builder) Run(context io.Reader) (string, error) {
 	if err := b.readContext(context); err != nil {
 		return "", err
 	}
... ...
@@ -188,15 +187,15 @@ func (b *builder) Run(ctx context.Context, context io.Reader) (string, error) {
 		default:
 			// Not cancelled yet, keep going...
 		}
-		if err := b.dispatch(ctx, i, n); err != nil {
+		if err := b.dispatch(i, n); err != nil {
 			if b.ForceRemove {
-				b.clearTmp(ctx)
+				b.clearTmp()
 			}
 			return "", err
 		}
 		fmt.Fprintf(b.OutStream, " ---> %s\n", stringid.TruncateID(b.image))
 		if b.Remove {
-			b.clearTmp(ctx)
+			b.clearTmp()
 		}
 	}

... ...
@@ -312,7 +311,7 @@ func (b *builder) isBuildArgAllowed(arg string) bool {
 // such as `RUN` in ONBUILD RUN foo. There is special case logic in here to
 // deal with that, at least until it becomes more of a general concern with new
 // features.
-func (b *builder) dispatch(ctx context.Context, stepN int, ast *parser.Node) error {
+func (b *builder) dispatch(stepN int, ast *parser.Node) error {
 	cmd := ast.Value

 	// To ensure the user is given a decent error message if the platform
... ...
@@ -405,7 +404,7 @@ func (b *builder) dispatch(ctx context.Context, stepN int, ast *parser.Node) err
 	if f, ok := evaluateTable[cmd]; ok {
 		b.BuilderFlags = NewBFlags()
 		b.BuilderFlags.Args = flags
-		return f(ctx, b, strList, attrs, original)
+		return f(b, strList, attrs, original)
 	}

 	return fmt.Errorf("Unknown instruction: %s", strings.ToUpper(cmd))
... ...
@@ -22,7 +22,6 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/cliconfig"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/image"
... ...
@@ -76,7 +75,7 @@ func (b *builder) readContext(context io.Reader) (err error) {
 	return
 }

-func (b *builder) commit(ctx context.Context, id string, autoCmd *stringutils.StrSlice, comment string) error {
+func (b *builder) commit(id string, autoCmd *stringutils.StrSlice, comment string) error {
 	if b.disableCommit {
 		return nil
 	}
... ...
@@ -93,7 +92,7 @@ func (b *builder) commit(ctx context.Context, id string, autoCmd *stringutils.St
 		}
 		defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)

-		hit, err := b.probeCache(ctx)
+		hit, err := b.probeCache()
 		if err != nil {
 			return err
 		}
... ...
@@ -101,18 +100,18 @@ func (b *builder) commit(ctx context.Context, id string, autoCmd *stringutils.St
 			return nil
 		}

-		container, err := b.create(ctx)
+		container, err := b.create()
 		if err != nil {
 			return err
 		}
 		id = container.ID

-		if err := container.Mount(ctx); err != nil {
+		if err := container.Mount(); err != nil {
 			return err
 		}
-		defer container.Unmount(ctx)
+		defer container.Unmount()
 	}
-	container, err := b.Daemon.Get(ctx, id)
+	container, err := b.Daemon.Get(id)
 	if err != nil {
 		return err
 	}
... ...
@@ -128,11 +127,11 @@ func (b *builder) commit(ctx context.Context, id string, autoCmd *stringutils.St
 	}

 	// Commit the container
-	image, err := b.Daemon.Commit(ctx, container, commitCfg)
+	image, err := b.Daemon.Commit(container, commitCfg)
 	if err != nil {
 		return err
 	}
-	b.Daemon.Graph(ctx).Retain(b.id, image.ID)
+	b.Daemon.Graph().Retain(b.id, image.ID)
 	b.activeImages = append(b.activeImages, image.ID)
 	b.image = image.ID
 	return nil
... ...
@@ -146,7 +145,7 @@ type copyInfo struct {
 	tmpDir     string
 }

-func (b *builder) runContextCommand(ctx context.Context, args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
+func (b *builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
 	if b.context == nil {
 		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
 	}
... ...
@@ -224,7 +223,7 @@ func (b *builder) runContextCommand(ctx context.Context, args []string, allowRem
 	}
 	defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)

-	hit, err := b.probeCache(ctx)
+	hit, err := b.probeCache()
 	if err != nil {
 		return err
 	}
... ...
@@ -233,21 +232,21 @@ func (b *builder) runContextCommand(ctx context.Context, args []string, allowRem
 		return nil
 	}

-	ccr, err := b.Daemon.ContainerCreate(ctx, "", b.Config, nil, true)
+	ccr, err := b.Daemon.ContainerCreate("", b.Config, nil, true)
 	if err != nil {
 		return err
 	}
-	container, err := b.Daemon.Get(ctx, ccr.ID)
+	container, err := b.Daemon.Get(ccr.ID)
 	if err != nil {
 		return err
 	}

 	b.TmpContainers[container.ID] = struct{}{}

-	if err := container.Mount(ctx); err != nil {
+	if err := container.Mount(); err != nil {
 		return err
 	}
-	defer container.Unmount(ctx)
+	defer container.Unmount()

 	for _, ci := range copyInfos {
 		if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
... ...
@@ -255,7 +254,7 @@ func (b *builder) runContextCommand(ctx context.Context, args []string, allowRem
 		}
 	}

-	if err := b.commit(ctx, container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
+	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
 		return err
 	}
 	return nil
... ...
@@ -490,7 +489,7 @@ func containsWildcards(name string) bool {
 	return false
 }

-func (b *builder) pullImage(ctx context.Context, name string) (*image.Image, error) {
+func (b *builder) pullImage(name string) (*image.Image, error) {
 	remote, tag := parsers.ParseRepositoryTag(name)
 	if tag == "" {
 		tag = "latest"
... ...
@@ -516,11 +515,11 @@ func (b *builder) pullImage(ctx context.Context, name string) (*image.Image, err
 		OutStream:  ioutils.NopWriteCloser(b.OutOld),
 	}

-	if err := b.Daemon.Repositories(ctx).Pull(ctx, remote, tag, imagePullConfig); err != nil {
+	if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
 		return nil, err
 	}

-	image, err := b.Daemon.Repositories(ctx).LookupImage(name)
+	image, err := b.Daemon.Repositories().LookupImage(name)
 	if err != nil {
 		return nil, err
 	}
... ...
@@ -528,7 +527,7 @@ func (b *builder) pullImage(ctx context.Context, name string) (*image.Image, err
 	return image, nil
 }

-func (b *builder) processImageFrom(ctx context.Context, img *image.Image) error {
+func (b *builder) processImageFrom(img *image.Image) error {
 	b.image = img.ID

 	if img.Config != nil {
... ...
@@ -568,7 +567,7 @@ func (b *builder) processImageFrom(ctx context.Context, img *image.Image) error
 				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
 			}

-			if err := b.dispatch(ctx, i, n); err != nil {
+			if err := b.dispatch(i, n); err != nil {
 				return err
 			}
 		}
... ...
@@ -582,12 +581,12 @@ func (b *builder) processImageFrom(ctx context.Context, img *image.Image) error
 // in the current server `b.Daemon`. If an image is found, probeCache returns
 // `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
 // is any error, it returns `(false, err)`.
-func (b *builder) probeCache(ctx context.Context) (bool, error) {
+func (b *builder) probeCache() (bool, error) {
 	if !b.UtilizeCache || b.cacheBusted {
 		return false, nil
 	}

-	cache, err := b.Daemon.ImageGetCached(ctx, b.image, b.Config)
+	cache, err := b.Daemon.ImageGetCached(b.image, b.Config)
 	if err != nil {
 		return false, err
 	}
... ...
@@ -600,12 +599,12 @@ func (b *builder) probeCache(ctx context.Context) (bool, error) {
 	fmt.Fprintf(b.OutStream, " ---> Using cache\n")
 	logrus.Debugf("[BUILDER] Use cached version")
 	b.image = cache.ID
-	b.Daemon.Graph(ctx).Retain(b.id, cache.ID)
+	b.Daemon.Graph().Retain(b.id, cache.ID)
 	b.activeImages = append(b.activeImages, cache.ID)
 	return true, nil
 }

-func (b *builder) create(ctx context.Context) (*daemon.Container, error) {
+func (b *builder) create() (*daemon.Container, error) {
 	if b.image == "" && !b.noBaseImage {
 		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
 	}
... ...
@@ -626,14 +625,14 @@ func (b *builder) create(ctx context.Context) (*daemon.Container, error) {
 	config := *b.Config

 	// Create the container
-	ccr, err := b.Daemon.ContainerCreate(ctx, "", b.Config, hostConfig, true)
+	ccr, err := b.Daemon.ContainerCreate("", b.Config, hostConfig, true)
 	if err != nil {
 		return nil, err
 	}
 	for _, warning := range ccr.Warnings {
 		fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
 	}
-	c, err := b.Daemon.Get(ctx, ccr.ID)
+	c, err := b.Daemon.Get(ccr.ID)
 	if err != nil {
 		return nil, err
 	}
... ...
@@ -653,14 +652,14 @@ func (b *builder) create(ctx context.Context) (*daemon.Container, error) {
 	return c, nil
 }

-func (b *builder) run(ctx context.Context, c *daemon.Container) error {
+func (b *builder) run(c *daemon.Container) error {
 	var errCh chan error
 	if b.Verbose {
 		errCh = c.Attach(nil, b.OutStream, b.ErrStream)
 	}

 	//start the container
-	if err := c.Start(ctx); err != nil {
+	if err := c.Start(); err != nil {
 		return err
 	}

... ...
@@ -670,7 +669,7 @@ func (b *builder) run(ctx context.Context, c *daemon.Container) error {
 		select {
 		case <-b.cancelled:
 			logrus.Debugln("Build cancelled, killing container:", c.ID)
-			c.Kill(ctx)
+			c.Kill()
 		case <-finished:
 		}
 	}()
... ...
@@ -801,13 +800,13 @@ func copyAsDirectory(source, destination string, destExisted bool) error {
 	return fixPermissions(source, destination, 0, 0, destExisted)
 }

-func (b *builder) clearTmp(ctx context.Context) {
+func (b *builder) clearTmp() {
 	for c := range b.TmpContainers {
 		rmConfig := &daemon.ContainerRmConfig{
 			ForceRemove:  true,
 			RemoveVolume: true,
 		}
-		if err := b.Daemon.ContainerRm(ctx, c, rmConfig); err != nil {
+		if err := b.Daemon.ContainerRm(c, rmConfig); err != nil {
 			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
 			return
 		}
... ...
@@ -14,7 +14,6 @@ import (
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/cliconfig"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/graph/tags"
 	"github.com/docker/docker/pkg/archive"
... ...
@@ -113,7 +112,7 @@ func NewBuildConfig() *Config {

 // Build is the main interface of the package, it gathers the Builder
 // struct and calls builder.Run() to do all the real build job.
-func Build(ctx context.Context, d *daemon.Daemon, buildConfig *Config) error {
+func Build(d *daemon.Daemon, buildConfig *Config) error {
 	var (
 		repoName string
 		tag      string
... ...
@@ -230,15 +229,15 @@ func Build(ctx context.Context, d *daemon.Daemon, buildConfig *Config) error {
 	}

 	defer func() {
-		builder.Daemon.Graph(ctx).Release(builder.id, builder.activeImages...)
+		builder.Daemon.Graph().Release(builder.id, builder.activeImages...)
 	}()

-	id, err := builder.Run(ctx, context)
+	id, err := builder.Run(context)
 	if err != nil {
 		return err
 	}
 	if repoName != "" {
-		return d.Repositories(ctx).Tag(repoName, tag, id, true)
+		return d.Repositories().Tag(repoName, tag, id, true)
 	}
 	return nil
 }
... ...
@@ -248,7 +247,7 @@ func Build(ctx context.Context, d *daemon.Daemon, buildConfig *Config) error {
248 248
 //
249 249
 // - call parse.Parse() to get AST root from Dockerfile entries
250 250
 // - do build by calling builder.dispatch() to call all entries' handling routines
251
-func BuildFromConfig(ctx context.Context, d *daemon.Daemon, c *runconfig.Config, changes []string) (*runconfig.Config, error) {
251
+func BuildFromConfig(d *daemon.Daemon, c *runconfig.Config, changes []string) (*runconfig.Config, error) {
252 252
 	ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")))
253 253
 	if err != nil {
254 254
 		return nil, err
... ...
@@ -270,7 +269,7 @@ func BuildFromConfig(ctx context.Context, d *daemon.Daemon, c *runconfig.Config,
270 270
 	}
271 271
 
272 272
 	for i, n := range ast.Children {
273
-		if err := builder.dispatch(ctx, i, n); err != nil {
273
+		if err := builder.dispatch(i, n); err != nil {
274 274
 			return nil, err
275 275
 		}
276 276
 	}
... ...
@@ -290,8 +289,8 @@ type CommitConfig struct {
290 290
 }
291 291
 
292 292
 // Commit will create a new image from a container's changes
293
-func Commit(ctx context.Context, name string, d *daemon.Daemon, c *CommitConfig) (string, error) {
294
-	container, err := d.Get(ctx, name)
293
+func Commit(name string, d *daemon.Daemon, c *CommitConfig) (string, error) {
294
+	container, err := d.Get(name)
295 295
 	if err != nil {
296 296
 		return "", err
297 297
 	}
... ...
@@ -305,7 +304,7 @@ func Commit(ctx context.Context, name string, d *daemon.Daemon, c *CommitConfig)
305 305
 		c.Config = &runconfig.Config{}
306 306
 	}
307 307
 
308
-	newConfig, err := BuildFromConfig(ctx, d, c.Config, c.Changes)
308
+	newConfig, err := BuildFromConfig(d, c.Config, c.Changes)
309 309
 	if err != nil {
310 310
 		return "", err
311 311
 	}
... ...
@@ -323,7 +322,7 @@ func Commit(ctx context.Context, name string, d *daemon.Daemon, c *CommitConfig)
323 323
 		Config:  newConfig,
324 324
 	}
325 325
 
326
-	img, err := d.Commit(ctx, container, commitCfg)
326
+	img, err := d.Commit(container, commitCfg)
327 327
 	if err != nil {
328 328
 		return "", err
329 329
 	}
... ...
@@ -8,7 +8,6 @@ import (
8 8
 	"strings"
9 9
 
10 10
 	"github.com/docker/docker/api/types"
11
-	"github.com/docker/docker/context"
12 11
 	"github.com/docker/docker/pkg/archive"
13 12
 	"github.com/docker/docker/pkg/chrootarchive"
14 13
 	"github.com/docker/docker/pkg/ioutils"
... ...
@@ -21,8 +20,8 @@ var ErrExtractPointNotDirectory = errors.New("extraction point is not a director
21 21
 
22 22
 // ContainerCopy performs a deprecated operation of archiving the resource at
23 23
 // the specified path in the container identified by the given name.
24
-func (daemon *Daemon) ContainerCopy(ctx context.Context, name string, res string) (io.ReadCloser, error) {
25
-	container, err := daemon.Get(ctx, name)
24
+func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
25
+	container, err := daemon.Get(name)
26 26
 	if err != nil {
27 27
 		return nil, err
28 28
 	}
... ...
@@ -31,30 +30,30 @@ func (daemon *Daemon) ContainerCopy(ctx context.Context, name string, res string
31 31
 		res = res[1:]
32 32
 	}
33 33
 
34
-	return container.copy(ctx, res)
34
+	return container.copy(res)
35 35
 }
36 36
 
37 37
 // ContainerStatPath stats the filesystem resource at the specified path in the
38 38
 // container identified by the given name.
39
-func (daemon *Daemon) ContainerStatPath(ctx context.Context, name string, path string) (stat *types.ContainerPathStat, err error) {
40
-	container, err := daemon.Get(ctx, name)
39
+func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) {
40
+	container, err := daemon.Get(name)
41 41
 	if err != nil {
42 42
 		return nil, err
43 43
 	}
44 44
 
45
-	return container.StatPath(ctx, path)
45
+	return container.StatPath(path)
46 46
 }
47 47
 
48 48
 // ContainerArchivePath creates an archive of the filesystem resource at the
49 49
 // specified path in the container identified by the given name. Returns a
50 50
 // tar archive of the resource and whether it was a directory or a single file.
51
-func (daemon *Daemon) ContainerArchivePath(ctx context.Context, name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
52
-	container, err := daemon.Get(ctx, name)
51
+func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
52
+	container, err := daemon.Get(name)
53 53
 	if err != nil {
54 54
 		return nil, nil, err
55 55
 	}
56 56
 
57
-	return container.ArchivePath(ctx, path)
57
+	return container.ArchivePath(path)
58 58
 }
59 59
 
60 60
 // ContainerExtractToDir extracts the given archive to the specified location
... ...
@@ -63,13 +62,13 @@ func (daemon *Daemon) ContainerArchivePath(ctx context.Context, name string, pat
63 63
 // be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will
64 64
 // be an error if unpacking the given content would cause an existing directory
65 65
 // to be replaced with a non-directory and vice versa.
66
-func (daemon *Daemon) ContainerExtractToDir(ctx context.Context, name, path string, noOverwriteDirNonDir bool, content io.Reader) error {
67
-	container, err := daemon.Get(ctx, name)
66
+func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error {
67
+	container, err := daemon.Get(name)
68 68
 	if err != nil {
69 69
 		return err
70 70
 	}
71 71
 
72
-	return container.ExtractToDir(ctx, path, noOverwriteDirNonDir, content)
72
+	return container.ExtractToDir(path, noOverwriteDirNonDir, content)
73 73
 }
74 74
 
75 75
 // resolvePath resolves the given path in the container to a resource on the
... ...
@@ -134,14 +133,14 @@ func (container *Container) statPath(resolvedPath, absPath string) (stat *types.
134 134
 
135 135
 // StatPath stats the filesystem resource at the specified path in this
136 136
 // container. Returns stat info about the resource.
137
-func (container *Container) StatPath(ctx context.Context, path string) (stat *types.ContainerPathStat, err error) {
137
+func (container *Container) StatPath(path string) (stat *types.ContainerPathStat, err error) {
138 138
 	container.Lock()
139 139
 	defer container.Unlock()
140 140
 
141
-	if err = container.Mount(ctx); err != nil {
141
+	if err = container.Mount(); err != nil {
142 142
 		return nil, err
143 143
 	}
144
-	defer container.Unmount(ctx)
144
+	defer container.Unmount()
145 145
 
146 146
 	err = container.mountVolumes()
147 147
 	defer container.unmountVolumes(true)
... ...
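StatPath above follows a lock, mount, defer-unmount discipline so the rootfs is released on every return path. A generic sketch of that bracket under a hypothetical mounter type (not the daemon's):

package main

import (
	"fmt"
	"sync"
)

// mounter is a stand-in for the container's Mount/Unmount pair.
type mounter struct {
	mu      sync.Mutex
	mounted bool
}

func (m *mounter) Mount() error   { m.mounted = true; return nil }
func (m *mounter) Unmount() error { m.mounted = false; return nil }

// statPath mirrors the shape of Container.StatPath: take the lock,
// mount, and rely on defers to undo both regardless of how we return.
func (m *mounter) statPath(path string) (string, error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	if err := m.Mount(); err != nil {
		return "", err
	}
	defer m.Unmount()

	// ... resolve and stat the path inside the mounted rootfs ...
	return "stat:" + path, nil
}

func main() {
	m := &mounter{}
	st, err := m.statPath("/etc/hostname")
	fmt.Println(st, err, "still mounted?", m.mounted)
}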
@@ -160,7 +159,7 @@ func (container *Container) StatPath(ctx context.Context, path string) (stat *ty
160 160
 // ArchivePath creates an archive of the filesystem resource at the specified
161 161
 // path in this container. Returns a tar archive of the resource and stat info
162 162
 // about the resource.
163
-func (container *Container) ArchivePath(ctx context.Context, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
163
+func (container *Container) ArchivePath(path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
164 164
 	container.Lock()
165 165
 
166 166
 	defer func() {
... ...
@@ -172,7 +171,7 @@ func (container *Container) ArchivePath(ctx context.Context, path string) (conte
172 172
 		}
173 173
 	}()
174 174
 
175
-	if err = container.Mount(ctx); err != nil {
175
+	if err = container.Mount(); err != nil {
176 176
 		return nil, nil, err
177 177
 	}
178 178
 
... ...
@@ -181,7 +180,7 @@ func (container *Container) ArchivePath(ctx context.Context, path string) (conte
181 181
 			// unmount any volumes
182 182
 			container.unmountVolumes(true)
183 183
 			// unmount the container's rootfs
184
-			container.Unmount(ctx)
184
+			container.Unmount()
185 185
 		}
186 186
 	}()
187 187
 
... ...
@@ -215,12 +214,12 @@ func (container *Container) ArchivePath(ctx context.Context, path string) (conte
215 215
 	content = ioutils.NewReadCloserWrapper(data, func() error {
216 216
 		err := data.Close()
217 217
 		container.unmountVolumes(true)
218
-		container.Unmount(ctx)
218
+		container.Unmount()
219 219
 		container.Unlock()
220 220
 		return err
221 221
 	})
222 222
 
223
-	container.logEvent(ctx, "archive-path")
223
+	container.logEvent("archive-path")
224 224
 
225 225
 	return content, stat, nil
226 226
 }
... ...
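ArchivePath cannot unmount in a defer because the caller streams the tar after the function returns, so cleanup is attached to Close() of the returned reader instead. A stdlib-only sketch of that wrapper (ioutils.NewReadCloserWrapper amounts to roughly this):

package main

import (
	"fmt"
	"io"
	"strings"
)

// readCloserWrapper runs closer() when the consumer calls Close, which
// is where the container would be unmounted and unlocked.
type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (r *readCloserWrapper) Close() error { return r.closer() }

func newReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
	return &readCloserWrapper{Reader: r, closer: closer}
}

func main() {
	data := strings.NewReader("tar stream would go here")
	rc := newReadCloserWrapper(data, func() error {
		fmt.Println("unmount volumes, unmount rootfs, unlock container")
		return nil
	})
	io.Copy(io.Discard, rc) // caller consumes the archive...
	rc.Close()              // ...and cleanup only happens here
}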
@@ -231,14 +230,14 @@ func (container *Container) ArchivePath(ctx context.Context, path string) (conte
231 231
 // noOverwriteDirNonDir is true then it will be an error if unpacking the
232 232
 // given content would cause an existing directory to be replaced with a non-
233 233
 // directory and vice versa.
234
-func (container *Container) ExtractToDir(ctx context.Context, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) {
234
+func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool, content io.Reader) (err error) {
235 235
 	container.Lock()
236 236
 	defer container.Unlock()
237 237
 
238
-	if err = container.Mount(ctx); err != nil {
238
+	if err = container.Mount(); err != nil {
239 239
 		return err
240 240
 	}
241
-	defer container.Unmount(ctx)
241
+	defer container.Unmount()
242 242
 
243 243
 	err = container.mountVolumes()
244 244
 	defer container.unmountVolumes(true)
... ...
@@ -319,7 +318,7 @@ func (container *Container) ExtractToDir(ctx context.Context, path string, noOve
319 319
 		return err
320 320
 	}
321 321
 
322
-	container.logEvent(ctx, "extract-to-dir")
322
+	container.logEvent("extract-to-dir")
323 323
 
324 324
 	return nil
325 325
 }
... ...
@@ -3,7 +3,6 @@ package daemon
3 3
 import (
4 4
 	"io"
5 5
 
6
-	"github.com/docker/docker/context"
7 6
 	"github.com/docker/docker/pkg/stdcopy"
8 7
 )
9 8
 
... ...
@@ -16,8 +15,8 @@ type ContainerAttachWithLogsConfig struct {
16 16
 }
17 17
 
18 18
 // ContainerAttachWithLogs attaches to logs according to the config passed in. See ContainerAttachWithLogsConfig.
19
-func (daemon *Daemon) ContainerAttachWithLogs(ctx context.Context, prefixOrName string, c *ContainerAttachWithLogsConfig) error {
20
-	container, err := daemon.Get(ctx, prefixOrName)
19
+func (daemon *Daemon) ContainerAttachWithLogs(prefixOrName string, c *ContainerAttachWithLogsConfig) error {
20
+	container, err := daemon.Get(prefixOrName)
21 21
 	if err != nil {
22 22
 		return err
23 23
 	}
... ...
@@ -44,7 +43,7 @@ func (daemon *Daemon) ContainerAttachWithLogs(ctx context.Context, prefixOrName
44 44
 		stderr = errStream
45 45
 	}
46 46
 
47
-	return container.attachWithLogs(ctx, stdin, stdout, stderr, c.Logs, c.Stream)
47
+	return container.attachWithLogs(stdin, stdout, stderr, c.Logs, c.Stream)
48 48
 }
49 49
 
50 50
 // ContainerWsAttachWithLogsConfig attach with websockets, since all
... ...
@@ -56,10 +55,10 @@ type ContainerWsAttachWithLogsConfig struct {
56 56
 }
57 57
 
58 58
 // ContainerWsAttachWithLogs websocket connection
59
-func (daemon *Daemon) ContainerWsAttachWithLogs(ctx context.Context, prefixOrName string, c *ContainerWsAttachWithLogsConfig) error {
60
-	container, err := daemon.Get(ctx, prefixOrName)
59
+func (daemon *Daemon) ContainerWsAttachWithLogs(prefixOrName string, c *ContainerWsAttachWithLogsConfig) error {
60
+	container, err := daemon.Get(prefixOrName)
61 61
 	if err != nil {
62 62
 		return err
63 63
 	}
64
-	return container.attachWithLogs(ctx, c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream)
64
+	return container.attachWithLogs(c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream)
65 65
 }
... ...
@@ -1,13 +1,10 @@
1 1
 package daemon
2 2
 
3
-import (
4
-	"github.com/docker/docker/context"
5
-	"github.com/docker/docker/pkg/archive"
6
-)
3
+import "github.com/docker/docker/pkg/archive"
7 4
 
8 5
 // ContainerChanges returns a list of container fs changes
9
-func (daemon *Daemon) ContainerChanges(ctx context.Context, name string) ([]archive.Change, error) {
10
-	container, err := daemon.Get(ctx, name)
6
+func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) {
7
+	container, err := daemon.Get(name)
11 8
 	if err != nil {
12 9
 		return nil, err
13 10
 	}
... ...
@@ -1,7 +1,6 @@
1 1
 package daemon
2 2
 
3 3
 import (
4
-	"github.com/docker/docker/context"
5 4
 	"github.com/docker/docker/image"
6 5
 	"github.com/docker/docker/runconfig"
7 6
 )
... ...
@@ -19,10 +18,10 @@ type ContainerCommitConfig struct {
19 19
 
20 20
 // Commit creates a new filesystem image from the current state of a container.
21 21
 // The image can optionally be tagged into a repository.
22
-func (daemon *Daemon) Commit(ctx context.Context, container *Container, c *ContainerCommitConfig) (*image.Image, error) {
22
+func (daemon *Daemon) Commit(container *Container, c *ContainerCommitConfig) (*image.Image, error) {
23 23
 	if c.Pause && !container.isPaused() {
24
-		container.pause(ctx)
25
-		defer container.unpause(ctx)
24
+		container.pause()
25
+		defer container.unpause()
26 26
 	}
27 27
 
28 28
 	rwTar, err := container.exportContainerRw()
... ...
@@ -47,6 +46,6 @@ func (daemon *Daemon) Commit(ctx context.Context, container *Container, c *Conta
47 47
 			return img, err
48 48
 		}
49 49
 	}
50
-	container.logEvent(ctx, "commit")
50
+	container.logEvent("commit")
51 51
 	return img, nil
52 52
 }
... ...
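Commit above pauses the container only when it is running unpaused, and undoes that with a defer so the container is left as it was found. A small sketch of the conditional pause/resume bracket over a hypothetical container type:

package main

import "fmt"

type fakeContainer struct{ paused bool }

func (c *fakeContainer) isPaused() bool { return c.paused }
func (c *fakeContainer) pause()         { c.paused = true; fmt.Println("pause") }
func (c *fakeContainer) unpause()       { c.paused = false; fmt.Println("unpause") }

// commit snapshots the container, pausing it around the snapshot only
// if the caller asked for it and it was not already paused.
func commit(c *fakeContainer, pauseRequested bool) {
	if pauseRequested && !c.isPaused() {
		c.pause()
		defer c.unpause()
	}
	fmt.Println("export rw layer and build image")
}

func main() {
	commit(&fakeContainer{}, true)
}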
@@ -15,7 +15,6 @@ import (
15 15
 	"github.com/opencontainers/runc/libcontainer/label"
16 16
 
17 17
 	"github.com/Sirupsen/logrus"
18
-	"github.com/docker/docker/context"
19 18
 	"github.com/docker/docker/daemon/execdriver"
20 19
 	"github.com/docker/docker/daemon/logger"
21 20
 	"github.com/docker/docker/daemon/logger/jsonfilelog"
... ...
@@ -171,10 +170,9 @@ func (container *Container) writeHostConfig() error {
171 171
 	return ioutil.WriteFile(pth, data, 0666)
172 172
 }
173 173
 
174
-func (container *Container) logEvent(ctx context.Context, action string) {
174
+func (container *Container) logEvent(action string) {
175 175
 	d := container.daemon
176 176
 	d.EventsService.Log(
177
-		ctx,
178 177
 		action,
179 178
 		container.ID,
180 179
 		container.Config.Image,
... ...
@@ -240,7 +238,7 @@ func (container *Container) exportContainerRw() (archive.Archive, error) {
240 240
 // container needs, such as storage and networking, as well as links
241 241
 // between containers. The container is left waiting for a signal to
242 242
 // begin running.
243
-func (container *Container) Start(ctx context.Context) (err error) {
243
+func (container *Container) Start() (err error) {
244 244
 	container.Lock()
245 245
 	defer container.Unlock()
246 246
 
... ...
@@ -262,12 +260,12 @@ func (container *Container) Start(ctx context.Context) (err error) {
262 262
 				container.ExitCode = 128
263 263
 			}
264 264
 			container.toDisk()
265
-			container.cleanup(ctx)
266
-			container.logEvent(ctx, "die")
265
+			container.cleanup()
266
+			container.logEvent("die")
267 267
 		}
268 268
 	}()
269 269
 
270
-	if err := container.Mount(ctx); err != nil {
270
+	if err := container.Mount(); err != nil {
271 271
 		return err
272 272
 	}
273 273
 
... ...
@@ -275,10 +273,10 @@ func (container *Container) Start(ctx context.Context) (err error) {
275 275
 	// backwards API compatibility.
276 276
 	container.hostConfig = runconfig.SetDefaultNetModeIfBlank(container.hostConfig)
277 277
 
278
-	if err := container.initializeNetworking(ctx); err != nil {
278
+	if err := container.initializeNetworking(); err != nil {
279 279
 		return err
280 280
 	}
281
-	linkedEnv, err := container.setupLinkedContainers(ctx)
281
+	linkedEnv, err := container.setupLinkedContainers()
282 282
 	if err != nil {
283 283
 		return err
284 284
 	}
... ...
@@ -286,7 +284,7 @@ func (container *Container) Start(ctx context.Context) (err error) {
286 286
 		return err
287 287
 	}
288 288
 	env := container.createDaemonEnvironment(linkedEnv)
289
-	if err := populateCommand(ctx, container, env); err != nil {
289
+	if err := populateCommand(container, env); err != nil {
290 290
 		return err
291 291
 	}
292 292
 
... ...
@@ -303,7 +301,7 @@ func (container *Container) Start(ctx context.Context) (err error) {
303 303
 	mounts = append(mounts, container.ipcMounts()...)
304 304
 
305 305
 	container.command.Mounts = mounts
306
-	return container.waitForStart(ctx)
306
+	return container.waitForStart()
307 307
 }
308 308
 
309 309
 // streamConfig.StdinPipe returns a WriteCloser which can be used to feed data
... ...
@@ -336,14 +334,14 @@ func (container *Container) isNetworkAllocated() bool {
336 336
 
337 337
 // cleanup releases any network resources allocated to the container along with any rules
338 338
 // around how containers are linked together.  It also unmounts the container's root filesystem.
339
-func (container *Container) cleanup(ctx context.Context) {
339
+func (container *Container) cleanup() {
340 340
 	container.releaseNetwork()
341 341
 
342 342
 	if err := container.unmountIpcMounts(); err != nil {
343 343
 		logrus.Errorf("%s: Failed to umount ipc filesystems: %v", container.ID, err)
344 344
 	}
345 345
 
346
-	if err := container.Unmount(ctx); err != nil {
346
+	if err := container.Unmount(); err != nil {
347 347
 		logrus.Errorf("%s: Failed to umount filesystem: %v", container.ID, err)
348 348
 	}
349 349
 
... ...
@@ -359,7 +357,7 @@ func (container *Container) cleanup(ctx context.Context) {
359 359
 // to send the signal. An error is returned if the container is paused
360 360
 // or not running, or if there is a problem returned from the
361 361
 // underlying kill command.
362
-func (container *Container) killSig(ctx context.Context, sig int) error {
362
+func (container *Container) killSig(sig int) error {
363 363
 	logrus.Debugf("Sending %d to %s", sig, container.ID)
364 364
 	container.Lock()
365 365
 	defer container.Unlock()
... ...
@@ -387,13 +385,13 @@ func (container *Container) killSig(ctx context.Context, sig int) error {
387 387
 	if err := container.daemon.kill(container, sig); err != nil {
388 388
 		return err
389 389
 	}
390
-	container.logEvent(ctx, "kill")
390
+	container.logEvent("kill")
391 391
 	return nil
392 392
 }
393 393
 
394 394
 // Wrapper around killSig() suppressing "no such process" error.
395
-func (container *Container) killPossiblyDeadProcess(ctx context.Context, sig int) error {
396
-	err := container.killSig(ctx, sig)
395
+func (container *Container) killPossiblyDeadProcess(sig int) error {
396
+	err := container.killSig(sig)
397 397
 	if err == syscall.ESRCH {
398 398
 		logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.getPID(), sig)
399 399
 		return nil
... ...
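killPossiblyDeadProcess above treats syscall.ESRCH ("no such process") as success, since the process racing us to exit is expected. The same error-filtering wrapper in isolation (Unix-only, pid chosen only to demonstrate the ESRCH path):

package main

import (
	"fmt"
	"syscall"
)

// killIgnoringDead sends sig to pid but swallows ESRCH, which just
// means the process already exited between our check and the kill.
func killIgnoringDead(pid int, sig syscall.Signal) error {
	err := syscall.Kill(pid, sig)
	if err == syscall.ESRCH {
		fmt.Printf("pid %d already gone, ignoring\n", pid)
		return nil
	}
	return err
}

func main() {
	// 999999 almost certainly does not exist, so this exercises the
	// "no such process" branch without touching a real process.
	fmt.Println(killIgnoringDead(999999, syscall.SIGTERM))
}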
@@ -401,7 +399,7 @@ func (container *Container) killPossiblyDeadProcess(ctx context.Context, sig int
401 401
 	return err
402 402
 }
403 403
 
404
-func (container *Container) pause(ctx context.Context) error {
404
+func (container *Container) pause() error {
405 405
 	container.Lock()
406 406
 	defer container.Unlock()
407 407
 
... ...
@@ -419,11 +417,11 @@ func (container *Container) pause(ctx context.Context) error {
419 419
 		return err
420 420
 	}
421 421
 	container.Paused = true
422
-	container.logEvent(ctx, "pause")
422
+	container.logEvent("pause")
423 423
 	return nil
424 424
 }
425 425
 
426
-func (container *Container) unpause(ctx context.Context) error {
426
+func (container *Container) unpause() error {
427 427
 	container.Lock()
428 428
 	defer container.Unlock()
429 429
 
... ...
@@ -441,18 +439,18 @@ func (container *Container) unpause(ctx context.Context) error {
441 441
 		return err
442 442
 	}
443 443
 	container.Paused = false
444
-	container.logEvent(ctx, "unpause")
444
+	container.logEvent("unpause")
445 445
 	return nil
446 446
 }
447 447
 
448 448
 // Kill forcefully terminates a container.
449
-func (container *Container) Kill(ctx context.Context) error {
449
+func (container *Container) Kill() error {
450 450
 	if !container.IsRunning() {
451 451
 		return derr.ErrorCodeNotRunning.WithArgs(container.ID)
452 452
 	}
453 453
 
454 454
 	// 1. Send SIGKILL
455
-	if err := container.killPossiblyDeadProcess(ctx, int(syscall.SIGKILL)); err != nil {
455
+	if err := container.killPossiblyDeadProcess(int(syscall.SIGKILL)); err != nil {
456 456
 		// While normally we might "return err" here we're not going to
457 457
 		// because if we can't stop the container by this point then
458 458
 		// it's probably because it's already stopped. Meaning, between
... ...
@@ -486,15 +484,15 @@ func (container *Container) Kill(ctx context.Context) error {
486 486
 // process to exit. If a negative duration is given, Stop will wait
487 487
 // for the initial signal forever. If the container is not running Stop returns
488 488
 // immediately.
489
-func (container *Container) Stop(ctx context.Context, seconds int) error {
489
+func (container *Container) Stop(seconds int) error {
490 490
 	if !container.IsRunning() {
491 491
 		return nil
492 492
 	}
493 493
 
494 494
 	// 1. Send a SIGTERM
495
-	if err := container.killPossiblyDeadProcess(ctx, container.stopSignal()); err != nil {
495
+	if err := container.killPossiblyDeadProcess(container.stopSignal()); err != nil {
496 496
 		logrus.Infof("Failed to send SIGTERM to the process, force killing")
497
-		if err := container.killPossiblyDeadProcess(ctx, 9); err != nil {
497
+		if err := container.killPossiblyDeadProcess(9); err != nil {
498 498
 			return err
499 499
 		}
500 500
 	}
... ...
@@ -503,13 +501,13 @@ func (container *Container) Stop(ctx context.Context, seconds int) error {
503 503
 	if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil {
504 504
 		logrus.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
505 505
 		// 3. If it doesn't, then send SIGKILL
506
-		if err := container.Kill(ctx); err != nil {
506
+		if err := container.Kill(); err != nil {
507 507
 			container.WaitStop(-1 * time.Second)
508 508
 			return err
509 509
 		}
510 510
 	}
511 511
 
512
-	container.logEvent(ctx, "stop")
512
+	container.logEvent("stop")
513 513
 	return nil
514 514
 }
515 515
 
... ...
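Stop above is the classic term-then-kill escalation: send the stop signal, wait up to the timeout, then fall back to SIGKILL. A self-contained sketch of the same flow against a plain os/exec process rather than a container:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// stopProcess mirrors the Stop flow: SIGTERM, bounded wait, then SIGKILL.
func stopProcess(cmd *exec.Cmd, timeout time.Duration) error {
	if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
		return err
	}
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case <-done:
		return nil // exited within the grace period
	case <-time.After(timeout):
		fmt.Println("did not exit in time, using the force")
		if err := cmd.Process.Kill(); err != nil {
			return err
		}
		<-done // reap; a "signal: killed" wait status is expected here
		return nil
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	fmt.Println(stopProcess(cmd, 2*time.Second))
}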
@@ -517,61 +515,61 @@ func (container *Container) Stop(ctx context.Context, seconds int) error {
517 517
 // container. When stopping, wait for the given duration in seconds to
518 518
 // gracefully stop, before forcefully terminating the container. If
519 519
 // given a negative duration, wait forever for a graceful stop.
520
-func (container *Container) Restart(ctx context.Context, seconds int) error {
520
+func (container *Container) Restart(seconds int) error {
521 521
 	// Avoid unnecessarily unmounting and then directly mounting
522 522
 	// the container when the container stops and then starts
523 523
 	// again
524
-	if err := container.Mount(ctx); err == nil {
525
-		defer container.Unmount(ctx)
524
+	if err := container.Mount(); err == nil {
525
+		defer container.Unmount()
526 526
 	}
527 527
 
528
-	if err := container.Stop(ctx, seconds); err != nil {
528
+	if err := container.Stop(seconds); err != nil {
529 529
 		return err
530 530
 	}
531 531
 
532
-	if err := container.Start(ctx); err != nil {
532
+	if err := container.Start(); err != nil {
533 533
 		return err
534 534
 	}
535 535
 
536
-	container.logEvent(ctx, "restart")
536
+	container.logEvent("restart")
537 537
 	return nil
538 538
 }
539 539
 
540 540
 // Resize changes the TTY of the process running inside the container
541 541
 // to the given height and width. The container must be running.
542
-func (container *Container) Resize(ctx context.Context, h, w int) error {
542
+func (container *Container) Resize(h, w int) error {
543 543
 	if !container.IsRunning() {
544 544
 		return derr.ErrorCodeNotRunning.WithArgs(container.ID)
545 545
 	}
546 546
 	if err := container.command.ProcessConfig.Terminal.Resize(h, w); err != nil {
547 547
 		return err
548 548
 	}
549
-	container.logEvent(ctx, "resize")
549
+	container.logEvent("resize")
550 550
 	return nil
551 551
 }
552 552
 
553
-func (container *Container) export(ctx context.Context) (archive.Archive, error) {
554
-	if err := container.Mount(ctx); err != nil {
553
+func (container *Container) export() (archive.Archive, error) {
554
+	if err := container.Mount(); err != nil {
555 555
 		return nil, err
556 556
 	}
557 557
 
558 558
 	archive, err := archive.Tar(container.basefs, archive.Uncompressed)
559 559
 	if err != nil {
560
-		container.Unmount(ctx)
560
+		container.Unmount()
561 561
 		return nil, err
562 562
 	}
563 563
 	arch := ioutils.NewReadCloserWrapper(archive, func() error {
564 564
 		err := archive.Close()
565
-		container.Unmount(ctx)
565
+		container.Unmount()
566 566
 		return err
567 567
 	})
568
-	container.logEvent(ctx, "export")
568
+	container.logEvent("export")
569 569
 	return arch, err
570 570
 }
571 571
 
572 572
 // Mount sets container.basefs
573
-func (container *Container) Mount(ctx context.Context) error {
574
-	return container.daemon.Mount(ctx, container)
573
+func (container *Container) Mount() error {
574
+	return container.daemon.Mount(container)
575 575
 }
576 576
 
577 577
 func (container *Container) changes() ([]archive.Change, error) {
... ...
@@ -580,7 +578,7 @@ func (container *Container) changes() ([]archive.Change, error) {
580 580
 	return container.daemon.changes(container)
581 581
 }
582 582
 
583
-func (container *Container) getImage(ctx context.Context) (*image.Image, error) {
583
+func (container *Container) getImage() (*image.Image, error) {
584 584
 	if container.daemon == nil {
585 585
 		return nil, derr.ErrorCodeImageUnregContainer
586 586
 	}
... ...
@@ -589,7 +587,7 @@ func (container *Container) getImage(ctx context.Context) (*image.Image, error)
589 589
 
590 590
 // Unmount asks the daemon to release the layered filesystems that are
591 591
 // mounted by the container.
592
-func (container *Container) Unmount(ctx context.Context) error {
592
+func (container *Container) Unmount() error {
593 593
 	return container.daemon.unmount(container)
594 594
 }
595 595
 
... ...
@@ -614,7 +612,7 @@ func validateID(id string) error {
614 614
 	return nil
615 615
 }
616 616
 
617
-func (container *Container) copy(ctx context.Context, resource string) (rc io.ReadCloser, err error) {
617
+func (container *Container) copy(resource string) (rc io.ReadCloser, err error) {
618 618
 	container.Lock()
619 619
 
620 620
 	defer func() {
... ...
@@ -626,7 +624,7 @@ func (container *Container) copy(ctx context.Context, resource string) (rc io.Re
626 626
 		}
627 627
 	}()
628 628
 
629
-	if err := container.Mount(ctx); err != nil {
629
+	if err := container.Mount(); err != nil {
630 630
 		return nil, err
631 631
 	}
632 632
 
... ...
@@ -635,7 +633,7 @@ func (container *Container) copy(ctx context.Context, resource string) (rc io.Re
635 635
 			// unmount any volumes
636 636
 			container.unmountVolumes(true)
637 637
 			// unmount the container's rootfs
638
-			container.Unmount(ctx)
638
+			container.Unmount()
639 639
 		}
640 640
 	}()
641 641
 
... ...
@@ -671,11 +669,11 @@ func (container *Container) copy(ctx context.Context, resource string) (rc io.Re
671 671
 	reader := ioutils.NewReadCloserWrapper(archive, func() error {
672 672
 		err := archive.Close()
673 673
 		container.unmountVolumes(true)
674
-		container.Unmount(ctx)
674
+		container.Unmount()
675 675
 		container.Unlock()
676 676
 		return err
677 677
 	})
678
-	container.logEvent(ctx, "copy")
678
+	container.logEvent("copy")
679 679
 	return reader, nil
680 680
 }
681 681
 
... ...
@@ -754,14 +752,14 @@ func (container *Container) startLogging() error {
754 754
 	return nil
755 755
 }
756 756
 
757
-func (container *Container) waitForStart(ctx context.Context) error {
757
+func (container *Container) waitForStart() error {
758 758
 	container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy)
759 759
 
760 760
 	// block until we either receive an error from the initial start of the container's
761 761
 	// process or until the process is running in the container
762 762
 	select {
763 763
 	case <-container.monitor.startSignal:
764
-	case err := <-promise.Go(func() error { return container.monitor.Start(ctx) }):
764
+	case err := <-promise.Go(container.monitor.Start):
765 765
 		return err
766 766
 	}
767 767
 
... ...
@@ -792,11 +790,11 @@ func (container *Container) getExecIDs() []string {
792 792
 	return container.execCommands.List()
793 793
 }
794 794
 
795
-func (container *Container) exec(ctx context.Context, ExecConfig *ExecConfig) error {
795
+func (container *Container) exec(ExecConfig *ExecConfig) error {
796 796
 	container.Lock()
797 797
 	defer container.Unlock()
798 798
 
799
-	callback := func(ctx context.Context, processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
799
+	callback := func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
800 800
 		if processConfig.Tty {
801 801
 			// The callback is called after the process Start()
802 802
 			// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
... ...
@@ -811,7 +809,7 @@ func (container *Container) exec(ctx context.Context, ExecConfig *ExecConfig) er
811 811
 
812 812
 	// We use a callback here instead of a goroutine and a chan for
813 813
 	// synchronization purposes
814
-	cErr := promise.Go(func() error { return container.monitorExec(ctx, ExecConfig, callback) })
814
+	cErr := promise.Go(func() error { return container.monitorExec(ExecConfig, callback) })
815 815
 
816 816
 	// Exec should not return until the process is actually running
817 817
 	select {
... ...
@@ -823,13 +821,13 @@ func (container *Container) exec(ctx context.Context, ExecConfig *ExecConfig) er
823 823
 	return nil
824 824
 }
825 825
 
826
-func (container *Container) monitorExec(ctx context.Context, ExecConfig *ExecConfig, callback execdriver.DriverCallback) error {
826
+func (container *Container) monitorExec(ExecConfig *ExecConfig, callback execdriver.DriverCallback) error {
827 827
 	var (
828 828
 		err      error
829 829
 		exitCode int
830 830
 	)
831 831
 	pipes := execdriver.NewPipes(ExecConfig.streamConfig.stdin, ExecConfig.streamConfig.stdout, ExecConfig.streamConfig.stderr, ExecConfig.OpenStdin)
832
-	exitCode, err = container.daemon.Exec(ctx, container, ExecConfig, pipes, callback)
832
+	exitCode, err = container.daemon.Exec(container, ExecConfig, pipes, callback)
833 833
 	if err != nil {
834 834
 		logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
835 835
 	}
... ...
@@ -862,7 +860,7 @@ func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr
862 862
 	return attach(&container.streamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr)
863 863
 }
864 864
 
865
-func (container *Container) attachWithLogs(ctx context.Context, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
865
+func (container *Container) attachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
866 866
 	if logs {
867 867
 		logDriver, err := container.getLogger()
868 868
 		if err != nil {
... ...
@@ -894,7 +892,7 @@ func (container *Container) attachWithLogs(ctx context.Context, stdin io.ReadClo
894 894
 		}
895 895
 	}
896 896
 
897
-	container.logEvent(ctx, "attach")
897
+	container.logEvent("attach")
898 898
 
899 899
 	//stream
900 900
 	if stream {
... ...
@@ -15,7 +15,6 @@ import (
15 15
 	"time"
16 16
 
17 17
 	"github.com/Sirupsen/logrus"
18
-	"github.com/docker/docker/context"
19 18
 	"github.com/docker/docker/daemon/execdriver"
20 19
 	"github.com/docker/docker/daemon/links"
21 20
 	"github.com/docker/docker/daemon/network"
... ...
@@ -78,12 +77,12 @@ func killProcessDirectly(container *Container) error {
78 78
 	return nil
79 79
 }
80 80
 
81
-func (container *Container) setupLinkedContainers(ctx context.Context) ([]string, error) {
81
+func (container *Container) setupLinkedContainers() ([]string, error) {
82 82
 	var (
83 83
 		env    []string
84 84
 		daemon = container.daemon
85 85
 	)
86
-	children, err := daemon.children(ctx, container.Name)
86
+	children, err := daemon.children(container.Name)
87 87
 	if err != nil {
88 88
 		return nil, err
89 89
 	}
... ...
@@ -176,7 +175,7 @@ func getDevicesFromPath(deviceMapping runconfig.DeviceMapping) (devs []*configs.
176 176
 	return devs, derr.ErrorCodeDeviceInfo.WithArgs(deviceMapping.PathOnHost, err)
177 177
 }
178 178
 
179
-func populateCommand(ctx context.Context, c *Container, env []string) error {
179
+func populateCommand(c *Container, env []string) error {
180 180
 	var en *execdriver.Network
181 181
 	if !c.Config.NetworkDisabled {
182 182
 		en = &execdriver.Network{}
... ...
@@ -186,7 +185,7 @@ func populateCommand(ctx context.Context, c *Container, env []string) error {
186 186
 
187 187
 		parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
188 188
 		if parts[0] == "container" {
189
-			nc, err := c.getNetworkedContainer(ctx)
189
+			nc, err := c.getNetworkedContainer()
190 190
 			if err != nil {
191 191
 				return err
192 192
 			}
... ...
@@ -207,7 +206,7 @@ func populateCommand(ctx context.Context, c *Container, env []string) error {
207 207
 	}
208 208
 
209 209
 	if c.hostConfig.IpcMode.IsContainer() {
210
-		ic, err := c.getIpcContainer(ctx)
210
+		ic, err := c.getIpcContainer()
211 211
 		if err != nil {
212 212
 			return err
213 213
 		}
... ...
@@ -350,18 +349,18 @@ func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Devi
350 350
 }
351 351
 
352 352
 // GetSize returns the real size & virtual size of the container.
353
-func (container *Container) getSize(ctx context.Context) (int64, int64) {
353
+func (container *Container) getSize() (int64, int64) {
354 354
 	var (
355 355
 		sizeRw, sizeRootfs int64
356 356
 		err                error
357 357
 		driver             = container.daemon.driver
358 358
 	)
359 359
 
360
-	if err := container.Mount(ctx); err != nil {
360
+	if err := container.Mount(); err != nil {
361 361
 		logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err)
362 362
 		return sizeRw, sizeRootfs
363 363
 	}
364
-	defer container.Unmount(ctx)
364
+	defer container.Unmount()
365 365
 
366 366
 	initID := fmt.Sprintf("%s-init", container.ID)
367 367
 	sizeRw, err = driver.DiffSize(container.ID, initID)
... ...
@@ -413,7 +412,7 @@ func (container *Container) buildHostnameFile() error {
413 413
 	return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
414 414
 }
415 415
 
416
-func (container *Container) buildSandboxOptions(ctx context.Context) ([]libnetwork.SandboxOption, error) {
416
+func (container *Container) buildSandboxOptions() ([]libnetwork.SandboxOption, error) {
417 417
 	var (
418 418
 		sboxOptions []libnetwork.SandboxOption
419 419
 		err         error
... ...
@@ -490,7 +489,7 @@ func (container *Container) buildSandboxOptions(ctx context.Context) ([]libnetwo
490 490
 
491 491
 	var childEndpoints, parentEndpoints []string
492 492
 
493
-	children, err := container.daemon.children(ctx, container.Name)
493
+	children, err := container.daemon.children(container.Name)
494 494
 	if err != nil {
495 495
 		return nil, err
496 496
 	}
... ...
@@ -521,7 +520,7 @@ func (container *Container) buildSandboxOptions(ctx context.Context) ([]libnetwo
521 521
 			continue
522 522
 		}
523 523
 
524
-		c, err := container.daemon.Get(ctx, ref.ParentID)
524
+		c, err := container.daemon.Get(ref.ParentID)
525 525
 		if err != nil {
526 526
 			logrus.Error(err)
527 527
 		}
... ...
@@ -680,7 +679,7 @@ func (container *Container) updateSandboxNetworkSettings(sb libnetwork.Sandbox)
680 680
 
681 681
 // UpdateNetwork is used to update the container's network (e.g. when linked containers
682 682
 // get removed/unlinked).
683
-func (container *Container) updateNetwork(ctx context.Context) error {
683
+func (container *Container) updateNetwork() error {
684 684
 	ctrl := container.daemon.netController
685 685
 	sid := container.NetworkSettings.SandboxID
686 686
 
... ...
@@ -689,7 +688,7 @@ func (container *Container) updateNetwork(ctx context.Context) error {
689 689
 		return derr.ErrorCodeNoSandbox.WithArgs(sid, err)
690 690
 	}
691 691
 
692
-	options, err := container.buildSandboxOptions(ctx)
692
+	options, err := container.buildSandboxOptions()
693 693
 	if err != nil {
694 694
 		return derr.ErrorCodeNetworkUpdate.WithArgs(err)
695 695
 	}
... ...
@@ -813,7 +812,7 @@ func createNetwork(controller libnetwork.NetworkController, dnet string, driver
813 813
 	return controller.NewNetwork(driver, dnet, createOptions...)
814 814
 }
815 815
 
816
-func (container *Container) allocateNetwork(ctx context.Context) error {
816
+func (container *Container) allocateNetwork() error {
817 817
 	mode := container.hostConfig.NetworkMode
818 818
 	controller := container.daemon.netController
819 819
 	if container.Config.NetworkDisabled || mode.IsContainer() {
... ...
@@ -847,14 +846,14 @@ func (container *Container) allocateNetwork(ctx context.Context) error {
847 847
 		service = strings.Replace(service, "/", "", -1)
848 848
 	}
849 849
 
850
-	if err := container.configureNetwork(ctx, networkName, service, networkDriver, mode.IsDefault()); err != nil {
850
+	if err := container.configureNetwork(networkName, service, networkDriver, mode.IsDefault()); err != nil {
851 851
 		return err
852 852
 	}
853 853
 
854 854
 	return container.writeHostConfig()
855 855
 }
856 856
 
857
-func (container *Container) configureNetwork(ctx context.Context, networkName, service, networkDriver string, canCreateNetwork bool) error {
857
+func (container *Container) configureNetwork(networkName, service, networkDriver string, canCreateNetwork bool) error {
858 858
 	controller := container.daemon.netController
859 859
 
860 860
 	n, err := controller.NetworkByName(networkName)
... ...
@@ -898,7 +897,7 @@ func (container *Container) configureNetwork(ctx context.Context, networkName, s
898 898
 		return false
899 899
 	})
900 900
 	if sb == nil {
901
-		options, err := container.buildSandboxOptions(ctx)
901
+		options, err := container.buildSandboxOptions()
902 902
 		if err != nil {
903 903
 			return err
904 904
 		}
... ...
@@ -921,12 +920,12 @@ func (container *Container) configureNetwork(ctx context.Context, networkName, s
921 921
 	return nil
922 922
 }
923 923
 
924
-func (container *Container) initializeNetworking(ctx context.Context) error {
924
+func (container *Container) initializeNetworking() error {
925 925
 	var err error
926 926
 
927 927
 	if container.hostConfig.NetworkMode.IsContainer() {
928 928
 		// we need to get the hosts files from the container to join
929
-		nc, err := container.getNetworkedContainer(ctx)
929
+		nc, err := container.getNetworkedContainer()
930 930
 		if err != nil {
931 931
 			return err
932 932
 		}
... ...
@@ -952,7 +951,7 @@ func (container *Container) initializeNetworking(ctx context.Context) error {
952 952
 
953 953
 	}
954 954
 
955
-	if err := container.allocateNetwork(ctx); err != nil {
955
+	if err := container.allocateNetwork(); err != nil {
956 956
 		return err
957 957
 	}
958 958
 
... ...
@@ -973,9 +972,9 @@ func (container *Container) setNetworkNamespaceKey(pid int) error {
973 973
 	return sandbox.SetKey(path)
974 974
 }
975 975
 
976
-func (container *Container) getIpcContainer(ctx context.Context) (*Container, error) {
976
+func (container *Container) getIpcContainer() (*Container, error) {
977 977
 	containerID := container.hostConfig.IpcMode.Container()
978
-	c, err := container.daemon.Get(ctx, containerID)
978
+	c, err := container.daemon.Get(containerID)
979 979
 	if err != nil {
980 980
 		return nil, err
981 981
 	}
... ...
@@ -1011,14 +1010,14 @@ func (container *Container) setupWorkingDirectory() error {
1011 1011
 	return nil
1012 1012
 }
1013 1013
 
1014
-func (container *Container) getNetworkedContainer(ctx context.Context) (*Container, error) {
1014
+func (container *Container) getNetworkedContainer() (*Container, error) {
1015 1015
 	parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2)
1016 1016
 	switch parts[0] {
1017 1017
 	case "container":
1018 1018
 		if len(parts) != 2 {
1019 1019
 			return nil, derr.ErrorCodeParseContainer
1020 1020
 		}
1021
-		nc, err := container.daemon.Get(ctx, parts[1])
1021
+		nc, err := container.daemon.Get(parts[1])
1022 1022
 		if err != nil {
1023 1023
 			return nil, err
1024 1024
 		}
... ...
@@ -5,7 +5,6 @@ package daemon
5 5
 import (
6 6
 	"strings"
7 7
 
8
-	"github.com/docker/docker/context"
9 8
 	"github.com/docker/docker/daemon/execdriver"
10 9
 	derr "github.com/docker/docker/errors"
11 10
 )
... ...
@@ -26,7 +25,7 @@ func killProcessDirectly(container *Container) error {
26 26
 	return nil
27 27
 }
28 28
 
29
-func (container *Container) setupLinkedContainers(ctx context.Context) ([]string, error) {
29
+func (container *Container) setupLinkedContainers() ([]string, error) {
30 30
 	return nil, nil
31 31
 }
32 32
 
... ...
@@ -35,7 +34,7 @@ func (container *Container) createDaemonEnvironment(linkedEnv []string) []string
35 35
 	return container.Config.Env
36 36
 }
37 37
 
38
-func (container *Container) initializeNetworking(ctx context.Context) error {
38
+func (container *Container) initializeNetworking() error {
39 39
 	return nil
40 40
 }
41 41
 
... ...
@@ -43,7 +42,7 @@ func (container *Container) setupWorkingDirectory() error {
43 43
 	return nil
44 44
 }
45 45
 
46
-func populateCommand(ctx context.Context, c *Container, env []string) error {
46
+func populateCommand(c *Container, env []string) error {
47 47
 	en := &execdriver.Network{
48 48
 		Interface: nil,
49 49
 	}
... ...
@@ -136,7 +135,7 @@ func populateCommand(ctx context.Context, c *Container, env []string) error {
136 136
 }
137 137
 
138 138
 // GetSize returns real size & virtual size
139
-func (container *Container) getSize(ctx context.Context) (int64, int64) {
139
+func (container *Container) getSize() (int64, int64) {
140 140
 	// TODO Windows
141 141
 	return 0, 0
142 142
 }
... ...
@@ -151,7 +150,7 @@ func (container *Container) allocateNetwork() error {
151 151
 	return nil
152 152
 }
153 153
 
154
-func (container *Container) updateNetwork(ctx context.Context) error {
154
+func (container *Container) updateNetwork() error {
155 155
 	return nil
156 156
 }
157 157
 
... ...
@@ -5,7 +5,6 @@ import (
5 5
 
6 6
 	"github.com/Sirupsen/logrus"
7 7
 	"github.com/docker/docker/api/types"
8
-	"github.com/docker/docker/context"
9 8
 	derr "github.com/docker/docker/errors"
10 9
 	"github.com/docker/docker/graph/tags"
11 10
 	"github.com/docker/docker/image"
... ...
@@ -16,21 +15,21 @@ import (
16 16
 )
17 17
 
18 18
 // ContainerCreate takes configs and creates a container.
19
-func (daemon *Daemon) ContainerCreate(ctx context.Context, name string, config *runconfig.Config, hostConfig *runconfig.HostConfig, adjustCPUShares bool) (types.ContainerCreateResponse, error) {
19
+func (daemon *Daemon) ContainerCreate(name string, config *runconfig.Config, hostConfig *runconfig.HostConfig, adjustCPUShares bool) (types.ContainerCreateResponse, error) {
20 20
 	if config == nil {
21 21
 		return types.ContainerCreateResponse{}, derr.ErrorCodeEmptyConfig
22 22
 	}
23 23
 
24
-	warnings, err := daemon.verifyContainerSettings(ctx, hostConfig, config)
24
+	warnings, err := daemon.verifyContainerSettings(hostConfig, config)
25 25
 	if err != nil {
26 26
 		return types.ContainerCreateResponse{"", warnings}, err
27 27
 	}
28 28
 
29 29
 	daemon.adaptContainerSettings(hostConfig, adjustCPUShares)
30 30
 
31
-	container, buildWarnings, err := daemon.Create(ctx, config, hostConfig, name)
31
+	container, buildWarnings, err := daemon.Create(config, hostConfig, name)
32 32
 	if err != nil {
33
-		if daemon.Graph(ctx).IsNotExist(err, config.Image) {
33
+		if daemon.Graph().IsNotExist(err, config.Image) {
34 34
 			if strings.Contains(config.Image, "@") {
35 35
 				return types.ContainerCreateResponse{"", warnings}, derr.ErrorCodeNoSuchImageHash.WithArgs(config.Image)
36 36
 			}
... ...
@@ -49,7 +48,7 @@ func (daemon *Daemon) ContainerCreate(ctx context.Context, name string, config *
49 49
 }
50 50
 
51 51
 // Create creates a new container from the given configuration with a given name.
52
-func (daemon *Daemon) Create(ctx context.Context, config *runconfig.Config, hostConfig *runconfig.HostConfig, name string) (retC *Container, retS []string, retErr error) {
52
+func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.HostConfig, name string) (retC *Container, retS []string, retErr error) {
53 53
 	var (
54 54
 		container *Container
55 55
 		warnings  []string
... ...
@@ -77,29 +76,29 @@ func (daemon *Daemon) Create(ctx context.Context, config *runconfig.Config, host
77 77
 		hostConfig = &runconfig.HostConfig{}
78 78
 	}
79 79
 	if hostConfig.SecurityOpt == nil {
80
-		hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(ctx, hostConfig.IpcMode, hostConfig.PidMode)
80
+		hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode)
81 81
 		if err != nil {
82 82
 			return nil, nil, err
83 83
 		}
84 84
 	}
85
-	if container, err = daemon.newContainer(ctx, name, config, imgID); err != nil {
85
+	if container, err = daemon.newContainer(name, config, imgID); err != nil {
86 86
 		return nil, nil, err
87 87
 	}
88 88
 	defer func() {
89 89
 		if retErr != nil {
90
-			if err := daemon.rm(ctx, container, false); err != nil {
90
+			if err := daemon.rm(container, false); err != nil {
91 91
 				logrus.Errorf("Clean up Error! Cannot destroy container %s: %v", container.ID, err)
92 92
 			}
93 93
 		}
94 94
 	}()
95 95
 
96
-	if err := daemon.Register(ctx, container); err != nil {
96
+	if err := daemon.Register(container); err != nil {
97 97
 		return nil, nil, err
98 98
 	}
99 99
 	if err := daemon.createRootfs(container); err != nil {
100 100
 		return nil, nil, err
101 101
 	}
102
-	if err := daemon.setHostConfig(ctx, container, hostConfig); err != nil {
102
+	if err := daemon.setHostConfig(container, hostConfig); err != nil {
103 103
 		return nil, nil, err
104 104
 	}
105 105
 	defer func() {
... ...
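Create above relies on the named return value retErr plus stacked defers to roll back partially created state (registered container, rootfs) when a later step fails. The same idiom in isolation, with hypothetical step names:

package main

import (
	"errors"
	"fmt"
)

// createThing shows the named-error plus deferred-rollback idiom from
// Daemon.Create: each defer only undoes its step when retErr is set.
func createThing(failLate bool) (retErr error) {
	fmt.Println("register")
	defer func() {
		if retErr != nil {
			fmt.Println("rollback: unregister")
		}
	}()

	fmt.Println("create rootfs")
	defer func() {
		if retErr != nil {
			fmt.Println("rollback: remove rootfs")
		}
	}()

	if failLate {
		return errors.New("set host config failed")
	}
	return nil
}

func main() {
	fmt.Println("err:", createThing(true))
	fmt.Println("err:", createThing(false))
}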
@@ -109,10 +108,10 @@ func (daemon *Daemon) Create(ctx context.Context, config *runconfig.Config, host
109 109
 			}
110 110
 		}
111 111
 	}()
112
-	if err := container.Mount(ctx); err != nil {
112
+	if err := container.Mount(); err != nil {
113 113
 		return nil, nil, err
114 114
 	}
115
-	defer container.Unmount(ctx)
115
+	defer container.Unmount()
116 116
 
117 117
 	if err := createContainerPlatformSpecificSettings(container, config, hostConfig, img); err != nil {
118 118
 		return nil, nil, err
... ...
@@ -122,16 +121,16 @@ func (daemon *Daemon) Create(ctx context.Context, config *runconfig.Config, host
122 122
 		logrus.Errorf("Error saving new container to disk: %v", err)
123 123
 		return nil, nil, err
124 124
 	}
125
-	container.logEvent(ctx, "create")
125
+	container.logEvent("create")
126 126
 	return container, warnings, nil
127 127
 }
128 128
 
129
-func (daemon *Daemon) generateSecurityOpt(ctx context.Context, ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) {
129
+func (daemon *Daemon) generateSecurityOpt(ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) {
130 130
 	if ipcMode.IsHost() || pidMode.IsHost() {
131 131
 		return label.DisableSecOpt(), nil
132 132
 	}
133 133
 	if ipcContainer := ipcMode.Container(); ipcContainer != "" {
134
-		c, err := daemon.Get(ctx, ipcContainer)
134
+		c, err := daemon.Get(ipcContainer)
135 135
 		if err != nil {
136 136
 			return nil, err
137 137
 		}
... ...
@@ -143,7 +142,7 @@ func (daemon *Daemon) generateSecurityOpt(ctx context.Context, ipcMode runconfig
143 143
 
144 144
 // VolumeCreate creates a volume with the specified name, driver, and opts
145 145
 // This is called directly from the remote API
146
-func (daemon *Daemon) VolumeCreate(ctx context.Context, name, driverName string, opts map[string]string) (*types.Volume, error) {
146
+func (daemon *Daemon) VolumeCreate(name, driverName string, opts map[string]string) (*types.Volume, error) {
147 147
 	if name == "" {
148 148
 		name = stringid.GenerateNonCryptoID()
149 149
 	}
... ...
@@ -20,7 +20,6 @@ import (
20 20
 
21 21
 	"github.com/Sirupsen/logrus"
22 22
 	"github.com/docker/docker/api"
23
-	"github.com/docker/docker/context"
24 23
 	"github.com/docker/docker/daemon/events"
25 24
 	"github.com/docker/docker/daemon/execdriver"
26 25
 	"github.com/docker/docker/daemon/execdriver/execdrivers"
... ...
@@ -129,14 +128,14 @@ type Daemon struct {
129 129
 //  - A partial container ID prefix (e.g. short ID) of any length that is
130 130
 //    unique enough to only return a single container object
131 131
 //  If none of these searches succeed, an error is returned
132
-func (daemon *Daemon) Get(ctx context.Context, prefixOrName string) (*Container, error) {
132
+func (daemon *Daemon) Get(prefixOrName string) (*Container, error) {
133 133
 	if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil {
134 134
 		// prefix is an exact match to a full container ID
135 135
 		return containerByID, nil
136 136
 	}
137 137
 
138 138
 	// GetByName will match only an exact name provided; we ignore errors
139
-	if containerByName, _ := daemon.GetByName(ctx, prefixOrName); containerByName != nil {
139
+	if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil {
140 140
 		// prefix is an exact match to a full container Name
141 141
 		return containerByName, nil
142 142
 	}
... ...
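Get above resolves a reference in three passes: exact ID, exact name, then a unique ID prefix. A compact sketch of that resolution order over plain maps, standing in for the daemon's container store and truncindex:

package main

import (
	"errors"
	"fmt"
	"strings"
)

type store struct {
	byID   map[string]string // ID -> container name
	byName map[string]string // name -> ID
}

// get mirrors Daemon.Get: exact ID, then exact name, then a prefix
// that matches exactly one ID.
func (s *store) get(ref string) (string, error) {
	if _, ok := s.byID[ref]; ok {
		return ref, nil
	}
	if id, ok := s.byName[ref]; ok {
		return id, nil
	}
	var matches []string
	for id := range s.byID {
		if strings.HasPrefix(id, ref) {
			matches = append(matches, id)
		}
	}
	switch len(matches) {
	case 1:
		return matches[0], nil
	case 0:
		return "", errors.New("no such container: " + ref)
	default:
		return "", errors.New("ambiguous prefix: " + ref)
	}
}

func main() {
	s := &store{
		byID:   map[string]string{"4a1f3b9c": "web", "4b22aa01": "db"},
		byName: map[string]string{"web": "4a1f3b9c", "db": "4b22aa01"},
	}
	fmt.Println(s.get("web"))
	fmt.Println(s.get("4a"))
	fmt.Println(s.get("4"))
}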
@@ -154,8 +153,8 @@ func (daemon *Daemon) Get(ctx context.Context, prefixOrName string) (*Container,
154 154
 
155 155
 // Exists returns a true if a container of the specified ID or name exists,
156 156
 // false otherwise.
157
-func (daemon *Daemon) Exists(ctx context.Context, id string) bool {
158
-	c, _ := daemon.Get(ctx, id)
157
+func (daemon *Daemon) Exists(id string) bool {
158
+	c, _ := daemon.Get(id)
159 159
 	return c != nil
160 160
 }
161 161
 
... ...
@@ -180,8 +179,8 @@ func (daemon *Daemon) load(id string) (*Container, error) {
180 180
 }
181 181
 
182 182
 // Register makes a container object usable by the daemon as <container.ID>
183
-func (daemon *Daemon) Register(ctx context.Context, container *Container) error {
184
-	if container.daemon != nil || daemon.Exists(ctx, container.ID) {
183
+func (daemon *Daemon) Register(container *Container) error {
184
+	if container.daemon != nil || daemon.Exists(container.ID) {
185 185
 		return fmt.Errorf("Container is already loaded")
186 186
 	}
187 187
 	if err := validateID(container.ID); err != nil {
... ...
@@ -219,7 +218,10 @@ func (daemon *Daemon) Register(ctx context.Context, container *Container) error
219 219
 		}
220 220
 		daemon.execDriver.Terminate(cmd)
221 221
 
222
-		if err := container.Unmount(ctx); err != nil {
222
+		if err := container.unmountIpcMounts(); err != nil {
223
+			logrus.Errorf("%s: Failed to umount ipc filesystems: %v", container.ID, err)
224
+		}
225
+		if err := container.Unmount(); err != nil {
223 226
 			logrus.Debugf("unmount error %s", err)
224 227
 		}
225 228
 		if err := container.toDiskLocking(); err != nil {
... ...
@@ -253,7 +255,7 @@ func (daemon *Daemon) ensureName(container *Container) error {
253 253
 	return nil
254 254
 }
255 255
 
256
-func (daemon *Daemon) restore(ctx context.Context) error {
256
+func (daemon *Daemon) restore() error {
257 257
 	type cr struct {
258 258
 		container  *Container
259 259
 		registered bool
... ...
@@ -323,7 +325,7 @@ func (daemon *Daemon) restore(ctx context.Context) error {
323 323
 				}
324 324
 			}
325 325
 
326
-			if err := daemon.Register(ctx, container); err != nil {
326
+			if err := daemon.Register(container); err != nil {
327 327
 				logrus.Errorf("Failed to register container %s: %s", container.ID, err)
328 328
 				// The container register failed should not be started.
329 329
 				return
... ...
@@ -334,7 +336,7 @@ func (daemon *Daemon) restore(ctx context.Context) error {
334 334
 			if daemon.configStore.AutoRestart && container.shouldRestart() {
335 335
 				logrus.Debugf("Starting container %s", container.ID)
336 336
 
337
-				if err := container.Start(ctx); err != nil {
337
+				if err := container.Start(); err != nil {
338 338
 					logrus.Errorf("Failed to start container %s: %s", container.ID, err)
339 339
 				}
340 340
 			}
... ...
@@ -364,7 +366,7 @@ func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image.
364 364
 	return nil
365 365
 }
366 366
 
367
-func (daemon *Daemon) generateIDAndName(ctx context.Context, name string) (string, string, error) {
367
+func (daemon *Daemon) generateIDAndName(name string) (string, string, error) {
368 368
 	var (
369 369
 		err error
370 370
 		id  = stringid.GenerateNonCryptoID()
... ...
@@ -377,14 +379,14 @@ func (daemon *Daemon) generateIDAndName(ctx context.Context, name string) (strin
377 377
 		return id, name, nil
378 378
 	}
379 379
 
380
-	if name, err = daemon.reserveName(ctx, id, name); err != nil {
380
+	if name, err = daemon.reserveName(id, name); err != nil {
381 381
 		return "", "", err
382 382
 	}
383 383
 
384 384
 	return id, name, nil
385 385
 }
386 386
 
387
-func (daemon *Daemon) reserveName(ctx context.Context, id, name string) (string, error) {
387
+func (daemon *Daemon) reserveName(id, name string) (string, error) {
388 388
 	if !validContainerNamePattern.MatchString(name) {
389 389
 		return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars)
390 390
 	}
... ...
@@ -398,7 +400,7 @@ func (daemon *Daemon) reserveName(ctx context.Context, id, name string) (string,
398 398
 			return "", err
399 399
 		}
400 400
 
401
-		conflictingContainer, err := daemon.GetByName(ctx, name)
401
+		conflictingContainer, err := daemon.GetByName(name)
402 402
 		if err != nil {
403 403
 			if strings.Contains(err.Error(), "Could not find entity") {
404 404
 				return "", err
... ...
@@ -468,12 +470,12 @@ func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint *stringutils.StrSlic
468 468
 	return entrypoint, args
469 469
 }
470 470
 
471
-func (daemon *Daemon) newContainer(ctx context.Context, name string, config *runconfig.Config, imgID string) (*Container, error) {
471
+func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID string) (*Container, error) {
472 472
 	var (
473 473
 		id  string
474 474
 		err error
475 475
 	)
476
-	id, name, err = daemon.generateIDAndName(ctx, name)
476
+	id, name, err = daemon.generateIDAndName(name)
477 477
 	if err != nil {
478 478
 		return nil, err
479 479
 	}
... ...
@@ -510,7 +512,7 @@ func GetFullContainerName(name string) (string, error) {
510 510
 }
511 511
 
512 512
 // GetByName returns a container given a name.
513
-func (daemon *Daemon) GetByName(ctx context.Context, name string) (*Container, error) {
513
+func (daemon *Daemon) GetByName(name string) (*Container, error) {
514 514
 	fullName, err := GetFullContainerName(name)
515 515
 	if err != nil {
516 516
 		return nil, err
... ...
@@ -529,7 +531,7 @@ func (daemon *Daemon) GetByName(ctx context.Context, name string) (*Container, e
529 529
 // children returns all child containers of the container with the
530 530
 // given name. The containers are returned as a map from the container
531 531
 // name to a pointer to Container.
532
-func (daemon *Daemon) children(ctx context.Context, name string) (map[string]*Container, error) {
532
+func (daemon *Daemon) children(name string) (map[string]*Container, error) {
533 533
 	name, err := GetFullContainerName(name)
534 534
 	if err != nil {
535 535
 		return nil, err
... ...
@@ -537,7 +539,7 @@ func (daemon *Daemon) children(ctx context.Context, name string) (map[string]*Co
537 537
 	children := make(map[string]*Container)
538 538
 
539 539
 	err = daemon.containerGraphDB.Walk(name, func(p string, e *graphdb.Entity) error {
540
-		c, err := daemon.Get(ctx, e.ID())
540
+		c, err := daemon.Get(e.ID())
541 541
 		if err != nil {
542 542
 			return err
543 543
 		}
... ...
@@ -573,7 +575,7 @@ func (daemon *Daemon) registerLink(parent, child *Container, alias string) error
573 573
 
574 574
 // NewDaemon sets up everything for the daemon to be able to service
575 575
 // requests from the webserver.
576
-func NewDaemon(ctx context.Context, config *Config, registryService *registry.Service) (daemon *Daemon, err error) {
576
+func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemon, err error) {
577 577
 	setDefaultMtu(config)
578 578
 
579 579
 	// Ensure we have compatible configuration options
... ...
@@ -641,7 +643,7 @@ func NewDaemon(ctx context.Context, config *Config, registryService *registry.Se
641 641
 	// Ensure the graph driver is shutdown at a later point
642 642
 	defer func() {
643 643
 		if err != nil {
644
-			if err := d.Shutdown(ctx); err != nil {
644
+			if err := d.Shutdown(); err != nil {
645 645
 				logrus.Error(err)
646 646
 			}
647 647
 		}
... ...
@@ -785,7 +787,7 @@ func NewDaemon(ctx context.Context, config *Config, registryService *registry.Se
785 785
 
786 786
 	go d.execCommandGC()
787 787
 
788
-	if err := d.restore(ctx); err != nil {
788
+	if err := d.restore(); err != nil {
789 789
 		return nil, err
790 790
 	}
791 791
 
... ...
@@ -793,12 +795,12 @@ func NewDaemon(ctx context.Context, config *Config, registryService *registry.Se
793 793
 }
794 794
 
795 795
 // Shutdown stops the daemon.
796
-func (daemon *Daemon) Shutdown(ctx context.Context) error {
796
+func (daemon *Daemon) Shutdown() error {
797 797
 	daemon.shutdown = true
798 798
 	if daemon.containers != nil {
799 799
 		group := sync.WaitGroup{}
800 800
 		logrus.Debug("starting clean shutdown of all containers...")
801
-		for _, container := range daemon.List(ctx) {
801
+		for _, container := range daemon.List() {
802 802
 			c := container
803 803
 			if c.IsRunning() {
804 804
 				logrus.Debugf("stopping %s", c.ID)
... ...
@@ -821,7 +823,7 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error {
821 821
 							logrus.Debugf("sending SIGTERM to container %s with error: %v", c.ID, err)
822 822
 							return
823 823
 						}
824
-						if err := c.unpause(ctx); err != nil {
824
+						if err := c.unpause(); err != nil {
825 825
 							logrus.Debugf("Failed to unpause container %s with error: %v", c.ID, err)
826 826
 							return
827 827
 						}
... ...
@@ -836,7 +838,7 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error {
836 836
 						}
837 837
 					} else {
838 838
 						// If container failed to exit in 10 seconds of SIGTERM, then using the force
839
-						if err := c.Stop(ctx, 10); err != nil {
839
+						if err := c.Stop(10); err != nil {
840 840
 							logrus.Errorf("Stop container %s with error: %v", c.ID, err)
841 841
 						}
842 842
 					}
... ...
@@ -874,7 +876,7 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error {
874 874
 
875 875
 // Mount sets container.basefs
876 876
 // (is it not set coming in? why is it unset?)
877
-func (daemon *Daemon) Mount(ctx context.Context, container *Container) error {
877
+func (daemon *Daemon) Mount(container *Container) error {
878 878
 	dir, err := daemon.driver.Get(container.ID, container.getMountLabel())
879 879
 	if err != nil {
880 880
 		return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, daemon.driver, err)
... ...
@@ -899,14 +901,14 @@ func (daemon *Daemon) unmount(container *Container) error {
899 899
 	return nil
900 900
 }
901 901
 
902
-func (daemon *Daemon) run(ctx context.Context, c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
902
+func (daemon *Daemon) run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
903 903
 	hooks := execdriver.Hooks{
904 904
 		Start: startCallback,
905 905
 	}
906
-	hooks.PreStart = append(hooks.PreStart, func(ctx context.Context, processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
906
+	hooks.PreStart = append(hooks.PreStart, func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
907 907
 		return c.setNetworkNamespaceKey(pid)
908 908
 	})
909
-	return daemon.execDriver.Run(ctx, c.command, pipes, hooks)
909
+	return daemon.execDriver.Run(c.command, pipes, hooks)
910 910
 }
911 911
 
912 912
 func (daemon *Daemon) kill(c *Container, sig int) error {
... ...
@@ -973,12 +975,12 @@ func (daemon *Daemon) createRootfs(container *Container) error {
973 973
 // which need direct access to daemon.graph.
974 974
 // Once the tests switch to using engine and jobs, this method
975 975
 // can go away.
976
-func (daemon *Daemon) Graph(ctx context.Context) *graph.Graph {
976
+func (daemon *Daemon) Graph() *graph.Graph {
977 977
 	return daemon.graph
978 978
 }
979 979
 
980 980
 // Repositories returns all repositories.
981
-func (daemon *Daemon) Repositories(ctx context.Context) *graph.TagStore {
981
+func (daemon *Daemon) Repositories() *graph.TagStore {
982 982
 	return daemon.repositories
983 983
 }
984 984
 
... ...
@@ -992,13 +994,13 @@ func (daemon *Daemon) systemInitPath() string {
992 992
 
993 993
 // GraphDriver returns the currently used driver for processing
994 994
 // container layers.
995
-func (daemon *Daemon) GraphDriver(ctx context.Context) graphdriver.Driver {
995
+func (daemon *Daemon) GraphDriver() graphdriver.Driver {
996 996
 	return daemon.driver
997 997
 }
998 998
 
999 999
 // ExecutionDriver returns the currently used driver for creating and
1000 1000
 // starting execs in a container.
1001
-func (daemon *Daemon) ExecutionDriver(ctx context.Context) execdriver.Driver {
1001
+func (daemon *Daemon) ExecutionDriver() execdriver.Driver {
1002 1002
 	return daemon.execDriver
1003 1003
 }
1004 1004
 
... ...
@@ -1010,9 +1012,9 @@ func (daemon *Daemon) containerGraph() *graphdb.Database {
1010 1010
 // of the image with imgID, that had the same config when it was
1011 1011
 // created. nil is returned if a child cannot be found. An error is
1012 1012
 // returned if the parent image cannot be found.
1013
-func (daemon *Daemon) ImageGetCached(ctx context.Context, imgID string, config *runconfig.Config) (*image.Image, error) {
1013
+func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
1014 1014
 	// Retrieve all images
1015
-	images := daemon.Graph(ctx).Map()
1015
+	images := daemon.Graph().Map()
1016 1016
 
1017 1017
 	// Store the tree in a map of map (map[parentId][childId])
1018 1018
 	imageMap := make(map[string]map[string]struct{})
... ...
@@ -1048,7 +1050,7 @@ func tempDir(rootDir string) (string, error) {
1048 1048
 	return tmpDir, system.MkdirAll(tmpDir, 0700)
1049 1049
 }
1050 1050
 
1051
-func (daemon *Daemon) setHostConfig(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
1051
+func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
1052 1052
 	container.Lock()
1053 1053
 	if err := parseSecurityOpt(container, hostConfig); err != nil {
1054 1054
 		container.Unlock()
... ...
@@ -1058,14 +1060,14 @@ func (daemon *Daemon) setHostConfig(ctx context.Context, container *Container, h
1058 1058
 
1059 1059
 	// Do not lock while creating volumes since this could be calling out to external plugins
1060 1060
 	// Don't want to block other actions, like `docker ps` because we're waiting on an external plugin
1061
-	if err := daemon.registerMountPoints(ctx, container, hostConfig); err != nil {
1061
+	if err := daemon.registerMountPoints(container, hostConfig); err != nil {
1062 1062
 		return err
1063 1063
 	}
1064 1064
 
1065 1065
 	container.Lock()
1066 1066
 	defer container.Unlock()
1067 1067
 	// Register any links from the host config before starting the container
1068
-	if err := daemon.registerLinks(ctx, container, hostConfig); err != nil {
1068
+	if err := daemon.registerLinks(container, hostConfig); err != nil {
1069 1069
 		return err
1070 1070
 	}
1071 1071
 
... ...
@@ -1089,7 +1091,7 @@ var errNoDefaultRoute = errors.New("no default route was found")
1089 1089
 
1090 1090
 // verifyContainerSettings performs validation of the hostconfig and config
1091 1091
 // structures.
1092
-func (daemon *Daemon) verifyContainerSettings(ctx context.Context, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
1092
+func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
1093 1093
 
1094 1094
 	// First perform verification of settings common across all platforms.
1095 1095
 	if config != nil {
... ...
@@ -1126,7 +1128,7 @@ func (daemon *Daemon) verifyContainerSettings(ctx context.Context, hostConfig *r
1126 1126
 	}
1127 1127
 
1128 1128
 	// Now do platform-specific verification
1129
-	return verifyPlatformContainerSettings(ctx, daemon, hostConfig, config)
1129
+	return verifyPlatformContainerSettings(daemon, hostConfig, config)
1130 1130
 }
1131 1131
 
1132 1132
 func configureVolumes(config *Config) (*store.VolumeStore, error) {
... ...
@@ -8,7 +8,6 @@ import (
8 8
 	"path/filepath"
9 9
 	"testing"
10 10
 
11
-	"github.com/docker/docker/context"
12 11
 	"github.com/docker/docker/pkg/graphdb"
13 12
 	"github.com/docker/docker/pkg/stringid"
14 13
 	"github.com/docker/docker/pkg/truncindex"
... ...
@@ -93,34 +92,32 @@ func TestGet(t *testing.T) {
93 93
 		containerGraphDB: graph,
94 94
 	}
95 95
 
96
-	ctx := context.Background()
97
-
98
-	if container, _ := daemon.Get(ctx, "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 {
96
+	if container, _ := daemon.Get("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 {
99 97
 		t.Fatal("Should explicitly match full container IDs")
100 98
 	}
101 99
 
102
-	if container, _ := daemon.Get(ctx, "75fb0b8009"); container != c4 {
100
+	if container, _ := daemon.Get("75fb0b8009"); container != c4 {
103 101
 		t.Fatal("Should match a partial ID")
104 102
 	}
105 103
 
106
-	if container, _ := daemon.Get(ctx, "drunk_hawking"); container != c2 {
104
+	if container, _ := daemon.Get("drunk_hawking"); container != c2 {
107 105
 		t.Fatal("Should match a full name")
108 106
 	}
109 107
 
110 108
 	// c3.Name is a partial match for both c3.ID and c2.ID
111
-	if c, _ := daemon.Get(ctx, "3cdbd1aa"); c != c3 {
109
+	if c, _ := daemon.Get("3cdbd1aa"); c != c3 {
112 110
 		t.Fatal("Should match a full name even though it collides with another container's ID")
113 111
 	}
114 112
 
115
-	if container, _ := daemon.Get(ctx, "d22d69a2b896"); container != c5 {
113
+	if container, _ := daemon.Get("d22d69a2b896"); container != c5 {
116 114
 		t.Fatal("Should match a container where the provided prefix is an exact match to the it's name, and is also a prefix for it's ID")
117 115
 	}
118 116
 
119
-	if _, err := daemon.Get(ctx, "3cdbd1"); err == nil {
117
+	if _, err := daemon.Get("3cdbd1"); err == nil {
120 118
 		t.Fatal("Should return an error when provided a prefix that partially matches multiple container ID's")
121 119
 	}
122 120
 
123
-	if _, err := daemon.Get(ctx, "nothing"); err == nil {
121
+	if _, err := daemon.Get("nothing"); err == nil {
124 122
 		t.Fatal("Should return an error when provided a prefix that is neither a name or a partial match to an ID")
125 123
 	}
126 124
 
... ...
@@ -489,15 +486,13 @@ func TestRemoveLocalVolumesFollowingSymlinks(t *testing.T) {
489 489
 		t.Fatalf("Expected 1 volume mounted, was 0\n")
490 490
 	}
491 491
 
492
-	ctx := context.Background()
493
-
494 492
 	m := c.MountPoints["/vol1"]
495
-	_, err = daemon.VolumeCreate(ctx, m.Name, m.Driver, nil)
493
+	_, err = daemon.VolumeCreate(m.Name, m.Driver, nil)
496 494
 	if err != nil {
497 495
 		t.Fatal(err)
498 496
 	}
499 497
 
500
-	if err := daemon.VolumeRm(ctx, m.Name); err != nil {
498
+	if err := daemon.VolumeRm(m.Name); err != nil {
501 499
 		t.Fatal(err)
502 500
 	}
503 501
 
... ...
@@ -13,7 +13,6 @@ import (
13 13
 
14 14
 	"github.com/Sirupsen/logrus"
15 15
 	"github.com/docker/docker/autogen/dockerversion"
16
-	"github.com/docker/docker/context"
17 16
 	"github.com/docker/docker/daemon/graphdriver"
18 17
 	"github.com/docker/docker/pkg/fileutils"
19 18
 	"github.com/docker/docker/pkg/parsers"
... ...
@@ -119,12 +118,12 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *runconfig.HostConfig, a
119 119
 
120 120
 // verifyPlatformContainerSettings performs platform-specific validation of the
121 121
 // hostconfig and config structures.
122
-func verifyPlatformContainerSettings(ctx context.Context, daemon *Daemon, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
122
+func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
123 123
 	warnings := []string{}
124 124
 	sysInfo := sysinfo.New(true)
125 125
 
126
-	if hostConfig.LxcConf.Len() > 0 && !strings.Contains(daemon.ExecutionDriver(ctx).Name(), "lxc") {
127
-		return warnings, fmt.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver(ctx).Name())
126
+	if hostConfig.LxcConf.Len() > 0 && !strings.Contains(daemon.ExecutionDriver().Name(), "lxc") {
127
+		return warnings, fmt.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver().Name())
128 128
 	}
129 129
 
130 130
 	// memory subsystem checks and adjustments
... ...
@@ -492,12 +491,12 @@ func setupInitLayer(initLayer string) error {
492 492
 
493 493
 // NetworkAPIRouter implements a feature for server-experimental,
494 494
 // directly calling into libnetwork.
495
-func (daemon *Daemon) NetworkAPIRouter(ctx context.Context) func(w http.ResponseWriter, req *http.Request) {
495
+func (daemon *Daemon) NetworkAPIRouter() func(w http.ResponseWriter, req *http.Request) {
496 496
 	return nwapi.NewHTTPHandler(daemon.netController)
497 497
 }
498 498
 
499 499
 // registerLinks writes the links to a file.
500
-func (daemon *Daemon) registerLinks(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
500
+func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error {
501 501
 	if hostConfig == nil || hostConfig.Links == nil {
502 502
 		return nil
503 503
 	}
... ...
@@ -507,14 +506,14 @@ func (daemon *Daemon) registerLinks(ctx context.Context, container *Container, h
507 507
 		if err != nil {
508 508
 			return err
509 509
 		}
510
-		child, err := daemon.Get(ctx, name)
510
+		child, err := daemon.Get(name)
511 511
 		if err != nil {
512 512
 			//An error from daemon.Get() means this name could not be found
513 513
 			return fmt.Errorf("Could not get container for %s", name)
514 514
 		}
515 515
 		for child.hostConfig.NetworkMode.IsContainer() {
516 516
 			parts := strings.SplitN(string(child.hostConfig.NetworkMode), ":", 2)
517
-			child, err = daemon.Get(ctx, parts[1])
517
+			child, err = daemon.Get(parts[1])
518 518
 			if err != nil {
519 519
 				return fmt.Errorf("Could not get container for %s", parts[1])
520 520
 			}
... ...
@@ -6,7 +6,6 @@ import (
6 6
 	"syscall"
7 7
 
8 8
 	"github.com/Sirupsen/logrus"
9
-	"github.com/docker/docker/context"
10 9
 	"github.com/docker/docker/daemon/graphdriver"
11 10
 	// register the windows graph driver
12 11
 	_ "github.com/docker/docker/daemon/graphdriver/windows"
... ...
@@ -48,7 +47,7 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *runconfig.HostConfig, a
48 48
 
49 49
 // verifyPlatformContainerSettings performs platform-specific validation of the
50 50
 // hostconfig and config structures.
51
-func verifyPlatformContainerSettings(ctx context.Context, daemon *Daemon, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
51
+func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
52 52
 	return nil, nil
53 53
 }
54 54
 
... ...
@@ -105,7 +104,7 @@ func initNetworkController(config *Config) (libnetwork.NetworkController, error)
105 105
 
106 106
 // registerLinks sets up links between containers and writes the
107 107
 // configuration out for persistence.
108
-func (daemon *Daemon) registerLinks(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
108
+func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error {
109 109
 	// TODO Windows. Factored out for network modes. There may be more
110 110
 	// refactoring required here.
111 111
 
... ...
@@ -118,7 +117,7 @@ func (daemon *Daemon) registerLinks(ctx context.Context, container *Container, h
118 118
 		if err != nil {
119 119
 			return err
120 120
 		}
121
-		child, err := daemon.Get(ctx, name)
121
+		child, err := daemon.Get(name)
122 122
 		if err != nil {
123 123
 			//An error from daemon.Get() means this name could not be found
124 124
 			return fmt.Errorf("Could not get container for %s", name)
... ...
@@ -5,8 +5,6 @@ import (
5 5
 	"os"
6 6
 	"path"
7 7
 
8
-	"github.com/docker/docker/context"
9
-
10 8
 	"github.com/Sirupsen/logrus"
11 9
 	derr "github.com/docker/docker/errors"
12 10
 	"github.com/docker/docker/volume/store"
... ...
@@ -21,8 +19,8 @@ type ContainerRmConfig struct {
21 21
 // is returned if the container is not found, or if the remove
22 22
 // fails. If the remove succeeds, the container name is released, and
23 23
 // network links are removed.
24
-func (daemon *Daemon) ContainerRm(ctx context.Context, name string, config *ContainerRmConfig) error {
25
-	container, err := daemon.Get(ctx, name)
24
+func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error {
25
+	container, err := daemon.Get(name)
26 26
 	if err != nil {
27 27
 		return err
28 28
 	}
... ...
@@ -45,9 +43,9 @@ func (daemon *Daemon) ContainerRm(ctx context.Context, name string, config *Cont
45 45
 			return err
46 46
 		}
47 47
 
48
-		parentContainer, _ := daemon.Get(ctx, pe.ID())
48
+		parentContainer, _ := daemon.Get(pe.ID())
49 49
 		if parentContainer != nil {
50
-			if err := parentContainer.updateNetwork(ctx); err != nil {
50
+			if err := parentContainer.updateNetwork(); err != nil {
51 51
 				logrus.Debugf("Could not update network to remove link %s: %v", n, err)
52 52
 			}
53 53
 		}
... ...
@@ -55,7 +53,7 @@ func (daemon *Daemon) ContainerRm(ctx context.Context, name string, config *Cont
55 55
 		return nil
56 56
 	}
57 57
 
58
-	if err := daemon.rm(ctx, container, config.ForceRemove); err != nil {
58
+	if err := daemon.rm(container, config.ForceRemove); err != nil {
59 59
 		// return derr.ErrorCodeCantDestroy.WithArgs(name, utils.GetErrorMessage(err))
60 60
 		return err
61 61
 	}
... ...
@@ -68,12 +66,12 @@ func (daemon *Daemon) ContainerRm(ctx context.Context, name string, config *Cont
68 68
 }
69 69
 
70 70
 // Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem.
71
-func (daemon *Daemon) rm(ctx context.Context, container *Container, forceRemove bool) (err error) {
71
+func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
72 72
 	if container.IsRunning() {
73 73
 		if !forceRemove {
74 74
 			return derr.ErrorCodeRmRunning
75 75
 		}
76
-		if err := container.Kill(ctx); err != nil {
76
+		if err := container.Kill(); err != nil {
77 77
 			return derr.ErrorCodeRmFailed.WithArgs(err)
78 78
 		}
79 79
 	}
... ...
@@ -94,7 +92,7 @@ func (daemon *Daemon) rm(ctx context.Context, container *Container, forceRemove
94 94
 
95 95
 	defer container.resetRemovalInProgress()
96 96
 
97
-	if err = container.Stop(ctx, 3); err != nil {
97
+	if err = container.Stop(3); err != nil {
98 98
 		return err
99 99
 	}
100 100
 
... ...
@@ -115,7 +113,7 @@ func (daemon *Daemon) rm(ctx context.Context, container *Container, forceRemove
115 115
 			daemon.idIndex.Delete(container.ID)
116 116
 			daemon.containers.Delete(container.ID)
117 117
 			os.RemoveAll(container.root)
118
-			container.logEvent(ctx, "destroy")
118
+			container.logEvent("destroy")
119 119
 		}
120 120
 	}()
121 121
 
... ...
@@ -144,14 +142,14 @@ func (daemon *Daemon) rm(ctx context.Context, container *Container, forceRemove
144 144
 	daemon.idIndex.Delete(container.ID)
145 145
 	daemon.containers.Delete(container.ID)
146 146
 
147
-	container.logEvent(ctx, "destroy")
147
+	container.logEvent("destroy")
148 148
 	return nil
149 149
 }
150 150
 
151 151
 // VolumeRm removes the volume with the given name.
152 152
 // If the volume is referenced by a container it is not removed
153 153
 // This is called directly from the remote API
154
-func (daemon *Daemon) VolumeRm(ctx context.Context, name string) error {
154
+func (daemon *Daemon) VolumeRm(name string) error {
155 155
 	v, err := daemon.volumes.Get(name)
156 156
 	if err != nil {
157 157
 		return err
... ...
@@ -4,8 +4,6 @@ import (
4 4
 	"sync"
5 5
 	"time"
6 6
 
7
-	"github.com/docker/docker/context"
8
-
9 7
 	"github.com/docker/docker/pkg/jsonmessage"
10 8
 	"github.com/docker/docker/pkg/pubsub"
11 9
 )
... ...
@@ -46,9 +44,9 @@ func (e *Events) Evict(l chan interface{}) {
46 46
 
47 47
 // Log broadcasts event to listeners. Each listener has 100 millisecond for
48 48
 // receiving event or it will be skipped.
49
-func (e *Events) Log(ctx context.Context, action, id, from string) {
49
+func (e *Events) Log(action, id, from string) {
50 50
 	now := time.Now().UTC()
51
-	jm := &jsonmessage.JSONMessage{RequestID: ctx.RequestID(), Status: action, ID: id, From: from, Time: now.Unix(), TimeNano: now.UnixNano()}
51
+	jm := &jsonmessage.JSONMessage{Status: action, ID: id, From: from, Time: now.Unix(), TimeNano: now.UnixNano()}
52 52
 	e.mu.Lock()
53 53
 	if len(e.events) == cap(e.events) {
54 54
 		// discard oldest event
... ...
@@ -5,12 +5,10 @@ import (
5 5
 	"testing"
6 6
 	"time"
7 7
 
8
-	"github.com/docker/docker/context"
9 8
 	"github.com/docker/docker/pkg/jsonmessage"
10 9
 )
11 10
 
12 11
 func TestEventsLog(t *testing.T) {
13
-	ctx := context.Background()
14 12
 	e := New()
15 13
 	_, l1 := e.Subscribe()
16 14
 	_, l2 := e.Subscribe()
... ...
@@ -20,7 +18,7 @@ func TestEventsLog(t *testing.T) {
20 20
 	if count != 2 {
21 21
 		t.Fatalf("Must be 2 subscribers, got %d", count)
22 22
 	}
23
-	e.Log(ctx, "test", "cont", "image")
23
+	e.Log("test", "cont", "image")
24 24
 	select {
25 25
 	case msg := <-l1:
26 26
 		jmsg, ok := msg.(*jsonmessage.JSONMessage)
... ...
@@ -66,14 +64,13 @@ func TestEventsLog(t *testing.T) {
66 66
 }
67 67
 
68 68
 func TestEventsLogTimeout(t *testing.T) {
69
-	ctx := context.Background()
70 69
 	e := New()
71 70
 	_, l := e.Subscribe()
72 71
 	defer e.Evict(l)
73 72
 
74 73
 	c := make(chan struct{})
75 74
 	go func() {
76
-		e.Log(ctx, "test", "cont", "image")
75
+		e.Log("test", "cont", "image")
77 76
 		close(c)
78 77
 	}()
79 78
 
... ...
@@ -85,14 +82,13 @@ func TestEventsLogTimeout(t *testing.T) {
85 85
 }
86 86
 
87 87
 func TestLogEvents(t *testing.T) {
88
-	ctx := context.Background()
89 88
 	e := New()
90 89
 
91 90
 	for i := 0; i < eventsLimit+16; i++ {
92 91
 		action := fmt.Sprintf("action_%d", i)
93 92
 		id := fmt.Sprintf("cont_%d", i)
94 93
 		from := fmt.Sprintf("image_%d", i)
95
-		e.Log(ctx, action, id, from)
94
+		e.Log(action, id, from)
96 95
 	}
97 96
 	time.Sleep(50 * time.Millisecond)
98 97
 	current, l := e.Subscribe()
... ...
@@ -101,7 +97,7 @@ func TestLogEvents(t *testing.T) {
101 101
 		action := fmt.Sprintf("action_%d", num)
102 102
 		id := fmt.Sprintf("cont_%d", num)
103 103
 		from := fmt.Sprintf("image_%d", num)
104
-		e.Log(ctx, action, id, from)
104
+		e.Log(action, id, from)
105 105
 	}
106 106
 	if len(e.events) != eventsLimit {
107 107
 		t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events))
... ...
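The two hunks above leave the events service with a context-free API: Log takes only (action, id, from) and the emitted jsonmessage.JSONMessage no longer carries a RequestID. A minimal usage sketch of the reverted API, mirroring TestEventsLog and TestLogEvents above; the github.com/docker/docker/daemon/events import path is an assumption (the package path is not shown in these hunks), everything else only uses the New, Subscribe, Evict and Log calls visible in the diff:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/docker/docker/daemon/events" // assumed import path; not shown in these hunks
    	"github.com/docker/docker/pkg/jsonmessage"
    )

    func main() {
    	e := events.New()

    	// Subscribe returns the buffered backlog and a live channel, as in TestLogEvents.
    	_, l := e.Subscribe()
    	defer e.Evict(l)

    	// After the revert there is no context.Context argument and no RequestID
    	// on the emitted message: Log takes only (action, id, from).
    	e.Log("create", "cont_1", "image_1")

    	select {
    	case m := <-l:
    		if jm, ok := m.(*jsonmessage.JSONMessage); ok {
    			fmt.Println(jm.Status, jm.ID, jm.From)
    		}
    	case <-time.After(time.Second):
    		fmt.Println("timed out waiting for event")
    	}
    }
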
@@ -8,7 +8,6 @@ import (
8 8
 	"time"
9 9
 
10 10
 	"github.com/Sirupsen/logrus"
11
-	"github.com/docker/docker/context"
12 11
 	"github.com/docker/docker/daemon/execdriver"
13 12
 	derr "github.com/docker/docker/errors"
14 13
 	"github.com/docker/docker/pkg/broadcastwriter"
... ...
@@ -118,8 +117,8 @@ func (d *Daemon) unregisterExecCommand(ExecConfig *ExecConfig) {
118 118
 	d.execCommands.Delete(ExecConfig.ID)
119 119
 }
120 120
 
121
-func (d *Daemon) getActiveContainer(ctx context.Context, name string) (*Container, error) {
122
-	container, err := d.Get(ctx, name)
121
+func (d *Daemon) getActiveContainer(name string) (*Container, error) {
122
+	container, err := d.Get(name)
123 123
 	if err != nil {
124 124
 		return nil, err
125 125
 	}
... ...
@@ -134,13 +133,13 @@ func (d *Daemon) getActiveContainer(ctx context.Context, name string) (*Containe
134 134
 }
135 135
 
136 136
 // ContainerExecCreate sets up an exec in a running container.
137
-func (d *Daemon) ContainerExecCreate(ctx context.Context, config *runconfig.ExecConfig) (string, error) {
137
+func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, error) {
138 138
 	// Not all drivers support Exec (LXC for example)
139 139
 	if err := checkExecSupport(d.execDriver.Name()); err != nil {
140 140
 		return "", err
141 141
 	}
142 142
 
143
-	container, err := d.getActiveContainer(ctx, config.Container)
143
+	container, err := d.getActiveContainer(config.Container)
144 144
 	if err != nil {
145 145
 		return "", err
146 146
 	}
... ...
@@ -175,14 +174,14 @@ func (d *Daemon) ContainerExecCreate(ctx context.Context, config *runconfig.Exec
175 175
 
176 176
 	d.registerExecCommand(ExecConfig)
177 177
 
178
-	container.logEvent(ctx, "exec_create: "+ExecConfig.ProcessConfig.Entrypoint+" "+strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
178
+	container.logEvent("exec_create: " + ExecConfig.ProcessConfig.Entrypoint + " " + strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
179 179
 
180 180
 	return ExecConfig.ID, nil
181 181
 }
182 182
 
183 183
 // ContainerExecStart starts a previously set up exec instance. The
184 184
 // std streams are set up.
185
-func (d *Daemon) ContainerExecStart(ctx context.Context, execName string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error {
185
+func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error {
186 186
 	var (
187 187
 		cStdin           io.ReadCloser
188 188
 		cStdout, cStderr io.Writer
... ...
@@ -208,7 +207,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, execName string, stdin
208 208
 	logrus.Debugf("starting exec command %s in container %s", ExecConfig.ID, ExecConfig.Container.ID)
209 209
 	container := ExecConfig.Container
210 210
 
211
-	container.logEvent(ctx, "exec_start: "+ExecConfig.ProcessConfig.Entrypoint+" "+strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
211
+	container.logEvent("exec_start: " + ExecConfig.ProcessConfig.Entrypoint + " " + strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
212 212
 
213 213
 	if ExecConfig.OpenStdin {
214 214
 		r, w := io.Pipe()
... ...
@@ -244,7 +243,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, execName string, stdin
244 244
 	// the exitStatus) even after the cmd is done running.
245 245
 
246 246
 	go func() {
247
-		if err := container.exec(ctx, ExecConfig); err != nil {
247
+		if err := container.exec(ExecConfig); err != nil {
248 248
 			execErr <- derr.ErrorCodeExecCantRun.WithArgs(execName, container.ID, err)
249 249
 		}
250 250
 	}()
... ...
@@ -268,11 +267,11 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, execName string, stdin
268 268
 }
269 269
 
270 270
 // Exec calls the underlying exec driver to run
271
-func (d *Daemon) Exec(ctx context.Context, c *Container, ExecConfig *ExecConfig, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (int, error) {
271
+func (d *Daemon) Exec(c *Container, ExecConfig *ExecConfig, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (int, error) {
272 272
 	hooks := execdriver.Hooks{
273 273
 		Start: startCallback,
274 274
 	}
275
-	exitStatus, err := d.execDriver.Exec(ctx, c.command, ExecConfig.ProcessConfig, pipes, hooks)
275
+	exitStatus, err := d.execDriver.Exec(c.command, ExecConfig.ProcessConfig, pipes, hooks)
276 276
 
277 277
 	// On err, make sure we don't leave ExitCode at zero
278 278
 	if err != nil && exitStatus == 0 {
... ...
@@ -7,7 +7,6 @@ import (
7 7
 	"time"
8 8
 
9 9
 	// TODO Windows: Factor out ulimit
10
-	"github.com/docker/docker/context"
11 10
 	"github.com/docker/docker/pkg/ulimit"
12 11
 	"github.com/opencontainers/runc/libcontainer"
13 12
 	"github.com/opencontainers/runc/libcontainer/configs"
... ...
@@ -30,7 +29,7 @@ var (
30 30
 // through PreStart, Start and PostStop events.
31 31
 // Callbacks are provided a processConfig pointer and the pid of the child.
32 32
 // The channel will be used to notify the OOM events.
33
-type DriverCallback func(ctx context.Context, processConfig *ProcessConfig, pid int, chOOM <-chan struct{}) error
33
+type DriverCallback func(processConfig *ProcessConfig, pid int, chOOM <-chan struct{}) error
34 34
 
35 35
 // Hooks is a struct containing function pointers to callbacks
36 36
 // used by any execdriver implementation exploiting hooks capabilities
... ...
@@ -70,11 +69,11 @@ type ExitStatus struct {
70 70
 type Driver interface {
71 71
 	// Run executes the process, blocks until the process exits and returns
72 72
 	// the exit code. It's the last stage on Docker side for running a container.
73
-	Run(ctx context.Context, c *Command, pipes *Pipes, hooks Hooks) (ExitStatus, error)
73
+	Run(c *Command, pipes *Pipes, hooks Hooks) (ExitStatus, error)
74 74
 
75 75
 	// Exec executes the process in an existing container, blocks until the
76 76
 	// process exits and returns the exit code.
77
-	Exec(ctx context.Context, c *Command, processConfig *ProcessConfig, pipes *Pipes, hooks Hooks) (int, error)
77
+	Exec(c *Command, processConfig *ProcessConfig, pipes *Pipes, hooks Hooks) (int, error)
78 78
 
79 79
 	// Kill sends signals to process in container.
80 80
 	Kill(c *Command, sig int) error
... ...
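With the hunks above, execdriver callbacks and hooks also drop their leading context.Context parameter. The following is a minimal sketch, not part of the diff, of building Hooks under the reverted signatures, mirroring daemon.run earlier in this revert; it relies only on names visible in these hunks (DriverCallback, Hooks.Start, Hooks.PreStart, ProcessConfig.Entrypoint, and the Run/Exec shapes) and on the execdriver import path shown in the import hunks:

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/daemon/execdriver"
    )

    func main() {
    	// Reverted shape: no leading context.Context parameter on the callback.
    	var start execdriver.DriverCallback = func(pc *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
    		fmt.Printf("started %q as pid %d\n", pc.Entrypoint, pid)
    		return nil
    	}

    	hooks := execdriver.Hooks{Start: start}

    	// PreStart hooks use the same signature, as in daemon.run above.
    	hooks.PreStart = append(hooks.PreStart, func(pc *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
    		// e.g. prepare per-process state before the container process starts
    		return nil
    	})

    	fmt.Printf("%d pre-start hook(s) registered\n", len(hooks.PreStart))

    	// A driver is then invoked as d.Run(cmd, pipes, hooks) and
    	// d.Exec(cmd, processConfig, pipes, hooks) per the reverted Driver interface.
    }
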
@@ -20,7 +20,6 @@ import (
20 20
 	"time"
21 21
 
22 22
 	"github.com/Sirupsen/logrus"
23
-	"github.com/docker/docker/context"
24 23
 	"github.com/docker/docker/daemon/execdriver"
25 24
 	"github.com/docker/docker/pkg/stringutils"
26 25
 	sysinfo "github.com/docker/docker/pkg/system"
... ...
@@ -126,7 +125,7 @@ func killNetNsProc(proc *os.Process) {
126 126
 
127 127
 // Run implements the exec driver Driver interface,
128 128
 // it calls 'exec.Cmd' to launch lxc commands to run a container.
129
-func (d *Driver) Run(ctx context.Context, c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
129
+func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
130 130
 	var (
131 131
 		term     execdriver.Terminal
132 132
 		err      error
... ...
@@ -330,7 +329,7 @@ func (d *Driver) Run(ctx context.Context, c *execdriver.Command, pipes *execdriv
330 330
 
331 331
 	if hooks.Start != nil {
332 332
 		logrus.Debugf("Invoking startCallback")
333
-		hooks.Start(ctx, &c.ProcessConfig, pid, oomKillNotification)
333
+		hooks.Start(&c.ProcessConfig, pid, oomKillNotification)
334 334
 
335 335
 	}
336 336
 
... ...
@@ -872,7 +871,7 @@ func (t *TtyConsole) Close() error {
872 872
 
873 873
 // Exec implements the exec driver Driver interface,
874 874
 // it is not implemented by lxc.
875
-func (d *Driver) Exec(ctx context.Context, c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
875
+func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
876 876
 	return -1, ErrExec
877 877
 }
878 878
 
... ...
@@ -9,7 +9,6 @@ import (
9 9
 	"strings"
10 10
 	"syscall"
11 11
 
12
-	"github.com/docker/docker/context"
13 12
 	"github.com/docker/docker/daemon/execdriver"
14 13
 	"github.com/opencontainers/runc/libcontainer/apparmor"
15 14
 	"github.com/opencontainers/runc/libcontainer/configs"
... ...
@@ -19,7 +18,7 @@ import (
19 19
 
20 20
 // createContainer populates and configures the container type with the
21 21
 // data provided by the execdriver.Command
22
-func (d *Driver) createContainer(ctx context.Context, c *execdriver.Command, hooks execdriver.Hooks) (*configs.Config, error) {
22
+func (d *Driver) createContainer(c *execdriver.Command, hooks execdriver.Hooks) (*configs.Config, error) {
23 23
 	container := execdriver.InitContainer(c)
24 24
 
25 25
 	if err := d.createIpc(container, c); err != nil {
... ...
@@ -34,7 +33,7 @@ func (d *Driver) createContainer(ctx context.Context, c *execdriver.Command, hoo
34 34
 		return nil, err
35 35
 	}
36 36
 
37
-	if err := d.createNetwork(ctx, container, c, hooks); err != nil {
37
+	if err := d.createNetwork(container, c, hooks); err != nil {
38 38
 		return nil, err
39 39
 	}
40 40
 
... ...
@@ -120,7 +119,7 @@ func generateIfaceName() (string, error) {
120 120
 	return "", errors.New("Failed to find name for new interface")
121 121
 }
122 122
 
123
-func (d *Driver) createNetwork(ctx context.Context, container *configs.Config, c *execdriver.Command, hooks execdriver.Hooks) error {
123
+func (d *Driver) createNetwork(container *configs.Config, c *execdriver.Command, hooks execdriver.Hooks) error {
124 124
 	if c.Network == nil {
125 125
 		return nil
126 126
 	}
... ...
@@ -157,7 +156,7 @@ func (d *Driver) createNetwork(ctx context.Context, container *configs.Config, c
157 157
 						// non-blocking and return the correct result when read.
158 158
 						chOOM := make(chan struct{})
159 159
 						close(chOOM)
160
-						if err := fnHook(ctx, &c.ProcessConfig, s.Pid, chOOM); err != nil {
160
+						if err := fnHook(&c.ProcessConfig, s.Pid, chOOM); err != nil {
161 161
 							return err
162 162
 						}
163 163
 					}
... ...
@@ -14,7 +14,6 @@ import (
14 14
 	"time"
15 15
 
16 16
 	"github.com/Sirupsen/logrus"
17
-	"github.com/docker/docker/context"
18 17
 	"github.com/docker/docker/daemon/execdriver"
19 18
 	"github.com/docker/docker/pkg/parsers"
20 19
 	"github.com/docker/docker/pkg/pools"
... ...
@@ -132,9 +131,9 @@ type execOutput struct {
132 132
 
133 133
 // Run implements the exec driver Driver interface,
134 134
 // it calls libcontainer APIs to run a container.
135
-func (d *Driver) Run(ctx context.Context, c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
135
+func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
136 136
 	// take the Command and populate the libcontainer.Config from it
137
-	container, err := d.createContainer(ctx, c, hooks)
137
+	container, err := d.createContainer(c, hooks)
138 138
 	if err != nil {
139 139
 		return execdriver.ExitStatus{ExitCode: -1}, err
140 140
 	}
... ...
@@ -175,7 +174,7 @@ func (d *Driver) Run(ctx context.Context, c *execdriver.Command, pipes *execdriv
175 175
 			p.Wait()
176 176
 			return execdriver.ExitStatus{ExitCode: -1}, err
177 177
 		}
178
-		hooks.Start(ctx, &c.ProcessConfig, pid, oom)
178
+		hooks.Start(&c.ProcessConfig, pid, oom)
179 179
 	}
180 180
 
181 181
 	waitF := p.Wait
... ...
@@ -9,7 +9,6 @@ import (
9 9
 	"strings"
10 10
 	"syscall"
11 11
 
12
-	"github.com/docker/docker/context"
13 12
 	"github.com/docker/docker/daemon/execdriver"
14 13
 	"github.com/opencontainers/runc/libcontainer"
15 14
 	// Blank import 'nsenter' so that init in that package will call c
... ...
@@ -21,7 +20,7 @@ import (
21 21
 
22 22
 // Exec implements the exec driver Driver interface,
23 23
 // it calls libcontainer APIs to execute a container.
24
-func (d *Driver) Exec(ctx context.Context, c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
24
+func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
25 25
 	active := d.activeContainers[c.ID]
26 26
 	if active == nil {
27 27
 		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
... ...
@@ -66,7 +65,7 @@ func (d *Driver) Exec(ctx context.Context, c *execdriver.Command, processConfig
66 66
 		// non-blocking and return the correct result when read.
67 67
 		chOOM := make(chan struct{})
68 68
 		close(chOOM)
69
-		hooks.Start(ctx, &c.ProcessConfig, pid, chOOM)
69
+		hooks.Start(&c.ProcessConfig, pid, chOOM)
70 70
 	}
71 71
 
72 72
 	ps, err := p.Wait()
... ...
@@ -7,13 +7,12 @@ import (
7 7
 	"fmt"
8 8
 
9 9
 	"github.com/Sirupsen/logrus"
10
-	"github.com/docker/docker/context"
11 10
 	"github.com/docker/docker/daemon/execdriver"
12 11
 	"github.com/microsoft/hcsshim"
13 12
 )
14 13
 
15 14
 // Exec implements the exec driver Driver interface.
16
-func (d *Driver) Exec(ctx context.Context, c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
15
+func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
17 16
 
18 17
 	var (
19 18
 		term     execdriver.Terminal
... ...
@@ -75,7 +74,7 @@ func (d *Driver) Exec(ctx context.Context, c *execdriver.Command, processConfig
75 75
 		// non-blocking and return the correct result when read.
76 76
 		chOOM := make(chan struct{})
77 77
 		close(chOOM)
78
-		hooks.Start(ctx, &c.ProcessConfig, int(pid), chOOM)
78
+		hooks.Start(&c.ProcessConfig, int(pid), chOOM)
79 79
 	}
80 80
 
81 81
 	if exitCode, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid); err != nil {
... ...
@@ -15,7 +15,6 @@ import (
15 15
 	"syscall"
16 16
 
17 17
 	"github.com/Sirupsen/logrus"
18
-	"github.com/docker/docker/context"
19 18
 	"github.com/docker/docker/daemon/execdriver"
20 19
 	"github.com/microsoft/hcsshim"
21 20
 )
... ...
@@ -80,7 +79,7 @@ type containerInit struct {
80 80
 const defaultOwner = "docker"
81 81
 
82 82
 // Run implements the exec driver Driver interface
83
-func (d *Driver) Run(ctx context.Context, c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
83
+func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
84 84
 
85 85
 	var (
86 86
 		term execdriver.Terminal
... ...
@@ -299,7 +298,7 @@ func (d *Driver) Run(ctx context.Context, c *execdriver.Command, pipes *execdriv
299 299
 		// non-blocking and return the correct result when read.
300 300
 		chOOM := make(chan struct{})
301 301
 		close(chOOM)
302
-		hooks.Start(ctx, &c.ProcessConfig, int(pid), chOOM)
302
+		hooks.Start(&c.ProcessConfig, int(pid), chOOM)
303 303
 	}
304 304
 
305 305
 	var exitCode int32
... ...
@@ -3,19 +3,18 @@ package daemon
3 3
 import (
4 4
 	"io"
5 5
 
6
-	"github.com/docker/docker/context"
7 6
 	derr "github.com/docker/docker/errors"
8 7
 )
9 8
 
10 9
 // ContainerExport writes the contents of the container to the given
11 10
 // writer. An error is returned if the container cannot be found.
12
-func (daemon *Daemon) ContainerExport(ctx context.Context, name string, out io.Writer) error {
13
-	container, err := daemon.Get(ctx, name)
11
+func (daemon *Daemon) ContainerExport(name string, out io.Writer) error {
12
+	container, err := daemon.Get(name)
14 13
 	if err != nil {
15 14
 		return err
16 15
 	}
17 16
 
18
-	data, err := container.export(ctx)
17
+	data, err := container.export()
19 18
 	if err != nil {
20 19
 		return derr.ErrorCodeExportFailed.WithArgs(name, err)
21 20
 	}
... ...
@@ -5,7 +5,6 @@ import (
5 5
 	"strings"
6 6
 
7 7
 	"github.com/docker/docker/api/types"
8
-	"github.com/docker/docker/context"
9 8
 	derr "github.com/docker/docker/errors"
10 9
 	"github.com/docker/docker/graph/tags"
11 10
 	"github.com/docker/docker/image"
... ...
@@ -51,10 +50,10 @@ import (
51 51
 // FIXME: remove ImageDelete's dependency on Daemon, then move to the graph
52 52
 // package. This would require that we no longer need the daemon to determine
53 53
 // whether images are being used by a stopped or running container.
54
-func (daemon *Daemon) ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]types.ImageDelete, error) {
54
+func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) {
55 55
 	records := []types.ImageDelete{}
56 56
 
57
-	img, err := daemon.Repositories(ctx).LookupImage(imageRef)
57
+	img, err := daemon.Repositories().LookupImage(imageRef)
58 58
 	if err != nil {
59 59
 		return nil, err
60 60
 	}
... ...
@@ -65,8 +64,8 @@ func (daemon *Daemon) ImageDelete(ctx context.Context, imageRef string, force, p
65 65
 		// first. We can only remove this reference if either force is
66 66
 		// true, there are multiple repository references to this
67 67
 		// image, or there are no containers using the given reference.
68
-		if !(force || daemon.imageHasMultipleRepositoryReferences(ctx, img.ID)) {
69
-			if container := daemon.getContainerUsingImage(ctx, img.ID); container != nil {
68
+		if !(force || daemon.imageHasMultipleRepositoryReferences(img.ID)) {
69
+			if container := daemon.getContainerUsingImage(img.ID); container != nil {
70 70
 				// If we removed the repository reference then
71 71
 				// this image would remain "dangling" and since
72 72
 				// we really want to avoid that the client must
... ...
@@ -75,14 +74,14 @@ func (daemon *Daemon) ImageDelete(ctx context.Context, imageRef string, force, p
75 75
 			}
76 76
 		}
77 77
 
78
-		parsedRef, err := daemon.removeImageRef(ctx, imageRef)
78
+		parsedRef, err := daemon.removeImageRef(imageRef)
79 79
 		if err != nil {
80 80
 			return nil, err
81 81
 		}
82 82
 
83 83
 		untaggedRecord := types.ImageDelete{Untagged: parsedRef}
84 84
 
85
-		daemon.EventsService.Log(ctx, "untag", img.ID, "")
85
+		daemon.EventsService.Log("untag", img.ID, "")
86 86
 		records = append(records, untaggedRecord)
87 87
 
88 88
 		removedRepositoryRef = true
... ...
@@ -91,21 +90,21 @@ func (daemon *Daemon) ImageDelete(ctx context.Context, imageRef string, force, p
91 91
 		// repository reference to the image then we will want to
92 92
 		// remove that reference.
93 93
 		// FIXME: Is this the behavior we want?
94
-		repoRefs := daemon.Repositories(ctx).ByID()[img.ID]
94
+		repoRefs := daemon.Repositories().ByID()[img.ID]
95 95
 		if len(repoRefs) == 1 {
96
-			parsedRef, err := daemon.removeImageRef(ctx, repoRefs[0])
96
+			parsedRef, err := daemon.removeImageRef(repoRefs[0])
97 97
 			if err != nil {
98 98
 				return nil, err
99 99
 			}
100 100
 
101 101
 			untaggedRecord := types.ImageDelete{Untagged: parsedRef}
102 102
 
103
-			daemon.EventsService.Log(ctx, "untag", img.ID, "")
103
+			daemon.EventsService.Log("untag", img.ID, "")
104 104
 			records = append(records, untaggedRecord)
105 105
 		}
106 106
 	}
107 107
 
108
-	return records, daemon.imageDeleteHelper(ctx, img, &records, force, prune, removedRepositoryRef)
108
+	return records, daemon.imageDeleteHelper(img, &records, force, prune, removedRepositoryRef)
109 109
 }
110 110
 
111 111
 // isImageIDPrefix returns whether the given possiblePrefix is a prefix of the
... ...
@@ -116,14 +115,14 @@ func isImageIDPrefix(imageID, possiblePrefix string) bool {
116 116
 
117 117
 // imageHasMultipleRepositoryReferences returns whether there are multiple
118 118
 // repository references to the given imageID.
119
-func (daemon *Daemon) imageHasMultipleRepositoryReferences(ctx context.Context, imageID string) bool {
120
-	return len(daemon.Repositories(ctx).ByID()[imageID]) > 1
119
+func (daemon *Daemon) imageHasMultipleRepositoryReferences(imageID string) bool {
120
+	return len(daemon.Repositories().ByID()[imageID]) > 1
121 121
 }
122 122
 
123 123
 // getContainerUsingImage returns a container that was created using the given
124 124
 // imageID. Returns nil if there is no such container.
125
-func (daemon *Daemon) getContainerUsingImage(ctx context.Context, imageID string) *Container {
126
-	for _, container := range daemon.List(ctx) {
125
+func (daemon *Daemon) getContainerUsingImage(imageID string) *Container {
126
+	for _, container := range daemon.List() {
127 127
 		if container.ImageID == imageID {
128 128
 			return container
129 129
 		}
... ...
@@ -137,7 +136,7 @@ func (daemon *Daemon) getContainerUsingImage(ctx context.Context, imageID string
137 137
 // repositoryRef must not be an image ID but a repository name followed by an
138 138
 // optional tag or digest reference. If tag or digest is omitted, the default
139 139
 // tag is used. Returns the resolved image reference and an error.
140
-func (daemon *Daemon) removeImageRef(ctx context.Context, repositoryRef string) (string, error) {
140
+func (daemon *Daemon) removeImageRef(repositoryRef string) (string, error) {
141 141
 	repository, ref := parsers.ParseRepositoryTag(repositoryRef)
142 142
 	if ref == "" {
143 143
 		ref = tags.DefaultTag
... ...
@@ -146,7 +145,7 @@ func (daemon *Daemon) removeImageRef(ctx context.Context, repositoryRef string)
146 146
 	// Ignore the boolean value returned, as far as we're concerned, this
147 147
 	// is an idempotent operation and it's okay if the reference didn't
148 148
 	// exist in the first place.
149
-	_, err := daemon.Repositories(ctx).Delete(repository, ref)
149
+	_, err := daemon.Repositories().Delete(repository, ref)
150 150
 
151 151
 	return utils.ImageReference(repository, ref), err
152 152
 }
... ...
@@ -156,18 +155,18 @@ func (daemon *Daemon) removeImageRef(ctx context.Context, repositoryRef string)
156 156
 // on the first encountered error. Removed references are logged to this
157 157
 // daemon's event service. An "Untagged" types.ImageDelete is added to the
158 158
 // given list of records.
159
-func (daemon *Daemon) removeAllReferencesToImageID(ctx context.Context, imgID string, records *[]types.ImageDelete) error {
160
-	imageRefs := daemon.Repositories(ctx).ByID()[imgID]
159
+func (daemon *Daemon) removeAllReferencesToImageID(imgID string, records *[]types.ImageDelete) error {
160
+	imageRefs := daemon.Repositories().ByID()[imgID]
161 161
 
162 162
 	for _, imageRef := range imageRefs {
163
-		parsedRef, err := daemon.removeImageRef(ctx, imageRef)
163
+		parsedRef, err := daemon.removeImageRef(imageRef)
164 164
 		if err != nil {
165 165
 			return err
166 166
 		}
167 167
 
168 168
 		untaggedRecord := types.ImageDelete{Untagged: parsedRef}
169 169
 
170
-		daemon.EventsService.Log(ctx, "untag", imgID, "")
170
+		daemon.EventsService.Log("untag", imgID, "")
171 171
 		*records = append(*records, untaggedRecord)
172 172
 	}
173 173
 
... ...
@@ -204,11 +203,11 @@ func (idc *imageDeleteConflict) Error() string {
204 204
 // conflict is encountered, it will be returned immediately without deleting
205 205
 // the image. If quiet is true, any encountered conflicts will be ignored and
206 206
 // the function will return nil immediately without deleting the image.
207
-func (daemon *Daemon) imageDeleteHelper(ctx context.Context, img *image.Image, records *[]types.ImageDelete, force, prune, quiet bool) error {
207
+func (daemon *Daemon) imageDeleteHelper(img *image.Image, records *[]types.ImageDelete, force, prune, quiet bool) error {
208 208
 	// First, determine if this image has any conflicts. Ignore soft conflicts
209 209
 	// if force is true.
210
-	if conflict := daemon.checkImageDeleteConflict(ctx, img, force); conflict != nil {
211
-		if quiet && !daemon.imageIsDangling(ctx, img) {
210
+	if conflict := daemon.checkImageDeleteConflict(img, force); conflict != nil {
211
+		if quiet && !daemon.imageIsDangling(img) {
212 212
 			// Ignore conflicts UNLESS the image is "dangling" in
213 213
 			// which case we want the user to know.
214 214
 			return nil
... ...
@@ -220,15 +219,15 @@ func (daemon *Daemon) imageDeleteHelper(ctx context.Context, img *image.Image, r
220 220
 	}
221 221
 
222 222
 	// Delete all repository tag/digest references to this image.
223
-	if err := daemon.removeAllReferencesToImageID(ctx, img.ID, records); err != nil {
223
+	if err := daemon.removeAllReferencesToImageID(img.ID, records); err != nil {
224 224
 		return err
225 225
 	}
226 226
 
227
-	if err := daemon.Graph(ctx).Delete(img.ID); err != nil {
227
+	if err := daemon.Graph().Delete(img.ID); err != nil {
228 228
 		return err
229 229
 	}
230 230
 
231
-	daemon.EventsService.Log(ctx, "delete", img.ID, "")
231
+	daemon.EventsService.Log("delete", img.ID, "")
232 232
 	*records = append(*records, types.ImageDelete{Deleted: img.ID})
233 233
 
234 234
 	if !prune || img.Parent == "" {
... ...
@@ -238,14 +237,14 @@ func (daemon *Daemon) imageDeleteHelper(ctx context.Context, img *image.Image, r
238 238
 	// We need to prune the parent image. This means delete it if there are
239 239
 	// no tags/digests referencing it and there are no containers using it (
240 240
 	// either running or stopped).
241
-	parentImg, err := daemon.Graph(ctx).Get(img.Parent)
241
+	parentImg, err := daemon.Graph().Get(img.Parent)
242 242
 	if err != nil {
243 243
 		return derr.ErrorCodeImgNoParent.WithArgs(err)
244 244
 	}
245 245
 
246 246
 	// Do not force prunings, but do so quietly (stopping on any encountered
247 247
 	// conflicts).
248
-	return daemon.imageDeleteHelper(ctx, parentImg, records, false, true, true)
248
+	return daemon.imageDeleteHelper(parentImg, records, false, true, true)
249 249
 }
250 250
 
251 251
 // checkImageDeleteConflict determines whether there are any conflicts
... ...
@@ -254,9 +253,9 @@ func (daemon *Daemon) imageDeleteHelper(ctx context.Context, img *image.Image, r
254 254
 // using the image. A soft conflict is any tags/digest referencing the given
255 255
 // image or any stopped container using the image. If ignoreSoftConflicts is
256 256
 // true, this function will not check for soft conflict conditions.
257
-func (daemon *Daemon) checkImageDeleteConflict(ctx context.Context, img *image.Image, ignoreSoftConflicts bool) *imageDeleteConflict {
257
+func (daemon *Daemon) checkImageDeleteConflict(img *image.Image, ignoreSoftConflicts bool) *imageDeleteConflict {
258 258
 	// Check for hard conflicts first.
259
-	if conflict := daemon.checkImageDeleteHardConflict(ctx, img); conflict != nil {
259
+	if conflict := daemon.checkImageDeleteHardConflict(img); conflict != nil {
260 260
 		return conflict
261 261
 	}
262 262
 
... ...
@@ -266,12 +265,12 @@ func (daemon *Daemon) checkImageDeleteConflict(ctx context.Context, img *image.I
266 266
 		return nil
267 267
 	}
268 268
 
269
-	return daemon.checkImageDeleteSoftConflict(ctx, img)
269
+	return daemon.checkImageDeleteSoftConflict(img)
270 270
 }
271 271
 
272
-func (daemon *Daemon) checkImageDeleteHardConflict(ctx context.Context, img *image.Image) *imageDeleteConflict {
272
+func (daemon *Daemon) checkImageDeleteHardConflict(img *image.Image) *imageDeleteConflict {
273 273
 	// Check if the image ID is being used by a pull or build.
274
-	if daemon.Graph(ctx).IsHeld(img.ID) {
274
+	if daemon.Graph().IsHeld(img.ID) {
275 275
 		return &imageDeleteConflict{
276 276
 			hard:    true,
277 277
 			imgID:   img.ID,
... ...
@@ -280,7 +279,7 @@ func (daemon *Daemon) checkImageDeleteHardConflict(ctx context.Context, img *ima
280 280
 	}
281 281
 
282 282
 	// Check if the image has any descendent images.
283
-	if daemon.Graph(ctx).HasChildren(img) {
283
+	if daemon.Graph().HasChildren(img) {
284 284
 		return &imageDeleteConflict{
285 285
 			hard:    true,
286 286
 			imgID:   img.ID,
... ...
@@ -289,7 +288,7 @@ func (daemon *Daemon) checkImageDeleteHardConflict(ctx context.Context, img *ima
289 289
 	}
290 290
 
291 291
 	// Check if any running container is using the image.
292
-	for _, container := range daemon.List(ctx) {
292
+	for _, container := range daemon.List() {
293 293
 		if !container.IsRunning() {
294 294
 			// Skip this until we check for soft conflicts later.
295 295
 			continue
... ...
@@ -307,9 +306,9 @@ func (daemon *Daemon) checkImageDeleteHardConflict(ctx context.Context, img *ima
307 307
 	return nil
308 308
 }
309 309
 
310
-func (daemon *Daemon) checkImageDeleteSoftConflict(ctx context.Context, img *image.Image) *imageDeleteConflict {
310
+func (daemon *Daemon) checkImageDeleteSoftConflict(img *image.Image) *imageDeleteConflict {
311 311
 	// Check if any repository tags/digest reference this image.
312
-	if daemon.Repositories(ctx).HasReferences(img) {
312
+	if daemon.Repositories().HasReferences(img) {
313 313
 		return &imageDeleteConflict{
314 314
 			imgID:   img.ID,
315 315
 			message: "image is referenced in one or more repositories",
... ...
@@ -317,7 +316,7 @@ func (daemon *Daemon) checkImageDeleteSoftConflict(ctx context.Context, img *ima
317 317
 	}
318 318
 
319 319
 	// Check if any stopped containers reference this image.
320
-	for _, container := range daemon.List(ctx) {
320
+	for _, container := range daemon.List() {
321 321
 		if container.IsRunning() {
322 322
 			// Skip this as it was checked above in hard conflict conditions.
323 323
 			continue
... ...
@@ -337,6 +336,6 @@ func (daemon *Daemon) checkImageDeleteSoftConflict(ctx context.Context, img *ima
337 337
 // imageIsDangling returns whether the given image is "dangling" which means
338 338
 // that there are no repository references to the given image and it has no
339 339
 // child images.
340
-func (daemon *Daemon) imageIsDangling(ctx context.Context, img *image.Image) bool {
341
-	return !(daemon.Repositories(ctx).HasReferences(img) || daemon.Graph(ctx).HasChildren(img))
340
+func (daemon *Daemon) imageIsDangling(img *image.Image) bool {
341
+	return !(daemon.Repositories().HasReferences(img) || daemon.Graph().HasChildren(img))
342 342
 }
... ...
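The image-deletion hunks above return plain []types.ImageDelete records from the reverted ImageDelete(imageRef, force, prune). A hypothetical, self-contained sketch of a caller consuming those records; only the Untagged and Deleted fields that appear in these hunks are assumed, and the sample values are purely illustrative:

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/api/types"
    )

    // printImageDeleteRecords is a hypothetical helper: it reports records of the
    // shape returned by the reverted daemon.ImageDelete(imageRef, force, prune).
    func printImageDeleteRecords(records []types.ImageDelete) {
    	for _, r := range records {
    		if r.Untagged != "" {
    			fmt.Println("Untagged:", r.Untagged)
    		}
    		if r.Deleted != "" {
    			fmt.Println("Deleted:", r.Deleted)
    		}
    	}
    }

    func main() {
    	// Records shaped like those appended in imageDeleteHelper above
    	// (illustrative sample values, not real output).
    	printImageDeleteRecords([]types.ImageDelete{
    		{Untagged: "busybox:latest"},
    		{Deleted: "511136ea3c5a"},
    	})
    }
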
@@ -8,7 +8,6 @@ import (
8 8
 	"github.com/Sirupsen/logrus"
9 9
 	"github.com/docker/docker/api/types"
10 10
 	"github.com/docker/docker/autogen/dockerversion"
11
-	"github.com/docker/docker/context"
12 11
 	"github.com/docker/docker/pkg/fileutils"
13 12
 	"github.com/docker/docker/pkg/parsers/kernel"
14 13
 	"github.com/docker/docker/pkg/parsers/operatingsystem"
... ...
@@ -19,8 +18,8 @@ import (
19 19
 )
20 20
 
21 21
 // SystemInfo returns information about the host server the daemon is running on.
22
-func (daemon *Daemon) SystemInfo(ctx context.Context) (*types.Info, error) {
23
-	images := daemon.Graph(ctx).Map()
22
+func (daemon *Daemon) SystemInfo() (*types.Info, error) {
23
+	images := daemon.Graph().Map()
24 24
 	var imgcount int
25 25
 	if images == nil {
26 26
 		imgcount = 0
... ...
@@ -66,10 +65,10 @@ func (daemon *Daemon) SystemInfo(ctx context.Context) (*types.Info, error) {
66 66
 
67 67
 	v := &types.Info{
68 68
 		ID:                 daemon.ID,
69
-		Containers:         len(daemon.List(ctx)),
69
+		Containers:         len(daemon.List()),
70 70
 		Images:             imgcount,
71
-		Driver:             daemon.GraphDriver(ctx).String(),
72
-		DriverStatus:       daemon.GraphDriver(ctx).Status(),
71
+		Driver:             daemon.GraphDriver().String(),
72
+		DriverStatus:       daemon.GraphDriver().Status(),
73 73
 		IPv4Forwarding:     !sysInfo.IPv4ForwardingDisabled,
74 74
 		BridgeNfIptables:   !sysInfo.BridgeNfCallIptablesDisabled,
75 75
 		BridgeNfIP6tables:  !sysInfo.BridgeNfCallIP6tablesDisabled,
... ...
@@ -77,7 +76,7 @@ func (daemon *Daemon) SystemInfo(ctx context.Context) (*types.Info, error) {
77 77
 		NFd:                fileutils.GetTotalUsedFds(),
78 78
 		NGoroutines:        runtime.NumGoroutine(),
79 79
 		SystemTime:         time.Now().Format(time.RFC3339Nano),
80
-		ExecutionDriver:    daemon.ExecutionDriver(ctx).Name(),
80
+		ExecutionDriver:    daemon.ExecutionDriver().Name(),
81 81
 		LoggingDriver:      daemon.defaultLogConfig.Type,
82 82
 		NEventsListener:    daemon.EventsService.SubscribersCount(),
83 83
 		KernelVersion:      kernelVersion,
... ...
@@ -5,14 +5,13 @@ import (
5 5
 	"time"
6 6
 
7 7
 	"github.com/docker/docker/api/types"
8
-	"github.com/docker/docker/context"
9 8
 )
10 9
 
11 10
 // ContainerInspect returns low-level information about a
12 11
 // container. Returns an error if the container cannot be found, or if
13 12
 // there is an error getting the data.
14
-func (daemon *Daemon) ContainerInspect(ctx context.Context, name string) (*types.ContainerJSON, error) {
15
-	container, err := daemon.Get(ctx, name)
13
+func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error) {
14
+	container, err := daemon.Get(name)
16 15
 	if err != nil {
17 16
 		return nil, err
18 17
 	}
... ...
@@ -20,7 +19,7 @@ func (daemon *Daemon) ContainerInspect(ctx context.Context, name string) (*types
20 20
 	container.Lock()
21 21
 	defer container.Unlock()
22 22
 
23
-	base, err := daemon.getInspectData(ctx, container)
23
+	base, err := daemon.getInspectData(container)
24 24
 	if err != nil {
25 25
 		return nil, err
26 26
 	}
... ...
@@ -31,8 +30,8 @@ func (daemon *Daemon) ContainerInspect(ctx context.Context, name string) (*types
31 31
 }
32 32
 
33 33
 // ContainerInspect120 serializes the master version of a container into a json type.
34
-func (daemon *Daemon) ContainerInspect120(ctx context.Context, name string) (*types.ContainerJSON120, error) {
35
-	container, err := daemon.Get(ctx, name)
34
+func (daemon *Daemon) ContainerInspect120(name string) (*types.ContainerJSON120, error) {
35
+	container, err := daemon.Get(name)
36 36
 	if err != nil {
37 37
 		return nil, err
38 38
 	}
... ...
@@ -40,7 +39,7 @@ func (daemon *Daemon) ContainerInspect120(ctx context.Context, name string) (*ty
40 40
 	container.Lock()
41 41
 	defer container.Unlock()
42 42
 
43
-	base, err := daemon.getInspectData(ctx, container)
43
+	base, err := daemon.getInspectData(container)
44 44
 	if err != nil {
45 45
 		return nil, err
46 46
 	}
... ...
@@ -54,11 +53,11 @@ func (daemon *Daemon) ContainerInspect120(ctx context.Context, name string) (*ty
54 54
 	return &types.ContainerJSON120{base, mountPoints, config}, nil
55 55
 }
56 56
 
57
-func (daemon *Daemon) getInspectData(ctx context.Context, container *Container) (*types.ContainerJSONBase, error) {
57
+func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSONBase, error) {
58 58
 	// make a copy to play with
59 59
 	hostConfig := *container.hostConfig
60 60
 
61
-	if children, err := daemon.children(ctx, container.Name); err == nil {
61
+	if children, err := daemon.children(container.Name); err == nil {
62 62
 		for linkAlias, child := range children {
63 63
 			hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
64 64
 		}
... ...
@@ -121,7 +120,7 @@ func (daemon *Daemon) getInspectData(ctx context.Context, container *Container)
121 121
 
122 122
 // ContainerExecInspect returns low-level information about the exec
123 123
 // command. An error is returned if the exec cannot be found.
124
-func (daemon *Daemon) ContainerExecInspect(ctx context.Context, id string) (*ExecConfig, error) {
124
+func (daemon *Daemon) ContainerExecInspect(id string) (*ExecConfig, error) {
125 125
 	eConfig, err := daemon.getExecConfig(id)
126 126
 	if err != nil {
127 127
 		return nil, err
... ...
@@ -131,7 +130,7 @@ func (daemon *Daemon) ContainerExecInspect(ctx context.Context, id string) (*Exe
131 131
 
132 132
 // VolumeInspect looks up a volume by name. An error is returned if
133 133
 // the volume cannot be found.
134
-func (daemon *Daemon) VolumeInspect(ctx context.Context, name string) (*types.Volume, error) {
134
+func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) {
135 135
 	v, err := daemon.volumes.Get(name)
136 136
 	if err != nil {
137 137
 		return nil, err
... ...
@@ -2,10 +2,7 @@
2 2
 
3 3
 package daemon
4 4
 
5
-import (
6
-	"github.com/docker/docker/api/types"
7
-	"github.com/docker/docker/context"
8
-)
5
+import "github.com/docker/docker/api/types"
9 6
 
10 7
 // This sets platform-specific fields
11 8
 func setPlatformSpecificContainerFields(container *Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
... ...
@@ -18,8 +15,8 @@ func setPlatformSpecificContainerFields(container *Container, contJSONBase *type
18 18
 }
19 19
 
20 20
 // ContainerInspectPre120 gets containers for pre 1.20 APIs.
21
-func (daemon *Daemon) ContainerInspectPre120(ctx context.Context, name string) (*types.ContainerJSONPre120, error) {
22
-	container, err := daemon.Get(ctx, name)
21
+func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSONPre120, error) {
22
+	container, err := daemon.Get(name)
23 23
 	if err != nil {
24 24
 		return nil, err
25 25
 	}
... ...
@@ -27,7 +24,7 @@ func (daemon *Daemon) ContainerInspectPre120(ctx context.Context, name string) (
27 27
 	container.Lock()
28 28
 	defer container.Unlock()
29 29
 
30
-	base, err := daemon.getInspectData(ctx, container)
30
+	base, err := daemon.getInspectData(container)
31 31
 	if err != nil {
32 32
 		return nil, err
33 33
 	}
... ...
@@ -1,9 +1,6 @@
1 1
 package daemon
2 2
 
3
-import (
4
-	"github.com/docker/docker/api/types"
5
-	"github.com/docker/docker/context"
6
-)
3
+import "github.com/docker/docker/api/types"
7 4
 
8 5
 // This sets platform-specific fields
9 6
 func setPlatformSpecificContainerFields(container *Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
... ...
@@ -15,6 +12,6 @@ func addMountPoints(container *Container) []types.MountPoint {
15 15
 }
16 16
 
17 17
 // ContainerInspectPre120 gets containers for pre 1.20 APIs.
18
-func (daemon *Daemon) ContainerInspectPre120(ctx context.Context, name string) (*types.ContainerJSON, error) {
19
-	return daemon.ContainerInspect(ctx, name)
18
+func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSON, error) {
19
+	return daemon.ContainerInspect(name)
20 20
 }
... ...
@@ -1,29 +1,25 @@
1 1
 package daemon
2 2
 
3
-import (
4
-	"syscall"
5
-
6
-	"github.com/docker/docker/context"
7
-)
3
+import "syscall"
8 4
 
9 5
 // ContainerKill sends a signal to the container.
10 6
 // If no signal is given (sig 0), then Kill with SIGKILL and wait
11 7
 // for the container to exit.
12 8
 // If a signal is given, then just send it to the container and return.
13
-func (daemon *Daemon) ContainerKill(ctx context.Context, name string, sig uint64) error {
14
-	container, err := daemon.Get(ctx, name)
9
+func (daemon *Daemon) ContainerKill(name string, sig uint64) error {
10
+	container, err := daemon.Get(name)
15 11
 	if err != nil {
16 12
 		return err
17 13
 	}
18 14
 
19 15
 	// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
20 16
 	if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
21
-		if err := container.Kill(ctx); err != nil {
17
+		if err := container.Kill(); err != nil {
22 18
 			return err
23 19
 		}
24 20
 	} else {
25 21
 		// Otherwise, just send the requested signal
26
-		if err := container.killSig(ctx, int(sig)); err != nil {
22
+		if err := container.killSig(int(sig)); err != nil {
27 23
 			return err
28 24
 		}
29 25
 	}
... ...
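The kill path above branches on the requested signal: no signal (0) or SIGKILL means a full kill-and-wait, anything else is simply forwarded. A minimal standalone sketch of that branch, using only the standard library (dispatchSignal and its messages are illustrative, not daemon code):

```go
package main

import (
	"fmt"
	"syscall"
)

// dispatchSignal sketches the branch in ContainerKill above: no signal (0) or
// SIGKILL means "kill the container and wait", anything else is forwarded as-is.
func dispatchSignal(sig uint64) string {
	if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
		return "kill container and wait for exit"
	}
	return fmt.Sprintf("forward signal %d and return", sig)
}

func main() {
	fmt.Println(dispatchSignal(0))
	fmt.Println(dispatchSignal(uint64(syscall.SIGKILL)))
	fmt.Println(dispatchSignal(uint64(syscall.SIGTERM)))
}
```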
@@ -8,7 +8,6 @@ import (
8 8
 
9 9
 	"github.com/Sirupsen/logrus"
10 10
 	"github.com/docker/docker/api/types"
11
-	"github.com/docker/docker/context"
12 11
 	derr "github.com/docker/docker/errors"
13 12
 	"github.com/docker/docker/image"
14 13
 	"github.com/docker/docker/pkg/graphdb"
... ...
@@ -21,7 +20,7 @@ type iterationAction int
21 21
 
22 22
 // containerReducer represents a reducer for a container.
23 23
 // Returns the object to serialize by the api.
24
-type containerReducer func(context.Context, *Container, *listContext) (*types.Container, error)
24
+type containerReducer func(*Container, *listContext) (*types.Container, error)
25 25
 
26 26
 const (
27 27
 	// includeContainer is the action to include a container in the reducer.
... ...
@@ -36,7 +35,7 @@ const (
36 36
 var errStopIteration = errors.New("container list iteration stopped")
37 37
 
38 38
 // List returns an array of all containers registered in the daemon.
39
-func (daemon *Daemon) List(ctx context.Context) []*Container {
39
+func (daemon *Daemon) List() []*Container {
40 40
 	return daemon.containers.List()
41 41
 }
42 42
 
... ...
@@ -80,21 +79,21 @@ type listContext struct {
80 80
 }
81 81
 
82 82
 // Containers returns the list of containers to show given the user's filtering.
83
-func (daemon *Daemon) Containers(ctx context.Context, config *ContainersConfig) ([]*types.Container, error) {
84
-	return daemon.reduceContainers(ctx, config, daemon.transformContainer)
83
+func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container, error) {
84
+	return daemon.reduceContainers(config, daemon.transformContainer)
85 85
 }
86 86
 
87 87
 // reduceContainers parses the user filtering and generates the list of containers to return based on a reducer.
88
-func (daemon *Daemon) reduceContainers(ctx context.Context, config *ContainersConfig, reducer containerReducer) ([]*types.Container, error) {
88
+func (daemon *Daemon) reduceContainers(config *ContainersConfig, reducer containerReducer) ([]*types.Container, error) {
89 89
 	containers := []*types.Container{}
90 90
 
91
-	fctx, err := daemon.foldFilter(ctx, config)
91
+	ctx, err := daemon.foldFilter(config)
92 92
 	if err != nil {
93 93
 		return nil, err
94 94
 	}
95 95
 
96
-	for _, container := range daemon.List(ctx) {
97
-		t, err := daemon.reducePsContainer(ctx, container, fctx, reducer)
96
+	for _, container := range daemon.List() {
97
+		t, err := daemon.reducePsContainer(container, ctx, reducer)
98 98
 		if err != nil {
99 99
 			if err != errStopIteration {
100 100
 				return nil, err
... ...
@@ -103,19 +102,19 @@ func (daemon *Daemon) reduceContainers(ctx context.Context, config *ContainersCo
103 103
 		}
104 104
 		if t != nil {
105 105
 			containers = append(containers, t)
106
-			fctx.idx++
106
+			ctx.idx++
107 107
 		}
108 108
 	}
109 109
 	return containers, nil
110 110
 }
111 111
 
112 112
 // reducePsContainer is the basic representation for a container as expected by the ps command.
113
-func (daemon *Daemon) reducePsContainer(ctx context.Context, container *Container, lctx *listContext, reducer containerReducer) (*types.Container, error) {
113
+func (daemon *Daemon) reducePsContainer(container *Container, ctx *listContext, reducer containerReducer) (*types.Container, error) {
114 114
 	container.Lock()
115 115
 	defer container.Unlock()
116 116
 
117 117
 	// filter containers to return
118
-	action := includeContainerInList(container, lctx)
118
+	action := includeContainerInList(container, ctx)
119 119
 	switch action {
120 120
 	case excludeContainer:
121 121
 		return nil, nil
... ...
@@ -124,11 +123,11 @@ func (daemon *Daemon) reducePsContainer(ctx context.Context, container *Containe
124 124
 	}
125 125
 
126 126
 	// transform internal container struct into api structs
127
-	return reducer(ctx, container, lctx)
127
+	return reducer(container, ctx)
128 128
 }
129 129
 
130 130
 // foldFilter generates the container filter based in the user's filtering options.
131
-func (daemon *Daemon) foldFilter(ctx context.Context, config *ContainersConfig) (*listContext, error) {
131
+func (daemon *Daemon) foldFilter(config *ContainersConfig) (*listContext, error) {
132 132
 	psFilters, err := filters.FromParam(config.Filters)
133 133
 	if err != nil {
134 134
 		return nil, err
... ...
@@ -160,11 +159,11 @@ func (daemon *Daemon) foldFilter(ctx context.Context, config *ContainersConfig)
160 160
 	var ancestorFilter bool
161 161
 	if ancestors, ok := psFilters["ancestor"]; ok {
162 162
 		ancestorFilter = true
163
-		byParents := daemon.Graph(ctx).ByParent()
163
+		byParents := daemon.Graph().ByParent()
164 164
 		// The idea is to walk the graph down the most "efficient" way.
165 165
 		for _, ancestor := range ancestors {
166 166
 			// First, get the imageId of the ancestor filter (yay)
167
-			image, err := daemon.Repositories(ctx).LookupImage(ancestor)
167
+			image, err := daemon.Repositories().LookupImage(ancestor)
168 168
 			if err != nil {
169 169
 				logrus.Warnf("Error while looking up for image %v", ancestor)
170 170
 				continue
... ...
@@ -186,14 +185,14 @@ func (daemon *Daemon) foldFilter(ctx context.Context, config *ContainersConfig)
186 186
 
187 187
 	var beforeCont, sinceCont *Container
188 188
 	if config.Before != "" {
189
-		beforeCont, err = daemon.Get(ctx, config.Before)
189
+		beforeCont, err = daemon.Get(config.Before)
190 190
 		if err != nil {
191 191
 			return nil, err
192 192
 		}
193 193
 	}
194 194
 
195 195
 	if config.Since != "" {
196
-		sinceCont, err = daemon.Get(ctx, config.Since)
196
+		sinceCont, err = daemon.Get(config.Since)
197 197
 		if err != nil {
198 198
 			return nil, err
199 199
 		}
... ...
@@ -287,13 +286,13 @@ func includeContainerInList(container *Container, ctx *listContext) iterationAct
287 287
 }
288 288
 
289 289
 // transformContainer generates the container type expected by the docker ps command.
290
-func (daemon *Daemon) transformContainer(ctx context.Context, container *Container, lctx *listContext) (*types.Container, error) {
290
+func (daemon *Daemon) transformContainer(container *Container, ctx *listContext) (*types.Container, error) {
291 291
 	newC := &types.Container{
292 292
 		ID:    container.ID,
293
-		Names: lctx.names[container.ID],
293
+		Names: ctx.names[container.ID],
294 294
 	}
295 295
 
296
-	img, err := daemon.Repositories(ctx).LookupImage(container.Config.Image)
296
+	img, err := daemon.Repositories().LookupImage(container.Config.Image)
297 297
 	if err != nil {
298 298
 		// If the image can no longer be found by its original reference,
299 299
 		// it makes sense to show the ID instead of a stale reference.
... ...
@@ -350,8 +349,8 @@ func (daemon *Daemon) transformContainer(ctx context.Context, container *Contain
350 350
 		}
351 351
 	}
352 352
 
353
-	if lctx.Size {
354
-		sizeRw, sizeRootFs := container.getSize(ctx)
353
+	if ctx.Size {
354
+		sizeRw, sizeRootFs := container.getSize()
355 355
 		newC.SizeRw = sizeRw
356 356
 		newC.SizeRootFs = sizeRootFs
357 357
 	}
... ...
@@ -362,7 +361,7 @@ func (daemon *Daemon) transformContainer(ctx context.Context, container *Contain
362 362
 
363 363
 // Volumes lists known volumes, using the filter to restrict the range
364 364
 // of volumes returned.
365
-func (daemon *Daemon) Volumes(ctx context.Context, filter string) ([]*types.Volume, error) {
365
+func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, error) {
366 366
 	var volumesOut []*types.Volume
367 367
 	volFilters, err := filters.FromParam(filter)
368 368
 	if err != nil {
... ...
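After the revert, the list path is a plain filter-and-reduce over daemon.List(): foldFilter builds a listContext, includeContainerInList decides whether to keep each container, and the containerReducer turns each survivor into an API struct. A self-contained sketch of that shape, with toy types standing in for the daemon's real ones (container, listContext, and the reducer below are illustrative only):

```go
package main

import "fmt"

// Illustrative stand-ins; the real daemon types are richer.
type container struct {
	ID, Name, State string
}

type listContext struct {
	all bool // include stopped containers
	idx int  // running index, as in listContext.idx above
}

// containerReducer mirrors the (now Context-free) reducer signature.
type containerReducer func(*container, *listContext) (string, error)

func reduceContainers(all []*container, lctx *listContext, reducer containerReducer) ([]string, error) {
	out := []string{}
	for _, c := range all {
		if !lctx.all && c.State != "running" {
			continue // filter step, analogous to includeContainerInList
		}
		t, err := reducer(c, lctx)
		if err != nil {
			return nil, err
		}
		out = append(out, t)
		lctx.idx++
	}
	return out, nil
}

func main() {
	cs := []*container{
		{ID: "a1", Name: "web", State: "running"},
		{ID: "b2", Name: "db", State: "exited"},
	}
	rows, _ := reduceContainers(cs, &listContext{all: true}, func(c *container, _ *listContext) (string, error) {
		return fmt.Sprintf("%s %s %s", c.ID, c.Name, c.State), nil
	})
	for _, r := range rows {
		fmt.Println(r)
	}
}
```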
@@ -6,7 +6,6 @@ import (
6 6
 	"time"
7 7
 
8 8
 	"github.com/Sirupsen/logrus"
9
-	"github.com/docker/docker/context"
10 9
 	"github.com/docker/docker/daemon/logger"
11 10
 	derr "github.com/docker/docker/errors"
12 11
 	"github.com/docker/docker/pkg/stdcopy"
... ...
@@ -31,7 +30,7 @@ type ContainerLogsConfig struct {
31 31
 
32 32
 // ContainerLogs hooks up a container's stdout and stderr streams
33 33
 // configured with the given struct.
34
-func (daemon *Daemon) ContainerLogs(ctx context.Context, container *Container, config *ContainerLogsConfig) error {
34
+func (daemon *Daemon) ContainerLogs(container *Container, config *ContainerLogsConfig) error {
35 35
 	if !(config.UseStdout || config.UseStderr) {
36 36
 		return derr.ErrorCodeNeedStream
37 37
 	}
... ...
@@ -7,7 +7,6 @@ import (
7 7
 	"time"
8 8
 
9 9
 	"github.com/Sirupsen/logrus"
10
-	"github.com/docker/docker/context"
11 10
 	"github.com/docker/docker/daemon/execdriver"
12 11
 	"github.com/docker/docker/pkg/stringid"
13 12
 	"github.com/docker/docker/runconfig"
... ...
@@ -85,9 +84,9 @@ func (m *containerMonitor) ExitOnNext() {
85 85
 
86 86
 // Close closes the container's resources such as networking allocations and
87 87
 // unmounts the container's root filesystem
88
-func (m *containerMonitor) Close(ctx context.Context) error {
88
+func (m *containerMonitor) Close() error {
89 89
 	// Cleanup networking and mounts
90
-	m.container.cleanup(ctx)
90
+	m.container.cleanup()
91 91
 
92 92
 	// FIXME: here is race condition between two RUN instructions in Dockerfile
93 93
 	// because they share same runconfig and change image. Must be fixed
... ...
@@ -102,7 +101,7 @@ func (m *containerMonitor) Close(ctx context.Context) error {
102 102
 }
103 103
 
104 104
 // Start starts the containers process and monitors it according to the restart policy
105
-func (m *containerMonitor) Start(ctx context.Context) error {
105
+func (m *containerMonitor) Start() error {
106 106
 	var (
107 107
 		err        error
108 108
 		exitStatus execdriver.ExitStatus
... ...
@@ -118,7 +117,7 @@ func (m *containerMonitor) Start(ctx context.Context) error {
118 118
 			m.container.setStopped(&exitStatus)
119 119
 			defer m.container.Unlock()
120 120
 		}
121
-		m.Close(ctx)
121
+		m.Close()
122 122
 	}()
123 123
 	// reset stopped flag
124 124
 	if m.container.HasBeenManuallyStopped {
... ...
@@ -139,11 +138,11 @@ func (m *containerMonitor) Start(ctx context.Context) error {
139 139
 
140 140
 		pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin)
141 141
 
142
-		m.container.logEvent(ctx, "start")
142
+		m.container.logEvent("start")
143 143
 
144 144
 		m.lastStartTime = time.Now()
145 145
 
146
-		if exitStatus, err = m.container.daemon.run(ctx, m.container, pipes, m.callback); err != nil {
146
+		if exitStatus, err = m.container.daemon.run(m.container, pipes, m.callback); err != nil {
147 147
 			// if we receive an internal error from the initial start of a container then lets
148 148
 			// return it instead of entering the restart loop
149 149
 			if m.container.RestartCount == 0 {
... ...
@@ -163,7 +162,7 @@ func (m *containerMonitor) Start(ctx context.Context) error {
163 163
 
164 164
 		if m.shouldRestart(exitStatus.ExitCode) {
165 165
 			m.container.setRestarting(&exitStatus)
166
-			m.container.logEvent(ctx, "die")
166
+			m.container.logEvent("die")
167 167
 			m.resetContainer(true)
168 168
 
169 169
 			// sleep with a small time increment between each restart to help avoid issues caused by quickly
... ...
@@ -178,7 +177,7 @@ func (m *containerMonitor) Start(ctx context.Context) error {
178 178
 			continue
179 179
 		}
180 180
 
181
-		m.container.logEvent(ctx, "die")
181
+		m.container.logEvent("die")
182 182
 		m.resetContainer(true)
183 183
 		return err
184 184
 	}
... ...
@@ -246,11 +245,11 @@ func (m *containerMonitor) shouldRestart(exitCode int) bool {
246 246
 
247 247
 // callback ensures that the container's state is properly updated after we
248 248
 // received ack from the execution drivers
249
-func (m *containerMonitor) callback(ctx context.Context, processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
249
+func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
250 250
 	go func() {
251 251
 		_, ok := <-chOOM
252 252
 		if ok {
253
-			m.container.logEvent(ctx, "oom")
253
+			m.container.logEvent("oom")
254 254
 		}
255 255
 	}()
256 256
 
... ...
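The restart loop in containerMonitor.Start re-runs the container and, per the comment above, sleeps with a growing delay between restarts to avoid a tight crash loop. A hedged, standalone sketch of that idea (runOnce, the delays, and the restart cap are invented for illustration; the daemon's actual policy lives in shouldRestart):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// runOnce stands in for the exec driver's run; it fails twice, then succeeds.
var attempts int

func runOnce() error {
	attempts++
	if attempts < 3 {
		return errors.New("simulated crash")
	}
	return nil
}

// monitor restarts runOnce with a small, growing delay between attempts,
// similar in spirit to containerMonitor.Start's restart loop.
func monitor(maxRestarts int) error {
	delay := 100 * time.Millisecond
	for restarts := 0; ; restarts++ {
		err := runOnce()
		if err == nil {
			return nil
		}
		if restarts >= maxRestarts {
			return fmt.Errorf("giving up after %d restarts: %w", restarts, err)
		}
		fmt.Printf("process died (%v); restarting in %v\n", err, delay)
		time.Sleep(delay)
		delay *= 2 // back off to avoid a tight restart loop
	}
}

func main() {
	if err := monitor(5); err != nil {
		fmt.Println("monitor:", err)
		return
	}
	fmt.Println("process exited cleanly after", attempts, "attempts")
}
```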
@@ -1,18 +1,17 @@
1 1
 package daemon
2 2
 
3 3
 import (
4
-	"github.com/docker/docker/context"
5 4
 	derr "github.com/docker/docker/errors"
6 5
 )
7 6
 
8 7
 // ContainerPause pauses a container
9
-func (daemon *Daemon) ContainerPause(ctx context.Context, name string) error {
10
-	container, err := daemon.Get(ctx, name)
8
+func (daemon *Daemon) ContainerPause(name string) error {
9
+	container, err := daemon.Get(name)
11 10
 	if err != nil {
12 11
 		return err
13 12
 	}
14 13
 
15
-	if err := container.pause(ctx); err != nil {
14
+	if err := container.pause(); err != nil {
16 15
 		return derr.ErrorCodePauseError.WithArgs(name, err)
17 16
 	}
18 17
 
... ...
@@ -1,19 +1,18 @@
1 1
 package daemon
2 2
 
3 3
 import (
4
-	"github.com/docker/docker/context"
5 4
 	derr "github.com/docker/docker/errors"
6 5
 )
7 6
 
8 7
 // ContainerRename changes the name of a container, using the oldName
9 8
 // to find the container. An error is returned if newName is already
10 9
 // reserved.
11
-func (daemon *Daemon) ContainerRename(ctx context.Context, oldName, newName string) error {
10
+func (daemon *Daemon) ContainerRename(oldName, newName string) error {
12 11
 	if oldName == "" || newName == "" {
13 12
 		return derr.ErrorCodeEmptyRename
14 13
 	}
15 14
 
16
-	container, err := daemon.Get(ctx, oldName)
15
+	container, err := daemon.Get(oldName)
17 16
 	if err != nil {
18 17
 		return err
19 18
 	}
... ...
@@ -22,7 +21,7 @@ func (daemon *Daemon) ContainerRename(ctx context.Context, oldName, newName stri
22 22
 
23 23
 	container.Lock()
24 24
 	defer container.Unlock()
25
-	if newName, err = daemon.reserveName(ctx, container.ID, newName); err != nil {
25
+	if newName, err = daemon.reserveName(container.ID, newName); err != nil {
26 26
 		return derr.ErrorCodeRenameTaken.WithArgs(err)
27 27
 	}
28 28
 
... ...
@@ -30,7 +29,7 @@ func (daemon *Daemon) ContainerRename(ctx context.Context, oldName, newName stri
30 30
 
31 31
 	undo := func() {
32 32
 		container.Name = oldName
33
-		daemon.reserveName(ctx, container.ID, oldName)
33
+		daemon.reserveName(container.ID, oldName)
34 34
 		daemon.containerGraphDB.Delete(newName)
35 35
 	}
36 36
 
... ...
@@ -44,6 +43,6 @@ func (daemon *Daemon) ContainerRename(ctx context.Context, oldName, newName stri
44 44
 		return err
45 45
 	}
46 46
 
47
-	container.logEvent(ctx, "rename")
47
+	container.logEvent("rename")
48 48
 	return nil
49 49
 }
... ...
@@ -1,24 +1,20 @@
1 1
 package daemon
2 2
 
3
-import (
4
-	"github.com/docker/docker/context"
5
-)
6
-
7 3
 // ContainerResize changes the size of the TTY of the process running
8 4
 // in the container with the given name to the given height and width.
9
-func (daemon *Daemon) ContainerResize(ctx context.Context, name string, height, width int) error {
10
-	container, err := daemon.Get(ctx, name)
5
+func (daemon *Daemon) ContainerResize(name string, height, width int) error {
6
+	container, err := daemon.Get(name)
11 7
 	if err != nil {
12 8
 		return err
13 9
 	}
14 10
 
15
-	return container.Resize(ctx, height, width)
11
+	return container.Resize(height, width)
16 12
 }
17 13
 
18 14
 // ContainerExecResize changes the size of the TTY of the process
19 15
 // running in the exec with the given name to the given height and
20 16
 // width.
21
-func (daemon *Daemon) ContainerExecResize(ctx context.Context, name string, height, width int) error {
17
+func (daemon *Daemon) ContainerExecResize(name string, height, width int) error {
22 18
 	ExecConfig, err := daemon.getExecConfig(name)
23 19
 	if err != nil {
24 20
 		return err
... ...
@@ -1,7 +1,6 @@
1 1
 package daemon
2 2
 
3 3
 import (
4
-	"github.com/docker/docker/context"
5 4
 	derr "github.com/docker/docker/errors"
6 5
 )
7 6
 
... ...
@@ -11,12 +10,12 @@ import (
11 11
 // timeout, ContainerRestart will wait forever until a graceful
12 12
 // stop. Returns an error if the container cannot be found, or if
13 13
 // there is an underlying error at any stage of the restart.
14
-func (daemon *Daemon) ContainerRestart(ctx context.Context, name string, seconds int) error {
15
-	container, err := daemon.Get(ctx, name)
14
+func (daemon *Daemon) ContainerRestart(name string, seconds int) error {
15
+	container, err := daemon.Get(name)
16 16
 	if err != nil {
17 17
 		return err
18 18
 	}
19
-	if err := container.Restart(ctx, seconds); err != nil {
19
+	if err := container.Restart(seconds); err != nil {
20 20
 		return derr.ErrorCodeCantRestart.WithArgs(name, err)
21 21
 	}
22 22
 	return nil
... ...
@@ -3,15 +3,14 @@ package daemon
3 3
 import (
4 4
 	"runtime"
5 5
 
6
-	"github.com/docker/docker/context"
7 6
 	derr "github.com/docker/docker/errors"
8 7
 	"github.com/docker/docker/runconfig"
9 8
 	"github.com/docker/docker/utils"
10 9
 )
11 10
 
12 11
 // ContainerStart starts a container.
13
-func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfig *runconfig.HostConfig) error {
14
-	container, err := daemon.Get(ctx, name)
12
+func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConfig) error {
13
+	container, err := daemon.Get(name)
15 14
 	if err != nil {
16 15
 		return err
17 16
 	}
... ...
@@ -29,7 +28,7 @@ func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfi
29 29
 		// This is kept for backward compatibility - hostconfig should be passed when
30 30
 		// creating a container, not during start.
31 31
 		if hostConfig != nil {
32
-			if err := daemon.setHostConfig(ctx, container, hostConfig); err != nil {
32
+			if err := daemon.setHostConfig(container, hostConfig); err != nil {
33 33
 				return err
34 34
 			}
35 35
 		}
... ...
@@ -41,11 +40,11 @@ func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfi
41 41
 
42 42
 	// check if hostConfig is in line with the current system settings.
43 43
 	// It may happen cgroups are umounted or the like.
44
-	if _, err = daemon.verifyContainerSettings(ctx, container.hostConfig, nil); err != nil {
44
+	if _, err = daemon.verifyContainerSettings(container.hostConfig, nil); err != nil {
45 45
 		return err
46 46
 	}
47 47
 
48
-	if err := container.Start(ctx); err != nil {
48
+	if err := container.Start(); err != nil {
49 49
 		return derr.ErrorCodeCantStart.WithArgs(name, utils.GetErrorMessage(err))
50 50
 	}
51 51
 
... ...
@@ -5,7 +5,6 @@ import (
5 5
 	"io"
6 6
 
7 7
 	"github.com/docker/docker/api/types"
8
-	"github.com/docker/docker/context"
9 8
 	"github.com/docker/docker/daemon/execdriver"
10 9
 	"github.com/docker/docker/pkg/version"
11 10
 	"github.com/docker/libnetwork/osl"
... ...
@@ -23,9 +22,9 @@ type ContainerStatsConfig struct {
23 23
 
24 24
 // ContainerStats writes information about the container to the stream
25 25
 // given in the config object.
26
-func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, config *ContainerStatsConfig) error {
26
+func (daemon *Daemon) ContainerStats(prefixOrName string, config *ContainerStatsConfig) error {
27 27
 
28
-	container, err := daemon.Get(ctx, prefixOrName)
28
+	container, err := daemon.Get(prefixOrName)
29 29
 	if err != nil {
30 30
 		return err
31 31
 	}
... ...
@@ -1,7 +1,6 @@
1 1
 package daemon
2 2
 
3 3
 import (
4
-	"github.com/docker/docker/context"
5 4
 	derr "github.com/docker/docker/errors"
6 5
 )
7 6
 
... ...
@@ -11,15 +10,15 @@ import (
11 11
 // will wait for a graceful termination. An error is returned if the
12 12
 // container is not found, is already stopped, or if there is a
13 13
 // problem stopping the container.
14
-func (daemon *Daemon) ContainerStop(ctx context.Context, name string, seconds int) error {
15
-	container, err := daemon.Get(ctx, name)
14
+func (daemon *Daemon) ContainerStop(name string, seconds int) error {
15
+	container, err := daemon.Get(name)
16 16
 	if err != nil {
17 17
 		return err
18 18
 	}
19 19
 	if !container.IsRunning() {
20 20
 		return derr.ErrorCodeStopped
21 21
 	}
22
-	if err := container.Stop(ctx, seconds); err != nil {
22
+	if err := container.Stop(seconds); err != nil {
23 23
 		return derr.ErrorCodeCantStop.WithArgs(name, err)
24 24
 	}
25 25
 	return nil
... ...
@@ -8,7 +8,6 @@ import (
8 8
 	"strings"
9 9
 
10 10
 	"github.com/docker/docker/api/types"
11
-	"github.com/docker/docker/context"
12 11
 	derr "github.com/docker/docker/errors"
13 12
 )
14 13
 
... ...
@@ -17,12 +16,12 @@ import (
17 17
 // "-ef" if no args are given.  An error is returned if the container
18 18
 // is not found, or is not running, or if there are any problems
19 19
 // running ps, or parsing the output.
20
-func (daemon *Daemon) ContainerTop(ctx context.Context, name string, psArgs string) (*types.ContainerProcessList, error) {
20
+func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) {
21 21
 	if psArgs == "" {
22 22
 		psArgs = "-ef"
23 23
 	}
24 24
 
25
-	container, err := daemon.Get(ctx, name)
25
+	container, err := daemon.Get(name)
26 26
 	if err != nil {
27 27
 		return nil, err
28 28
 	}
... ...
@@ -31,7 +30,7 @@ func (daemon *Daemon) ContainerTop(ctx context.Context, name string, psArgs stri
31 31
 		return nil, derr.ErrorCodeNotRunning.WithArgs(name)
32 32
 	}
33 33
 
34
-	pids, err := daemon.ExecutionDriver(ctx).GetPidsForContainer(container.ID)
34
+	pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID)
35 35
 	if err != nil {
36 36
 		return nil, err
37 37
 	}
... ...
@@ -77,6 +76,6 @@ func (daemon *Daemon) ContainerTop(ctx context.Context, name string, psArgs stri
77 77
 			}
78 78
 		}
79 79
 	}
80
-	container.logEvent(ctx, "top")
80
+	container.logEvent("top")
81 81
 	return procList, nil
82 82
 }
... ...
@@ -2,11 +2,10 @@ package daemon
2 2
 
3 3
 import (
4 4
 	"github.com/docker/docker/api/types"
5
-	"github.com/docker/docker/context"
6 5
 	derr "github.com/docker/docker/errors"
7 6
 )
8 7
 
9 8
 // ContainerTop is not supported on Windows and returns an error.
10
-func (daemon *Daemon) ContainerTop(ctx context.Context, name string, psArgs string) (*types.ContainerProcessList, error) {
9
+func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) {
11 10
 	return nil, derr.ErrorCodeNoTop
12 11
 }
... ...
@@ -1,18 +1,17 @@
1 1
 package daemon
2 2
 
3 3
 import (
4
-	"github.com/docker/docker/context"
5 4
 	derr "github.com/docker/docker/errors"
6 5
 )
7 6
 
8 7
 // ContainerUnpause unpauses a container
9
-func (daemon *Daemon) ContainerUnpause(ctx context.Context, name string) error {
10
-	container, err := daemon.Get(ctx, name)
8
+func (daemon *Daemon) ContainerUnpause(name string) error {
9
+	container, err := daemon.Get(name)
11 10
 	if err != nil {
12 11
 		return err
13 12
 	}
14 13
 
15
-	if err := container.unpause(ctx); err != nil {
14
+	if err := container.unpause(); err != nil {
16 15
 		return derr.ErrorCodeCantUnpause.WithArgs(name, err)
17 16
 	}
18 17
 
... ...
@@ -10,7 +10,6 @@ import (
10 10
 	"strings"
11 11
 
12 12
 	"github.com/Sirupsen/logrus"
13
-	"github.com/docker/docker/context"
14 13
 	"github.com/docker/docker/daemon/execdriver"
15 14
 	derr "github.com/docker/docker/errors"
16 15
 	"github.com/docker/docker/pkg/system"
... ...
@@ -286,7 +285,7 @@ func parseVolumesFrom(spec string) (string, string, error) {
286 286
 // 1. Select the previously configured mount points for the containers, if any.
287 287
 // 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination.
288 288
 // 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
289
-func (daemon *Daemon) registerMountPoints(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
289
+func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error {
290 290
 	binds := map[string]bool{}
291 291
 	mountPoints := map[string]*mountPoint{}
292 292
 
... ...
@@ -302,7 +301,7 @@ func (daemon *Daemon) registerMountPoints(ctx context.Context, container *Contai
302 302
 			return err
303 303
 		}
304 304
 
305
-		c, err := daemon.Get(ctx, containerID)
305
+		c, err := daemon.Get(containerID)
306 306
 		if err != nil {
307 307
 			return err
308 308
 		}
... ...
@@ -3,7 +3,6 @@
3 3
 package daemon
4 4
 
5 5
 import (
6
-	"github.com/docker/docker/context"
7 6
 	"github.com/docker/docker/daemon/execdriver"
8 7
 	"github.com/docker/docker/runconfig"
9 8
 )
... ...
@@ -32,6 +31,6 @@ func (daemon *Daemon) verifyVolumesInfo(container *Container) error {
32 32
 // registerMountPoints initializes the container mount points with the
33 33
 // configured volumes and bind mounts. Windows does not support volumes or
34 34
 // mount points.
35
-func (daemon *Daemon) registerMountPoints(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
35
+func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error {
36 36
 	return nil
37 37
 }
... ...
@@ -1,18 +1,14 @@
1 1
 package daemon
2 2
 
3
-import (
4
-	"time"
5
-
6
-	"github.com/docker/docker/context"
7
-)
3
+import "time"
8 4
 
9 5
 // ContainerWait stops processing until the given container is
10 6
 // stopped. If the container is not found, an error is returned. On a
11 7
 // successful stop, the exit code of the container is returned. On a
12 8
 // timeout, an error is returned. If you want to wait forever, supply
13 9
 // a negative duration for the timeout.
14
-func (daemon *Daemon) ContainerWait(ctx context.Context, name string, timeout time.Duration) (int, error) {
15
-	container, err := daemon.Get(ctx, name)
10
+func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) {
11
+	container, err := daemon.Get(name)
16 12
 	if err != nil {
17 13
 		return -1, err
18 14
 	}
... ...
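ContainerWait's contract is: block until the container stops, return its exit code, error on timeout, and treat a negative duration as "wait forever". A minimal sketch of that contract over a plain channel, assuming a done channel stands in for the container's wait machinery:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitWithTimeout mimics the ContainerWait contract described above:
// block until done delivers an exit code; a negative timeout waits forever.
func waitWithTimeout(done <-chan int, timeout time.Duration) (int, error) {
	if timeout < 0 {
		return <-done, nil
	}
	select {
	case code := <-done:
		return code, nil
	case <-time.After(timeout):
		return -1, errors.New("timed out waiting for container to stop")
	}
}

func main() {
	done := make(chan int, 1)
	go func() {
		time.Sleep(50 * time.Millisecond) // simulated container run
		done <- 0                         // exit code
	}()

	if code, err := waitWithTimeout(done, time.Second); err == nil {
		fmt.Println("exit code:", code)
	} else {
		fmt.Println("error:", err)
	}
}
```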
@@ -17,7 +17,6 @@ import (
17 17
 	"github.com/docker/docker/autogen/dockerversion"
18 18
 	"github.com/docker/docker/cli"
19 19
 	"github.com/docker/docker/cliconfig"
20
-	"github.com/docker/docker/context"
21 20
 	"github.com/docker/docker/daemon"
22 21
 	"github.com/docker/docker/daemon/logger"
23 22
 	"github.com/docker/docker/opts"
... ...
@@ -151,11 +150,6 @@ func getGlobalFlag() (globalFlag *flag.Flag) {
151 151
 
152 152
 // CmdDaemon is the daemon command, called the raw arguments after `docker daemon`.
153 153
 func (cli *DaemonCli) CmdDaemon(args ...string) error {
154
-	// This may need to be made even more global - it all depends
155
-	// on whether we want the CLI to have a context object too.
156
-	// For now we'll leave it as a daemon-side object only.
157
-	ctx := context.Background()
158
-
159 154
 	// warn from uuid package when running the daemon
160 155
 	uuid.Loggerf = logrus.Warnf
161 156
 
... ...
@@ -230,7 +224,7 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
230 230
 		serverConfig.TLSConfig = tlsConfig
231 231
 	}
232 232
 
233
-	api := apiserver.New(ctx, serverConfig)
233
+	api := apiserver.New(serverConfig)
234 234
 
235 235
 	// The serve API routine never exits unless an error occurs
236 236
 	// We need to start it as a goroutine and wait on it so
... ...
@@ -251,7 +245,7 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
251 251
 	cli.TrustKeyPath = commonFlags.TrustKey
252 252
 
253 253
 	registryService := registry.NewService(cli.registryOptions)
254
-	d, err := daemon.NewDaemon(ctx, cli.Config, registryService)
254
+	d, err := daemon.NewDaemon(cli.Config, registryService)
255 255
 	if err != nil {
256 256
 		if pfile != nil {
257 257
 			if err := pfile.Remove(); err != nil {
... ...
@@ -266,14 +260,14 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
266 266
 	logrus.WithFields(logrus.Fields{
267 267
 		"version":     dockerversion.VERSION,
268 268
 		"commit":      dockerversion.GITCOMMIT,
269
-		"execdriver":  d.ExecutionDriver(ctx).Name(),
270
-		"graphdriver": d.GraphDriver(ctx).String(),
269
+		"execdriver":  d.ExecutionDriver().Name(),
270
+		"graphdriver": d.GraphDriver().String(),
271 271
 	}).Info("Docker daemon")
272 272
 
273 273
 	signal.Trap(func() {
274 274
 		api.Close()
275 275
 		<-serveAPIWait
276
-		shutdownDaemon(ctx, d, 15)
276
+		shutdownDaemon(d, 15)
277 277
 		if pfile != nil {
278 278
 			if err := pfile.Remove(); err != nil {
279 279
 				logrus.Error(err)
... ...
@@ -283,12 +277,12 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
283 283
 
284 284
 	// after the daemon is done setting up we can tell the api to start
285 285
 	// accepting connections with specified daemon
286
-	api.AcceptConnections(ctx, d)
286
+	api.AcceptConnections(d)
287 287
 
288 288
 	// Daemon is fully initialized and handling API traffic
289 289
 	// Wait for serve API to complete
290 290
 	errAPI := <-serveAPIWait
291
-	shutdownDaemon(ctx, d, 15)
291
+	shutdownDaemon(d, 15)
292 292
 	if errAPI != nil {
293 293
 		if pfile != nil {
294 294
 			if err := pfile.Remove(); err != nil {
... ...
@@ -303,10 +297,10 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
303 303
 // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case
304 304
 // d.Shutdown() is waiting too long to kill container or worst it's
305 305
 // blocked there
306
-func shutdownDaemon(ctx context.Context, d *daemon.Daemon, timeout time.Duration) {
306
+func shutdownDaemon(d *daemon.Daemon, timeout time.Duration) {
307 307
 	ch := make(chan struct{})
308 308
 	go func() {
309
-		d.Shutdown(ctx)
309
+		d.Shutdown()
310 310
 		close(ch)
311 311
 	}()
312 312
 	select {
... ...
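The hunk is cut off at the select, but the shape of shutdownDaemon is visible: run the blocking Shutdown in a goroutine, close a channel when it returns, and select between that channel and a timeout. A small sketch of the same wrap-in-a-timeout pattern (the durations and log messages are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// shutdownWithTimeout wraps a blocking shutdown func and gives up after the
// given duration, mirroring the shape of shutdownDaemon above.
func shutdownWithTimeout(shutdown func(), timeout time.Duration) {
	ch := make(chan struct{})
	go func() {
		shutdown()
		close(ch)
	}()
	select {
	case <-ch:
		fmt.Println("clean shutdown complete")
	case <-time.After(timeout):
		fmt.Println("shutdown timed out; continuing anyway")
	}
}

func main() {
	slow := func() { time.Sleep(2 * time.Second) }
	shutdownWithTimeout(slow, 500*time.Millisecond)
}
```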
@@ -5,7 +5,6 @@ import (
5 5
 	"net/http"
6 6
 	"net/url"
7 7
 
8
-	"github.com/docker/docker/context"
9 8
 	"github.com/docker/docker/pkg/httputils"
10 9
 	"github.com/docker/docker/pkg/progressreader"
11 10
 	"github.com/docker/docker/pkg/streamformatter"
... ...
@@ -17,7 +16,7 @@ import (
17 17
 // inConfig (if src is "-"), or from a URI specified in src. Progress output is
18 18
 // written to outStream. Repository and tag names can optionally be given in
19 19
 // the repo and tag arguments, respectively.
20
-func (s *TagStore) Import(ctx context.Context, src string, repo string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, containerConfig *runconfig.Config) error {
20
+func (s *TagStore) Import(src string, repo string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, containerConfig *runconfig.Config) error {
21 21
 	var (
22 22
 		sf      = streamformatter.NewJSONStreamFormatter()
23 23
 		archive io.ReadCloser
... ...
@@ -75,6 +74,6 @@ func (s *TagStore) Import(ctx context.Context, src string, repo string, tag stri
75 75
 		logID = utils.ImageReference(logID, tag)
76 76
 	}
77 77
 
78
-	s.eventsService.Log(ctx, "import", logID, "")
78
+	s.eventsService.Log("import", logID, "")
79 79
 	return nil
80 80
 }
... ...
@@ -6,7 +6,6 @@ import (
6 6
 
7 7
 	"github.com/Sirupsen/logrus"
8 8
 	"github.com/docker/docker/cliconfig"
9
-	"github.com/docker/docker/context"
10 9
 	"github.com/docker/docker/pkg/streamformatter"
11 10
 	"github.com/docker/docker/registry"
12 11
 	"github.com/docker/docker/utils"
... ...
@@ -63,7 +62,7 @@ func NewPuller(s *TagStore, endpoint registry.APIEndpoint, repoInfo *registry.Re
63 63
 
64 64
 // Pull initiates a pull operation. image is the repository name to pull, and
65 65
 // tag may be either empty, or indicate a specific tag to pull.
66
-func (s *TagStore) Pull(ctx context.Context, image string, tag string, imagePullConfig *ImagePullConfig) error {
66
+func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConfig) error {
67 67
 	var sf = streamformatter.NewJSONStreamFormatter()
68 68
 
69 69
 	// Resolve the Repository name from fqn to RepositoryInfo
... ...
@@ -132,7 +131,7 @@ func (s *TagStore) Pull(ctx context.Context, image string, tag string, imagePull
132 132
 
133 133
 		}
134 134
 
135
-		s.eventsService.Log(ctx, "pull", logName, "")
135
+		s.eventsService.Log("pull", logName, "")
136 136
 		return nil
137 137
 	}
138 138
 
... ...
@@ -7,7 +7,6 @@ import (
7 7
 	"github.com/Sirupsen/logrus"
8 8
 	"github.com/docker/distribution/digest"
9 9
 	"github.com/docker/docker/cliconfig"
10
-	"github.com/docker/docker/context"
11 10
 	"github.com/docker/docker/pkg/streamformatter"
12 11
 	"github.com/docker/docker/registry"
13 12
 )
... ...
@@ -68,7 +67,7 @@ func (s *TagStore) NewPusher(endpoint registry.APIEndpoint, localRepo Repository
68 68
 }
69 69
 
70 70
 // Push initiates a push operation on the repository named localName.
71
-func (s *TagStore) Push(ctx context.Context, localName string, imagePushConfig *ImagePushConfig) error {
71
+func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) error {
72 72
 	// FIXME: Allow to interrupt current push when new push of same image is done.
73 73
 
74 74
 	var sf = streamformatter.NewJSONStreamFormatter()
... ...
@@ -116,7 +115,7 @@ func (s *TagStore) Push(ctx context.Context, localName string, imagePushConfig *
116 116
 
117 117
 		}
118 118
 
119
-		s.eventsService.Log(ctx, "push", repoInfo.LocalName, "")
119
+		s.eventsService.Log("push", repoInfo.LocalName, "")
120 120
 		return nil
121 121
 	}
122 122
 
... ...
@@ -410,7 +410,7 @@ func (s *DockerSuite) TestEventsFilterContainer(c *check.C) {
410 410
 			}
411 411
 
412 412
 			// Check the id
413
-			parsedID := strings.TrimSuffix(e[3], ":")
413
+			parsedID := strings.TrimSuffix(e[1], ":")
414 414
 			if parsedID != id {
415 415
 				return fmt.Errorf("expected event for container id %s: %s - parsed container id: %s", id, event, parsedID)
416 416
 			}
... ...
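With the [reqid: xxx] segment removed from event lines, the container ID moves from field 3 to field 1 when the line is split on whitespace, which is why the test now trims e[1] instead of e[3]. A tiny sketch of that parse; the sample event line below is illustrative, not captured output:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative event line without the reqid field:
	//   <timestamp> <containerID>: (from <image>) <action>
	event := "2015-09-29T18:51:40.000000000Z 4e8a2b1c9f01: (from busybox) create"

	e := strings.Fields(event)
	parsedID := strings.TrimSuffix(e[1], ":") // was e[3] while the reqid field was present
	fmt.Println("container id:", parsedID)
}
```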
@@ -686,78 +686,3 @@ func (s *DockerRegistrySuite) TestEventsImageFilterPush(c *check.C) {
686 686
 		c.Fatalf("Missing 'push' log event for image %s\n%s", repoName, out)
687 687
 	}
688 688
 }
689
-
690
-func (s *DockerSuite) TestEventsReqID(c *check.C) {
691
-	// Tests for the "[reqid: xxx]" field in Events
692
-	testRequires(c, DaemonIsLinux)
693
-
694
-	reqIDMatch := `[^ ]+ \[reqid: ([0-9a-z]{12})\] [0-9a-z]+: `
695
-	reqIDRE := regexp.MustCompile(reqIDMatch)
696
-
697
-	// Simple test just to make sure it works at all
698
-	dockerCmd(c, "create", "busybox", "true")
699
-
700
-	out, _ := dockerCmd(c, "events", "--since=0", "--until=0s")
701
-	events := strings.Split(strings.TrimSpace(out), "\n")
702
-
703
-	if len(events) == 0 {
704
-		c.Fatalf("Wrong # of events, should just be one, got:\n%v\n", events)
705
-	}
706
-
707
-	createEvent := events[len(events)-1]
708
-
709
-	matched, err := regexp.MatchString(reqIDMatch, createEvent)
710
-	if err != nil || !matched {
711
-		c.Fatalf("Error finding reqID in event: %v\n", createEvent)
712
-	}
713
-
714
-	reqID1 := reqIDRE.FindStringSubmatch(createEvent)[1]
715
-
716
-	// Now make sure another cmd doesn't get the same reqID
717
-	dockerCmd(c, "create", "busybox", "true")
718
-
719
-	out, _ = dockerCmd(c, "events", "--since=0", "--until=0s")
720
-	events = strings.Split(strings.TrimSpace(out), "\n")
721
-	createEvent = events[len(events)-1]
722
-
723
-	matched, err = regexp.MatchString(reqIDMatch, createEvent)
724
-	if err != nil || !matched {
725
-		c.Fatalf("Error finding reqID in event: %v\n", createEvent)
726
-	}
727
-
728
-	reqID2 := reqIDRE.FindStringSubmatch(createEvent)[1]
729
-
730
-	if reqID1 == reqID2 {
731
-		c.Fatalf("Should not have the same reqID(%s):\n%v\n", reqID1, createEvent)
732
-	}
733
-
734
-	// Now make sure a build **does** use the same reqID for all
735
-	// 4 events that are generated
736
-	_, err = buildImage("reqidimg", `
737
-		  FROM busybox
738
-		  RUN echo HI`, true)
739
-	if err != nil {
740
-		c.Fatalf("Couldn't create image: %q", err)
741
-	}
742
-
743
-	out, _ = dockerCmd(c, "events", "--since=0", "--until=0s")
744
-	events = strings.Split(strings.TrimSpace(out), "\n")
745
-
746
-	// Get last event's reqID - will use it to find other matching events
747
-	lastEvent := events[len(events)-1]
748
-	reqID := reqIDRE.FindStringSubmatch(lastEvent)[1]
749
-
750
-	// Find all events with this same reqID
751
-	eventList := []string{lastEvent}
752
-	for i := len(events) - 2; i >= 0; i-- {
753
-		tmpID := reqIDRE.FindStringSubmatch(events[i])[1]
754
-		if tmpID != reqID {
755
-			break
756
-		}
757
-		eventList = append(eventList, events[i])
758
-	}
759
-
760
-	if len(eventList) != 5 { // create, start, die, commit, destroy
761
-		c.Fatalf("Wrong # of matching events - should be 5:\n%q\n", eventList)
762
-	}
763
-}
... ...
@@ -92,7 +92,6 @@ func (p *JSONProgress) String() string {
92 92
 // the created time, where it from, status, ID of the
93 93
 // message. It's used for docker events.
94 94
 type JSONMessage struct {
95
-	RequestID       string        `json:"reqid,omitempty"`
96 95
 	Stream          string        `json:"stream,omitempty"`
97 96
 	Status          string        `json:"status,omitempty"`
98 97
 	Progress        *JSONProgress `json:"progressDetail,omitempty"`
... ...
@@ -128,9 +127,6 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
128 128
 	} else if jm.Time != 0 {
129 129
 		fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(timeutils.RFC3339NanoFixed))
130 130
 	}
131
-	if jm.RequestID != "" {
132
-		fmt.Fprintf(out, "[reqid: %s] ", jm.RequestID)
133
-	}
134 131
 	if jm.ID != "" {
135 132
 		fmt.Fprintf(out, "%s: ", jm.ID)
136 133
 	}
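With RequestID gone from JSONMessage, Display prints only the optional timestamp, the optional ID, and then the status. A rough, standalone sketch of that output shape (it uses time.RFC3339Nano in place of timeutils.RFC3339NanoFixed and is not the actual jsonmessage code):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"time"
)

// display sketches the reverted prefix order: time, then ID, then status,
// with no reqid segment in between.
func display(out io.Writer, t int64, id, status string) {
	if t != 0 {
		fmt.Fprintf(out, "%s ", time.Unix(t, 0).Format(time.RFC3339Nano))
	}
	if id != "" {
		fmt.Fprintf(out, "%s: ", id)
	}
	fmt.Fprintf(out, "%s\n", status)
}

func main() {
	display(os.Stdout, time.Now().Unix(), "4e8a2b1c9f01", "create")
}
```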