Add context.RequestID to event stream
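The API server previously created a bare `context.Background()` inside `makeHTTPHandler` for every request. With this change the handler chain receives a router-level context and stores a short request ID in it under `context.RequestID` (generated with `stringid.TruncateID(stringid.GenerateNonCryptoID())`), and the daemon, builder, and registry calls gain a leading `ctx` parameter so the ID can travel all the way down to the event stream. Below is a minimal, self-contained sketch of the same pattern using only the standard library; the key type and the `newRequestID`/`withRequestID` names are illustrative and are not the identifiers used in this patch.

```go
package main

import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"net/http"
)

// ctxKey is an unexported key type so the request-ID entry cannot collide
// with values stored in the context by other packages.
type ctxKey string

const requestIDKey ctxKey = "request-id"

// newRequestID returns a short random identifier, playing the role of
// stringid.TruncateID(stringid.GenerateNonCryptoID()) in the real patch.
func newRequestID() string {
	b := make([]byte, 6)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	return hex.EncodeToString(b)
}

// withRequestID mirrors what makeHTTPHandler now does: it tags the request
// context with a fresh ID before invoking the wrapped handler.
func withRequestID(next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := context.WithValue(r.Context(), requestIDKey, newRequestID())
		next(w, r.WithContext(ctx))
	}
}

func handler(w http.ResponseWriter, r *http.Request) {
	// Downstream code reads the ID back out of the context, just as the
	// daemon methods can once they accept ctx as their first argument.
	id, _ := r.Context().Value(requestIDKey).(string)
	fmt.Fprintf(w, "handled request %s\n", id)
}

func main() {
	http.HandleFunc("/", withRequestID(handler))
	http.ListenAndServe(":8080", nil)
}
```

Apart from that handler change and a few new imports, the diff below is mechanical: each touched function gains a `ctx context.Context` first parameter and forwards it to its callees.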
@@ -45,7 +45,7 @@ func (s *Server) getContainersJSON(ctx context.Context, w http.ResponseWriter, r
 		config.Limit = limit
 	}

-	containers, err := s.daemon.Containers(config)
+	containers, err := s.daemon.Containers(ctx, config)
 	if err != nil {
 		return err
 	}
@@ -83,7 +83,7 @@ func (s *Server) getContainersStats(ctx context.Context, w http.ResponseWriter,
 		Version: version,
 	}

-	return s.daemon.ContainerStats(vars["name"], config)
+	return s.daemon.ContainerStats(ctx, vars["name"], config)
 }

 func (s *Server) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -118,7 +118,7 @@ func (s *Server) getContainersLogs(ctx context.Context, w http.ResponseWriter, r
 		closeNotifier = notifier.CloseNotify()
 	}

-	c, err := s.daemon.Get(vars["name"])
+	c, err := s.daemon.Get(ctx, vars["name"])
 	if err != nil {
 		return err
 	}
@@ -140,7 +140,7 @@ func (s *Server) getContainersLogs(ctx context.Context, w http.ResponseWriter, r
 		Stop: closeNotifier,
 	}

-	if err := s.daemon.ContainerLogs(c, logsConfig); err != nil {
+	if err := s.daemon.ContainerLogs(ctx, c, logsConfig); err != nil {
 		// The client may be expecting all of the data we're sending to
 		// be multiplexed, so send it through OutStream, which will
 		// have been set up to handle that if needed.
@@ -155,7 +155,7 @@ func (s *Server) getContainersExport(ctx context.Context, w http.ResponseWriter,
 		return fmt.Errorf("Missing parameter")
 	}

-	return s.daemon.ContainerExport(vars["name"], w)
+	return s.daemon.ContainerExport(ctx, vars["name"], w)
 }

 func (s *Server) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -183,7 +183,7 @@ func (s *Server) postContainersStart(ctx context.Context, w http.ResponseWriter,
 		hostConfig = c
 	}

-	if err := s.daemon.ContainerStart(vars["name"], hostConfig); err != nil {
+	if err := s.daemon.ContainerStart(ctx, vars["name"], hostConfig); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)
@@ -200,7 +200,7 @@ func (s *Server) postContainersStop(ctx context.Context, w http.ResponseWriter,

 	seconds, _ := strconv.Atoi(r.Form.Get("t"))

-	if err := s.daemon.ContainerStop(vars["name"], seconds); err != nil {
+	if err := s.daemon.ContainerStop(ctx, vars["name"], seconds); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)
@@ -227,7 +227,7 @@ func (s *Server) postContainersKill(ctx context.Context, w http.ResponseWriter,
 		}
 	}

-	if err := s.daemon.ContainerKill(name, uint64(sig)); err != nil {
+	if err := s.daemon.ContainerKill(ctx, name, uint64(sig)); err != nil {
 		theErr, isDerr := err.(errcode.ErrorCoder)
 		isStopped := isDerr && theErr.ErrorCode() == derr.ErrorCodeNotRunning

@@ -254,7 +254,7 @@ func (s *Server) postContainersRestart(ctx context.Context, w http.ResponseWrite

 	timeout, _ := strconv.Atoi(r.Form.Get("t"))

-	if err := s.daemon.ContainerRestart(vars["name"], timeout); err != nil {
+	if err := s.daemon.ContainerRestart(ctx, vars["name"], timeout); err != nil {
 		return err
 	}

@@ -271,7 +271,7 @@ func (s *Server) postContainersPause(ctx context.Context, w http.ResponseWriter,
 		return err
 	}

-	if err := s.daemon.ContainerPause(vars["name"]); err != nil {
+	if err := s.daemon.ContainerPause(ctx, vars["name"]); err != nil {
 		return err
 	}

@@ -288,7 +288,7 @@ func (s *Server) postContainersUnpause(ctx context.Context, w http.ResponseWrite
 		return err
 	}

-	if err := s.daemon.ContainerUnpause(vars["name"]); err != nil {
+	if err := s.daemon.ContainerUnpause(ctx, vars["name"]); err != nil {
 		return err
 	}

@@ -302,7 +302,7 @@ func (s *Server) postContainersWait(ctx context.Context, w http.ResponseWriter,
 		return fmt.Errorf("Missing parameter")
 	}

-	status, err := s.daemon.ContainerWait(vars["name"], -1*time.Second)
+	status, err := s.daemon.ContainerWait(ctx, vars["name"], -1*time.Second)
 	if err != nil {
 		return err
 	}
@@ -317,7 +317,7 @@ func (s *Server) getContainersChanges(ctx context.Context, w http.ResponseWriter
 		return fmt.Errorf("Missing parameter")
 	}

-	changes, err := s.daemon.ContainerChanges(vars["name"])
+	changes, err := s.daemon.ContainerChanges(ctx, vars["name"])
 	if err != nil {
 		return err
 	}
@@ -334,7 +334,7 @@ func (s *Server) getContainersTop(ctx context.Context, w http.ResponseWriter, r
 		return err
 	}

-	procList, err := s.daemon.ContainerTop(vars["name"], r.Form.Get("ps_args"))
+	procList, err := s.daemon.ContainerTop(ctx, vars["name"], r.Form.Get("ps_args"))
 	if err != nil {
 		return err
 	}
@@ -352,7 +352,7 @@ func (s *Server) postContainerRename(ctx context.Context, w http.ResponseWriter,

 	name := vars["name"]
 	newName := r.Form.Get("name")
-	if err := s.daemon.ContainerRename(name, newName); err != nil {
+	if err := s.daemon.ContainerRename(ctx, name, newName); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)
@@ -378,7 +378,7 @@ func (s *Server) postContainersCreate(ctx context.Context, w http.ResponseWriter
 	version := ctx.Version()
 	adjustCPUShares := version.LessThan("1.19")

-	container, warnings, err := s.daemon.ContainerCreate(name, config, hostConfig, adjustCPUShares)
+	container, warnings, err := s.daemon.ContainerCreate(ctx, name, config, hostConfig, adjustCPUShares)
 	if err != nil {
 		return err
 	}
@@ -404,7 +404,7 @@ func (s *Server) deleteContainers(ctx context.Context, w http.ResponseWriter, r
 		RemoveLink: boolValue(r, "link"),
 	}

-	if err := s.daemon.ContainerRm(name, config); err != nil {
+	if err := s.daemon.ContainerRm(ctx, name, config); err != nil {
 		// Force a 404 for the empty string
 		if strings.Contains(strings.ToLower(err.Error()), "prefix can't be empty") {
 			return fmt.Errorf("no such id: \"\"")
@@ -434,7 +434,7 @@ func (s *Server) postContainersResize(ctx context.Context, w http.ResponseWriter
 		return err
 	}

-	return s.daemon.ContainerResize(vars["name"], height, width)
+	return s.daemon.ContainerResize(ctx, vars["name"], height, width)
 }

 func (s *Server) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -446,7 +446,7 @@ func (s *Server) postContainersAttach(ctx context.Context, w http.ResponseWriter
 	}
 	containerName := vars["name"]

-	if !s.daemon.Exists(containerName) {
+	if !s.daemon.Exists(ctx, containerName) {
 		return derr.ErrorCodeNoSuchContainer.WithArgs(containerName)
 	}

@@ -472,7 +472,7 @@ func (s *Server) postContainersAttach(ctx context.Context, w http.ResponseWriter
 		Stream: boolValue(r, "stream"),
 	}

-	if err := s.daemon.ContainerAttachWithLogs(containerName, attachWithLogsConfig); err != nil {
+	if err := s.daemon.ContainerAttachWithLogs(ctx, containerName, attachWithLogsConfig); err != nil {
 		fmt.Fprintf(outStream, "Error attaching: %s\n", err)
 	}

@@ -488,7 +488,7 @@ func (s *Server) wsContainersAttach(ctx context.Context, w http.ResponseWriter,
 	}
 	containerName := vars["name"]

-	if !s.daemon.Exists(containerName) {
+	if !s.daemon.Exists(ctx, containerName) {
 		return derr.ErrorCodeNoSuchContainer.WithArgs(containerName)
 	}

@@ -503,7 +503,7 @@ func (s *Server) wsContainersAttach(ctx context.Context, w http.ResponseWriter,
 			Stream: boolValue(r, "stream"),
 		}

-		if err := s.daemon.ContainerWsAttachWithLogs(containerName, wsAttachWithLogsConfig); err != nil {
+		if err := s.daemon.ContainerWsAttachWithLogs(ctx, containerName, wsAttachWithLogsConfig); err != nil {
 			logrus.Errorf("Error attaching websocket: %s", err)
 		}
 	})
@@ -32,7 +32,7 @@ func (s *Server) postContainersCopy(ctx context.Context, w http.ResponseWriter,
 		return fmt.Errorf("Path cannot be empty")
 	}

-	data, err := s.daemon.ContainerCopy(vars["name"], cfg.Resource)
+	data, err := s.daemon.ContainerCopy(ctx, vars["name"], cfg.Resource)
 	if err != nil {
 		if strings.Contains(strings.ToLower(err.Error()), "no such id") {
 			w.WriteHeader(http.StatusNotFound)
@@ -74,7 +74,7 @@ func (s *Server) headContainersArchive(ctx context.Context, w http.ResponseWrite
 		return err
 	}

-	stat, err := s.daemon.ContainerStatPath(v.name, v.path)
+	stat, err := s.daemon.ContainerStatPath(ctx, v.name, v.path)
 	if err != nil {
 		return err
 	}
@@ -88,7 +88,7 @@ func (s *Server) getContainersArchive(ctx context.Context, w http.ResponseWriter
 		return err
 	}

-	tarArchive, stat, err := s.daemon.ContainerArchivePath(v.name, v.path)
+	tarArchive, stat, err := s.daemon.ContainerArchivePath(ctx, v.name, v.path)
 	if err != nil {
 		return err
 	}
@@ -111,5 +111,5 @@ func (s *Server) putContainersArchive(ctx context.Context, w http.ResponseWriter
 	}

 	noOverwriteDirNonDir := boolValue(r, "noOverwriteDirNonDir")
-	return s.daemon.ContainerExtractToDir(v.name, v.path, noOverwriteDirNonDir, r.Body)
+	return s.daemon.ContainerExtractToDir(ctx, v.name, v.path, noOverwriteDirNonDir, r.Body)
 }
@@ -45,7 +45,7 @@ func (s *Server) getVersion(ctx context.Context, w http.ResponseWriter, r *http.
 }

 func (s *Server) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	info, err := s.daemon.SystemInfo()
+	info, err := s.daemon.SystemInfo(ctx)
 	if err != nil {
 		return err
 	}
@@ -120,7 +120,7 @@ func (s *Server) getEvents(ctx context.Context, w http.ResponseWriter, r *http.R
 	enc := json.NewEncoder(outStream)

 	getContainerID := func(cn string) string {
-		c, err := d.Get(cn)
+		c, err := d.Get(ctx, cn)
 		if err != nil {
 			return ""
 		}
@@ -19,7 +19,7 @@ func (s *Server) getExecByID(ctx context.Context, w http.ResponseWriter, r *http
 		return fmt.Errorf("Missing parameter 'id'")
 	}

-	eConfig, err := s.daemon.ContainerExecInspect(vars["id"])
+	eConfig, err := s.daemon.ContainerExecInspect(ctx, vars["id"])
 	if err != nil {
 		return err
 	}
@@ -47,7 +47,7 @@ func (s *Server) postContainerExecCreate(ctx context.Context, w http.ResponseWri
 	}

 	// Register an instance of Exec in container.
-	id, err := s.daemon.ContainerExecCreate(execConfig)
+	id, err := s.daemon.ContainerExecCreate(ctx, execConfig)
 	if err != nil {
 		logrus.Errorf("Error setting up exec command in container %s: %s", name, err)
 		return err
@@ -100,7 +100,7 @@ func (s *Server) postContainerExecStart(ctx context.Context, w http.ResponseWrit
 	}

 	// Now run the user process in container.
-	if err := s.daemon.ContainerExecStart(execName, stdin, stdout, stderr); err != nil {
+	if err := s.daemon.ContainerExecStart(ctx, execName, stdin, stdout, stderr); err != nil {
 		fmt.Fprintf(outStream, "Error running exec in container: %v\n", err)
 	}
 	return nil
@@ -123,5 +123,5 @@ func (s *Server) postContainerExecResize(ctx context.Context, w http.ResponseWri
 		return err
 	}

-	return s.daemon.ContainerExecResize(vars["name"], height, width)
+	return s.daemon.ContainerExecResize(ctx, vars["name"], height, width)
 }
@@ -55,7 +55,7 @@ func (s *Server) postCommit(ctx context.Context, w http.ResponseWriter, r *http.
 		Config: c,
 	}

-	imgID, err := builder.Commit(cname, s.daemon, commitCfg)
+	imgID, err := builder.Commit(ctx, cname, s.daemon, commitCfg)
 	if err != nil {
 		return err
 	}
@@ -112,7 +112,7 @@ func (s *Server) postImagesCreate(ctx context.Context, w http.ResponseWriter, r
 			OutStream: output,
 		}

-		err = s.daemon.Repositories().Pull(image, tag, imagePullConfig)
+		err = s.daemon.Repositories(ctx).Pull(ctx, image, tag, imagePullConfig)
 	} else { //import
 		if tag == "" {
 			repo, tag = parsers.ParseRepositoryTag(repo)
@@ -124,12 +124,12 @@ func (s *Server) postImagesCreate(ctx context.Context, w http.ResponseWriter, r
 		// generated from the download to be available to the output
 		// stream processing below
 		var newConfig *runconfig.Config
-		newConfig, err = builder.BuildFromConfig(s.daemon, &runconfig.Config{}, r.Form["changes"])
+		newConfig, err = builder.BuildFromConfig(ctx, s.daemon, &runconfig.Config{}, r.Form["changes"])
 		if err != nil {
 			return err
 		}

-		err = s.daemon.Repositories().Import(src, repo, tag, message, r.Body, output, newConfig)
+		err = s.daemon.Repositories(ctx).Import(ctx, src, repo, tag, message, r.Body, output, newConfig)
 	}
 	if err != nil {
 		if !output.Flushed() {
@@ -184,7 +184,7 @@ func (s *Server) postImagesPush(ctx context.Context, w http.ResponseWriter, r *h

 	w.Header().Set("Content-Type", "application/json")

-	if err := s.daemon.Repositories().Push(name, imagePushConfig); err != nil {
+	if err := s.daemon.Repositories(ctx).Push(ctx, name, imagePushConfig); err != nil {
 		if !output.Flushed() {
 			return err
 		}
@@ -212,7 +212,7 @@ func (s *Server) getImagesGet(ctx context.Context, w http.ResponseWriter, r *htt
 		names = r.Form["names"]
 	}

-	if err := s.daemon.Repositories().ImageExport(names, output); err != nil {
+	if err := s.daemon.Repositories(ctx).ImageExport(names, output); err != nil {
 		if !output.Flushed() {
 			return err
 		}
@@ -223,7 +223,7 @@ func (s *Server) getImagesGet(ctx context.Context, w http.ResponseWriter, r *htt
 }

 func (s *Server) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	return s.daemon.Repositories().Load(r.Body, w)
+	return s.daemon.Repositories(ctx).Load(r.Body, w)
 }

 func (s *Server) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -243,7 +243,7 @@ func (s *Server) deleteImages(ctx context.Context, w http.ResponseWriter, r *htt
 	force := boolValue(r, "force")
 	prune := !boolValue(r, "noprune")

-	list, err := s.daemon.ImageDelete(name, force, prune)
+	list, err := s.daemon.ImageDelete(ctx, name, force, prune)
 	if err != nil {
 		return err
 	}
@@ -256,7 +256,7 @@ func (s *Server) getImagesByName(ctx context.Context, w http.ResponseWriter, r *
 		return fmt.Errorf("Missing parameter")
 	}

-	imageInspect, err := s.daemon.Repositories().Lookup(vars["name"])
+	imageInspect, err := s.daemon.Repositories(ctx).Lookup(vars["name"])
 	if err != nil {
 		return err
 	}
@@ -346,7 +346,7 @@ func (s *Server) postBuild(ctx context.Context, w http.ResponseWriter, r *http.R
 		}()
 	}

-	if err := builder.Build(s.daemon, buildConfig); err != nil {
+	if err := builder.Build(ctx, s.daemon, buildConfig); err != nil {
 		// Do not write the error in the http output if it's still empty.
 		// This prevents from writing a 200(OK) when there is an interal error.
 		if !output.Flushed() {
@@ -364,7 +364,7 @@ func (s *Server) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *ht
 	}

 	// FIXME: The filter parameter could just be a match filter
-	images, err := s.daemon.Repositories().Images(r.Form.Get("filters"), r.Form.Get("filter"), boolValue(r, "all"))
+	images, err := s.daemon.Repositories(ctx).Images(r.Form.Get("filters"), r.Form.Get("filter"), boolValue(r, "all"))
 	if err != nil {
 		return err
 	}
@@ -378,7 +378,7 @@ func (s *Server) getImagesHistory(ctx context.Context, w http.ResponseWriter, r
 	}

 	name := vars["name"]
-	history, err := s.daemon.Repositories().History(name)
+	history, err := s.daemon.Repositories(ctx).History(name)
 	if err != nil {
 		return err
 	}
@@ -398,10 +398,10 @@ func (s *Server) postImagesTag(ctx context.Context, w http.ResponseWriter, r *ht
 	tag := r.Form.Get("tag")
 	force := boolValue(r, "force")
 	name := vars["name"]
-	if err := s.daemon.Repositories().Tag(repo, tag, name, force); err != nil {
+	if err := s.daemon.Repositories(ctx).Tag(repo, tag, name, force); err != nil {
 		return err
 	}
-	s.daemon.EventsService.Log("tag", utils.ImageReference(repo, tag), "")
+	s.daemon.EventsService.Log(ctx, "tag", utils.ImageReference(repo, tag), "")
 	w.WriteHeader(http.StatusCreated)
 	return nil
 }
@@ -20,11 +20,11 @@ func (s *Server) getContainersByName(ctx context.Context, w http.ResponseWriter,

 	switch {
 	case version.LessThan("1.20"):
-		json, err = s.daemon.ContainerInspectPre120(vars["name"])
+		json, err = s.daemon.ContainerInspectPre120(ctx, vars["name"])
 	case version.Equal("1.20"):
-		json, err = s.daemon.ContainerInspect120(vars["name"])
+		json, err = s.daemon.ContainerInspect120(ctx, vars["name"])
 	default:
-		json, err = s.daemon.ContainerInspect(vars["name"])
+		json, err = s.daemon.ContainerInspect(ctx, vars["name"])
 	}

 	if err != nil {
@@ -18,6 +18,7 @@ import (
 	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/pkg/sockets"
+	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/utils"
 )

@@ -41,12 +42,12 @@ type Server struct {
 }

 // New returns a new instance of the server based on the specified configuration.
-func New(cfg *Config) *Server {
+func New(ctx context.Context, cfg *Config) *Server {
 	srv := &Server{
 		cfg: cfg,
 		start: make(chan struct{}),
 	}
-	srv.router = createRouter(srv)
+	srv.router = createRouter(ctx, srv)
 	return srv
 }

@@ -290,7 +291,7 @@ func (s *Server) initTCPSocket(addr string) (l net.Listener, err error) {
 	return
 }

-func (s *Server) makeHTTPHandler(localMethod string, localRoute string, localHandler HTTPAPIFunc) http.HandlerFunc {
+func (s *Server) makeHTTPHandler(ctx context.Context, localMethod string, localRoute string, localHandler HTTPAPIFunc) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
 		// log the handler generation
 		logrus.Debugf("Calling %s %s", localMethod, localRoute)
@@ -302,7 +303,8 @@ func (s *Server) makeHTTPHandler(localMethod string, localRoute string, localHan
 		// apply to all requests. Data that is specific to the
 		// immediate function being called should still be passed
 		// as 'args' on the function call.
-		ctx := context.Background()
+		reqID := stringid.TruncateID(stringid.GenerateNonCryptoID())
+		ctx = context.WithValue(ctx, context.RequestID, reqID)
 		handlerFunc := s.handleWithGlobalMiddlewares(localHandler)

 		if err := handlerFunc(ctx, w, r, mux.Vars(r)); err != nil {
@@ -314,7 +316,7 @@ func (s *Server) makeHTTPHandler(localMethod string, localRoute string, localHan

 // createRouter initializes the main router the server uses.
 // we keep enableCors just for legacy usage, need to be removed in the future
-func createRouter(s *Server) *mux.Router {
+func createRouter(ctx context.Context, s *Server) *mux.Router {
 	r := mux.NewRouter()
 	if os.Getenv("DEBUG") != "" {
 		profilerSetup(r, "/debug/")
@@ -394,7 +396,7 @@ func createRouter(s *Server) *mux.Router {
 			localMethod := method

 			// build the handler function
-			f := s.makeHTTPHandler(localMethod, localRoute, localFct)
+			f := s.makeHTTPHandler(ctx, localMethod, localRoute, localFct)

 			// add the new route
 			if localRoute == "" {
@@ -2,8 +2,12 @@

 package server

-func (s *Server) registerSubRouter() {
-	httpHandler := s.daemon.NetworkAPIRouter()
+import (
+	"github.com/docker/docker/context"
+)
+
+func (s *Server) registerSubRouter(ctx context.Context) {
+	httpHandler := s.daemon.NetworkAPIRouter(ctx)

 	subrouter := s.router.PathPrefix("/v{version:[0-9.]+}/networks").Subrouter()
 	subrouter.Methods("GET", "POST", "PUT", "DELETE").HandlerFunc(httpHandler)
@@ -8,6 +8,7 @@ import (
 	"net/http"
 	"strconv"

+	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/pkg/sockets"
 	"github.com/docker/libnetwork/portallocator"
@@ -63,10 +64,10 @@ func (s *Server) newServer(proto, addr string) ([]serverCloser, error) {
 // AcceptConnections allows clients to connect to the API server.
 // Referenced Daemon is notified about this server, and waits for the
 // daemon acknowledgement before the incoming connections are accepted.
-func (s *Server) AcceptConnections(d *daemon.Daemon) {
+func (s *Server) AcceptConnections(ctx context.Context, d *daemon.Daemon) {
 	// Tell the init daemon we are accepting requests
 	s.daemon = d
-	s.registerSubRouter()
+	s.registerSubRouter(ctx)
 	go systemdDaemon.SdNotify("READY=1")
 	// close the lock so the listeners start accepting connections
 	select {
@@ -7,6 +7,7 @@ import (
 	"net"
 	"net/http"

+	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon"
 )

@@ -42,9 +43,9 @@ func (s *Server) newServer(proto, addr string) ([]serverCloser, error) {
 }

 // AcceptConnections allows router to start listening for the incoming requests.
-func (s *Server) AcceptConnections(d *daemon.Daemon) {
+func (s *Server) AcceptConnections(ctx context.Context, d *daemon.Daemon) {
 	s.daemon = d
-	s.registerSubRouter()
+	s.registerSubRouter(ctx)
 	// close the lock so the listeners start accepting connections
 	select {
 	case <-s.start:
@@ -13,7 +13,7 @@ func (s *Server) getVolumesList(ctx context.Context, w http.ResponseWriter, r *h
 		return err
 	}

-	volumes, err := s.daemon.Volumes(r.Form.Get("filters"))
+	volumes, err := s.daemon.Volumes(ctx, r.Form.Get("filters"))
 	if err != nil {
 		return err
 	}
@@ -25,7 +25,7 @@ func (s *Server) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *
 		return err
 	}

-	v, err := s.daemon.VolumeInspect(vars["name"])
+	v, err := s.daemon.VolumeInspect(ctx, vars["name"])
 	if err != nil {
 		return err
 	}
@@ -46,7 +46,7 @@ func (s *Server) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r
 		return err
 	}

-	volume, err := s.daemon.VolumeCreate(req.Name, req.Driver, req.DriverOpts)
+	volume, err := s.daemon.VolumeCreate(ctx, req.Name, req.Driver, req.DriverOpts)
 	if err != nil {
 		return err
 	}
@@ -57,7 +57,7 @@ func (s *Server) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *ht
 	if err := parseForm(r); err != nil {
 		return err
 	}
-	if err := s.daemon.VolumeRm(vars["name"]); err != nil {
+	if err := s.daemon.VolumeRm(ctx, vars["name"]); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)
@@ -18,6 +18,7 @@ import (
 	"strings"

 	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/context"
 	derr "github.com/docker/docker/errors"
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/pkg/nat"
@@ -43,7 +44,7 @@ func nullDispatch(b *builder, args []string, attributes map[string]bool, origina
 // Sets the environment variable foo to bar, also makes interpolation
 // in the dockerfile available from the next statement on via ${foo}.
 //
-func env(b *builder, args []string, attributes map[string]bool, original string) error {
+func env(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) == 0 {
 		return derr.ErrorCodeAtLeastOneArg.WithArgs("ENV")
 	}
@@ -96,13 +97,13 @@ func env(b *builder, args []string, attributes map[string]bool, original string)
 		j++
 	}

-	return b.commit("", b.Config.Cmd, commitStr)
+	return b.commit(ctx, "", b.Config.Cmd, commitStr)
 }

 // MAINTAINER some text <maybe@an.email.address>
 //
 // Sets the maintainer metadata.
-func maintainer(b *builder, args []string, attributes map[string]bool, original string) error {
+func maintainer(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) != 1 {
 		return derr.ErrorCodeExactlyOneArg.WithArgs("MAINTAINER")
 	}
@@ -112,14 +113,14 @@ func maintainer(b *builder, args []string, attributes map[string]bool, original
 	}

 	b.maintainer = args[0]
-	return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
+	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
 }

 // LABEL some json data describing the image
 //
 // Sets the Label variable foo to bar,
 //
-func label(b *builder, args []string, attributes map[string]bool, original string) error {
+func label(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) == 0 {
 		return derr.ErrorCodeAtLeastOneArg.WithArgs("LABEL")
 	}
@@ -147,7 +148,7 @@ func label(b *builder, args []string, attributes map[string]bool, original strin
 		b.Config.Labels[args[j]] = args[j+1]
 		j++
 	}
-	return b.commit("", b.Config.Cmd, commitStr)
+	return b.commit(ctx, "", b.Config.Cmd, commitStr)
 }

 // ADD foo /path
@@ -155,7 +156,7 @@ func label(b *builder, args []string, attributes map[string]bool, original strin
 // Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
 // exist here. If you do not wish to have this automatic handling, use COPY.
 //
-func add(b *builder, args []string, attributes map[string]bool, original string) error {
+func add(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) < 2 {
 		return derr.ErrorCodeAtLeastTwoArgs.WithArgs("ADD")
 	}
@@ -164,14 +165,14 @@ func add(b *builder, args []string, attributes map[string]bool, original string)
 		return err
 	}

-	return b.runContextCommand(args, true, true, "ADD")
+	return b.runContextCommand(ctx, args, true, true, "ADD")
 }

 // COPY foo /path
 //
 // Same as 'ADD' but without the tar and remote url handling.
 //
-func dispatchCopy(b *builder, args []string, attributes map[string]bool, original string) error {
+func dispatchCopy(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) < 2 {
 		return derr.ErrorCodeAtLeastTwoArgs.WithArgs("COPY")
 	}
@@ -180,14 +181,14 @@ func dispatchCopy(b *builder, args []string, attributes map[string]bool, origina
 		return err
 	}

-	return b.runContextCommand(args, false, false, "COPY")
+	return b.runContextCommand(ctx, args, false, false, "COPY")
 }

 // FROM imagename
 //
 // This sets the image the dockerfile will build on top of.
 //
-func from(b *builder, args []string, attributes map[string]bool, original string) error {
+func from(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) != 1 {
 		return derr.ErrorCodeExactlyOneArg.WithArgs("FROM")
 	}
@@ -208,16 +209,16 @@ func from(b *builder, args []string, attributes map[string]bool, original string
 		return nil
 	}

-	image, err := b.Daemon.Repositories().LookupImage(name)
+	image, err := b.Daemon.Repositories(ctx).LookupImage(name)
 	if b.Pull {
-		image, err = b.pullImage(name)
+		image, err = b.pullImage(ctx, name)
 		if err != nil {
 			return err
 		}
 	}
 	if err != nil {
-		if b.Daemon.Graph().IsNotExist(err, name) {
-			image, err = b.pullImage(name)
+		if b.Daemon.Graph(ctx).IsNotExist(err, name) {
+			image, err = b.pullImage(ctx, name)
 		}

 		// note that the top level err will still be !nil here if IsNotExist is
@@ -227,7 +228,7 @@ func from(b *builder, args []string, attributes map[string]bool, original string
 		}
 	}

-	return b.processImageFrom(image)
+	return b.processImageFrom(ctx, image)
 }

 // ONBUILD RUN echo yo
@@ -239,7 +240,7 @@ func from(b *builder, args []string, attributes map[string]bool, original string
 // special cases. search for 'OnBuild' in internals.go for additional special
 // cases.
 //
-func onbuild(b *builder, args []string, attributes map[string]bool, original string) error {
+func onbuild(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) == 0 {
 		return derr.ErrorCodeAtLeastOneArg.WithArgs("ONBUILD")
 	}
@@ -259,14 +260,14 @@ func onbuild(b *builder, args []string, attributes map[string]bool, original str
 	original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "")

 	b.Config.OnBuild = append(b.Config.OnBuild, original)
-	return b.commit("", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", original))
+	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", original))
 }

 // WORKDIR /tmp
 //
 // Set the working directory for future RUN/CMD/etc statements.
 //
-func workdir(b *builder, args []string, attributes map[string]bool, original string) error {
+func workdir(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) != 1 {
 		return derr.ErrorCodeExactlyOneArg.WithArgs("WORKDIR")
 	}
@@ -286,7 +287,7 @@ func workdir(b *builder, args []string, attributes map[string]bool, original str

 	b.Config.WorkingDir = workdir

-	return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
+	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
 }

 // RUN some command yo
@@ -299,7 +300,7 @@ func workdir(b *builder, args []string, attributes map[string]bool, original str
 // RUN echo hi # cmd /S /C echo hi (Windows)
 // RUN [ "echo", "hi" ] # echo hi
 //
-func run(b *builder, args []string, attributes map[string]bool, original string) error {
+func run(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
 	if b.image == "" && !b.noBaseImage {
 		return derr.ErrorCodeMissingFrom
 	}
@@ -380,7 +381,7 @@ func run(b *builder, args []string, attributes map[string]bool, original string)
 	}

 	b.Config.Cmd = saveCmd
-	hit, err := b.probeCache()
+	hit, err := b.probeCache(ctx)
 	if err != nil {
 		return err
 	}
@@ -395,17 +396,17 @@ func run(b *builder, args []string, attributes map[string]bool, original string)

 	logrus.Debugf("[BUILDER] Command to be executed: %v", b.Config.Cmd)

-	c, err := b.create()
+	c, err := b.create(ctx)
 	if err != nil {
 		return err
 	}

 	// Ensure that we keep the container mounted until the commit
 	// to avoid unmounting and then mounting directly again
-	c.Mount()
-	defer c.Unmount()
+	c.Mount(ctx)
+	defer c.Unmount(ctx)

-	err = b.run(c)
+	err = b.run(ctx, c)
 	if err != nil {
 		return err
 	}
@@ -415,7 +416,7 @@ func run(b *builder, args []string, attributes map[string]bool, original string)
 	// properly match it.
 	b.Config.Env = env
 	b.Config.Cmd = saveCmd
-	if err := b.commit(c.ID, cmd, "run"); err != nil {
+	if err := b.commit(ctx, c.ID, cmd, "run"); err != nil {
 		return err
 	}

@@ -427,7 +428,7 @@ func run(b *builder, args []string, attributes map[string]bool, original string)
 // Set the default command to run in the container (which may be empty).
 // Argument handling is the same as RUN.
 //
-func cmd(b *builder, args []string, attributes map[string]bool, original string) error {
+func cmd(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
 	if err := b.BuilderFlags.Parse(); err != nil {
 		return err
 	}
@@ -444,7 +445,7 @@ func cmd(b *builder, args []string, attributes map[string]bool, original string)

 	b.Config.Cmd = stringutils.NewStrSlice(cmdSlice...)

-	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil {
+	if err := b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil {
 		return err
 	}

@@ -463,7 +464,7 @@ func cmd(b *builder, args []string, attributes map[string]bool, original string)
 // Handles command processing similar to CMD and RUN, only b.Config.Entrypoint
 // is initialized at NewBuilder time instead of through argument parsing.
 //
-func entrypoint(b *builder, args []string, attributes map[string]bool, original string) error {
+func entrypoint(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
 	if err := b.BuilderFlags.Parse(); err != nil {
 		return err
 	}
@@ -492,7 +493,7 @@ func entrypoint(b *builder, args []string, attributes map[string]bool, original
 		b.Config.Cmd = nil
 	}

-	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.Config.Entrypoint)); err != nil {
+	if err := b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.Config.Entrypoint)); err != nil {
 		return err
 	}

@@ -504,7 +505,7 @@ func entrypoint(b *builder, args []string, attributes map[string]bool, original
 // Expose ports for links and port mappings. This all ends up in
 // b.Config.ExposedPorts for runconfig.
 //
-func expose(b *builder, args []string, attributes map[string]bool, original string) error {
+func expose(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
 	portsTab := args

 	if len(args) == 0 {
@@ -537,7 +538,7 @@ func expose(b *builder, args []string, attributes map[string]bool, original stri
 		i++
 	}
 	sort.Strings(portList)
-	return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " ")))
+	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " ")))
 }

 // USER foo
@@ -545,7 +546,7 @@ func expose(b *builder, args []string, attributes map[string]bool, original stri
 // Set the user to 'foo' for future commands and when running the
 // ENTRYPOINT/CMD at container run time.
 //
-func user(b *builder, args []string, attributes map[string]bool, original string) error {
+func user(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) != 1 {
 		return derr.ErrorCodeExactlyOneArg.WithArgs("USER")
 	}
@@ -555,14 +556,14 @@ func user(b *builder, args []string, attributes map[string]bool, original string
 	}

 	b.Config.User = args[0]
-	return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args))
+	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("USER %v", args))
 }

 // VOLUME /foo
 //
 // Expose the volume /foo for use. Will also accept the JSON array form.
 //
-func volume(b *builder, args []string, attributes map[string]bool, original string) error {
+func volume(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) == 0 {
 		return derr.ErrorCodeAtLeastOneArg.WithArgs("VOLUME")
 	}
@@ -581,7 +582,7 @@ func volume(b *builder, args []string, attributes map[string]bool, original stri
 		}
 		b.Config.Volumes[v] = struct{}{}
 	}
-	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil {
+	if err := b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil {
 		return err
 	}
 	return nil
@@ -590,7 +591,7 @@ func volume(b *builder, args []string, attributes map[string]bool, original stri
 // STOPSIGNAL signal
 //
 // Set the signal that will be used to kill the container.
-func stopSignal(b *builder, args []string, attributes map[string]bool, original string) error {
+func stopSignal(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) != 1 {
 		return fmt.Errorf("STOPSIGNAL requires exactly one argument")
 	}
@@ -602,7 +603,7 @@ func stopSignal(b *builder, args []string, attributes map[string]bool, original
 	}

 	b.Config.StopSignal = sig
-	return b.commit("", b.Config.Cmd, fmt.Sprintf("STOPSIGNAL %v", args))
+	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("STOPSIGNAL %v", args))
 }

 // ARG name[=value]
@@ -610,7 +611,7 @@ func stopSignal(b *builder, args []string, attributes map[string]bool, original
 // Adds the variable foo to the trusted list of variables that can be passed
 // to builder using the --build-arg flag for expansion/subsitution or passing to 'run'.
 // Dockerfile author may optionally set a default value of this variable.
-func arg(b *builder, args []string, attributes map[string]bool, original string) error {
+func arg(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) != 1 {
 		return fmt.Errorf("ARG requires exactly one argument definition")
 	}
@@ -646,5 +647,5 @@ func arg(b *builder, args []string, attributes map[string]bool, original string)
 		b.buildArgs[name] = value
 	}

-	return b.commit("", b.Config.Cmd, fmt.Sprintf("ARG %s", arg))
+	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("ARG %s", arg))
 }
@@ -32,6 +32,7 @@ import (
 	"github.com/docker/docker/builder/command"
 	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/cliconfig"
+	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/streamformatter"
@@ -57,10 +58,10 @@ var replaceEnvAllowed = map[string]struct{}{
 	command.Arg: {},
 }

-var evaluateTable map[string]func(*builder, []string, map[string]bool, string) error
+var evaluateTable map[string]func(context.Context, *builder, []string, map[string]bool, string) error

 func init() {
-	evaluateTable = map[string]func(*builder, []string, map[string]bool, string) error{
+	evaluateTable = map[string]func(context.Context, *builder, []string, map[string]bool, string) error{
 		command.Env: env,
 		command.Label: label,
 		command.Maintainer: maintainer,
@@ -158,7 +159,7 @@ type builder struct {
 // processing.
 // * Print a happy message and return the image ID.
 //
-func (b *builder) Run(context io.Reader) (string, error) {
+func (b *builder) Run(ctx context.Context, context io.Reader) (string, error) {
 	if err := b.readContext(context); err != nil {
 		return "", err
 	}
@@ -187,15 +188,15 @@ func (b *builder) Run(context io.Reader) (string, error) {
 		default:
 			// Not cancelled yet, keep going...
 		}
-		if err := b.dispatch(i, n); err != nil {
+		if err := b.dispatch(ctx, i, n); err != nil {
 			if b.ForceRemove {
-				b.clearTmp()
+				b.clearTmp(ctx)
 			}
 			return "", err
 		}
 		fmt.Fprintf(b.OutStream, " ---> %s\n", stringid.TruncateID(b.image))
 		if b.Remove {
-			b.clearTmp()
+			b.clearTmp(ctx)
 		}
 	}

@@ -311,7 +312,7 @@ func (b *builder) isBuildArgAllowed(arg string) bool {
 // such as `RUN` in ONBUILD RUN foo. There is special case logic in here to
 // deal with that, at least until it becomes more of a general concern with new
 // features.
-func (b *builder) dispatch(stepN int, ast *parser.Node) error {
+func (b *builder) dispatch(ctx context.Context, stepN int, ast *parser.Node) error {
 	cmd := ast.Value

 	// To ensure the user is give a decent error message if the platform
@@ -404,7 +405,7 @@ func (b *builder) dispatch(stepN int, ast *parser.Node) error {
 	if f, ok := evaluateTable[cmd]; ok {
 		b.BuilderFlags = NewBFlags()
 		b.BuilderFlags.Args = flags
-		return f(b, strList, attrs, original)
+		return f(ctx, b, strList, attrs, original)
 	}

 	return fmt.Errorf("Unknown instruction: %s", strings.ToUpper(cmd))
@@ -22,6 +22,7 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/cliconfig"
+	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/image"
@@ -75,7 +76,7 @@ func (b *builder) readContext(context io.Reader) (err error) {
 	return
 }

-func (b *builder) commit(id string, autoCmd *stringutils.StrSlice, comment string) error {
+func (b *builder) commit(ctx context.Context, id string, autoCmd *stringutils.StrSlice, comment string) error {
 	if b.disableCommit {
 		return nil
 	}
@@ -92,7 +93,7 @@ func (b *builder) commit(id string, autoCmd *stringutils.StrSlice, comment strin
 		}
 		defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)

-		hit, err := b.probeCache()
+		hit, err := b.probeCache(ctx)
 		if err != nil {
 			return err
 		}
@@ -100,18 +101,18 @@ func (b *builder) commit(id string, autoCmd *stringutils.StrSlice, comment strin
 			return nil
 		}

-		container, err := b.create()
+		container, err := b.create(ctx)
 		if err != nil {
 			return err
 		}
 		id = container.ID

-		if err := container.Mount(); err != nil {
+		if err := container.Mount(ctx); err != nil {
 			return err
 		}
-		defer container.Unmount()
+		defer container.Unmount(ctx)
 	}
-	container, err := b.Daemon.Get(id)
+	container, err := b.Daemon.Get(ctx, id)
 	if err != nil {
 		return err
 	}
@@ -127,11 +128,11 @@ func (b *builder) commit(id string, autoCmd *stringutils.StrSlice, comment strin
 	}

 	// Commit the container
-	image, err := b.Daemon.Commit(container, commitCfg)
+	image, err := b.Daemon.Commit(ctx, container, commitCfg)
 	if err != nil {
 		return err
 	}
-	b.Daemon.Graph().Retain(b.id, image.ID)
+	b.Daemon.Graph(ctx).Retain(b.id, image.ID)
 	b.activeImages = append(b.activeImages, image.ID)
 	b.image = image.ID
 	return nil
@@ -145,7 +146,7 @@ type copyInfo struct {
 	tmpDir string
 }

-func (b *builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
+func (b *builder) runContextCommand(ctx context.Context, args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
 	if b.context == nil {
 		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
 	}
@@ -223,7 +224,7 @@ func (b *builder) runContextCommand(args []string, allowRemote bool, allowDecomp
 	}
 	defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)

-	hit, err := b.probeCache()
+	hit, err := b.probeCache(ctx)
 	if err != nil {
 		return err
 	}
@@ -232,16 +233,16 @@ func (b *builder) runContextCommand(args []string, allowRemote bool, allowDecomp
 		return nil
 	}

-	container, _, err := b.Daemon.ContainerCreate("", b.Config, nil, true)
+	container, _, err := b.Daemon.ContainerCreate(ctx, "", b.Config, nil, true)
 	if err != nil {
 		return err
 	}
 	b.TmpContainers[container.ID] = struct{}{}

-	if err := container.Mount(); err != nil {
+	if err := container.Mount(ctx); err != nil {
 		return err
 	}
-	defer container.Unmount()
+	defer container.Unmount(ctx)

 	for _, ci := range copyInfos {
 		if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
@@ -249,7 +250,7 @@ func (b *builder) runContextCommand(args []string, allowRemote bool, allowDecomp
 		}
 	}

-	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
+	if err := b.commit(ctx, container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
 		return err
 	}
 	return nil
@@ -484,7 +485,7 @@ func containsWildcards(name string) bool {
|
| 484 | 484 |
return false |
| 485 | 485 |
} |
| 486 | 486 |
|
| 487 |
-func (b *builder) pullImage(name string) (*image.Image, error) {
|
|
| 487 |
+func (b *builder) pullImage(ctx context.Context, name string) (*image.Image, error) {
|
|
| 488 | 488 |
remote, tag := parsers.ParseRepositoryTag(name) |
| 489 | 489 |
if tag == "" {
|
| 490 | 490 |
tag = "latest" |
| ... | ... |
@@ -510,11 +511,11 @@ func (b *builder) pullImage(name string) (*image.Image, error) {
|
| 510 | 510 |
OutStream: ioutils.NopWriteCloser(b.OutOld), |
| 511 | 511 |
} |
| 512 | 512 |
|
| 513 |
- if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
|
|
| 513 |
+ if err := b.Daemon.Repositories(ctx).Pull(ctx, remote, tag, imagePullConfig); err != nil {
|
|
| 514 | 514 |
return nil, err |
| 515 | 515 |
} |
| 516 | 516 |
|
| 517 |
- image, err := b.Daemon.Repositories().LookupImage(name) |
|
| 517 |
+ image, err := b.Daemon.Repositories(ctx).LookupImage(name) |
|
| 518 | 518 |
if err != nil {
|
| 519 | 519 |
return nil, err |
| 520 | 520 |
} |
| ... | ... |
@@ -522,7 +523,7 @@ func (b *builder) pullImage(name string) (*image.Image, error) {
|
| 522 | 522 |
return image, nil |
| 523 | 523 |
} |
| 524 | 524 |
|
| 525 |
-func (b *builder) processImageFrom(img *image.Image) error {
|
|
| 525 |
+func (b *builder) processImageFrom(ctx context.Context, img *image.Image) error {
|
|
| 526 | 526 |
b.image = img.ID |
| 527 | 527 |
|
| 528 | 528 |
if img.Config != nil {
|
| ... | ... |
@@ -562,7 +563,7 @@ func (b *builder) processImageFrom(img *image.Image) error {
|
| 562 | 562 |
return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
|
| 563 | 563 |
} |
| 564 | 564 |
|
| 565 |
- if err := b.dispatch(i, n); err != nil {
|
|
| 565 |
+ if err := b.dispatch(ctx, i, n); err != nil {
|
|
| 566 | 566 |
return err |
| 567 | 567 |
} |
| 568 | 568 |
} |
| ... | ... |
@@ -576,12 +577,12 @@ func (b *builder) processImageFrom(img *image.Image) error {
|
| 576 | 576 |
// in the current server `b.Daemon`. If an image is found, probeCache returns |
| 577 | 577 |
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there |
| 578 | 578 |
// is any error, it returns `(false, err)`. |
| 579 |
-func (b *builder) probeCache() (bool, error) {
|
|
| 579 |
+func (b *builder) probeCache(ctx context.Context) (bool, error) {
|
|
| 580 | 580 |
if !b.UtilizeCache || b.cacheBusted {
|
| 581 | 581 |
return false, nil |
| 582 | 582 |
} |
| 583 | 583 |
|
| 584 |
- cache, err := b.Daemon.ImageGetCached(b.image, b.Config) |
|
| 584 |
+ cache, err := b.Daemon.ImageGetCached(ctx, b.image, b.Config) |
|
| 585 | 585 |
if err != nil {
|
| 586 | 586 |
return false, err |
| 587 | 587 |
} |
| ... | ... |
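probeCache's contract is spelled out in the comment above: on a hit the builder reuses an existing image ID instead of creating a new container. The snippet below is a minimal, hypothetical model of that lookup, keyed on the parent image plus the serialized config, purely to make the (hit, err) return contract concrete; it is not the daemon's ImageGetCached implementation.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// cacheKey is a toy stand-in for the cache lookup key: a parent image
// plus a hash of the run config identifies a previously built layer.
func cacheKey(parent, configJSON string) string {
	sum := sha256.Sum256([]byte(parent + "\x00" + configJSON))
	return fmt.Sprintf("%x", sum[:8])
}

// probeCache mirrors the documented contract: (true, nil) on a hit,
// (false, nil) on a miss, (false, err) if the lookup itself fails.
func probeCache(cache map[string]string, parent, configJSON string) (string, bool, error) {
	id, ok := cache[cacheKey(parent, configJSON)]
	if !ok {
		return "", false, nil
	}
	return id, true, nil
}

func main() {
	cache := map[string]string{
		cacheKey("img-base", `{"Cmd":["echo","hi"]}`): "img-cached",
	}

	if id, hit, err := probeCache(cache, "img-base", `{"Cmd":["echo","hi"]}`); err == nil && hit {
		fmt.Println(" ---> Using cache", id)
	}
}
```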
@@ -594,12 +595,12 @@ func (b *builder) probeCache() (bool, error) {
|
| 594 | 594 |
fmt.Fprintf(b.OutStream, " ---> Using cache\n") |
| 595 | 595 |
logrus.Debugf("[BUILDER] Use cached version")
|
| 596 | 596 |
b.image = cache.ID |
| 597 |
- b.Daemon.Graph().Retain(b.id, cache.ID) |
|
| 597 |
+ b.Daemon.Graph(ctx).Retain(b.id, cache.ID) |
|
| 598 | 598 |
b.activeImages = append(b.activeImages, cache.ID) |
| 599 | 599 |
return true, nil |
| 600 | 600 |
} |
| 601 | 601 |
|
| 602 |
-func (b *builder) create() (*daemon.Container, error) {
|
|
| 602 |
+func (b *builder) create(ctx context.Context) (*daemon.Container, error) {
|
|
| 603 | 603 |
if b.image == "" && !b.noBaseImage {
|
| 604 | 604 |
return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
|
| 605 | 605 |
} |
| ... | ... |
@@ -620,7 +621,7 @@ func (b *builder) create() (*daemon.Container, error) {
|
| 620 | 620 |
config := *b.Config |
| 621 | 621 |
|
| 622 | 622 |
// Create the container |
| 623 |
- c, warnings, err := b.Daemon.ContainerCreate("", b.Config, hostConfig, true)
|
|
| 623 |
+ c, warnings, err := b.Daemon.ContainerCreate(ctx, "", b.Config, hostConfig, true) |
|
| 624 | 624 |
if err != nil {
|
| 625 | 625 |
return nil, err |
| 626 | 626 |
} |
| ... | ... |
@@ -643,14 +644,14 @@ func (b *builder) create() (*daemon.Container, error) {
|
| 643 | 643 |
return c, nil |
| 644 | 644 |
} |
| 645 | 645 |
|
| 646 |
-func (b *builder) run(c *daemon.Container) error {
|
|
| 646 |
+func (b *builder) run(ctx context.Context, c *daemon.Container) error {
|
|
| 647 | 647 |
var errCh chan error |
| 648 | 648 |
if b.Verbose {
|
| 649 | 649 |
errCh = c.Attach(nil, b.OutStream, b.ErrStream) |
| 650 | 650 |
} |
| 651 | 651 |
|
| 652 | 652 |
//start the container |
| 653 |
- if err := c.Start(); err != nil {
|
|
| 653 |
+ if err := c.Start(ctx); err != nil {
|
|
| 654 | 654 |
return err |
| 655 | 655 |
} |
| 656 | 656 |
|
| ... | ... |
@@ -660,7 +661,7 @@ func (b *builder) run(c *daemon.Container) error {
|
| 660 | 660 |
select {
|
| 661 | 661 |
case <-b.cancelled: |
| 662 | 662 |
logrus.Debugln("Build cancelled, killing container:", c.ID)
|
| 663 |
- c.Kill() |
|
| 663 |
+ c.Kill(ctx) |
|
| 664 | 664 |
case <-finished: |
| 665 | 665 |
} |
| 666 | 666 |
}() |
| ... | ... |
@@ -791,13 +792,13 @@ func copyAsDirectory(source, destination string, destExisted bool) error {
|
| 791 | 791 |
return fixPermissions(source, destination, 0, 0, destExisted) |
| 792 | 792 |
} |
| 793 | 793 |
|
| 794 |
-func (b *builder) clearTmp() {
|
|
| 794 |
+func (b *builder) clearTmp(ctx context.Context) {
|
|
| 795 | 795 |
for c := range b.TmpContainers {
|
| 796 | 796 |
rmConfig := &daemon.ContainerRmConfig{
|
| 797 | 797 |
ForceRemove: true, |
| 798 | 798 |
RemoveVolume: true, |
| 799 | 799 |
} |
| 800 |
- if err := b.Daemon.ContainerRm(c, rmConfig); err != nil {
|
|
| 800 |
+ if err := b.Daemon.ContainerRm(ctx, c, rmConfig); err != nil {
|
|
| 801 | 801 |
fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err) |
| 802 | 802 |
return |
| 803 | 803 |
} |
| ... | ... |
@@ -14,6 +14,7 @@ import ( |
| 14 | 14 |
"github.com/docker/docker/api" |
| 15 | 15 |
"github.com/docker/docker/builder/parser" |
| 16 | 16 |
"github.com/docker/docker/cliconfig" |
| 17 |
+ "github.com/docker/docker/context" |
|
| 17 | 18 |
"github.com/docker/docker/daemon" |
| 18 | 19 |
"github.com/docker/docker/graph/tags" |
| 19 | 20 |
"github.com/docker/docker/pkg/archive" |
| ... | ... |
@@ -112,7 +113,7 @@ func NewBuildConfig() *Config {
|
| 112 | 112 |
|
| 113 | 113 |
// Build is the main interface of the package; it gathers the Builder |
| 114 | 114 |
// struct and calls builder.Run() to do the real build work. |
| 115 |
-func Build(d *daemon.Daemon, buildConfig *Config) error {
|
|
| 115 |
+func Build(ctx context.Context, d *daemon.Daemon, buildConfig *Config) error {
|
|
| 116 | 116 |
var ( |
| 117 | 117 |
repoName string |
| 118 | 118 |
tag string |
| ... | ... |
@@ -229,15 +230,15 @@ func Build(d *daemon.Daemon, buildConfig *Config) error {
|
| 229 | 229 |
} |
| 230 | 230 |
|
| 231 | 231 |
defer func() {
|
| 232 |
- builder.Daemon.Graph().Release(builder.id, builder.activeImages...) |
|
| 232 |
+ builder.Daemon.Graph(ctx).Release(builder.id, builder.activeImages...) |
|
| 233 | 233 |
}() |
| 234 | 234 |
|
| 235 |
- id, err := builder.Run(context) |
|
| 235 |
+ id, err := builder.Run(ctx, context) |
|
| 236 | 236 |
if err != nil {
|
| 237 | 237 |
return err |
| 238 | 238 |
} |
| 239 | 239 |
if repoName != "" {
|
| 240 |
- return d.Repositories().Tag(repoName, tag, id, true) |
|
| 240 |
+ return d.Repositories(ctx).Tag(repoName, tag, id, true) |
|
| 241 | 241 |
} |
| 242 | 242 |
return nil |
| 243 | 243 |
} |
| ... | ... |
@@ -247,7 +248,7 @@ func Build(d *daemon.Daemon, buildConfig *Config) error {
|
| 247 | 247 |
// |
| 248 | 248 |
// - call parse.Parse() to get AST root from Dockerfile entries |
| 249 | 249 |
// - do the build by calling builder.dispatch() to run each entry's handling routine |
| 250 |
-func BuildFromConfig(d *daemon.Daemon, c *runconfig.Config, changes []string) (*runconfig.Config, error) {
|
|
| 250 |
+func BuildFromConfig(ctx context.Context, d *daemon.Daemon, c *runconfig.Config, changes []string) (*runconfig.Config, error) {
|
|
| 251 | 251 |
ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n"))) |
| 252 | 252 |
if err != nil {
|
| 253 | 253 |
return nil, err |
| ... | ... |
@@ -269,7 +270,7 @@ func BuildFromConfig(d *daemon.Daemon, c *runconfig.Config, changes []string) (* |
| 269 | 269 |
} |
| 270 | 270 |
|
| 271 | 271 |
for i, n := range ast.Children {
|
| 272 |
- if err := builder.dispatch(i, n); err != nil {
|
|
| 272 |
+ if err := builder.dispatch(ctx, i, n); err != nil {
|
|
| 273 | 273 |
return nil, err |
| 274 | 274 |
} |
| 275 | 275 |
} |
| ... | ... |
@@ -289,8 +290,8 @@ type CommitConfig struct {
|
| 289 | 289 |
} |
| 290 | 290 |
|
| 291 | 291 |
// Commit will create a new image from a container's changes |
| 292 |
-func Commit(name string, d *daemon.Daemon, c *CommitConfig) (string, error) {
|
|
| 293 |
- container, err := d.Get(name) |
|
| 292 |
+func Commit(ctx context.Context, name string, d *daemon.Daemon, c *CommitConfig) (string, error) {
|
|
| 293 |
+ container, err := d.Get(ctx, name) |
|
| 294 | 294 |
if err != nil {
|
| 295 | 295 |
return "", err |
| 296 | 296 |
} |
| ... | ... |
@@ -304,7 +305,7 @@ func Commit(name string, d *daemon.Daemon, c *CommitConfig) (string, error) {
|
| 304 | 304 |
c.Config = &runconfig.Config{}
|
| 305 | 305 |
} |
| 306 | 306 |
|
| 307 |
- newConfig, err := BuildFromConfig(d, c.Config, c.Changes) |
|
| 307 |
+ newConfig, err := BuildFromConfig(ctx, d, c.Config, c.Changes) |
|
| 308 | 308 |
if err != nil {
|
| 309 | 309 |
return "", err |
| 310 | 310 |
} |
| ... | ... |
@@ -322,7 +323,7 @@ func Commit(name string, d *daemon.Daemon, c *CommitConfig) (string, error) {
|
| 322 | 322 |
Config: newConfig, |
| 323 | 323 |
} |
| 324 | 324 |
|
| 325 |
- img, err := d.Commit(container, commitCfg) |
|
| 325 |
+ img, err := d.Commit(ctx, container, commitCfg) |
|
| 326 | 326 |
if err != nil {
|
| 327 | 327 |
return "", err |
| 328 | 328 |
} |
| ... | ... |
@@ -8,6 +8,7 @@ import ( |
| 8 | 8 |
"strings" |
| 9 | 9 |
|
| 10 | 10 |
"github.com/docker/docker/api/types" |
| 11 |
+ "github.com/docker/docker/context" |
|
| 11 | 12 |
"github.com/docker/docker/pkg/archive" |
| 12 | 13 |
"github.com/docker/docker/pkg/chrootarchive" |
| 13 | 14 |
"github.com/docker/docker/pkg/ioutils" |
| ... | ... |
@@ -20,8 +21,8 @@ var ErrExtractPointNotDirectory = errors.New("extraction point is not a director
|
| 20 | 20 |
|
| 21 | 21 |
// ContainerCopy performs a deprecated operation of archiving the resource at |
| 22 | 22 |
// the specified path in the container identified by the given name. |
| 23 |
-func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
|
|
| 24 |
- container, err := daemon.Get(name) |
|
| 23 |
+func (daemon *Daemon) ContainerCopy(ctx context.Context, name string, res string) (io.ReadCloser, error) {
|
|
| 24 |
+ container, err := daemon.Get(ctx, name) |
|
| 25 | 25 |
if err != nil {
|
| 26 | 26 |
return nil, err |
| 27 | 27 |
} |
| ... | ... |
@@ -30,30 +31,30 @@ func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, err |
| 30 | 30 |
res = res[1:] |
| 31 | 31 |
} |
| 32 | 32 |
|
| 33 |
- return container.copy(res) |
|
| 33 |
+ return container.copy(ctx, res) |
|
| 34 | 34 |
} |
| 35 | 35 |
|
| 36 | 36 |
// ContainerStatPath stats the filesystem resource at the specified path in the |
| 37 | 37 |
// container identified by the given name. |
| 38 |
-func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) {
|
|
| 39 |
- container, err := daemon.Get(name) |
|
| 38 |
+func (daemon *Daemon) ContainerStatPath(ctx context.Context, name string, path string) (stat *types.ContainerPathStat, err error) {
|
|
| 39 |
+ container, err := daemon.Get(ctx, name) |
|
| 40 | 40 |
if err != nil {
|
| 41 | 41 |
return nil, err |
| 42 | 42 |
} |
| 43 | 43 |
|
| 44 |
- return container.StatPath(path) |
|
| 44 |
+ return container.StatPath(ctx, path) |
|
| 45 | 45 |
} |
| 46 | 46 |
|
| 47 | 47 |
// ContainerArchivePath creates an archive of the filesystem resource at the |
| 48 | 48 |
// specified path in the container identified by the given name. Returns a |
| 49 | 49 |
// tar archive of the resource and whether it was a directory or a single file. |
| 50 |
-func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
|
|
| 51 |
- container, err := daemon.Get(name) |
|
| 50 |
+func (daemon *Daemon) ContainerArchivePath(ctx context.Context, name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
|
|
| 51 |
+ container, err := daemon.Get(ctx, name) |
|
| 52 | 52 |
if err != nil {
|
| 53 | 53 |
return nil, nil, err |
| 54 | 54 |
} |
| 55 | 55 |
|
| 56 |
- return container.ArchivePath(path) |
|
| 56 |
+ return container.ArchivePath(ctx, path) |
|
| 57 | 57 |
} |
| 58 | 58 |
|
| 59 | 59 |
// ContainerExtractToDir extracts the given archive to the specified location |
| ... | ... |
@@ -62,13 +63,13 @@ func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io |
| 62 | 62 |
// be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will |
| 63 | 63 |
// be an error if unpacking the given content would cause an existing directory |
| 64 | 64 |
// to be replaced with a non-directory and vice versa. |
| 65 |
-func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error {
|
|
| 66 |
- container, err := daemon.Get(name) |
|
| 65 |
+func (daemon *Daemon) ContainerExtractToDir(ctx context.Context, name, path string, noOverwriteDirNonDir bool, content io.Reader) error {
|
|
| 66 |
+ container, err := daemon.Get(ctx, name) |
|
| 67 | 67 |
if err != nil {
|
| 68 | 68 |
return err |
| 69 | 69 |
} |
| 70 | 70 |
|
| 71 |
- return container.ExtractToDir(path, noOverwriteDirNonDir, content) |
|
| 71 |
+ return container.ExtractToDir(ctx, path, noOverwriteDirNonDir, content) |
|
| 72 | 72 |
} |
| 73 | 73 |
|
| 74 | 74 |
// resolvePath resolves the given path in the container to a resource on the |
| ... | ... |
@@ -133,14 +134,14 @@ func (container *Container) statPath(resolvedPath, absPath string) (stat *types. |
| 133 | 133 |
|
| 134 | 134 |
// StatPath stats the filesystem resource at the specified path in this |
| 135 | 135 |
// container. Returns stat info about the resource. |
| 136 |
-func (container *Container) StatPath(path string) (stat *types.ContainerPathStat, err error) {
|
|
| 136 |
+func (container *Container) StatPath(ctx context.Context, path string) (stat *types.ContainerPathStat, err error) {
|
|
| 137 | 137 |
container.Lock() |
| 138 | 138 |
defer container.Unlock() |
| 139 | 139 |
|
| 140 |
- if err = container.Mount(); err != nil {
|
|
| 140 |
+ if err = container.Mount(ctx); err != nil {
|
|
| 141 | 141 |
return nil, err |
| 142 | 142 |
} |
| 143 |
- defer container.Unmount() |
|
| 143 |
+ defer container.Unmount(ctx) |
|
| 144 | 144 |
|
| 145 | 145 |
err = container.mountVolumes() |
| 146 | 146 |
defer container.unmountVolumes(true) |
| ... | ... |
@@ -159,7 +160,7 @@ func (container *Container) StatPath(path string) (stat *types.ContainerPathStat |
| 159 | 159 |
// ArchivePath creates an archive of the filesystem resource at the specified |
| 160 | 160 |
// path in this container. Returns a tar archive of the resource and stat info |
| 161 | 161 |
// about the resource. |
| 162 |
-func (container *Container) ArchivePath(path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
|
|
| 162 |
+func (container *Container) ArchivePath(ctx context.Context, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
|
|
| 163 | 163 |
container.Lock() |
| 164 | 164 |
|
| 165 | 165 |
defer func() {
|
| ... | ... |
@@ -171,7 +172,7 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta |
| 171 | 171 |
} |
| 172 | 172 |
}() |
| 173 | 173 |
|
| 174 |
- if err = container.Mount(); err != nil {
|
|
| 174 |
+ if err = container.Mount(ctx); err != nil {
|
|
| 175 | 175 |
return nil, nil, err |
| 176 | 176 |
} |
| 177 | 177 |
|
| ... | ... |
@@ -180,7 +181,7 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta |
| 180 | 180 |
// unmount any volumes |
| 181 | 181 |
container.unmountVolumes(true) |
| 182 | 182 |
// unmount the container's rootfs |
| 183 |
- container.Unmount() |
|
| 183 |
+ container.Unmount(ctx) |
|
| 184 | 184 |
} |
| 185 | 185 |
}() |
| 186 | 186 |
|
| ... | ... |
@@ -214,12 +215,12 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta |
| 214 | 214 |
content = ioutils.NewReadCloserWrapper(data, func() error {
|
| 215 | 215 |
err := data.Close() |
| 216 | 216 |
container.unmountVolumes(true) |
| 217 |
- container.Unmount() |
|
| 217 |
+ container.Unmount(ctx) |
|
| 218 | 218 |
container.Unlock() |
| 219 | 219 |
return err |
| 220 | 220 |
}) |
| 221 | 221 |
|
| 222 |
- container.logEvent("archive-path")
|
|
| 222 |
+ container.logEvent(ctx, "archive-path") |
|
| 223 | 223 |
|
| 224 | 224 |
return content, stat, nil |
| 225 | 225 |
} |
| ... | ... |
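ArchivePath cannot unmount the rootfs before returning, because the caller still has to stream the tar archive; the cleanup (unmount volumes, Unmount(ctx), Unlock) is instead deferred into the Close of the returned reader. Below is a self-contained sketch of that close-hook wrapper pattern using only the standard library; ioutils.NewReadCloserWrapper in the diff plays the same role, and the cleanup body here is only a placeholder.

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// readCloserWrapper runs a cleanup hook when the consumer closes the stream,
// mirroring how ArchivePath defers unmounting until the archive is read.
type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (w *readCloserWrapper) Close() error { return w.closer() }

// newReadCloserWrapper pairs a reader with a cleanup function.
func newReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
	return &readCloserWrapper{Reader: r, closer: closer}
}

func main() {
	data := strings.NewReader("tar archive bytes would stream through here")

	rc := newReadCloserWrapper(data, func() error {
		// In the daemon this is where unmountVolumes, Unmount(ctx) and
		// Unlock would run, once the API layer has finished streaming.
		fmt.Println("cleanup: unmount and unlock")
		return nil
	})

	if _, err := io.Copy(io.Discard, rc); err != nil {
		panic(err)
	}
	rc.Close() // triggers the deferred cleanup
}
```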
@@ -230,14 +231,14 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta |
| 230 | 230 |
// noOverwriteDirNonDir is true then it will be an error if unpacking the |
| 231 | 231 |
// given content would cause an existing directory to be replaced with a non- |
| 232 | 232 |
// directory and vice versa. |
| 233 |
-func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool, content io.Reader) (err error) {
|
|
| 233 |
+func (container *Container) ExtractToDir(ctx context.Context, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) {
|
|
| 234 | 234 |
container.Lock() |
| 235 | 235 |
defer container.Unlock() |
| 236 | 236 |
|
| 237 |
- if err = container.Mount(); err != nil {
|
|
| 237 |
+ if err = container.Mount(ctx); err != nil {
|
|
| 238 | 238 |
return err |
| 239 | 239 |
} |
| 240 |
- defer container.Unmount() |
|
| 240 |
+ defer container.Unmount(ctx) |
|
| 241 | 241 |
|
| 242 | 242 |
err = container.mountVolumes() |
| 243 | 243 |
defer container.unmountVolumes(true) |
| ... | ... |
@@ -318,7 +319,7 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool, |
| 318 | 318 |
return err |
| 319 | 319 |
} |
| 320 | 320 |
|
| 321 |
- container.logEvent("extract-to-dir")
|
|
| 321 |
+ container.logEvent(ctx, "extract-to-dir") |
|
| 322 | 322 |
|
| 323 | 323 |
return nil |
| 324 | 324 |
} |
| ... | ... |
@@ -3,6 +3,7 @@ package daemon |
| 3 | 3 |
import ( |
| 4 | 4 |
"io" |
| 5 | 5 |
|
| 6 |
+ "github.com/docker/docker/context" |
|
| 6 | 7 |
"github.com/docker/docker/pkg/stdcopy" |
| 7 | 8 |
) |
| 8 | 9 |
|
| ... | ... |
@@ -15,8 +16,8 @@ type ContainerAttachWithLogsConfig struct {
|
| 15 | 15 |
} |
| 16 | 16 |
|
| 17 | 17 |
// ContainerAttachWithLogs attaches to logs according to the config passed in. See ContainerAttachWithLogsConfig. |
| 18 |
-func (daemon *Daemon) ContainerAttachWithLogs(prefixOrName string, c *ContainerAttachWithLogsConfig) error {
|
|
| 19 |
- container, err := daemon.Get(prefixOrName) |
|
| 18 |
+func (daemon *Daemon) ContainerAttachWithLogs(ctx context.Context, prefixOrName string, c *ContainerAttachWithLogsConfig) error {
|
|
| 19 |
+ container, err := daemon.Get(ctx, prefixOrName) |
|
| 20 | 20 |
if err != nil {
|
| 21 | 21 |
return err |
| 22 | 22 |
} |
| ... | ... |
@@ -43,7 +44,7 @@ func (daemon *Daemon) ContainerAttachWithLogs(prefixOrName string, c *ContainerA |
| 43 | 43 |
stderr = errStream |
| 44 | 44 |
} |
| 45 | 45 |
|
| 46 |
- return container.attachWithLogs(stdin, stdout, stderr, c.Logs, c.Stream) |
|
| 46 |
+ return container.attachWithLogs(ctx, stdin, stdout, stderr, c.Logs, c.Stream) |
|
| 47 | 47 |
} |
| 48 | 48 |
|
| 49 | 49 |
// ContainerWsAttachWithLogsConfig attach with websockets, since all |
| ... | ... |
@@ -55,10 +56,10 @@ type ContainerWsAttachWithLogsConfig struct {
|
| 55 | 55 |
} |
| 56 | 56 |
|
| 57 | 57 |
// ContainerWsAttachWithLogs websocket connection |
| 58 |
-func (daemon *Daemon) ContainerWsAttachWithLogs(prefixOrName string, c *ContainerWsAttachWithLogsConfig) error {
|
|
| 59 |
- container, err := daemon.Get(prefixOrName) |
|
| 58 |
+func (daemon *Daemon) ContainerWsAttachWithLogs(ctx context.Context, prefixOrName string, c *ContainerWsAttachWithLogsConfig) error {
|
|
| 59 |
+ container, err := daemon.Get(ctx, prefixOrName) |
|
| 60 | 60 |
if err != nil {
|
| 61 | 61 |
return err |
| 62 | 62 |
} |
| 63 |
- return container.attachWithLogs(c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream) |
|
| 63 |
+ return container.attachWithLogs(ctx, c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream) |
|
| 64 | 64 |
} |
| ... | ... |
@@ -1,10 +1,13 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 |
-import "github.com/docker/docker/pkg/archive" |
|
| 3 |
+import ( |
|
| 4 |
+ "github.com/docker/docker/context" |
|
| 5 |
+ "github.com/docker/docker/pkg/archive" |
|
| 6 |
+) |
|
| 4 | 7 |
|
| 5 | 8 |
// ContainerChanges returns a list of container fs changes |
| 6 |
-func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) {
|
|
| 7 |
- container, err := daemon.Get(name) |
|
| 9 |
+func (daemon *Daemon) ContainerChanges(ctx context.Context, name string) ([]archive.Change, error) {
|
|
| 10 |
+ container, err := daemon.Get(ctx, name) |
|
| 8 | 11 |
if err != nil {
|
| 9 | 12 |
return nil, err |
| 10 | 13 |
} |
| ... | ... |
@@ -1,6 +1,7 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "github.com/docker/docker/context" |
|
| 4 | 5 |
"github.com/docker/docker/image" |
| 5 | 6 |
"github.com/docker/docker/runconfig" |
| 6 | 7 |
) |
| ... | ... |
@@ -18,10 +19,10 @@ type ContainerCommitConfig struct {
|
| 18 | 18 |
|
| 19 | 19 |
// Commit creates a new filesystem image from the current state of a container. |
| 20 | 20 |
// The image can optionally be tagged into a repository. |
| 21 |
-func (daemon *Daemon) Commit(container *Container, c *ContainerCommitConfig) (*image.Image, error) {
|
|
| 21 |
+func (daemon *Daemon) Commit(ctx context.Context, container *Container, c *ContainerCommitConfig) (*image.Image, error) {
|
|
| 22 | 22 |
if c.Pause && !container.isPaused() {
|
| 23 |
- container.pause() |
|
| 24 |
- defer container.unpause() |
|
| 23 |
+ container.pause(ctx) |
|
| 24 |
+ defer container.unpause(ctx) |
|
| 25 | 25 |
} |
| 26 | 26 |
|
| 27 | 27 |
rwTar, err := container.exportContainerRw() |
| ... | ... |
@@ -46,6 +47,6 @@ func (daemon *Daemon) Commit(container *Container, c *ContainerCommitConfig) (*i |
| 46 | 46 |
return img, err |
| 47 | 47 |
} |
| 48 | 48 |
} |
| 49 |
- container.logEvent("commit")
|
|
| 49 |
+ container.logEvent(ctx, "commit") |
|
| 50 | 50 |
return img, nil |
| 51 | 51 |
} |
| ... | ... |
@@ -15,6 +15,7 @@ import ( |
| 15 | 15 |
"github.com/opencontainers/runc/libcontainer/label" |
| 16 | 16 |
|
| 17 | 17 |
"github.com/Sirupsen/logrus" |
| 18 |
+ "github.com/docker/docker/context" |
|
| 18 | 19 |
"github.com/docker/docker/daemon/execdriver" |
| 19 | 20 |
"github.com/docker/docker/daemon/logger" |
| 20 | 21 |
"github.com/docker/docker/daemon/logger/jsonfilelog" |
| ... | ... |
@@ -170,9 +171,10 @@ func (container *Container) writeHostConfig() error {
|
| 170 | 170 |
return ioutil.WriteFile(pth, data, 0666) |
| 171 | 171 |
} |
| 172 | 172 |
|
| 173 |
-func (container *Container) logEvent(action string) {
|
|
| 173 |
+func (container *Container) logEvent(ctx context.Context, action string) {
|
|
| 174 | 174 |
d := container.daemon |
| 175 | 175 |
d.EventsService.Log( |
| 176 |
+ ctx, |
|
| 176 | 177 |
action, |
| 177 | 178 |
container.ID, |
| 178 | 179 |
container.Config.Image, |
| ... | ... |
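This logEvent change is where the request-scoped context reaches the event stream: every container event now carries the ctx handed down from the API layer. The sketch below is only an illustration of how a request ID might travel through such a context, using the standard library context package and a hypothetical RequestID accessor; the actual docker/docker/context package may expose a different API.

```go
package main

import (
	"context"
	"fmt"
)

// requestIDKey is a hypothetical private key type for storing the request ID.
type requestIDKey struct{}

// WithRequestID returns a copy of parent carrying the given request ID.
func WithRequestID(parent context.Context, id string) context.Context {
	return context.WithValue(parent, requestIDKey{}, id)
}

// RequestID extracts the request ID, or "" if none was set.
func RequestID(ctx context.Context) string {
	id, _ := ctx.Value(requestIDKey{}).(string)
	return id
}

// logEvent mimics the shape of the patched helper: the context travels
// alongside the event data so the request ID can be attached to the event.
func logEvent(ctx context.Context, action, containerID string) {
	fmt.Printf("event=%s container=%s request-id=%s\n", action, containerID, RequestID(ctx))
}

func main() {
	ctx := WithRequestID(context.Background(), "req-123")
	logEvent(ctx, "start", "abc123")
}
```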
@@ -238,7 +240,7 @@ func (container *Container) exportContainerRw() (archive.Archive, error) {
|
| 238 | 238 |
// container needs, such as storage and networking, as well as links |
| 239 | 239 |
// between containers. The container is left waiting for a signal to |
| 240 | 240 |
// begin running. |
| 241 |
-func (container *Container) Start() (err error) {
|
|
| 241 |
+func (container *Container) Start(ctx context.Context) (err error) {
|
|
| 242 | 242 |
container.Lock() |
| 243 | 243 |
defer container.Unlock() |
| 244 | 244 |
|
| ... | ... |
@@ -260,12 +262,12 @@ func (container *Container) Start() (err error) {
|
| 260 | 260 |
container.ExitCode = 128 |
| 261 | 261 |
} |
| 262 | 262 |
container.toDisk() |
| 263 |
- container.cleanup() |
|
| 264 |
- container.logEvent("die")
|
|
| 263 |
+ container.cleanup(ctx) |
|
| 264 |
+ container.logEvent(ctx, "die") |
|
| 265 | 265 |
} |
| 266 | 266 |
}() |
| 267 | 267 |
|
| 268 |
- if err := container.Mount(); err != nil {
|
|
| 268 |
+ if err := container.Mount(ctx); err != nil {
|
|
| 269 | 269 |
return err |
| 270 | 270 |
} |
| 271 | 271 |
|
| ... | ... |
@@ -273,10 +275,10 @@ func (container *Container) Start() (err error) {
|
| 273 | 273 |
// backwards API compatibility. |
| 274 | 274 |
container.hostConfig = runconfig.SetDefaultNetModeIfBlank(container.hostConfig) |
| 275 | 275 |
|
| 276 |
- if err := container.initializeNetworking(); err != nil {
|
|
| 276 |
+ if err := container.initializeNetworking(ctx); err != nil {
|
|
| 277 | 277 |
return err |
| 278 | 278 |
} |
| 279 |
- linkedEnv, err := container.setupLinkedContainers() |
|
| 279 |
+ linkedEnv, err := container.setupLinkedContainers(ctx) |
|
| 280 | 280 |
if err != nil {
|
| 281 | 281 |
return err |
| 282 | 282 |
} |
| ... | ... |
@@ -284,7 +286,7 @@ func (container *Container) Start() (err error) {
|
| 284 | 284 |
return err |
| 285 | 285 |
} |
| 286 | 286 |
env := container.createDaemonEnvironment(linkedEnv) |
| 287 |
- if err := populateCommand(container, env); err != nil {
|
|
| 287 |
+ if err := populateCommand(ctx, container, env); err != nil {
|
|
| 288 | 288 |
return err |
| 289 | 289 |
} |
| 290 | 290 |
|
| ... | ... |
@@ -301,7 +303,7 @@ func (container *Container) Start() (err error) {
|
| 301 | 301 |
mounts = append(mounts, container.ipcMounts()...) |
| 302 | 302 |
|
| 303 | 303 |
container.command.Mounts = mounts |
| 304 |
- return container.waitForStart() |
|
| 304 |
+ return container.waitForStart(ctx) |
|
| 305 | 305 |
} |
| 306 | 306 |
|
| 307 | 307 |
// streamConfig.StdinPipe returns a WriteCloser which can be used to feed data |
| ... | ... |
@@ -334,14 +336,14 @@ func (container *Container) isNetworkAllocated() bool {
|
| 334 | 334 |
|
| 335 | 335 |
// cleanup releases any network resources allocated to the container along with any rules |
| 336 | 336 |
// around how containers are linked together. It also unmounts the container's root filesystem. |
| 337 |
-func (container *Container) cleanup() {
|
|
| 337 |
+func (container *Container) cleanup(ctx context.Context) {
|
|
| 338 | 338 |
container.releaseNetwork() |
| 339 | 339 |
|
| 340 | 340 |
if err := container.unmountIpcMounts(); err != nil {
|
| 341 | 341 |
logrus.Errorf("%s: Failed to umount ipc filesystems: %v", container.ID, err)
|
| 342 | 342 |
} |
| 343 | 343 |
|
| 344 |
- if err := container.Unmount(); err != nil {
|
|
| 344 |
+ if err := container.Unmount(ctx); err != nil {
|
|
| 345 | 345 |
logrus.Errorf("%s: Failed to umount filesystem: %v", container.ID, err)
|
| 346 | 346 |
} |
| 347 | 347 |
|
| ... | ... |
@@ -357,7 +359,7 @@ func (container *Container) cleanup() {
|
| 357 | 357 |
// to send the signal. An error is returned if the container is paused |
| 358 | 358 |
// or not running, or if there is a problem returned from the |
| 359 | 359 |
// underlying kill command. |
| 360 |
-func (container *Container) killSig(sig int) error {
|
|
| 360 |
+func (container *Container) killSig(ctx context.Context, sig int) error {
|
|
| 361 | 361 |
logrus.Debugf("Sending %d to %s", sig, container.ID)
|
| 362 | 362 |
container.Lock() |
| 363 | 363 |
defer container.Unlock() |
| ... | ... |
@@ -385,13 +387,13 @@ func (container *Container) killSig(sig int) error {
|
| 385 | 385 |
if err := container.daemon.kill(container, sig); err != nil {
|
| 386 | 386 |
return err |
| 387 | 387 |
} |
| 388 |
- container.logEvent("kill")
|
|
| 388 |
+ container.logEvent(ctx, "kill") |
|
| 389 | 389 |
return nil |
| 390 | 390 |
} |
| 391 | 391 |
|
| 392 | 392 |
// Wrapper around killSig() suppressing "no such process" error. |
| 393 |
-func (container *Container) killPossiblyDeadProcess(sig int) error {
|
|
| 394 |
- err := container.killSig(sig) |
|
| 393 |
+func (container *Container) killPossiblyDeadProcess(ctx context.Context, sig int) error {
|
|
| 394 |
+ err := container.killSig(ctx, sig) |
|
| 395 | 395 |
if err == syscall.ESRCH {
|
| 396 | 396 |
logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.getPID(), sig)
|
| 397 | 397 |
return nil |
| ... | ... |
@@ -399,7 +401,7 @@ func (container *Container) killPossiblyDeadProcess(sig int) error {
|
| 399 | 399 |
return err |
| 400 | 400 |
} |
| 401 | 401 |
|
| 402 |
-func (container *Container) pause() error {
|
|
| 402 |
+func (container *Container) pause(ctx context.Context) error {
|
|
| 403 | 403 |
container.Lock() |
| 404 | 404 |
defer container.Unlock() |
| 405 | 405 |
|
| ... | ... |
@@ -417,11 +419,11 @@ func (container *Container) pause() error {
|
| 417 | 417 |
return err |
| 418 | 418 |
} |
| 419 | 419 |
container.Paused = true |
| 420 |
- container.logEvent("pause")
|
|
| 420 |
+ container.logEvent(ctx, "pause") |
|
| 421 | 421 |
return nil |
| 422 | 422 |
} |
| 423 | 423 |
|
| 424 |
-func (container *Container) unpause() error {
|
|
| 424 |
+func (container *Container) unpause(ctx context.Context) error {
|
|
| 425 | 425 |
container.Lock() |
| 426 | 426 |
defer container.Unlock() |
| 427 | 427 |
|
| ... | ... |
@@ -439,18 +441,18 @@ func (container *Container) unpause() error {
|
| 439 | 439 |
return err |
| 440 | 440 |
} |
| 441 | 441 |
container.Paused = false |
| 442 |
- container.logEvent("unpause")
|
|
| 442 |
+ container.logEvent(ctx, "unpause") |
|
| 443 | 443 |
return nil |
| 444 | 444 |
} |
| 445 | 445 |
|
| 446 | 446 |
// Kill forcefully terminates a container. |
| 447 |
-func (container *Container) Kill() error {
|
|
| 447 |
+func (container *Container) Kill(ctx context.Context) error {
|
|
| 448 | 448 |
if !container.IsRunning() {
|
| 449 | 449 |
return derr.ErrorCodeNotRunning.WithArgs(container.ID) |
| 450 | 450 |
} |
| 451 | 451 |
|
| 452 | 452 |
// 1. Send SIGKILL |
| 453 |
- if err := container.killPossiblyDeadProcess(int(syscall.SIGKILL)); err != nil {
|
|
| 453 |
+ if err := container.killPossiblyDeadProcess(ctx, int(syscall.SIGKILL)); err != nil {
|
|
| 454 | 454 |
// While normally we might "return err" here we're not going to |
| 455 | 455 |
// because if we can't stop the container by this point then |
| 456 | 456 |
// it's probably because it's already stopped. Meaning, between |
| ... | ... |
@@ -484,15 +486,15 @@ func (container *Container) Kill() error {
|
| 484 | 484 |
// process to exit. If a negative duration is given, Stop will wait |
| 485 | 485 |
// for the initial signal forever. If the container is not running Stop returns |
| 486 | 486 |
// immediately. |
| 487 |
-func (container *Container) Stop(seconds int) error {
|
|
| 487 |
+func (container *Container) Stop(ctx context.Context, seconds int) error {
|
|
| 488 | 488 |
if !container.IsRunning() {
|
| 489 | 489 |
return nil |
| 490 | 490 |
} |
| 491 | 491 |
|
| 492 | 492 |
// 1. Send a SIGTERM |
| 493 |
- if err := container.killPossiblyDeadProcess(container.stopSignal()); err != nil {
|
|
| 493 |
+ if err := container.killPossiblyDeadProcess(ctx, container.stopSignal()); err != nil {
|
|
| 494 | 494 |
logrus.Infof("Failed to send SIGTERM to the process, force killing")
|
| 495 |
- if err := container.killPossiblyDeadProcess(9); err != nil {
|
|
| 495 |
+ if err := container.killPossiblyDeadProcess(ctx, 9); err != nil {
|
|
| 496 | 496 |
return err |
| 497 | 497 |
} |
| 498 | 498 |
} |
| ... | ... |
@@ -501,13 +503,13 @@ func (container *Container) Stop(seconds int) error {
|
| 501 | 501 |
if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil {
|
| 502 | 502 |
logrus.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
|
| 503 | 503 |
// 3. If it doesn't, then send SIGKILL |
| 504 |
- if err := container.Kill(); err != nil {
|
|
| 504 |
+ if err := container.Kill(ctx); err != nil {
|
|
| 505 | 505 |
container.WaitStop(-1 * time.Second) |
| 506 | 506 |
return err |
| 507 | 507 |
} |
| 508 | 508 |
} |
| 509 | 509 |
|
| 510 |
- container.logEvent("stop")
|
|
| 510 |
+ container.logEvent(ctx, "stop") |
|
| 511 | 511 |
return nil |
| 512 | 512 |
} |
| 513 | 513 |
|
| ... | ... |
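Stop keeps its escalation logic unchanged while gaining a ctx: send the container's stop signal, wait the requested number of seconds, then fall back to Kill. The toy sketch below reproduces that escalation with a channel standing in for the process, purely to illustrate the control flow; it is not the daemon's signalling code and ignores the negative-duration (wait forever) case.

```go
package main

import (
	"fmt"
	"time"
)

// fakeProcess may ignore the polite stop request; kill always works.
type fakeProcess struct {
	done chan struct{}
}

func (p *fakeProcess) signalTerm() { /* the stop signal may be ignored */ }
func (p *fakeProcess) kill()       { close(p.done) }

// stop mirrors the escalation in Container.Stop: stop signal, bounded wait,
// then a forced kill if the process is still around.
func stop(p *fakeProcess, seconds int) {
	p.signalTerm()
	select {
	case <-p.done:
		fmt.Println("exited after the stop signal")
	case <-time.After(time.Duration(seconds) * time.Second):
		fmt.Println("grace period elapsed, killing")
		p.kill()
		<-p.done
	}
}

func main() {
	p := &fakeProcess{done: make(chan struct{})}
	stop(p, 1) // waits one second, then forces the kill
}
```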
@@ -515,61 +517,61 @@ func (container *Container) Stop(seconds int) error {
|
| 515 | 515 |
// container. When stopping, wait for the given duration in seconds to |
| 516 | 516 |
// gracefully stop, before forcefully terminating the container. If |
| 517 | 517 |
// given a negative duration, wait forever for a graceful stop. |
| 518 |
-func (container *Container) Restart(seconds int) error {
|
|
| 518 |
+func (container *Container) Restart(ctx context.Context, seconds int) error {
|
|
| 519 | 519 |
// Avoid unnecessarily unmounting and then directly mounting |
| 520 | 520 |
// the container when the container stops and then starts |
| 521 | 521 |
// again |
| 522 |
- if err := container.Mount(); err == nil {
|
|
| 523 |
- defer container.Unmount() |
|
| 522 |
+ if err := container.Mount(ctx); err == nil {
|
|
| 523 |
+ defer container.Unmount(ctx) |
|
| 524 | 524 |
} |
| 525 | 525 |
|
| 526 |
- if err := container.Stop(seconds); err != nil {
|
|
| 526 |
+ if err := container.Stop(ctx, seconds); err != nil {
|
|
| 527 | 527 |
return err |
| 528 | 528 |
} |
| 529 | 529 |
|
| 530 |
- if err := container.Start(); err != nil {
|
|
| 530 |
+ if err := container.Start(ctx); err != nil {
|
|
| 531 | 531 |
return err |
| 532 | 532 |
} |
| 533 | 533 |
|
| 534 |
- container.logEvent("restart")
|
|
| 534 |
+ container.logEvent(ctx, "restart") |
|
| 535 | 535 |
return nil |
| 536 | 536 |
} |
| 537 | 537 |
|
| 538 | 538 |
// Resize changes the TTY of the process running inside the container |
| 539 | 539 |
// to the given height and width. The container must be running. |
| 540 |
-func (container *Container) Resize(h, w int) error {
|
|
| 540 |
+func (container *Container) Resize(ctx context.Context, h, w int) error {
|
|
| 541 | 541 |
if !container.IsRunning() {
|
| 542 | 542 |
return derr.ErrorCodeNotRunning.WithArgs(container.ID) |
| 543 | 543 |
} |
| 544 | 544 |
if err := container.command.ProcessConfig.Terminal.Resize(h, w); err != nil {
|
| 545 | 545 |
return err |
| 546 | 546 |
} |
| 547 |
- container.logEvent("resize")
|
|
| 547 |
+ container.logEvent(ctx, "resize") |
|
| 548 | 548 |
return nil |
| 549 | 549 |
} |
| 550 | 550 |
|
| 551 |
-func (container *Container) export() (archive.Archive, error) {
|
|
| 552 |
- if err := container.Mount(); err != nil {
|
|
| 551 |
+func (container *Container) export(ctx context.Context) (archive.Archive, error) {
|
|
| 552 |
+ if err := container.Mount(ctx); err != nil {
|
|
| 553 | 553 |
return nil, err |
| 554 | 554 |
} |
| 555 | 555 |
|
| 556 | 556 |
archive, err := archive.Tar(container.basefs, archive.Uncompressed) |
| 557 | 557 |
if err != nil {
|
| 558 |
- container.Unmount() |
|
| 558 |
+ container.Unmount(ctx) |
|
| 559 | 559 |
return nil, err |
| 560 | 560 |
} |
| 561 | 561 |
arch := ioutils.NewReadCloserWrapper(archive, func() error {
|
| 562 | 562 |
err := archive.Close() |
| 563 |
- container.Unmount() |
|
| 563 |
+ container.Unmount(ctx) |
|
| 564 | 564 |
return err |
| 565 | 565 |
}) |
| 566 |
- container.logEvent("export")
|
|
| 566 |
+ container.logEvent(ctx, "export") |
|
| 567 | 567 |
return arch, err |
| 568 | 568 |
} |
| 569 | 569 |
|
| 570 | 570 |
// Mount sets container.basefs |
| 571 |
-func (container *Container) Mount() error {
|
|
| 572 |
- return container.daemon.Mount(container) |
|
| 571 |
+func (container *Container) Mount(ctx context.Context) error {
|
|
| 572 |
+ return container.daemon.Mount(ctx, container) |
|
| 573 | 573 |
} |
| 574 | 574 |
|
| 575 | 575 |
func (container *Container) changes() ([]archive.Change, error) {
|
| ... | ... |
@@ -578,7 +580,7 @@ func (container *Container) changes() ([]archive.Change, error) {
|
| 578 | 578 |
return container.daemon.changes(container) |
| 579 | 579 |
} |
| 580 | 580 |
|
| 581 |
-func (container *Container) getImage() (*image.Image, error) {
|
|
| 581 |
+func (container *Container) getImage(ctx context.Context) (*image.Image, error) {
|
|
| 582 | 582 |
if container.daemon == nil {
|
| 583 | 583 |
return nil, derr.ErrorCodeImageUnregContainer |
| 584 | 584 |
} |
| ... | ... |
@@ -587,7 +589,7 @@ func (container *Container) getImage() (*image.Image, error) {
|
| 587 | 587 |
|
| 588 | 588 |
// Unmount asks the daemon to release the layered filesystems that are |
| 589 | 589 |
// mounted by the container. |
| 590 |
-func (container *Container) Unmount() error {
|
|
| 590 |
+func (container *Container) Unmount(ctx context.Context) error {
|
|
| 591 | 591 |
return container.daemon.unmount(container) |
| 592 | 592 |
} |
| 593 | 593 |
|
| ... | ... |
@@ -612,7 +614,7 @@ func validateID(id string) error {
|
| 612 | 612 |
return nil |
| 613 | 613 |
} |
| 614 | 614 |
|
| 615 |
-func (container *Container) copy(resource string) (rc io.ReadCloser, err error) {
|
|
| 615 |
+func (container *Container) copy(ctx context.Context, resource string) (rc io.ReadCloser, err error) {
|
|
| 616 | 616 |
container.Lock() |
| 617 | 617 |
|
| 618 | 618 |
defer func() {
|
| ... | ... |
@@ -624,7 +626,7 @@ func (container *Container) copy(resource string) (rc io.ReadCloser, err error) |
| 624 | 624 |
} |
| 625 | 625 |
}() |
| 626 | 626 |
|
| 627 |
- if err := container.Mount(); err != nil {
|
|
| 627 |
+ if err := container.Mount(ctx); err != nil {
|
|
| 628 | 628 |
return nil, err |
| 629 | 629 |
} |
| 630 | 630 |
|
| ... | ... |
@@ -633,7 +635,7 @@ func (container *Container) copy(resource string) (rc io.ReadCloser, err error) |
| 633 | 633 |
// unmount any volumes |
| 634 | 634 |
container.unmountVolumes(true) |
| 635 | 635 |
// unmount the container's rootfs |
| 636 |
- container.Unmount() |
|
| 636 |
+ container.Unmount(ctx) |
|
| 637 | 637 |
} |
| 638 | 638 |
}() |
| 639 | 639 |
|
| ... | ... |
@@ -669,11 +671,11 @@ func (container *Container) copy(resource string) (rc io.ReadCloser, err error) |
| 669 | 669 |
reader := ioutils.NewReadCloserWrapper(archive, func() error {
|
| 670 | 670 |
err := archive.Close() |
| 671 | 671 |
container.unmountVolumes(true) |
| 672 |
- container.Unmount() |
|
| 672 |
+ container.Unmount(ctx) |
|
| 673 | 673 |
container.Unlock() |
| 674 | 674 |
return err |
| 675 | 675 |
}) |
| 676 |
- container.logEvent("copy")
|
|
| 676 |
+ container.logEvent(ctx, "copy") |
|
| 677 | 677 |
return reader, nil |
| 678 | 678 |
} |
| 679 | 679 |
|
| ... | ... |
@@ -752,14 +754,14 @@ func (container *Container) startLogging() error {
|
| 752 | 752 |
return nil |
| 753 | 753 |
} |
| 754 | 754 |
|
| 755 |
-func (container *Container) waitForStart() error {
|
|
| 755 |
+func (container *Container) waitForStart(ctx context.Context) error {
|
|
| 756 | 756 |
container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy) |
| 757 | 757 |
|
| 758 | 758 |
// block until we either receive an error from the initial start of the container's |
| 759 | 759 |
// process or until the process is running in the container |
| 760 | 760 |
select {
|
| 761 | 761 |
case <-container.monitor.startSignal: |
| 762 |
- case err := <-promise.Go(container.monitor.Start): |
|
| 762 |
+ case err := <-promise.Go(func() error { return container.monitor.Start(ctx) }):
|
|
| 763 | 763 |
return err |
| 764 | 764 |
} |
| 765 | 765 |
|
| ... | ... |
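monitor.Start used to be handed to promise.Go directly; now that it takes a context, it is wrapped in a closure so it still satisfies the func() error shape promise.Go expects. Below is a small, self-contained sketch of that adapter pattern with a stand-in for promise.Go; the real promise package is assumed only to take a func() error and return an error channel.

```go
package main

import (
	"context"
	"fmt"
)

// goPromise is a stand-in for promise.Go: run fn in a goroutine and
// deliver its error on a buffered channel.
func goPromise(fn func() error) <-chan error {
	errCh := make(chan error, 1)
	go func() { errCh <- fn() }()
	return errCh
}

// start is a stand-in for monitor.Start after the signature change.
func start(ctx context.Context) error {
	if ctx == nil {
		return fmt.Errorf("nil context")
	}
	fmt.Println("monitor started")
	return nil
}

func main() {
	ctx := context.Background()

	// The closure adapts the new ctx-taking method to the func() error
	// shape the promise helper expects, exactly as waitForStart does.
	errCh := goPromise(func() error { return start(ctx) })

	if err := <-errCh; err != nil {
		fmt.Println("start failed:", err)
	}
}
```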
@@ -790,11 +792,11 @@ func (container *Container) getExecIDs() []string {
|
| 790 | 790 |
return container.execCommands.List() |
| 791 | 791 |
} |
| 792 | 792 |
|
| 793 |
-func (container *Container) exec(ExecConfig *ExecConfig) error {
|
|
| 793 |
+func (container *Container) exec(ctx context.Context, ExecConfig *ExecConfig) error {
|
|
| 794 | 794 |
container.Lock() |
| 795 | 795 |
defer container.Unlock() |
| 796 | 796 |
|
| 797 |
- callback := func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
|
|
| 797 |
+ callback := func(ctx context.Context, processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
|
|
| 798 | 798 |
if processConfig.Tty {
|
| 799 | 799 |
// The callback is called after the process Start() |
| 800 | 800 |
// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave |
| ... | ... |
@@ -809,7 +811,7 @@ func (container *Container) exec(ExecConfig *ExecConfig) error {
|
| 809 | 809 |
|
| 810 | 810 |
// We use a callback here instead of a goroutine and a chan for |
| 811 | 811 |
// synchronization purposes |
| 812 |
- cErr := promise.Go(func() error { return container.monitorExec(ExecConfig, callback) })
|
|
| 812 |
+ cErr := promise.Go(func() error { return container.monitorExec(ctx, ExecConfig, callback) })
|
|
| 813 | 813 |
|
| 814 | 814 |
// Exec should not return until the process is actually running |
| 815 | 815 |
select {
|
| ... | ... |
@@ -821,13 +823,13 @@ func (container *Container) exec(ExecConfig *ExecConfig) error {
|
| 821 | 821 |
return nil |
| 822 | 822 |
} |
| 823 | 823 |
|
| 824 |
-func (container *Container) monitorExec(ExecConfig *ExecConfig, callback execdriver.DriverCallback) error {
|
|
| 824 |
+func (container *Container) monitorExec(ctx context.Context, ExecConfig *ExecConfig, callback execdriver.DriverCallback) error {
|
|
| 825 | 825 |
var ( |
| 826 | 826 |
err error |
| 827 | 827 |
exitCode int |
| 828 | 828 |
) |
| 829 | 829 |
pipes := execdriver.NewPipes(ExecConfig.streamConfig.stdin, ExecConfig.streamConfig.stdout, ExecConfig.streamConfig.stderr, ExecConfig.OpenStdin) |
| 830 |
- exitCode, err = container.daemon.Exec(container, ExecConfig, pipes, callback) |
|
| 830 |
+ exitCode, err = container.daemon.Exec(ctx, container, ExecConfig, pipes, callback) |
|
| 831 | 831 |
if err != nil {
|
| 832 | 832 |
logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
|
| 833 | 833 |
} |
| ... | ... |
@@ -860,7 +862,7 @@ func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr |
| 860 | 860 |
return attach(&container.streamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr) |
| 861 | 861 |
} |
| 862 | 862 |
|
| 863 |
-func (container *Container) attachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
|
|
| 863 |
+func (container *Container) attachWithLogs(ctx context.Context, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
|
|
| 864 | 864 |
if logs {
|
| 865 | 865 |
logDriver, err := container.getLogger() |
| 866 | 866 |
if err != nil {
|
| ... | ... |
@@ -892,7 +894,7 @@ func (container *Container) attachWithLogs(stdin io.ReadCloser, stdout, stderr i |
| 892 | 892 |
} |
| 893 | 893 |
} |
| 894 | 894 |
|
| 895 |
- container.logEvent("attach")
|
|
| 895 |
+ container.logEvent(ctx, "attach") |
|
| 896 | 896 |
|
| 897 | 897 |
//stream |
| 898 | 898 |
if stream {
|
| ... | ... |
@@ -15,6 +15,7 @@ import ( |
| 15 | 15 |
"time" |
| 16 | 16 |
|
| 17 | 17 |
"github.com/Sirupsen/logrus" |
| 18 |
+ "github.com/docker/docker/context" |
|
| 18 | 19 |
"github.com/docker/docker/daemon/execdriver" |
| 19 | 20 |
"github.com/docker/docker/daemon/links" |
| 20 | 21 |
"github.com/docker/docker/daemon/network" |
| ... | ... |
@@ -77,12 +78,12 @@ func killProcessDirectly(container *Container) error {
|
| 77 | 77 |
return nil |
| 78 | 78 |
} |
| 79 | 79 |
|
| 80 |
-func (container *Container) setupLinkedContainers() ([]string, error) {
|
|
| 80 |
+func (container *Container) setupLinkedContainers(ctx context.Context) ([]string, error) {
|
|
| 81 | 81 |
var ( |
| 82 | 82 |
env []string |
| 83 | 83 |
daemon = container.daemon |
| 84 | 84 |
) |
| 85 |
- children, err := daemon.children(container.Name) |
|
| 85 |
+ children, err := daemon.children(ctx, container.Name) |
|
| 86 | 86 |
if err != nil {
|
| 87 | 87 |
return nil, err |
| 88 | 88 |
} |
| ... | ... |
@@ -175,7 +176,7 @@ func getDevicesFromPath(deviceMapping runconfig.DeviceMapping) (devs []*configs. |
| 175 | 175 |
return devs, derr.ErrorCodeDeviceInfo.WithArgs(deviceMapping.PathOnHost, err) |
| 176 | 176 |
} |
| 177 | 177 |
|
| 178 |
-func populateCommand(c *Container, env []string) error {
|
|
| 178 |
+func populateCommand(ctx context.Context, c *Container, env []string) error {
|
|
| 179 | 179 |
var en *execdriver.Network |
| 180 | 180 |
if !c.Config.NetworkDisabled {
|
| 181 | 181 |
en = &execdriver.Network{}
|
| ... | ... |
@@ -185,7 +186,7 @@ func populateCommand(c *Container, env []string) error {
|
| 185 | 185 |
|
| 186 | 186 |
parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2) |
| 187 | 187 |
if parts[0] == "container" {
|
| 188 |
- nc, err := c.getNetworkedContainer() |
|
| 188 |
+ nc, err := c.getNetworkedContainer(ctx) |
|
| 189 | 189 |
if err != nil {
|
| 190 | 190 |
return err |
| 191 | 191 |
} |
| ... | ... |
@@ -206,7 +207,7 @@ func populateCommand(c *Container, env []string) error {
|
| 206 | 206 |
} |
| 207 | 207 |
|
| 208 | 208 |
if c.hostConfig.IpcMode.IsContainer() {
|
| 209 |
- ic, err := c.getIpcContainer() |
|
| 209 |
+ ic, err := c.getIpcContainer(ctx) |
|
| 210 | 210 |
if err != nil {
|
| 211 | 211 |
return err |
| 212 | 212 |
} |
| ... | ... |
@@ -349,18 +350,18 @@ func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Devi |
| 349 | 349 |
} |
| 350 | 350 |
|
| 351 | 351 |
// GetSize returns the real size & virtual size of the container. |
| 352 |
-func (container *Container) getSize() (int64, int64) {
|
|
| 352 |
+func (container *Container) getSize(ctx context.Context) (int64, int64) {
|
|
| 353 | 353 |
var ( |
| 354 | 354 |
sizeRw, sizeRootfs int64 |
| 355 | 355 |
err error |
| 356 | 356 |
driver = container.daemon.driver |
| 357 | 357 |
) |
| 358 | 358 |
|
| 359 |
- if err := container.Mount(); err != nil {
|
|
| 359 |
+ if err := container.Mount(ctx); err != nil {
|
|
| 360 | 360 |
logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err)
|
| 361 | 361 |
return sizeRw, sizeRootfs |
| 362 | 362 |
} |
| 363 |
- defer container.Unmount() |
|
| 363 |
+ defer container.Unmount(ctx) |
|
| 364 | 364 |
|
| 365 | 365 |
initID := fmt.Sprintf("%s-init", container.ID)
|
| 366 | 366 |
sizeRw, err = driver.DiffSize(container.ID, initID) |
| ... | ... |
@@ -412,7 +413,7 @@ func (container *Container) buildHostnameFile() error {
|
| 412 | 412 |
return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) |
| 413 | 413 |
} |
| 414 | 414 |
|
| 415 |
-func (container *Container) buildSandboxOptions() ([]libnetwork.SandboxOption, error) {
|
|
| 415 |
+func (container *Container) buildSandboxOptions(ctx context.Context) ([]libnetwork.SandboxOption, error) {
|
|
| 416 | 416 |
var ( |
| 417 | 417 |
sboxOptions []libnetwork.SandboxOption |
| 418 | 418 |
err error |
| ... | ... |
@@ -489,7 +490,7 @@ func (container *Container) buildSandboxOptions() ([]libnetwork.SandboxOption, e |
| 489 | 489 |
|
| 490 | 490 |
var childEndpoints, parentEndpoints []string |
| 491 | 491 |
|
| 492 |
- children, err := container.daemon.children(container.Name) |
|
| 492 |
+ children, err := container.daemon.children(ctx, container.Name) |
|
| 493 | 493 |
if err != nil {
|
| 494 | 494 |
return nil, err |
| 495 | 495 |
} |
| ... | ... |
@@ -520,7 +521,7 @@ func (container *Container) buildSandboxOptions() ([]libnetwork.SandboxOption, e |
| 520 | 520 |
continue |
| 521 | 521 |
} |
| 522 | 522 |
|
| 523 |
- c, err := container.daemon.Get(ref.ParentID) |
|
| 523 |
+ c, err := container.daemon.Get(ctx, ref.ParentID) |
|
| 524 | 524 |
if err != nil {
|
| 525 | 525 |
logrus.Error(err) |
| 526 | 526 |
} |
| ... | ... |
@@ -679,7 +680,7 @@ func (container *Container) updateSandboxNetworkSettings(sb libnetwork.Sandbox) |
| 679 | 679 |
|
| 680 | 680 |
// UpdateNetwork is used to update the container's network (e.g. when linked containers |
| 681 | 681 |
// get removed/unlinked). |
| 682 |
-func (container *Container) updateNetwork() error {
|
|
| 682 |
+func (container *Container) updateNetwork(ctx context.Context) error {
|
|
| 683 | 683 |
ctrl := container.daemon.netController |
| 684 | 684 |
sid := container.NetworkSettings.SandboxID |
| 685 | 685 |
|
| ... | ... |
@@ -688,7 +689,7 @@ func (container *Container) updateNetwork() error {
|
| 688 | 688 |
return derr.ErrorCodeNoSandbox.WithArgs(sid, err) |
| 689 | 689 |
} |
| 690 | 690 |
|
| 691 |
- options, err := container.buildSandboxOptions() |
|
| 691 |
+ options, err := container.buildSandboxOptions(ctx) |
|
| 692 | 692 |
if err != nil {
|
| 693 | 693 |
return derr.ErrorCodeNetworkUpdate.WithArgs(err) |
| 694 | 694 |
} |
| ... | ... |
@@ -812,7 +813,7 @@ func createNetwork(controller libnetwork.NetworkController, dnet string, driver |
| 812 | 812 |
return controller.NewNetwork(driver, dnet, createOptions...) |
| 813 | 813 |
} |
| 814 | 814 |
|
| 815 |
-func (container *Container) secondaryNetworkRequired(primaryNetworkType string) bool {
|
|
| 815 |
+func (container *Container) secondaryNetworkRequired(ctx context.Context, primaryNetworkType string) bool {
|
|
| 816 | 816 |
switch primaryNetworkType {
|
| 817 | 817 |
case "bridge", "none", "host", "container": |
| 818 | 818 |
return false |
| ... | ... |
@@ -831,7 +832,7 @@ func (container *Container) secondaryNetworkRequired(primaryNetworkType string) |
| 831 | 831 |
return false |
| 832 | 832 |
} |
| 833 | 833 |
|
| 834 |
-func (container *Container) allocateNetwork() error {
|
|
| 834 |
+func (container *Container) allocateNetwork(ctx context.Context) error {
|
|
| 835 | 835 |
mode := container.hostConfig.NetworkMode |
| 836 | 836 |
controller := container.daemon.netController |
| 837 | 837 |
if container.Config.NetworkDisabled || mode.IsContainer() {
|
| ... | ... |
@@ -865,21 +866,21 @@ func (container *Container) allocateNetwork() error {
|
| 865 | 865 |
service = strings.Replace(service, "/", "", -1) |
| 866 | 866 |
} |
| 867 | 867 |
|
| 868 |
- if container.secondaryNetworkRequired(networkDriver) {
|
|
| 868 |
+ if container.secondaryNetworkRequired(ctx, networkDriver) {
|
|
| 869 | 869 |
// Configure Bridge as secondary network for port binding purposes |
| 870 |
- if err := container.configureNetwork("bridge", service, "bridge", false); err != nil {
|
|
| 870 |
+ if err := container.configureNetwork(ctx, "bridge", service, "bridge", false); err != nil {
|
|
| 871 | 871 |
return err |
| 872 | 872 |
} |
| 873 | 873 |
} |
| 874 | 874 |
|
| 875 |
- if err := container.configureNetwork(networkName, service, networkDriver, mode.IsDefault()); err != nil {
|
|
| 875 |
+ if err := container.configureNetwork(ctx, networkName, service, networkDriver, mode.IsDefault()); err != nil {
|
|
| 876 | 876 |
return err |
| 877 | 877 |
} |
| 878 | 878 |
|
| 879 | 879 |
return container.writeHostConfig() |
| 880 | 880 |
} |
| 881 | 881 |
|
| 882 |
-func (container *Container) configureNetwork(networkName, service, networkDriver string, canCreateNetwork bool) error {
|
|
| 882 |
+func (container *Container) configureNetwork(ctx context.Context, networkName, service, networkDriver string, canCreateNetwork bool) error {
|
|
| 883 | 883 |
controller := container.daemon.netController |
| 884 | 884 |
|
| 885 | 885 |
n, err := controller.NetworkByName(networkName) |
| ... | ... |
@@ -923,7 +924,7 @@ func (container *Container) configureNetwork(networkName, service, networkDriver |
| 923 | 923 |
return false |
| 924 | 924 |
}) |
| 925 | 925 |
if sb == nil {
|
| 926 |
- options, err := container.buildSandboxOptions() |
|
| 926 |
+ options, err := container.buildSandboxOptions(ctx) |
|
| 927 | 927 |
if err != nil {
|
| 928 | 928 |
return err |
| 929 | 929 |
} |
| ... | ... |
@@ -946,12 +947,12 @@ func (container *Container) configureNetwork(networkName, service, networkDriver |
| 946 | 946 |
return nil |
| 947 | 947 |
} |
| 948 | 948 |
|
| 949 |
-func (container *Container) initializeNetworking() error {
|
|
| 949 |
+func (container *Container) initializeNetworking(ctx context.Context) error {
|
|
| 950 | 950 |
var err error |
| 951 | 951 |
|
| 952 | 952 |
if container.hostConfig.NetworkMode.IsContainer() {
|
| 953 | 953 |
// we need to get the hosts files from the container to join |
| 954 |
- nc, err := container.getNetworkedContainer() |
|
| 954 |
+ nc, err := container.getNetworkedContainer(ctx) |
|
| 955 | 955 |
if err != nil {
|
| 956 | 956 |
return err |
| 957 | 957 |
} |
| ... | ... |
@@ -977,7 +978,7 @@ func (container *Container) initializeNetworking() error {
|
| 977 | 977 |
|
| 978 | 978 |
} |
| 979 | 979 |
|
| 980 |
- if err := container.allocateNetwork(); err != nil {
|
|
| 980 |
+ if err := container.allocateNetwork(ctx); err != nil {
|
|
| 981 | 981 |
return err |
| 982 | 982 |
} |
| 983 | 983 |
|
| ... | ... |
@@ -998,9 +999,9 @@ func (container *Container) setNetworkNamespaceKey(pid int) error {
|
| 998 | 998 |
return sandbox.SetKey(path) |
| 999 | 999 |
} |
| 1000 | 1000 |
|
| 1001 |
-func (container *Container) getIpcContainer() (*Container, error) {
|
|
| 1001 |
+func (container *Container) getIpcContainer(ctx context.Context) (*Container, error) {
|
|
| 1002 | 1002 |
containerID := container.hostConfig.IpcMode.Container() |
| 1003 |
- c, err := container.daemon.Get(containerID) |
|
| 1003 |
+ c, err := container.daemon.Get(ctx, containerID) |
|
| 1004 | 1004 |
if err != nil {
|
| 1005 | 1005 |
return nil, err |
| 1006 | 1006 |
} |
| ... | ... |
@@ -1036,14 +1037,14 @@ func (container *Container) setupWorkingDirectory() error {
|
| 1036 | 1036 |
return nil |
| 1037 | 1037 |
} |
| 1038 | 1038 |
|
| 1039 |
-func (container *Container) getNetworkedContainer() (*Container, error) {
|
|
| 1039 |
+func (container *Container) getNetworkedContainer(ctx context.Context) (*Container, error) {
|
|
| 1040 | 1040 |
parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2) |
| 1041 | 1041 |
switch parts[0] {
|
| 1042 | 1042 |
case "container": |
| 1043 | 1043 |
if len(parts) != 2 {
|
| 1044 | 1044 |
return nil, derr.ErrorCodeParseContainer |
| 1045 | 1045 |
} |
| 1046 |
- nc, err := container.daemon.Get(parts[1]) |
|
| 1046 |
+ nc, err := container.daemon.Get(ctx, parts[1]) |
|
| 1047 | 1047 |
if err != nil {
|
| 1048 | 1048 |
return nil, err |
| 1049 | 1049 |
} |
| ... | ... |
@@ -5,6 +5,7 @@ package daemon |
| 5 | 5 |
import ( |
| 6 | 6 |
"strings" |
| 7 | 7 |
|
| 8 |
+ "github.com/docker/docker/context" |
|
| 8 | 9 |
"github.com/docker/docker/daemon/execdriver" |
| 9 | 10 |
derr "github.com/docker/docker/errors" |
| 10 | 11 |
) |
| ... | ... |
@@ -25,7 +26,7 @@ func killProcessDirectly(container *Container) error {
|
| 25 | 25 |
return nil |
| 26 | 26 |
} |
| 27 | 27 |
|
| 28 |
-func (container *Container) setupLinkedContainers() ([]string, error) {
|
|
| 28 |
+func (container *Container) setupLinkedContainers(ctx context.Context) ([]string, error) {
|
|
| 29 | 29 |
return nil, nil |
| 30 | 30 |
} |
| 31 | 31 |
|
| ... | ... |
@@ -34,7 +35,7 @@ func (container *Container) createDaemonEnvironment(linkedEnv []string) []string |
| 34 | 34 |
return container.Config.Env |
| 35 | 35 |
} |
| 36 | 36 |
|
| 37 |
-func (container *Container) initializeNetworking() error {
|
|
| 37 |
+func (container *Container) initializeNetworking(ctx context.Context) error {
|
|
| 38 | 38 |
return nil |
| 39 | 39 |
} |
| 40 | 40 |
|
| ... | ... |
@@ -42,7 +43,7 @@ func (container *Container) setupWorkingDirectory() error {
|
| 42 | 42 |
return nil |
| 43 | 43 |
} |
| 44 | 44 |
|
| 45 |
-func populateCommand(c *Container, env []string) error {
|
|
| 45 |
+func populateCommand(ctx context.Context, c *Container, env []string) error {
|
|
| 46 | 46 |
en := &execdriver.Network{
|
| 47 | 47 |
Interface: nil, |
| 48 | 48 |
} |
| ... | ... |
@@ -135,7 +136,7 @@ func populateCommand(c *Container, env []string) error {
|
| 135 | 135 |
} |
| 136 | 136 |
|
| 137 | 137 |
// GetSize returns real size & virtual size |
| 138 |
-func (container *Container) getSize() (int64, int64) {
|
|
| 138 |
+func (container *Container) getSize(ctx context.Context) (int64, int64) {
|
|
| 139 | 139 |
// TODO Windows |
| 140 | 140 |
return 0, 0 |
| 141 | 141 |
} |
| ... | ... |
@@ -150,7 +151,7 @@ func (container *Container) allocateNetwork() error {
|
| 150 | 150 |
return nil |
| 151 | 151 |
} |
| 152 | 152 |
|
| 153 |
-func (container *Container) updateNetwork() error {
|
|
| 153 |
+func (container *Container) updateNetwork(ctx context.Context) error {
|
|
| 154 | 154 |
return nil |
| 155 | 155 |
} |
| 156 | 156 |
|
| ... | ... |
@@ -5,6 +5,7 @@ import ( |
| 5 | 5 |
|
| 6 | 6 |
"github.com/Sirupsen/logrus" |
| 7 | 7 |
"github.com/docker/docker/api/types" |
| 8 |
+ "github.com/docker/docker/context" |
|
| 8 | 9 |
derr "github.com/docker/docker/errors" |
| 9 | 10 |
"github.com/docker/docker/graph/tags" |
| 10 | 11 |
"github.com/docker/docker/image" |
| ... | ... |
@@ -15,21 +16,21 @@ import ( |
| 15 | 15 |
) |
| 16 | 16 |
|
| 17 | 17 |
// ContainerCreate takes configs and creates a container. |
| 18 |
-func (daemon *Daemon) ContainerCreate(name string, config *runconfig.Config, hostConfig *runconfig.HostConfig, adjustCPUShares bool) (*Container, []string, error) {
|
|
| 18 |
+func (daemon *Daemon) ContainerCreate(ctx context.Context, name string, config *runconfig.Config, hostConfig *runconfig.HostConfig, adjustCPUShares bool) (*Container, []string, error) {
|
|
| 19 | 19 |
if config == nil {
|
| 20 | 20 |
return nil, nil, derr.ErrorCodeEmptyConfig |
| 21 | 21 |
} |
| 22 | 22 |
|
| 23 |
- warnings, err := daemon.verifyContainerSettings(hostConfig, config) |
|
| 23 |
+ warnings, err := daemon.verifyContainerSettings(ctx, hostConfig, config) |
|
| 24 | 24 |
if err != nil {
|
| 25 | 25 |
return nil, warnings, err |
| 26 | 26 |
} |
| 27 | 27 |
|
| 28 | 28 |
daemon.adaptContainerSettings(hostConfig, adjustCPUShares) |
| 29 | 29 |
|
| 30 |
- container, buildWarnings, err := daemon.Create(config, hostConfig, name) |
|
| 30 |
+ container, buildWarnings, err := daemon.Create(ctx, config, hostConfig, name) |
|
| 31 | 31 |
if err != nil {
|
| 32 |
- if daemon.Graph().IsNotExist(err, config.Image) {
|
|
| 32 |
+ if daemon.Graph(ctx).IsNotExist(err, config.Image) {
|
|
| 33 | 33 |
if strings.Contains(config.Image, "@") {
|
| 34 | 34 |
return nil, warnings, derr.ErrorCodeNoSuchImageHash.WithArgs(config.Image) |
| 35 | 35 |
} |
| ... | ... |
@@ -48,7 +49,7 @@ func (daemon *Daemon) ContainerCreate(name string, config *runconfig.Config, hos |
| 48 | 48 |
} |
| 49 | 49 |
|
| 50 | 50 |
// Create creates a new container from the given configuration with a given name. |
| 51 |
-func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.HostConfig, name string) (retC *Container, retS []string, retErr error) {
|
|
| 51 |
+func (daemon *Daemon) Create(ctx context.Context, config *runconfig.Config, hostConfig *runconfig.HostConfig, name string) (retC *Container, retS []string, retErr error) {
|
|
| 52 | 52 |
var ( |
| 53 | 53 |
container *Container |
| 54 | 54 |
warnings []string |
| ... | ... |
@@ -76,29 +77,29 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos |
| 76 | 76 |
hostConfig = &runconfig.HostConfig{}
|
| 77 | 77 |
} |
| 78 | 78 |
if hostConfig.SecurityOpt == nil {
|
| 79 |
- hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode) |
|
| 79 |
+ hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(ctx, hostConfig.IpcMode, hostConfig.PidMode) |
|
| 80 | 80 |
if err != nil {
|
| 81 | 81 |
return nil, nil, err |
| 82 | 82 |
} |
| 83 | 83 |
} |
| 84 |
- if container, err = daemon.newContainer(name, config, imgID); err != nil {
|
|
| 84 |
+ if container, err = daemon.newContainer(ctx, name, config, imgID); err != nil {
|
|
| 85 | 85 |
return nil, nil, err |
| 86 | 86 |
} |
| 87 | 87 |
defer func() {
|
| 88 | 88 |
if retErr != nil {
|
| 89 |
- if err := daemon.rm(container, false); err != nil {
|
|
| 89 |
+ if err := daemon.rm(ctx, container, false); err != nil {
|
|
| 90 | 90 |
logrus.Errorf("Clean up Error! Cannot destroy container %s: %v", container.ID, err)
|
| 91 | 91 |
} |
| 92 | 92 |
} |
| 93 | 93 |
}() |
| 94 | 94 |
|
| 95 |
- if err := daemon.Register(container); err != nil {
|
|
| 95 |
+ if err := daemon.Register(ctx, container); err != nil {
|
|
| 96 | 96 |
return nil, nil, err |
| 97 | 97 |
} |
| 98 | 98 |
if err := daemon.createRootfs(container); err != nil {
|
| 99 | 99 |
return nil, nil, err |
| 100 | 100 |
} |
| 101 |
- if err := daemon.setHostConfig(container, hostConfig); err != nil {
|
|
| 101 |
+ if err := daemon.setHostConfig(ctx, container, hostConfig); err != nil {
|
|
| 102 | 102 |
return nil, nil, err |
| 103 | 103 |
} |
| 104 | 104 |
defer func() {
|
| ... | ... |
@@ -108,10 +109,10 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos |
| 108 | 108 |
} |
| 109 | 109 |
} |
| 110 | 110 |
}() |
| 111 |
- if err := container.Mount(); err != nil {
|
|
| 111 |
+ if err := container.Mount(ctx); err != nil {
|
|
| 112 | 112 |
return nil, nil, err |
| 113 | 113 |
} |
| 114 |
- defer container.Unmount() |
|
| 114 |
+ defer container.Unmount(ctx) |
|
| 115 | 115 |
|
| 116 | 116 |
if err := createContainerPlatformSpecificSettings(container, config, hostConfig, img); err != nil {
|
| 117 | 117 |
return nil, nil, err |
| ... | ... |
@@ -121,16 +122,16 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos |
| 121 | 121 |
logrus.Errorf("Error saving new container to disk: %v", err)
|
| 122 | 122 |
return nil, nil, err |
| 123 | 123 |
} |
| 124 |
- container.logEvent("create")
|
|
| 124 |
+ container.logEvent(ctx, "create") |
|
| 125 | 125 |
return container, warnings, nil |
| 126 | 126 |
} |
| 127 | 127 |
|
| 128 |
-func (daemon *Daemon) generateSecurityOpt(ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) {
|
|
| 128 |
+func (daemon *Daemon) generateSecurityOpt(ctx context.Context, ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) {
|
|
| 129 | 129 |
if ipcMode.IsHost() || pidMode.IsHost() {
|
| 130 | 130 |
return label.DisableSecOpt(), nil |
| 131 | 131 |
} |
| 132 | 132 |
if ipcContainer := ipcMode.Container(); ipcContainer != "" {
|
| 133 |
- c, err := daemon.Get(ipcContainer) |
|
| 133 |
+ c, err := daemon.Get(ctx, ipcContainer) |
|
| 134 | 134 |
if err != nil {
|
| 135 | 135 |
return nil, err |
| 136 | 136 |
} |
| ... | ... |
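Note: the call sites above switch to `container.logEvent(ctx, "create")` (and later "destroy", "exec_create", "exec_start"), but logEvent's own definition is outside these hunks. Presumably it simply forwards the context to the daemon's events service so the request ID ends up on the emitted message (see the Events.Log change further down). A rough sketch, assuming the daemon keeps the service in an `EventsService` field and the pre-existing argument order:

```go
// Sketch only: logEvent forwards the request-scoped context to the events
// service. EventsService and the trailing arguments are assumptions based on
// how the events package is called elsewhere in this patch.
func (container *Container) logEvent(ctx context.Context, action string) {
	container.daemon.EventsService.Log(ctx, action, container.ID, container.Config.Image)
}
```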
@@ -142,7 +143,7 @@ func (daemon *Daemon) generateSecurityOpt(ipcMode runconfig.IpcMode, pidMode run |
| 142 | 142 |
|
| 143 | 143 |
// VolumeCreate creates a volume with the specified name, driver, and opts |
| 144 | 144 |
// This is called directly from the remote API |
| 145 |
-func (daemon *Daemon) VolumeCreate(name, driverName string, opts map[string]string) (*types.Volume, error) {
|
|
| 145 |
+func (daemon *Daemon) VolumeCreate(ctx context.Context, name, driverName string, opts map[string]string) (*types.Volume, error) {
|
|
| 146 | 146 |
if name == "" {
|
| 147 | 147 |
name = stringid.GenerateNonCryptoID() |
| 148 | 148 |
} |
| ... | ... |
@@ -20,6 +20,7 @@ import ( |
| 20 | 20 |
|
| 21 | 21 |
"github.com/Sirupsen/logrus" |
| 22 | 22 |
"github.com/docker/docker/api" |
| 23 |
+ "github.com/docker/docker/context" |
|
| 23 | 24 |
"github.com/docker/docker/daemon/events" |
| 24 | 25 |
"github.com/docker/docker/daemon/execdriver" |
| 25 | 26 |
"github.com/docker/docker/daemon/execdriver/execdrivers" |
| ... | ... |
@@ -127,14 +128,14 @@ type Daemon struct {
|
| 127 | 127 |
// - A partial container ID prefix (e.g. short ID) of any length that is |
| 128 | 128 |
// unique enough to only return a single container object |
| 129 | 129 |
// If none of these searches succeed, an error is returned |
| 130 |
-func (daemon *Daemon) Get(prefixOrName string) (*Container, error) {
|
|
| 130 |
+func (daemon *Daemon) Get(ctx context.Context, prefixOrName string) (*Container, error) {
|
|
| 131 | 131 |
if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil {
|
| 132 | 132 |
// prefix is an exact match to a full container ID |
| 133 | 133 |
return containerByID, nil |
| 134 | 134 |
} |
| 135 | 135 |
|
| 136 | 136 |
// GetByName will match only an exact name provided; we ignore errors |
| 137 |
- if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil {
|
|
| 137 |
+ if containerByName, _ := daemon.GetByName(ctx, prefixOrName); containerByName != nil {
|
|
| 138 | 138 |
// prefix is an exact match to a full container Name |
| 139 | 139 |
return containerByName, nil |
| 140 | 140 |
} |
| ... | ... |
@@ -152,8 +153,8 @@ func (daemon *Daemon) Get(prefixOrName string) (*Container, error) {
|
| 152 | 152 |
|
| 153 | 153 |
// Exists returns a true if a container of the specified ID or name exists, |
| 154 | 154 |
// false otherwise. |
| 155 |
-func (daemon *Daemon) Exists(id string) bool {
|
|
| 156 |
- c, _ := daemon.Get(id) |
|
| 155 |
+func (daemon *Daemon) Exists(ctx context.Context, id string) bool {
|
|
| 156 |
+ c, _ := daemon.Get(ctx, id) |
|
| 157 | 157 |
return c != nil |
| 158 | 158 |
} |
| 159 | 159 |
|
| ... | ... |
@@ -178,8 +179,8 @@ func (daemon *Daemon) load(id string) (*Container, error) {
|
| 178 | 178 |
} |
| 179 | 179 |
|
| 180 | 180 |
// Register makes a container object usable by the daemon as <container.ID> |
| 181 |
-func (daemon *Daemon) Register(container *Container) error {
|
|
| 182 |
- if container.daemon != nil || daemon.Exists(container.ID) {
|
|
| 181 |
+func (daemon *Daemon) Register(ctx context.Context, container *Container) error {
|
|
| 182 |
+ if container.daemon != nil || daemon.Exists(ctx, container.ID) {
|
|
| 183 | 183 |
return fmt.Errorf("Container is already loaded")
|
| 184 | 184 |
} |
| 185 | 185 |
if err := validateID(container.ID); err != nil {
|
| ... | ... |
@@ -217,10 +218,7 @@ func (daemon *Daemon) Register(container *Container) error {
|
| 217 | 217 |
} |
| 218 | 218 |
daemon.execDriver.Terminate(cmd) |
| 219 | 219 |
|
| 220 |
- if err := container.unmountIpcMounts(); err != nil {
|
|
| 221 |
- logrus.Errorf("%s: Failed to umount ipc filesystems: %v", container.ID, err)
|
|
| 222 |
- } |
|
| 223 |
- if err := container.Unmount(); err != nil {
|
|
| 220 |
+ if err := container.Unmount(ctx); err != nil {
|
|
| 224 | 221 |
logrus.Debugf("unmount error %s", err)
|
| 225 | 222 |
} |
| 226 | 223 |
if err := container.toDiskLocking(); err != nil {
|
| ... | ... |
@@ -254,7 +252,7 @@ func (daemon *Daemon) ensureName(container *Container) error {
|
| 254 | 254 |
return nil |
| 255 | 255 |
} |
| 256 | 256 |
|
| 257 |
-func (daemon *Daemon) restore() error {
|
|
| 257 |
+func (daemon *Daemon) restore(ctx context.Context) error {
|
|
| 258 | 258 |
type cr struct {
|
| 259 | 259 |
container *Container |
| 260 | 260 |
registered bool |
| ... | ... |
@@ -324,7 +322,7 @@ func (daemon *Daemon) restore() error {
|
| 324 | 324 |
} |
| 325 | 325 |
} |
| 326 | 326 |
|
| 327 |
- if err := daemon.Register(container); err != nil {
|
|
| 327 |
+ if err := daemon.Register(ctx, container); err != nil {
|
|
| 328 | 328 |
logrus.Errorf("Failed to register container %s: %s", container.ID, err)
|
| 329 | 329 |
// The container register failed should not be started. |
| 330 | 330 |
return |
| ... | ... |
@@ -335,7 +333,7 @@ func (daemon *Daemon) restore() error {
|
| 335 | 335 |
if daemon.configStore.AutoRestart && container.shouldRestart() {
|
| 336 | 336 |
logrus.Debugf("Starting container %s", container.ID)
|
| 337 | 337 |
|
| 338 |
- if err := container.Start(); err != nil {
|
|
| 338 |
+ if err := container.Start(ctx); err != nil {
|
|
| 339 | 339 |
logrus.Errorf("Failed to start container %s: %s", container.ID, err)
|
| 340 | 340 |
} |
| 341 | 341 |
} |
| ... | ... |
@@ -365,7 +363,7 @@ func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image. |
| 365 | 365 |
return nil |
| 366 | 366 |
} |
| 367 | 367 |
|
| 368 |
-func (daemon *Daemon) generateIDAndName(name string) (string, string, error) {
|
|
| 368 |
+func (daemon *Daemon) generateIDAndName(ctx context.Context, name string) (string, string, error) {
|
|
| 369 | 369 |
var ( |
| 370 | 370 |
err error |
| 371 | 371 |
id = stringid.GenerateNonCryptoID() |
| ... | ... |
@@ -378,14 +376,14 @@ func (daemon *Daemon) generateIDAndName(name string) (string, string, error) {
|
| 378 | 378 |
return id, name, nil |
| 379 | 379 |
} |
| 380 | 380 |
|
| 381 |
- if name, err = daemon.reserveName(id, name); err != nil {
|
|
| 381 |
+ if name, err = daemon.reserveName(ctx, id, name); err != nil {
|
|
| 382 | 382 |
return "", "", err |
| 383 | 383 |
} |
| 384 | 384 |
|
| 385 | 385 |
return id, name, nil |
| 386 | 386 |
} |
| 387 | 387 |
|
| 388 |
-func (daemon *Daemon) reserveName(id, name string) (string, error) {
|
|
| 388 |
+func (daemon *Daemon) reserveName(ctx context.Context, id, name string) (string, error) {
|
|
| 389 | 389 |
if !validContainerNamePattern.MatchString(name) {
|
| 390 | 390 |
return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars)
|
| 391 | 391 |
} |
| ... | ... |
@@ -399,7 +397,7 @@ func (daemon *Daemon) reserveName(id, name string) (string, error) {
|
| 399 | 399 |
return "", err |
| 400 | 400 |
} |
| 401 | 401 |
|
| 402 |
- conflictingContainer, err := daemon.GetByName(name) |
|
| 402 |
+ conflictingContainer, err := daemon.GetByName(ctx, name) |
|
| 403 | 403 |
if err != nil {
|
| 404 | 404 |
if strings.Contains(err.Error(), "Could not find entity") {
|
| 405 | 405 |
return "", err |
| ... | ... |
@@ -469,12 +467,12 @@ func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint *stringutils.StrSlic |
| 469 | 469 |
return entrypoint, args |
| 470 | 470 |
} |
| 471 | 471 |
|
| 472 |
-func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID string) (*Container, error) {
|
|
| 472 |
+func (daemon *Daemon) newContainer(ctx context.Context, name string, config *runconfig.Config, imgID string) (*Container, error) {
|
|
| 473 | 473 |
var ( |
| 474 | 474 |
id string |
| 475 | 475 |
err error |
| 476 | 476 |
) |
| 477 |
- id, name, err = daemon.generateIDAndName(name) |
|
| 477 |
+ id, name, err = daemon.generateIDAndName(ctx, name) |
|
| 478 | 478 |
if err != nil {
|
| 479 | 479 |
return nil, err |
| 480 | 480 |
} |
| ... | ... |
@@ -511,7 +509,7 @@ func GetFullContainerName(name string) (string, error) {
|
| 511 | 511 |
} |
| 512 | 512 |
|
| 513 | 513 |
// GetByName returns a container given a name. |
| 514 |
-func (daemon *Daemon) GetByName(name string) (*Container, error) {
|
|
| 514 |
+func (daemon *Daemon) GetByName(ctx context.Context, name string) (*Container, error) {
|
|
| 515 | 515 |
fullName, err := GetFullContainerName(name) |
| 516 | 516 |
if err != nil {
|
| 517 | 517 |
return nil, err |
| ... | ... |
@@ -530,7 +528,7 @@ func (daemon *Daemon) GetByName(name string) (*Container, error) {
|
| 530 | 530 |
// children returns all child containers of the container with the |
| 531 | 531 |
// given name. The containers are returned as a map from the container |
| 532 | 532 |
// name to a pointer to Container. |
| 533 |
-func (daemon *Daemon) children(name string) (map[string]*Container, error) {
|
|
| 533 |
+func (daemon *Daemon) children(ctx context.Context, name string) (map[string]*Container, error) {
|
|
| 534 | 534 |
name, err := GetFullContainerName(name) |
| 535 | 535 |
if err != nil {
|
| 536 | 536 |
return nil, err |
| ... | ... |
@@ -538,7 +536,7 @@ func (daemon *Daemon) children(name string) (map[string]*Container, error) {
|
| 538 | 538 |
children := make(map[string]*Container) |
| 539 | 539 |
|
| 540 | 540 |
err = daemon.containerGraphDB.Walk(name, func(p string, e *graphdb.Entity) error {
|
| 541 |
- c, err := daemon.Get(e.ID()) |
|
| 541 |
+ c, err := daemon.Get(ctx, e.ID()) |
|
| 542 | 542 |
if err != nil {
|
| 543 | 543 |
return err |
| 544 | 544 |
} |
| ... | ... |
@@ -574,7 +572,7 @@ func (daemon *Daemon) registerLink(parent, child *Container, alias string) error |
| 574 | 574 |
|
| 575 | 575 |
// NewDaemon sets up everything for the daemon to be able to service |
| 576 | 576 |
// requests from the webserver. |
| 577 |
-func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemon, err error) {
|
|
| 577 |
+func NewDaemon(ctx context.Context, config *Config, registryService *registry.Service) (daemon *Daemon, err error) {
|
|
| 578 | 578 |
setDefaultMtu(config) |
| 579 | 579 |
|
| 580 | 580 |
// Ensure we have compatible configuration options |
| ... | ... |
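Note: NewDaemon and Shutdown now take the context as well. Daemon startup and shutdown are not tied to any API request, so the entry point would presumably hand them a background context; events logged from `restore` (for example, auto-restarted containers) would then carry an empty request ID. A sketch of how the new signatures compose, where `startDaemon` is an illustrative helper rather than code from this patch, assuming the usual imports:

```go
// startDaemon is illustrative only; config and service come from the caller.
func startDaemon(config *daemon.Config, service *registry.Service) (*daemon.Daemon, error) {
	// No API request exists at startup, so use a background context.
	ctx := context.Background()

	d, err := daemon.NewDaemon(ctx, config, service)
	if err != nil {
		return nil, err
	}
	// Shutdown takes the same context; a real caller would defer this on exit:
	//   defer d.Shutdown(ctx)
	return d, nil
}
```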
@@ -642,7 +640,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo |
| 642 | 642 |
// Ensure the graph driver is shutdown at a later point |
| 643 | 643 |
defer func() {
|
| 644 | 644 |
if err != nil {
|
| 645 |
- if err := d.Shutdown(); err != nil {
|
|
| 645 |
+ if err := d.Shutdown(ctx); err != nil {
|
|
| 646 | 646 |
logrus.Error(err) |
| 647 | 647 |
} |
| 648 | 648 |
} |
| ... | ... |
@@ -776,7 +774,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo |
| 776 | 776 |
|
| 777 | 777 |
go d.execCommandGC() |
| 778 | 778 |
|
| 779 |
- if err := d.restore(); err != nil {
|
|
| 779 |
+ if err := d.restore(ctx); err != nil {
|
|
| 780 | 780 |
return nil, err |
| 781 | 781 |
} |
| 782 | 782 |
|
| ... | ... |
@@ -784,12 +782,12 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo |
| 784 | 784 |
} |
| 785 | 785 |
|
| 786 | 786 |
// Shutdown stops the daemon. |
| 787 |
-func (daemon *Daemon) Shutdown() error {
|
|
| 787 |
+func (daemon *Daemon) Shutdown(ctx context.Context) error {
|
|
| 788 | 788 |
daemon.shutdown = true |
| 789 | 789 |
if daemon.containers != nil {
|
| 790 | 790 |
group := sync.WaitGroup{}
|
| 791 | 791 |
logrus.Debug("starting clean shutdown of all containers...")
|
| 792 |
- for _, container := range daemon.List() {
|
|
| 792 |
+ for _, container := range daemon.List(ctx) {
|
|
| 793 | 793 |
c := container |
| 794 | 794 |
if c.IsRunning() {
|
| 795 | 795 |
logrus.Debugf("stopping %s", c.ID)
|
| ... | ... |
@@ -812,7 +810,7 @@ func (daemon *Daemon) Shutdown() error {
|
| 812 | 812 |
logrus.Debugf("sending SIGTERM to container %s with error: %v", c.ID, err)
|
| 813 | 813 |
return |
| 814 | 814 |
} |
| 815 |
- if err := c.unpause(); err != nil {
|
|
| 815 |
+ if err := c.unpause(ctx); err != nil {
|
|
| 816 | 816 |
logrus.Debugf("Failed to unpause container %s with error: %v", c.ID, err)
|
| 817 | 817 |
return |
| 818 | 818 |
} |
| ... | ... |
@@ -827,7 +825,7 @@ func (daemon *Daemon) Shutdown() error {
|
| 827 | 827 |
} |
| 828 | 828 |
} else {
|
| 829 | 829 |
// If container failed to exit in 10 seconds of SIGTERM, then using the force |
| 830 |
- if err := c.Stop(10); err != nil {
|
|
| 830 |
+ if err := c.Stop(ctx, 10); err != nil {
|
|
| 831 | 831 |
logrus.Errorf("Stop container %s with error: %v", c.ID, err)
|
| 832 | 832 |
} |
| 833 | 833 |
} |
| ... | ... |
@@ -865,7 +863,7 @@ func (daemon *Daemon) Shutdown() error {
|
| 865 | 865 |
|
| 866 | 866 |
// Mount sets container.basefs |
| 867 | 867 |
// (is it not set coming in? why is it unset?) |
| 868 |
-func (daemon *Daemon) Mount(container *Container) error {
|
|
| 868 |
+func (daemon *Daemon) Mount(ctx context.Context, container *Container) error {
|
|
| 869 | 869 |
dir, err := daemon.driver.Get(container.ID, container.getMountLabel()) |
| 870 | 870 |
if err != nil {
|
| 871 | 871 |
return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, daemon.driver, err)
|
| ... | ... |
@@ -890,14 +888,14 @@ func (daemon *Daemon) unmount(container *Container) error {
|
| 890 | 890 |
return nil |
| 891 | 891 |
} |
| 892 | 892 |
|
| 893 |
-func (daemon *Daemon) run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
|
|
| 893 |
+func (daemon *Daemon) run(ctx context.Context, c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
|
|
| 894 | 894 |
hooks := execdriver.Hooks{
|
| 895 | 895 |
Start: startCallback, |
| 896 | 896 |
} |
| 897 |
- hooks.PreStart = append(hooks.PreStart, func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
|
|
| 897 |
+ hooks.PreStart = append(hooks.PreStart, func(ctx context.Context, processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
|
|
| 898 | 898 |
return c.setNetworkNamespaceKey(pid) |
| 899 | 899 |
}) |
| 900 |
- return daemon.execDriver.Run(c.command, pipes, hooks) |
|
| 900 |
+ return daemon.execDriver.Run(ctx, c.command, pipes, hooks) |
|
| 901 | 901 |
} |
| 902 | 902 |
|
| 903 | 903 |
func (daemon *Daemon) kill(c *Container, sig int) error {
|
| ... | ... |
@@ -964,12 +962,12 @@ func (daemon *Daemon) createRootfs(container *Container) error {
|
| 964 | 964 |
// which need direct access to daemon.graph. |
| 965 | 965 |
// Once the tests switch to using engine and jobs, this method |
| 966 | 966 |
// can go away. |
| 967 |
-func (daemon *Daemon) Graph() *graph.Graph {
|
|
| 967 |
+func (daemon *Daemon) Graph(ctx context.Context) *graph.Graph {
|
|
| 968 | 968 |
return daemon.graph |
| 969 | 969 |
} |
| 970 | 970 |
|
| 971 | 971 |
// Repositories returns all repositories. |
| 972 |
-func (daemon *Daemon) Repositories() *graph.TagStore {
|
|
| 972 |
+func (daemon *Daemon) Repositories(ctx context.Context) *graph.TagStore {
|
|
| 973 | 973 |
return daemon.repositories |
| 974 | 974 |
} |
| 975 | 975 |
|
| ... | ... |
@@ -983,13 +981,13 @@ func (daemon *Daemon) systemInitPath() string {
|
| 983 | 983 |
|
| 984 | 984 |
// GraphDriver returns the currently used driver for processing |
| 985 | 985 |
// container layers. |
| 986 |
-func (daemon *Daemon) GraphDriver() graphdriver.Driver {
|
|
| 986 |
+func (daemon *Daemon) GraphDriver(ctx context.Context) graphdriver.Driver {
|
|
| 987 | 987 |
return daemon.driver |
| 988 | 988 |
} |
| 989 | 989 |
|
| 990 | 990 |
// ExecutionDriver returns the currently used driver for creating and |
| 991 | 991 |
// starting execs in a container. |
| 992 |
-func (daemon *Daemon) ExecutionDriver() execdriver.Driver {
|
|
| 992 |
+func (daemon *Daemon) ExecutionDriver(ctx context.Context) execdriver.Driver {
|
|
| 993 | 993 |
return daemon.execDriver |
| 994 | 994 |
} |
| 995 | 995 |
|
| ... | ... |
@@ -1001,9 +999,9 @@ func (daemon *Daemon) containerGraph() *graphdb.Database {
|
| 1001 | 1001 |
// of the image with imgID, that had the same config when it was |
| 1002 | 1002 |
// created. nil is returned if a child cannot be found. An error is |
| 1003 | 1003 |
// returned if the parent image cannot be found. |
| 1004 |
-func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
|
|
| 1004 |
+func (daemon *Daemon) ImageGetCached(ctx context.Context, imgID string, config *runconfig.Config) (*image.Image, error) {
|
|
| 1005 | 1005 |
// Retrieve all images |
| 1006 |
- images := daemon.Graph().Map() |
|
| 1006 |
+ images := daemon.Graph(ctx).Map() |
|
| 1007 | 1007 |
|
| 1008 | 1008 |
// Store the tree in a map of map (map[parentId][childId]) |
| 1009 | 1009 |
imageMap := make(map[string]map[string]struct{})
|
| ... | ... |
@@ -1039,7 +1037,7 @@ func tempDir(rootDir string) (string, error) {
|
| 1039 | 1039 |
return tmpDir, system.MkdirAll(tmpDir, 0700) |
| 1040 | 1040 |
} |
| 1041 | 1041 |
|
| 1042 |
-func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
|
|
| 1042 |
+func (daemon *Daemon) setHostConfig(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
|
|
| 1043 | 1043 |
container.Lock() |
| 1044 | 1044 |
if err := parseSecurityOpt(container, hostConfig); err != nil {
|
| 1045 | 1045 |
container.Unlock() |
| ... | ... |
@@ -1049,14 +1047,14 @@ func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig. |
| 1049 | 1049 |
|
| 1050 | 1050 |
// Do not lock while creating volumes since this could be calling out to external plugins |
| 1051 | 1051 |
// Don't want to block other actions, like `docker ps` because we're waiting on an external plugin |
| 1052 |
- if err := daemon.registerMountPoints(container, hostConfig); err != nil {
|
|
| 1052 |
+ if err := daemon.registerMountPoints(ctx, container, hostConfig); err != nil {
|
|
| 1053 | 1053 |
return err |
| 1054 | 1054 |
} |
| 1055 | 1055 |
|
| 1056 | 1056 |
container.Lock() |
| 1057 | 1057 |
defer container.Unlock() |
| 1058 | 1058 |
// Register any links from the host config before starting the container |
| 1059 |
- if err := daemon.registerLinks(container, hostConfig); err != nil {
|
|
| 1059 |
+ if err := daemon.registerLinks(ctx, container, hostConfig); err != nil {
|
|
| 1060 | 1060 |
return err |
| 1061 | 1061 |
} |
| 1062 | 1062 |
|
| ... | ... |
@@ -1094,7 +1092,7 @@ func getDefaultRouteMtu() (int, error) {
|
| 1094 | 1094 |
|
| 1095 | 1095 |
// verifyContainerSettings performs validation of the hostconfig and config |
| 1096 | 1096 |
// structures. |
| 1097 |
-func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
|
|
| 1097 |
+func (daemon *Daemon) verifyContainerSettings(ctx context.Context, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
|
|
| 1098 | 1098 |
|
| 1099 | 1099 |
// First perform verification of settings common across all platforms. |
| 1100 | 1100 |
if config != nil {
|
| ... | ... |
@@ -1131,7 +1129,7 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig, |
| 1131 | 1131 |
} |
| 1132 | 1132 |
|
| 1133 | 1133 |
// Now do platform-specific verification |
| 1134 |
- return verifyPlatformContainerSettings(daemon, hostConfig, config) |
|
| 1134 |
+ return verifyPlatformContainerSettings(ctx, daemon, hostConfig, config) |
|
| 1135 | 1135 |
} |
| 1136 | 1136 |
|
| 1137 | 1137 |
func configureVolumes(config *Config) (*store.VolumeStore, error) {
|
| ... | ... |
@@ -8,6 +8,7 @@ import ( |
| 8 | 8 |
"path/filepath" |
| 9 | 9 |
"testing" |
| 10 | 10 |
|
| 11 |
+ "github.com/docker/docker/context" |
|
| 11 | 12 |
"github.com/docker/docker/pkg/graphdb" |
| 12 | 13 |
"github.com/docker/docker/pkg/stringid" |
| 13 | 14 |
"github.com/docker/docker/pkg/truncindex" |
| ... | ... |
@@ -92,32 +93,34 @@ func TestGet(t *testing.T) {
|
| 92 | 92 |
containerGraphDB: graph, |
| 93 | 93 |
} |
| 94 | 94 |
|
| 95 |
- if container, _ := daemon.Get("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 {
|
|
| 95 |
+ ctx := context.Background() |
|
| 96 |
+ |
|
| 97 |
+ if container, _ := daemon.Get(ctx, "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 {
|
|
| 96 | 98 |
t.Fatal("Should explicitly match full container IDs")
|
| 97 | 99 |
} |
| 98 | 100 |
|
| 99 |
- if container, _ := daemon.Get("75fb0b8009"); container != c4 {
|
|
| 101 |
+ if container, _ := daemon.Get(ctx, "75fb0b8009"); container != c4 {
|
|
| 100 | 102 |
t.Fatal("Should match a partial ID")
|
| 101 | 103 |
} |
| 102 | 104 |
|
| 103 |
- if container, _ := daemon.Get("drunk_hawking"); container != c2 {
|
|
| 105 |
+ if container, _ := daemon.Get(ctx, "drunk_hawking"); container != c2 {
|
|
| 104 | 106 |
t.Fatal("Should match a full name")
|
| 105 | 107 |
} |
| 106 | 108 |
|
| 107 | 109 |
// c3.Name is a partial match for both c3.ID and c2.ID |
| 108 |
- if c, _ := daemon.Get("3cdbd1aa"); c != c3 {
|
|
| 110 |
+ if c, _ := daemon.Get(ctx, "3cdbd1aa"); c != c3 {
|
|
| 109 | 111 |
t.Fatal("Should match a full name even though it collides with another container's ID")
|
| 110 | 112 |
} |
| 111 | 113 |
|
| 112 |
- if container, _ := daemon.Get("d22d69a2b896"); container != c5 {
|
|
| 114 |
+ if container, _ := daemon.Get(ctx, "d22d69a2b896"); container != c5 {
|
|
| 113 | 115 |
t.Fatal("Should match a container where the provided prefix is an exact match to the it's name, and is also a prefix for it's ID")
|
| 114 | 116 |
} |
| 115 | 117 |
|
| 116 |
- if _, err := daemon.Get("3cdbd1"); err == nil {
|
|
| 118 |
+ if _, err := daemon.Get(ctx, "3cdbd1"); err == nil {
|
|
| 117 | 119 |
t.Fatal("Should return an error when provided a prefix that partially matches multiple container ID's")
|
| 118 | 120 |
} |
| 119 | 121 |
|
| 120 |
- if _, err := daemon.Get("nothing"); err == nil {
|
|
| 122 |
+ if _, err := daemon.Get(ctx, "nothing"); err == nil {
|
|
| 121 | 123 |
t.Fatal("Should return an error when provided a prefix that is neither a name or a partial match to an ID")
|
| 122 | 124 |
} |
| 123 | 125 |
|
| ... | ... |
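Note: the unit tests have no HTTP request to derive a context from, so they construct one with `context.Background()` and pass it at every call site, as above; any events logged through such a context would carry an empty RequestID. A condensed, test-style sketch of the new call pattern, assuming the same daemon setup as in TestGet:

```go
ctx := context.Background()

// Lookups now always take the context as the first argument.
if c, _ := daemon.Get(ctx, "drunk_hawking"); c == nil {
	t.Fatal("expected to resolve the container by name")
}
if !daemon.Exists(ctx, "drunk_hawking") {
	t.Fatal("expected the container to exist")
}
```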
@@ -486,13 +489,15 @@ func TestRemoveLocalVolumesFollowingSymlinks(t *testing.T) {
|
| 486 | 486 |
t.Fatalf("Expected 1 volume mounted, was 0\n")
|
| 487 | 487 |
} |
| 488 | 488 |
|
| 489 |
+ ctx := context.Background() |
|
| 490 |
+ |
|
| 489 | 491 |
m := c.MountPoints["/vol1"] |
| 490 |
- _, err = daemon.VolumeCreate(m.Name, m.Driver, nil) |
|
| 492 |
+ _, err = daemon.VolumeCreate(ctx, m.Name, m.Driver, nil) |
|
| 491 | 493 |
if err != nil {
|
| 492 | 494 |
t.Fatal(err) |
| 493 | 495 |
} |
| 494 | 496 |
|
| 495 |
- if err := daemon.VolumeRm(m.Name); err != nil {
|
|
| 497 |
+ if err := daemon.VolumeRm(ctx, m.Name); err != nil {
|
|
| 496 | 498 |
t.Fatal(err) |
| 497 | 499 |
} |
| 498 | 500 |
|
| ... | ... |
@@ -13,6 +13,7 @@ import ( |
| 13 | 13 |
|
| 14 | 14 |
"github.com/Sirupsen/logrus" |
| 15 | 15 |
"github.com/docker/docker/autogen/dockerversion" |
| 16 |
+ "github.com/docker/docker/context" |
|
| 16 | 17 |
"github.com/docker/docker/daemon/graphdriver" |
| 17 | 18 |
"github.com/docker/docker/pkg/fileutils" |
| 18 | 19 |
"github.com/docker/docker/pkg/parsers" |
| ... | ... |
@@ -117,12 +118,12 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *runconfig.HostConfig, a |
| 117 | 117 |
|
| 118 | 118 |
// verifyPlatformContainerSettings performs platform-specific validation of the |
| 119 | 119 |
// hostconfig and config structures. |
| 120 |
-func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
|
|
| 120 |
+func verifyPlatformContainerSettings(ctx context.Context, daemon *Daemon, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
|
|
| 121 | 121 |
warnings := []string{}
|
| 122 | 122 |
sysInfo := sysinfo.New(true) |
| 123 | 123 |
|
| 124 |
- if hostConfig.LxcConf.Len() > 0 && !strings.Contains(daemon.ExecutionDriver().Name(), "lxc") {
|
|
| 125 |
- return warnings, fmt.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver().Name())
|
|
| 124 |
+ if hostConfig.LxcConf.Len() > 0 && !strings.Contains(daemon.ExecutionDriver(ctx).Name(), "lxc") {
|
|
| 125 |
+ return warnings, fmt.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver(ctx).Name())
|
|
| 126 | 126 |
} |
| 127 | 127 |
|
| 128 | 128 |
// memory subsystem checks and adjustments |
| ... | ... |
@@ -495,12 +496,12 @@ func setupInitLayer(initLayer string) error {
|
| 495 | 495 |
|
| 496 | 496 |
// NetworkAPIRouter implements a feature for server-experimental, |
| 497 | 497 |
// directly calling into libnetwork. |
| 498 |
-func (daemon *Daemon) NetworkAPIRouter() func(w http.ResponseWriter, req *http.Request) {
|
|
| 498 |
+func (daemon *Daemon) NetworkAPIRouter(ctx context.Context) func(w http.ResponseWriter, req *http.Request) {
|
|
| 499 | 499 |
return nwapi.NewHTTPHandler(daemon.netController) |
| 500 | 500 |
} |
| 501 | 501 |
|
| 502 | 502 |
// registerLinks writes the links to a file. |
| 503 |
-func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error {
|
|
| 503 |
+func (daemon *Daemon) registerLinks(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
|
|
| 504 | 504 |
if hostConfig == nil || hostConfig.Links == nil {
|
| 505 | 505 |
return nil |
| 506 | 506 |
} |
| ... | ... |
@@ -510,14 +511,14 @@ func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig. |
| 510 | 510 |
if err != nil {
|
| 511 | 511 |
return err |
| 512 | 512 |
} |
| 513 |
- child, err := daemon.Get(name) |
|
| 513 |
+ child, err := daemon.Get(ctx, name) |
|
| 514 | 514 |
if err != nil {
|
| 515 | 515 |
//An error from daemon.Get() means this name could not be found |
| 516 | 516 |
return fmt.Errorf("Could not get container for %s", name)
|
| 517 | 517 |
} |
| 518 | 518 |
for child.hostConfig.NetworkMode.IsContainer() {
|
| 519 | 519 |
parts := strings.SplitN(string(child.hostConfig.NetworkMode), ":", 2) |
| 520 |
- child, err = daemon.Get(parts[1]) |
|
| 520 |
+ child, err = daemon.Get(ctx, parts[1]) |
|
| 521 | 521 |
if err != nil {
|
| 522 | 522 |
return fmt.Errorf("Could not get container for %s", parts[1])
|
| 523 | 523 |
} |
| ... | ... |
@@ -6,6 +6,7 @@ import ( |
| 6 | 6 |
"syscall" |
| 7 | 7 |
|
| 8 | 8 |
"github.com/Sirupsen/logrus" |
| 9 |
+ "github.com/docker/docker/context" |
|
| 9 | 10 |
"github.com/docker/docker/daemon/graphdriver" |
| 10 | 11 |
// register the windows graph driver |
| 11 | 12 |
_ "github.com/docker/docker/daemon/graphdriver/windows" |
| ... | ... |
@@ -47,7 +48,7 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *runconfig.HostConfig, a |
| 47 | 47 |
|
| 48 | 48 |
// verifyPlatformContainerSettings performs platform-specific validation of the |
| 49 | 49 |
// hostconfig and config structures. |
| 50 |
-func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
|
|
| 50 |
+func verifyPlatformContainerSettings(ctx context.Context, daemon *Daemon, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
|
|
| 51 | 51 |
return nil, nil |
| 52 | 52 |
} |
| 53 | 53 |
|
| ... | ... |
@@ -104,7 +105,7 @@ func initNetworkController(config *Config) (libnetwork.NetworkController, error) |
| 104 | 104 |
|
| 105 | 105 |
// registerLinks sets up links between containers and writes the |
| 106 | 106 |
// configuration out for persistence. |
| 107 |
-func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error {
|
|
| 107 |
+func (daemon *Daemon) registerLinks(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
|
|
| 108 | 108 |
// TODO Windows. Factored out for network modes. There may be more |
| 109 | 109 |
// refactoring required here. |
| 110 | 110 |
|
| ... | ... |
@@ -117,7 +118,7 @@ func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig. |
| 117 | 117 |
if err != nil {
|
| 118 | 118 |
return err |
| 119 | 119 |
} |
| 120 |
- child, err := daemon.Get(name) |
|
| 120 |
+ child, err := daemon.Get(ctx, name) |
|
| 121 | 121 |
if err != nil {
|
| 122 | 122 |
//An error from daemon.Get() means this name could not be found |
| 123 | 123 |
return fmt.Errorf("Could not get container for %s", name)
|
| ... | ... |
@@ -5,6 +5,8 @@ import ( |
| 5 | 5 |
"os" |
| 6 | 6 |
"path" |
| 7 | 7 |
|
| 8 |
+ "github.com/docker/docker/context" |
|
| 9 |
+ |
|
| 8 | 10 |
"github.com/Sirupsen/logrus" |
| 9 | 11 |
derr "github.com/docker/docker/errors" |
| 10 | 12 |
"github.com/docker/docker/volume/store" |
| ... | ... |
@@ -19,8 +21,8 @@ type ContainerRmConfig struct {
|
| 19 | 19 |
// is returned if the container is not found, or if the remove |
| 20 | 20 |
// fails. If the remove succeeds, the container name is released, and |
| 21 | 21 |
// network links are removed. |
| 22 |
-func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error {
|
|
| 23 |
- container, err := daemon.Get(name) |
|
| 22 |
+func (daemon *Daemon) ContainerRm(ctx context.Context, name string, config *ContainerRmConfig) error {
|
|
| 23 |
+ container, err := daemon.Get(ctx, name) |
|
| 24 | 24 |
if err != nil {
|
| 25 | 25 |
return err |
| 26 | 26 |
} |
| ... | ... |
@@ -43,9 +45,9 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error |
| 43 | 43 |
return err |
| 44 | 44 |
} |
| 45 | 45 |
|
| 46 |
- parentContainer, _ := daemon.Get(pe.ID()) |
|
| 46 |
+ parentContainer, _ := daemon.Get(ctx, pe.ID()) |
|
| 47 | 47 |
if parentContainer != nil {
|
| 48 |
- if err := parentContainer.updateNetwork(); err != nil {
|
|
| 48 |
+ if err := parentContainer.updateNetwork(ctx); err != nil {
|
|
| 49 | 49 |
logrus.Debugf("Could not update network to remove link %s: %v", n, err)
|
| 50 | 50 |
} |
| 51 | 51 |
} |
| ... | ... |
@@ -53,7 +55,7 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error |
| 53 | 53 |
return nil |
| 54 | 54 |
} |
| 55 | 55 |
|
| 56 |
- if err := daemon.rm(container, config.ForceRemove); err != nil {
|
|
| 56 |
+ if err := daemon.rm(ctx, container, config.ForceRemove); err != nil {
|
|
| 57 | 57 |
// return derr.ErrorCodeCantDestroy.WithArgs(name, utils.GetErrorMessage(err)) |
| 58 | 58 |
return err |
| 59 | 59 |
} |
| ... | ... |
@@ -66,12 +68,12 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error |
| 66 | 66 |
} |
| 67 | 67 |
|
| 68 | 68 |
// Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem. |
| 69 |
-func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
|
|
| 69 |
+func (daemon *Daemon) rm(ctx context.Context, container *Container, forceRemove bool) (err error) {
|
|
| 70 | 70 |
if container.IsRunning() {
|
| 71 | 71 |
if !forceRemove {
|
| 72 | 72 |
return derr.ErrorCodeRmRunning |
| 73 | 73 |
} |
| 74 |
- if err := container.Kill(); err != nil {
|
|
| 74 |
+ if err := container.Kill(ctx); err != nil {
|
|
| 75 | 75 |
return derr.ErrorCodeRmFailed.WithArgs(err) |
| 76 | 76 |
} |
| 77 | 77 |
} |
| ... | ... |
@@ -92,7 +94,7 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
|
| 92 | 92 |
|
| 93 | 93 |
defer container.resetRemovalInProgress() |
| 94 | 94 |
|
| 95 |
- if err = container.Stop(3); err != nil {
|
|
| 95 |
+ if err = container.Stop(ctx, 3); err != nil {
|
|
| 96 | 96 |
return err |
| 97 | 97 |
} |
| 98 | 98 |
|
| ... | ... |
@@ -113,7 +115,7 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
|
| 113 | 113 |
daemon.idIndex.Delete(container.ID) |
| 114 | 114 |
daemon.containers.Delete(container.ID) |
| 115 | 115 |
os.RemoveAll(container.root) |
| 116 |
- container.logEvent("destroy")
|
|
| 116 |
+ container.logEvent(ctx, "destroy") |
|
| 117 | 117 |
} |
| 118 | 118 |
}() |
| 119 | 119 |
|
| ... | ... |
@@ -142,14 +144,14 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
|
| 142 | 142 |
daemon.idIndex.Delete(container.ID) |
| 143 | 143 |
daemon.containers.Delete(container.ID) |
| 144 | 144 |
|
| 145 |
- container.logEvent("destroy")
|
|
| 145 |
+ container.logEvent(ctx, "destroy") |
|
| 146 | 146 |
return nil |
| 147 | 147 |
} |
| 148 | 148 |
|
| 149 | 149 |
// VolumeRm removes the volume with the given name. |
| 150 | 150 |
// If the volume is referenced by a container it is not removed |
| 151 | 151 |
// This is called directly from the remote API |
| 152 |
-func (daemon *Daemon) VolumeRm(name string) error {
|
|
| 152 |
+func (daemon *Daemon) VolumeRm(ctx context.Context, name string) error {
|
|
| 153 | 153 |
v, err := daemon.volumes.Get(name) |
| 154 | 154 |
if err != nil {
|
| 155 | 155 |
return err |
| ... | ... |
@@ -4,6 +4,8 @@ import ( |
| 4 | 4 |
"sync" |
| 5 | 5 |
"time" |
| 6 | 6 |
|
| 7 |
+ "github.com/docker/docker/context" |
|
| 8 |
+ |
|
| 7 | 9 |
"github.com/docker/docker/pkg/jsonmessage" |
| 8 | 10 |
"github.com/docker/docker/pkg/pubsub" |
| 9 | 11 |
) |
| ... | ... |
@@ -44,9 +46,9 @@ func (e *Events) Evict(l chan interface{}) {
|
| 44 | 44 |
|
| 45 | 45 |
// Log broadcasts the event to all listeners. Each listener has 100 milliseconds |
| 46 | 46 |
// to receive the event or it will be skipped. |
| 47 |
-func (e *Events) Log(action, id, from string) {
|
|
| 47 |
+func (e *Events) Log(ctx context.Context, action, id, from string) {
|
|
| 48 | 48 |
now := time.Now().UTC() |
| 49 |
- jm := &jsonmessage.JSONMessage{Status: action, ID: id, From: from, Time: now.Unix(), TimeNano: now.UnixNano()}
|
|
| 49 |
+ jm := &jsonmessage.JSONMessage{RequestID: ctx.RequestID(), Status: action, ID: id, From: from, Time: now.Unix(), TimeNano: now.UnixNano()}
|
|
| 50 | 50 |
e.mu.Lock() |
| 51 | 51 |
if len(e.events) == cap(e.events) {
|
| 52 | 52 |
// discard oldest event |
| ... | ... |
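Note: this hunk is the payoff of the whole change. Because every call chain now carries the request-scoped context, Log can stamp the originating API request's ID onto the JSONMessage it broadcasts. A small usage sketch built from the Subscribe/Log/Evict calls visible in the tests below; the request-scoped ctx (and its ID) is assumed to be built by the API server, and the helper itself is illustrative, not part of the patch:

```go
// readOneEvent subscribes, emits one event, and returns the request ID that
// was recorded on it. ctx is assumed to be a request-scoped context whose
// RequestID() identifies the originating API call.
func readOneEvent(ctx context.Context, e *events.Events) string {
	_, l := e.Subscribe()
	defer e.Evict(l)

	// The event now records which API request caused it.
	e.Log(ctx, "create", "some-container-id", "some-image")

	msg := (<-l).(*jsonmessage.JSONMessage)
	return msg.RequestID // "" when ctx carries no request ID
}
```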
@@ -5,10 +5,12 @@ import ( |
| 5 | 5 |
"testing" |
| 6 | 6 |
"time" |
| 7 | 7 |
|
| 8 |
+ "github.com/docker/docker/context" |
|
| 8 | 9 |
"github.com/docker/docker/pkg/jsonmessage" |
| 9 | 10 |
) |
| 10 | 11 |
|
| 11 | 12 |
func TestEventsLog(t *testing.T) {
|
| 13 |
+ ctx := context.Background() |
|
| 12 | 14 |
e := New() |
| 13 | 15 |
_, l1 := e.Subscribe() |
| 14 | 16 |
_, l2 := e.Subscribe() |
| ... | ... |
@@ -18,7 +20,7 @@ func TestEventsLog(t *testing.T) {
|
| 18 | 18 |
if count != 2 {
|
| 19 | 19 |
t.Fatalf("Must be 2 subscribers, got %d", count)
|
| 20 | 20 |
} |
| 21 |
- e.Log("test", "cont", "image")
|
|
| 21 |
+ e.Log(ctx, "test", "cont", "image") |
|
| 22 | 22 |
select {
|
| 23 | 23 |
case msg := <-l1: |
| 24 | 24 |
jmsg, ok := msg.(*jsonmessage.JSONMessage) |
| ... | ... |
@@ -64,13 +66,14 @@ func TestEventsLog(t *testing.T) {
|
| 64 | 64 |
} |
| 65 | 65 |
|
| 66 | 66 |
func TestEventsLogTimeout(t *testing.T) {
|
| 67 |
+ ctx := context.Background() |
|
| 67 | 68 |
e := New() |
| 68 | 69 |
_, l := e.Subscribe() |
| 69 | 70 |
defer e.Evict(l) |
| 70 | 71 |
|
| 71 | 72 |
c := make(chan struct{})
|
| 72 | 73 |
go func() {
|
| 73 |
- e.Log("test", "cont", "image")
|
|
| 74 |
+ e.Log(ctx, "test", "cont", "image") |
|
| 74 | 75 |
close(c) |
| 75 | 76 |
}() |
| 76 | 77 |
|
| ... | ... |
@@ -82,13 +85,14 @@ func TestEventsLogTimeout(t *testing.T) {
|
| 82 | 82 |
} |
| 83 | 83 |
|
| 84 | 84 |
func TestLogEvents(t *testing.T) {
|
| 85 |
+ ctx := context.Background() |
|
| 85 | 86 |
e := New() |
| 86 | 87 |
|
| 87 | 88 |
for i := 0; i < eventsLimit+16; i++ {
|
| 88 | 89 |
action := fmt.Sprintf("action_%d", i)
|
| 89 | 90 |
id := fmt.Sprintf("cont_%d", i)
|
| 90 | 91 |
from := fmt.Sprintf("image_%d", i)
|
| 91 |
- e.Log(action, id, from) |
|
| 92 |
+ e.Log(ctx, action, id, from) |
|
| 92 | 93 |
} |
| 93 | 94 |
time.Sleep(50 * time.Millisecond) |
| 94 | 95 |
current, l := e.Subscribe() |
| ... | ... |
@@ -97,7 +101,7 @@ func TestLogEvents(t *testing.T) {
|
| 97 | 97 |
action := fmt.Sprintf("action_%d", num)
|
| 98 | 98 |
id := fmt.Sprintf("cont_%d", num)
|
| 99 | 99 |
from := fmt.Sprintf("image_%d", num)
|
| 100 |
- e.Log(action, id, from) |
|
| 100 |
+ e.Log(ctx, action, id, from) |
|
| 101 | 101 |
} |
| 102 | 102 |
if len(e.events) != eventsLimit {
|
| 103 | 103 |
t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events))
|
| ... | ... |
@@ -8,6 +8,7 @@ import ( |
| 8 | 8 |
"time" |
| 9 | 9 |
|
| 10 | 10 |
"github.com/Sirupsen/logrus" |
| 11 |
+ "github.com/docker/docker/context" |
|
| 11 | 12 |
"github.com/docker/docker/daemon/execdriver" |
| 12 | 13 |
derr "github.com/docker/docker/errors" |
| 13 | 14 |
"github.com/docker/docker/pkg/broadcastwriter" |
| ... | ... |
@@ -117,8 +118,8 @@ func (d *Daemon) unregisterExecCommand(ExecConfig *ExecConfig) {
|
| 117 | 117 |
d.execCommands.Delete(ExecConfig.ID) |
| 118 | 118 |
} |
| 119 | 119 |
|
| 120 |
-func (d *Daemon) getActiveContainer(name string) (*Container, error) {
|
|
| 121 |
- container, err := d.Get(name) |
|
| 120 |
+func (d *Daemon) getActiveContainer(ctx context.Context, name string) (*Container, error) {
|
|
| 121 |
+ container, err := d.Get(ctx, name) |
|
| 122 | 122 |
if err != nil {
|
| 123 | 123 |
return nil, err |
| 124 | 124 |
} |
| ... | ... |
@@ -133,13 +134,13 @@ func (d *Daemon) getActiveContainer(name string) (*Container, error) {
|
| 133 | 133 |
} |
| 134 | 134 |
|
| 135 | 135 |
// ContainerExecCreate sets up an exec in a running container. |
| 136 |
-func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, error) {
|
|
| 136 |
+func (d *Daemon) ContainerExecCreate(ctx context.Context, config *runconfig.ExecConfig) (string, error) {
|
|
| 137 | 137 |
// Not all drivers support Exec (LXC for example) |
| 138 | 138 |
if err := checkExecSupport(d.execDriver.Name()); err != nil {
|
| 139 | 139 |
return "", err |
| 140 | 140 |
} |
| 141 | 141 |
|
| 142 |
- container, err := d.getActiveContainer(config.Container) |
|
| 142 |
+ container, err := d.getActiveContainer(ctx, config.Container) |
|
| 143 | 143 |
if err != nil {
|
| 144 | 144 |
return "", err |
| 145 | 145 |
} |
| ... | ... |
@@ -174,14 +175,14 @@ func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, erro |
| 174 | 174 |
|
| 175 | 175 |
d.registerExecCommand(ExecConfig) |
| 176 | 176 |
|
| 177 |
- container.logEvent("exec_create: " + ExecConfig.ProcessConfig.Entrypoint + " " + strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
|
|
| 177 |
+ container.logEvent(ctx, "exec_create: "+ExecConfig.ProcessConfig.Entrypoint+" "+strings.Join(ExecConfig.ProcessConfig.Arguments, " ")) |
|
| 178 | 178 |
|
| 179 | 179 |
return ExecConfig.ID, nil |
| 180 | 180 |
} |
| 181 | 181 |
|
| 182 | 182 |
// ContainerExecStart starts a previously set up exec instance. The |
| 183 | 183 |
// std streams are set up. |
| 184 |
-func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error {
|
|
| 184 |
+func (d *Daemon) ContainerExecStart(ctx context.Context, execName string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error {
|
|
| 185 | 185 |
var ( |
| 186 | 186 |
cStdin io.ReadCloser |
| 187 | 187 |
cStdout, cStderr io.Writer |
| ... | ... |
@@ -207,7 +208,7 @@ func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout |
| 207 | 207 |
logrus.Debugf("starting exec command %s in container %s", ExecConfig.ID, ExecConfig.Container.ID)
|
| 208 | 208 |
container := ExecConfig.Container |
| 209 | 209 |
|
| 210 |
- container.logEvent("exec_start: " + ExecConfig.ProcessConfig.Entrypoint + " " + strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
|
|
| 210 |
+ container.logEvent(ctx, "exec_start: "+ExecConfig.ProcessConfig.Entrypoint+" "+strings.Join(ExecConfig.ProcessConfig.Arguments, " ")) |
|
| 211 | 211 |
|
| 212 | 212 |
if ExecConfig.OpenStdin {
|
| 213 | 213 |
r, w := io.Pipe() |
| ... | ... |
@@ -243,7 +244,7 @@ func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout |
| 243 | 243 |
// the exitStatus) even after the cmd is done running. |
| 244 | 244 |
|
| 245 | 245 |
go func() {
|
| 246 |
- if err := container.exec(ExecConfig); err != nil {
|
|
| 246 |
+ if err := container.exec(ctx, ExecConfig); err != nil {
|
|
| 247 | 247 |
execErr <- derr.ErrorCodeExecCantRun.WithArgs(execName, container.ID, err) |
| 248 | 248 |
} |
| 249 | 249 |
}() |
| ... | ... |
@@ -267,11 +268,11 @@ func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout |
| 267 | 267 |
} |
| 268 | 268 |
|
| 269 | 269 |
// Exec calls the underlying exec driver to run |
| 270 |
-func (d *Daemon) Exec(c *Container, ExecConfig *ExecConfig, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (int, error) {
|
|
| 270 |
+func (d *Daemon) Exec(ctx context.Context, c *Container, ExecConfig *ExecConfig, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (int, error) {
|
|
| 271 | 271 |
hooks := execdriver.Hooks{
|
| 272 | 272 |
Start: startCallback, |
| 273 | 273 |
} |
| 274 |
- exitStatus, err := d.execDriver.Exec(c.command, ExecConfig.ProcessConfig, pipes, hooks) |
|
| 274 |
+ exitStatus, err := d.execDriver.Exec(ctx, c.command, ExecConfig.ProcessConfig, pipes, hooks) |
|
| 275 | 275 |
|
| 276 | 276 |
// On err, make sure we don't leave ExitCode at zero |
| 277 | 277 |
if err != nil && exitStatus == 0 {
|
| ... | ... |
@@ -7,6 +7,7 @@ import ( |
| 7 | 7 |
"time" |
| 8 | 8 |
|
| 9 | 9 |
// TODO Windows: Factor out ulimit |
| 10 |
+ "github.com/docker/docker/context" |
|
| 10 | 11 |
"github.com/docker/docker/pkg/ulimit" |
| 11 | 12 |
"github.com/opencontainers/runc/libcontainer" |
| 12 | 13 |
"github.com/opencontainers/runc/libcontainer/configs" |
| ... | ... |
@@ -29,7 +30,7 @@ var ( |
| 29 | 29 |
// through PreStart, Start and PostStop events. |
| 30 | 30 |
// Callbacks are provided a processConfig pointer and the pid of the child. |
| 31 | 31 |
// The channel will be used to notify the OOM events. |
| 32 |
-type DriverCallback func(processConfig *ProcessConfig, pid int, chOOM <-chan struct{}) error
|
|
| 32 |
+type DriverCallback func(ctx context.Context, processConfig *ProcessConfig, pid int, chOOM <-chan struct{}) error
|
|
| 33 | 33 |
|
| 34 | 34 |
// Hooks is a struct containing function pointers to callbacks |
| 35 | 35 |
// used by any execdriver implementation exploiting hooks capabilities |
| ... | ... |
@@ -69,11 +70,11 @@ type ExitStatus struct {
|
| 69 | 69 |
type Driver interface {
|
| 70 | 70 |
// Run executes the process, blocks until the process exits and returns |
| 71 | 71 |
// the exit code. It's the last stage on Docker side for running a container. |
| 72 |
- Run(c *Command, pipes *Pipes, hooks Hooks) (ExitStatus, error) |
|
| 72 |
+ Run(ctx context.Context, c *Command, pipes *Pipes, hooks Hooks) (ExitStatus, error) |
|
| 73 | 73 |
|
| 74 | 74 |
// Exec executes the process in an existing container, blocks until the |
| 75 | 75 |
// process exits and returns the exit code. |
| 76 |
- Exec(c *Command, processConfig *ProcessConfig, pipes *Pipes, hooks Hooks) (int, error) |
|
| 76 |
+ Exec(ctx context.Context, c *Command, processConfig *ProcessConfig, pipes *Pipes, hooks Hooks) (int, error) |
|
| 77 | 77 |
|
| 78 | 78 |
// Kill sends signals to process in container. |
| 79 | 79 |
Kill(c *Command, sig int) error |
| ... | ... |
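Note: both DriverCallback and the Driver interface move the context to the first parameter, and every driver (lxc, native, windows) plus every hooks.Start call site changes in lockstep below. A minimal no-op driver sketch, written as if from outside the execdriver package, spelling out only the two reshaped methods (Kill and the rest of the interface are elided):

```go
// noopDriver is a sketch, not a registered driver, and does not satisfy the
// full Driver interface; it only shows the new context-first signatures.
type noopDriver struct{}

func (d *noopDriver) Run(ctx context.Context, c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
	// Hooks now receive the context too, so whatever they emit (events,
	// logs) can be tagged with the request ID.
	if hooks.Start != nil {
		chOOM := make(chan struct{})
		close(chOOM) // closed channel: reads return immediately, no OOM events
		hooks.Start(ctx, &c.ProcessConfig, 0, chOOM)
	}
	return execdriver.ExitStatus{ExitCode: 0}, nil
}

func (d *noopDriver) Exec(ctx context.Context, c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
	return 0, nil
}
```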
@@ -20,6 +20,7 @@ import ( |
| 20 | 20 |
"time" |
| 21 | 21 |
|
| 22 | 22 |
"github.com/Sirupsen/logrus" |
| 23 |
+ "github.com/docker/docker/context" |
|
| 23 | 24 |
"github.com/docker/docker/daemon/execdriver" |
| 24 | 25 |
"github.com/docker/docker/pkg/stringutils" |
| 25 | 26 |
sysinfo "github.com/docker/docker/pkg/system" |
| ... | ... |
@@ -125,7 +126,7 @@ func killNetNsProc(proc *os.Process) {
|
| 125 | 125 |
|
| 126 | 126 |
// Run implements the exec driver Driver interface, |
| 127 | 127 |
// it calls 'exec.Cmd' to launch lxc commands to run a container. |
| 128 |
-func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
|
|
| 128 |
+func (d *Driver) Run(ctx context.Context, c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
|
|
| 129 | 129 |
var ( |
| 130 | 130 |
term execdriver.Terminal |
| 131 | 131 |
err error |
| ... | ... |
@@ -329,7 +330,7 @@ func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execd |
| 329 | 329 |
|
| 330 | 330 |
if hooks.Start != nil {
|
| 331 | 331 |
logrus.Debugf("Invoking startCallback")
|
| 332 |
- hooks.Start(&c.ProcessConfig, pid, oomKillNotification) |
|
| 332 |
+ hooks.Start(ctx, &c.ProcessConfig, pid, oomKillNotification) |
|
| 333 | 333 |
|
| 334 | 334 |
} |
| 335 | 335 |
|
| ... | ... |
@@ -871,7 +872,7 @@ func (t *TtyConsole) Close() error {
|
| 871 | 871 |
|
| 872 | 872 |
// Exec implements the exec driver Driver interface, |
| 873 | 873 |
// it is not implemented by lxc. |
| 874 |
-func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
|
|
| 874 |
+func (d *Driver) Exec(ctx context.Context, c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
|
|
| 875 | 875 |
return -1, ErrExec |
| 876 | 876 |
} |
| 877 | 877 |
|
| ... | ... |
@@ -9,6 +9,7 @@ import ( |
| 9 | 9 |
"strings" |
| 10 | 10 |
"syscall" |
| 11 | 11 |
|
| 12 |
+ "github.com/docker/docker/context" |
|
| 12 | 13 |
"github.com/docker/docker/daemon/execdriver" |
| 13 | 14 |
"github.com/opencontainers/runc/libcontainer/apparmor" |
| 14 | 15 |
"github.com/opencontainers/runc/libcontainer/configs" |
| ... | ... |
@@ -18,7 +19,7 @@ import ( |
| 18 | 18 |
|
| 19 | 19 |
// createContainer populates and configures the container type with the |
| 20 | 20 |
// data provided by the execdriver.Command |
| 21 |
-func (d *Driver) createContainer(c *execdriver.Command, hooks execdriver.Hooks) (*configs.Config, error) {
|
|
| 21 |
+func (d *Driver) createContainer(ctx context.Context, c *execdriver.Command, hooks execdriver.Hooks) (*configs.Config, error) {
|
|
| 22 | 22 |
container := execdriver.InitContainer(c) |
| 23 | 23 |
|
| 24 | 24 |
if err := d.createIpc(container, c); err != nil {
|
| ... | ... |
@@ -33,7 +34,7 @@ func (d *Driver) createContainer(c *execdriver.Command, hooks execdriver.Hooks) |
| 33 | 33 |
return nil, err |
| 34 | 34 |
} |
| 35 | 35 |
|
| 36 |
- if err := d.createNetwork(container, c, hooks); err != nil {
|
|
| 36 |
+ if err := d.createNetwork(ctx, container, c, hooks); err != nil {
|
|
| 37 | 37 |
return nil, err |
| 38 | 38 |
} |
| 39 | 39 |
|
| ... | ... |
@@ -113,7 +114,7 @@ func generateIfaceName() (string, error) {
|
| 113 | 113 |
return "", errors.New("Failed to find name for new interface")
|
| 114 | 114 |
} |
| 115 | 115 |
|
| 116 |
-func (d *Driver) createNetwork(container *configs.Config, c *execdriver.Command, hooks execdriver.Hooks) error {
|
|
| 116 |
+func (d *Driver) createNetwork(ctx context.Context, container *configs.Config, c *execdriver.Command, hooks execdriver.Hooks) error {
|
|
| 117 | 117 |
if c.Network == nil {
|
| 118 | 118 |
return nil |
| 119 | 119 |
} |
| ... | ... |
@@ -150,7 +151,7 @@ func (d *Driver) createNetwork(container *configs.Config, c *execdriver.Command, |
| 150 | 150 |
// non-blocking and return the correct result when read. |
| 151 | 151 |
chOOM := make(chan struct{})
|
| 152 | 152 |
close(chOOM) |
| 153 |
- if err := fnHook(&c.ProcessConfig, s.Pid, chOOM); err != nil {
|
|
| 153 |
+ if err := fnHook(ctx, &c.ProcessConfig, s.Pid, chOOM); err != nil {
|
|
| 154 | 154 |
return err |
| 155 | 155 |
} |
| 156 | 156 |
} |
| ... | ... |
@@ -14,6 +14,7 @@ import ( |
| 14 | 14 |
"time" |
| 15 | 15 |
|
| 16 | 16 |
"github.com/Sirupsen/logrus" |
| 17 |
+ "github.com/docker/docker/context" |
|
| 17 | 18 |
"github.com/docker/docker/daemon/execdriver" |
| 18 | 19 |
"github.com/docker/docker/pkg/parsers" |
| 19 | 20 |
"github.com/docker/docker/pkg/pools" |
| ... | ... |
@@ -131,9 +132,9 @@ type execOutput struct {
|
| 131 | 131 |
|
| 132 | 132 |
// Run implements the exec driver Driver interface, |
| 133 | 133 |
// it calls libcontainer APIs to run a container. |
| 134 |
-func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
|
|
| 134 |
+func (d *Driver) Run(ctx context.Context, c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
|
|
| 135 | 135 |
// take the Command and populate the libcontainer.Config from it |
| 136 |
- container, err := d.createContainer(c, hooks) |
|
| 136 |
+ container, err := d.createContainer(ctx, c, hooks) |
|
| 137 | 137 |
if err != nil {
|
| 138 | 138 |
return execdriver.ExitStatus{ExitCode: -1}, err
|
| 139 | 139 |
} |
| ... | ... |
@@ -174,7 +175,7 @@ func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execd |
| 174 | 174 |
p.Wait() |
| 175 | 175 |
return execdriver.ExitStatus{ExitCode: -1}, err
|
| 176 | 176 |
} |
| 177 |
- hooks.Start(&c.ProcessConfig, pid, oom) |
|
| 177 |
+ hooks.Start(ctx, &c.ProcessConfig, pid, oom) |
|
| 178 | 178 |
} |
| 179 | 179 |
|
| 180 | 180 |
waitF := p.Wait |
| ... | ... |
@@ -8,6 +8,7 @@ import ( |
| 8 | 8 |
"os/exec" |
| 9 | 9 |
"syscall" |
| 10 | 10 |
|
| 11 |
+ "github.com/docker/docker/context" |
|
| 11 | 12 |
"github.com/docker/docker/daemon/execdriver" |
| 12 | 13 |
"github.com/opencontainers/runc/libcontainer" |
| 13 | 14 |
// Blank import 'nsenter' so that init in that package will call c |
| ... | ... |
@@ -19,7 +20,7 @@ import ( |
| 19 | 19 |
|
| 20 | 20 |
// Exec implements the exec driver Driver interface, |
| 21 | 21 |
// it calls libcontainer APIs to execute a container. |
| 22 |
-func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
|
|
| 22 |
+func (d *Driver) Exec(ctx context.Context, c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
|
|
| 23 | 23 |
active := d.activeContainers[c.ID] |
| 24 | 24 |
if active == nil {
|
| 25 | 25 |
return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
|
| ... | ... |
@@ -57,7 +58,7 @@ func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessCo |
| 57 | 57 |
// non-blocking and return the correct result when read. |
| 58 | 58 |
chOOM := make(chan struct{})
|
| 59 | 59 |
close(chOOM) |
| 60 |
- hooks.Start(&c.ProcessConfig, pid, chOOM) |
|
| 60 |
+ hooks.Start(ctx, &c.ProcessConfig, pid, chOOM) |
|
| 61 | 61 |
} |
| 62 | 62 |
|
| 63 | 63 |
ps, err := p.Wait() |
| ... | ... |
@@ -7,12 +7,13 @@ import ( |
| 7 | 7 |
"fmt" |
| 8 | 8 |
|
| 9 | 9 |
"github.com/Sirupsen/logrus" |
| 10 |
+ "github.com/docker/docker/context" |
|
| 10 | 11 |
"github.com/docker/docker/daemon/execdriver" |
| 11 | 12 |
"github.com/microsoft/hcsshim" |
| 12 | 13 |
) |
| 13 | 14 |
|
| 14 | 15 |
// Exec implements the exec driver Driver interface. |
| 15 |
-func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
|
|
| 16 |
+func (d *Driver) Exec(ctx context.Context, c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
|
|
| 16 | 17 |
|
| 17 | 18 |
var ( |
| 18 | 19 |
term execdriver.Terminal |
| ... | ... |
@@ -74,7 +75,7 @@ func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessCo |
| 74 | 74 |
// non-blocking and return the correct result when read. |
| 75 | 75 |
chOOM := make(chan struct{})
|
| 76 | 76 |
close(chOOM) |
| 77 |
- hooks.Start(&c.ProcessConfig, int(pid), chOOM) |
|
| 77 |
+ hooks.Start(ctx, &c.ProcessConfig, int(pid), chOOM) |
|
| 78 | 78 |
} |
| 79 | 79 |
|
| 80 | 80 |
if exitCode, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid); err != nil {
|
| ... | ... |
@@ -15,6 +15,7 @@ import ( |
| 15 | 15 |
"syscall" |
| 16 | 16 |
|
| 17 | 17 |
"github.com/Sirupsen/logrus" |
| 18 |
+ "github.com/docker/docker/context" |
|
| 18 | 19 |
"github.com/docker/docker/daemon/execdriver" |
| 19 | 20 |
"github.com/microsoft/hcsshim" |
| 20 | 21 |
) |
| ... | ... |
@@ -79,7 +80,7 @@ type containerInit struct {
|
| 79 | 79 |
const defaultOwner = "docker" |
| 80 | 80 |
|
| 81 | 81 |
// Run implements the exec driver Driver interface |
| 82 |
-func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
|
|
| 82 |
+func (d *Driver) Run(ctx context.Context, c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
|
|
| 83 | 83 |
|
| 84 | 84 |
var ( |
| 85 | 85 |
term execdriver.Terminal |
| ... | ... |
@@ -298,7 +299,7 @@ func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execd |
| 298 | 298 |
// non-blocking and return the correct result when read. |
| 299 | 299 |
chOOM := make(chan struct{})
|
| 300 | 300 |
close(chOOM) |
| 301 |
- hooks.Start(&c.ProcessConfig, int(pid), chOOM) |
|
| 301 |
+ hooks.Start(ctx, &c.ProcessConfig, int(pid), chOOM) |
|
| 302 | 302 |
} |
| 303 | 303 |
|
| 304 | 304 |
var exitCode int32 |
| ... | ... |
@@ -3,18 +3,19 @@ package daemon |
| 3 | 3 |
import ( |
| 4 | 4 |
"io" |
| 5 | 5 |
|
| 6 |
+ "github.com/docker/docker/context" |
|
| 6 | 7 |
derr "github.com/docker/docker/errors" |
| 7 | 8 |
) |
| 8 | 9 |
|
| 9 | 10 |
// ContainerExport writes the contents of the container to the given |
| 10 | 11 |
// writer. An error is returned if the container cannot be found. |
| 11 |
-func (daemon *Daemon) ContainerExport(name string, out io.Writer) error {
|
|
| 12 |
- container, err := daemon.Get(name) |
|
| 12 |
+func (daemon *Daemon) ContainerExport(ctx context.Context, name string, out io.Writer) error {
|
|
| 13 |
+ container, err := daemon.Get(ctx, name) |
|
| 13 | 14 |
if err != nil {
|
| 14 | 15 |
return err |
| 15 | 16 |
} |
| 16 | 17 |
|
| 17 |
- data, err := container.export() |
|
| 18 |
+ data, err := container.export(ctx) |
|
| 18 | 19 |
if err != nil {
|
| 19 | 20 |
return derr.ErrorCodeExportFailed.WithArgs(name, err) |
| 20 | 21 |
} |
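Note: ContainerExport streams the container's filesystem as a tar archive into the supplied io.Writer, now under the caller's ctx. A hedged usage sketch follows; exportToFile and the file handling are hypothetical, only the ContainerExport signature comes from this hunk.

    package main

    import (
        "os"

        "github.com/docker/docker/context"
        "github.com/docker/docker/daemon"
    )

    // exportToFile is a hypothetical caller: it writes the container's
    // filesystem tarball to a local file using the new ctx-first signature.
    func exportToFile(ctx context.Context, d *daemon.Daemon, name, path string) error {
        f, err := os.Create(path)
        if err != nil {
            return err
        }
        defer f.Close()
        return d.ContainerExport(ctx, name, f)
    }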
| ... | ... |
@@ -5,6 +5,7 @@ import ( |
| 5 | 5 |
"strings" |
| 6 | 6 |
|
| 7 | 7 |
"github.com/docker/docker/api/types" |
| 8 |
+ "github.com/docker/docker/context" |
|
| 8 | 9 |
derr "github.com/docker/docker/errors" |
| 9 | 10 |
"github.com/docker/docker/graph/tags" |
| 10 | 11 |
"github.com/docker/docker/image" |
| ... | ... |
@@ -50,10 +51,10 @@ import ( |
| 50 | 50 |
// FIXME: remove ImageDelete's dependency on Daemon, then move to the graph |
| 51 | 51 |
// package. This would require that we no longer need the daemon to determine |
| 52 | 52 |
// whether images are being used by a stopped or running container. |
| 53 |
-func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) {
|
|
| 53 |
+func (daemon *Daemon) ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]types.ImageDelete, error) {
|
|
| 54 | 54 |
records := []types.ImageDelete{}
|
| 55 | 55 |
|
| 56 |
- img, err := daemon.Repositories().LookupImage(imageRef) |
|
| 56 |
+ img, err := daemon.Repositories(ctx).LookupImage(imageRef) |
|
| 57 | 57 |
if err != nil {
|
| 58 | 58 |
return nil, err |
| 59 | 59 |
} |
| ... | ... |
@@ -64,8 +65,8 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I |
| 64 | 64 |
// first. We can only remove this reference if either force is |
| 65 | 65 |
// true, there are multiple repository references to this |
| 66 | 66 |
// image, or there are no containers using the given reference. |
| 67 |
- if !(force || daemon.imageHasMultipleRepositoryReferences(img.ID)) {
|
|
| 68 |
- if container := daemon.getContainerUsingImage(img.ID); container != nil {
|
|
| 67 |
+ if !(force || daemon.imageHasMultipleRepositoryReferences(ctx, img.ID)) {
|
|
| 68 |
+ if container := daemon.getContainerUsingImage(ctx, img.ID); container != nil {
|
|
| 69 | 69 |
// If we removed the repository reference then |
| 70 | 70 |
// this image would remain "dangling" and since |
| 71 | 71 |
// we really want to avoid that the client must |
| ... | ... |
@@ -74,14 +75,14 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I |
| 74 | 74 |
} |
| 75 | 75 |
} |
| 76 | 76 |
|
| 77 |
- parsedRef, err := daemon.removeImageRef(imageRef) |
|
| 77 |
+ parsedRef, err := daemon.removeImageRef(ctx, imageRef) |
|
| 78 | 78 |
if err != nil {
|
| 79 | 79 |
return nil, err |
| 80 | 80 |
} |
| 81 | 81 |
|
| 82 | 82 |
untaggedRecord := types.ImageDelete{Untagged: parsedRef}
|
| 83 | 83 |
|
| 84 |
- daemon.EventsService.Log("untag", img.ID, "")
|
|
| 84 |
+ daemon.EventsService.Log(ctx, "untag", img.ID, "") |
|
| 85 | 85 |
records = append(records, untaggedRecord) |
| 86 | 86 |
|
| 87 | 87 |
removedRepositoryRef = true |
| ... | ... |
@@ -90,21 +91,21 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I |
| 90 | 90 |
// repository reference to the image then we will want to |
| 91 | 91 |
// remove that reference. |
| 92 | 92 |
// FIXME: Is this the behavior we want? |
| 93 |
- repoRefs := daemon.Repositories().ByID()[img.ID] |
|
| 93 |
+ repoRefs := daemon.Repositories(ctx).ByID()[img.ID] |
|
| 94 | 94 |
if len(repoRefs) == 1 {
|
| 95 |
- parsedRef, err := daemon.removeImageRef(repoRefs[0]) |
|
| 95 |
+ parsedRef, err := daemon.removeImageRef(ctx, repoRefs[0]) |
|
| 96 | 96 |
if err != nil {
|
| 97 | 97 |
return nil, err |
| 98 | 98 |
} |
| 99 | 99 |
|
| 100 | 100 |
untaggedRecord := types.ImageDelete{Untagged: parsedRef}
|
| 101 | 101 |
|
| 102 |
- daemon.EventsService.Log("untag", img.ID, "")
|
|
| 102 |
+ daemon.EventsService.Log(ctx, "untag", img.ID, "") |
|
| 103 | 103 |
records = append(records, untaggedRecord) |
| 104 | 104 |
} |
| 105 | 105 |
} |
| 106 | 106 |
|
| 107 |
- return records, daemon.imageDeleteHelper(img, &records, force, prune, removedRepositoryRef) |
|
| 107 |
+ return records, daemon.imageDeleteHelper(ctx, img, &records, force, prune, removedRepositoryRef) |
|
| 108 | 108 |
} |
| 109 | 109 |
|
| 110 | 110 |
// isImageIDPrefix returns whether the given possiblePrefix is a prefix of the |
| ... | ... |
@@ -115,14 +116,14 @@ func isImageIDPrefix(imageID, possiblePrefix string) bool {
|
| 115 | 115 |
|
| 116 | 116 |
// imageHasMultipleRepositoryReferences returns whether there are multiple |
| 117 | 117 |
// repository references to the given imageID. |
| 118 |
-func (daemon *Daemon) imageHasMultipleRepositoryReferences(imageID string) bool {
|
|
| 119 |
- return len(daemon.Repositories().ByID()[imageID]) > 1 |
|
| 118 |
+func (daemon *Daemon) imageHasMultipleRepositoryReferences(ctx context.Context, imageID string) bool {
|
|
| 119 |
+ return len(daemon.Repositories(ctx).ByID()[imageID]) > 1 |
|
| 120 | 120 |
} |
| 121 | 121 |
|
| 122 | 122 |
// getContainerUsingImage returns a container that was created using the given |
| 123 | 123 |
// imageID. Returns nil if there is no such container. |
| 124 |
-func (daemon *Daemon) getContainerUsingImage(imageID string) *Container {
|
|
| 125 |
- for _, container := range daemon.List() {
|
|
| 124 |
+func (daemon *Daemon) getContainerUsingImage(ctx context.Context, imageID string) *Container {
|
|
| 125 |
+ for _, container := range daemon.List(ctx) {
|
|
| 126 | 126 |
if container.ImageID == imageID {
|
| 127 | 127 |
return container |
| 128 | 128 |
} |
| ... | ... |
@@ -136,7 +137,7 @@ func (daemon *Daemon) getContainerUsingImage(imageID string) *Container {
|
| 136 | 136 |
// repositoryRef must not be an image ID but a repository name followed by an |
| 137 | 137 |
// optional tag or digest reference. If tag or digest is omitted, the default |
| 138 | 138 |
// tag is used. Returns the resolved image reference and an error. |
| 139 |
-func (daemon *Daemon) removeImageRef(repositoryRef string) (string, error) {
|
|
| 139 |
+func (daemon *Daemon) removeImageRef(ctx context.Context, repositoryRef string) (string, error) {
|
|
| 140 | 140 |
repository, ref := parsers.ParseRepositoryTag(repositoryRef) |
| 141 | 141 |
if ref == "" {
|
| 142 | 142 |
ref = tags.DefaultTag |
| ... | ... |
@@ -145,7 +146,7 @@ func (daemon *Daemon) removeImageRef(repositoryRef string) (string, error) {
|
| 145 | 145 |
// Ignore the boolean value returned, as far as we're concerned, this |
| 146 | 146 |
// is an idempotent operation and it's okay if the reference didn't |
| 147 | 147 |
// exist in the first place. |
| 148 |
- _, err := daemon.Repositories().Delete(repository, ref) |
|
| 148 |
+ _, err := daemon.Repositories(ctx).Delete(repository, ref) |
|
| 149 | 149 |
|
| 150 | 150 |
return utils.ImageReference(repository, ref), err |
| 151 | 151 |
} |
| ... | ... |
@@ -155,18 +156,18 @@ func (daemon *Daemon) removeImageRef(repositoryRef string) (string, error) {
|
| 155 | 155 |
// on the first encountered error. Removed references are logged to this |
| 156 | 156 |
// daemon's event service. An "Untagged" types.ImageDelete is added to the |
| 157 | 157 |
// given list of records. |
| 158 |
-func (daemon *Daemon) removeAllReferencesToImageID(imgID string, records *[]types.ImageDelete) error {
|
|
| 159 |
- imageRefs := daemon.Repositories().ByID()[imgID] |
|
| 158 |
+func (daemon *Daemon) removeAllReferencesToImageID(ctx context.Context, imgID string, records *[]types.ImageDelete) error {
|
|
| 159 |
+ imageRefs := daemon.Repositories(ctx).ByID()[imgID] |
|
| 160 | 160 |
|
| 161 | 161 |
for _, imageRef := range imageRefs {
|
| 162 |
- parsedRef, err := daemon.removeImageRef(imageRef) |
|
| 162 |
+ parsedRef, err := daemon.removeImageRef(ctx, imageRef) |
|
| 163 | 163 |
if err != nil {
|
| 164 | 164 |
return err |
| 165 | 165 |
} |
| 166 | 166 |
|
| 167 | 167 |
untaggedRecord := types.ImageDelete{Untagged: parsedRef}
|
| 168 | 168 |
|
| 169 |
- daemon.EventsService.Log("untag", imgID, "")
|
|
| 169 |
+ daemon.EventsService.Log(ctx, "untag", imgID, "") |
|
| 170 | 170 |
*records = append(*records, untaggedRecord) |
| 171 | 171 |
} |
| 172 | 172 |
|
| ... | ... |
@@ -203,11 +204,11 @@ func (idc *imageDeleteConflict) Error() string {
|
| 203 | 203 |
// conflict is encountered, it will be returned immediately without deleting |
| 204 | 204 |
// the image. If quiet is true, any encountered conflicts will be ignored and |
| 205 | 205 |
// the function will return nil immediately without deleting the image. |
| 206 |
-func (daemon *Daemon) imageDeleteHelper(img *image.Image, records *[]types.ImageDelete, force, prune, quiet bool) error {
|
|
| 206 |
+func (daemon *Daemon) imageDeleteHelper(ctx context.Context, img *image.Image, records *[]types.ImageDelete, force, prune, quiet bool) error {
|
|
| 207 | 207 |
// First, determine if this image has any conflicts. Ignore soft conflicts |
| 208 | 208 |
// if force is true. |
| 209 |
- if conflict := daemon.checkImageDeleteConflict(img, force); conflict != nil {
|
|
| 210 |
- if quiet && !daemon.imageIsDangling(img) {
|
|
| 209 |
+ if conflict := daemon.checkImageDeleteConflict(ctx, img, force); conflict != nil {
|
|
| 210 |
+ if quiet && !daemon.imageIsDangling(ctx, img) {
|
|
| 211 | 211 |
// Ignore conflicts UNLESS the image is "dangling" in |
| 212 | 212 |
// which case we want the user to know. |
| 213 | 213 |
return nil |
| ... | ... |
@@ -219,15 +220,15 @@ func (daemon *Daemon) imageDeleteHelper(img *image.Image, records *[]types.Image |
| 219 | 219 |
} |
| 220 | 220 |
|
| 221 | 221 |
// Delete all repository tag/digest references to this image. |
| 222 |
- if err := daemon.removeAllReferencesToImageID(img.ID, records); err != nil {
|
|
| 222 |
+ if err := daemon.removeAllReferencesToImageID(ctx, img.ID, records); err != nil {
|
|
| 223 | 223 |
return err |
| 224 | 224 |
} |
| 225 | 225 |
|
| 226 |
- if err := daemon.Graph().Delete(img.ID); err != nil {
|
|
| 226 |
+ if err := daemon.Graph(ctx).Delete(img.ID); err != nil {
|
|
| 227 | 227 |
return err |
| 228 | 228 |
} |
| 229 | 229 |
|
| 230 |
- daemon.EventsService.Log("delete", img.ID, "")
|
|
| 230 |
+ daemon.EventsService.Log(ctx, "delete", img.ID, "") |
|
| 231 | 231 |
*records = append(*records, types.ImageDelete{Deleted: img.ID})
|
| 232 | 232 |
|
| 233 | 233 |
if !prune || img.Parent == "" {
|
| ... | ... |
@@ -237,14 +238,14 @@ func (daemon *Daemon) imageDeleteHelper(img *image.Image, records *[]types.Image |
| 237 | 237 |
// We need to prune the parent image. This means delete it if there are |
| 238 | 238 |
// no tags/digests referencing it and there are no containers using it ( |
| 239 | 239 |
// either running or stopped). |
| 240 |
- parentImg, err := daemon.Graph().Get(img.Parent) |
|
| 240 |
+ parentImg, err := daemon.Graph(ctx).Get(img.Parent) |
|
| 241 | 241 |
if err != nil {
|
| 242 | 242 |
return derr.ErrorCodeImgNoParent.WithArgs(err) |
| 243 | 243 |
} |
| 244 | 244 |
|
| 245 | 245 |
// Do not force prunings, but do so quietly (stopping on any encountered |
| 246 | 246 |
// conflicts). |
| 247 |
- return daemon.imageDeleteHelper(parentImg, records, false, true, true) |
|
| 247 |
+ return daemon.imageDeleteHelper(ctx, parentImg, records, false, true, true) |
|
| 248 | 248 |
} |
| 249 | 249 |
|
| 250 | 250 |
// checkImageDeleteConflict determines whether there are any conflicts |
| ... | ... |
@@ -253,9 +254,9 @@ func (daemon *Daemon) imageDeleteHelper(img *image.Image, records *[]types.Image |
| 253 | 253 |
// using the image. A soft conflict is any tag/digest referencing the given |
| 254 | 254 |
// image or any stopped container using the image. If ignoreSoftConflicts is |
| 255 | 255 |
// true, this function will not check for soft conflict conditions. |
| 256 |
-func (daemon *Daemon) checkImageDeleteConflict(img *image.Image, ignoreSoftConflicts bool) *imageDeleteConflict {
|
|
| 256 |
+func (daemon *Daemon) checkImageDeleteConflict(ctx context.Context, img *image.Image, ignoreSoftConflicts bool) *imageDeleteConflict {
|
|
| 257 | 257 |
// Check for hard conflicts first. |
| 258 |
- if conflict := daemon.checkImageDeleteHardConflict(img); conflict != nil {
|
|
| 258 |
+ if conflict := daemon.checkImageDeleteHardConflict(ctx, img); conflict != nil {
|
|
| 259 | 259 |
return conflict |
| 260 | 260 |
} |
| 261 | 261 |
|
| ... | ... |
@@ -265,12 +266,12 @@ func (daemon *Daemon) checkImageDeleteConflict(img *image.Image, ignoreSoftConfl |
| 265 | 265 |
return nil |
| 266 | 266 |
} |
| 267 | 267 |
|
| 268 |
- return daemon.checkImageDeleteSoftConflict(img) |
|
| 268 |
+ return daemon.checkImageDeleteSoftConflict(ctx, img) |
|
| 269 | 269 |
} |
| 270 | 270 |
|
| 271 |
-func (daemon *Daemon) checkImageDeleteHardConflict(img *image.Image) *imageDeleteConflict {
|
|
| 271 |
+func (daemon *Daemon) checkImageDeleteHardConflict(ctx context.Context, img *image.Image) *imageDeleteConflict {
|
|
| 272 | 272 |
// Check if the image ID is being used by a pull or build. |
| 273 |
- if daemon.Graph().IsHeld(img.ID) {
|
|
| 273 |
+ if daemon.Graph(ctx).IsHeld(img.ID) {
|
|
| 274 | 274 |
return &imageDeleteConflict{
|
| 275 | 275 |
hard: true, |
| 276 | 276 |
imgID: img.ID, |
| ... | ... |
@@ -279,7 +280,7 @@ func (daemon *Daemon) checkImageDeleteHardConflict(img *image.Image) *imageDelet |
| 279 | 279 |
} |
| 280 | 280 |
|
| 281 | 281 |
// Check if the image has any descendent images. |
| 282 |
- if daemon.Graph().HasChildren(img) {
|
|
| 282 |
+ if daemon.Graph(ctx).HasChildren(img) {
|
|
| 283 | 283 |
return &imageDeleteConflict{
|
| 284 | 284 |
hard: true, |
| 285 | 285 |
imgID: img.ID, |
| ... | ... |
@@ -288,7 +289,7 @@ func (daemon *Daemon) checkImageDeleteHardConflict(img *image.Image) *imageDelet |
| 288 | 288 |
} |
| 289 | 289 |
|
| 290 | 290 |
// Check if any running container is using the image. |
| 291 |
- for _, container := range daemon.List() {
|
|
| 291 |
+ for _, container := range daemon.List(ctx) {
|
|
| 292 | 292 |
if !container.IsRunning() {
|
| 293 | 293 |
// Skip this until we check for soft conflicts later. |
| 294 | 294 |
continue |
| ... | ... |
@@ -306,9 +307,9 @@ func (daemon *Daemon) checkImageDeleteHardConflict(img *image.Image) *imageDelet |
| 306 | 306 |
return nil |
| 307 | 307 |
} |
| 308 | 308 |
|
| 309 |
-func (daemon *Daemon) checkImageDeleteSoftConflict(img *image.Image) *imageDeleteConflict {
|
|
| 309 |
+func (daemon *Daemon) checkImageDeleteSoftConflict(ctx context.Context, img *image.Image) *imageDeleteConflict {
|
|
| 310 | 310 |
// Check if any repository tags/digest reference this image. |
| 311 |
- if daemon.Repositories().HasReferences(img) {
|
|
| 311 |
+ if daemon.Repositories(ctx).HasReferences(img) {
|
|
| 312 | 312 |
return &imageDeleteConflict{
|
| 313 | 313 |
imgID: img.ID, |
| 314 | 314 |
message: "image is referenced in one or more repositories", |
| ... | ... |
@@ -316,7 +317,7 @@ func (daemon *Daemon) checkImageDeleteSoftConflict(img *image.Image) *imageDelet |
| 316 | 316 |
} |
| 317 | 317 |
|
| 318 | 318 |
// Check if any stopped containers reference this image. |
| 319 |
- for _, container := range daemon.List() {
|
|
| 319 |
+ for _, container := range daemon.List(ctx) {
|
|
| 320 | 320 |
if container.IsRunning() {
|
| 321 | 321 |
// Skip this as it was checked above in hard conflict conditions. |
| 322 | 322 |
continue |
| ... | ... |
@@ -336,6 +337,6 @@ func (daemon *Daemon) checkImageDeleteSoftConflict(img *image.Image) *imageDelet |
| 336 | 336 |
// imageIsDangling returns whether the given image is "dangling" which means |
| 337 | 337 |
// that there are no repository references to the given image and it has no |
| 338 | 338 |
// child images. |
| 339 |
-func (daemon *Daemon) imageIsDangling(img *image.Image) bool {
|
|
| 340 |
- return !(daemon.Repositories().HasReferences(img) || daemon.Graph().HasChildren(img)) |
|
| 339 |
+func (daemon *Daemon) imageIsDangling(ctx context.Context, img *image.Image) bool {
|
|
| 340 |
+ return !(daemon.Repositories(ctx).HasReferences(img) || daemon.Graph(ctx).HasChildren(img)) |
|
| 341 | 341 |
} |
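Note: every lookup in this file (Repositories, Graph, List) and every EventsService.Log call now receives ctx, so the "untag" and "delete" events emitted during a removal can be tied back to the originating API request. A hedged caller sketch follows; deleteImage and the printed output are hypothetical, while the ImageDelete signature and the Untagged/Deleted record fields are taken from the hunks above.

    package main

    import (
        "fmt"

        "github.com/docker/docker/context"
        "github.com/docker/docker/daemon"
    )

    // deleteImage is a hypothetical caller: the ctx handed in here is the same
    // one the daemon uses when logging the "untag"/"delete" events.
    func deleteImage(ctx context.Context, d *daemon.Daemon, ref string) error {
        // force=false, prune=true: refuse conflicting deletes, but clean up
        // untagged parent layers.
        records, err := d.ImageDelete(ctx, ref, false, true)
        if err != nil {
            return err
        }
        for _, r := range records {
            if r.Untagged != "" {
                fmt.Println("untagged:", r.Untagged)
            }
            if r.Deleted != "" {
                fmt.Println("deleted:", r.Deleted)
            }
        }
        return nil
    }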
| ... | ... |
@@ -8,6 +8,7 @@ import ( |
| 8 | 8 |
"github.com/Sirupsen/logrus" |
| 9 | 9 |
"github.com/docker/docker/api/types" |
| 10 | 10 |
"github.com/docker/docker/autogen/dockerversion" |
| 11 |
+ "github.com/docker/docker/context" |
|
| 11 | 12 |
"github.com/docker/docker/pkg/fileutils" |
| 12 | 13 |
"github.com/docker/docker/pkg/parsers/kernel" |
| 13 | 14 |
"github.com/docker/docker/pkg/parsers/operatingsystem" |
| ... | ... |
@@ -18,8 +19,8 @@ import ( |
| 18 | 18 |
) |
| 19 | 19 |
|
| 20 | 20 |
// SystemInfo returns information about the host server the daemon is running on. |
| 21 |
-func (daemon *Daemon) SystemInfo() (*types.Info, error) {
|
|
| 22 |
- images := daemon.Graph().Map() |
|
| 21 |
+func (daemon *Daemon) SystemInfo(ctx context.Context) (*types.Info, error) {
|
|
| 22 |
+ images := daemon.Graph(ctx).Map() |
|
| 23 | 23 |
var imgcount int |
| 24 | 24 |
if images == nil {
|
| 25 | 25 |
imgcount = 0 |
| ... | ... |
@@ -65,10 +66,10 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
|
| 65 | 65 |
|
| 66 | 66 |
v := &types.Info{
|
| 67 | 67 |
ID: daemon.ID, |
| 68 |
- Containers: len(daemon.List()), |
|
| 68 |
+ Containers: len(daemon.List(ctx)), |
|
| 69 | 69 |
Images: imgcount, |
| 70 |
- Driver: daemon.GraphDriver().String(), |
|
| 71 |
- DriverStatus: daemon.GraphDriver().Status(), |
|
| 70 |
+ Driver: daemon.GraphDriver(ctx).String(), |
|
| 71 |
+ DriverStatus: daemon.GraphDriver(ctx).Status(), |
|
| 72 | 72 |
IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, |
| 73 | 73 |
BridgeNfIptables: !sysInfo.BridgeNfCallIptablesDisabled, |
| 74 | 74 |
BridgeNfIP6tables: !sysInfo.BridgeNfCallIP6tablesDisabled, |
| ... | ... |
@@ -76,7 +77,7 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
|
| 76 | 76 |
NFd: fileutils.GetTotalUsedFds(), |
| 77 | 77 |
NGoroutines: runtime.NumGoroutine(), |
| 78 | 78 |
SystemTime: time.Now().Format(time.RFC3339Nano), |
| 79 |
- ExecutionDriver: daemon.ExecutionDriver().Name(), |
|
| 79 |
+ ExecutionDriver: daemon.ExecutionDriver(ctx).Name(), |
|
| 80 | 80 |
LoggingDriver: daemon.defaultLogConfig.Type, |
| 81 | 81 |
NEventsListener: daemon.EventsService.SubscribersCount(), |
| 82 | 82 |
KernelVersion: kernelVersion, |
| ... | ... |
@@ -5,13 +5,14 @@ import ( |
| 5 | 5 |
"time" |
| 6 | 6 |
|
| 7 | 7 |
"github.com/docker/docker/api/types" |
| 8 |
+ "github.com/docker/docker/context" |
|
| 8 | 9 |
) |
| 9 | 10 |
|
| 10 | 11 |
// ContainerInspect returns low-level information about a |
| 11 | 12 |
// container. Returns an error if the container cannot be found, or if |
| 12 | 13 |
// there is an error getting the data. |
| 13 |
-func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error) {
|
|
| 14 |
- container, err := daemon.Get(name) |
|
| 14 |
+func (daemon *Daemon) ContainerInspect(ctx context.Context, name string) (*types.ContainerJSON, error) {
|
|
| 15 |
+ container, err := daemon.Get(ctx, name) |
|
| 15 | 16 |
if err != nil {
|
| 16 | 17 |
return nil, err |
| 17 | 18 |
} |
| ... | ... |
@@ -19,7 +20,7 @@ func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error |
| 19 | 19 |
container.Lock() |
| 20 | 20 |
defer container.Unlock() |
| 21 | 21 |
|
| 22 |
- base, err := daemon.getInspectData(container) |
|
| 22 |
+ base, err := daemon.getInspectData(ctx, container) |
|
| 23 | 23 |
if err != nil {
|
| 24 | 24 |
return nil, err |
| 25 | 25 |
} |
| ... | ... |
@@ -30,8 +31,8 @@ func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error |
| 30 | 30 |
} |
| 31 | 31 |
|
| 32 | 32 |
// ContainerInspect120 serializes the master version of a container into a json type. |
| 33 |
-func (daemon *Daemon) ContainerInspect120(name string) (*types.ContainerJSON120, error) {
|
|
| 34 |
- container, err := daemon.Get(name) |
|
| 33 |
+func (daemon *Daemon) ContainerInspect120(ctx context.Context, name string) (*types.ContainerJSON120, error) {
|
|
| 34 |
+ container, err := daemon.Get(ctx, name) |
|
| 35 | 35 |
if err != nil {
|
| 36 | 36 |
return nil, err |
| 37 | 37 |
} |
| ... | ... |
@@ -39,7 +40,7 @@ func (daemon *Daemon) ContainerInspect120(name string) (*types.ContainerJSON120, |
| 39 | 39 |
container.Lock() |
| 40 | 40 |
defer container.Unlock() |
| 41 | 41 |
|
| 42 |
- base, err := daemon.getInspectData(container) |
|
| 42 |
+ base, err := daemon.getInspectData(ctx, container) |
|
| 43 | 43 |
if err != nil {
|
| 44 | 44 |
return nil, err |
| 45 | 45 |
} |
| ... | ... |
@@ -53,11 +54,11 @@ func (daemon *Daemon) ContainerInspect120(name string) (*types.ContainerJSON120, |
| 53 | 53 |
return &types.ContainerJSON120{base, mountPoints, config}, nil
|
| 54 | 54 |
} |
| 55 | 55 |
|
| 56 |
-func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSONBase, error) {
|
|
| 56 |
+func (daemon *Daemon) getInspectData(ctx context.Context, container *Container) (*types.ContainerJSONBase, error) {
|
|
| 57 | 57 |
// make a copy to play with |
| 58 | 58 |
hostConfig := *container.hostConfig |
| 59 | 59 |
|
| 60 |
- if children, err := daemon.children(container.Name); err == nil {
|
|
| 60 |
+ if children, err := daemon.children(ctx, container.Name); err == nil {
|
|
| 61 | 61 |
for linkAlias, child := range children {
|
| 62 | 62 |
hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
|
| 63 | 63 |
} |
| ... | ... |
@@ -120,7 +121,7 @@ func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSON |
| 120 | 120 |
|
| 121 | 121 |
// ContainerExecInspect returns low-level information about the exec |
| 122 | 122 |
// command. An error is returned if the exec cannot be found. |
| 123 |
-func (daemon *Daemon) ContainerExecInspect(id string) (*ExecConfig, error) {
|
|
| 123 |
+func (daemon *Daemon) ContainerExecInspect(ctx context.Context, id string) (*ExecConfig, error) {
|
|
| 124 | 124 |
eConfig, err := daemon.getExecConfig(id) |
| 125 | 125 |
if err != nil {
|
| 126 | 126 |
return nil, err |
| ... | ... |
@@ -130,7 +131,7 @@ func (daemon *Daemon) ContainerExecInspect(id string) (*ExecConfig, error) {
|
| 130 | 130 |
|
| 131 | 131 |
// VolumeInspect looks up a volume by name. An error is returned if |
| 132 | 132 |
// the volume cannot be found. |
| 133 |
-func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) {
|
|
| 133 |
+func (daemon *Daemon) VolumeInspect(ctx context.Context, name string) (*types.Volume, error) {
|
|
| 134 | 134 |
v, err := daemon.volumes.Get(name) |
| 135 | 135 |
if err != nil {
|
| 136 | 136 |
return nil, err |
| ... | ... |
@@ -2,7 +2,10 @@ |
| 2 | 2 |
|
| 3 | 3 |
package daemon |
| 4 | 4 |
|
| 5 |
-import "github.com/docker/docker/api/types" |
|
| 5 |
+import ( |
|
| 6 |
+ "github.com/docker/docker/api/types" |
|
| 7 |
+ "github.com/docker/docker/context" |
|
| 8 |
+) |
|
| 6 | 9 |
|
| 7 | 10 |
// This sets platform-specific fields |
| 8 | 11 |
func setPlatformSpecificContainerFields(container *Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
|
| ... | ... |
@@ -15,8 +18,8 @@ func setPlatformSpecificContainerFields(container *Container, contJSONBase *type |
| 15 | 15 |
} |
| 16 | 16 |
|
| 17 | 17 |
// ContainerInspectPre120 gets containers for pre 1.20 APIs. |
| 18 |
-func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSONPre120, error) {
|
|
| 19 |
- container, err := daemon.Get(name) |
|
| 18 |
+func (daemon *Daemon) ContainerInspectPre120(ctx context.Context, name string) (*types.ContainerJSONPre120, error) {
|
|
| 19 |
+ container, err := daemon.Get(ctx, name) |
|
| 20 | 20 |
if err != nil {
|
| 21 | 21 |
return nil, err |
| 22 | 22 |
} |
| ... | ... |
@@ -24,7 +27,7 @@ func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSONP |
| 24 | 24 |
container.Lock() |
| 25 | 25 |
defer container.Unlock() |
| 26 | 26 |
|
| 27 |
- base, err := daemon.getInspectData(container) |
|
| 27 |
+ base, err := daemon.getInspectData(ctx, container) |
|
| 28 | 28 |
if err != nil {
|
| 29 | 29 |
return nil, err |
| 30 | 30 |
} |
| ... | ... |
@@ -1,6 +1,9 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 |
-import "github.com/docker/docker/api/types" |
|
| 3 |
+import ( |
|
| 4 |
+ "github.com/docker/docker/api/types" |
|
| 5 |
+ "github.com/docker/docker/context" |
|
| 6 |
+) |
|
| 4 | 7 |
|
| 5 | 8 |
// This sets platform-specific fields |
| 6 | 9 |
func setPlatformSpecificContainerFields(container *Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
|
| ... | ... |
@@ -12,6 +15,6 @@ func addMountPoints(container *Container) []types.MountPoint {
|
| 12 | 12 |
} |
| 13 | 13 |
|
| 14 | 14 |
// ContainerInspectPre120 gets containers for pre 1.20 APIs. |
| 15 |
-func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSON, error) {
|
|
| 16 |
- return daemon.ContainerInspect(name) |
|
| 15 |
+func (daemon *Daemon) ContainerInspectPre120(ctx context.Context, name string) (*types.ContainerJSON, error) {
|
|
| 16 |
+ return daemon.ContainerInspect(ctx, name) |
|
| 17 | 17 |
} |
| ... | ... |
@@ -1,25 +1,29 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 |
-import "syscall" |
|
| 3 |
+import ( |
|
| 4 |
+ "syscall" |
|
| 5 |
+ |
|
| 6 |
+ "github.com/docker/docker/context" |
|
| 7 |
+) |
|
| 4 | 8 |
|
| 5 | 9 |
// ContainerKill sends a signal to the container |
| 6 | 10 |
// If no signal is given (sig 0), then Kill with SIGKILL and wait |
| 7 | 11 |
// for the container to exit. |
| 8 | 12 |
// If a signal is given, then just send it to the container and return. |
| 9 |
-func (daemon *Daemon) ContainerKill(name string, sig uint64) error {
|
|
| 10 |
- container, err := daemon.Get(name) |
|
| 13 |
+func (daemon *Daemon) ContainerKill(ctx context.Context, name string, sig uint64) error {
|
|
| 14 |
+ container, err := daemon.Get(ctx, name) |
|
| 11 | 15 |
if err != nil {
|
| 12 | 16 |
return err |
| 13 | 17 |
} |
| 14 | 18 |
|
| 15 | 19 |
// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) |
| 16 | 20 |
if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
|
| 17 |
- if err := container.Kill(); err != nil {
|
|
| 21 |
+ if err := container.Kill(ctx); err != nil {
|
|
| 18 | 22 |
return err |
| 19 | 23 |
} |
| 20 | 24 |
} else {
|
| 21 | 25 |
// Otherwise, just send the requested signal |
| 22 |
- if err := container.killSig(int(sig)); err != nil {
|
|
| 26 |
+ if err := container.killSig(ctx, int(sig)); err != nil {
|
|
| 23 | 27 |
return err |
| 24 | 28 |
} |
| 25 | 29 |
} |
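Note: the sketch below illustrates the two dispatch paths documented above; it assumes a *daemon.Daemon handle and a hypothetical container name "web".

    package main

    import (
        "syscall"

        "github.com/docker/docker/context"
        "github.com/docker/docker/daemon"
    )

    // forwardSignal is a hypothetical caller: a non-zero, non-SIGKILL signal is
    // forwarded to the container as-is; sig == 0 or SIGKILL would instead take
    // the Kill (SIGKILL + wait) path inside ContainerKill.
    func forwardSignal(ctx context.Context, d *daemon.Daemon) error {
        return d.ContainerKill(ctx, "web", uint64(syscall.SIGTERM))
    }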
| ... | ... |
@@ -8,6 +8,7 @@ import ( |
| 8 | 8 |
|
| 9 | 9 |
"github.com/Sirupsen/logrus" |
| 10 | 10 |
"github.com/docker/docker/api/types" |
| 11 |
+ "github.com/docker/docker/context" |
|
| 11 | 12 |
derr "github.com/docker/docker/errors" |
| 12 | 13 |
"github.com/docker/docker/image" |
| 13 | 14 |
"github.com/docker/docker/pkg/graphdb" |
| ... | ... |
@@ -20,7 +21,7 @@ type iterationAction int |
| 20 | 20 |
|
| 21 | 21 |
// containerReducer represents a reducer for a container. |
| 22 | 22 |
// Returns the object to serialize by the api. |
| 23 |
-type containerReducer func(*Container, *listContext) (*types.Container, error) |
|
| 23 |
+type containerReducer func(context.Context, *Container, *listContext) (*types.Container, error) |
|
| 24 | 24 |
|
| 25 | 25 |
const ( |
| 26 | 26 |
// includeContainer is the action to include a container in the reducer. |
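Note: containerReducer now takes the request context as its first parameter; that is also why later hunks in this file rename the *listContext variables from ctx to lctx/fctx, freeing the name ctx for the context.Context. A minimal reducer with the new shape is sketched below as it would appear inside package daemon (listContext is unexported, so it cannot live elsewhere); minimalContainer is hypothetical, the real reducer is transformContainer.

    // minimalContainer is a hypothetical reducer matching the new signature;
    // it only copies the ID and cached display names, mirroring the first
    // lines of transformContainer further down in this diff.
    func (daemon *Daemon) minimalContainer(ctx context.Context, container *Container, lctx *listContext) (*types.Container, error) {
        return &types.Container{
            ID:    container.ID,
            Names: lctx.names[container.ID],
        }, nil
    }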
| ... | ... |
@@ -35,7 +36,7 @@ const ( |
| 35 | 35 |
var errStopIteration = errors.New("container list iteration stopped")
|
| 36 | 36 |
|
| 37 | 37 |
// List returns an array of all containers registered in the daemon. |
| 38 |
-func (daemon *Daemon) List() []*Container {
|
|
| 38 |
+func (daemon *Daemon) List(ctx context.Context) []*Container {
|
|
| 39 | 39 |
return daemon.containers.List() |
| 40 | 40 |
} |
| 41 | 41 |
|
| ... | ... |
@@ -79,21 +80,21 @@ type listContext struct {
|
| 79 | 79 |
} |
| 80 | 80 |
|
| 81 | 81 |
// Containers returns the list of containers to show given the user's filtering. |
| 82 |
-func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container, error) {
|
|
| 83 |
- return daemon.reduceContainers(config, daemon.transformContainer) |
|
| 82 |
+func (daemon *Daemon) Containers(ctx context.Context, config *ContainersConfig) ([]*types.Container, error) {
|
|
| 83 |
+ return daemon.reduceContainers(ctx, config, daemon.transformContainer) |
|
| 84 | 84 |
} |
| 85 | 85 |
|
| 86 | 86 |
// reduceContainers parses the user filtering and generates the list of containers to return based on a reducer. |
| 87 |
-func (daemon *Daemon) reduceContainers(config *ContainersConfig, reducer containerReducer) ([]*types.Container, error) {
|
|
| 87 |
+func (daemon *Daemon) reduceContainers(ctx context.Context, config *ContainersConfig, reducer containerReducer) ([]*types.Container, error) {
|
|
| 88 | 88 |
containers := []*types.Container{}
|
| 89 | 89 |
|
| 90 |
- ctx, err := daemon.foldFilter(config) |
|
| 90 |
+ fctx, err := daemon.foldFilter(ctx, config) |
|
| 91 | 91 |
if err != nil {
|
| 92 | 92 |
return nil, err |
| 93 | 93 |
} |
| 94 | 94 |
|
| 95 |
- for _, container := range daemon.List() {
|
|
| 96 |
- t, err := daemon.reducePsContainer(container, ctx, reducer) |
|
| 95 |
+ for _, container := range daemon.List(ctx) {
|
|
| 96 |
+ t, err := daemon.reducePsContainer(ctx, container, fctx, reducer) |
|
| 97 | 97 |
if err != nil {
|
| 98 | 98 |
if err != errStopIteration {
|
| 99 | 99 |
return nil, err |
| ... | ... |
@@ -102,19 +103,19 @@ func (daemon *Daemon) reduceContainers(config *ContainersConfig, reducer contain |
| 102 | 102 |
} |
| 103 | 103 |
if t != nil {
|
| 104 | 104 |
containers = append(containers, t) |
| 105 |
- ctx.idx++ |
|
| 105 |
+ fctx.idx++ |
|
| 106 | 106 |
} |
| 107 | 107 |
} |
| 108 | 108 |
return containers, nil |
| 109 | 109 |
} |
| 110 | 110 |
|
| 111 | 111 |
// reducePsContainer reduces a container to the basic representation expected by the ps command. |
| 112 |
-func (daemon *Daemon) reducePsContainer(container *Container, ctx *listContext, reducer containerReducer) (*types.Container, error) {
|
|
| 112 |
+func (daemon *Daemon) reducePsContainer(ctx context.Context, container *Container, lctx *listContext, reducer containerReducer) (*types.Container, error) {
|
|
| 113 | 113 |
container.Lock() |
| 114 | 114 |
defer container.Unlock() |
| 115 | 115 |
|
| 116 | 116 |
// filter containers to return |
| 117 |
- action := includeContainerInList(container, ctx) |
|
| 117 |
+ action := includeContainerInList(container, lctx) |
|
| 118 | 118 |
switch action {
|
| 119 | 119 |
case excludeContainer: |
| 120 | 120 |
return nil, nil |
| ... | ... |
@@ -123,11 +124,11 @@ func (daemon *Daemon) reducePsContainer(container *Container, ctx *listContext, |
| 123 | 123 |
} |
| 124 | 124 |
|
| 125 | 125 |
// transform internal container struct into api structs |
| 126 |
- return reducer(container, ctx) |
|
| 126 |
+ return reducer(ctx, container, lctx) |
|
| 127 | 127 |
} |
| 128 | 128 |
|
| 129 | 129 |
// foldFilter generates the container filter based on the user's filtering options. |
| 130 |
-func (daemon *Daemon) foldFilter(config *ContainersConfig) (*listContext, error) {
|
|
| 130 |
+func (daemon *Daemon) foldFilter(ctx context.Context, config *ContainersConfig) (*listContext, error) {
|
|
| 131 | 131 |
psFilters, err := filters.FromParam(config.Filters) |
| 132 | 132 |
if err != nil {
|
| 133 | 133 |
return nil, err |
| ... | ... |
@@ -159,11 +160,11 @@ func (daemon *Daemon) foldFilter(config *ContainersConfig) (*listContext, error) |
| 159 | 159 |
var ancestorFilter bool |
| 160 | 160 |
if ancestors, ok := psFilters["ancestor"]; ok {
|
| 161 | 161 |
ancestorFilter = true |
| 162 |
- byParents := daemon.Graph().ByParent() |
|
| 162 |
+ byParents := daemon.Graph(ctx).ByParent() |
|
| 163 | 163 |
// The idea is to walk the graph down the most "efficient" way. |
| 164 | 164 |
for _, ancestor := range ancestors {
|
| 165 | 165 |
// First, get the imageId of the ancestor filter (yay) |
| 166 |
- image, err := daemon.Repositories().LookupImage(ancestor) |
|
| 166 |
+ image, err := daemon.Repositories(ctx).LookupImage(ancestor) |
|
| 167 | 167 |
if err != nil {
|
| 168 | 168 |
logrus.Warnf("Error while looking up for image %v", ancestor)
|
| 169 | 169 |
continue |
| ... | ... |
@@ -185,14 +186,14 @@ func (daemon *Daemon) foldFilter(config *ContainersConfig) (*listContext, error) |
| 185 | 185 |
|
| 186 | 186 |
var beforeCont, sinceCont *Container |
| 187 | 187 |
if config.Before != "" {
|
| 188 |
- beforeCont, err = daemon.Get(config.Before) |
|
| 188 |
+ beforeCont, err = daemon.Get(ctx, config.Before) |
|
| 189 | 189 |
if err != nil {
|
| 190 | 190 |
return nil, err |
| 191 | 191 |
} |
| 192 | 192 |
} |
| 193 | 193 |
|
| 194 | 194 |
if config.Since != "" {
|
| 195 |
- sinceCont, err = daemon.Get(config.Since) |
|
| 195 |
+ sinceCont, err = daemon.Get(ctx, config.Since) |
|
| 196 | 196 |
if err != nil {
|
| 197 | 197 |
return nil, err |
| 198 | 198 |
} |
| ... | ... |
@@ -286,13 +287,13 @@ func includeContainerInList(container *Container, ctx *listContext) iterationAct |
| 286 | 286 |
} |
| 287 | 287 |
|
| 288 | 288 |
// transformContainer generates the container type expected by the docker ps command. |
| 289 |
-func (daemon *Daemon) transformContainer(container *Container, ctx *listContext) (*types.Container, error) {
|
|
| 289 |
+func (daemon *Daemon) transformContainer(ctx context.Context, container *Container, lctx *listContext) (*types.Container, error) {
|
|
| 290 | 290 |
newC := &types.Container{
|
| 291 | 291 |
ID: container.ID, |
| 292 |
- Names: ctx.names[container.ID], |
|
| 292 |
+ Names: lctx.names[container.ID], |
|
| 293 | 293 |
} |
| 294 | 294 |
|
| 295 |
- img, err := daemon.Repositories().LookupImage(container.Config.Image) |
|
| 295 |
+ img, err := daemon.Repositories(ctx).LookupImage(container.Config.Image) |
|
| 296 | 296 |
if err != nil {
|
| 297 | 297 |
// If the image can no longer be found by its original reference, |
| 298 | 298 |
// it makes sense to show the ID instead of a stale reference. |
| ... | ... |
@@ -349,8 +350,8 @@ func (daemon *Daemon) transformContainer(container *Container, ctx *listContext) |
| 349 | 349 |
} |
| 350 | 350 |
} |
| 351 | 351 |
|
| 352 |
- if ctx.Size {
|
|
| 353 |
- sizeRw, sizeRootFs := container.getSize() |
|
| 352 |
+ if lctx.Size {
|
|
| 353 |
+ sizeRw, sizeRootFs := container.getSize(ctx) |
|
| 354 | 354 |
newC.SizeRw = sizeRw |
| 355 | 355 |
newC.SizeRootFs = sizeRootFs |
| 356 | 356 |
} |
| ... | ... |
@@ -361,7 +362,7 @@ func (daemon *Daemon) transformContainer(container *Container, ctx *listContext) |
| 361 | 361 |
|
| 362 | 362 |
// Volumes lists known volumes, using the filter to restrict the range |
| 363 | 363 |
// of volumes returned. |
| 364 |
-func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, error) {
|
|
| 364 |
+func (daemon *Daemon) Volumes(ctx context.Context, filter string) ([]*types.Volume, error) {
|
|
| 365 | 365 |
var volumesOut []*types.Volume |
| 366 | 366 |
volFilters, err := filters.FromParam(filter) |
| 367 | 367 |
if err != nil {
|
| ... | ... |
@@ -6,6 +6,7 @@ import ( |
| 6 | 6 |
"time" |
| 7 | 7 |
|
| 8 | 8 |
"github.com/Sirupsen/logrus" |
| 9 |
+ "github.com/docker/docker/context" |
|
| 9 | 10 |
"github.com/docker/docker/daemon/logger" |
| 10 | 11 |
derr "github.com/docker/docker/errors" |
| 11 | 12 |
"github.com/docker/docker/pkg/stdcopy" |
| ... | ... |
@@ -30,7 +31,7 @@ type ContainerLogsConfig struct {
|
| 30 | 30 |
|
| 31 | 31 |
// ContainerLogs hooks up a container's stdout and stderr streams |
| 32 | 32 |
// configured with the given struct. |
| 33 |
-func (daemon *Daemon) ContainerLogs(container *Container, config *ContainerLogsConfig) error {
|
|
| 33 |
+func (daemon *Daemon) ContainerLogs(ctx context.Context, container *Container, config *ContainerLogsConfig) error {
|
|
| 34 | 34 |
if !(config.UseStdout || config.UseStderr) {
|
| 35 | 35 |
return derr.ErrorCodeNeedStream |
| 36 | 36 |
} |
| ... | ... |
@@ -7,6 +7,7 @@ import ( |
| 7 | 7 |
"time" |
| 8 | 8 |
|
| 9 | 9 |
"github.com/Sirupsen/logrus" |
| 10 |
+ "github.com/docker/docker/context" |
|
| 10 | 11 |
"github.com/docker/docker/daemon/execdriver" |
| 11 | 12 |
"github.com/docker/docker/pkg/stringid" |
| 12 | 13 |
"github.com/docker/docker/runconfig" |
| ... | ... |
@@ -84,9 +85,9 @@ func (m *containerMonitor) ExitOnNext() {
|
| 84 | 84 |
|
| 85 | 85 |
// Close closes the container's resources such as networking allocations and |
| 86 | 86 |
// unmounts the container's root filesystem |
| 87 |
-func (m *containerMonitor) Close() error {
|
|
| 87 |
+func (m *containerMonitor) Close(ctx context.Context) error {
|
|
| 88 | 88 |
// Cleanup networking and mounts |
| 89 |
- m.container.cleanup() |
|
| 89 |
+ m.container.cleanup(ctx) |
|
| 90 | 90 |
|
| 91 | 91 |
// FIXME: there is a race condition between two RUN instructions in Dockerfile |
| 92 | 92 |
// because they share same runconfig and change image. Must be fixed |
| ... | ... |
@@ -101,7 +102,7 @@ func (m *containerMonitor) Close() error {
|
| 101 | 101 |
} |
| 102 | 102 |
|
| 103 | 103 |
// Start starts the container's process and monitors it according to the restart policy |
| 104 |
-func (m *containerMonitor) Start() error {
|
|
| 104 |
+func (m *containerMonitor) Start(ctx context.Context) error {
|
|
| 105 | 105 |
var ( |
| 106 | 106 |
err error |
| 107 | 107 |
exitStatus execdriver.ExitStatus |
| ... | ... |
@@ -117,7 +118,7 @@ func (m *containerMonitor) Start() error {
|
| 117 | 117 |
m.container.setStopped(&exitStatus) |
| 118 | 118 |
defer m.container.Unlock() |
| 119 | 119 |
} |
| 120 |
- m.Close() |
|
| 120 |
+ m.Close(ctx) |
|
| 121 | 121 |
}() |
| 122 | 122 |
// reset stopped flag |
| 123 | 123 |
if m.container.HasBeenManuallyStopped {
|
| ... | ... |
@@ -138,11 +139,11 @@ func (m *containerMonitor) Start() error {
|
| 138 | 138 |
|
| 139 | 139 |
pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin) |
| 140 | 140 |
|
| 141 |
- m.container.logEvent("start")
|
|
| 141 |
+ m.container.logEvent(ctx, "start") |
|
| 142 | 142 |
|
| 143 | 143 |
m.lastStartTime = time.Now() |
| 144 | 144 |
|
| 145 |
- if exitStatus, err = m.container.daemon.run(m.container, pipes, m.callback); err != nil {
|
|
| 145 |
+ if exitStatus, err = m.container.daemon.run(ctx, m.container, pipes, m.callback); err != nil {
|
|
| 146 | 146 |
// if we receive an internal error from the initial start of a container then let's |
| 147 | 147 |
// return it instead of entering the restart loop |
| 148 | 148 |
if m.container.RestartCount == 0 {
|
| ... | ... |
@@ -162,7 +163,7 @@ func (m *containerMonitor) Start() error {
|
| 162 | 162 |
|
| 163 | 163 |
if m.shouldRestart(exitStatus.ExitCode) {
|
| 164 | 164 |
m.container.setRestarting(&exitStatus) |
| 165 |
- m.container.logEvent("die")
|
|
| 165 |
+ m.container.logEvent(ctx, "die") |
|
| 166 | 166 |
m.resetContainer(true) |
| 167 | 167 |
|
| 168 | 168 |
// sleep with a small time increment between each restart to help avoid issues caused by quickly |
| ... | ... |
@@ -177,7 +178,7 @@ func (m *containerMonitor) Start() error {
|
| 177 | 177 |
continue |
| 178 | 178 |
} |
| 179 | 179 |
|
| 180 |
- m.container.logEvent("die")
|
|
| 180 |
+ m.container.logEvent(ctx, "die") |
|
| 181 | 181 |
m.resetContainer(true) |
| 182 | 182 |
return err |
| 183 | 183 |
} |
| ... | ... |
@@ -245,11 +246,11 @@ func (m *containerMonitor) shouldRestart(exitCode int) bool {
|
| 245 | 245 |
|
| 246 | 246 |
// callback ensures that the container's state is properly updated after we |
| 247 | 247 |
// received ack from the execution drivers |
| 248 |
-func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
|
|
| 248 |
+func (m *containerMonitor) callback(ctx context.Context, processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
|
|
| 249 | 249 |
go func() {
|
| 250 | 250 |
_, ok := <-chOOM |
| 251 | 251 |
if ok {
|
| 252 |
- m.container.logEvent("oom")
|
|
| 252 |
+ m.container.logEvent(ctx, "oom") |
|
| 253 | 253 |
} |
| 254 | 254 |
}() |
| 255 | 255 |
|
| ... | ... |
@@ -1,17 +1,18 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "github.com/docker/docker/context" |
|
| 4 | 5 |
derr "github.com/docker/docker/errors" |
| 5 | 6 |
) |
| 6 | 7 |
|
| 7 | 8 |
// ContainerPause pauses a container |
| 8 |
-func (daemon *Daemon) ContainerPause(name string) error {
|
|
| 9 |
- container, err := daemon.Get(name) |
|
| 9 |
+func (daemon *Daemon) ContainerPause(ctx context.Context, name string) error {
|
|
| 10 |
+ container, err := daemon.Get(ctx, name) |
|
| 10 | 11 |
if err != nil {
|
| 11 | 12 |
return err |
| 12 | 13 |
} |
| 13 | 14 |
|
| 14 |
- if err := container.pause(); err != nil {
|
|
| 15 |
+ if err := container.pause(ctx); err != nil {
|
|
| 15 | 16 |
return derr.ErrorCodePauseError.WithArgs(name, err) |
| 16 | 17 |
} |
| 17 | 18 |
|
| ... | ... |
@@ -1,18 +1,19 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "github.com/docker/docker/context" |
|
| 4 | 5 |
derr "github.com/docker/docker/errors" |
| 5 | 6 |
) |
| 6 | 7 |
|
| 7 | 8 |
// ContainerRename changes the name of a container, using the oldName |
| 8 | 9 |
// to find the container. An error is returned if newName is already |
| 9 | 10 |
// reserved. |
| 10 |
-func (daemon *Daemon) ContainerRename(oldName, newName string) error {
|
|
| 11 |
+func (daemon *Daemon) ContainerRename(ctx context.Context, oldName, newName string) error {
|
|
| 11 | 12 |
if oldName == "" || newName == "" {
|
| 12 | 13 |
return derr.ErrorCodeEmptyRename |
| 13 | 14 |
} |
| 14 | 15 |
|
| 15 |
- container, err := daemon.Get(oldName) |
|
| 16 |
+ container, err := daemon.Get(ctx, oldName) |
|
| 16 | 17 |
if err != nil {
|
| 17 | 18 |
return err |
| 18 | 19 |
} |
| ... | ... |
@@ -21,7 +22,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
|
| 21 | 21 |
|
| 22 | 22 |
container.Lock() |
| 23 | 23 |
defer container.Unlock() |
| 24 |
- if newName, err = daemon.reserveName(container.ID, newName); err != nil {
|
|
| 24 |
+ if newName, err = daemon.reserveName(ctx, container.ID, newName); err != nil {
|
|
| 25 | 25 |
return derr.ErrorCodeRenameTaken.WithArgs(err) |
| 26 | 26 |
} |
| 27 | 27 |
|
| ... | ... |
@@ -29,7 +30,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
|
| 29 | 29 |
|
| 30 | 30 |
undo := func() {
|
| 31 | 31 |
container.Name = oldName |
| 32 |
- daemon.reserveName(container.ID, oldName) |
|
| 32 |
+ daemon.reserveName(ctx, container.ID, oldName) |
|
| 33 | 33 |
daemon.containerGraphDB.Delete(newName) |
| 34 | 34 |
} |
| 35 | 35 |
|
| ... | ... |
@@ -43,6 +44,6 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
|
| 43 | 43 |
return err |
| 44 | 44 |
} |
| 45 | 45 |
|
| 46 |
- container.logEvent("rename")
|
|
| 46 |
+ container.logEvent(ctx, "rename") |
|
| 47 | 47 |
return nil |
| 48 | 48 |
} |
| ... | ... |
@@ -1,20 +1,24 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 |
+import ( |
|
| 4 |
+ "github.com/docker/docker/context" |
|
| 5 |
+) |
|
| 6 |
+ |
|
| 3 | 7 |
// ContainerResize changes the size of the TTY of the process running |
| 4 | 8 |
// in the container with the given name to the given height and width. |
| 5 |
-func (daemon *Daemon) ContainerResize(name string, height, width int) error {
|
|
| 6 |
- container, err := daemon.Get(name) |
|
| 9 |
+func (daemon *Daemon) ContainerResize(ctx context.Context, name string, height, width int) error {
|
|
| 10 |
+ container, err := daemon.Get(ctx, name) |
|
| 7 | 11 |
if err != nil {
|
| 8 | 12 |
return err |
| 9 | 13 |
} |
| 10 | 14 |
|
| 11 |
- return container.Resize(height, width) |
|
| 15 |
+ return container.Resize(ctx, height, width) |
|
| 12 | 16 |
} |
| 13 | 17 |
|
| 14 | 18 |
// ContainerExecResize changes the size of the TTY of the process |
| 15 | 19 |
// running in the exec with the given name to the given height and |
| 16 | 20 |
// width. |
| 17 |
-func (daemon *Daemon) ContainerExecResize(name string, height, width int) error {
|
|
| 21 |
+func (daemon *Daemon) ContainerExecResize(ctx context.Context, name string, height, width int) error {
|
|
| 18 | 22 |
ExecConfig, err := daemon.getExecConfig(name) |
| 19 | 23 |
if err != nil {
|
| 20 | 24 |
return err |
| ... | ... |
@@ -1,6 +1,7 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "github.com/docker/docker/context" |
|
| 4 | 5 |
derr "github.com/docker/docker/errors" |
| 5 | 6 |
) |
| 6 | 7 |
|
| ... | ... |
@@ -10,12 +11,12 @@ import ( |
| 10 | 10 |
// timeout, ContainerRestart will wait forever until a graceful |
| 11 | 11 |
// stop. Returns an error if the container cannot be found, or if |
| 12 | 12 |
// there is an underlying error at any stage of the restart. |
| 13 |
-func (daemon *Daemon) ContainerRestart(name string, seconds int) error {
|
|
| 14 |
- container, err := daemon.Get(name) |
|
| 13 |
+func (daemon *Daemon) ContainerRestart(ctx context.Context, name string, seconds int) error {
|
|
| 14 |
+ container, err := daemon.Get(ctx, name) |
|
| 15 | 15 |
if err != nil {
|
| 16 | 16 |
return err |
| 17 | 17 |
} |
| 18 |
- if err := container.Restart(seconds); err != nil {
|
|
| 18 |
+ if err := container.Restart(ctx, seconds); err != nil {
|
|
| 19 | 19 |
return derr.ErrorCodeCantRestart.WithArgs(name, err) |
| 20 | 20 |
} |
| 21 | 21 |
return nil |
| ... | ... |
@@ -3,14 +3,15 @@ package daemon |
| 3 | 3 |
import ( |
| 4 | 4 |
"runtime" |
| 5 | 5 |
|
| 6 |
+ "github.com/docker/docker/context" |
|
| 6 | 7 |
derr "github.com/docker/docker/errors" |
| 7 | 8 |
"github.com/docker/docker/runconfig" |
| 8 | 9 |
"github.com/docker/docker/utils" |
| 9 | 10 |
) |
| 10 | 11 |
|
| 11 | 12 |
// ContainerStart starts a container. |
| 12 |
-func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConfig) error {
|
|
| 13 |
- container, err := daemon.Get(name) |
|
| 13 |
+func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfig *runconfig.HostConfig) error {
|
|
| 14 |
+ container, err := daemon.Get(ctx, name) |
|
| 14 | 15 |
if err != nil {
|
| 15 | 16 |
return err |
| 16 | 17 |
} |
| ... | ... |
@@ -28,7 +29,7 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConf |
| 28 | 28 |
// This is kept for backward compatibility - hostconfig should be passed when |
| 29 | 29 |
// creating a container, not during start. |
| 30 | 30 |
if hostConfig != nil {
|
| 31 |
- if err := daemon.setHostConfig(container, hostConfig); err != nil {
|
|
| 31 |
+ if err := daemon.setHostConfig(ctx, container, hostConfig); err != nil {
|
|
| 32 | 32 |
return err |
| 33 | 33 |
} |
| 34 | 34 |
} |
| ... | ... |
@@ -40,11 +41,11 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConf |
| 40 | 40 |
|
| 41 | 41 |
// check if hostConfig is in line with the current system settings. |
| 42 | 42 |
// It may happen that cgroups are unmounted or the like. |
| 43 |
- if _, err = daemon.verifyContainerSettings(container.hostConfig, nil); err != nil {
|
|
| 43 |
+ if _, err = daemon.verifyContainerSettings(ctx, container.hostConfig, nil); err != nil {
|
|
| 44 | 44 |
return err |
| 45 | 45 |
} |
| 46 | 46 |
|
| 47 |
- if err := container.Start(); err != nil {
|
|
| 47 |
+ if err := container.Start(ctx); err != nil {
|
|
| 48 | 48 |
return derr.ErrorCodeCantStart.WithArgs(name, utils.GetErrorMessage(err)) |
| 49 | 49 |
} |
| 50 | 50 |
|
| ... | ... |
@@ -5,6 +5,7 @@ import ( |
| 5 | 5 |
"io" |
| 6 | 6 |
|
| 7 | 7 |
"github.com/docker/docker/api/types" |
| 8 |
+ "github.com/docker/docker/context" |
|
| 8 | 9 |
"github.com/docker/docker/daemon/execdriver" |
| 9 | 10 |
"github.com/docker/docker/pkg/version" |
| 10 | 11 |
"github.com/docker/libnetwork/osl" |
| ... | ... |
@@ -22,9 +23,9 @@ type ContainerStatsConfig struct {
|
| 22 | 22 |
|
| 23 | 23 |
// ContainerStats writes information about the container to the stream |
| 24 | 24 |
// given in the config object. |
| 25 |
-func (daemon *Daemon) ContainerStats(prefixOrName string, config *ContainerStatsConfig) error {
|
|
| 25 |
+func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, config *ContainerStatsConfig) error {
|
|
| 26 | 26 |
|
| 27 |
- container, err := daemon.Get(prefixOrName) |
|
| 27 |
+ container, err := daemon.Get(ctx, prefixOrName) |
|
| 28 | 28 |
if err != nil {
|
| 29 | 29 |
return err |
| 30 | 30 |
} |
| ... | ... |
@@ -1,6 +1,7 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "github.com/docker/docker/context" |
|
| 4 | 5 |
derr "github.com/docker/docker/errors" |
| 5 | 6 |
) |
| 6 | 7 |
|
| ... | ... |
@@ -10,15 +11,15 @@ import ( |
| 10 | 10 |
// will wait for a graceful termination. An error is returned if the |
| 11 | 11 |
// container is not found, is already stopped, or if there is a |
| 12 | 12 |
// problem stopping the container. |
| 13 |
-func (daemon *Daemon) ContainerStop(name string, seconds int) error {
|
|
| 14 |
- container, err := daemon.Get(name) |
|
| 13 |
+func (daemon *Daemon) ContainerStop(ctx context.Context, name string, seconds int) error {
|
|
| 14 |
+ container, err := daemon.Get(ctx, name) |
|
| 15 | 15 |
if err != nil {
|
| 16 | 16 |
return err |
| 17 | 17 |
} |
| 18 | 18 |
if !container.IsRunning() {
|
| 19 | 19 |
return derr.ErrorCodeStopped |
| 20 | 20 |
} |
| 21 |
- if err := container.Stop(seconds); err != nil {
|
|
| 21 |
+ if err := container.Stop(ctx, seconds); err != nil {
|
|
| 22 | 22 |
return derr.ErrorCodeCantStop.WithArgs(name, err) |
| 23 | 23 |
} |
| 24 | 24 |
return nil |
| ... | ... |
@@ -8,6 +8,7 @@ import ( |
| 8 | 8 |
"strings" |
| 9 | 9 |
|
| 10 | 10 |
"github.com/docker/docker/api/types" |
| 11 |
+ "github.com/docker/docker/context" |
|
| 11 | 12 |
derr "github.com/docker/docker/errors" |
| 12 | 13 |
) |
| 13 | 14 |
|
| ... | ... |
@@ -16,12 +17,12 @@ import ( |
| 16 | 16 |
// "-ef" if no args are given. An error is returned if the container |
| 17 | 17 |
// is not found, or is not running, or if there are any problems |
| 18 | 18 |
// running ps, or parsing the output. |
| 19 |
-func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) {
|
|
| 19 |
+func (daemon *Daemon) ContainerTop(ctx context.Context, name string, psArgs string) (*types.ContainerProcessList, error) {
|
|
| 20 | 20 |
if psArgs == "" {
|
| 21 | 21 |
psArgs = "-ef" |
| 22 | 22 |
} |
| 23 | 23 |
|
| 24 |
- container, err := daemon.Get(name) |
|
| 24 |
+ container, err := daemon.Get(ctx, name) |
|
| 25 | 25 |
if err != nil {
|
| 26 | 26 |
return nil, err |
| 27 | 27 |
} |
| ... | ... |
@@ -30,7 +31,7 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container |
| 30 | 30 |
return nil, derr.ErrorCodeNotRunning.WithArgs(name) |
| 31 | 31 |
} |
| 32 | 32 |
|
| 33 |
- pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID) |
|
| 33 |
+ pids, err := daemon.ExecutionDriver(ctx).GetPidsForContainer(container.ID) |
|
| 34 | 34 |
if err != nil {
|
| 35 | 35 |
return nil, err |
| 36 | 36 |
} |
| ... | ... |
@@ -76,6 +77,6 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container |
| 76 | 76 |
} |
| 77 | 77 |
} |
| 78 | 78 |
} |
| 79 |
- container.logEvent("top")
|
|
| 79 |
+ container.logEvent(ctx, "top") |
|
| 80 | 80 |
return procList, nil |
| 81 | 81 |
} |
| ... | ... |
@@ -2,10 +2,11 @@ package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 | 4 |
"github.com/docker/docker/api/types" |
| 5 |
+ "github.com/docker/docker/context" |
|
| 5 | 6 |
derr "github.com/docker/docker/errors" |
| 6 | 7 |
) |
| 7 | 8 |
|
| 8 | 9 |
// ContainerTop is not supported on Windows and returns an error. |
| 9 |
-func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) {
|
|
| 10 |
+func (daemon *Daemon) ContainerTop(ctx context.Context, name string, psArgs string) (*types.ContainerProcessList, error) {
|
|
| 10 | 11 |
return nil, derr.ErrorCodeNoTop |
| 11 | 12 |
} |
| ... | ... |
@@ -1,17 +1,18 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "github.com/docker/docker/context" |
|
| 4 | 5 |
derr "github.com/docker/docker/errors" |
| 5 | 6 |
) |
| 6 | 7 |
|
| 7 | 8 |
// ContainerUnpause unpauses a container |
| 8 |
-func (daemon *Daemon) ContainerUnpause(name string) error {
|
|
| 9 |
- container, err := daemon.Get(name) |
|
| 9 |
+func (daemon *Daemon) ContainerUnpause(ctx context.Context, name string) error {
|
|
| 10 |
+ container, err := daemon.Get(ctx, name) |
|
| 10 | 11 |
if err != nil {
|
| 11 | 12 |
return err |
| 12 | 13 |
} |
| 13 | 14 |
|
| 14 |
- if err := container.unpause(); err != nil {
|
|
| 15 |
+ if err := container.unpause(ctx); err != nil {
|
|
| 15 | 16 |
return derr.ErrorCodeCantUnpause.WithArgs(name, err) |
| 16 | 17 |
} |
| 17 | 18 |
|
| ... | ... |
@@ -10,6 +10,7 @@ import ( |
| 10 | 10 |
"strings" |
| 11 | 11 |
|
| 12 | 12 |
"github.com/Sirupsen/logrus" |
| 13 |
+ "github.com/docker/docker/context" |
|
| 13 | 14 |
"github.com/docker/docker/daemon/execdriver" |
| 14 | 15 |
derr "github.com/docker/docker/errors" |
| 15 | 16 |
"github.com/docker/docker/pkg/system" |
| ... | ... |
@@ -285,7 +286,7 @@ func parseVolumesFrom(spec string) (string, string, error) {
|
| 285 | 285 |
// 1. Select the previously configured mount points for the containers, if any. |
| 286 | 286 |
// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination. |
| 287 | 287 |
// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations. |
| 288 |
-func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error {
|
|
| 288 |
+func (daemon *Daemon) registerMountPoints(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
|
|
| 289 | 289 |
binds := map[string]bool{}
|
| 290 | 290 |
mountPoints := map[string]*mountPoint{}
|
| 291 | 291 |
|
| ... | ... |
@@ -301,7 +302,7 @@ func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runc |
| 301 | 301 |
return err |
| 302 | 302 |
} |
| 303 | 303 |
|
| 304 |
- c, err := daemon.Get(containerID) |
|
| 304 |
+ c, err := daemon.Get(ctx, containerID) |
|
| 305 | 305 |
if err != nil {
|
| 306 | 306 |
return err |
| 307 | 307 |
} |
| ... | ... |
@@ -3,6 +3,7 @@ |
| 3 | 3 |
package daemon |
| 4 | 4 |
|
| 5 | 5 |
import ( |
| 6 |
+ "github.com/docker/docker/context" |
|
| 6 | 7 |
"github.com/docker/docker/daemon/execdriver" |
| 7 | 8 |
"github.com/docker/docker/runconfig" |
| 8 | 9 |
) |
| ... | ... |
@@ -31,6 +32,6 @@ func (daemon *Daemon) verifyVolumesInfo(container *Container) error {
|
| 31 | 31 |
// registerMountPoints initializes the container mount points with the |
| 32 | 32 |
// configured volumes and bind mounts. Windows does not support volumes or |
| 33 | 33 |
// mount points. |
| 34 |
-func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error {
|
|
| 34 |
+func (daemon *Daemon) registerMountPoints(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
|
|
| 35 | 35 |
return nil |
| 36 | 36 |
} |
| ... | ... |
@@ -1,14 +1,18 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 |
-import "time" |
|
| 3 |
+import ( |
|
| 4 |
+ "time" |
|
| 5 |
+ |
|
| 6 |
+ "github.com/docker/docker/context" |
|
| 7 |
+) |
|
| 4 | 8 |
|
| 5 | 9 |
// ContainerWait stops processing until the given container is |
| 6 | 10 |
// stopped. If the container is not found, an error is returned. On a |
| 7 | 11 |
// successful stop, the exit code of the container is returned. On a |
| 8 | 12 |
// timeout, an error is returned. If you want to wait forever, supply |
| 9 | 13 |
// a negative duration for the timeout. |
| 10 |
-func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) {
|
|
| 11 |
- container, err := daemon.Get(name) |
|
| 14 |
+func (daemon *Daemon) ContainerWait(ctx context.Context, name string, timeout time.Duration) (int, error) {
|
|
| 15 |
+ container, err := daemon.Get(ctx, name) |
|
| 12 | 16 |
if err != nil {
|
| 13 | 17 |
return -1, err |
| 14 | 18 |
} |
| ... | ... |
@@ -17,6 +17,7 @@ import ( |
| 17 | 17 |
"github.com/docker/docker/autogen/dockerversion" |
| 18 | 18 |
"github.com/docker/docker/cli" |
| 19 | 19 |
"github.com/docker/docker/cliconfig" |
| 20 |
+ "github.com/docker/docker/context" |
|
| 20 | 21 |
"github.com/docker/docker/daemon" |
| 21 | 22 |
"github.com/docker/docker/daemon/logger" |
| 22 | 23 |
"github.com/docker/docker/opts" |
| ... | ... |
@@ -150,6 +151,11 @@ func getGlobalFlag() (globalFlag *flag.Flag) {
|
| 150 | 150 |
|
| 151 | 151 |
// CmdDaemon is the daemon command, called the raw arguments after `docker daemon`. |
| 152 | 152 |
func (cli *DaemonCli) CmdDaemon(args ...string) error {
|
| 153 |
+ // This may need to be made even more global - it all depends |
|
| 154 |
+ // on whether we want the CLI to have a context object too. |
|
| 155 |
+ // For now we'll leave it as a daemon-side object only. |
|
| 156 |
+ ctx := context.Background() |
|
| 157 |
+ |
|
| 153 | 158 |
// warn from uuid package when running the daemon |
| 154 | 159 |
uuid.Loggerf = logrus.Warnf |
| 155 | 160 |
|
| ... | ... |
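Worth noting: this Background() context is the daemon-wide root and carries no request ID of its own. The per-request IDs that show up in the events output (distinct for each CLI command, shared across the several events of a single build, as the new integration test below checks) would be attached to contexts derived from this root on the API-server side, one per incoming request.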
@@ -224,7 +230,7 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
|
| 224 | 224 |
serverConfig.TLSConfig = tlsConfig |
| 225 | 225 |
} |
| 226 | 226 |
|
| 227 |
- api := apiserver.New(serverConfig) |
|
| 227 |
+ api := apiserver.New(ctx, serverConfig) |
|
| 228 | 228 |
|
| 229 | 229 |
// The serve API routine never exits unless an error occurs |
| 230 | 230 |
// We need to start it as a goroutine and wait on it so |
| ... | ... |
@@ -245,7 +251,7 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
|
| 245 | 245 |
cli.TrustKeyPath = commonFlags.TrustKey |
| 246 | 246 |
|
| 247 | 247 |
registryService := registry.NewService(cli.registryOptions) |
| 248 |
- d, err := daemon.NewDaemon(cli.Config, registryService) |
|
| 248 |
+ d, err := daemon.NewDaemon(ctx, cli.Config, registryService) |
|
| 249 | 249 |
if err != nil {
|
| 250 | 250 |
if pfile != nil {
|
| 251 | 251 |
if err := pfile.Remove(); err != nil {
|
| ... | ... |
@@ -260,14 +266,14 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
|
| 260 | 260 |
logrus.WithFields(logrus.Fields{
|
| 261 | 261 |
"version": dockerversion.VERSION, |
| 262 | 262 |
"commit": dockerversion.GITCOMMIT, |
| 263 |
- "execdriver": d.ExecutionDriver().Name(), |
|
| 264 |
- "graphdriver": d.GraphDriver().String(), |
|
| 263 |
+ "execdriver": d.ExecutionDriver(ctx).Name(), |
|
| 264 |
+ "graphdriver": d.GraphDriver(ctx).String(), |
|
| 265 | 265 |
}).Info("Docker daemon")
|
| 266 | 266 |
|
| 267 | 267 |
signal.Trap(func() {
|
| 268 | 268 |
api.Close() |
| 269 | 269 |
<-serveAPIWait |
| 270 |
- shutdownDaemon(d, 15) |
|
| 270 |
+ shutdownDaemon(ctx, d, 15) |
|
| 271 | 271 |
if pfile != nil {
|
| 272 | 272 |
if err := pfile.Remove(); err != nil {
|
| 273 | 273 |
logrus.Error(err) |
| ... | ... |
@@ -277,12 +283,12 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
|
| 277 | 277 |
|
| 278 | 278 |
// after the daemon is done setting up we can tell the api to start |
| 279 | 279 |
// accepting connections with specified daemon |
| 280 |
- api.AcceptConnections(d) |
|
| 280 |
+ api.AcceptConnections(ctx, d) |
|
| 281 | 281 |
|
| 282 | 282 |
// Daemon is fully initialized and handling API traffic |
| 283 | 283 |
// Wait for serve API to complete |
| 284 | 284 |
errAPI := <-serveAPIWait |
| 285 |
- shutdownDaemon(d, 15) |
|
| 285 |
+ shutdownDaemon(ctx, d, 15) |
|
| 286 | 286 |
if errAPI != nil {
|
| 287 | 287 |
if pfile != nil {
|
| 288 | 288 |
if err := pfile.Remove(); err != nil {
|
| ... | ... |
@@ -297,10 +303,10 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
|
| 297 | 297 |
// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case |
| 298 | 298 |
// d.Shutdown() is waiting too long to kill container or worst it's |
| 299 | 299 |
// blocked there |
| 300 |
-func shutdownDaemon(d *daemon.Daemon, timeout time.Duration) {
|
|
| 300 |
+func shutdownDaemon(ctx context.Context, d *daemon.Daemon, timeout time.Duration) {
|
|
| 301 | 301 |
ch := make(chan struct{})
|
| 302 | 302 |
go func() {
|
| 303 |
- d.Shutdown() |
|
| 303 |
+ d.Shutdown(ctx) |
|
| 304 | 304 |
close(ch) |
| 305 | 305 |
}() |
| 306 | 306 |
select {
|
| ... | ... |
@@ -5,6 +5,7 @@ import ( |
| 5 | 5 |
"net/http" |
| 6 | 6 |
"net/url" |
| 7 | 7 |
|
| 8 |
+ "github.com/docker/docker/context" |
|
| 8 | 9 |
"github.com/docker/docker/pkg/httputils" |
| 9 | 10 |
"github.com/docker/docker/pkg/progressreader" |
| 10 | 11 |
"github.com/docker/docker/pkg/streamformatter" |
| ... | ... |
@@ -16,7 +17,7 @@ import ( |
| 16 | 16 |
// inConfig (if src is "-"), or from a URI specified in src. Progress output is |
| 17 | 17 |
// written to outStream. Repository and tag names can optionally be given in |
| 18 | 18 |
// the repo and tag arguments, respectively. |
| 19 |
-func (s *TagStore) Import(src string, repo string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, containerConfig *runconfig.Config) error {
|
|
| 19 |
+func (s *TagStore) Import(ctx context.Context, src string, repo string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, containerConfig *runconfig.Config) error {
|
|
| 20 | 20 |
var ( |
| 21 | 21 |
sf = streamformatter.NewJSONStreamFormatter() |
| 22 | 22 |
archive io.ReadCloser |
| ... | ... |
@@ -74,6 +75,6 @@ func (s *TagStore) Import(src string, repo string, tag string, msg string, inCon |
| 74 | 74 |
logID = utils.ImageReference(logID, tag) |
| 75 | 75 |
} |
| 76 | 76 |
|
| 77 |
- s.eventsService.Log("import", logID, "")
|
|
| 77 |
+ s.eventsService.Log(ctx, "import", logID, "") |
|
| 78 | 78 |
return nil |
| 79 | 79 |
} |
| ... | ... |
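Forwarding ctx into eventsService.Log here is the payoff of the plumbing above: the events service can read the request ID off the context and copy it into the new RequestID field of the JSONMessage it broadcasts (see the jsonmessage hunks at the end of this diff). The pull and push call sites below get the identical one-line treatment.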
@@ -6,6 +6,7 @@ import ( |
| 6 | 6 |
|
| 7 | 7 |
"github.com/Sirupsen/logrus" |
| 8 | 8 |
"github.com/docker/docker/cliconfig" |
| 9 |
+ "github.com/docker/docker/context" |
|
| 9 | 10 |
"github.com/docker/docker/pkg/streamformatter" |
| 10 | 11 |
"github.com/docker/docker/registry" |
| 11 | 12 |
"github.com/docker/docker/utils" |
| ... | ... |
@@ -62,7 +63,7 @@ func NewPuller(s *TagStore, endpoint registry.APIEndpoint, repoInfo *registry.Re |
| 62 | 62 |
|
| 63 | 63 |
// Pull initiates a pull operation. image is the repository name to pull, and |
| 64 | 64 |
// tag may be either empty, or indicate a specific tag to pull. |
| 65 |
-func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConfig) error {
|
|
| 65 |
+func (s *TagStore) Pull(ctx context.Context, image string, tag string, imagePullConfig *ImagePullConfig) error {
|
|
| 66 | 66 |
var sf = streamformatter.NewJSONStreamFormatter() |
| 67 | 67 |
|
| 68 | 68 |
// Resolve the Repository name from fqn to RepositoryInfo |
| ... | ... |
@@ -131,7 +132,7 @@ func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConf |
| 131 | 131 |
|
| 132 | 132 |
} |
| 133 | 133 |
|
| 134 |
- s.eventsService.Log("pull", logName, "")
|
|
| 134 |
+ s.eventsService.Log(ctx, "pull", logName, "") |
|
| 135 | 135 |
return nil |
| 136 | 136 |
} |
| 137 | 137 |
|
| ... | ... |
@@ -7,6 +7,7 @@ import ( |
| 7 | 7 |
"github.com/Sirupsen/logrus" |
| 8 | 8 |
"github.com/docker/distribution/digest" |
| 9 | 9 |
"github.com/docker/docker/cliconfig" |
| 10 |
+ "github.com/docker/docker/context" |
|
| 10 | 11 |
"github.com/docker/docker/pkg/streamformatter" |
| 11 | 12 |
"github.com/docker/docker/registry" |
| 12 | 13 |
) |
| ... | ... |
@@ -67,7 +68,7 @@ func (s *TagStore) NewPusher(endpoint registry.APIEndpoint, localRepo Repository |
| 67 | 67 |
} |
| 68 | 68 |
|
| 69 | 69 |
// Push initiates a push operation on the repository named localName. |
| 70 |
-func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) error {
|
|
| 70 |
+func (s *TagStore) Push(ctx context.Context, localName string, imagePushConfig *ImagePushConfig) error {
|
|
| 71 | 71 |
// FIXME: Allow to interrupt current push when new push of same image is done. |
| 72 | 72 |
|
| 73 | 73 |
var sf = streamformatter.NewJSONStreamFormatter() |
| ... | ... |
@@ -115,7 +116,7 @@ func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) erro |
| 115 | 115 |
|
| 116 | 116 |
} |
| 117 | 117 |
|
| 118 |
- s.eventsService.Log("push", repoInfo.LocalName, "")
|
|
| 118 |
+ s.eventsService.Log(ctx, "push", repoInfo.LocalName, "") |
|
| 119 | 119 |
return nil |
| 120 | 120 |
} |
| 121 | 121 |
|
| ... | ... |
@@ -410,7 +410,7 @@ func (s *DockerSuite) TestEventsFilterContainer(c *check.C) {
|
| 410 | 410 |
} |
| 411 | 411 |
|
| 412 | 412 |
// Check the id |
| 413 |
- parsedID := strings.TrimSuffix(e[1], ":") |
|
| 413 |
+ parsedID := strings.TrimSuffix(e[3], ":") |
|
| 414 | 414 |
if parsedID != id {
|
| 415 | 415 |
return fmt.Errorf("expected event for container id %s: %s - parsed container id: %s", id, event, parsedID)
|
| 416 | 416 |
} |
| ... | ... |
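The index shift from e[1] to e[3] in the hunk above follows directly from the new output format: the rendered event line now carries two extra space-separated tokens, "[reqid:" and the bracketed twelve-character ID, between the timestamp and the container ID, so the "<id>:" token that the test trims moves from position 1 to position 3.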
@@ -686,3 +686,78 @@ func (s *DockerRegistrySuite) TestEventsImageFilterPush(c *check.C) {
|
| 686 | 686 |
c.Fatalf("Missing 'push' log event for image %s\n%s", repoName, out)
|
| 687 | 687 |
} |
| 688 | 688 |
} |
| 689 |
+ |
|
| 690 |
+func (s *DockerSuite) TestEventsReqID(c *check.C) {
|
|
| 691 |
+ // Tests for the "[reqid: xxx]" field in Events |
|
| 692 |
+ testRequires(c, DaemonIsLinux) |
|
| 693 |
+ |
|
| 694 |
+ reqIDMatch := `[^ ]+ \[reqid: ([0-9a-z]{12})\] [0-9a-z]+: `
|
|
| 695 |
+ reqIDRE := regexp.MustCompile(reqIDMatch) |
|
| 696 |
+ |
|
| 697 |
+ // Simple test just to make sure it works at all |
|
| 698 |
+ dockerCmd(c, "create", "busybox", "true") |
|
| 699 |
+ |
|
| 700 |
+ out, _ := dockerCmd(c, "events", "--since=0", "--until=0s") |
|
| 701 |
+ events := strings.Split(strings.TrimSpace(out), "\n") |
|
| 702 |
+ |
|
| 703 |
+ if len(events) == 0 {
|
|
| 704 |
+ c.Fatalf("Wrong # of events, should just be one, got:\n%v\n", events)
|
|
| 705 |
+ } |
|
| 706 |
+ |
|
| 707 |
+ createEvent := events[len(events)-1] |
|
| 708 |
+ |
|
| 709 |
+ matched, err := regexp.MatchString(reqIDMatch, createEvent) |
|
| 710 |
+ if err != nil || !matched {
|
|
| 711 |
+ c.Fatalf("Error finding reqID in event: %v\n", createEvent)
|
|
| 712 |
+ } |
|
| 713 |
+ |
|
| 714 |
+ reqID1 := reqIDRE.FindStringSubmatch(createEvent)[1] |
|
| 715 |
+ |
|
| 716 |
+ // Now make sure another cmd doesn't get the same reqID |
|
| 717 |
+ dockerCmd(c, "create", "busybox", "true") |
|
| 718 |
+ |
|
| 719 |
+ out, _ = dockerCmd(c, "events", "--since=0", "--until=0s") |
|
| 720 |
+ events = strings.Split(strings.TrimSpace(out), "\n") |
|
| 721 |
+ createEvent = events[len(events)-1] |
|
| 722 |
+ |
|
| 723 |
+ matched, err = regexp.MatchString(reqIDMatch, createEvent) |
|
| 724 |
+ if err != nil || !matched {
|
|
| 725 |
+ c.Fatalf("Error finding reqID in event: %v\n", createEvent)
|
|
| 726 |
+ } |
|
| 727 |
+ |
|
| 728 |
+ reqID2 := reqIDRE.FindStringSubmatch(createEvent)[1] |
|
| 729 |
+ |
|
| 730 |
+ if reqID1 == reqID2 {
|
|
| 731 |
+ c.Fatalf("Should not have the same reqID(%s):\n%v\n", reqID1, createEvent)
|
|
| 732 |
+ } |
|
| 733 |
+ |
|
| 734 |
+ // Now make sure a build **does** use the same reqID for all |
|
| 735 |
+ // 5 events that are generated |
|
| 736 |
+ _, err = buildImage("reqidimg", `
|
|
| 737 |
+ FROM busybox |
|
| 738 |
+ RUN echo HI`, true) |
|
| 739 |
+ if err != nil {
|
|
| 740 |
+ c.Fatalf("Couldn't create image: %q", err)
|
|
| 741 |
+ } |
|
| 742 |
+ |
|
| 743 |
+ out, _ = dockerCmd(c, "events", "--since=0", "--until=0s") |
|
| 744 |
+ events = strings.Split(strings.TrimSpace(out), "\n") |
|
| 745 |
+ |
|
| 746 |
+ // Get last event's reqID - will use it to find other matching events |
|
| 747 |
+ lastEvent := events[len(events)-1] |
|
| 748 |
+ reqID := reqIDRE.FindStringSubmatch(lastEvent)[1] |
|
| 749 |
+ |
|
| 750 |
+ // Find all events with this same reqID |
|
| 751 |
+ eventList := []string{lastEvent}
|
|
| 752 |
+ for i := len(events) - 2; i >= 0; i-- {
|
|
| 753 |
+ tmpID := reqIDRE.FindStringSubmatch(events[i])[1] |
|
| 754 |
+ if tmpID != reqID {
|
|
| 755 |
+ break |
|
| 756 |
+ } |
|
| 757 |
+ eventList = append(eventList, events[i]) |
|
| 758 |
+ } |
|
| 759 |
+ |
|
| 760 |
+ if len(eventList) != 5 { // create, start, die, commit, destroy
|
|
| 761 |
+ c.Fatalf("Wrong # of matching events - should be 5:\n%q\n", eventList)
|
|
| 762 |
+ } |
|
| 763 |
+} |
| ... | ... |
@@ -92,6 +92,7 @@ func (p *JSONProgress) String() string {
|
| 92 | 92 |
// the created time, where it from, status, ID of the |
| 93 | 93 |
// message. It's used for docker events. |
| 94 | 94 |
type JSONMessage struct {
|
| 95 |
+ RequestID string `json:"reqid,omitempty"` |
|
| 95 | 96 |
Stream string `json:"stream,omitempty"` |
| 96 | 97 |
Status string `json:"status,omitempty"` |
| 97 | 98 |
Progress *JSONProgress `json:"progressDetail,omitempty"` |
| ... | ... |
@@ -127,6 +128,9 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
|
| 127 | 127 |
} else if jm.Time != 0 {
|
| 128 | 128 |
fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(timeutils.RFC3339NanoFixed)) |
| 129 | 129 |
} |
| 130 |
+ if jm.RequestID != "" {
|
|
| 131 |
+ fmt.Fprintf(out, "[reqid: %s] ", jm.RequestID) |
|
| 132 |
+ } |
|
| 130 | 133 |
if jm.ID != "" {
|
| 131 | 134 |
fmt.Fprintf(out, "%s: ", jm.ID) |
| 132 | 135 |
} |
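To make the end-to-end effect concrete, here is a small, self-contained usage sketch against the jsonmessage package as amended above. The field values are invented, and anything not visible in these hunks (the import path, the behavior of fields left at their zero value) is assumed rather than checked against the tree.

package main

import (
	"os"
	"time"

	"github.com/docker/docker/pkg/jsonmessage"
)

func main() {
	// Usage sketch only: the IDs below are made up.
	jm := jsonmessage.JSONMessage{
		RequestID: "4d898196ab76", // twelve lowercase alphanumeric characters, the shape the new test's regexp expects
		ID:        "0fd99bf4e1a5",
		Status:    "create",
		Time:      time.Now().Unix(),
	}

	// With the Display change above, this prints a line shaped like:
	//   2015-09-15T12:00:00.000000000Z [reqid: 4d898196ab76] 0fd99bf4e1a5: create
	_ = jm.Display(os.Stdout, false)
}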