Remove the double reference between containers and exec configurations
by keeping only the container ID.
Signed-off-by: David Calavera <david.calavera@gmail.com>
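The shape of the change, as a minimal standalone sketch: an exec configuration no longer holds a *Container back-reference, it records only the container ID, and the daemon resolves the container from its own store when the exec is used. The types and helper below are simplified, hypothetical stand-ins for illustration, not the daemon's real structs:

package main

import "fmt"

// execConfig mirrors the idea behind exec.Config after this change:
// it records only the owning container's ID, not a *container pointer.
type execConfig struct {
    ID          string
    ContainerID string
}

// container is a stand-in for the daemon's container type.
type container struct {
    ID string
}

// daemon is a stand-in for the Docker daemon, which owns both stores.
type daemon struct {
    containers  map[string]*container
    execConfigs map[string]*execConfig
}

// getExecContainer resolves the container for an exec config on demand,
// the way getExecConfig now calls d.containers.Get(ec.ContainerID).
func (d *daemon) getExecContainer(execID string) (*container, error) {
    ec, ok := d.execConfigs[execID]
    if !ok {
        return nil, fmt.Errorf("no such exec: %s", execID)
    }
    c, ok := d.containers[ec.ContainerID]
    if !ok {
        return nil, fmt.Errorf("container %s for exec %s is not in the daemon's list", ec.ContainerID, execID)
    }
    return c, nil
}

func main() {
    d := &daemon{
        containers:  map[string]*container{"c1": {ID: "c1"}},
        execConfigs: map[string]*execConfig{"e1": {ID: "e1", ContainerID: "c1"}},
    }
    if c, err := d.getExecContainer("e1"); err == nil {
        fmt.Println("exec e1 runs in container", c.ID)
    }
}

One motivation for keeping only the ID is that an exec config can stay in the daemon's store for inspection after its container is gone, and lookups fail cleanly instead of following a stale back-pointer.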
@@ -14,6 +14,7 @@ import (
     "github.com/opencontainers/runc/libcontainer/label"
 
     "github.com/Sirupsen/logrus"
+    "github.com/docker/docker/daemon/exec"
     "github.com/docker/docker/daemon/execdriver"
     "github.com/docker/docker/daemon/logger"
     "github.com/docker/docker/daemon/logger/jsonfilelog"
@@ -62,7 +63,7 @@ type CommonContainer struct {
     hostConfig *runconfig.HostConfig
     command *execdriver.Command
     monitor *containerMonitor
-    execCommands *execStore
+    execCommands *exec.Store
     // logDriver for closing
     logDriver logger.Logger
     logCopier *logger.Copier
@@ -75,7 +76,7 @@ func newBaseContainer(id, root string) *Container {
         CommonContainer: CommonContainer{
             ID: id,
             State: NewState(),
-            execCommands: newExecStore(),
+            execCommands: exec.NewStore(),
             root: root,
             MountPoints: make(map[string]*volume.MountPoint),
             StreamConfig: runconfig.NewStreamConfig(),
@@ -22,6 +22,7 @@ import (
     "github.com/docker/docker/api/types"
     "github.com/docker/docker/cliconfig"
     "github.com/docker/docker/daemon/events"
+    "github.com/docker/docker/daemon/exec"
     "github.com/docker/docker/daemon/execdriver"
     "github.com/docker/docker/daemon/execdriver/execdrivers"
     "github.com/docker/docker/daemon/graphdriver"
@@ -106,7 +107,7 @@ type Daemon struct {
     repository string
     sysInitPath string
     containers *contStore
-    execCommands *execStore
+    execCommands *exec.Store
     graph *graph.Graph
     repositories *graph.TagStore
     idIndex *truncindex.TruncIndex
@@ -790,7 +791,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
     d.ID = trustKey.PublicKey().KeyID()
     d.repository = daemonRepo
     d.containers = &contStore{s: make(map[string]*Container)}
-    d.execCommands = newExecStore()
+    d.execCommands = exec.NewStore()
     d.graph = g
     d.repositories = repositories
     d.idIndex = truncindex.NewTruncIndex([]string{})
@@ -3,91 +3,23 @@ package daemon
 import (
     "io"
     "strings"
-    "sync"
     "time"
 
     "github.com/Sirupsen/logrus"
+    "github.com/docker/docker/daemon/exec"
     "github.com/docker/docker/daemon/execdriver"
     derr "github.com/docker/docker/errors"
     "github.com/docker/docker/pkg/pools"
     "github.com/docker/docker/pkg/promise"
-    "github.com/docker/docker/pkg/stringid"
     "github.com/docker/docker/pkg/stringutils"
     "github.com/docker/docker/runconfig"
 )
 
-// ExecConfig holds the configurations for execs. The Daemon keeps
-// track of both running and finished execs so that they can be
-// examined both during and after completion.
-type ExecConfig struct {
-    sync.Mutex
-    ID string
-    Running bool
-    ExitCode int
-    ProcessConfig *execdriver.ProcessConfig
-    OpenStdin bool
-    OpenStderr bool
-    OpenStdout bool
-    streamConfig *runconfig.StreamConfig
-    Container *Container
-    canRemove bool
-
-    // waitStart will be closed immediately after the exec is really started.
-    waitStart chan struct{}
-}
-
-type execStore struct {
-    s map[string]*ExecConfig
-    sync.RWMutex
-}
-
-func newExecStore() *execStore {
-    return &execStore{s: make(map[string]*ExecConfig, 0)}
-}
-
-func (e *execStore) Add(id string, ExecConfig *ExecConfig) {
-    e.Lock()
-    e.s[id] = ExecConfig
-    e.Unlock()
-}
-
-func (e *execStore) Get(id string) *ExecConfig {
-    e.RLock()
-    res := e.s[id]
-    e.RUnlock()
-    return res
-}
-
-func (e *execStore) Delete(id string) {
-    e.Lock()
-    delete(e.s, id)
-    e.Unlock()
-}
-
-func (e *execStore) List() []string {
-    var IDs []string
-    e.RLock()
-    for id := range e.s {
-        IDs = append(IDs, id)
-    }
-    e.RUnlock()
-    return IDs
-}
-
-func (ExecConfig *ExecConfig) resize(h, w int) error {
-    select {
-    case <-ExecConfig.waitStart:
-    case <-time.After(time.Second):
-        return derr.ErrorCodeExecResize.WithArgs(ExecConfig.ID)
-    }
-    return ExecConfig.ProcessConfig.Terminal.Resize(h, w)
-}
-
-func (d *Daemon) registerExecCommand(ExecConfig *ExecConfig) {
+func (d *Daemon) registerExecCommand(container *Container, config *exec.Config) {
     // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed.
-    ExecConfig.Container.execCommands.Add(ExecConfig.ID, ExecConfig)
+    container.execCommands.Add(config.ID, config)
     // Storing execs in daemon for easy access via remote API.
-    d.execCommands.Add(ExecConfig.ID, ExecConfig)
+    d.execCommands.Add(config.ID, config)
 }
 
 // ExecExists looks up the exec instance and returns a bool if it exists or not.
@@ -101,7 +33,7 @@ func (d *Daemon) ExecExists(name string) (bool, error) {
 
 // getExecConfig looks up the exec instance by name. If the container associated
 // with the exec instance is stopped or paused, it will return an error.
-func (d *Daemon) getExecConfig(name string) (*ExecConfig, error) {
+func (d *Daemon) getExecConfig(name string) (*exec.Config, error) {
     ec := d.execCommands.Get(name)
 
     // If the exec is found but its container is not in the daemon's list of
@@ -110,22 +42,24 @@ func (d *Daemon) getExecConfig(name string) (*ExecConfig, error) {
     // the user sees the same error now that they will after the
     // 5 minute clean-up loop is run which erases old/dead execs.
 
-    if ec != nil && d.containers.Get(ec.Container.ID) != nil {
-        if !ec.Container.IsRunning() {
-            return nil, derr.ErrorCodeContainerNotRunning.WithArgs(ec.Container.ID, ec.Container.State.String())
-        }
-        if ec.Container.isPaused() {
-            return nil, derr.ErrorCodeExecPaused.WithArgs(ec.Container.ID)
+    if ec != nil {
+        if container := d.containers.Get(ec.ContainerID); container != nil {
+            if !container.IsRunning() {
+                return nil, derr.ErrorCodeContainerNotRunning.WithArgs(container.ID, container.State.String())
+            }
+            if container.isPaused() {
+                return nil, derr.ErrorCodeExecPaused.WithArgs(container.ID)
+            }
+            return ec, nil
         }
-        return ec, nil
     }
 
     return nil, derr.ErrorCodeNoExecID.WithArgs(name)
 }
 
-func (d *Daemon) unregisterExecCommand(ExecConfig *ExecConfig) {
-    ExecConfig.Container.execCommands.Delete(ExecConfig.ID)
-    d.execCommands.Delete(ExecConfig.ID)
+func (d *Daemon) unregisterExecCommand(container *Container, execConfig *exec.Config) {
+    container.execCommands.Delete(execConfig.ID)
+    d.execCommands.Delete(execConfig.ID)
 }
 
 func (d *Daemon) getActiveContainer(name string) (*Container, error) {
@@ -162,23 +96,18 @@ func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, erro
     }
     setPlatformSpecificExecProcessConfig(config, container, processConfig)
 
-    ExecConfig := &ExecConfig{
-        ID: stringid.GenerateNonCryptoID(),
-        OpenStdin: config.AttachStdin,
-        OpenStdout: config.AttachStdout,
-        OpenStderr: config.AttachStderr,
-        streamConfig: runconfig.NewStreamConfig(),
-        ProcessConfig: processConfig,
-        Container: container,
-        Running: false,
-        waitStart: make(chan struct{}),
-    }
+    execConfig := exec.NewConfig()
+    execConfig.OpenStdin = config.AttachStdin
+    execConfig.OpenStdout = config.AttachStdout
+    execConfig.OpenStderr = config.AttachStderr
+    execConfig.ProcessConfig = processConfig
+    execConfig.ContainerID = container.ID
 
-    d.registerExecCommand(ExecConfig)
+    d.registerExecCommand(container, execConfig)
 
-    d.LogContainerEvent(container, "exec_create: "+ExecConfig.ProcessConfig.Entrypoint+" "+strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
+    d.LogContainerEvent(container, "exec_create: "+execConfig.ProcessConfig.Entrypoint+" "+strings.Join(execConfig.ProcessConfig.Arguments, " "))
 
-    return ExecConfig.ID, nil
+    return execConfig.ID, nil
 }
 
 // ContainerExecStart starts a previously set up exec instance. The
@@ -202,8 +131,8 @@ func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io.
     ec.Running = true
     ec.Unlock()
 
-    logrus.Debugf("starting exec command %s in container %s", ec.ID, ec.Container.ID)
-    container := ec.Container
+    container := d.containers.Get(ec.ContainerID)
+    logrus.Debugf("starting exec command %s in container %s", ec.ID, container.ID)
     d.LogContainerEvent(container, "exec_start: "+ec.ProcessConfig.Entrypoint+" "+strings.Join(ec.ProcessConfig.Arguments, " "))
 
     if ec.OpenStdin {
@@ -223,12 +152,12 @@ func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io.
     }
 
     if ec.OpenStdin {
-        ec.streamConfig.NewInputPipes()
+        ec.NewInputPipes()
     } else {
-        ec.streamConfig.NewNopInputPipe()
+        ec.NewNopInputPipe()
     }
 
-    attachErr := attach(ec.streamConfig, ec.OpenStdin, true, ec.ProcessConfig.Tty, cStdin, cStdout, cStderr)
+    attachErr := attach(ec.StreamConfig, ec.OpenStdin, true, ec.ProcessConfig.Tty, cStdin, cStdout, cStderr)
 
     execErr := make(chan error)
 
@@ -263,19 +192,19 @@ func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io.
 }
 
 // Exec calls the underlying exec driver to run
-func (d *Daemon) Exec(c *Container, ExecConfig *ExecConfig, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (int, error) {
+func (d *Daemon) Exec(c *Container, execConfig *exec.Config, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (int, error) {
     hooks := execdriver.Hooks{
         Start: startCallback,
     }
-    exitStatus, err := d.execDriver.Exec(c.command, ExecConfig.ProcessConfig, pipes, hooks)
+    exitStatus, err := d.execDriver.Exec(c.command, execConfig.ProcessConfig, pipes, hooks)
 
     // On err, make sure we don't leave ExitCode at zero
     if err != nil && exitStatus == 0 {
         exitStatus = 128
     }
 
-    ExecConfig.ExitCode = exitStatus
-    ExecConfig.Running = false
+    execConfig.ExitCode = exitStatus
+    execConfig.Running = false
 
     return exitStatus, err
 }
@@ -288,13 +217,13 @@ func (d *Daemon) execCommandGC() {
         cleaned int
         liveExecCommands = d.containerExecIds()
     )
-    for id, config := range d.execCommands.s {
-        if config.canRemove {
+    for id, config := range d.execCommands.Commands() {
+        if config.CanRemove {
             cleaned++
             d.execCommands.Delete(id)
         } else {
            if _, exists := liveExecCommands[id]; !exists {
-                config.canRemove = true
+                config.CanRemove = true
            }
        }
    }
@@ -316,7 +245,7 @@ func (d *Daemon) containerExecIds() map[string]struct{} {
     return ids
 }
 
-func (d *Daemon) containerExec(container *Container, ec *ExecConfig) error {
+func (d *Daemon) containerExec(container *Container, ec *exec.Config) error {
     container.Lock()
     defer container.Unlock()
 
@@ -329,43 +258,35 @@ func (d *Daemon) containerExec(container *Container, ec *ExecConfig) error {
                 c.Close()
             }
         }
-        close(ec.waitStart)
+        ec.Close()
         return nil
     }
 
     // We use a callback here instead of a goroutine and an chan for
     // synchronization purposes
     cErr := promise.Go(func() error { return d.monitorExec(container, ec, callback) })
-
-    // Exec should not return until the process is actually running
-    select {
-    case <-ec.waitStart:
-    case err := <-cErr:
-        return err
-    }
-
-    return nil
+    return ec.Wait(cErr)
 }
 
-func (d *Daemon) monitorExec(container *Container, ExecConfig *ExecConfig, callback execdriver.DriverCallback) error {
-    pipes := execdriver.NewPipes(ExecConfig.streamConfig.Stdin(), ExecConfig.streamConfig.Stdout(), ExecConfig.streamConfig.Stderr(), ExecConfig.OpenStdin)
-    exitCode, err := d.Exec(container, ExecConfig, pipes, callback)
+func (d *Daemon) monitorExec(container *Container, execConfig *exec.Config, callback execdriver.DriverCallback) error {
+    pipes := execdriver.NewPipes(execConfig.Stdin(), execConfig.Stdout(), execConfig.Stderr(), execConfig.OpenStdin)
+    exitCode, err := d.Exec(container, execConfig, pipes, callback)
     if err != nil {
         logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
     }
     logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
 
-    if err := ExecConfig.streamConfig.CloseStreams(); err != nil {
+    if err := execConfig.CloseStreams(); err != nil {
         logrus.Errorf("%s: %s", container.ID, err)
     }
 
-    if ExecConfig.ProcessConfig.Terminal != nil {
-        if err := ExecConfig.ProcessConfig.Terminal.Close(); err != nil {
+    if execConfig.ProcessConfig.Terminal != nil {
+        if err := execConfig.ProcessConfig.Terminal.Close(); err != nil {
             logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
         }
     }
     // remove the exec command from the container's store only and not the
     // daemon's store so that the exec command can be inspected.
-    container.execCommands.Delete(ExecConfig.ID)
+    container.execCommands.Delete(execConfig.ID)
     return err
 }
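Note on the synchronization in the hunk above: containerExec no longer selects on the waitStart channel itself. The exec start callback calls ec.Close(), and containerExec returns ec.Wait(cErr), which blocks until the process has really started or the monitor goroutine reports an error first. A self-contained sketch of that channel pattern, with hypothetical names rather than the daemon's real types:

package main

import (
    "fmt"
    "time"
)

// startGate mimics the waitStart channel inside exec.Config: Close signals
// that the process has really started, Wait blocks until that signal or
// until the monitor goroutine reports an error.
type startGate struct {
    started chan struct{}
}

func newStartGate() *startGate {
    return &startGate{started: make(chan struct{})}
}

// Close plays the role of ec.Close() in the exec start callback.
func (g *startGate) Close() { close(g.started) }

// Wait plays the role of ec.Wait(cErr) at the end of containerExec.
func (g *startGate) Wait(runErr chan error) error {
    select {
    case <-g.started:
        return nil
    case err := <-runErr:
        return err
    }
}

func main() {
    g := newStartGate()
    runErr := make(chan error, 1)

    // Stand-in for promise.Go(func() error { return d.monitorExec(...) }):
    // the runner signals "started" via the callback, then keeps running.
    go func() {
        time.Sleep(10 * time.Millisecond) // pretend the driver is starting the process
        g.Close()                         // an early failure would send on runErr instead
    }()

    if err := g.Wait(runErr); err != nil {
        fmt.Println("exec did not start:", err)
        return
    }
    fmt.Println("exec is running")
}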
new file mode 100644
@@ -0,0 +1,115 @@
+package exec
+
+import (
+    "sync"
+    "time"
+
+    "github.com/docker/docker/daemon/execdriver"
+    derr "github.com/docker/docker/errors"
+    "github.com/docker/docker/pkg/stringid"
+    "github.com/docker/docker/runconfig"
+)
+
+// Config holds the configurations for execs. The Daemon keeps
+// track of both running and finished execs so that they can be
+// examined both during and after completion.
+type Config struct {
+    sync.Mutex
+    *runconfig.StreamConfig
+    ID string
+    Running bool
+    ExitCode int
+    ProcessConfig *execdriver.ProcessConfig
+    OpenStdin bool
+    OpenStderr bool
+    OpenStdout bool
+    CanRemove bool
+    ContainerID string
+
+    // waitStart will be closed immediately after the exec is really started.
+    waitStart chan struct{}
+}
+
+// NewConfig initializes a new exec configuration.
+func NewConfig() *Config {
+    return &Config{
+        ID: stringid.GenerateNonCryptoID(),
+        StreamConfig: runconfig.NewStreamConfig(),
+        waitStart: make(chan struct{}),
+    }
+}
+
+// Store keeps track of the exec configurations.
+type Store struct {
+    commands map[string]*Config
+    sync.RWMutex
+}
+
+// NewStore initializes a new exec store.
+func NewStore() *Store {
+    return &Store{commands: make(map[string]*Config, 0)}
+}
+
+// Commands returns the exec configurations in the store.
+func (e *Store) Commands() map[string]*Config {
+    return e.commands
+}
+
+// Add adds a new exec configuration to the store.
+func (e *Store) Add(id string, Config *Config) {
+    e.Lock()
+    e.commands[id] = Config
+    e.Unlock()
+}
+
+// Get returns an exec configuration by its id.
+func (e *Store) Get(id string) *Config {
+    e.RLock()
+    res := e.commands[id]
+    e.RUnlock()
+    return res
+}
+
+// Delete removes an exec configuration from the store.
+func (e *Store) Delete(id string) {
+    e.Lock()
+    delete(e.commands, id)
+    e.Unlock()
+}
+
+// List returns the list of exec ids in the store.
+func (e *Store) List() []string {
+    var IDs []string
+    e.RLock()
+    for id := range e.commands {
+        IDs = append(IDs, id)
+    }
+    e.RUnlock()
+    return IDs
+}
+
+// Wait waits until the exec process finishes or there is an error in the error channel.
+func (c *Config) Wait(cErr chan error) error {
+    // Exec should not return until the process is actually running
+    select {
+    case <-c.waitStart:
+    case err := <-cErr:
+        return err
+    }
+    return nil
+}
+
+// Close closes the wait channel for the process.
+func (c *Config) Close() {
+    close(c.waitStart)
+}
+
+// Resize changes the size of the terminal for the exec process.
+func (c *Config) Resize(h, w int) error {
+    select {
+    case <-c.waitStart:
+    case <-time.After(time.Second):
+        return derr.ErrorCodeExecResize.WithArgs(c.ID)
+    }
+    return c.ProcessConfig.Terminal.Resize(h, w)
+}
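For reference, an illustrative usage sketch of the new daemon/exec package added above, roughly mirroring what NewDaemon, ContainerExecCreate and execCommandGC do with it. It is not part of the patch and assumes the vendored import path shown in the diff:

package main

import (
    "fmt"

    "github.com/docker/docker/daemon/exec"
)

func main() {
    // Build a store and a config the way the daemon does (simplified;
    // no process config is attached here).
    store := exec.NewStore()

    cfg := exec.NewConfig()
    cfg.ContainerID = "some-container-id" // illustrative value
    cfg.OpenStdout = true
    store.Add(cfg.ID, cfg)

    // Lookup and listing, as used by getExecConfig and the remote API.
    if got := store.Get(cfg.ID); got != nil {
        fmt.Println("exec", got.ID, "belongs to container", got.ContainerID)
    }
    fmt.Println("known execs:", store.List())

    // Garbage collection drops execs marked removable, as in execCommandGC.
    cfg.CanRemove = true
    for id, c := range store.Commands() {
        if c.CanRemove {
            store.Delete(id)
        }
    }
    fmt.Println("after GC:", store.List())
}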
@@ -6,6 +6,7 @@ import (
 
     "github.com/docker/docker/api/types"
     "github.com/docker/docker/api/types/versions/v1p20"
+    "github.com/docker/docker/daemon/exec"
     "github.com/docker/docker/daemon/network"
 )
 
@@ -159,7 +160,7 @@ func (daemon *Daemon) getInspectData(container *Container, size bool) (*types.Co
 
 // ContainerExecInspect returns low-level information about the exec
 // command. An error is returned if the exec cannot be found.
-func (daemon *Daemon) ContainerExecInspect(id string) (*ExecConfig, error) {
+func (daemon *Daemon) ContainerExecInspect(id string) (*exec.Config, error) {
     eConfig, err := daemon.getExecConfig(id)
     if err != nil {
         return nil, err
@@ -160,8 +160,8 @@ func (daemon *Daemon) Cleanup(container *Container) {
 
     daemon.conditionalUnmountOnCleanup(container)
 
-    for _, eConfig := range container.execCommands.s {
-        daemon.unregisterExecCommand(eConfig)
+    for _, eConfig := range container.execCommands.Commands() {
+        daemon.unregisterExecCommand(container, eConfig)
     }
 
     if err := container.unmountVolumes(false); err != nil {