Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
| ... | ... |
@@ -49,17 +49,17 @@ func archivePath(i interface{}, src string, opts *archive.TarOptions, root strin
|
| 49 | 49 |
// ContainerCopy performs a deprecated operation of archiving the resource at |
| 50 | 50 |
// the specified path in the container identified by the given name. |
| 51 | 51 |
func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
|
| 52 |
- container, err := daemon.GetContainer(name) |
|
| 52 |
+ ctr, err := daemon.GetContainer(name) |
|
| 53 | 53 |
if err != nil {
|
| 54 | 54 |
return nil, err |
| 55 | 55 |
} |
| 56 | 56 |
|
| 57 | 57 |
// Make sure an online file-system operation is permitted. |
| 58 |
- if err := daemon.isOnlineFSOperationPermitted(container); err != nil {
|
|
| 58 |
+ if err := daemon.isOnlineFSOperationPermitted(ctr); err != nil {
|
|
| 59 | 59 |
return nil, errdefs.System(err) |
| 60 | 60 |
} |
| 61 | 61 |
|
| 62 |
- data, err := daemon.containerCopy(container, res) |
|
| 62 |
+ data, err := daemon.containerCopy(ctr, res) |
|
| 63 | 63 |
if err == nil {
|
| 64 | 64 |
return data, nil |
| 65 | 65 |
} |
| ... | ... |
@@ -73,17 +73,17 @@ func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, err |
| 73 | 73 |
// ContainerStatPath stats the filesystem resource at the specified path in the |
| 74 | 74 |
// container identified by the given name. |
| 75 | 75 |
func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) {
|
| 76 |
- container, err := daemon.GetContainer(name) |
|
| 76 |
+ ctr, err := daemon.GetContainer(name) |
|
| 77 | 77 |
if err != nil {
|
| 78 | 78 |
return nil, err |
| 79 | 79 |
} |
| 80 | 80 |
|
| 81 | 81 |
// Make sure an online file-system operation is permitted. |
| 82 |
- if err := daemon.isOnlineFSOperationPermitted(container); err != nil {
|
|
| 82 |
+ if err := daemon.isOnlineFSOperationPermitted(ctr); err != nil {
|
|
| 83 | 83 |
return nil, errdefs.System(err) |
| 84 | 84 |
} |
| 85 | 85 |
|
| 86 |
- stat, err = daemon.containerStatPath(container, path) |
|
| 86 |
+ stat, err = daemon.containerStatPath(ctr, path) |
|
| 87 | 87 |
if err == nil {
|
| 88 | 88 |
return stat, nil |
| 89 | 89 |
} |
| ... | ... |
@@ -98,17 +98,17 @@ func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.C |
| 98 | 98 |
// specified path in the container identified by the given name. Returns a |
| 99 | 99 |
// tar archive of the resource and whether it was a directory or a single file. |
| 100 | 100 |
func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
|
| 101 |
- container, err := daemon.GetContainer(name) |
|
| 101 |
+ ctr, err := daemon.GetContainer(name) |
|
| 102 | 102 |
if err != nil {
|
| 103 | 103 |
return nil, nil, err |
| 104 | 104 |
} |
| 105 | 105 |
|
| 106 | 106 |
// Make sure an online file-system operation is permitted. |
| 107 |
- if err := daemon.isOnlineFSOperationPermitted(container); err != nil {
|
|
| 107 |
+ if err := daemon.isOnlineFSOperationPermitted(ctr); err != nil {
|
|
| 108 | 108 |
return nil, nil, errdefs.System(err) |
| 109 | 109 |
} |
| 110 | 110 |
|
| 111 |
- content, stat, err = daemon.containerArchivePath(container, path) |
|
| 111 |
+ content, stat, err = daemon.containerArchivePath(ctr, path) |
|
| 112 | 112 |
if err == nil {
|
| 113 | 113 |
return content, stat, nil |
| 114 | 114 |
} |
| ... | ... |
@@ -126,17 +126,17 @@ func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io |
| 126 | 126 |
// be an error if unpacking the given content would cause an existing directory |
| 127 | 127 |
// to be replaced with a non-directory and vice versa. |
| 128 | 128 |
func (daemon *Daemon) ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error {
|
| 129 |
- container, err := daemon.GetContainer(name) |
|
| 129 |
+ ctr, err := daemon.GetContainer(name) |
|
| 130 | 130 |
if err != nil {
|
| 131 | 131 |
return err |
| 132 | 132 |
} |
| 133 | 133 |
|
| 134 | 134 |
// Make sure an online file-system operation is permitted. |
| 135 |
- if err := daemon.isOnlineFSOperationPermitted(container); err != nil {
|
|
| 135 |
+ if err := daemon.isOnlineFSOperationPermitted(ctr); err != nil {
|
|
| 136 | 136 |
return errdefs.System(err) |
| 137 | 137 |
} |
| 138 | 138 |
|
| 139 |
- err = daemon.containerExtractToDir(container, path, copyUIDGID, noOverwriteDirNonDir, content) |
|
| 139 |
+ err = daemon.containerExtractToDir(ctr, path, copyUIDGID, noOverwriteDirNonDir, content) |
|
| 140 | 140 |
if err == nil {
|
| 141 | 141 |
return nil |
| 142 | 142 |
} |
| ... | ... |
@@ -433,7 +433,7 @@ func (daemon *Daemon) containerCopy(container *container.Container, resource str |
| 433 | 433 |
basePath = d |
| 434 | 434 |
filter = []string{f}
|
| 435 | 435 |
} |
| 436 |
- archive, err := archivePath(driver, basePath, &archive.TarOptions{
|
|
| 436 |
+ archv, err := archivePath(driver, basePath, &archive.TarOptions{
|
|
| 437 | 437 |
Compression: archive.Uncompressed, |
| 438 | 438 |
IncludeFiles: filter, |
| 439 | 439 |
}, container.BaseFS.Path()) |
| ... | ... |
@@ -441,8 +441,8 @@ func (daemon *Daemon) containerCopy(container *container.Container, resource str |
| 441 | 441 |
return nil, err |
| 442 | 442 |
} |
| 443 | 443 |
|
| 444 |
- reader := ioutils.NewReadCloserWrapper(archive, func() error {
|
|
| 445 |
- err := archive.Close() |
|
| 444 |
+ reader := ioutils.NewReadCloserWrapper(archv, func() error {
|
|
| 445 |
+ err := archv.Close() |
|
| 446 | 446 |
container.DetachAndUnmount(daemon.LogVolumeEvent) |
| 447 | 447 |
daemon.Unmount(container) |
| 448 | 448 |
container.Unlock() |
| ... | ... |
@@ -27,15 +27,15 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA |
| 27 | 27 |
} |
| 28 | 28 |
} |
| 29 | 29 |
|
| 30 |
- container, err := daemon.GetContainer(prefixOrName) |
|
| 30 |
+ ctr, err := daemon.GetContainer(prefixOrName) |
|
| 31 | 31 |
if err != nil {
|
| 32 | 32 |
return err |
| 33 | 33 |
} |
| 34 |
- if container.IsPaused() {
|
|
| 34 |
+ if ctr.IsPaused() {
|
|
| 35 | 35 |
err := fmt.Errorf("container %s is paused, unpause the container before attach", prefixOrName)
|
| 36 | 36 |
return errdefs.Conflict(err) |
| 37 | 37 |
} |
| 38 |
- if container.IsRestarting() {
|
|
| 38 |
+ if ctr.IsRestarting() {
|
|
| 39 | 39 |
err := fmt.Errorf("container %s is restarting, wait until the container is running", prefixOrName)
|
| 40 | 40 |
return errdefs.Conflict(err) |
| 41 | 41 |
} |
| ... | ... |
@@ -44,11 +44,11 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA |
| 44 | 44 |
UseStdin: c.UseStdin, |
| 45 | 45 |
UseStdout: c.UseStdout, |
| 46 | 46 |
UseStderr: c.UseStderr, |
| 47 |
- TTY: container.Config.Tty, |
|
| 48 |
- CloseStdin: container.Config.StdinOnce, |
|
| 47 |
+ TTY: ctr.Config.Tty, |
|
| 48 |
+ CloseStdin: ctr.Config.StdinOnce, |
|
| 49 | 49 |
DetachKeys: keys, |
| 50 | 50 |
} |
| 51 |
- container.StreamConfig.AttachStreams(&cfg) |
|
| 51 |
+ ctr.StreamConfig.AttachStreams(&cfg) |
|
| 52 | 52 |
|
| 53 | 53 |
inStream, outStream, errStream, err := c.GetStreams() |
| 54 | 54 |
if err != nil {
|
| ... | ... |
@@ -56,7 +56,7 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA |
| 56 | 56 |
} |
| 57 | 57 |
defer inStream.Close() |
| 58 | 58 |
|
| 59 |
- if !container.Config.Tty && c.MuxStreams {
|
|
| 59 |
+ if !ctr.Config.Tty && c.MuxStreams {
|
|
| 60 | 60 |
errStream = stdcopy.NewStdWriter(errStream, stdcopy.Stderr) |
| 61 | 61 |
outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) |
| 62 | 62 |
} |
| ... | ... |
@@ -71,7 +71,7 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA |
| 71 | 71 |
cfg.Stderr = errStream |
| 72 | 72 |
} |
| 73 | 73 |
|
| 74 |
- if err := daemon.containerAttach(container, &cfg, c.Logs, c.Stream); err != nil {
|
|
| 74 |
+ if err := daemon.containerAttach(ctr, &cfg, c.Logs, c.Stream); err != nil {
|
|
| 75 | 75 |
fmt.Fprintf(outStream, "Error attaching: %s\n", err) |
| 76 | 76 |
} |
| 77 | 77 |
return nil |
| ... | ... |
@@ -79,7 +79,7 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA |
| 79 | 79 |
|
| 80 | 80 |
// ContainerAttachRaw attaches the provided streams to the container's stdio |
| 81 | 81 |
func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, doStream bool, attached chan struct{}) error {
|
| 82 |
- container, err := daemon.GetContainer(prefixOrName) |
|
| 82 |
+ ctr, err := daemon.GetContainer(prefixOrName) |
|
| 83 | 83 |
if err != nil {
|
| 84 | 84 |
return err |
| 85 | 85 |
} |
| ... | ... |
@@ -87,10 +87,10 @@ func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadClose |
| 87 | 87 |
UseStdin: stdin != nil, |
| 88 | 88 |
UseStdout: stdout != nil, |
| 89 | 89 |
UseStderr: stderr != nil, |
| 90 |
- TTY: container.Config.Tty, |
|
| 91 |
- CloseStdin: container.Config.StdinOnce, |
|
| 90 |
+ TTY: ctr.Config.Tty, |
|
| 91 |
+ CloseStdin: ctr.Config.StdinOnce, |
|
| 92 | 92 |
} |
| 93 |
- container.StreamConfig.AttachStreams(&cfg) |
|
| 93 |
+ ctr.StreamConfig.AttachStreams(&cfg) |
|
| 94 | 94 |
close(attached) |
| 95 | 95 |
if cfg.UseStdin {
|
| 96 | 96 |
cfg.Stdin = stdin |
| ... | ... |
@@ -102,7 +102,7 @@ func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadClose |
| 102 | 102 |
cfg.Stderr = stderr |
| 103 | 103 |
} |
| 104 | 104 |
|
| 105 |
- return daemon.containerAttach(container, &cfg, false, doStream) |
|
| 105 |
+ return daemon.containerAttach(ctr, &cfg, false, doStream) |
|
| 106 | 106 |
} |
| 107 | 107 |
|
| 108 | 108 |
func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.AttachConfig, logs, doStream bool) error {
|
| ... | ... |
@@ -92,20 +92,20 @@ func (daemon *Daemon) containerRoot(id string) string {
|
| 92 | 92 |
// Load reads the contents of a container from disk |
| 93 | 93 |
// This is typically done at startup. |
| 94 | 94 |
func (daemon *Daemon) load(id string) (*container.Container, error) {
|
| 95 |
- container := daemon.newBaseContainer(id) |
|
| 95 |
+ ctr := daemon.newBaseContainer(id) |
|
| 96 | 96 |
|
| 97 |
- if err := container.FromDisk(); err != nil {
|
|
| 97 |
+ if err := ctr.FromDisk(); err != nil {
|
|
| 98 | 98 |
return nil, err |
| 99 | 99 |
} |
| 100 |
- if err := label.ReserveLabel(container.ProcessLabel); err != nil {
|
|
| 100 |
+ if err := label.ReserveLabel(ctr.ProcessLabel); err != nil {
|
|
| 101 | 101 |
return nil, err |
| 102 | 102 |
} |
| 103 | 103 |
|
| 104 |
- if container.ID != id {
|
|
| 105 |
- return container, fmt.Errorf("Container %s is stored at %s", container.ID, id)
|
|
| 104 |
+ if ctr.ID != id {
|
|
| 105 |
+ return ctr, fmt.Errorf("Container %s is stored at %s", ctr.ID, id)
|
|
| 106 | 106 |
} |
| 107 | 107 |
|
| 108 |
- return container, nil |
|
| 108 |
+ return ctr, nil |
|
| 109 | 109 |
} |
| 110 | 110 |
|
| 111 | 111 |
// Register makes a container object usable by the daemon as <container.ID> |
| ... | ... |
@@ -342,12 +342,12 @@ func (daemon *Daemon) updateNetwork(container *container.Container) error {
|
| 342 | 342 |
return nil |
| 343 | 343 |
} |
| 344 | 344 |
|
| 345 |
- options, err := daemon.buildSandboxOptions(container) |
|
| 345 |
+ sbOptions, err := daemon.buildSandboxOptions(container) |
|
| 346 | 346 |
if err != nil {
|
| 347 | 347 |
return fmt.Errorf("Update network failed: %v", err)
|
| 348 | 348 |
} |
| 349 | 349 |
|
| 350 |
- if err := sb.Refresh(options...); err != nil {
|
|
| 350 |
+ if err := sb.Refresh(sbOptions...); err != nil {
|
|
| 351 | 351 |
return fmt.Errorf("Update network failed: Failure in refresh sandbox %s: %v", sid, err)
|
| 352 | 352 |
} |
| 353 | 353 |
|
| ... | ... |
@@ -378,7 +378,7 @@ func (daemon *Daemon) findAndAttachNetwork(container *container.Container, idOrN |
| 378 | 378 |
if container.NetworkSettings.Networks != nil {
|
| 379 | 379 |
networkName := n.Name() |
| 380 | 380 |
containerName := strings.TrimPrefix(container.Name, "/") |
| 381 |
- if network, ok := container.NetworkSettings.Networks[networkName]; ok && network.EndpointID != "" {
|
|
| 381 |
+ if nw, ok := container.NetworkSettings.Networks[networkName]; ok && nw.EndpointID != "" {
|
|
| 382 | 382 |
err := fmt.Errorf("%s is already attached to network %s", containerName, networkName)
|
| 383 | 383 |
return n, nil, errdefs.Conflict(err) |
| 384 | 384 |
} |
| ... | ... |
@@ -584,11 +584,11 @@ func (daemon *Daemon) allocateNetwork(container *container.Container) error {
|
| 584 | 584 |
// create its network sandbox now if not present |
| 585 | 585 |
if len(networks) == 0 {
|
| 586 | 586 |
if nil == daemon.getNetworkSandbox(container) {
|
| 587 |
- options, err := daemon.buildSandboxOptions(container) |
|
| 587 |
+ sbOptions, err := daemon.buildSandboxOptions(container) |
|
| 588 | 588 |
if err != nil {
|
| 589 | 589 |
return err |
| 590 | 590 |
} |
| 591 |
- sb, err := daemon.netController.NewSandbox(container.ID, options...) |
|
| 591 |
+ sb, err := daemon.netController.NewSandbox(container.ID, sbOptions...) |
|
| 592 | 592 |
if err != nil {
|
| 593 | 593 |
return err |
| 594 | 594 |
} |
| ... | ... |
@@ -802,11 +802,11 @@ func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName |
| 802 | 802 |
} |
| 803 | 803 |
|
| 804 | 804 |
if sb == nil {
|
| 805 |
- options, err := daemon.buildSandboxOptions(container) |
|
| 805 |
+ sbOptions, err := daemon.buildSandboxOptions(container) |
|
| 806 | 806 |
if err != nil {
|
| 807 | 807 |
return err |
| 808 | 808 |
} |
| 809 |
- sb, err = controller.NewSandbox(container.ID, options...) |
|
| 809 |
+ sb, err = controller.NewSandbox(container.ID, sbOptions...) |
|
| 810 | 810 |
if err != nil {
|
| 811 | 811 |
return err |
| 812 | 812 |
} |
| ... | ... |
@@ -1135,11 +1135,11 @@ func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, netw |
| 1135 | 1135 |
|
| 1136 | 1136 |
// ActivateContainerServiceBinding puts this container into load balancer active rotation and DNS response |
| 1137 | 1137 |
func (daemon *Daemon) ActivateContainerServiceBinding(containerName string) error {
|
| 1138 |
- container, err := daemon.GetContainer(containerName) |
|
| 1138 |
+ ctr, err := daemon.GetContainer(containerName) |
|
| 1139 | 1139 |
if err != nil {
|
| 1140 | 1140 |
return err |
| 1141 | 1141 |
} |
| 1142 |
- sb := daemon.getNetworkSandbox(container) |
|
| 1142 |
+ sb := daemon.getNetworkSandbox(ctr) |
|
| 1143 | 1143 |
if sb == nil {
|
| 1144 | 1144 |
return fmt.Errorf("network sandbox does not exist for container %s", containerName)
|
| 1145 | 1145 |
} |
| ... | ... |
@@ -1148,11 +1148,11 @@ func (daemon *Daemon) ActivateContainerServiceBinding(containerName string) erro |
| 1148 | 1148 |
|
| 1149 | 1149 |
// DeactivateContainerServiceBinding removes this container from load balancer active rotation, and DNS response |
| 1150 | 1150 |
func (daemon *Daemon) DeactivateContainerServiceBinding(containerName string) error {
|
| 1151 |
- container, err := daemon.GetContainer(containerName) |
|
| 1151 |
+ ctr, err := daemon.GetContainer(containerName) |
|
| 1152 | 1152 |
if err != nil {
|
| 1153 | 1153 |
return err |
| 1154 | 1154 |
} |
| 1155 |
- sb := daemon.getNetworkSandbox(container) |
|
| 1155 |
+ sb := daemon.getNetworkSandbox(ctr) |
|
| 1156 | 1156 |
if sb == nil {
|
| 1157 | 1157 |
// If the network sandbox is not found, then there is nothing to deactivate |
| 1158 | 1158 |
logrus.Debugf("Could not find network sandbox for container %s on service binding deactivation request", containerName)
|
| ... | ... |
@@ -61,33 +61,33 @@ func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]s |
| 61 | 61 |
func (daemon *Daemon) getIpcContainer(id string) (*container.Container, error) {
|
| 62 | 62 |
errMsg := "can't join IPC of container " + id |
| 63 | 63 |
// Check the container exists |
| 64 |
- container, err := daemon.GetContainer(id) |
|
| 64 |
+ ctr, err := daemon.GetContainer(id) |
|
| 65 | 65 |
if err != nil {
|
| 66 | 66 |
return nil, errors.Wrap(err, errMsg) |
| 67 | 67 |
} |
| 68 | 68 |
// Check the container is running and not restarting |
| 69 |
- if err := daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting); err != nil {
|
|
| 69 |
+ if err := daemon.checkContainer(ctr, containerIsRunning, containerIsNotRestarting); err != nil {
|
|
| 70 | 70 |
return nil, errors.Wrap(err, errMsg) |
| 71 | 71 |
} |
| 72 | 72 |
// Check the container ipc is shareable |
| 73 |
- if st, err := os.Stat(container.ShmPath); err != nil || !st.IsDir() {
|
|
| 73 |
+ if st, err := os.Stat(ctr.ShmPath); err != nil || !st.IsDir() {
|
|
| 74 | 74 |
if err == nil || os.IsNotExist(err) {
|
| 75 | 75 |
return nil, errors.New(errMsg + ": non-shareable IPC (hint: use IpcMode:shareable for the donor container)") |
| 76 | 76 |
} |
| 77 | 77 |
// stat() failed? |
| 78 |
- return nil, errors.Wrap(err, errMsg+": unexpected error from stat "+container.ShmPath) |
|
| 78 |
+ return nil, errors.Wrap(err, errMsg+": unexpected error from stat "+ctr.ShmPath) |
|
| 79 | 79 |
} |
| 80 | 80 |
|
| 81 |
- return container, nil |
|
| 81 |
+ return ctr, nil |
|
| 82 | 82 |
} |
| 83 | 83 |
|
| 84 |
-func (daemon *Daemon) getPidContainer(container *container.Container) (*container.Container, error) {
|
|
| 85 |
- containerID := container.HostConfig.PidMode.Container() |
|
| 86 |
- container, err := daemon.GetContainer(containerID) |
|
| 84 |
+func (daemon *Daemon) getPidContainer(ctr *container.Container) (*container.Container, error) {
|
|
| 85 |
+ containerID := ctr.HostConfig.PidMode.Container() |
|
| 86 |
+ ctr, err := daemon.GetContainer(containerID) |
|
| 87 | 87 |
if err != nil {
|
| 88 | 88 |
return nil, errors.Wrapf(err, "cannot join PID of a non running container: %s", containerID) |
| 89 | 89 |
} |
| 90 |
- return container, daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting) |
|
| 90 |
+ return ctr, daemon.checkContainer(ctr, containerIsRunning, containerIsNotRestarting) |
|
| 91 | 91 |
} |
| 92 | 92 |
|
| 93 | 93 |
func containerIsRunning(c *container.Container) error {
|
| ... | ... |
@@ -90,7 +90,7 @@ func (daemon *Daemon) containerCreate(opts createOpts) (containertypes.Container |
| 90 | 90 |
return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, errdefs.InvalidParameter(err)
|
| 91 | 91 |
} |
| 92 | 92 |
|
| 93 |
- container, err := daemon.create(opts) |
|
| 93 |
+ ctr, err := daemon.create(opts) |
|
| 94 | 94 |
if err != nil {
|
| 95 | 95 |
return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err
|
| 96 | 96 |
} |
| ... | ... |
@@ -100,16 +100,16 @@ func (daemon *Daemon) containerCreate(opts createOpts) (containertypes.Container |
| 100 | 100 |
warnings = make([]string, 0) // Create an empty slice to avoid https://github.com/moby/moby/issues/38222 |
| 101 | 101 |
} |
| 102 | 102 |
|
| 103 |
- return containertypes.ContainerCreateCreatedBody{ID: container.ID, Warnings: warnings}, nil
|
|
| 103 |
+ return containertypes.ContainerCreateCreatedBody{ID: ctr.ID, Warnings: warnings}, nil
|
|
| 104 | 104 |
} |
| 105 | 105 |
|
| 106 | 106 |
// Create creates a new container from the given configuration with a given name. |
| 107 | 107 |
func (daemon *Daemon) create(opts createOpts) (retC *container.Container, retErr error) {
|
| 108 | 108 |
var ( |
| 109 |
- container *container.Container |
|
| 110 |
- img *image.Image |
|
| 111 |
- imgID image.ID |
|
| 112 |
- err error |
|
| 109 |
+ ctr *container.Container |
|
| 110 |
+ img *image.Image |
|
| 111 |
+ imgID image.ID |
|
| 112 |
+ err error |
|
| 113 | 113 |
) |
| 114 | 114 |
|
| 115 | 115 |
os := runtime.GOOS |
| ... | ... |
@@ -153,22 +153,22 @@ func (daemon *Daemon) create(opts createOpts) (retC *container.Container, retErr |
| 153 | 153 |
return nil, errdefs.InvalidParameter(err) |
| 154 | 154 |
} |
| 155 | 155 |
|
| 156 |
- if container, err = daemon.newContainer(opts.params.Name, os, opts.params.Config, opts.params.HostConfig, imgID, opts.managed); err != nil {
|
|
| 156 |
+ if ctr, err = daemon.newContainer(opts.params.Name, os, opts.params.Config, opts.params.HostConfig, imgID, opts.managed); err != nil {
|
|
| 157 | 157 |
return nil, err |
| 158 | 158 |
} |
| 159 | 159 |
defer func() {
|
| 160 | 160 |
if retErr != nil {
|
| 161 |
- if err := daemon.cleanupContainer(container, true, true); err != nil {
|
|
| 161 |
+ if err := daemon.cleanupContainer(ctr, true, true); err != nil {
|
|
| 162 | 162 |
logrus.Errorf("failed to cleanup container on create error: %v", err)
|
| 163 | 163 |
} |
| 164 | 164 |
} |
| 165 | 165 |
}() |
| 166 | 166 |
|
| 167 |
- if err := daemon.setSecurityOptions(container, opts.params.HostConfig); err != nil {
|
|
| 167 |
+ if err := daemon.setSecurityOptions(ctr, opts.params.HostConfig); err != nil {
|
|
| 168 | 168 |
return nil, err |
| 169 | 169 |
} |
| 170 | 170 |
|
| 171 |
- container.HostConfig.StorageOpt = opts.params.HostConfig.StorageOpt |
|
| 171 |
+ ctr.HostConfig.StorageOpt = opts.params.HostConfig.StorageOpt |
|
| 172 | 172 |
|
| 173 | 173 |
// Fixes: https://github.com/moby/moby/issues/34074 and |
| 174 | 174 |
// https://github.com/docker/for-win/issues/999. |
| ... | ... |
@@ -176,38 +176,38 @@ func (daemon *Daemon) create(opts createOpts) (retC *container.Container, retErr |
| 176 | 176 |
// do this on Windows as there's no effective sandbox size limit other than |
| 177 | 177 |
// physical on Linux. |
| 178 | 178 |
if isWindows {
|
| 179 |
- if container.HostConfig.StorageOpt == nil {
|
|
| 180 |
- container.HostConfig.StorageOpt = make(map[string]string) |
|
| 179 |
+ if ctr.HostConfig.StorageOpt == nil {
|
|
| 180 |
+ ctr.HostConfig.StorageOpt = make(map[string]string) |
|
| 181 | 181 |
} |
| 182 | 182 |
for _, v := range daemon.configStore.GraphOptions {
|
| 183 | 183 |
opt := strings.SplitN(v, "=", 2) |
| 184 |
- if _, ok := container.HostConfig.StorageOpt[opt[0]]; !ok {
|
|
| 185 |
- container.HostConfig.StorageOpt[opt[0]] = opt[1] |
|
| 184 |
+ if _, ok := ctr.HostConfig.StorageOpt[opt[0]]; !ok {
|
|
| 185 |
+ ctr.HostConfig.StorageOpt[opt[0]] = opt[1] |
|
| 186 | 186 |
} |
| 187 | 187 |
} |
| 188 | 188 |
} |
| 189 | 189 |
|
| 190 | 190 |
// Set RWLayer for container after mount labels have been set |
| 191 |
- rwLayer, err := daemon.imageService.CreateLayer(container, setupInitLayer(daemon.idMapping)) |
|
| 191 |
+ rwLayer, err := daemon.imageService.CreateLayer(ctr, setupInitLayer(daemon.idMapping)) |
|
| 192 | 192 |
if err != nil {
|
| 193 | 193 |
return nil, errdefs.System(err) |
| 194 | 194 |
} |
| 195 |
- container.RWLayer = rwLayer |
|
| 195 |
+ ctr.RWLayer = rwLayer |
|
| 196 | 196 |
|
| 197 | 197 |
rootIDs := daemon.idMapping.RootPair() |
| 198 | 198 |
|
| 199 |
- if err := idtools.MkdirAndChown(container.Root, 0700, rootIDs); err != nil {
|
|
| 199 |
+ if err := idtools.MkdirAndChown(ctr.Root, 0700, rootIDs); err != nil {
|
|
| 200 | 200 |
return nil, err |
| 201 | 201 |
} |
| 202 |
- if err := idtools.MkdirAndChown(container.CheckpointDir(), 0700, rootIDs); err != nil {
|
|
| 202 |
+ if err := idtools.MkdirAndChown(ctr.CheckpointDir(), 0700, rootIDs); err != nil {
|
|
| 203 | 203 |
return nil, err |
| 204 | 204 |
} |
| 205 | 205 |
|
| 206 |
- if err := daemon.setHostConfig(container, opts.params.HostConfig); err != nil {
|
|
| 206 |
+ if err := daemon.setHostConfig(ctr, opts.params.HostConfig); err != nil {
|
|
| 207 | 207 |
return nil, err |
| 208 | 208 |
} |
| 209 | 209 |
|
| 210 |
- if err := daemon.createContainerOSSpecificSettings(container, opts.params.Config, opts.params.HostConfig); err != nil {
|
|
| 210 |
+ if err := daemon.createContainerOSSpecificSettings(ctr, opts.params.Config, opts.params.HostConfig); err != nil {
|
|
| 211 | 211 |
return nil, err |
| 212 | 212 |
} |
| 213 | 213 |
|
| ... | ... |
@@ -217,15 +217,15 @@ func (daemon *Daemon) create(opts createOpts) (retC *container.Container, retErr |
| 217 | 217 |
} |
| 218 | 218 |
// Make sure NetworkMode has an acceptable value. We do this to ensure |
| 219 | 219 |
// backwards API compatibility. |
| 220 |
- runconfig.SetDefaultNetModeIfBlank(container.HostConfig) |
|
| 220 |
+ runconfig.SetDefaultNetModeIfBlank(ctr.HostConfig) |
|
| 221 | 221 |
|
| 222 |
- daemon.updateContainerNetworkSettings(container, endpointsConfigs) |
|
| 223 |
- if err := daemon.Register(container); err != nil {
|
|
| 222 |
+ daemon.updateContainerNetworkSettings(ctr, endpointsConfigs) |
|
| 223 |
+ if err := daemon.Register(ctr); err != nil {
|
|
| 224 | 224 |
return nil, err |
| 225 | 225 |
} |
| 226 |
- stateCtr.set(container.ID, "stopped") |
|
| 227 |
- daemon.LogContainerEvent(container, "create") |
|
| 228 |
- return container, nil |
|
| 226 |
+ stateCtr.set(ctr.ID, "stopped") |
|
| 227 |
+ daemon.LogContainerEvent(ctr, "create") |
|
| 228 |
+ return ctr, nil |
|
| 229 | 229 |
} |
| 230 | 230 |
|
| 231 | 231 |
func toHostConfigSelinuxLabels(labels []string) []string {
|
| ... | ... |
@@ -993,8 +993,8 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S |
| 993 | 993 |
} |
| 994 | 994 |
|
| 995 | 995 |
lgrMap := make(map[string]image.LayerGetReleaser) |
| 996 |
- for os, ls := range layerStores {
|
|
| 997 |
- lgrMap[os] = ls |
|
| 996 |
+ for los, ls := range layerStores {
|
|
| 997 |
+ lgrMap[los] = ls |
|
| 998 | 998 |
} |
| 999 | 999 |
imageStore, err := image.NewImageStore(ifs, lgrMap) |
| 1000 | 1000 |
if err != nil {
|
| ... | ... |
@@ -39,7 +39,7 @@ func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, u |
| 39 | 39 |
if daemon.root == "" {
|
| 40 | 40 |
return nil |
| 41 | 41 |
} |
| 42 |
- var errors []string |
|
| 42 |
+ var errs []string |
|
| 43 | 43 |
|
| 44 | 44 |
regexps := getCleanPatterns(id) |
| 45 | 45 |
sc := bufio.NewScanner(reader) |
| ... | ... |
@@ -50,7 +50,7 @@ func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, u |
| 50 | 50 |
if p.MatchString(mnt) {
|
| 51 | 51 |
if err := unmount(mnt); err != nil {
|
| 52 | 52 |
logrus.Error(err) |
| 53 |
- errors = append(errors, err.Error()) |
|
| 53 |
+ errs = append(errs, err.Error()) |
|
| 54 | 54 |
} |
| 55 | 55 |
} |
| 56 | 56 |
} |
| ... | ... |
@@ -62,8 +62,8 @@ func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, u |
| 62 | 62 |
return err |
| 63 | 63 |
} |
| 64 | 64 |
|
| 65 |
- if len(errors) > 0 {
|
|
| 66 |
- return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errors, "\n"))
|
|
| 65 |
+ if len(errs) > 0 {
|
|
| 66 |
+ return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errs, "\n"))
|
|
| 67 | 67 |
} |
| 68 | 68 |
|
| 69 | 69 |
logrus.Debugf("Cleaning up old mountid %v: done.", id)
|
| ... | ... |
@@ -82,15 +82,15 @@ func TestGetContainer(t *testing.T) {
|
| 82 | 82 |
daemon.reserveName(c4.ID, c4.Name) |
| 83 | 83 |
daemon.reserveName(c5.ID, c5.Name) |
| 84 | 84 |
|
| 85 |
- if container, _ := daemon.GetContainer("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 {
|
|
| 85 |
+ if ctr, _ := daemon.GetContainer("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); ctr != c2 {
|
|
| 86 | 86 |
t.Fatal("Should explicitly match full container IDs")
|
| 87 | 87 |
} |
| 88 | 88 |
|
| 89 |
- if container, _ := daemon.GetContainer("75fb0b8009"); container != c4 {
|
|
| 89 |
+ if ctr, _ := daemon.GetContainer("75fb0b8009"); ctr != c4 {
|
|
| 90 | 90 |
t.Fatal("Should match a partial ID")
|
| 91 | 91 |
} |
| 92 | 92 |
|
| 93 |
- if container, _ := daemon.GetContainer("drunk_hawking"); container != c2 {
|
|
| 93 |
+ if ctr, _ := daemon.GetContainer("drunk_hawking"); ctr != c2 {
|
|
| 94 | 94 |
t.Fatal("Should match a full name")
|
| 95 | 95 |
} |
| 96 | 96 |
|
| ... | ... |
@@ -99,7 +99,7 @@ func TestGetContainer(t *testing.T) {
|
| 99 | 99 |
t.Fatal("Should match a full name even though it collides with another container's ID")
|
| 100 | 100 |
} |
| 101 | 101 |
|
| 102 |
- if container, _ := daemon.GetContainer("d22d69a2b896"); container != c5 {
|
|
| 102 |
+ if ctr, _ := daemon.GetContainer("d22d69a2b896"); ctr != c5 {
|
|
| 103 | 103 |
t.Fatal("Should match a container where the provided prefix is an exact match to its name, and is also a prefix for its ID")
|
| 104 | 104 |
} |
| 105 | 105 |
|
| ... | ... |
@@ -176,8 +176,8 @@ func TestContainerInitDNS(t *testing.T) {
|
| 176 | 176 |
"UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}`
|
| 177 | 177 |
|
| 178 | 178 |
// Container struct only used to retrieve path to config file |
| 179 |
- container := &container.Container{Root: containerPath}
|
|
| 180 |
- configPath, err := container.ConfigPath() |
|
| 179 |
+ ctr := &container.Container{Root: containerPath}
|
|
| 180 |
+ configPath, err := ctr.ConfigPath() |
|
| 181 | 181 |
if err != nil {
|
| 182 | 182 |
t.Fatal(err) |
| 183 | 183 |
} |
| ... | ... |
@@ -190,7 +190,7 @@ func TestContainerInitDNS(t *testing.T) {
|
| 190 | 190 |
"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0},
|
| 191 | 191 |
"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}`
|
| 192 | 192 |
|
| 193 |
- hostConfigPath, err := container.HostConfigPath() |
|
| 193 |
+ hostConfigPath, err := ctr.HostConfigPath() |
|
| 194 | 194 |
if err != nil {
|
| 195 | 195 |
t.Fatal(err) |
| 196 | 196 |
} |
| ... | ... |
@@ -384,11 +384,11 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConf |
| 384 | 384 |
adaptSharedNamespaceContainer(daemon, hostConfig) |
| 385 | 385 |
|
| 386 | 386 |
var err error |
| 387 |
- opts, err := daemon.generateSecurityOpt(hostConfig) |
|
| 387 |
+ secOpts, err := daemon.generateSecurityOpt(hostConfig) |
|
| 388 | 388 |
if err != nil {
|
| 389 | 389 |
return err |
| 390 | 390 |
} |
| 391 |
- hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, opts...) |
|
| 391 |
+ hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, secOpts...) |
|
| 392 | 392 |
if hostConfig.OomKillDisable == nil {
|
| 393 | 393 |
defaultOomKillDisable := false |
| 394 | 394 |
hostConfig.OomKillDisable = &defaultOomKillDisable |
| ... | ... |
@@ -1310,7 +1310,7 @@ func setupDaemonRoot(config *config.Config, rootDir string, rootIdentity idtools |
| 1310 | 1310 |
} |
| 1311 | 1311 |
|
| 1312 | 1312 |
func setupDaemonRootPropagation(cfg *config.Config) error {
|
| 1313 |
- rootParentMount, options, err := getSourceMount(cfg.Root) |
|
| 1313 |
+ rootParentMount, mountOptions, err := getSourceMount(cfg.Root) |
|
| 1314 | 1314 |
if err != nil {
|
| 1315 | 1315 |
return errors.Wrap(err, "error getting daemon root's parent mount") |
| 1316 | 1316 |
} |
| ... | ... |
@@ -1326,7 +1326,7 @@ func setupDaemonRootPropagation(cfg *config.Config) error {
|
| 1326 | 1326 |
} |
| 1327 | 1327 |
}() |
| 1328 | 1328 |
|
| 1329 |
- if hasMountInfoOption(options, sharedPropagationOption, slavePropagationOption) {
|
|
| 1329 |
+ if hasMountInfoOption(mountOptions, sharedPropagationOption, slavePropagationOption) {
|
|
| 1330 | 1330 |
cleanupOldFile = true |
| 1331 | 1331 |
return nil |
| 1332 | 1332 |
} |
| ... | ... |
@@ -1745,11 +1745,11 @@ func (daemon *Daemon) initCgroupsPath(path string) error {
|
| 1745 | 1745 |
} |
| 1746 | 1746 |
|
| 1747 | 1747 |
path = filepath.Join(mnt, root, path) |
| 1748 |
- sysinfo := sysinfo.New(true) |
|
| 1749 |
- if err := maybeCreateCPURealTimeFile(sysinfo.CPURealtimePeriod, daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil {
|
|
| 1748 |
+ sysInfo := sysinfo.New(true) |
|
| 1749 |
+ if err := maybeCreateCPURealTimeFile(sysInfo.CPURealtimePeriod, daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil {
|
|
| 1750 | 1750 |
return err |
| 1751 | 1751 |
} |
| 1752 |
- return maybeCreateCPURealTimeFile(sysinfo.CPURealtimeRuntime, daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path) |
|
| 1752 |
+ return maybeCreateCPURealTimeFile(sysInfo.CPURealtimeRuntime, daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path) |
|
| 1753 | 1753 |
} |
| 1754 | 1754 |
|
| 1755 | 1755 |
func maybeCreateCPURealTimeFile(sysinfoPresent bool, configValue int64, file string, path string) error {
|
| ... | ... |
@@ -24,11 +24,11 @@ type fakeContainerGetter struct {
|
| 24 | 24 |
} |
| 25 | 25 |
|
| 26 | 26 |
func (f *fakeContainerGetter) GetContainer(cid string) (*container.Container, error) {
|
| 27 |
- container, ok := f.containers[cid] |
|
| 27 |
+ ctr, ok := f.containers[cid] |
|
| 28 | 28 |
if !ok {
|
| 29 | 29 |
return nil, errors.New("container not found")
|
| 30 | 30 |
} |
| 31 |
- return container, nil |
|
| 31 |
+ return ctr, nil |
|
| 32 | 32 |
} |
| 33 | 33 |
|
| 34 | 34 |
// Unix test as uses settings which are not available on Windows |
| ... | ... |
@@ -138,85 +138,85 @@ func TestAdjustCPUSharesNoAdjustment(t *testing.T) {
|
| 138 | 138 |
|
| 139 | 139 |
// Unix test as uses settings which are not available on Windows |
| 140 | 140 |
func TestParseSecurityOptWithDeprecatedColon(t *testing.T) {
|
| 141 |
- container := &container.Container{}
|
|
| 142 |
- config := &containertypes.HostConfig{}
|
|
| 141 |
+ ctr := &container.Container{}
|
|
| 142 |
+ cfg := &containertypes.HostConfig{}
|
|
| 143 | 143 |
|
| 144 | 144 |
// test apparmor |
| 145 |
- config.SecurityOpt = []string{"apparmor=test_profile"}
|
|
| 146 |
- if err := parseSecurityOpt(container, config); err != nil {
|
|
| 145 |
+ cfg.SecurityOpt = []string{"apparmor=test_profile"}
|
|
| 146 |
+ if err := parseSecurityOpt(ctr, cfg); err != nil {
|
|
| 147 | 147 |
t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
|
| 148 | 148 |
} |
| 149 |
- if container.AppArmorProfile != "test_profile" {
|
|
| 150 |
- t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile)
|
|
| 149 |
+ if ctr.AppArmorProfile != "test_profile" {
|
|
| 150 |
+ t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", ctr.AppArmorProfile)
|
|
| 151 | 151 |
} |
| 152 | 152 |
|
| 153 | 153 |
// test seccomp |
| 154 | 154 |
sp := "/path/to/seccomp_test.json" |
| 155 |
- config.SecurityOpt = []string{"seccomp=" + sp}
|
|
| 156 |
- if err := parseSecurityOpt(container, config); err != nil {
|
|
| 155 |
+ cfg.SecurityOpt = []string{"seccomp=" + sp}
|
|
| 156 |
+ if err := parseSecurityOpt(ctr, cfg); err != nil {
|
|
| 157 | 157 |
t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
|
| 158 | 158 |
} |
| 159 |
- if container.SeccompProfile != sp {
|
|
| 160 |
- t.Fatalf("Unexpected AppArmorProfile, expected: %q, got %q", sp, container.SeccompProfile)
|
|
| 159 |
+ if ctr.SeccompProfile != sp {
|
|
| 160 |
+ t.Fatalf("Unexpected AppArmorProfile, expected: %q, got %q", sp, ctr.SeccompProfile)
|
|
| 161 | 161 |
} |
| 162 | 162 |
|
| 163 | 163 |
// test valid label |
| 164 |
- config.SecurityOpt = []string{"label=user:USER"}
|
|
| 165 |
- if err := parseSecurityOpt(container, config); err != nil {
|
|
| 164 |
+ cfg.SecurityOpt = []string{"label=user:USER"}
|
|
| 165 |
+ if err := parseSecurityOpt(ctr, cfg); err != nil {
|
|
| 166 | 166 |
t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
|
| 167 | 167 |
} |
| 168 | 168 |
|
| 169 | 169 |
// test invalid label |
| 170 |
- config.SecurityOpt = []string{"label"}
|
|
| 171 |
- if err := parseSecurityOpt(container, config); err == nil {
|
|
| 170 |
+ cfg.SecurityOpt = []string{"label"}
|
|
| 171 |
+ if err := parseSecurityOpt(ctr, cfg); err == nil {
|
|
| 172 | 172 |
t.Fatal("Expected parseSecurityOpt error, got nil")
|
| 173 | 173 |
} |
| 174 | 174 |
|
| 175 | 175 |
// test invalid opt |
| 176 |
- config.SecurityOpt = []string{"test"}
|
|
| 177 |
- if err := parseSecurityOpt(container, config); err == nil {
|
|
| 176 |
+ cfg.SecurityOpt = []string{"test"}
|
|
| 177 |
+ if err := parseSecurityOpt(ctr, cfg); err == nil {
|
|
| 178 | 178 |
t.Fatal("Expected parseSecurityOpt error, got nil")
|
| 179 | 179 |
} |
| 180 | 180 |
} |
| 181 | 181 |
|
| 182 | 182 |
func TestParseSecurityOpt(t *testing.T) {
|
| 183 |
- container := &container.Container{}
|
|
| 184 |
- config := &containertypes.HostConfig{}
|
|
| 183 |
+ ctr := &container.Container{}
|
|
| 184 |
+ cfg := &containertypes.HostConfig{}
|
|
| 185 | 185 |
|
| 186 | 186 |
// test apparmor |
| 187 |
- config.SecurityOpt = []string{"apparmor=test_profile"}
|
|
| 188 |
- if err := parseSecurityOpt(container, config); err != nil {
|
|
| 187 |
+ cfg.SecurityOpt = []string{"apparmor=test_profile"}
|
|
| 188 |
+ if err := parseSecurityOpt(ctr, cfg); err != nil {
|
|
| 189 | 189 |
t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
|
| 190 | 190 |
} |
| 191 |
- if container.AppArmorProfile != "test_profile" {
|
|
| 192 |
- t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile)
|
|
| 191 |
+ if ctr.AppArmorProfile != "test_profile" {
|
|
| 192 |
+ t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", ctr.AppArmorProfile)
|
|
| 193 | 193 |
} |
| 194 | 194 |
|
| 195 | 195 |
// test seccomp |
| 196 | 196 |
sp := "/path/to/seccomp_test.json" |
| 197 |
- config.SecurityOpt = []string{"seccomp=" + sp}
|
|
| 198 |
- if err := parseSecurityOpt(container, config); err != nil {
|
|
| 197 |
+ cfg.SecurityOpt = []string{"seccomp=" + sp}
|
|
| 198 |
+ if err := parseSecurityOpt(ctr, cfg); err != nil {
|
|
| 199 | 199 |
t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
|
| 200 | 200 |
} |
| 201 |
- if container.SeccompProfile != sp {
|
|
| 202 |
- t.Fatalf("Unexpected SeccompProfile, expected: %q, got %q", sp, container.SeccompProfile)
|
|
| 201 |
+ if ctr.SeccompProfile != sp {
|
|
| 202 |
+ t.Fatalf("Unexpected SeccompProfile, expected: %q, got %q", sp, ctr.SeccompProfile)
|
|
| 203 | 203 |
} |
| 204 | 204 |
|
| 205 | 205 |
// test valid label |
| 206 |
- config.SecurityOpt = []string{"label=user:USER"}
|
|
| 207 |
- if err := parseSecurityOpt(container, config); err != nil {
|
|
| 206 |
+ cfg.SecurityOpt = []string{"label=user:USER"}
|
|
| 207 |
+ if err := parseSecurityOpt(ctr, cfg); err != nil {
|
|
| 208 | 208 |
t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
|
| 209 | 209 |
} |
| 210 | 210 |
|
| 211 | 211 |
// test invalid label |
| 212 |
- config.SecurityOpt = []string{"label"}
|
|
| 213 |
- if err := parseSecurityOpt(container, config); err == nil {
|
|
| 212 |
+ cfg.SecurityOpt = []string{"label"}
|
|
| 213 |
+ if err := parseSecurityOpt(ctr, cfg); err == nil {
|
|
| 214 | 214 |
t.Fatal("Expected parseSecurityOpt error, got nil")
|
| 215 | 215 |
} |
| 216 | 216 |
|
| 217 | 217 |
// test invalid opt |
| 218 |
- config.SecurityOpt = []string{"test"}
|
|
| 219 |
- if err := parseSecurityOpt(container, config); err == nil {
|
|
| 218 |
+ cfg.SecurityOpt = []string{"test"}
|
|
| 219 |
+ if err := parseSecurityOpt(ctr, cfg); err == nil {
|
|
| 220 | 220 |
t.Fatal("Expected parseSecurityOpt error, got nil")
|
| 221 | 221 |
} |
| 222 | 222 |
} |
| ... | ... |
@@ -225,28 +225,28 @@ func TestParseNNPSecurityOptions(t *testing.T) {
|
| 225 | 225 |
daemon := &Daemon{
|
| 226 | 226 |
configStore: &config.Config{NoNewPrivileges: true},
|
| 227 | 227 |
} |
| 228 |
- container := &container.Container{}
|
|
| 229 |
- config := &containertypes.HostConfig{}
|
|
| 228 |
+ ctr := &container.Container{}
|
|
| 229 |
+ cfg := &containertypes.HostConfig{}
|
|
| 230 | 230 |
|
| 231 | 231 |
// test NNP when "daemon:true" and "no-new-privileges=false"" |
| 232 |
- config.SecurityOpt = []string{"no-new-privileges=false"}
|
|
| 232 |
+ cfg.SecurityOpt = []string{"no-new-privileges=false"}
|
|
| 233 | 233 |
|
| 234 |
- if err := daemon.parseSecurityOpt(container, config); err != nil {
|
|
| 234 |
+ if err := daemon.parseSecurityOpt(ctr, cfg); err != nil {
|
|
| 235 | 235 |
t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err)
|
| 236 | 236 |
} |
| 237 |
- if container.NoNewPrivileges {
|
|
| 238 |
- t.Fatalf("container.NoNewPrivileges should be FALSE: %v", container.NoNewPrivileges)
|
|
| 237 |
+ if ctr.NoNewPrivileges {
|
|
| 238 |
+ t.Fatalf("container.NoNewPrivileges should be FALSE: %v", ctr.NoNewPrivileges)
|
|
| 239 | 239 |
} |
| 240 | 240 |
|
| 241 | 241 |
// test NNP when "daemon:false" and "no-new-privileges=true"" |
| 242 | 242 |
daemon.configStore.NoNewPrivileges = false |
| 243 |
- config.SecurityOpt = []string{"no-new-privileges=true"}
|
|
| 243 |
+ cfg.SecurityOpt = []string{"no-new-privileges=true"}
|
|
| 244 | 244 |
|
| 245 |
- if err := daemon.parseSecurityOpt(container, config); err != nil {
|
|
| 245 |
+ if err := daemon.parseSecurityOpt(ctr, cfg); err != nil {
|
|
| 246 | 246 |
t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err)
|
| 247 | 247 |
} |
| 248 |
- if !container.NoNewPrivileges {
|
|
| 249 |
- t.Fatalf("container.NoNewPrivileges should be TRUE: %v", container.NoNewPrivileges)
|
|
| 248 |
+ if !ctr.NoNewPrivileges {
|
|
| 249 |
+ t.Fatalf("container.NoNewPrivileges should be TRUE: %v", ctr.NoNewPrivileges)
|
|
| 250 | 250 |
} |
| 251 | 251 |
} |
| 252 | 252 |
|
| ... | ... |
@@ -15,7 +15,7 @@ func TestLogContainerEventCopyLabels(t *testing.T) {
|
| 15 | 15 |
_, l, _ := e.Subscribe() |
| 16 | 16 |
defer e.Evict(l) |
| 17 | 17 |
|
| 18 |
- container := &container.Container{
|
|
| 18 |
+ ctr := &container.Container{
|
|
| 19 | 19 |
ID: "container_id", |
| 20 | 20 |
Name: "container_name", |
| 21 | 21 |
Config: &containertypes.Config{
|
| ... | ... |
@@ -29,10 +29,10 @@ func TestLogContainerEventCopyLabels(t *testing.T) {
|
| 29 | 29 |
daemon := &Daemon{
|
| 30 | 30 |
EventsService: e, |
| 31 | 31 |
} |
| 32 |
- daemon.LogContainerEvent(container, "create") |
|
| 32 |
+ daemon.LogContainerEvent(ctr, "create") |
|
| 33 | 33 |
|
| 34 |
- if _, mutated := container.Config.Labels["image"]; mutated {
|
|
| 35 |
- t.Fatalf("Expected to not mutate the container labels, got %q", container.Config.Labels)
|
|
| 34 |
+ if _, mutated := ctr.Config.Labels["image"]; mutated {
|
|
| 35 |
+ t.Fatalf("Expected to not mutate the container labels, got %q", ctr.Config.Labels)
|
|
| 36 | 36 |
} |
| 37 | 37 |
|
| 38 | 38 |
validateTestAttributes(t, l, map[string]string{
|
| ... | ... |
@@ -46,7 +46,7 @@ func TestLogContainerEventWithAttributes(t *testing.T) {
|
| 46 | 46 |
_, l, _ := e.Subscribe() |
| 47 | 47 |
defer e.Evict(l) |
| 48 | 48 |
|
| 49 |
- container := &container.Container{
|
|
| 49 |
+ ctr := &container.Container{
|
|
| 50 | 50 |
ID: "container_id", |
| 51 | 51 |
Name: "container_name", |
| 52 | 52 |
Config: &containertypes.Config{
|
| ... | ... |
@@ -63,7 +63,7 @@ func TestLogContainerEventWithAttributes(t *testing.T) {
|
| 63 | 63 |
"node": "2", |
| 64 | 64 |
"foo": "bar", |
| 65 | 65 |
} |
| 66 |
- daemon.LogContainerEventWithAttributes(container, "create", attributes) |
|
| 66 |
+ daemon.LogContainerEventWithAttributes(ctr, "create", attributes) |
|
| 67 | 67 |
|
| 68 | 68 |
validateTestAttributes(t, l, map[string]string{
|
| 69 | 69 |
"node": "1", |
| ... | ... |
@@ -54,18 +54,18 @@ func (daemon *Daemon) getExecConfig(name string) (*exec.Config, error) {
|
| 54 | 54 |
// saying the container isn't running, we should return a 404 so that |
| 55 | 55 |
// the user sees the same error now that they will after the |
| 56 | 56 |
// 5 minute clean-up loop is run which erases old/dead execs. |
| 57 |
- container := daemon.containers.Get(ec.ContainerID) |
|
| 58 |
- if container == nil {
|
|
| 57 |
+ ctr := daemon.containers.Get(ec.ContainerID) |
|
| 58 |
+ if ctr == nil {
|
|
| 59 | 59 |
return nil, containerNotFound(name) |
| 60 | 60 |
} |
| 61 |
- if !container.IsRunning() {
|
|
| 62 |
- return nil, fmt.Errorf("Container %s is not running: %s", container.ID, container.State.String())
|
|
| 61 |
+ if !ctr.IsRunning() {
|
|
| 62 |
+ return nil, fmt.Errorf("Container %s is not running: %s", ctr.ID, ctr.State.String())
|
|
| 63 | 63 |
} |
| 64 |
- if container.IsPaused() {
|
|
| 65 |
- return nil, errExecPaused(container.ID) |
|
| 64 |
+ if ctr.IsPaused() {
|
|
| 65 |
+ return nil, errExecPaused(ctr.ID) |
|
| 66 | 66 |
} |
| 67 |
- if container.IsRestarting() {
|
|
| 68 |
- return nil, errContainerIsRestarting(container.ID) |
|
| 67 |
+ if ctr.IsRestarting() {
|
|
| 68 |
+ return nil, errContainerIsRestarting(ctr.ID) |
|
| 69 | 69 |
} |
| 70 | 70 |
return ec, nil |
| 71 | 71 |
} |
| ... | ... |
@@ -76,21 +76,21 @@ func (daemon *Daemon) unregisterExecCommand(container *container.Container, exec |
| 76 | 76 |
} |
| 77 | 77 |
|
| 78 | 78 |
func (daemon *Daemon) getActiveContainer(name string) (*container.Container, error) {
|
| 79 |
- container, err := daemon.GetContainer(name) |
|
| 79 |
+ ctr, err := daemon.GetContainer(name) |
|
| 80 | 80 |
if err != nil {
|
| 81 | 81 |
return nil, err |
| 82 | 82 |
} |
| 83 | 83 |
|
| 84 |
- if !container.IsRunning() {
|
|
| 85 |
- return nil, errNotRunning(container.ID) |
|
| 84 |
+ if !ctr.IsRunning() {
|
|
| 85 |
+ return nil, errNotRunning(ctr.ID) |
|
| 86 | 86 |
} |
| 87 |
- if container.IsPaused() {
|
|
| 87 |
+ if ctr.IsPaused() {
|
|
| 88 | 88 |
return nil, errExecPaused(name) |
| 89 | 89 |
} |
| 90 |
- if container.IsRestarting() {
|
|
| 91 |
- return nil, errContainerIsRestarting(container.ID) |
|
| 90 |
+ if ctr.IsRestarting() {
|
|
| 91 |
+ return nil, errContainerIsRestarting(ctr.ID) |
|
| 92 | 92 |
} |
| 93 |
- return container, nil |
|
| 93 |
+ return ctr, nil |
|
| 94 | 94 |
} |
| 95 | 95 |
|
| 96 | 96 |
// ContainerExecCreate sets up an exec in a running container. |
| ... | ... |
@@ -220,11 +220,11 @@ func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, stdin |
| 220 | 220 |
|
| 221 | 221 |
p := &specs.Process{}
|
| 222 | 222 |
if runtime.GOOS != "windows" {
|
| 223 |
- container, err := daemon.containerdCli.LoadContainer(ctx, ec.ContainerID) |
|
| 223 |
+ ctr, err := daemon.containerdCli.LoadContainer(ctx, ec.ContainerID) |
|
| 224 | 224 |
if err != nil {
|
| 225 | 225 |
return err |
| 226 | 226 |
} |
| 227 |
- spec, err := container.Spec(ctx) |
|
| 227 |
+ spec, err := ctr.Spec(ctx) |
|
| 228 | 228 |
if err != nil {
|
| 229 | 229 |
return err |
| 230 | 230 |
} |
| ... | ... |
@@ -14,26 +14,26 @@ import ( |
| 14 | 14 |
// ContainerExport writes the contents of the container to the given |
| 15 | 15 |
// writer. An error is returned if the container cannot be found. |
| 16 | 16 |
func (daemon *Daemon) ContainerExport(name string, out io.Writer) error {
|
| 17 |
- container, err := daemon.GetContainer(name) |
|
| 17 |
+ ctr, err := daemon.GetContainer(name) |
|
| 18 | 18 |
if err != nil {
|
| 19 | 19 |
return err |
| 20 | 20 |
} |
| 21 | 21 |
|
| 22 |
- if isWindows && container.OS == "windows" {
|
|
| 22 |
+ if isWindows && ctr.OS == "windows" {
|
|
| 23 | 23 |
return fmt.Errorf("the daemon on this operating system does not support exporting Windows containers")
|
| 24 | 24 |
} |
| 25 | 25 |
|
| 26 |
- if container.IsDead() {
|
|
| 27 |
- err := fmt.Errorf("You cannot export container %s which is Dead", container.ID)
|
|
| 26 |
+ if ctr.IsDead() {
|
|
| 27 |
+ err := fmt.Errorf("You cannot export container %s which is Dead", ctr.ID)
|
|
| 28 | 28 |
return errdefs.Conflict(err) |
| 29 | 29 |
} |
| 30 | 30 |
|
| 31 |
- if container.IsRemovalInProgress() {
|
|
| 32 |
- err := fmt.Errorf("You cannot export container %s which is being removed", container.ID)
|
|
| 31 |
+ if ctr.IsRemovalInProgress() {
|
|
| 32 |
+ err := fmt.Errorf("You cannot export container %s which is being removed", ctr.ID)
|
|
| 33 | 33 |
return errdefs.Conflict(err) |
| 34 | 34 |
} |
| 35 | 35 |
|
| 36 |
- data, err := daemon.containerExport(container) |
|
| 36 |
+ data, err := daemon.containerExport(ctr) |
|
| 37 | 37 |
if err != nil {
|
| 38 | 38 |
return fmt.Errorf("Error exporting container %s: %v", name, err)
|
| 39 | 39 |
} |
| ... | ... |
@@ -65,7 +65,7 @@ func (daemon *Daemon) containerExport(container *container.Container) (arch io.R |
| 65 | 65 |
return nil, err |
| 66 | 66 |
} |
| 67 | 67 |
|
| 68 |
- archive, err := archivePath(basefs, basefs.Path(), &archive.TarOptions{
|
|
| 68 |
+ archv, err := archivePath(basefs, basefs.Path(), &archive.TarOptions{
|
|
| 69 | 69 |
Compression: archive.Uncompressed, |
| 70 | 70 |
UIDMaps: daemon.idMapping.UIDs(), |
| 71 | 71 |
GIDMaps: daemon.idMapping.GIDs(), |
| ... | ... |
@@ -74,8 +74,8 @@ func (daemon *Daemon) containerExport(container *container.Container) (arch io.R |
| 74 | 74 |
rwlayer.Unmount() |
| 75 | 75 |
return nil, err |
| 76 | 76 |
} |
| 77 |
- arch = ioutils.NewReadCloserWrapper(archive, func() error {
|
|
| 78 |
- err := archive.Close() |
|
| 77 |
+ arch = ioutils.NewReadCloserWrapper(archv, func() error {
|
|
| 78 |
+ err := archv.Close() |
|
| 79 | 79 |
rwlayer.Unmount() |
| 80 | 80 |
daemon.imageService.ReleaseLayer(rwlayer, container.OS) |
| 81 | 81 |
return err |
| ... | ... |
@@ -32,50 +32,50 @@ func (daemon *Daemon) ContainerInspect(name string, size bool, version string) ( |
| 32 | 32 |
// ContainerInspectCurrent returns low-level information about a |
| 33 | 33 |
// container in a most recent api version. |
| 34 | 34 |
func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) {
|
| 35 |
- container, err := daemon.GetContainer(name) |
|
| 35 |
+ ctr, err := daemon.GetContainer(name) |
|
| 36 | 36 |
if err != nil {
|
| 37 | 37 |
return nil, err |
| 38 | 38 |
} |
| 39 | 39 |
|
| 40 |
- container.Lock() |
|
| 40 |
+ ctr.Lock() |
|
| 41 | 41 |
|
| 42 |
- base, err := daemon.getInspectData(container) |
|
| 42 |
+ base, err := daemon.getInspectData(ctr) |
|
| 43 | 43 |
if err != nil {
|
| 44 |
- container.Unlock() |
|
| 44 |
+ ctr.Unlock() |
|
| 45 | 45 |
return nil, err |
| 46 | 46 |
} |
| 47 | 47 |
|
| 48 | 48 |
apiNetworks := make(map[string]*networktypes.EndpointSettings) |
| 49 |
- for name, epConf := range container.NetworkSettings.Networks {
|
|
| 49 |
+ for name, epConf := range ctr.NetworkSettings.Networks {
|
|
| 50 | 50 |
if epConf.EndpointSettings != nil {
|
| 51 | 51 |
// We must make a copy of this pointer object otherwise it can race with other operations |
| 52 | 52 |
apiNetworks[name] = epConf.EndpointSettings.Copy() |
| 53 | 53 |
} |
| 54 | 54 |
} |
| 55 | 55 |
|
| 56 |
- mountPoints := container.GetMountPoints() |
|
| 56 |
+ mountPoints := ctr.GetMountPoints() |
|
| 57 | 57 |
networkSettings := &types.NetworkSettings{
|
| 58 | 58 |
NetworkSettingsBase: types.NetworkSettingsBase{
|
| 59 |
- Bridge: container.NetworkSettings.Bridge, |
|
| 60 |
- SandboxID: container.NetworkSettings.SandboxID, |
|
| 61 |
- HairpinMode: container.NetworkSettings.HairpinMode, |
|
| 62 |
- LinkLocalIPv6Address: container.NetworkSettings.LinkLocalIPv6Address, |
|
| 63 |
- LinkLocalIPv6PrefixLen: container.NetworkSettings.LinkLocalIPv6PrefixLen, |
|
| 64 |
- SandboxKey: container.NetworkSettings.SandboxKey, |
|
| 65 |
- SecondaryIPAddresses: container.NetworkSettings.SecondaryIPAddresses, |
|
| 66 |
- SecondaryIPv6Addresses: container.NetworkSettings.SecondaryIPv6Addresses, |
|
| 59 |
+ Bridge: ctr.NetworkSettings.Bridge, |
|
| 60 |
+ SandboxID: ctr.NetworkSettings.SandboxID, |
|
| 61 |
+ HairpinMode: ctr.NetworkSettings.HairpinMode, |
|
| 62 |
+ LinkLocalIPv6Address: ctr.NetworkSettings.LinkLocalIPv6Address, |
|
| 63 |
+ LinkLocalIPv6PrefixLen: ctr.NetworkSettings.LinkLocalIPv6PrefixLen, |
|
| 64 |
+ SandboxKey: ctr.NetworkSettings.SandboxKey, |
|
| 65 |
+ SecondaryIPAddresses: ctr.NetworkSettings.SecondaryIPAddresses, |
|
| 66 |
+ SecondaryIPv6Addresses: ctr.NetworkSettings.SecondaryIPv6Addresses, |
|
| 67 | 67 |
}, |
| 68 |
- DefaultNetworkSettings: daemon.getDefaultNetworkSettings(container.NetworkSettings.Networks), |
|
| 68 |
+ DefaultNetworkSettings: daemon.getDefaultNetworkSettings(ctr.NetworkSettings.Networks), |
|
| 69 | 69 |
Networks: apiNetworks, |
| 70 | 70 |
} |
| 71 | 71 |
|
| 72 |
- ports := make(nat.PortMap, len(container.NetworkSettings.Ports)) |
|
| 73 |
- for k, pm := range container.NetworkSettings.Ports {
|
|
| 72 |
+ ports := make(nat.PortMap, len(ctr.NetworkSettings.Ports)) |
|
| 73 |
+ for k, pm := range ctr.NetworkSettings.Ports {
|
|
| 74 | 74 |
ports[k] = pm |
| 75 | 75 |
} |
| 76 | 76 |
networkSettings.NetworkSettingsBase.Ports = ports |
| 77 | 77 |
|
| 78 |
- container.Unlock() |
|
| 78 |
+ ctr.Unlock() |
|
| 79 | 79 |
|
| 80 | 80 |
if size {
|
| 81 | 81 |
sizeRw, sizeRootFs := daemon.imageService.GetContainerLayerSize(base.ID) |
| ... | ... |
@@ -86,7 +86,7 @@ func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.Co |
| 86 | 86 |
return &types.ContainerJSON{
|
| 87 | 87 |
ContainerJSONBase: base, |
| 88 | 88 |
Mounts: mountPoints, |
| 89 |
- Config: container.Config, |
|
| 89 |
+ Config: ctr.Config, |
|
| 90 | 90 |
NetworkSettings: networkSettings, |
| 91 | 91 |
}, nil |
| 92 | 92 |
} |
| ... | ... |
@@ -214,7 +214,7 @@ func (daemon *Daemon) ContainerExecInspect(id string) (*backend.ExecInspect, err |
| 214 | 214 |
return nil, errExecNotFound(id) |
| 215 | 215 |
} |
| 216 | 216 |
|
| 217 |
- if container := daemon.containers.Get(e.ContainerID); container == nil {
|
|
| 217 |
+ if ctr := daemon.containers.Get(e.ContainerID); ctr == nil {
|
|
| 218 | 218 |
return nil, errExecNotFound(id) |
| 219 | 219 |
} |
| 220 | 220 |
|
| ... | ... |
@@ -20,38 +20,38 @@ func setPlatformSpecificContainerFields(container *container.Container, contJSON |
| 20 | 20 |
|
| 21 | 21 |
// containerInspectPre120 gets containers for pre 1.20 APIs. |
| 22 | 22 |
func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) {
|
| 23 |
- container, err := daemon.GetContainer(name) |
|
| 23 |
+ ctr, err := daemon.GetContainer(name) |
|
| 24 | 24 |
if err != nil {
|
| 25 | 25 |
return nil, err |
| 26 | 26 |
} |
| 27 | 27 |
|
| 28 |
- container.Lock() |
|
| 29 |
- defer container.Unlock() |
|
| 28 |
+ ctr.Lock() |
|
| 29 |
+ defer ctr.Unlock() |
|
| 30 | 30 |
|
| 31 |
- base, err := daemon.getInspectData(container) |
|
| 31 |
+ base, err := daemon.getInspectData(ctr) |
|
| 32 | 32 |
if err != nil {
|
| 33 | 33 |
return nil, err |
| 34 | 34 |
} |
| 35 | 35 |
|
| 36 | 36 |
volumes := make(map[string]string) |
| 37 | 37 |
volumesRW := make(map[string]bool) |
| 38 |
- for _, m := range container.MountPoints {
|
|
| 38 |
+ for _, m := range ctr.MountPoints {
|
|
| 39 | 39 |
volumes[m.Destination] = m.Path() |
| 40 | 40 |
volumesRW[m.Destination] = m.RW |
| 41 | 41 |
} |
| 42 | 42 |
|
| 43 | 43 |
config := &v1p19.ContainerConfig{
|
| 44 |
- Config: container.Config, |
|
| 45 |
- MacAddress: container.Config.MacAddress, |
|
| 46 |
- NetworkDisabled: container.Config.NetworkDisabled, |
|
| 47 |
- ExposedPorts: container.Config.ExposedPorts, |
|
| 48 |
- VolumeDriver: container.HostConfig.VolumeDriver, |
|
| 49 |
- Memory: container.HostConfig.Memory, |
|
| 50 |
- MemorySwap: container.HostConfig.MemorySwap, |
|
| 51 |
- CPUShares: container.HostConfig.CPUShares, |
|
| 52 |
- CPUSet: container.HostConfig.CpusetCpus, |
|
| 44 |
+ Config: ctr.Config, |
|
| 45 |
+ MacAddress: ctr.Config.MacAddress, |
|
| 46 |
+ NetworkDisabled: ctr.Config.NetworkDisabled, |
|
| 47 |
+ ExposedPorts: ctr.Config.ExposedPorts, |
|
| 48 |
+ VolumeDriver: ctr.HostConfig.VolumeDriver, |
|
| 49 |
+ Memory: ctr.HostConfig.Memory, |
|
| 50 |
+ MemorySwap: ctr.HostConfig.MemorySwap, |
|
| 51 |
+ CPUShares: ctr.HostConfig.CPUShares, |
|
| 52 |
+ CPUSet: ctr.HostConfig.CpusetCpus, |
|
| 53 | 53 |
} |
| 54 |
- networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) |
|
| 54 |
+ networkSettings := daemon.getBackwardsCompatibleNetworkSettings(ctr.NetworkSettings) |
|
| 55 | 55 |
|
| 56 | 56 |
return &v1p19.ContainerJSON{
|
| 57 | 57 |
ContainerJSONBase: base, |
| ... | ... |
@@ -69,8 +69,8 @@ func setupContainerWithName(t *testing.T, name string, daemon *Daemon) *containe |
| 69 | 69 |
} |
| 70 | 70 |
|
| 71 | 71 |
func containerListContainsName(containers []*types.Container, name string) bool {
|
| 72 |
- for _, container := range containers {
|
|
| 73 |
- for _, containerName := range container.Names {
|
|
| 72 |
+ for _, ctr := range containers {
|
|
| 73 |
+ for _, containerName := range ctr.Names {
|
|
| 74 | 74 |
if containerName == name {
|
| 75 | 75 |
return true |
| 76 | 76 |
} |
| ... | ... |
@@ -33,20 +33,20 @@ func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, c |
| 33 | 33 |
if !(config.ShowStdout || config.ShowStderr) {
|
| 34 | 34 |
return nil, false, errdefs.InvalidParameter(errors.New("You must choose at least one stream"))
|
| 35 | 35 |
} |
| 36 |
- container, err := daemon.GetContainer(containerName) |
|
| 36 |
+ ctr, err := daemon.GetContainer(containerName) |
|
| 37 | 37 |
if err != nil {
|
| 38 | 38 |
return nil, false, err |
| 39 | 39 |
} |
| 40 | 40 |
|
| 41 |
- if container.RemovalInProgress || container.Dead {
|
|
| 41 |
+ if ctr.RemovalInProgress || ctr.Dead {
|
|
| 42 | 42 |
return nil, false, errdefs.Conflict(errors.New("can not get logs from container which is dead or marked for removal"))
|
| 43 | 43 |
} |
| 44 | 44 |
|
| 45 |
- if container.HostConfig.LogConfig.Type == "none" {
|
|
| 45 |
+ if ctr.HostConfig.LogConfig.Type == "none" {
|
|
| 46 | 46 |
return nil, false, logger.ErrReadLogsNotSupported{}
|
| 47 | 47 |
} |
| 48 | 48 |
|
| 49 |
- cLog, cLogCreated, err := daemon.getLogger(container) |
|
| 49 |
+ cLog, cLogCreated, err := daemon.getLogger(ctr) |
|
| 50 | 50 |
if err != nil {
|
| 51 | 51 |
return nil, false, err |
| 52 | 52 |
} |
| ... | ... |
@@ -157,7 +157,7 @@ func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, c |
| 157 | 157 |
} |
| 158 | 158 |
} |
| 159 | 159 |
}() |
| 160 |
- return messageChan, container.Config.Tty, nil |
|
| 160 |
+ return messageChan, ctr.Config.Tty, nil |
|
| 161 | 161 |
} |
| 162 | 162 |
|
| 163 | 163 |
func (daemon *Daemon) getLogger(container *container.Container) (l logger.Logger, created bool, err error) {
|
| ... | ... |
@@ -430,12 +430,12 @@ func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnet |
| 430 | 430 |
|
| 431 | 431 |
// UpdateContainerServiceConfig updates a service configuration. |
| 432 | 432 |
func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error {
|
| 433 |
- container, err := daemon.GetContainer(containerName) |
|
| 433 |
+ ctr, err := daemon.GetContainer(containerName) |
|
| 434 | 434 |
if err != nil {
|
| 435 | 435 |
return err |
| 436 | 436 |
} |
| 437 | 437 |
|
| 438 |
- container.NetworkSettings.Service = serviceConfig |
|
| 438 |
+ ctr.NetworkSettings.Service = serviceConfig |
|
| 439 | 439 |
return nil |
| 440 | 440 |
} |
| 441 | 441 |
|
| ... | ... |
@@ -443,24 +443,24 @@ func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, service |
| 443 | 443 |
// network. If either cannot be found, an err is returned. If the |
| 444 | 444 |
// network cannot be set up, an err is returned. |
| 445 | 445 |
func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error {
|
| 446 |
- container, err := daemon.GetContainer(containerName) |
|
| 446 |
+ ctr, err := daemon.GetContainer(containerName) |
|
| 447 | 447 |
if err != nil {
|
| 448 | 448 |
return err |
| 449 | 449 |
} |
| 450 |
- return daemon.ConnectToNetwork(container, networkName, endpointConfig) |
|
| 450 |
+ return daemon.ConnectToNetwork(ctr, networkName, endpointConfig) |
|
| 451 | 451 |
} |
| 452 | 452 |
|
| 453 | 453 |
// DisconnectContainerFromNetwork disconnects the given container from |
| 454 | 454 |
// the given network. If either cannot be found, an err is returned. |
| 455 | 455 |
func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error {
|
| 456 |
- container, err := daemon.GetContainer(containerName) |
|
| 456 |
+ ctr, err := daemon.GetContainer(containerName) |
|
| 457 | 457 |
if err != nil {
|
| 458 | 458 |
if force {
|
| 459 | 459 |
return daemon.ForceEndpointDelete(containerName, networkName) |
| 460 | 460 |
} |
| 461 | 461 |
return err |
| 462 | 462 |
} |
| 463 |
- return daemon.DisconnectFromNetwork(container, networkName, force) |
|
| 463 |
+ return daemon.DisconnectFromNetwork(ctr, networkName, force) |
|
| 464 | 464 |
} |
| 465 | 465 |
|
| 466 | 466 |
// GetNetworkDriverList returns the list of plugins drivers |
| ... | ... |
@@ -485,10 +485,10 @@ func (daemon *Daemon) GetNetworkDriverList() []string {
|
| 485 | 485 |
|
| 486 | 486 |
networks := daemon.netController.Networks() |
| 487 | 487 |
|
| 488 |
- for _, network := range networks {
|
|
| 489 |
- if !pluginMap[network.Type()] {
|
|
| 490 |
- pluginList = append(pluginList, network.Type()) |
|
| 491 |
- pluginMap[network.Type()] = true |
|
| 488 |
+ for _, nw := range networks {
|
|
| 489 |
+ if !pluginMap[nw.Type()] {
|
|
| 490 |
+ pluginList = append(pluginList, nw.Type()) |
|
| 491 |
+ pluginMap[nw.Type()] = true |
|
| 492 | 492 |
} |
| 493 | 493 |
} |
| 494 | 494 |
|
| ... | ... |
@@ -10,11 +10,11 @@ import ( |
| 10 | 10 |
|
| 11 | 11 |
// ContainerPause pauses a container |
| 12 | 12 |
func (daemon *Daemon) ContainerPause(name string) error {
|
| 13 |
- container, err := daemon.GetContainer(name) |
|
| 13 |
+ ctr, err := daemon.GetContainer(name) |
|
| 14 | 14 |
if err != nil {
|
| 15 | 15 |
return err |
| 16 | 16 |
} |
| 17 |
- return daemon.containerPause(container) |
|
| 17 |
+ return daemon.containerPause(ctr) |
|
| 18 | 18 |
} |
| 19 | 19 |
|
| 20 | 20 |
// containerPause pauses the container execution without stopping the process. |
| ... | ... |
@@ -15,15 +15,15 @@ import ( |
| 15 | 15 |
// stop. Returns an error if the container cannot be found, or if |
| 16 | 16 |
// there is an underlying error at any stage of the restart. |
| 17 | 17 |
func (daemon *Daemon) ContainerRestart(name string, seconds *int) error {
|
| 18 |
- container, err := daemon.GetContainer(name) |
|
| 18 |
+ ctr, err := daemon.GetContainer(name) |
|
| 19 | 19 |
if err != nil {
|
| 20 | 20 |
return err |
| 21 | 21 |
} |
| 22 | 22 |
if seconds == nil {
|
| 23 |
- stopTimeout := container.StopTimeout() |
|
| 23 |
+ stopTimeout := ctr.StopTimeout() |
|
| 24 | 24 |
seconds = &stopTimeout |
| 25 | 25 |
} |
| 26 |
- if err := daemon.containerRestart(container, *seconds); err != nil {
|
|
| 26 |
+ if err := daemon.containerRestart(ctr, *seconds); err != nil {
|
|
| 27 | 27 |
return fmt.Errorf("Cannot restart container %s: %v", name, err)
|
| 28 | 28 |
} |
| 29 | 29 |
return nil |
| ... | ... |
@@ -23,24 +23,24 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos |
| 23 | 23 |
return errdefs.InvalidParameter(errors.New("checkpoint is only supported in experimental mode"))
|
| 24 | 24 |
} |
| 25 | 25 |
|
| 26 |
- container, err := daemon.GetContainer(name) |
|
| 26 |
+ ctr, err := daemon.GetContainer(name) |
|
| 27 | 27 |
if err != nil {
|
| 28 | 28 |
return err |
| 29 | 29 |
} |
| 30 | 30 |
|
| 31 | 31 |
validateState := func() error {
|
| 32 |
- container.Lock() |
|
| 33 |
- defer container.Unlock() |
|
| 32 |
+ ctr.Lock() |
|
| 33 |
+ defer ctr.Unlock() |
|
| 34 | 34 |
|
| 35 |
- if container.Paused {
|
|
| 35 |
+ if ctr.Paused {
|
|
| 36 | 36 |
return errdefs.Conflict(errors.New("cannot start a paused container, try unpause instead"))
|
| 37 | 37 |
} |
| 38 | 38 |
|
| 39 |
- if container.Running {
|
|
| 39 |
+ if ctr.Running {
|
|
| 40 | 40 |
return containerNotModifiedError{running: true}
|
| 41 | 41 |
} |
| 42 | 42 |
|
| 43 |
- if container.RemovalInProgress || container.Dead {
|
|
| 43 |
+ if ctr.RemovalInProgress || ctr.Dead {
|
|
| 44 | 44 |
return errdefs.Conflict(errors.New("container is marked for removal and cannot be started"))
|
| 45 | 45 |
} |
| 46 | 46 |
return nil |
| ... | ... |
@@ -56,26 +56,26 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos |
| 56 | 56 |
// creating a container, not during start. |
| 57 | 57 |
if hostConfig != nil {
|
| 58 | 58 |
logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12")
|
| 59 |
- oldNetworkMode := container.HostConfig.NetworkMode |
|
| 60 |
- if err := daemon.setSecurityOptions(container, hostConfig); err != nil {
|
|
| 59 |
+ oldNetworkMode := ctr.HostConfig.NetworkMode |
|
| 60 |
+ if err := daemon.setSecurityOptions(ctr, hostConfig); err != nil {
|
|
| 61 | 61 |
return errdefs.InvalidParameter(err) |
| 62 | 62 |
} |
| 63 | 63 |
if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil {
|
| 64 | 64 |
return errdefs.InvalidParameter(err) |
| 65 | 65 |
} |
| 66 |
- if err := daemon.setHostConfig(container, hostConfig); err != nil {
|
|
| 66 |
+ if err := daemon.setHostConfig(ctr, hostConfig); err != nil {
|
|
| 67 | 67 |
return errdefs.InvalidParameter(err) |
| 68 | 68 |
} |
| 69 |
- newNetworkMode := container.HostConfig.NetworkMode |
|
| 69 |
+ newNetworkMode := ctr.HostConfig.NetworkMode |
|
| 70 | 70 |
if string(oldNetworkMode) != string(newNetworkMode) {
|
| 71 | 71 |
// if user has change the network mode on starting, clean up the |
| 72 | 72 |
// old networks. It is a deprecated feature and has been removed in Docker 1.12 |
| 73 |
- container.NetworkSettings.Networks = nil |
|
| 74 |
- if err := container.CheckpointTo(daemon.containersReplica); err != nil {
|
|
| 73 |
+ ctr.NetworkSettings.Networks = nil |
|
| 74 |
+ if err := ctr.CheckpointTo(daemon.containersReplica); err != nil {
|
|
| 75 | 75 |
return errdefs.System(err) |
| 76 | 76 |
} |
| 77 | 77 |
} |
| 78 |
- container.InitDNSHostConfig() |
|
| 78 |
+ ctr.InitDNSHostConfig() |
|
| 79 | 79 |
} |
| 80 | 80 |
} else {
|
| 81 | 81 |
if hostConfig != nil {
|
| ... | ... |
@@ -85,17 +85,17 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos |
| 85 | 85 |
|
| 86 | 86 |
// check if hostConfig is in line with the current system settings. |
| 87 | 87 |
// It may happen cgroups are umounted or the like. |
| 88 |
- if _, err = daemon.verifyContainerSettings(container.OS, container.HostConfig, nil, false); err != nil {
|
|
| 88 |
+ if _, err = daemon.verifyContainerSettings(ctr.OS, ctr.HostConfig, nil, false); err != nil {
|
|
| 89 | 89 |
return errdefs.InvalidParameter(err) |
| 90 | 90 |
} |
| 91 | 91 |
// Adapt for old containers in case we have updates in this function and |
| 92 | 92 |
// old containers never have chance to call the new function in create stage. |
| 93 | 93 |
if hostConfig != nil {
|
| 94 |
- if err := daemon.adaptContainerSettings(container.HostConfig, false); err != nil {
|
|
| 94 |
+ if err := daemon.adaptContainerSettings(ctr.HostConfig, false); err != nil {
|
|
| 95 | 95 |
return errdefs.InvalidParameter(err) |
| 96 | 96 |
} |
| 97 | 97 |
} |
| 98 |
- return daemon.containerStart(container, checkpoint, checkpointDir, true) |
|
| 98 |
+ return daemon.containerStart(ctr, checkpoint, checkpointDir, true) |
|
| 99 | 99 |
} |
| 100 | 100 |
|
| 101 | 101 |
// containerStart prepares the container to run by setting up everything the |
| ... | ... |
@@ -25,16 +25,17 @@ func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, c |
| 25 | 25 |
return errors.New("API versions pre v1.21 do not support stats on Windows")
|
| 26 | 26 |
} |
| 27 | 27 |
|
| 28 |
- container, err := daemon.GetContainer(prefixOrName) |
|
| 28 |
+ ctr, err := daemon.GetContainer(prefixOrName) |
|
| 29 | 29 |
if err != nil {
|
| 30 | 30 |
return err |
| 31 | 31 |
} |
| 32 | 32 |
|
| 33 | 33 |
// If the container is either not running or restarting and requires no stream, return an empty stats. |
| 34 |
- if (!container.IsRunning() || container.IsRestarting()) && !config.Stream {
|
|
| 34 |
+ if (!ctr.IsRunning() || ctr.IsRestarting()) && !config.Stream {
|
|
| 35 | 35 |
return json.NewEncoder(config.OutStream).Encode(&types.StatsJSON{
|
| 36 |
- Name: container.Name, |
|
| 37 |
- ID: container.ID}) |
|
| 36 |
+ Name: ctr.Name, |
|
| 37 |
+ ID: ctr.ID, |
|
| 38 |
+ }) |
|
| 38 | 39 |
} |
| 39 | 40 |
|
| 40 | 41 |
outStream := config.OutStream |
| ... | ... |
@@ -49,8 +50,8 @@ func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, c |
| 49 | 49 |
var preRead time.Time |
| 50 | 50 |
getStatJSON := func(v interface{}) *types.StatsJSON {
|
| 51 | 51 |
ss := v.(types.StatsJSON) |
| 52 |
- ss.Name = container.Name |
|
| 53 |
- ss.ID = container.ID |
|
| 52 |
+ ss.Name = ctr.Name |
|
| 53 |
+ ss.ID = ctr.ID |
|
| 54 | 54 |
ss.PreCPUStats = preCPUStats |
| 55 | 55 |
ss.PreRead = preRead |
| 56 | 56 |
preCPUStats = ss.CPUStats |
| ... | ... |
@@ -60,8 +61,8 @@ func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, c |
| 60 | 60 |
|
| 61 | 61 |
enc := json.NewEncoder(outStream) |
| 62 | 62 |
|
| 63 |
- updates := daemon.subscribeToContainerStats(container) |
|
| 64 |
- defer daemon.unsubscribeToContainerStats(container, updates) |
|
| 63 |
+ updates := daemon.subscribeToContainerStats(ctr) |
|
| 64 |
+ defer daemon.unsubscribeToContainerStats(ctr, updates) |
|
| 65 | 65 |
|
| 66 | 66 |
noStreamFirstFrame := true |
| 67 | 67 |
for {
|
| ... | ... |
@@ -144,20 +144,20 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*container.Conta |
| 144 | 144 |
return nil, err |
| 145 | 145 |
} |
| 146 | 146 |
|
| 147 |
- container, err := daemon.GetContainer(name) |
|
| 147 |
+ ctr, err := daemon.GetContainer(name) |
|
| 148 | 148 |
if err != nil {
|
| 149 | 149 |
return nil, err |
| 150 | 150 |
} |
| 151 | 151 |
|
| 152 |
- if !container.IsRunning() {
|
|
| 153 |
- return nil, errNotRunning(container.ID) |
|
| 152 |
+ if !ctr.IsRunning() {
|
|
| 153 |
+ return nil, errNotRunning(ctr.ID) |
|
| 154 | 154 |
} |
| 155 | 155 |
|
| 156 |
- if container.IsRestarting() {
|
|
| 157 |
- return nil, errContainerIsRestarting(container.ID) |
|
| 156 |
+ if ctr.IsRestarting() {
|
|
| 157 |
+ return nil, errContainerIsRestarting(ctr.ID) |
|
| 158 | 158 |
} |
| 159 | 159 |
|
| 160 |
- procs, err := daemon.containerd.ListPids(context.Background(), container.ID) |
|
| 160 |
+ procs, err := daemon.containerd.ListPids(context.Background(), ctr.ID) |
|
| 161 | 161 |
if err != nil {
|
| 162 | 162 |
return nil, err |
| 163 | 163 |
} |
| ... | ... |
@@ -184,6 +184,6 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*container.Conta |
| 184 | 184 |
if err != nil {
|
| 185 | 185 |
return nil, err |
| 186 | 186 |
} |
| 187 |
- daemon.LogContainerEvent(container, "top") |
|
| 187 |
+ daemon.LogContainerEvent(ctr, "top") |
|
| 188 | 188 |
return procList, nil |
| 189 | 189 |
} |
| ... | ... |
@@ -10,33 +10,33 @@ import ( |
| 10 | 10 |
|
| 11 | 11 |
// ContainerUnpause unpauses a container |
| 12 | 12 |
func (daemon *Daemon) ContainerUnpause(name string) error {
|
| 13 |
- container, err := daemon.GetContainer(name) |
|
| 13 |
+ ctr, err := daemon.GetContainer(name) |
|
| 14 | 14 |
if err != nil {
|
| 15 | 15 |
return err |
| 16 | 16 |
} |
| 17 |
- return daemon.containerUnpause(container) |
|
| 17 |
+ return daemon.containerUnpause(ctr) |
|
| 18 | 18 |
} |
| 19 | 19 |
|
| 20 | 20 |
// containerUnpause resumes the container execution after the container is paused. |
| 21 |
-func (daemon *Daemon) containerUnpause(container *container.Container) error {
|
|
| 22 |
- container.Lock() |
|
| 23 |
- defer container.Unlock() |
|
| 21 |
+func (daemon *Daemon) containerUnpause(ctr *container.Container) error {
|
|
| 22 |
+ ctr.Lock() |
|
| 23 |
+ defer ctr.Unlock() |
|
| 24 | 24 |
|
| 25 | 25 |
// We cannot unpause the container which is not paused |
| 26 |
- if !container.Paused {
|
|
| 27 |
- return fmt.Errorf("Container %s is not paused", container.ID)
|
|
| 26 |
+ if !ctr.Paused {
|
|
| 27 |
+ return fmt.Errorf("Container %s is not paused", ctr.ID)
|
|
| 28 | 28 |
} |
| 29 | 29 |
|
| 30 |
- if err := daemon.containerd.Resume(context.Background(), container.ID); err != nil {
|
|
| 31 |
- return fmt.Errorf("Cannot unpause container %s: %s", container.ID, err)
|
|
| 30 |
+ if err := daemon.containerd.Resume(context.Background(), ctr.ID); err != nil {
|
|
| 31 |
+ return fmt.Errorf("Cannot unpause container %s: %s", ctr.ID, err)
|
|
| 32 | 32 |
} |
| 33 | 33 |
|
| 34 |
- container.Paused = false |
|
| 35 |
- daemon.setStateCounter(container) |
|
| 36 |
- daemon.updateHealthMonitor(container) |
|
| 37 |
- daemon.LogContainerEvent(container, "unpause") |
|
| 34 |
+ ctr.Paused = false |
|
| 35 |
+ daemon.setStateCounter(ctr) |
|
| 36 |
+ daemon.updateHealthMonitor(ctr) |
|
| 37 |
+ daemon.LogContainerEvent(ctr, "unpause") |
|
| 38 | 38 |
|
| 39 |
- if err := container.CheckpointTo(daemon.containersReplica); err != nil {
|
|
| 39 |
+ if err := ctr.CheckpointTo(daemon.containersReplica); err != nil {
|
|
| 40 | 40 |
logrus.WithError(err).Warn("could not save container to disk")
|
| 41 | 41 |
} |
| 42 | 42 |
|
| ... | ... |
@@ -35,57 +35,57 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro |
| 35 | 35 |
return nil |
| 36 | 36 |
} |
| 37 | 37 |
|
| 38 |
- container, err := daemon.GetContainer(name) |
|
| 38 |
+ ctr, err := daemon.GetContainer(name) |
|
| 39 | 39 |
if err != nil {
|
| 40 | 40 |
return err |
| 41 | 41 |
} |
| 42 | 42 |
|
| 43 | 43 |
restoreConfig := false |
| 44 |
- backupHostConfig := *container.HostConfig |
|
| 44 |
+ backupHostConfig := *ctr.HostConfig |
|
| 45 | 45 |
defer func() {
|
| 46 | 46 |
if restoreConfig {
|
| 47 |
- container.Lock() |
|
| 48 |
- container.HostConfig = &backupHostConfig |
|
| 49 |
- container.CheckpointTo(daemon.containersReplica) |
|
| 50 |
- container.Unlock() |
|
| 47 |
+ ctr.Lock() |
|
| 48 |
+ ctr.HostConfig = &backupHostConfig |
|
| 49 |
+ ctr.CheckpointTo(daemon.containersReplica) |
|
| 50 |
+ ctr.Unlock() |
|
| 51 | 51 |
} |
| 52 | 52 |
}() |
| 53 | 53 |
|
| 54 |
- if container.RemovalInProgress || container.Dead {
|
|
| 55 |
- return errCannotUpdate(container.ID, fmt.Errorf("container is marked for removal and cannot be \"update\""))
|
|
| 54 |
+ if ctr.RemovalInProgress || ctr.Dead {
|
|
| 55 |
+ return errCannotUpdate(ctr.ID, fmt.Errorf("container is marked for removal and cannot be \"update\""))
|
|
| 56 | 56 |
} |
| 57 | 57 |
|
| 58 |
- container.Lock() |
|
| 59 |
- if err := container.UpdateContainer(hostConfig); err != nil {
|
|
| 58 |
+ ctr.Lock() |
|
| 59 |
+ if err := ctr.UpdateContainer(hostConfig); err != nil {
|
|
| 60 | 60 |
restoreConfig = true |
| 61 |
- container.Unlock() |
|
| 62 |
- return errCannotUpdate(container.ID, err) |
|
| 61 |
+ ctr.Unlock() |
|
| 62 |
+ return errCannotUpdate(ctr.ID, err) |
|
| 63 | 63 |
} |
| 64 |
- if err := container.CheckpointTo(daemon.containersReplica); err != nil {
|
|
| 64 |
+ if err := ctr.CheckpointTo(daemon.containersReplica); err != nil {
|
|
| 65 | 65 |
restoreConfig = true |
| 66 |
- container.Unlock() |
|
| 67 |
- return errCannotUpdate(container.ID, err) |
|
| 66 |
+ ctr.Unlock() |
|
| 67 |
+ return errCannotUpdate(ctr.ID, err) |
|
| 68 | 68 |
} |
| 69 |
- container.Unlock() |
|
| 69 |
+ ctr.Unlock() |
|
| 70 | 70 |
|
| 71 | 71 |
// if Restart Policy changed, we need to update container monitor |
| 72 | 72 |
if hostConfig.RestartPolicy.Name != "" {
|
| 73 |
- container.UpdateMonitor(hostConfig.RestartPolicy) |
|
| 73 |
+ ctr.UpdateMonitor(hostConfig.RestartPolicy) |
|
| 74 | 74 |
} |
| 75 | 75 |
|
| 76 | 76 |
// If container is not running, update hostConfig struct is enough, |
| 77 | 77 |
// resources will be updated when the container is started again. |
| 78 | 78 |
// If container is running (including paused), we need to update configs |
| 79 | 79 |
// to the real world. |
| 80 |
- if container.IsRunning() && !container.IsRestarting() {
|
|
| 81 |
- if err := daemon.containerd.UpdateResources(context.Background(), container.ID, toContainerdResources(hostConfig.Resources)); err != nil {
|
|
| 80 |
+ if ctr.IsRunning() && !ctr.IsRestarting() {
|
|
| 81 |
+ if err := daemon.containerd.UpdateResources(context.Background(), ctr.ID, toContainerdResources(hostConfig.Resources)); err != nil {
|
|
| 82 | 82 |
restoreConfig = true |
| 83 | 83 |
// TODO: it would be nice if containerd responded with better errors here so we can classify this better. |
| 84 |
- return errCannotUpdate(container.ID, errdefs.System(err)) |
|
| 84 |
+ return errCannotUpdate(ctr.ID, errdefs.System(err)) |
|
| 85 | 85 |
} |
| 86 | 86 |
} |
| 87 | 87 |
|
| 88 |
- daemon.LogContainerEvent(container, "update") |
|
| 88 |
+ daemon.LogContainerEvent(ctr, "update") |
|
| 89 | 89 |
|
| 90 | 90 |
return nil |
| 91 | 91 |
} |
| ... | ... |
@@ -34,12 +34,12 @@ func TestBindDaemonRoot(t *testing.T) {
|
| 34 | 34 |
"source is /": "/", |
| 35 | 35 |
} {
|
| 36 | 36 |
t.Run(desc, func(t *testing.T) {
|
| 37 |
- mount := mount.Mount{
|
|
| 37 |
+ mnt := mount.Mount{
|
|
| 38 | 38 |
Type: mount.TypeBind, |
| 39 | 39 |
Source: source, |
| 40 | 40 |
BindOptions: test.opts, |
| 41 | 41 |
} |
| 42 |
- needsProp, err := d.validateBindDaemonRoot(mount) |
|
| 42 |
+ needsProp, err := d.validateBindDaemonRoot(mnt) |
|
| 43 | 43 |
if (err != nil) != test.err {
|
| 44 | 44 |
t.Fatalf("expected err=%v, got: %v", test.err, err)
|
| 45 | 45 |
} |
| ... | ... |
@@ -82,12 +82,12 @@ func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, er |
| 82 | 82 |
// metadata, the ownership must be set properly for potential container |
| 83 | 83 |
// remapped root (user namespaces) |
| 84 | 84 |
rootIDs := daemon.idMapping.RootPair() |
| 85 |
- for _, mount := range netMounts {
|
|
| 85 |
+ for _, mnt := range netMounts {
|
|
| 86 | 86 |
// we should only modify ownership of network files within our own container |
| 87 | 87 |
// metadata repository. If the user specifies a mount path external, it is |
| 88 | 88 |
// up to the user to make sure the file has proper ownership for userns |
| 89 |
- if strings.Index(mount.Source, daemon.repository) == 0 {
|
|
| 90 |
- if err := os.Chown(mount.Source, rootIDs.UID, rootIDs.GID); err != nil {
|
|
| 89 |
+ if strings.Index(mnt.Source, daemon.repository) == 0 {
|
|
| 90 |
+ if err := os.Chown(mnt.Source, rootIDs.UID, rootIDs.GID); err != nil {
|
|
| 91 | 91 |
return nil, err |
| 92 | 92 |
} |
| 93 | 93 |
} |