Move the Container struct to its own package, so other packages don't need to
import the daemon package when they want to use this struct.

Signed-off-by: David Calavera <david.calavera@gmail.com>
Signed-off-by: Tibor Vass <tibor@docker.com>
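
A quick illustration of what the move buys a consumer of the struct (hypothetical code, not part of this diff; it assumes the docker source tree and its dependencies):

package main

import (
	"fmt"

	"github.com/docker/docker/container"
)

func main() {
	// NewBaseContainer is introduced by this change; the id and root values
	// here are made up purely for illustration.
	c := container.NewBaseContainer("abc123", "/var/lib/docker/containers/abc123")
	c.Name = "/example"
	// Name and ID come from CommonContainer, now reachable without importing
	// the daemon package at all.
	fmt.Println(c.Name, c.ID)
}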
@@ -9,6 +9,7 @@ import (
 	"os"
 
 	// TODO: remove dependency on daemon
+	"github.com/docker/docker/container"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/runconfig"
@@ -115,13 +116,11 @@ type Docker interface {
 	// Pull tells Docker to pull image referenced by `name`.
 	Pull(name string) (*image.Image, error)
 
-	// TODO: move daemon.Container to its own package
-
 	// Container looks up a Docker container referenced by `id`.
-	Container(id string) (*daemon.Container, error)
+	Container(id string) (*container.Container, error)
 	// Create creates a new Docker container and returns potential warnings
 	// TODO: put warnings in the error
-	Create(*runconfig.Config, *runconfig.HostConfig) (*daemon.Container, []string, error)
+	Create(*runconfig.Config, *runconfig.HostConfig) (*container.Container, []string, error)
 	// Remove removes a container specified by `id`.
 	Remove(id string, cfg *daemon.ContainerRmConfig) error
 	// Commit creates a new Docker image from an existing Docker container.
@@ -131,7 +130,7 @@ type Docker interface {
|
| 131 | 131 |
// TODO: make an Extract method instead of passing `decompress` |
| 132 | 132 |
// TODO: do not pass a FileInfo, instead refactor the archive package to export a Walk function that can be used |
| 133 | 133 |
// with Context.Walk |
| 134 |
- Copy(c *daemon.Container, destPath string, src FileInfo, decompress bool) error |
|
| 134 |
+ Copy(c *container.Container, destPath string, src FileInfo, decompress bool) error |
|
| 135 | 135 |
|
| 136 | 136 |
// Retain retains an image avoiding it to be removed or overwritten until a corresponding Release() call. |
| 137 | 137 |
// TODO: remove |
| ... | ... |
@@ -140,13 +139,13 @@ type Docker interface {
|
| 140 | 140 |
// TODO: remove |
| 141 | 141 |
Release(sessionID string, activeImages []string) |
| 142 | 142 |
// Kill stops the container execution abruptly. |
| 143 |
- Kill(c *daemon.Container) error |
|
| 143 |
+ Kill(c *container.Container) error |
|
| 144 | 144 |
// Mount mounts the root filesystem for the container. |
| 145 |
- Mount(c *daemon.Container) error |
|
| 145 |
+ Mount(c *container.Container) error |
|
| 146 | 146 |
// Unmount unmounts the root filesystem for the container. |
| 147 |
- Unmount(c *daemon.Container) error |
|
| 147 |
+ Unmount(c *container.Container) error |
|
| 148 | 148 |
// Start starts a new container |
| 149 |
- Start(c *daemon.Container) error |
|
| 149 |
+ Start(c *container.Container) error |
|
| 150 | 150 |
} |
| 151 | 151 |
|
| 152 | 152 |
// ImageCache abstracts an image cache store. |
| ... | ... |
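
Since the interface now speaks in *container.Container, builder-side helpers can be written against a narrow, locally defined interface with no daemon import; a hedged sketch (the starter interface and startByID helper below are hypothetical, not part of this diff):

package builderexample

import "github.com/docker/docker/container"

// starter is a hypothetical subset of the Docker interface above, listing
// only the two methods this helper needs.
type starter interface {
	Container(id string) (*container.Container, error)
	Start(c *container.Container) error
}

// startByID looks up a container and starts it without touching the daemon package.
func startByID(d starter, id string) error {
	c, err := d.Container(id)
	if err != nil {
		return err
	}
	return d.Start(c)
}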
@@ -22,6 +22,7 @@ import (
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/builder/dockerfile/parser"
+	"github.com/docker/docker/container"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/archive"
@@ -419,8 +420,8 @@ func (b *Builder) processImageFrom(img *image.Image) error {
 	}
 
 	// The default path will be blank on Windows (set by HCS)
-	if len(b.runConfig.Env) == 0 && daemon.DefaultPathEnv != "" {
-		b.runConfig.Env = append(b.runConfig.Env, "PATH="+daemon.DefaultPathEnv)
+	if len(b.runConfig.Env) == 0 && container.DefaultPathEnv != "" {
+		b.runConfig.Env = append(b.runConfig.Env, "PATH="+container.DefaultPathEnv)
 	}
 
 	// Process ONBUILD triggers if they exist
@@ -492,7 +493,7 @@ func (b *Builder) probeCache() (bool, error) {
 	return true, nil
 }
 
-func (b *Builder) create() (*daemon.Container, error) {
+func (b *Builder) create() (*container.Container, error) {
 	if b.image == "" && !b.noBaseImage {
 		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
 	}
@@ -542,7 +543,7 @@ func (b *Builder) create() (*daemon.Container, error) {
 	return c, nil
 }
 
-func (b *Builder) run(c *daemon.Container) error {
+func (b *Builder) run(c *container.Container) error {
 	var errCh chan error
 	if b.Verbose {
 		errCh = c.Attach(nil, b.Stdout, b.Stderr)
new file mode 100644
@@ -0,0 +1,69 @@
+package container
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/pkg/archive"
+)
+
+// ResolvePath resolves the given path in the container to a resource on the
+// host. Returns a resolved path (absolute path to the resource on the host),
+// the absolute path to the resource relative to the container's rootfs, and
+// a error if the path points to outside the container's rootfs.
+func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) {
+	// Consider the given path as an absolute path in the container.
+	absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)
+
+	// Split the absPath into its Directory and Base components. We will
+	// resolve the dir in the scope of the container then append the base.
+	dirPath, basePath := filepath.Split(absPath)
+
+	resolvedDirPath, err := container.GetResourcePath(dirPath)
+	if err != nil {
+		return "", "", err
+	}
+
+	// resolvedDirPath will have been cleaned (no trailing path separators) so
+	// we can manually join it with the base path element.
+	resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+
+	return resolvedPath, absPath, nil
+}
+
+// StatPath is the unexported version of StatPath. Locks and mounts should
+// be acquired before calling this method and the given path should be fully
+// resolved to a path on the host corresponding to the given absolute path
+// inside the container.
+func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) {
+	lstat, err := os.Lstat(resolvedPath)
+	if err != nil {
+		return nil, err
+	}
+
+	var linkTarget string
+	if lstat.Mode()&os.ModeSymlink != 0 {
+		// Fully evaluate the symlink in the scope of the container rootfs.
+		hostPath, err := container.GetResourcePath(absPath)
+		if err != nil {
+			return nil, err
+		}
+
+		linkTarget, err = filepath.Rel(container.BaseFS, hostPath)
+		if err != nil {
+			return nil, err
+		}
+
+		// Make it an absolute path.
+		linkTarget = filepath.Join(string(filepath.Separator), linkTarget)
+	}
+
+	return &types.ContainerPathStat{
+		Name:       filepath.Base(absPath),
+		Size:       lstat.Size(),
+		Mode:       lstat.Mode(),
+		Mtime:      lstat.ModTime(),
+		LinkTarget: linkTarget,
+	}, nil
+}
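
ResolvePath and StatPath above are designed to be used as a pair; a minimal sketch of the call sequence (hypothetical helper, assuming the caller already holds the container lock and mount as the doc comments require):

package containerexample

import (
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/container"
)

// statInContainer resolves a path against the container's rootfs and stats it.
func statInContainer(c *container.Container, path string) (*types.ContainerPathStat, error) {
	resolvedPath, absPath, err := c.ResolvePath(path)
	if err != nil {
		return nil, err
	}
	return c.StatPath(resolvedPath, absPath)
}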
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,559 @@ |
| 0 |
+package container |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "encoding/json" |
|
| 4 |
+ "fmt" |
|
| 5 |
+ "io" |
|
| 6 |
+ "os" |
|
| 7 |
+ "path/filepath" |
|
| 8 |
+ "sync" |
|
| 9 |
+ "syscall" |
|
| 10 |
+ "time" |
|
| 11 |
+ |
|
| 12 |
+ "github.com/Sirupsen/logrus" |
|
| 13 |
+ "github.com/docker/docker/daemon/exec" |
|
| 14 |
+ "github.com/docker/docker/daemon/execdriver" |
|
| 15 |
+ "github.com/docker/docker/daemon/logger" |
|
| 16 |
+ "github.com/docker/docker/daemon/logger/jsonfilelog" |
|
| 17 |
+ "github.com/docker/docker/daemon/network" |
|
| 18 |
+ derr "github.com/docker/docker/errors" |
|
| 19 |
+ "github.com/docker/docker/image" |
|
| 20 |
+ "github.com/docker/docker/layer" |
|
| 21 |
+ "github.com/docker/docker/pkg/nat" |
|
| 22 |
+ "github.com/docker/docker/pkg/promise" |
|
| 23 |
+ "github.com/docker/docker/pkg/signal" |
|
| 24 |
+ "github.com/docker/docker/pkg/symlink" |
|
| 25 |
+ "github.com/docker/docker/runconfig" |
|
| 26 |
+ "github.com/docker/docker/volume" |
|
| 27 |
+ "github.com/opencontainers/runc/libcontainer/label" |
|
| 28 |
+) |
|
| 29 |
+ |
|
| 30 |
+const configFileName = "config.v2.json" |
|
| 31 |
+ |
|
| 32 |
+// CommonContainer holds the fields for a container which are |
|
| 33 |
+// applicable across all platforms supported by the daemon. |
|
| 34 |
+type CommonContainer struct {
|
|
| 35 |
+ *runconfig.StreamConfig |
|
| 36 |
+ // embed for Container to support states directly. |
|
| 37 |
+ *State `json:"State"` // Needed for remote api version <= 1.11 |
|
| 38 |
+ Root string `json:"-"` // Path to the "home" of the container, including metadata. |
|
| 39 |
+ BaseFS string `json:"-"` // Path to the graphdriver mountpoint |
|
| 40 |
+ RWLayer layer.RWLayer `json:"-"` |
|
| 41 |
+ ID string |
|
| 42 |
+ Created time.Time |
|
| 43 |
+ Path string |
|
| 44 |
+ Args []string |
|
| 45 |
+ Config *runconfig.Config |
|
| 46 |
+ ImageID image.ID `json:"Image"` |
|
| 47 |
+ NetworkSettings *network.Settings |
|
| 48 |
+ LogPath string |
|
| 49 |
+ Name string |
|
| 50 |
+ Driver string |
|
| 51 |
+ // MountLabel contains the options for the 'mount' command |
|
| 52 |
+ MountLabel string |
|
| 53 |
+ ProcessLabel string |
|
| 54 |
+ RestartCount int |
|
| 55 |
+ HasBeenStartedBefore bool |
|
| 56 |
+ HasBeenManuallyStopped bool // used for unless-stopped restart policy |
|
| 57 |
+ MountPoints map[string]*volume.MountPoint |
|
| 58 |
+ HostConfig *runconfig.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable |
|
| 59 |
+ Command *execdriver.Command `json:"-"` |
|
| 60 |
+ monitor *containerMonitor |
|
| 61 |
+ ExecCommands *exec.Store `json:"-"` |
|
| 62 |
+ // logDriver for closing |
|
| 63 |
+ LogDriver logger.Logger `json:"-"` |
|
| 64 |
+ LogCopier *logger.Copier `json:"-"` |
|
| 65 |
+} |
|
| 66 |
+ |
|
| 67 |
+// NewBaseContainer creates a new container with its |
|
| 68 |
+// basic configuration. |
|
| 69 |
+func NewBaseContainer(id, root string) *Container {
|
|
| 70 |
+ return &Container{
|
|
| 71 |
+ CommonContainer: CommonContainer{
|
|
| 72 |
+ ID: id, |
|
| 73 |
+ State: NewState(), |
|
| 74 |
+ ExecCommands: exec.NewStore(), |
|
| 75 |
+ Root: root, |
|
| 76 |
+ MountPoints: make(map[string]*volume.MountPoint), |
|
| 77 |
+ StreamConfig: runconfig.NewStreamConfig(), |
|
| 78 |
+ }, |
|
| 79 |
+ } |
|
| 80 |
+} |
|
| 81 |
+ |
|
| 82 |
+// FromDisk loads the container configuration stored in the host. |
|
| 83 |
+func (container *Container) FromDisk() error {
|
|
| 84 |
+ pth, err := container.ConfigPath() |
|
| 85 |
+ if err != nil {
|
|
| 86 |
+ return err |
|
| 87 |
+ } |
|
| 88 |
+ |
|
| 89 |
+ jsonSource, err := os.Open(pth) |
|
| 90 |
+ if err != nil {
|
|
| 91 |
+ return err |
|
| 92 |
+ } |
|
| 93 |
+ defer jsonSource.Close() |
|
| 94 |
+ |
|
| 95 |
+ dec := json.NewDecoder(jsonSource) |
|
| 96 |
+ |
|
| 97 |
+ // Load container settings |
|
| 98 |
+ if err := dec.Decode(container); err != nil {
|
|
| 99 |
+ return err |
|
| 100 |
+ } |
|
| 101 |
+ |
|
| 102 |
+ if err := label.ReserveLabel(container.ProcessLabel); err != nil {
|
|
| 103 |
+ return err |
|
| 104 |
+ } |
|
| 105 |
+ return container.readHostConfig() |
|
| 106 |
+} |
|
| 107 |
+ |
|
| 108 |
+// ToDisk saves the container configuration on disk. |
|
| 109 |
+func (container *Container) ToDisk() error {
|
|
| 110 |
+ pth, err := container.ConfigPath() |
|
| 111 |
+ if err != nil {
|
|
| 112 |
+ return err |
|
| 113 |
+ } |
|
| 114 |
+ |
|
| 115 |
+ jsonSource, err := os.Create(pth) |
|
| 116 |
+ if err != nil {
|
|
| 117 |
+ return err |
|
| 118 |
+ } |
|
| 119 |
+ defer jsonSource.Close() |
|
| 120 |
+ |
|
| 121 |
+ enc := json.NewEncoder(jsonSource) |
|
| 122 |
+ |
|
| 123 |
+ // Save container settings |
|
| 124 |
+ if err := enc.Encode(container); err != nil {
|
|
| 125 |
+ return err |
|
| 126 |
+ } |
|
| 127 |
+ |
|
| 128 |
+ return container.WriteHostConfig() |
|
| 129 |
+} |
|
| 130 |
+ |
|
| 131 |
+// ToDiskLocking saves the container configuration on disk in a thread safe way. |
|
| 132 |
+func (container *Container) ToDiskLocking() error {
|
|
| 133 |
+ container.Lock() |
|
| 134 |
+ err := container.ToDisk() |
|
| 135 |
+ container.Unlock() |
|
| 136 |
+ return err |
|
| 137 |
+} |
|
| 138 |
+ |
|
| 139 |
+// readHostConfig reads the host configuration from disk for the container. |
|
| 140 |
+func (container *Container) readHostConfig() error {
|
|
| 141 |
+ container.HostConfig = &runconfig.HostConfig{}
|
|
| 142 |
+ // If the hostconfig file does not exist, do not read it. |
|
| 143 |
+ // (We still have to initialize container.HostConfig, |
|
| 144 |
+ // but that's OK, since we just did that above.) |
|
| 145 |
+ pth, err := container.HostConfigPath() |
|
| 146 |
+ if err != nil {
|
|
| 147 |
+ return err |
|
| 148 |
+ } |
|
| 149 |
+ |
|
| 150 |
+ f, err := os.Open(pth) |
|
| 151 |
+ if err != nil {
|
|
| 152 |
+ if os.IsNotExist(err) {
|
|
| 153 |
+ return nil |
|
| 154 |
+ } |
|
| 155 |
+ return err |
|
| 156 |
+ } |
|
| 157 |
+ defer f.Close() |
|
| 158 |
+ |
|
| 159 |
+ if err := json.NewDecoder(f).Decode(&container.HostConfig); err != nil {
|
|
| 160 |
+ return err |
|
| 161 |
+ } |
|
| 162 |
+ |
|
| 163 |
+ container.InitDNSHostConfig() |
|
| 164 |
+ |
|
| 165 |
+ return nil |
|
| 166 |
+} |
|
| 167 |
+ |
|
| 168 |
+// WriteHostConfig saves the host configuration on disk for the container. |
|
| 169 |
+func (container *Container) WriteHostConfig() error {
|
|
| 170 |
+ pth, err := container.HostConfigPath() |
|
| 171 |
+ if err != nil {
|
|
| 172 |
+ return err |
|
| 173 |
+ } |
|
| 174 |
+ |
|
| 175 |
+ f, err := os.Create(pth) |
|
| 176 |
+ if err != nil {
|
|
| 177 |
+ return err |
|
| 178 |
+ } |
|
| 179 |
+ defer f.Close() |
|
| 180 |
+ |
|
| 181 |
+ return json.NewEncoder(f).Encode(&container.HostConfig) |
|
| 182 |
+} |
|
| 183 |
+ |
|
| 184 |
+// GetResourcePath evaluates `path` in the scope of the container's BaseFS, with proper path |
|
| 185 |
+// sanitisation. Symlinks are all scoped to the BaseFS of the container, as |
|
| 186 |
+// though the container's BaseFS was `/`. |
|
| 187 |
+// |
|
| 188 |
+// The BaseFS of a container is the host-facing path which is bind-mounted as |
|
| 189 |
+// `/` inside the container. This method is essentially used to access a |
|
| 190 |
+// particular path inside the container as though you were a process in that |
|
| 191 |
+// container. |
|
| 192 |
+// |
|
| 193 |
+// NOTE: The returned path is *only* safely scoped inside the container's BaseFS |
|
| 194 |
+// if no component of the returned path changes (such as a component |
|
| 195 |
+// symlinking to a different path) between using this method and using the |
|
| 196 |
+// path. See symlink.FollowSymlinkInScope for more details. |
|
| 197 |
+func (container *Container) GetResourcePath(path string) (string, error) {
|
|
| 198 |
+ // IMPORTANT - These are paths on the OS where the daemon is running, hence |
|
| 199 |
+ // any filepath operations must be done in an OS agnostic way. |
|
| 200 |
+ cleanPath := filepath.Join(string(os.PathSeparator), path) |
|
| 201 |
+ r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS) |
|
| 202 |
+ return r, e |
|
| 203 |
+} |
|
| 204 |
+ |
|
| 205 |
+// GetRootResourcePath evaluates `path` in the scope of the container's root, with proper path |
|
| 206 |
+// sanitisation. Symlinks are all scoped to the root of the container, as |
|
| 207 |
+// though the container's root was `/`. |
|
| 208 |
+// |
|
| 209 |
+// The root of a container is the host-facing configuration metadata directory. |
|
| 210 |
+// Only use this method to safely access the container's `container.json` or |
|
| 211 |
+// other metadata files. If in doubt, use container.GetResourcePath. |
|
| 212 |
+// |
|
| 213 |
+// NOTE: The returned path is *only* safely scoped inside the container's root |
|
| 214 |
+// if no component of the returned path changes (such as a component |
|
| 215 |
+// symlinking to a different path) between using this method and using the |
|
| 216 |
+// path. See symlink.FollowSymlinkInScope for more details. |
|
| 217 |
+func (container *Container) GetRootResourcePath(path string) (string, error) {
|
|
| 218 |
+ // IMPORTANT - These are paths on the OS where the daemon is running, hence |
|
| 219 |
+ // any filepath operations must be done in an OS agnostic way. |
|
| 220 |
+ cleanPath := filepath.Join(string(os.PathSeparator), path) |
|
| 221 |
+ return symlink.FollowSymlinkInScope(filepath.Join(container.Root, cleanPath), container.Root) |
|
| 222 |
+} |
|
| 223 |
+ |
|
| 224 |
+// ExitOnNext signals to the monitor that it should not restart the container |
|
| 225 |
+// after we send the kill signal. |
|
| 226 |
+func (container *Container) ExitOnNext() {
|
|
| 227 |
+ container.monitor.ExitOnNext() |
|
| 228 |
+} |
|
| 229 |
+ |
|
| 230 |
+// Resize changes the TTY of the process running inside the container |
|
| 231 |
+// to the given height and width. The container must be running. |
|
| 232 |
+func (container *Container) Resize(h, w int) error {
|
|
| 233 |
+ if err := container.Command.ProcessConfig.Terminal.Resize(h, w); err != nil {
|
|
| 234 |
+ return err |
|
| 235 |
+ } |
|
| 236 |
+ return nil |
|
| 237 |
+} |
|
| 238 |
+ |
|
| 239 |
+// HostConfigPath returns the path to the container's JSON hostconfig |
|
| 240 |
+func (container *Container) HostConfigPath() (string, error) {
|
|
| 241 |
+ return container.GetRootResourcePath("hostconfig.json")
|
|
| 242 |
+} |
|
| 243 |
+ |
|
| 244 |
+// ConfigPath returns the path to the container's JSON config |
|
| 245 |
+func (container *Container) ConfigPath() (string, error) {
|
|
| 246 |
+ return container.GetRootResourcePath(configFileName) |
|
| 247 |
+} |
|
| 248 |
+ |
|
| 249 |
+func validateID(id string) error {
|
|
| 250 |
+ if id == "" {
|
|
| 251 |
+ return derr.ErrorCodeEmptyID |
|
| 252 |
+ } |
|
| 253 |
+ return nil |
|
| 254 |
+} |
|
| 255 |
+ |
|
| 256 |
+// Returns true if the container exposes a certain port |
|
| 257 |
+func (container *Container) exposes(p nat.Port) bool {
|
|
| 258 |
+ _, exists := container.Config.ExposedPorts[p] |
|
| 259 |
+ return exists |
|
| 260 |
+} |
|
| 261 |
+ |
|
| 262 |
+// GetLogConfig returns the log configuration for the container. |
|
| 263 |
+func (container *Container) GetLogConfig(defaultConfig runconfig.LogConfig) runconfig.LogConfig {
|
|
| 264 |
+ cfg := container.HostConfig.LogConfig |
|
| 265 |
+ if cfg.Type != "" || len(cfg.Config) > 0 { // container has log driver configured
|
|
| 266 |
+ if cfg.Type == "" {
|
|
| 267 |
+ cfg.Type = jsonfilelog.Name |
|
| 268 |
+ } |
|
| 269 |
+ return cfg |
|
| 270 |
+ } |
|
| 271 |
+ // Use daemon's default log config for containers |
|
| 272 |
+ return defaultConfig |
|
| 273 |
+} |
|
| 274 |
+ |
|
| 275 |
+// StartLogger starts a new logger driver for the container. |
|
| 276 |
+func (container *Container) StartLogger(cfg runconfig.LogConfig) (logger.Logger, error) {
|
|
| 277 |
+ c, err := logger.GetLogDriver(cfg.Type) |
|
| 278 |
+ if err != nil {
|
|
| 279 |
+ return nil, derr.ErrorCodeLoggingFactory.WithArgs(err) |
|
| 280 |
+ } |
|
| 281 |
+ ctx := logger.Context{
|
|
| 282 |
+ Config: cfg.Config, |
|
| 283 |
+ ContainerID: container.ID, |
|
| 284 |
+ ContainerName: container.Name, |
|
| 285 |
+ ContainerEntrypoint: container.Path, |
|
| 286 |
+ ContainerArgs: container.Args, |
|
| 287 |
+ ContainerImageID: container.ImageID.String(), |
|
| 288 |
+ ContainerImageName: container.Config.Image, |
|
| 289 |
+ ContainerCreated: container.Created, |
|
| 290 |
+ ContainerEnv: container.Config.Env, |
|
| 291 |
+ ContainerLabels: container.Config.Labels, |
|
| 292 |
+ } |
|
| 293 |
+ |
|
| 294 |
+ // Set logging file for "json-logger" |
|
| 295 |
+ if cfg.Type == jsonfilelog.Name {
|
|
| 296 |
+ ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID))
|
|
| 297 |
+ if err != nil {
|
|
| 298 |
+ return nil, err |
|
| 299 |
+ } |
|
| 300 |
+ } |
|
| 301 |
+ return c(ctx) |
|
| 302 |
+} |
|
| 303 |
+ |
|
| 304 |
+// GetProcessLabel returns the process label for the container. |
|
| 305 |
+func (container *Container) GetProcessLabel() string {
|
|
| 306 |
+ // even if we have a process label return "" if we are running |
|
| 307 |
+ // in privileged mode |
|
| 308 |
+ if container.HostConfig.Privileged {
|
|
| 309 |
+ return "" |
|
| 310 |
+ } |
|
| 311 |
+ return container.ProcessLabel |
|
| 312 |
+} |
|
| 313 |
+ |
|
| 314 |
+// GetMountLabel returns the mounting label for the container. |
|
| 315 |
+// This label is empty if the container is privileged. |
|
| 316 |
+func (container *Container) GetMountLabel() string {
|
|
| 317 |
+ if container.HostConfig.Privileged {
|
|
| 318 |
+ return "" |
|
| 319 |
+ } |
|
| 320 |
+ return container.MountLabel |
|
| 321 |
+} |
|
| 322 |
+ |
|
| 323 |
+// GetExecIDs returns the list of exec commands running on the container. |
|
| 324 |
+func (container *Container) GetExecIDs() []string {
|
|
| 325 |
+ return container.ExecCommands.List() |
|
| 326 |
+} |
|
| 327 |
+ |
|
| 328 |
+// Attach connects to the container's TTY, delegating to standard |
|
| 329 |
+// streams or websockets depending on the configuration. |
|
| 330 |
+func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
|
|
| 331 |
+ return AttachStreams(container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr) |
|
| 332 |
+} |
|
| 333 |
+ |
|
| 334 |
+// AttachStreams connects streams to a TTY. |
|
| 335 |
+// Used by exec too. Should this move somewhere else? |
|
| 336 |
+func AttachStreams(streamConfig *runconfig.StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
|
|
| 337 |
+ var ( |
|
| 338 |
+ cStdout, cStderr io.ReadCloser |
|
| 339 |
+ cStdin io.WriteCloser |
|
| 340 |
+ wg sync.WaitGroup |
|
| 341 |
+ errors = make(chan error, 3) |
|
| 342 |
+ ) |
|
| 343 |
+ |
|
| 344 |
+ if stdin != nil && openStdin {
|
|
| 345 |
+ cStdin = streamConfig.StdinPipe() |
|
| 346 |
+ wg.Add(1) |
|
| 347 |
+ } |
|
| 348 |
+ |
|
| 349 |
+ if stdout != nil {
|
|
| 350 |
+ cStdout = streamConfig.StdoutPipe() |
|
| 351 |
+ wg.Add(1) |
|
| 352 |
+ } |
|
| 353 |
+ |
|
| 354 |
+ if stderr != nil {
|
|
| 355 |
+ cStderr = streamConfig.StderrPipe() |
|
| 356 |
+ wg.Add(1) |
|
| 357 |
+ } |
|
| 358 |
+ |
|
| 359 |
+ // Connect stdin of container to the http conn. |
|
| 360 |
+ go func() {
|
|
| 361 |
+ if stdin == nil || !openStdin {
|
|
| 362 |
+ return |
|
| 363 |
+ } |
|
| 364 |
+ logrus.Debugf("attach: stdin: begin")
|
|
| 365 |
+ defer func() {
|
|
| 366 |
+ if stdinOnce && !tty {
|
|
| 367 |
+ cStdin.Close() |
|
| 368 |
+ } else {
|
|
| 369 |
+ // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr |
|
| 370 |
+ if cStdout != nil {
|
|
| 371 |
+ cStdout.Close() |
|
| 372 |
+ } |
|
| 373 |
+ if cStderr != nil {
|
|
| 374 |
+ cStderr.Close() |
|
| 375 |
+ } |
|
| 376 |
+ } |
|
| 377 |
+ wg.Done() |
|
| 378 |
+ logrus.Debugf("attach: stdin: end")
|
|
| 379 |
+ }() |
|
| 380 |
+ |
|
| 381 |
+ var err error |
|
| 382 |
+ if tty {
|
|
| 383 |
+ _, err = copyEscapable(cStdin, stdin) |
|
| 384 |
+ } else {
|
|
| 385 |
+ _, err = io.Copy(cStdin, stdin) |
|
| 386 |
+ |
|
| 387 |
+ } |
|
| 388 |
+ if err == io.ErrClosedPipe {
|
|
| 389 |
+ err = nil |
|
| 390 |
+ } |
|
| 391 |
+ if err != nil {
|
|
| 392 |
+ logrus.Errorf("attach: stdin: %s", err)
|
|
| 393 |
+ errors <- err |
|
| 394 |
+ return |
|
| 395 |
+ } |
|
| 396 |
+ }() |
|
| 397 |
+ |
|
| 398 |
+ attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) {
|
|
| 399 |
+ if stream == nil {
|
|
| 400 |
+ return |
|
| 401 |
+ } |
|
| 402 |
+ defer func() {
|
|
| 403 |
+ // Make sure stdin gets closed |
|
| 404 |
+ if stdin != nil {
|
|
| 405 |
+ stdin.Close() |
|
| 406 |
+ } |
|
| 407 |
+ streamPipe.Close() |
|
| 408 |
+ wg.Done() |
|
| 409 |
+ logrus.Debugf("attach: %s: end", name)
|
|
| 410 |
+ }() |
|
| 411 |
+ |
|
| 412 |
+ logrus.Debugf("attach: %s: begin", name)
|
|
| 413 |
+ _, err := io.Copy(stream, streamPipe) |
|
| 414 |
+ if err == io.ErrClosedPipe {
|
|
| 415 |
+ err = nil |
|
| 416 |
+ } |
|
| 417 |
+ if err != nil {
|
|
| 418 |
+ logrus.Errorf("attach: %s: %v", name, err)
|
|
| 419 |
+ errors <- err |
|
| 420 |
+ } |
|
| 421 |
+ } |
|
| 422 |
+ |
|
| 423 |
+ go attachStream("stdout", stdout, cStdout)
|
|
| 424 |
+ go attachStream("stderr", stderr, cStderr)
|
|
| 425 |
+ |
|
| 426 |
+ return promise.Go(func() error {
|
|
| 427 |
+ wg.Wait() |
|
| 428 |
+ close(errors) |
|
| 429 |
+ for err := range errors {
|
|
| 430 |
+ if err != nil {
|
|
| 431 |
+ return err |
|
| 432 |
+ } |
|
| 433 |
+ } |
|
| 434 |
+ return nil |
|
| 435 |
+ }) |
|
| 436 |
+} |
|
| 437 |
+ |
|
| 438 |
+// Code c/c from io.Copy() modified to handle escape sequence |
|
| 439 |
+func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
|
|
| 440 |
+ buf := make([]byte, 32*1024) |
|
| 441 |
+ for {
|
|
| 442 |
+ nr, er := src.Read(buf) |
|
| 443 |
+ if nr > 0 {
|
|
| 444 |
+ // ---- Docker addition |
|
| 445 |
+ // char 16 is C-p |
|
| 446 |
+ if nr == 1 && buf[0] == 16 {
|
|
| 447 |
+ nr, er = src.Read(buf) |
|
| 448 |
+ // char 17 is C-q |
|
| 449 |
+ if nr == 1 && buf[0] == 17 {
|
|
| 450 |
+ if err := src.Close(); err != nil {
|
|
| 451 |
+ return 0, err |
|
| 452 |
+ } |
|
| 453 |
+ return 0, nil |
|
| 454 |
+ } |
|
| 455 |
+ } |
|
| 456 |
+ // ---- End of docker |
|
| 457 |
+ nw, ew := dst.Write(buf[0:nr]) |
|
| 458 |
+ if nw > 0 {
|
|
| 459 |
+ written += int64(nw) |
|
| 460 |
+ } |
|
| 461 |
+ if ew != nil {
|
|
| 462 |
+ err = ew |
|
| 463 |
+ break |
|
| 464 |
+ } |
|
| 465 |
+ if nr != nw {
|
|
| 466 |
+ err = io.ErrShortWrite |
|
| 467 |
+ break |
|
| 468 |
+ } |
|
| 469 |
+ } |
|
| 470 |
+ if er == io.EOF {
|
|
| 471 |
+ break |
|
| 472 |
+ } |
|
| 473 |
+ if er != nil {
|
|
| 474 |
+ err = er |
|
| 475 |
+ break |
|
| 476 |
+ } |
|
| 477 |
+ } |
|
| 478 |
+ return written, err |
|
| 479 |
+} |
|
| 480 |
+ |
|
| 481 |
+// ShouldRestart decides whether the daemon should restart the container or not. |
|
| 482 |
+// This is based on the container's restart policy. |
|
| 483 |
+func (container *Container) ShouldRestart() bool {
|
|
| 484 |
+ return container.HostConfig.RestartPolicy.Name == "always" || |
|
| 485 |
+ (container.HostConfig.RestartPolicy.Name == "unless-stopped" && !container.HasBeenManuallyStopped) || |
|
| 486 |
+ (container.HostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0) |
|
| 487 |
+} |
|
| 488 |
+ |
|
| 489 |
+// AddBindMountPoint adds a new bind mount point configuration to the container. |
|
| 490 |
+func (container *Container) AddBindMountPoint(name, source, destination string, rw bool) {
|
|
| 491 |
+ container.MountPoints[destination] = &volume.MountPoint{
|
|
| 492 |
+ Name: name, |
|
| 493 |
+ Source: source, |
|
| 494 |
+ Destination: destination, |
|
| 495 |
+ RW: rw, |
|
| 496 |
+ } |
|
| 497 |
+} |
|
| 498 |
+ |
|
| 499 |
+// AddLocalMountPoint adds a new local mount point configuration to the container. |
|
| 500 |
+func (container *Container) AddLocalMountPoint(name, destination string, rw bool) {
|
|
| 501 |
+ container.MountPoints[destination] = &volume.MountPoint{
|
|
| 502 |
+ Name: name, |
|
| 503 |
+ Driver: volume.DefaultDriverName, |
|
| 504 |
+ Destination: destination, |
|
| 505 |
+ RW: rw, |
|
| 506 |
+ } |
|
| 507 |
+} |
|
| 508 |
+ |
|
| 509 |
+// AddMountPointWithVolume adds a new mount point configured with a volume to the container. |
|
| 510 |
+func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) {
|
|
| 511 |
+ container.MountPoints[destination] = &volume.MountPoint{
|
|
| 512 |
+ Name: vol.Name(), |
|
| 513 |
+ Driver: vol.DriverName(), |
|
| 514 |
+ Destination: destination, |
|
| 515 |
+ RW: rw, |
|
| 516 |
+ Volume: vol, |
|
| 517 |
+ } |
|
| 518 |
+} |
|
| 519 |
+ |
|
| 520 |
+// IsDestinationMounted checkes whether a path is mounted on the container or not. |
|
| 521 |
+func (container *Container) IsDestinationMounted(destination string) bool {
|
|
| 522 |
+ return container.MountPoints[destination] != nil |
|
| 523 |
+} |
|
| 524 |
+ |
|
| 525 |
+// StopSignal returns the signal used to stop the container. |
|
| 526 |
+func (container *Container) StopSignal() int {
|
|
| 527 |
+ var stopSignal syscall.Signal |
|
| 528 |
+ if container.Config.StopSignal != "" {
|
|
| 529 |
+ stopSignal, _ = signal.ParseSignal(container.Config.StopSignal) |
|
| 530 |
+ } |
|
| 531 |
+ |
|
| 532 |
+ if int(stopSignal) == 0 {
|
|
| 533 |
+ stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal) |
|
| 534 |
+ } |
|
| 535 |
+ return int(stopSignal) |
|
| 536 |
+} |
|
| 537 |
+ |
|
| 538 |
+// InitDNSHostConfig ensures that the dns fields are never nil. |
|
| 539 |
+// New containers don't ever have those fields nil, |
|
| 540 |
+// but pre created containers can still have those nil values. |
|
| 541 |
+// The non-recommended host configuration in the start api can |
|
| 542 |
+// make these fields nil again, this corrects that issue until |
|
| 543 |
+// we remove that behavior for good. |
|
| 544 |
+// See https://github.com/docker/docker/pull/17779 |
|
| 545 |
+// for a more detailed explanation on why we don't want that. |
|
| 546 |
+func (container *Container) InitDNSHostConfig() {
|
|
| 547 |
+ if container.HostConfig.DNS == nil {
|
|
| 548 |
+ container.HostConfig.DNS = make([]string, 0) |
|
| 549 |
+ } |
|
| 550 |
+ |
|
| 551 |
+ if container.HostConfig.DNSSearch == nil {
|
|
| 552 |
+ container.HostConfig.DNSSearch = make([]string, 0) |
|
| 553 |
+ } |
|
| 554 |
+ |
|
| 555 |
+ if container.HostConfig.DNSOptions == nil {
|
|
| 556 |
+ container.HostConfig.DNSOptions = make([]string, 0) |
|
| 557 |
+ } |
|
| 558 |
+} |
| 0 | 559 |
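
The persistence helpers in the new package (NewBaseContainer, ToDisk, FromDisk) round-trip config.v2.json and hostconfig.json under the container's root directory; a rough sketch of that round trip (hypothetical helper, assuming the root directory already exists on disk):

package containerexample

import (
	"github.com/docker/docker/container"
	"github.com/docker/docker/runconfig"
)

// persistAndReload writes a freshly created container record to disk and reads it back.
func persistAndReload(id, root string) (*container.Container, error) {
	c := container.NewBaseContainer(id, root)
	c.HostConfig = &runconfig.HostConfig{} // saved separately to hostconfig.json
	if err := c.ToDisk(); err != nil {
		return nil, err
	}
	reloaded := container.NewBaseContainer(id, root)
	if err := reloaded.FromDisk(); err != nil {
		return nil, err
	}
	return reloaded, nil
}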
new file mode 100644
@@ -0,0 +1,36 @@
+package container
+
+import (
+	"testing"
+
+	"github.com/docker/docker/pkg/signal"
+	"github.com/docker/docker/runconfig"
+)
+
+func TestContainerStopSignal(t *testing.T) {
+	c := &Container{
+		CommonContainer: CommonContainer{
+			Config: &runconfig.Config{},
+		},
+	}
+
+	def, err := signal.ParseSignal(signal.DefaultStopSignal)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	s := c.StopSignal()
+	if s != int(def) {
+		t.Fatalf("Expected %v, got %v", def, s)
+	}
+
+	c = &Container{
+		CommonContainer: CommonContainer{
+			Config: &runconfig.Config{StopSignal: "SIGKILL"},
+		},
+	}
+	s = c.StopSignal()
+	if s != 9 {
+		t.Fatalf("Expected 9, got %v", s)
+	}
+}
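
For reference, the name-to-number mapping the test relies on comes straight from the signal package used above; a tiny sketch of the same lookup StopSignal performs (assumes the docker source tree):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/signal"
)

func main() {
	// On Linux this prints 9, matching the expectation in the test above.
	sig, err := signal.ParseSignal("SIGKILL")
	if err != nil {
		panic(err)
	}
	fmt.Println(int(sig))
}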
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,701 @@ |
| 0 |
+// +build linux freebsd |
|
| 1 |
+ |
|
| 2 |
+package container |
|
| 3 |
+ |
|
| 4 |
+import ( |
|
| 5 |
+ "fmt" |
|
| 6 |
+ "io/ioutil" |
|
| 7 |
+ "net" |
|
| 8 |
+ "os" |
|
| 9 |
+ "path/filepath" |
|
| 10 |
+ "strconv" |
|
| 11 |
+ "strings" |
|
| 12 |
+ "syscall" |
|
| 13 |
+ |
|
| 14 |
+ "github.com/Sirupsen/logrus" |
|
| 15 |
+ "github.com/docker/docker/daemon/execdriver" |
|
| 16 |
+ "github.com/docker/docker/daemon/network" |
|
| 17 |
+ derr "github.com/docker/docker/errors" |
|
| 18 |
+ "github.com/docker/docker/pkg/chrootarchive" |
|
| 19 |
+ "github.com/docker/docker/pkg/nat" |
|
| 20 |
+ "github.com/docker/docker/pkg/symlink" |
|
| 21 |
+ "github.com/docker/docker/pkg/system" |
|
| 22 |
+ "github.com/docker/docker/runconfig" |
|
| 23 |
+ "github.com/docker/docker/utils" |
|
| 24 |
+ "github.com/docker/docker/volume" |
|
| 25 |
+ "github.com/docker/libnetwork" |
|
| 26 |
+ "github.com/docker/libnetwork/netlabel" |
|
| 27 |
+ "github.com/docker/libnetwork/options" |
|
| 28 |
+ "github.com/docker/libnetwork/types" |
|
| 29 |
+ "github.com/opencontainers/runc/libcontainer/label" |
|
| 30 |
+) |
|
| 31 |
+ |
|
| 32 |
+const ( |
|
| 33 |
+ // DefaultPathEnv is unix style list of directories to search for |
|
| 34 |
+ // executables. Each directory is separated from the next by a colon |
|
| 35 |
+ // ':' character . |
|
| 36 |
+ DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" |
|
| 37 |
+ |
|
| 38 |
+ // DefaultSHMSize is the default size (64MB) of the SHM which will be mounted in the container |
|
| 39 |
+ DefaultSHMSize int64 = 67108864 |
|
| 40 |
+) |
|
| 41 |
+ |
|
| 42 |
+// Container holds the fields specific to unixen implementations. See |
|
| 43 |
+// CommonContainer for standard fields common to all containers. |
|
| 44 |
+type Container struct {
|
|
| 45 |
+ CommonContainer |
|
| 46 |
+ |
|
| 47 |
+ // Fields below here are platform specific. |
|
| 48 |
+ AppArmorProfile string |
|
| 49 |
+ HostnamePath string |
|
| 50 |
+ HostsPath string |
|
| 51 |
+ ShmPath string |
|
| 52 |
+ MqueuePath string |
|
| 53 |
+ ResolvConfPath string |
|
| 54 |
+} |
|
| 55 |
+ |
|
| 56 |
+// CreateDaemonEnvironment returns the list of all environment variables given the list of |
|
| 57 |
+// environment variables related to links. |
|
| 58 |
+// Sets PATH, HOSTNAME and if container.Config.Tty is set: TERM. |
|
| 59 |
+// The defaults set here do not override the values in container.Config.Env |
|
| 60 |
+func (container *Container) CreateDaemonEnvironment(linkedEnv []string) []string {
|
|
| 61 |
+ // if a domain name was specified, append it to the hostname (see #7851) |
|
| 62 |
+ fullHostname := container.Config.Hostname |
|
| 63 |
+ if container.Config.Domainname != "" {
|
|
| 64 |
+ fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname)
|
|
| 65 |
+ } |
|
| 66 |
+ // Setup environment |
|
| 67 |
+ env := []string{
|
|
| 68 |
+ "PATH=" + DefaultPathEnv, |
|
| 69 |
+ "HOSTNAME=" + fullHostname, |
|
| 70 |
+ // Note: we don't set HOME here because it'll get autoset intelligently |
|
| 71 |
+ // based on the value of USER inside dockerinit, but only if it isn't |
|
| 72 |
+ // set already (ie, that can be overridden by setting HOME via -e or ENV |
|
| 73 |
+ // in a Dockerfile). |
|
| 74 |
+ } |
|
| 75 |
+ if container.Config.Tty {
|
|
| 76 |
+ env = append(env, "TERM=xterm") |
|
| 77 |
+ } |
|
| 78 |
+ env = append(env, linkedEnv...) |
|
| 79 |
+ // because the env on the container can override certain default values |
|
| 80 |
+ // we need to replace the 'env' keys where they match and append anything |
|
| 81 |
+ // else. |
|
| 82 |
+ env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env) |
|
| 83 |
+ |
|
| 84 |
+ return env |
|
| 85 |
+} |
|
| 86 |
+ |
|
| 87 |
+// TrySetNetworkMount attempts to set the network mounts given a provided destination and |
|
| 88 |
+// the path to use for it; return true if the given destination was a network mount file |
|
| 89 |
+func (container *Container) TrySetNetworkMount(destination string, path string) bool {
|
|
| 90 |
+ if destination == "/etc/resolv.conf" {
|
|
| 91 |
+ container.ResolvConfPath = path |
|
| 92 |
+ return true |
|
| 93 |
+ } |
|
| 94 |
+ if destination == "/etc/hostname" {
|
|
| 95 |
+ container.HostnamePath = path |
|
| 96 |
+ return true |
|
| 97 |
+ } |
|
| 98 |
+ if destination == "/etc/hosts" {
|
|
| 99 |
+ container.HostsPath = path |
|
| 100 |
+ return true |
|
| 101 |
+ } |
|
| 102 |
+ |
|
| 103 |
+ return false |
|
| 104 |
+} |
|
| 105 |
+ |
|
| 106 |
+// BuildHostnameFile writes the container's hostname file. |
|
| 107 |
+func (container *Container) BuildHostnameFile() error {
|
|
| 108 |
+ hostnamePath, err := container.GetRootResourcePath("hostname")
|
|
| 109 |
+ if err != nil {
|
|
| 110 |
+ return err |
|
| 111 |
+ } |
|
| 112 |
+ container.HostnamePath = hostnamePath |
|
| 113 |
+ |
|
| 114 |
+ if container.Config.Domainname != "" {
|
|
| 115 |
+ return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644)
|
|
| 116 |
+ } |
|
| 117 |
+ return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) |
|
| 118 |
+} |
|
| 119 |
+ |
|
| 120 |
+// GetEndpointInNetwork returns the container's endpoint to the provided network. |
|
| 121 |
+func (container *Container) GetEndpointInNetwork(n libnetwork.Network) (libnetwork.Endpoint, error) {
|
|
| 122 |
+ endpointName := strings.TrimPrefix(container.Name, "/") |
|
| 123 |
+ return n.EndpointByName(endpointName) |
|
| 124 |
+} |
|
| 125 |
+ |
|
| 126 |
+func (container *Container) buildPortMapInfo(ep libnetwork.Endpoint) error {
|
|
| 127 |
+ if ep == nil {
|
|
| 128 |
+ return derr.ErrorCodeEmptyEndpoint |
|
| 129 |
+ } |
|
| 130 |
+ |
|
| 131 |
+ networkSettings := container.NetworkSettings |
|
| 132 |
+ if networkSettings == nil {
|
|
| 133 |
+ return derr.ErrorCodeEmptyNetwork |
|
| 134 |
+ } |
|
| 135 |
+ |
|
| 136 |
+ driverInfo, err := ep.DriverInfo() |
|
| 137 |
+ if err != nil {
|
|
| 138 |
+ return err |
|
| 139 |
+ } |
|
| 140 |
+ |
|
| 141 |
+ if driverInfo == nil {
|
|
| 142 |
+ // It is not an error for epInfo to be nil |
|
| 143 |
+ return nil |
|
| 144 |
+ } |
|
| 145 |
+ |
|
| 146 |
+ if networkSettings.Ports == nil {
|
|
| 147 |
+ networkSettings.Ports = nat.PortMap{}
|
|
| 148 |
+ } |
|
| 149 |
+ |
|
| 150 |
+ if expData, ok := driverInfo[netlabel.ExposedPorts]; ok {
|
|
| 151 |
+ if exposedPorts, ok := expData.([]types.TransportPort); ok {
|
|
| 152 |
+ for _, tp := range exposedPorts {
|
|
| 153 |
+ natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port))) |
|
| 154 |
+ if err != nil {
|
|
| 155 |
+ return derr.ErrorCodeParsingPort.WithArgs(tp.Port, err) |
|
| 156 |
+ } |
|
| 157 |
+ networkSettings.Ports[natPort] = nil |
|
| 158 |
+ } |
|
| 159 |
+ } |
|
| 160 |
+ } |
|
| 161 |
+ |
|
| 162 |
+ mapData, ok := driverInfo[netlabel.PortMap] |
|
| 163 |
+ if !ok {
|
|
| 164 |
+ return nil |
|
| 165 |
+ } |
|
| 166 |
+ |
|
| 167 |
+ if portMapping, ok := mapData.([]types.PortBinding); ok {
|
|
| 168 |
+ for _, pp := range portMapping {
|
|
| 169 |
+ natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port))) |
|
| 170 |
+ if err != nil {
|
|
| 171 |
+ return err |
|
| 172 |
+ } |
|
| 173 |
+ natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))}
|
|
| 174 |
+ networkSettings.Ports[natPort] = append(networkSettings.Ports[natPort], natBndg) |
|
| 175 |
+ } |
|
| 176 |
+ } |
|
| 177 |
+ |
|
| 178 |
+ return nil |
|
| 179 |
+} |
|
| 180 |
+ |
|
| 181 |
+// BuildEndpointInfo sets endpoint-related fields on container.NetworkSettings based on the provided network and endpoint. |
|
| 182 |
+func (container *Container) BuildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint) error {
|
|
| 183 |
+ if ep == nil {
|
|
| 184 |
+ return derr.ErrorCodeEmptyEndpoint |
|
| 185 |
+ } |
|
| 186 |
+ |
|
| 187 |
+ networkSettings := container.NetworkSettings |
|
| 188 |
+ if networkSettings == nil {
|
|
| 189 |
+ return derr.ErrorCodeEmptyNetwork |
|
| 190 |
+ } |
|
| 191 |
+ |
|
| 192 |
+ epInfo := ep.Info() |
|
| 193 |
+ if epInfo == nil {
|
|
| 194 |
+ // It is not an error to get an empty endpoint info |
|
| 195 |
+ return nil |
|
| 196 |
+ } |
|
| 197 |
+ |
|
| 198 |
+ if _, ok := networkSettings.Networks[n.Name()]; !ok {
|
|
| 199 |
+ networkSettings.Networks[n.Name()] = new(network.EndpointSettings) |
|
| 200 |
+ } |
|
| 201 |
+ networkSettings.Networks[n.Name()].EndpointID = ep.ID() |
|
| 202 |
+ |
|
| 203 |
+ iface := epInfo.Iface() |
|
| 204 |
+ if iface == nil {
|
|
| 205 |
+ return nil |
|
| 206 |
+ } |
|
| 207 |
+ |
|
| 208 |
+ if iface.MacAddress() != nil {
|
|
| 209 |
+ networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String() |
|
| 210 |
+ } |
|
| 211 |
+ |
|
| 212 |
+ if iface.Address() != nil {
|
|
| 213 |
+ ones, _ := iface.Address().Mask.Size() |
|
| 214 |
+ networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String() |
|
| 215 |
+ networkSettings.Networks[n.Name()].IPPrefixLen = ones |
|
| 216 |
+ } |
|
| 217 |
+ |
|
| 218 |
+ if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil {
|
|
| 219 |
+ onesv6, _ := iface.AddressIPv6().Mask.Size() |
|
| 220 |
+ networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String() |
|
| 221 |
+ networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6 |
|
| 222 |
+ } |
|
| 223 |
+ |
|
| 224 |
+ return nil |
|
| 225 |
+} |
|
| 226 |
+ |
|
| 227 |
+// UpdateJoinInfo updates network settings when container joins network n with endpoint ep. |
|
| 228 |
+func (container *Container) UpdateJoinInfo(n libnetwork.Network, ep libnetwork.Endpoint) error {
|
|
| 229 |
+ if err := container.buildPortMapInfo(ep); err != nil {
|
|
| 230 |
+ return err |
|
| 231 |
+ } |
|
| 232 |
+ |
|
| 233 |
+ epInfo := ep.Info() |
|
| 234 |
+ if epInfo == nil {
|
|
| 235 |
+ // It is not an error to get an empty endpoint info |
|
| 236 |
+ return nil |
|
| 237 |
+ } |
|
| 238 |
+ if epInfo.Gateway() != nil {
|
|
| 239 |
+ container.NetworkSettings.Networks[n.Name()].Gateway = epInfo.Gateway().String() |
|
| 240 |
+ } |
|
| 241 |
+ if epInfo.GatewayIPv6().To16() != nil {
|
|
| 242 |
+ container.NetworkSettings.Networks[n.Name()].IPv6Gateway = epInfo.GatewayIPv6().String() |
|
| 243 |
+ } |
|
| 244 |
+ |
|
| 245 |
+ return nil |
|
| 246 |
+} |
|
| 247 |
+ |
|
| 248 |
+// UpdateSandboxNetworkSettings updates the sandbox ID and Key. |
|
| 249 |
+func (container *Container) UpdateSandboxNetworkSettings(sb libnetwork.Sandbox) error {
|
|
| 250 |
+ container.NetworkSettings.SandboxID = sb.ID() |
|
| 251 |
+ container.NetworkSettings.SandboxKey = sb.Key() |
|
| 252 |
+ return nil |
|
| 253 |
+} |
|
| 254 |
+ |
|
| 255 |
+// BuildCreateEndpointOptions builds endpoint options from a given network. |
|
| 256 |
+func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network) ([]libnetwork.EndpointOption, error) {
|
|
| 257 |
+ var ( |
|
| 258 |
+ portSpecs = make(nat.PortSet) |
|
| 259 |
+ bindings = make(nat.PortMap) |
|
| 260 |
+ pbList []types.PortBinding |
|
| 261 |
+ exposeList []types.TransportPort |
|
| 262 |
+ createOptions []libnetwork.EndpointOption |
|
| 263 |
+ ) |
|
| 264 |
+ |
|
| 265 |
+ if n.Name() == "bridge" || container.NetworkSettings.IsAnonymousEndpoint {
|
|
| 266 |
+ createOptions = append(createOptions, libnetwork.CreateOptionAnonymous()) |
|
| 267 |
+ } |
|
| 268 |
+ |
|
| 269 |
+ // Other configs are applicable only for the endpoint in the network |
|
| 270 |
+ // to which container was connected to on docker run. |
|
| 271 |
+ if n.Name() != container.HostConfig.NetworkMode.NetworkName() && |
|
| 272 |
+ !(n.Name() == "bridge" && container.HostConfig.NetworkMode.IsDefault()) {
|
|
| 273 |
+ return createOptions, nil |
|
| 274 |
+ } |
|
| 275 |
+ |
|
| 276 |
+ if container.Config.ExposedPorts != nil {
|
|
| 277 |
+ portSpecs = container.Config.ExposedPorts |
|
| 278 |
+ } |
|
| 279 |
+ |
|
| 280 |
+ if container.HostConfig.PortBindings != nil {
|
|
| 281 |
+ for p, b := range container.HostConfig.PortBindings {
|
|
| 282 |
+ bindings[p] = []nat.PortBinding{}
|
|
| 283 |
+ for _, bb := range b {
|
|
| 284 |
+ bindings[p] = append(bindings[p], nat.PortBinding{
|
|
| 285 |
+ HostIP: bb.HostIP, |
|
| 286 |
+ HostPort: bb.HostPort, |
|
| 287 |
+ }) |
|
| 288 |
+ } |
|
| 289 |
+ } |
|
| 290 |
+ } |
|
| 291 |
+ |
|
| 292 |
+ ports := make([]nat.Port, len(portSpecs)) |
|
| 293 |
+ var i int |
|
| 294 |
+ for p := range portSpecs {
|
|
| 295 |
+ ports[i] = p |
|
| 296 |
+ i++ |
|
| 297 |
+ } |
|
| 298 |
+ nat.SortPortMap(ports, bindings) |
|
| 299 |
+ for _, port := range ports {
|
|
| 300 |
+ expose := types.TransportPort{}
|
|
| 301 |
+ expose.Proto = types.ParseProtocol(port.Proto()) |
|
| 302 |
+ expose.Port = uint16(port.Int()) |
|
| 303 |
+ exposeList = append(exposeList, expose) |
|
| 304 |
+ |
|
| 305 |
+ pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto}
|
|
| 306 |
+ binding := bindings[port] |
|
| 307 |
+ for i := 0; i < len(binding); i++ {
|
|
| 308 |
+ pbCopy := pb.GetCopy() |
|
| 309 |
+ newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) |
|
| 310 |
+ var portStart, portEnd int |
|
| 311 |
+ if err == nil {
|
|
| 312 |
+ portStart, portEnd, err = newP.Range() |
|
| 313 |
+ } |
|
| 314 |
+ if err != nil {
|
|
| 315 |
+ return nil, derr.ErrorCodeHostPort.WithArgs(binding[i].HostPort, err) |
|
| 316 |
+ } |
|
| 317 |
+ pbCopy.HostPort = uint16(portStart) |
|
| 318 |
+ pbCopy.HostPortEnd = uint16(portEnd) |
|
| 319 |
+ pbCopy.HostIP = net.ParseIP(binding[i].HostIP) |
|
| 320 |
+ pbList = append(pbList, pbCopy) |
|
| 321 |
+ } |
|
| 322 |
+ |
|
| 323 |
+ if container.HostConfig.PublishAllPorts && len(binding) == 0 {
|
|
| 324 |
+ pbList = append(pbList, pb) |
|
| 325 |
+ } |
|
| 326 |
+ } |
|
| 327 |
+ |
|
| 328 |
+ createOptions = append(createOptions, |
|
| 329 |
+ libnetwork.CreateOptionPortMapping(pbList), |
|
| 330 |
+ libnetwork.CreateOptionExposedPorts(exposeList)) |
|
| 331 |
+ |
|
| 332 |
+ if container.Config.MacAddress != "" {
|
|
| 333 |
+ mac, err := net.ParseMAC(container.Config.MacAddress) |
|
| 334 |
+ if err != nil {
|
|
| 335 |
+ return nil, err |
|
| 336 |
+ } |
|
| 337 |
+ |
|
| 338 |
+ genericOption := options.Generic{
|
|
| 339 |
+ netlabel.MacAddress: mac, |
|
| 340 |
+ } |
|
| 341 |
+ |
|
| 342 |
+ createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption)) |
|
| 343 |
+ } |
|
| 344 |
+ |
|
| 345 |
+ return createOptions, nil |
|
| 346 |
+} |
|
| 347 |
+ |
|
| 348 |
+// SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir |
|
| 349 |
+func (container *Container) SetupWorkingDirectory() error {
|
|
| 350 |
+ if container.Config.WorkingDir == "" {
|
|
| 351 |
+ return nil |
|
| 352 |
+ } |
|
| 353 |
+ container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) |
|
| 354 |
+ |
|
| 355 |
+ pth, err := container.GetResourcePath(container.Config.WorkingDir) |
|
| 356 |
+ if err != nil {
|
|
| 357 |
+ return err |
|
| 358 |
+ } |
|
| 359 |
+ |
|
| 360 |
+ pthInfo, err := os.Stat(pth) |
|
| 361 |
+ if err != nil {
|
|
| 362 |
+ if !os.IsNotExist(err) {
|
|
| 363 |
+ return err |
|
| 364 |
+ } |
|
| 365 |
+ |
|
| 366 |
+ if err := system.MkdirAll(pth, 0755); err != nil {
|
|
| 367 |
+ return err |
|
| 368 |
+ } |
|
| 369 |
+ } |
|
| 370 |
+ if pthInfo != nil && !pthInfo.IsDir() {
|
|
| 371 |
+ return derr.ErrorCodeNotADir.WithArgs(container.Config.WorkingDir) |
|
| 372 |
+ } |
|
| 373 |
+ return nil |
|
| 374 |
+} |
|
| 375 |
+ |
|
| 376 |
+// DisconnectFromNetwork disconnects a container from a network |
|
| 377 |
+func (container *Container) DisconnectFromNetwork(n libnetwork.Network) error {
|
|
| 378 |
+ if !container.Running {
|
|
| 379 |
+ return derr.ErrorCodeNotRunning.WithArgs(container.ID) |
|
| 380 |
+ } |
|
| 381 |
+ |
|
| 382 |
+ if container.HostConfig.NetworkMode.IsHost() && runconfig.NetworkMode(n.Type()).IsHost() {
|
|
| 383 |
+ return runconfig.ErrConflictHostNetwork |
|
| 384 |
+ } |
|
| 385 |
+ |
|
| 386 |
+ if err := container.disconnectFromNetwork(n); err != nil {
|
|
| 387 |
+ return err |
|
| 388 |
+ } |
|
| 389 |
+ |
|
| 390 |
+ if err := container.ToDiskLocking(); err != nil {
|
|
| 391 |
+ return fmt.Errorf("Error saving container to disk: %v", err)
|
|
| 392 |
+ } |
|
| 393 |
+ return nil |
|
| 394 |
+} |
|
| 395 |
+ |
|
| 396 |
+func (container *Container) disconnectFromNetwork(n libnetwork.Network) error {
|
|
| 397 |
+ var ( |
|
| 398 |
+ ep libnetwork.Endpoint |
|
| 399 |
+ sbox libnetwork.Sandbox |
|
| 400 |
+ ) |
|
| 401 |
+ |
|
| 402 |
+ s := func(current libnetwork.Endpoint) bool {
|
|
| 403 |
+ epInfo := current.Info() |
|
| 404 |
+ if epInfo == nil {
|
|
| 405 |
+ return false |
|
| 406 |
+ } |
|
| 407 |
+ if sb := epInfo.Sandbox(); sb != nil {
|
|
| 408 |
+ if sb.ContainerID() == container.ID {
|
|
| 409 |
+ ep = current |
|
| 410 |
+ sbox = sb |
|
| 411 |
+ return true |
|
| 412 |
+ } |
|
| 413 |
+ } |
|
| 414 |
+ return false |
|
| 415 |
+ } |
|
| 416 |
+ n.WalkEndpoints(s) |
|
| 417 |
+ |
|
| 418 |
+ if ep == nil {
|
|
| 419 |
+ return fmt.Errorf("container %s is not connected to the network", container.ID)
|
|
| 420 |
+ } |
|
| 421 |
+ |
|
| 422 |
+ if err := ep.Leave(sbox); err != nil {
|
|
| 423 |
+ return fmt.Errorf("container %s failed to leave network %s: %v", container.ID, n.Name(), err)
|
|
| 424 |
+ } |
|
| 425 |
+ |
|
| 426 |
+ if err := ep.Delete(); err != nil {
|
|
| 427 |
+ return fmt.Errorf("endpoint delete failed for container %s on network %s: %v", container.ID, n.Name(), err)
|
|
| 428 |
+ } |
|
| 429 |
+ |
|
| 430 |
+ delete(container.NetworkSettings.Networks, n.Name()) |
|
| 431 |
+ return nil |
|
| 432 |
+} |
|
| 433 |
+ |
|
| 434 |
+// appendNetworkMounts appends any network mounts to the array of mount points passed in |
|
| 435 |
+func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) {
|
|
| 436 |
+ for _, mnt := range container.NetworkMounts() {
|
|
| 437 |
+ dest, err := container.GetResourcePath(mnt.Destination) |
|
| 438 |
+ if err != nil {
|
|
| 439 |
+ return nil, err |
|
| 440 |
+ } |
|
| 441 |
+ volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest})
|
|
| 442 |
+ } |
|
| 443 |
+ return volumeMounts, nil |
|
| 444 |
+} |
|
| 445 |
+ |
|
| 446 |
+// NetworkMounts returns the list of network mounts. |
|
| 447 |
+func (container *Container) NetworkMounts() []execdriver.Mount {
|
|
| 448 |
+ var mounts []execdriver.Mount |
|
| 449 |
+ shared := container.HostConfig.NetworkMode.IsContainer() |
|
| 450 |
+ if container.ResolvConfPath != "" {
|
|
| 451 |
+ if _, err := os.Stat(container.ResolvConfPath); err != nil {
|
|
| 452 |
+ logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err)
|
|
| 453 |
+ } else {
|
|
| 454 |
+ label.Relabel(container.ResolvConfPath, container.MountLabel, shared) |
|
| 455 |
+ writable := !container.HostConfig.ReadonlyRootfs |
|
| 456 |
+ if m, exists := container.MountPoints["/etc/resolv.conf"]; exists {
|
|
| 457 |
+ writable = m.RW |
|
| 458 |
+ } |
|
| 459 |
+ mounts = append(mounts, execdriver.Mount{
|
|
| 460 |
+ Source: container.ResolvConfPath, |
|
| 461 |
+ Destination: "/etc/resolv.conf", |
|
| 462 |
+ Writable: writable, |
|
| 463 |
+ Private: true, |
|
| 464 |
+ }) |
|
| 465 |
+ } |
|
| 466 |
+ } |
|
| 467 |
+ if container.HostnamePath != "" {
|
|
| 468 |
+ if _, err := os.Stat(container.HostnamePath); err != nil {
|
|
| 469 |
+ logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err)
|
|
| 470 |
+ } else {
|
|
| 471 |
+ label.Relabel(container.HostnamePath, container.MountLabel, shared) |
|
| 472 |
+ writable := !container.HostConfig.ReadonlyRootfs |
|
| 473 |
+ if m, exists := container.MountPoints["/etc/hostname"]; exists {
|
|
| 474 |
+ writable = m.RW |
|
| 475 |
+ } |
|
| 476 |
+ mounts = append(mounts, execdriver.Mount{
|
|
| 477 |
+ Source: container.HostnamePath, |
|
| 478 |
+ Destination: "/etc/hostname", |
|
| 479 |
+ Writable: writable, |
|
| 480 |
+ Private: true, |
|
| 481 |
+ }) |
|
| 482 |
+ } |
|
| 483 |
+ } |
|
| 484 |
+ if container.HostsPath != "" {
|
|
| 485 |
+ if _, err := os.Stat(container.HostsPath); err != nil {
|
|
| 486 |
+ logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err)
|
|
| 487 |
+ } else {
|
|
| 488 |
+ label.Relabel(container.HostsPath, container.MountLabel, shared) |
|
| 489 |
+ writable := !container.HostConfig.ReadonlyRootfs |
|
| 490 |
+ if m, exists := container.MountPoints["/etc/hosts"]; exists {
|
|
| 491 |
+ writable = m.RW |
|
| 492 |
+ } |
|
| 493 |
+ mounts = append(mounts, execdriver.Mount{
|
|
| 494 |
+ Source: container.HostsPath, |
|
| 495 |
+ Destination: "/etc/hosts", |
|
| 496 |
+ Writable: writable, |
|
| 497 |
+ Private: true, |
|
| 498 |
+ }) |
|
| 499 |
+ } |
|
| 500 |
+ } |
|
| 501 |
+ return mounts |
|
| 502 |
+} |
|
| 503 |
+ |
|
| 504 |
+// CopyImagePathContent copies files in destination to the volume. |
|
| 505 |
+func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error {
|
|
| 506 |
+ rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS) |
|
| 507 |
+ if err != nil {
|
|
| 508 |
+ return err |
|
| 509 |
+ } |
|
| 510 |
+ |
|
| 511 |
+ if _, err = ioutil.ReadDir(rootfs); err != nil {
|
|
| 512 |
+ if os.IsNotExist(err) {
|
|
| 513 |
+ return nil |
|
| 514 |
+ } |
|
| 515 |
+ return err |
|
| 516 |
+ } |
|
| 517 |
+ |
|
| 518 |
+ path, err := v.Mount() |
|
| 519 |
+ if err != nil {
|
|
| 520 |
+ return err |
|
| 521 |
+ } |
|
| 522 |
+ |
|
| 523 |
+ if err := copyExistingContents(rootfs, path); err != nil {
|
|
| 524 |
+ return err |
|
| 525 |
+ } |
|
| 526 |
+ |
|
| 527 |
+ return v.Unmount() |
|
| 528 |
+} |
|
| 529 |
+ |
|
| 530 |
+// ShmResourcePath returns path to shm |
|
| 531 |
+func (container *Container) ShmResourcePath() (string, error) {
|
|
| 532 |
+ return container.GetRootResourcePath("shm")
|
|
| 533 |
+} |
|
| 534 |
+ |
|
| 535 |
+// MqueueResourcePath returns path to mqueue |
|
| 536 |
+func (container *Container) MqueueResourcePath() (string, error) {
|
|
| 537 |
+ return container.GetRootResourcePath("mqueue")
|
|
| 538 |
+} |
|
| 539 |
+ |
|
| 540 |
+// HasMountFor checks if path is a mountpoint |
|
| 541 |
+func (container *Container) HasMountFor(path string) bool {
|
|
| 542 |
+ _, exists := container.MountPoints[path] |
|
| 543 |
+ return exists |
|
| 544 |
+} |
|
| 545 |
+ |
|
| 546 |
+// UnmountIpcMounts uses the provided unmount function to unmount shm and mqueue if they were mounted |
|
| 547 |
+func (container *Container) UnmountIpcMounts(unmount func(pth string) error) {
|
|
| 548 |
+ if container.HostConfig.IpcMode.IsContainer() || container.HostConfig.IpcMode.IsHost() {
|
|
| 549 |
+ return |
|
| 550 |
+ } |
|
| 551 |
+ |
|
| 552 |
+ var warnings []string |
|
| 553 |
+ |
|
| 554 |
+ if !container.HasMountFor("/dev/shm") {
|
|
| 555 |
+ shmPath, err := container.ShmResourcePath() |
|
| 556 |
+ if err != nil {
|
|
| 557 |
+ logrus.Error(err) |
|
| 558 |
+ warnings = append(warnings, err.Error()) |
|
| 559 |
+ } else if shmPath != "" {
|
|
| 560 |
+ if err := unmount(shmPath); err != nil {
|
|
| 561 |
+ warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", shmPath, err))
|
|
| 562 |
+ } |
|
| 563 |
+ |
|
| 564 |
+ } |
|
| 565 |
+ } |
|
| 566 |
+ |
|
| 567 |
+ if !container.HasMountFor("/dev/mqueue") {
|
|
| 568 |
+ mqueuePath, err := container.MqueueResourcePath() |
|
| 569 |
+ if err != nil {
|
|
| 570 |
+ logrus.Error(err) |
|
| 571 |
+ warnings = append(warnings, err.Error()) |
|
| 572 |
+ } else if mqueuePath != "" {
|
|
| 573 |
+ if err := unmount(mqueuePath); err != nil {
|
|
| 574 |
+ warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", mqueuePath, err))
|
|
| 575 |
+ } |
|
| 576 |
+ } |
|
| 577 |
+ } |
|
| 578 |
+ |
|
| 579 |
+ if len(warnings) > 0 {
|
|
| 580 |
+ logrus.Warnf("failed to cleanup ipc mounts:\n%v", strings.Join(warnings, "\n"))
|
|
| 581 |
+ } |
|
| 582 |
+} |
|
| 583 |
+ |
|
| 584 |
+// IpcMounts returns the list of IPC mounts |
|
| 585 |
+func (container *Container) IpcMounts() []execdriver.Mount {
|
|
| 586 |
+ var mounts []execdriver.Mount |
|
| 587 |
+ |
|
| 588 |
+ if !container.HasMountFor("/dev/shm") {
|
|
| 589 |
+ label.SetFileLabel(container.ShmPath, container.MountLabel) |
|
| 590 |
+ mounts = append(mounts, execdriver.Mount{
|
|
| 591 |
+ Source: container.ShmPath, |
|
| 592 |
+ Destination: "/dev/shm", |
|
| 593 |
+ Writable: true, |
|
| 594 |
+ Private: true, |
|
| 595 |
+ }) |
|
| 596 |
+ } |
|
| 597 |
+ |
|
| 598 |
+ if !container.HasMountFor("/dev/mqueue") {
|
|
| 599 |
+ label.SetFileLabel(container.MqueuePath, container.MountLabel) |
|
| 600 |
+ mounts = append(mounts, execdriver.Mount{
|
|
| 601 |
+ Source: container.MqueuePath, |
|
| 602 |
+ Destination: "/dev/mqueue", |
|
| 603 |
+ Writable: true, |
|
| 604 |
+ Private: true, |
|
| 605 |
+ }) |
|
| 606 |
+ } |
|
| 607 |
+ return mounts |
|
| 608 |
+} |
|
| 609 |
+ |
|
| 610 |
+func detachMounted(path string) error {
|
|
| 611 |
+ return syscall.Unmount(path, syscall.MNT_DETACH) |
|
| 612 |
+} |
|
| 613 |
+ |
|
| 614 |
+// UnmountVolumes unmounts all volumes |
|
| 615 |
+func (container *Container) UnmountVolumes(forceSyscall bool) error {
|
|
| 616 |
+ var ( |
|
| 617 |
+ volumeMounts []volume.MountPoint |
|
| 618 |
+ err error |
|
| 619 |
+ ) |
|
| 620 |
+ |
|
| 621 |
+ for _, mntPoint := range container.MountPoints {
|
|
| 622 |
+ dest, err := container.GetResourcePath(mntPoint.Destination) |
|
| 623 |
+ if err != nil {
|
|
| 624 |
+ return err |
|
| 625 |
+ } |
|
| 626 |
+ |
|
| 627 |
+ volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest, Volume: mntPoint.Volume})
|
|
| 628 |
+ } |
|
| 629 |
+ |
|
| 630 |
+ // Append any network mounts to the list (this is a no-op on Windows) |
|
| 631 |
+ if volumeMounts, err = appendNetworkMounts(container, volumeMounts); err != nil {
|
|
| 632 |
+ return err |
|
| 633 |
+ } |
|
| 634 |
+ |
|
| 635 |
+ for _, volumeMount := range volumeMounts {
|
|
| 636 |
+ if forceSyscall {
|
|
| 637 |
+ if err := detachMounted(volumeMount.Destination); err != nil {
|
|
| 638 |
+ logrus.Warnf("%s unmountVolumes: Failed to do lazy umount %v", container.ID, err)
|
|
| 639 |
+ } |
|
| 640 |
+ } |
|
| 641 |
+ |
|
| 642 |
+ if volumeMount.Volume != nil {
|
|
| 643 |
+ if err := volumeMount.Volume.Unmount(); err != nil {
|
|
| 644 |
+ return err |
|
| 645 |
+ } |
|
| 646 |
+ } |
|
| 647 |
+ } |
|
| 648 |
+ |
|
| 649 |
+ return nil |
|
| 650 |
+} |
|
| 651 |
+ |
|
| 652 |
+// copyExistingContents copies from the source to the destination and |
|
| 653 |
+// ensures the ownership is appropriately set. |
|
| 654 |
+func copyExistingContents(source, destination string) error {
|
|
| 655 |
+ volList, err := ioutil.ReadDir(source) |
|
| 656 |
+ if err != nil {
|
|
| 657 |
+ return err |
|
| 658 |
+ } |
|
| 659 |
+ if len(volList) > 0 {
|
|
| 660 |
+ srcList, err := ioutil.ReadDir(destination) |
|
| 661 |
+ if err != nil {
|
|
| 662 |
+ return err |
|
| 663 |
+ } |
|
| 664 |
+ if len(srcList) == 0 {
|
|
| 665 |
+ // If the destination volume is empty, copy files from the source path into the volume |
|
| 666 |
+ if err := chrootarchive.CopyWithTar(source, destination); err != nil {
|
|
| 667 |
+ return err |
|
| 668 |
+ } |
|
| 669 |
+ } |
|
| 670 |
+ } |
|
| 671 |
+ return copyOwnership(source, destination) |
|
| 672 |
+} |
|
| 673 |
+ |
|
| 674 |
+// copyOwnership copies the permissions and uid:gid of the source file |
|
| 675 |
+// to the destination file |
|
| 676 |
+func copyOwnership(source, destination string) error {
|
|
| 677 |
+ stat, err := system.Stat(source) |
|
| 678 |
+ if err != nil {
|
|
| 679 |
+ return err |
|
| 680 |
+ } |
|
| 681 |
+ |
|
| 682 |
+ if err := os.Chown(destination, int(stat.UID()), int(stat.GID())); err != nil {
|
|
| 683 |
+ return err |
|
| 684 |
+ } |
|
| 685 |
+ |
|
| 686 |
+ return os.Chmod(destination, os.FileMode(stat.Mode())) |
|
| 687 |
+} |
|
| 688 |
+ |
|
| 689 |
+// TmpfsMounts returns the list of tmpfs mounts |
|
| 690 |
+func (container *Container) TmpfsMounts() []execdriver.Mount {
|
|
| 691 |
+ var mounts []execdriver.Mount |
|
| 692 |
+ for dest, data := range container.HostConfig.Tmpfs {
|
|
| 693 |
+ mounts = append(mounts, execdriver.Mount{
|
|
| 694 |
+ Source: "tmpfs", |
|
| 695 |
+ Destination: dest, |
|
| 696 |
+ Data: data, |
|
| 697 |
+ }) |
|
| 698 |
+ } |
|
| 699 |
+ return mounts |
|
| 700 |
+} |
| 0 | 701 |
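The unix-only helpers above (HasMountFor, IpcMounts, TmpfsMounts, UnmountIpcMounts, UnmountVolumes) are exported so that code outside the container package can assemble and tear down these mounts. As a rough sketch only, a caller in the daemon could gather the extra mounts for the exec driver along these lines (collectSpecialMounts is a hypothetical name, not part of this change):

    package daemon

    import (
        "github.com/docker/docker/container"
        "github.com/docker/docker/daemon/execdriver"
    )

    // collectSpecialMounts gathers the IPC and tmpfs mounts that are needed in
    // addition to the user-defined volume mounts.
    func collectSpecialMounts(c *container.Container) []execdriver.Mount {
        mounts := c.IpcMounts()                   // /dev/shm and /dev/mqueue, unless user-mounted
        return append(mounts, c.TmpfsMounts()...) // one entry per HostConfig.Tmpfs destination
    }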
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,65 @@ |
| 0 |
+// +build windows |
|
| 1 |
+ |
|
| 2 |
+package container |
|
| 3 |
+ |
|
| 4 |
+import ( |
|
| 5 |
+ "github.com/docker/docker/daemon/execdriver" |
|
| 6 |
+ "github.com/docker/docker/volume" |
|
| 7 |
+ "github.com/docker/libnetwork" |
|
| 8 |
+) |
|
| 9 |
+ |
|
| 10 |
+// DefaultPathEnv is deliberately empty on Windows as the default path will be set by |
|
| 11 |
+// the container. Docker has no context of what the default path should be. |
|
| 12 |
+const DefaultPathEnv = "" |
|
| 13 |
+ |
|
| 14 |
+// Container holds fields specific to the Windows implementation. See |
|
| 15 |
+// CommonContainer for standard fields common to all containers. |
|
| 16 |
+type Container struct {
|
|
| 17 |
+ CommonContainer |
|
| 18 |
+ |
|
| 19 |
+ // Fields below here are platform specific. |
|
| 20 |
+} |
|
| 21 |
+ |
|
| 22 |
+// CreateDaemonEnvironment creates a new environment variable slice for this container. |
|
| 23 |
+func (container *Container) CreateDaemonEnvironment(linkedEnv []string) []string {
|
|
| 24 |
+ // On Windows, nothing to link. Just return the container environment. |
|
| 25 |
+ return container.Config.Env |
|
| 26 |
+} |
|
| 27 |
+ |
|
| 28 |
+// DisconnectFromNetwork disconnects a container from the network. |
|
| 29 |
+func (container *Container) DisconnectFromNetwork(n libnetwork.Network) error {
|
|
| 30 |
+ return nil |
|
| 31 |
+} |
|
| 32 |
+ |
|
| 33 |
+// SetupWorkingDirectory initializes the container working directory. |
|
| 34 |
+// This is a NOOP on Windows. |
|
| 35 |
+func (container *Container) SetupWorkingDirectory() error {
|
|
| 36 |
+ return nil |
|
| 37 |
+} |
|
| 38 |
+ |
|
| 39 |
+// UnmountIpcMounts unmounts Ipc related mounts. |
|
| 40 |
+// This is a NOOP on Windows. |
|
| 41 |
+func (container *Container) UnmountIpcMounts(unmount func(pth string) error) {
|
|
| 42 |
+} |
|
| 43 |
+ |
|
| 44 |
+// IpcMounts returns the list of Ipc related mounts. |
|
| 45 |
+func (container *Container) IpcMounts() []execdriver.Mount {
|
|
| 46 |
+ return nil |
|
| 47 |
+} |
|
| 48 |
+ |
|
| 49 |
+// UnmountVolumes explicitly unmounts volumes from the container. |
|
| 50 |
+func (container *Container) UnmountVolumes(forceSyscall bool) error {
|
|
| 51 |
+ return nil |
|
| 52 |
+} |
|
| 53 |
+ |
|
| 54 |
+// TmpfsMounts returns the list of tmpfs mounts |
|
| 55 |
+func (container *Container) TmpfsMounts() []execdriver.Mount {
|
|
| 56 |
+ return nil |
|
| 57 |
+} |
|
| 58 |
+ |
|
| 59 |
+// appendNetworkMounts appends any network mounts to the array of mount points passed in. |
|
| 60 |
+// Windows does not support network mounts (not to be confused with SMB network mounts), so |
|
| 61 |
+// this is a no-op. |
|
| 62 |
+func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) {
|
|
| 63 |
+ return volumeMounts, nil |
|
| 64 |
+} |
| 0 | 65 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,398 @@ |
| 0 |
+package container |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "io" |
|
| 4 |
+ "os/exec" |
|
| 5 |
+ "strings" |
|
| 6 |
+ "sync" |
|
| 7 |
+ "syscall" |
|
| 8 |
+ "time" |
|
| 9 |
+ |
|
| 10 |
+ "github.com/Sirupsen/logrus" |
|
| 11 |
+ "github.com/docker/docker/daemon/execdriver" |
|
| 12 |
+ derr "github.com/docker/docker/errors" |
|
| 13 |
+ "github.com/docker/docker/pkg/promise" |
|
| 14 |
+ "github.com/docker/docker/pkg/stringid" |
|
| 15 |
+ "github.com/docker/docker/runconfig" |
|
| 16 |
+ "github.com/docker/docker/utils" |
|
| 17 |
+) |
|
| 18 |
+ |
|
| 19 |
+const ( |
|
| 20 |
+ defaultTimeIncrement = 100 |
|
| 21 |
+ loggerCloseTimeout = 10 * time.Second |
|
| 22 |
+) |
|
| 23 |
+ |
|
| 24 |
+// supervisor defines the interface that a supervisor must implement |
|
| 25 |
+type supervisor interface {
|
|
| 26 |
+ // LogContainerEvent generates events related to a given container |
|
| 27 |
+ LogContainerEvent(*Container, string) |
|
| 28 |
+ // Cleanup ensures that the container is properly unmounted |
|
| 29 |
+ Cleanup(*Container) |
|
| 30 |
+ // StartLogging starts the logging driver for the container |
|
| 31 |
+ StartLogging(*Container) error |
|
| 32 |
+ // Run starts a container |
|
| 33 |
+ Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) |
|
| 34 |
+ // IsShuttingDown tells whether the supervisor is shutting down or not |
|
| 35 |
+ IsShuttingDown() bool |
|
| 36 |
+} |
|
| 37 |
+ |
|
| 38 |
+// containerMonitor monitors the execution of a container's main process. |
|
| 39 |
+// If a restart policy is specified for the container the monitor will ensure that the |
|
| 40 |
+// process is restarted based on the rules of the policy. When the container is finally stopped |
|
| 41 |
+// the monitor will reset and cleanup any of the container resources such as networking allocations |
|
| 42 |
+// and the rootfs |
|
| 43 |
+type containerMonitor struct {
|
|
| 44 |
+ mux sync.Mutex |
|
| 45 |
+ |
|
| 46 |
+ // supervisor keeps track of the container and the events it generates |
|
| 47 |
+ supervisor supervisor |
|
| 48 |
+ |
|
| 49 |
+ // container is the container being monitored |
|
| 50 |
+ container *Container |
|
| 51 |
+ |
|
| 52 |
+ // restartPolicy is the current policy being applied to the container monitor |
|
| 53 |
+ restartPolicy runconfig.RestartPolicy |
|
| 54 |
+ |
|
| 55 |
+ // failureCount is the number of times the container has failed to |
|
| 56 |
+ // start in a row |
|
| 57 |
+ failureCount int |
|
| 58 |
+ |
|
| 59 |
+ // shouldStop signals the monitor that the next time the container exits it is |
|
| 60 |
+ // either because docker or the user asked for the container to be stopped |
|
| 61 |
+ shouldStop bool |
|
| 62 |
+ |
|
| 63 |
+ // startSignal is a channel that is closed after the container initially starts |
|
| 64 |
+ startSignal chan struct{}
|
|
| 65 |
+ |
|
| 66 |
+ // stopChan is used to signal to the monitor whenever there is a wait for the |
|
| 67 |
+ // next restart so that the timeIncrement is not honored and the user is not |
|
| 68 |
+ // left waiting for nothing to happen during this time |
|
| 69 |
+ stopChan chan struct{}
|
|
| 70 |
+ |
|
| 71 |
+ // timeIncrement is the amount of time to wait between restarts |
|
| 72 |
+ // this is in milliseconds |
|
| 73 |
+ timeIncrement int |
|
| 74 |
+ |
|
| 75 |
+ // lastStartTime is the time at which the monitor last exec'd the container's process |
|
| 76 |
+ lastStartTime time.Time |
|
| 77 |
+} |
|
| 78 |
+ |
|
| 79 |
+// StartMonitor initializes a containerMonitor for this container with the provided supervisor and restart policy |
|
| 80 |
+// and starts the container's process. |
|
| 81 |
+func (container *Container) StartMonitor(s supervisor, policy runconfig.RestartPolicy) error {
|
|
| 82 |
+ container.monitor = &containerMonitor{
|
|
| 83 |
+ supervisor: s, |
|
| 84 |
+ container: container, |
|
| 85 |
+ restartPolicy: policy, |
|
| 86 |
+ timeIncrement: defaultTimeIncrement, |
|
| 87 |
+ stopChan: make(chan struct{}),
|
|
| 88 |
+ startSignal: make(chan struct{}),
|
|
| 89 |
+ } |
|
| 90 |
+ |
|
| 91 |
+ return container.monitor.wait() |
|
| 92 |
+} |
|
| 93 |
+ |
|
| 94 |
+// wait starts the container and waits until |
|
| 95 |
+// we either receive an error from the initial start of the container's |
|
| 96 |
+// process or until the process is running in the container |
|
| 97 |
+func (m *containerMonitor) wait() error {
|
|
| 98 |
+ select {
|
|
| 99 |
+ case <-m.startSignal: |
|
| 100 |
+ case err := <-promise.Go(m.start): |
|
| 101 |
+ return err |
|
| 102 |
+ } |
|
| 103 |
+ |
|
| 104 |
+ return nil |
|
| 105 |
+} |
|
| 106 |
+ |
|
| 107 |
+// Stop signals to the container monitor that it should stop monitoring the container |
|
| 108 |
+// for exits the next time the process dies |
|
| 109 |
+func (m *containerMonitor) ExitOnNext() {
|
|
| 110 |
+ m.mux.Lock() |
|
| 111 |
+ |
|
| 112 |
+ // we need to protect having a double close of the channel when stop is called |
|
| 113 |
+ // twice or else we will get a panic |
|
| 114 |
+ if !m.shouldStop {
|
|
| 115 |
+ m.shouldStop = true |
|
| 116 |
+ close(m.stopChan) |
|
| 117 |
+ } |
|
| 118 |
+ |
|
| 119 |
+ m.mux.Unlock() |
|
| 120 |
+} |
|
| 121 |
+ |
|
| 122 |
+// Close closes the container's resources such as networking allocations and |
|
| 123 |
+// unmounts the container's root filesystem |
|
| 124 |
+func (m *containerMonitor) Close() error {
|
|
| 125 |
+ // Cleanup networking and mounts |
|
| 126 |
+ m.supervisor.Cleanup(m.container) |
|
| 127 |
+ |
|
| 128 |
+ // FIXME: there is a race condition between two RUN instructions in a Dockerfile |
|
| 129 |
+ // because they share the same runconfig and change the image. Must be fixed |
|
| 130 |
+ // in builder/builder.go |
|
| 131 |
+ if err := m.container.ToDisk(); err != nil {
|
|
| 132 |
+ logrus.Errorf("Error dumping container %s state to disk: %s", m.container.ID, err)
|
|
| 133 |
+ |
|
| 134 |
+ return err |
|
| 135 |
+ } |
|
| 136 |
+ |
|
| 137 |
+ return nil |
|
| 138 |
+} |
|
| 139 |
+ |
|
| 140 |
+// start starts the container's process and monitors it according to the restart policy |
|
| 141 |
+func (m *containerMonitor) start() error {
|
|
| 142 |
+ var ( |
|
| 143 |
+ err error |
|
| 144 |
+ exitStatus execdriver.ExitStatus |
|
| 145 |
+ // this variable indicates where we are in the execution flow: |
|
| 146 |
+ // before Run or after |
|
| 147 |
+ afterRun bool |
|
| 148 |
+ ) |
|
| 149 |
+ |
|
| 150 |
+ // ensure that when the monitor finally exits we release the networking and unmount the rootfs |
|
| 151 |
+ defer func() {
|
|
| 152 |
+ if afterRun {
|
|
| 153 |
+ m.container.Lock() |
|
| 154 |
+ defer m.container.Unlock() |
|
| 155 |
+ m.container.SetStopped(&exitStatus) |
|
| 156 |
+ } |
|
| 157 |
+ m.Close() |
|
| 158 |
+ }() |
|
| 159 |
+ // reset stopped flag |
|
| 160 |
+ if m.container.HasBeenManuallyStopped {
|
|
| 161 |
+ m.container.HasBeenManuallyStopped = false |
|
| 162 |
+ } |
|
| 163 |
+ |
|
| 164 |
+ // reset the restart count |
|
| 165 |
+ m.container.RestartCount = -1 |
|
| 166 |
+ |
|
| 167 |
+ for {
|
|
| 168 |
+ m.container.RestartCount++ |
|
| 169 |
+ |
|
| 170 |
+ if err := m.supervisor.StartLogging(m.container); err != nil {
|
|
| 171 |
+ m.resetContainer(false) |
|
| 172 |
+ |
|
| 173 |
+ return err |
|
| 174 |
+ } |
|
| 175 |
+ |
|
| 176 |
+ pipes := execdriver.NewPipes(m.container.Stdin(), m.container.Stdout(), m.container.Stderr(), m.container.Config.OpenStdin) |
|
| 177 |
+ |
|
| 178 |
+ m.logEvent("start")
|
|
| 179 |
+ |
|
| 180 |
+ m.lastStartTime = time.Now() |
|
| 181 |
+ |
|
| 182 |
+ if exitStatus, err = m.supervisor.Run(m.container, pipes, m.callback); err != nil {
|
|
| 183 |
+ // if we receive an internal error from the initial start of a container then lets |
|
| 184 |
+ // return it instead of entering the restart loop |
|
| 185 |
+ // set to 127 for container cmd not found/does not exist) |
|
| 186 |
+ if strings.Contains(err.Error(), "executable file not found") || |
|
| 187 |
+ strings.Contains(err.Error(), "no such file or directory") || |
|
| 188 |
+ strings.Contains(err.Error(), "system cannot find the file specified") {
|
|
| 189 |
+ if m.container.RestartCount == 0 {
|
|
| 190 |
+ m.container.ExitCode = 127 |
|
| 191 |
+ m.resetContainer(false) |
|
| 192 |
+ return derr.ErrorCodeCmdNotFound |
|
| 193 |
+ } |
|
| 194 |
+ } |
|
| 195 |
+ // set to 126 for container cmd can't be invoked errors |
|
| 196 |
+ if strings.Contains(err.Error(), syscall.EACCES.Error()) {
|
|
| 197 |
+ if m.container.RestartCount == 0 {
|
|
| 198 |
+ m.container.ExitCode = 126 |
|
| 199 |
+ m.resetContainer(false) |
|
| 200 |
+ return derr.ErrorCodeCmdCouldNotBeInvoked |
|
| 201 |
+ } |
|
| 202 |
+ } |
|
| 203 |
+ |
|
| 204 |
+ if m.container.RestartCount == 0 {
|
|
| 205 |
+ m.container.ExitCode = -1 |
|
| 206 |
+ m.resetContainer(false) |
|
| 207 |
+ |
|
| 208 |
+ return derr.ErrorCodeCantStart.WithArgs(m.container.ID, utils.GetErrorMessage(err)) |
|
| 209 |
+ } |
|
| 210 |
+ |
|
| 211 |
+ logrus.Errorf("Error running container: %s", err)
|
|
| 212 |
+ } |
|
| 213 |
+ |
|
| 214 |
+ // here the container lock is no longer held |
|
| 215 |
+ afterRun = true |
|
| 216 |
+ |
|
| 217 |
+ m.resetMonitor(err == nil && exitStatus.ExitCode == 0) |
|
| 218 |
+ |
|
| 219 |
+ if m.shouldRestart(exitStatus.ExitCode) {
|
|
| 220 |
+ m.container.SetRestarting(&exitStatus) |
|
| 221 |
+ m.logEvent("die")
|
|
| 222 |
+ m.resetContainer(true) |
|
| 223 |
+ |
|
| 224 |
+ // sleep with a small time increment between each restart to help avoid issues caused by quickly |
|
| 225 |
+ // restarting the container because of some types of errors ( networking cut out, etc... ) |
|
| 226 |
+ m.waitForNextRestart() |
|
| 227 |
+ |
|
| 228 |
+ // we need to check this before reentering the loop because the waitForNextRestart could have |
|
| 229 |
+ // been terminated by a request from a user |
|
| 230 |
+ if m.shouldStop {
|
|
| 231 |
+ return err |
|
| 232 |
+ } |
|
| 233 |
+ continue |
|
| 234 |
+ } |
|
| 235 |
+ |
|
| 236 |
+ m.logEvent("die")
|
|
| 237 |
+ m.resetContainer(true) |
|
| 238 |
+ return err |
|
| 239 |
+ } |
|
| 240 |
+} |
|
| 241 |
+ |
|
| 242 |
+// resetMonitor resets the stateful fields on the containerMonitor based on the |
|
| 243 |
+// previous runs success or failure. Regardless of success, if the container had |
|
| 244 |
+// an execution time of more than 10s then reset the timer back to the default |
|
| 245 |
+func (m *containerMonitor) resetMonitor(successful bool) {
|
|
| 246 |
+ executionTime := time.Now().Sub(m.lastStartTime).Seconds() |
|
| 247 |
+ |
|
| 248 |
+ if executionTime > 10 {
|
|
| 249 |
+ m.timeIncrement = defaultTimeIncrement |
|
| 250 |
+ } else {
|
|
| 251 |
+ // otherwise we need to increment the amount of time we wait before restarting |
|
| 252 |
+ // the process. We will build up by multiplying the increment by 2 |
|
| 253 |
+ m.timeIncrement *= 2 |
|
| 254 |
+ } |
|
| 255 |
+ |
|
| 256 |
+ // the container exited successfully so we need to reset the failure counter |
|
| 257 |
+ if successful {
|
|
| 258 |
+ m.failureCount = 0 |
|
| 259 |
+ } else {
|
|
| 260 |
+ m.failureCount++ |
|
| 261 |
+ } |
|
| 262 |
+} |
|
| 263 |
+ |
|
| 264 |
+// waitForNextRestart waits with the default time increment to restart the container unless |
|
| 265 |
+// a user or docker asks for the container to be stopped |
|
| 266 |
+func (m *containerMonitor) waitForNextRestart() {
|
|
| 267 |
+ select {
|
|
| 268 |
+ case <-time.After(time.Duration(m.timeIncrement) * time.Millisecond): |
|
| 269 |
+ case <-m.stopChan: |
|
| 270 |
+ } |
|
| 271 |
+} |
|
| 272 |
+ |
|
| 273 |
+// shouldRestart checks the restart policy and applies the rules to determine if |
|
| 274 |
+// the container's process should be restarted |
|
| 275 |
+func (m *containerMonitor) shouldRestart(exitCode int) bool {
|
|
| 276 |
+ m.mux.Lock() |
|
| 277 |
+ defer m.mux.Unlock() |
|
| 278 |
+ |
|
| 279 |
+ // do not restart if the user or docker has requested that this container be stopped |
|
| 280 |
+ if m.shouldStop {
|
|
| 281 |
+ m.container.HasBeenManuallyStopped = !m.supervisor.IsShuttingDown() |
|
| 282 |
+ return false |
|
| 283 |
+ } |
|
| 284 |
+ |
|
| 285 |
+ switch {
|
|
| 286 |
+ case m.restartPolicy.IsAlways(), m.restartPolicy.IsUnlessStopped(): |
|
| 287 |
+ return true |
|
| 288 |
+ case m.restartPolicy.IsOnFailure(): |
|
| 289 |
+ // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count |
|
| 290 |
+ if max := m.restartPolicy.MaximumRetryCount; max != 0 && m.failureCount > max {
|
|
| 291 |
+ logrus.Debugf("stopping restart of container %s because maximum failure could of %d has been reached",
|
|
| 292 |
+ stringid.TruncateID(m.container.ID), max) |
|
| 293 |
+ return false |
|
| 294 |
+ } |
|
| 295 |
+ |
|
| 296 |
+ return exitCode != 0 |
|
| 297 |
+ } |
|
| 298 |
+ |
|
| 299 |
+ return false |
|
| 300 |
+} |
|
| 301 |
+ |
|
| 302 |
+// callback ensures that the container's state is properly updated after we |
|
| 303 |
+// receive an ack from the execution drivers |
|
| 304 |
+func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
|
|
| 305 |
+ go func() {
|
|
| 306 |
+ _, ok := <-chOOM |
|
| 307 |
+ if ok {
|
|
| 308 |
+ m.logEvent("oom")
|
|
| 309 |
+ } |
|
| 310 |
+ }() |
|
| 311 |
+ |
|
| 312 |
+ if processConfig.Tty {
|
|
| 313 |
+ // The callback is called after the process start() |
|
| 314 |
+ // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave |
|
| 315 |
+ // which we close here. |
|
| 316 |
+ if c, ok := processConfig.Stdout.(io.Closer); ok {
|
|
| 317 |
+ c.Close() |
|
| 318 |
+ } |
|
| 319 |
+ } |
|
| 320 |
+ |
|
| 321 |
+ m.container.SetRunning(pid) |
|
| 322 |
+ |
|
| 323 |
+ // signal that the process has started |
|
| 324 |
+ // close channel only if not closed |
|
| 325 |
+ select {
|
|
| 326 |
+ case <-m.startSignal: |
|
| 327 |
+ default: |
|
| 328 |
+ close(m.startSignal) |
|
| 329 |
+ } |
|
| 330 |
+ |
|
| 331 |
+ if err := m.container.ToDiskLocking(); err != nil {
|
|
| 332 |
+ logrus.Errorf("Error saving container to disk: %v", err)
|
|
| 333 |
+ } |
|
| 334 |
+ return nil |
|
| 335 |
+} |
|
| 336 |
+ |
|
| 337 |
+// resetContainer resets the container's IO and ensures that the command is able to be executed again |
|
| 338 |
+// by copying the data into a new struct |
|
| 339 |
+// if lock is true, the container is locked during the reset |
|
| 340 |
+func (m *containerMonitor) resetContainer(lock bool) {
|
|
| 341 |
+ container := m.container |
|
| 342 |
+ if lock {
|
|
| 343 |
+ container.Lock() |
|
| 344 |
+ defer container.Unlock() |
|
| 345 |
+ } |
|
| 346 |
+ |
|
| 347 |
+ if err := container.CloseStreams(); err != nil {
|
|
| 348 |
+ logrus.Errorf("%s: %s", container.ID, err)
|
|
| 349 |
+ } |
|
| 350 |
+ |
|
| 351 |
+ if container.Command != nil && container.Command.ProcessConfig.Terminal != nil {
|
|
| 352 |
+ if err := container.Command.ProcessConfig.Terminal.Close(); err != nil {
|
|
| 353 |
+ logrus.Errorf("%s: Error closing terminal: %s", container.ID, err)
|
|
| 354 |
+ } |
|
| 355 |
+ } |
|
| 356 |
+ |
|
| 357 |
+ // Re-create a brand new stdin pipe once the container exited |
|
| 358 |
+ if container.Config.OpenStdin {
|
|
| 359 |
+ container.NewInputPipes() |
|
| 360 |
+ } |
|
| 361 |
+ |
|
| 362 |
+ if container.LogDriver != nil {
|
|
| 363 |
+ if container.LogCopier != nil {
|
|
| 364 |
+ exit := make(chan struct{})
|
|
| 365 |
+ go func() {
|
|
| 366 |
+ container.LogCopier.Wait() |
|
| 367 |
+ close(exit) |
|
| 368 |
+ }() |
|
| 369 |
+ select {
|
|
| 370 |
+ case <-time.After(loggerCloseTimeout): |
|
| 371 |
+ logrus.Warnf("Logger didn't exit in time: logs may be truncated")
|
|
| 372 |
+ case <-exit: |
|
| 373 |
+ } |
|
| 374 |
+ } |
|
| 375 |
+ container.LogDriver.Close() |
|
| 376 |
+ container.LogCopier = nil |
|
| 377 |
+ container.LogDriver = nil |
|
| 378 |
+ } |
|
| 379 |
+ |
|
| 380 |
+ c := container.Command.ProcessConfig.Cmd |
|
| 381 |
+ |
|
| 382 |
+ container.Command.ProcessConfig.Cmd = exec.Cmd{
|
|
| 383 |
+ Stdin: c.Stdin, |
|
| 384 |
+ Stdout: c.Stdout, |
|
| 385 |
+ Stderr: c.Stderr, |
|
| 386 |
+ Path: c.Path, |
|
| 387 |
+ Env: c.Env, |
|
| 388 |
+ ExtraFiles: c.ExtraFiles, |
|
| 389 |
+ Args: c.Args, |
|
| 390 |
+ Dir: c.Dir, |
|
| 391 |
+ SysProcAttr: c.SysProcAttr, |
|
| 392 |
+ } |
|
| 393 |
+} |
|
| 394 |
+ |
|
| 395 |
+func (m *containerMonitor) logEvent(action string) {
|
|
| 396 |
+ m.supervisor.LogContainerEvent(m.container, action) |
|
| 397 |
+} |
| 0 | 398 |
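StartMonitor is the new public entry point into the restart machinery: the caller passes something that satisfies the unexported supervisor interface (its methods — LogContainerEvent, Cleanup, StartLogging, Run, IsShuttingDown — are all exported, so the daemon can implement it from another package) plus the restart policy, and wait() returns once the process is running or the initial start fails. A hedged sketch of a call site, with daemonSupervisor and ctr as invented names:

    // Assumes daemonSupervisor implements the supervisor methods above and
    // ctr is a *container.Container that is ready to run. RestartPolicy field
    // names are taken from runconfig as used elsewhere in this change.
    policy := runconfig.RestartPolicy{Name: "on-failure", MaximumRetryCount: 3}
    if err := ctr.StartMonitor(daemonSupervisor, policy); err != nil {
        // The initial start failed; the monitor's deferred Close has already
        // run Cleanup and persisted the container state.
        logrus.Errorf("failed to start container %s: %v", ctr.ID, err)
    }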
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,273 @@ |
| 0 |
+package container |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "fmt" |
|
| 4 |
+ "sync" |
|
| 5 |
+ "time" |
|
| 6 |
+ |
|
| 7 |
+ "github.com/docker/docker/daemon/execdriver" |
|
| 8 |
+ derr "github.com/docker/docker/errors" |
|
| 9 |
+ "github.com/docker/docker/pkg/units" |
|
| 10 |
+) |
|
| 11 |
+ |
|
| 12 |
+// State holds the current container state, and has methods to get and |
|
| 13 |
+// set the state. Container embeds State, which allows all of the |
|
| 14 |
+// functions defined against State to run against Container. |
|
| 15 |
+type State struct {
|
|
| 16 |
+ sync.Mutex |
|
| 17 |
+ // FIXME: Why do we have both paused and running if a |
|
| 18 |
+ // container cannot be paused and running at the same time? |
|
| 19 |
+ Running bool |
|
| 20 |
+ Paused bool |
|
| 21 |
+ Restarting bool |
|
| 22 |
+ OOMKilled bool |
|
| 23 |
+ RemovalInProgress bool // No need for this to be persisted on disk. |
|
| 24 |
+ Dead bool |
|
| 25 |
+ Pid int |
|
| 26 |
+ ExitCode int |
|
| 27 |
+ Error string // contains last known error when starting the container |
|
| 28 |
+ StartedAt time.Time |
|
| 29 |
+ FinishedAt time.Time |
|
| 30 |
+ waitChan chan struct{}
|
|
| 31 |
+} |
|
| 32 |
+ |
|
| 33 |
+// NewState creates a default state object with a fresh channel for state changes. |
|
| 34 |
+func NewState() *State {
|
|
| 35 |
+ return &State{
|
|
| 36 |
+ waitChan: make(chan struct{}),
|
|
| 37 |
+ } |
|
| 38 |
+} |
|
| 39 |
+ |
|
| 40 |
+// String returns a human-readable description of the state |
|
| 41 |
+func (s *State) String() string {
|
|
| 42 |
+ if s.Running {
|
|
| 43 |
+ if s.Paused {
|
|
| 44 |
+ return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
|
|
| 45 |
+ } |
|
| 46 |
+ if s.Restarting {
|
|
| 47 |
+ return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
|
|
| 48 |
+ } |
|
| 49 |
+ |
|
| 50 |
+ return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
|
|
| 51 |
+ } |
|
| 52 |
+ |
|
| 53 |
+ if s.RemovalInProgress {
|
|
| 54 |
+ return "Removal In Progress" |
|
| 55 |
+ } |
|
| 56 |
+ |
|
| 57 |
+ if s.Dead {
|
|
| 58 |
+ return "Dead" |
|
| 59 |
+ } |
|
| 60 |
+ |
|
| 61 |
+ if s.StartedAt.IsZero() {
|
|
| 62 |
+ return "Created" |
|
| 63 |
+ } |
|
| 64 |
+ |
|
| 65 |
+ if s.FinishedAt.IsZero() {
|
|
| 66 |
+ return "" |
|
| 67 |
+ } |
|
| 68 |
+ |
|
| 69 |
+ return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
|
|
| 70 |
+} |
|
| 71 |
+ |
|
| 72 |
+// StateString returns a single string to describe state |
|
| 73 |
+func (s *State) StateString() string {
|
|
| 74 |
+ if s.Running {
|
|
| 75 |
+ if s.Paused {
|
|
| 76 |
+ return "paused" |
|
| 77 |
+ } |
|
| 78 |
+ if s.Restarting {
|
|
| 79 |
+ return "restarting" |
|
| 80 |
+ } |
|
| 81 |
+ return "running" |
|
| 82 |
+ } |
|
| 83 |
+ |
|
| 84 |
+ if s.Dead {
|
|
| 85 |
+ return "dead" |
|
| 86 |
+ } |
|
| 87 |
+ |
|
| 88 |
+ if s.StartedAt.IsZero() {
|
|
| 89 |
+ return "created" |
|
| 90 |
+ } |
|
| 91 |
+ |
|
| 92 |
+ return "exited" |
|
| 93 |
+} |
|
| 94 |
+ |
|
| 95 |
+// IsValidStateString checks if the provided string is a valid container state or not. |
|
| 96 |
+func IsValidStateString(s string) bool {
|
|
| 97 |
+ if s != "paused" && |
|
| 98 |
+ s != "restarting" && |
|
| 99 |
+ s != "running" && |
|
| 100 |
+ s != "dead" && |
|
| 101 |
+ s != "created" && |
|
| 102 |
+ s != "exited" {
|
|
| 103 |
+ return false |
|
| 104 |
+ } |
|
| 105 |
+ return true |
|
| 106 |
+} |
|
| 107 |
+ |
|
| 108 |
+func wait(waitChan <-chan struct{}, timeout time.Duration) error {
|
|
| 109 |
+ if timeout < 0 {
|
|
| 110 |
+ <-waitChan |
|
| 111 |
+ return nil |
|
| 112 |
+ } |
|
| 113 |
+ select {
|
|
| 114 |
+ case <-time.After(timeout): |
|
| 115 |
+ return derr.ErrorCodeTimedOut.WithArgs(timeout) |
|
| 116 |
+ case <-waitChan: |
|
| 117 |
+ return nil |
|
| 118 |
+ } |
|
| 119 |
+} |
|
| 120 |
+ |
|
| 121 |
+// waitRunning waits until state is running. If state is already |
|
| 122 |
+// running it returns immediately. If you want wait forever you must |
|
| 123 |
+// supply negative timeout. Returns pid, that was passed to |
|
| 124 |
+// SetRunning. |
|
| 125 |
+func (s *State) waitRunning(timeout time.Duration) (int, error) {
|
|
| 126 |
+ s.Lock() |
|
| 127 |
+ if s.Running {
|
|
| 128 |
+ pid := s.Pid |
|
| 129 |
+ s.Unlock() |
|
| 130 |
+ return pid, nil |
|
| 131 |
+ } |
|
| 132 |
+ waitChan := s.waitChan |
|
| 133 |
+ s.Unlock() |
|
| 134 |
+ if err := wait(waitChan, timeout); err != nil {
|
|
| 135 |
+ return -1, err |
|
| 136 |
+ } |
|
| 137 |
+ return s.GetPID(), nil |
|
| 138 |
+} |
|
| 139 |
+ |
|
| 140 |
+// WaitStop waits until state is stopped. If state already stopped it returns |
|
| 141 |
+// immediately. If you want wait forever you must supply negative timeout. |
|
| 142 |
+// Returns exit code, that was passed to SetStoppedLocking |
|
| 143 |
+func (s *State) WaitStop(timeout time.Duration) (int, error) {
|
|
| 144 |
+ s.Lock() |
|
| 145 |
+ if !s.Running {
|
|
| 146 |
+ exitCode := s.ExitCode |
|
| 147 |
+ s.Unlock() |
|
| 148 |
+ return exitCode, nil |
|
| 149 |
+ } |
|
| 150 |
+ waitChan := s.waitChan |
|
| 151 |
+ s.Unlock() |
|
| 152 |
+ if err := wait(waitChan, timeout); err != nil {
|
|
| 153 |
+ return -1, err |
|
| 154 |
+ } |
|
| 155 |
+ return s.getExitCode(), nil |
|
| 156 |
+} |
|
| 157 |
+ |
|
| 158 |
+// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running. |
|
| 159 |
+func (s *State) IsRunning() bool {
|
|
| 160 |
+ s.Lock() |
|
| 161 |
+ res := s.Running |
|
| 162 |
+ s.Unlock() |
|
| 163 |
+ return res |
|
| 164 |
+} |
|
| 165 |
+ |
|
| 166 |
+// GetPID returns the process id of the container. |
|
| 167 |
+func (s *State) GetPID() int {
|
|
| 168 |
+ s.Lock() |
|
| 169 |
+ res := s.Pid |
|
| 170 |
+ s.Unlock() |
|
| 171 |
+ return res |
|
| 172 |
+} |
|
| 173 |
+ |
|
| 174 |
+func (s *State) getExitCode() int {
|
|
| 175 |
+ s.Lock() |
|
| 176 |
+ res := s.ExitCode |
|
| 177 |
+ s.Unlock() |
|
| 178 |
+ return res |
|
| 179 |
+} |
|
| 180 |
+ |
|
| 181 |
+// SetRunning sets the state of the container to "running". |
|
| 182 |
+func (s *State) SetRunning(pid int) {
|
|
| 183 |
+ s.Error = "" |
|
| 184 |
+ s.Running = true |
|
| 185 |
+ s.Paused = false |
|
| 186 |
+ s.Restarting = false |
|
| 187 |
+ s.ExitCode = 0 |
|
| 188 |
+ s.Pid = pid |
|
| 189 |
+ s.StartedAt = time.Now().UTC() |
|
| 190 |
+ close(s.waitChan) // fire waiters for start |
|
| 191 |
+ s.waitChan = make(chan struct{})
|
|
| 192 |
+} |
|
| 193 |
+ |
|
| 194 |
+// SetStoppedLocking locks the container state and sets it to "stopped". |
|
| 195 |
+func (s *State) SetStoppedLocking(exitStatus *execdriver.ExitStatus) {
|
|
| 196 |
+ s.Lock() |
|
| 197 |
+ s.SetStopped(exitStatus) |
|
| 198 |
+ s.Unlock() |
|
| 199 |
+} |
|
| 200 |
+ |
|
| 201 |
+// SetStopped sets the container state to "stopped" without locking. |
|
| 202 |
+func (s *State) SetStopped(exitStatus *execdriver.ExitStatus) {
|
|
| 203 |
+ s.Running = false |
|
| 204 |
+ s.Restarting = false |
|
| 205 |
+ s.Pid = 0 |
|
| 206 |
+ s.FinishedAt = time.Now().UTC() |
|
| 207 |
+ s.setFromExitStatus(exitStatus) |
|
| 208 |
+ close(s.waitChan) // fire waiters for stop |
|
| 209 |
+ s.waitChan = make(chan struct{})
|
|
| 210 |
+} |
|
| 211 |
+ |
|
| 212 |
+// SetRestartingLocking locks the container state and sets it to "restarting". It is used when |
|
| 213 |
+// docker handles the auto restart of containers that are in the middle of a stop and being restarted again |
|
| 214 |
+func (s *State) SetRestartingLocking(exitStatus *execdriver.ExitStatus) {
|
|
| 215 |
+ s.Lock() |
|
| 216 |
+ s.SetRestarting(exitStatus) |
|
| 217 |
+ s.Unlock() |
|
| 218 |
+} |
|
| 219 |
+ |
|
| 220 |
+// SetRestarting sets the container state to "restarting". |
|
| 221 |
+// It also sets the container PID to 0. |
|
| 222 |
+func (s *State) SetRestarting(exitStatus *execdriver.ExitStatus) {
|
|
| 223 |
+ // we should consider the container running when it is restarting because of |
|
| 224 |
+ // all the checks in docker around rm/stop/etc |
|
| 225 |
+ s.Running = true |
|
| 226 |
+ s.Restarting = true |
|
| 227 |
+ s.Pid = 0 |
|
| 228 |
+ s.FinishedAt = time.Now().UTC() |
|
| 229 |
+ s.setFromExitStatus(exitStatus) |
|
| 230 |
+ close(s.waitChan) // fire waiters for stop |
|
| 231 |
+ s.waitChan = make(chan struct{})
|
|
| 232 |
+} |
|
| 233 |
+ |
|
| 234 |
+// SetError sets the container's error state. This is useful when we want to |
|
| 235 |
+// know the error that occurred when the container transitions to another state |
|
| 236 |
+// when inspecting it |
|
| 237 |
+func (s *State) SetError(err error) {
|
|
| 238 |
+ s.Error = err.Error() |
|
| 239 |
+} |
|
| 240 |
+ |
|
| 241 |
+// IsPaused returns whether the container is paused or not. |
|
| 242 |
+func (s *State) IsPaused() bool {
|
|
| 243 |
+ s.Lock() |
|
| 244 |
+ res := s.Paused |
|
| 245 |
+ s.Unlock() |
|
| 246 |
+ return res |
|
| 247 |
+} |
|
| 248 |
+ |
|
| 249 |
+// SetRemovalInProgress sets the container state as being removed. |
|
| 250 |
+func (s *State) SetRemovalInProgress() error {
|
|
| 251 |
+ s.Lock() |
|
| 252 |
+ defer s.Unlock() |
|
| 253 |
+ if s.RemovalInProgress {
|
|
| 254 |
+ return derr.ErrorCodeAlreadyRemoving |
|
| 255 |
+ } |
|
| 256 |
+ s.RemovalInProgress = true |
|
| 257 |
+ return nil |
|
| 258 |
+} |
|
| 259 |
+ |
|
| 260 |
+// ResetRemovalInProgress sets the RemovalInProgress state to false. |
|
| 261 |
+func (s *State) ResetRemovalInProgress() {
|
|
| 262 |
+ s.Lock() |
|
| 263 |
+ s.RemovalInProgress = false |
|
| 264 |
+ s.Unlock() |
|
| 265 |
+} |
|
| 266 |
+ |
|
| 267 |
+// SetDead sets the container state to "dead" |
|
| 268 |
+func (s *State) SetDead() {
|
|
| 269 |
+ s.Lock() |
|
| 270 |
+ s.Dead = true |
|
| 271 |
+ s.Unlock() |
|
| 272 |
+} |
| 0 | 273 |
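State's synchronization revolves around waitChan: waiters snapshot the current channel while holding the lock and then block on it outside the lock, and every transition (SetRunning, SetStopped, SetRestarting) closes the old channel and installs a fresh one for the next round of waiters. A minimal, self-contained sketch of a consumer, with made-up values (the test file below exercises the same paths):

    package main

    import (
        "time"

        "github.com/Sirupsen/logrus"
        "github.com/docker/docker/container"
        "github.com/docker/docker/daemon/execdriver"
    )

    func main() {
        s := container.NewState()

        s.Lock()
        s.SetRunning(4242) // hypothetical pid
        s.Unlock()

        done := make(chan struct{})
        go func() {
            // Blocks until a SetStopped* call fires, or fails with
            // derr.ErrorCodeTimedOut after ten seconds.
            code, err := s.WaitStop(10 * time.Second)
            logrus.Debugf("stopped: code=%d err=%v", code, err)
            close(done)
        }()

        s.SetStoppedLocking(&execdriver.ExitStatus{ExitCode: 0}) // releases the waiter
        <-done
    }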
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,111 @@ |
| 0 |
+package container |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "sync/atomic" |
|
| 4 |
+ "testing" |
|
| 5 |
+ "time" |
|
| 6 |
+ |
|
| 7 |
+ "github.com/docker/docker/daemon/execdriver" |
|
| 8 |
+) |
|
| 9 |
+ |
|
| 10 |
+func TestStateRunStop(t *testing.T) {
|
|
| 11 |
+ s := NewState() |
|
| 12 |
+ for i := 1; i < 3; i++ { // full lifecycle two times
|
|
| 13 |
+ started := make(chan struct{})
|
|
| 14 |
+ var pid int64 |
|
| 15 |
+ go func() {
|
|
| 16 |
+ runPid, _ := s.waitRunning(-1 * time.Second) |
|
| 17 |
+ atomic.StoreInt64(&pid, int64(runPid)) |
|
| 18 |
+ close(started) |
|
| 19 |
+ }() |
|
| 20 |
+ s.Lock() |
|
| 21 |
+ s.SetRunning(i + 100) |
|
| 22 |
+ s.Unlock() |
|
| 23 |
+ |
|
| 24 |
+ if !s.IsRunning() {
|
|
| 25 |
+ t.Fatal("State not running")
|
|
| 26 |
+ } |
|
| 27 |
+ if s.Pid != i+100 {
|
|
| 28 |
+ t.Fatalf("Pid %v, expected %v", s.Pid, i+100)
|
|
| 29 |
+ } |
|
| 30 |
+ if s.ExitCode != 0 {
|
|
| 31 |
+ t.Fatalf("ExitCode %v, expected 0", s.ExitCode)
|
|
| 32 |
+ } |
|
| 33 |
+ select {
|
|
| 34 |
+ case <-time.After(100 * time.Millisecond): |
|
| 35 |
+ t.Fatal("Start callback doesn't fire in 100 milliseconds")
|
|
| 36 |
+ case <-started: |
|
| 37 |
+ t.Log("Start callback fired")
|
|
| 38 |
+ } |
|
| 39 |
+ runPid := int(atomic.LoadInt64(&pid)) |
|
| 40 |
+ if runPid != i+100 {
|
|
| 41 |
+ t.Fatalf("Pid %v, expected %v", runPid, i+100)
|
|
| 42 |
+ } |
|
| 43 |
+ if pid, err := s.waitRunning(-1 * time.Second); err != nil || pid != i+100 {
|
|
| 44 |
+ t.Fatalf("waitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil)
|
|
| 45 |
+ } |
|
| 46 |
+ |
|
| 47 |
+ stopped := make(chan struct{})
|
|
| 48 |
+ var exit int64 |
|
| 49 |
+ go func() {
|
|
| 50 |
+ exitCode, _ := s.WaitStop(-1 * time.Second) |
|
| 51 |
+ atomic.StoreInt64(&exit, int64(exitCode)) |
|
| 52 |
+ close(stopped) |
|
| 53 |
+ }() |
|
| 54 |
+ s.SetStoppedLocking(&execdriver.ExitStatus{ExitCode: i})
|
|
| 55 |
+ if s.IsRunning() {
|
|
| 56 |
+ t.Fatal("State is running")
|
|
| 57 |
+ } |
|
| 58 |
+ if s.ExitCode != i {
|
|
| 59 |
+ t.Fatalf("ExitCode %v, expected %v", s.ExitCode, i)
|
|
| 60 |
+ } |
|
| 61 |
+ if s.Pid != 0 {
|
|
| 62 |
+ t.Fatalf("Pid %v, expected 0", s.Pid)
|
|
| 63 |
+ } |
|
| 64 |
+ select {
|
|
| 65 |
+ case <-time.After(100 * time.Millisecond): |
|
| 66 |
+ t.Fatal("Stop callback doesn't fire in 100 milliseconds")
|
|
| 67 |
+ case <-stopped: |
|
| 68 |
+ t.Log("Stop callback fired")
|
|
| 69 |
+ } |
|
| 70 |
+ exitCode := int(atomic.LoadInt64(&exit)) |
|
| 71 |
+ if exitCode != i {
|
|
| 72 |
+ t.Fatalf("ExitCode %v, expected %v", exitCode, i)
|
|
| 73 |
+ } |
|
| 74 |
+ if exitCode, err := s.WaitStop(-1 * time.Second); err != nil || exitCode != i {
|
|
| 75 |
+ t.Fatalf("WaitStop returned exitCode: %v, err: %v, expected exitCode: %v, err: %v", exitCode, err, i, nil)
|
|
| 76 |
+ } |
|
| 77 |
+ } |
|
| 78 |
+} |
|
| 79 |
+ |
|
| 80 |
+func TestStateTimeoutWait(t *testing.T) {
|
|
| 81 |
+ s := NewState() |
|
| 82 |
+ started := make(chan struct{})
|
|
| 83 |
+ go func() {
|
|
| 84 |
+ s.waitRunning(100 * time.Millisecond) |
|
| 85 |
+ close(started) |
|
| 86 |
+ }() |
|
| 87 |
+ select {
|
|
| 88 |
+ case <-time.After(200 * time.Millisecond): |
|
| 89 |
+ t.Fatal("Start callback doesn't fire in 100 milliseconds")
|
|
| 90 |
+ case <-started: |
|
| 91 |
+ t.Log("Start callback fired")
|
|
| 92 |
+ } |
|
| 93 |
+ |
|
| 94 |
+ s.Lock() |
|
| 95 |
+ s.SetRunning(49) |
|
| 96 |
+ s.Unlock() |
|
| 97 |
+ |
|
| 98 |
+ stopped := make(chan struct{})
|
|
| 99 |
+ go func() {
|
|
| 100 |
+ s.waitRunning(100 * time.Millisecond) |
|
| 101 |
+ close(stopped) |
|
| 102 |
+ }() |
|
| 103 |
+ select {
|
|
| 104 |
+ case <-time.After(200 * time.Millisecond): |
|
| 105 |
+ t.Fatal("Start callback doesn't fire in 100 milliseconds")
|
|
| 106 |
+ case <-stopped: |
|
| 107 |
+ t.Log("Start callback fired")
|
|
| 108 |
+ } |
|
| 109 |
+ |
|
| 110 |
+} |
| 0 | 111 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,12 @@ |
| 0 |
+// +build linux freebsd |
|
| 1 |
+ |
|
| 2 |
+package container |
|
| 3 |
+ |
|
| 4 |
+import "github.com/docker/docker/daemon/execdriver" |
|
| 5 |
+ |
|
| 6 |
+// setFromExitStatus is a platform specific helper function to set the state |
|
| 7 |
+// based on the ExitStatus structure. |
|
| 8 |
+func (s *State) setFromExitStatus(exitStatus *execdriver.ExitStatus) {
|
|
| 9 |
+ s.ExitCode = exitStatus.ExitCode |
|
| 10 |
+ s.OOMKilled = exitStatus.OOMKilled |
|
| 11 |
+} |
| 0 | 12 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,9 @@ |
| 0 |
+package container |
|
| 1 |
+ |
|
| 2 |
+import "github.com/docker/docker/daemon/execdriver" |
|
| 3 |
+ |
|
| 4 |
+// setFromExitStatus is a platform specific helper function to set the state |
|
| 5 |
+// based on the ExitStatus structure. |
|
| 6 |
+func (s *State) setFromExitStatus(exitStatus *execdriver.ExitStatus) {
|
|
| 7 |
+ s.ExitCode = exitStatus.ExitCode |
|
| 8 |
+} |
| ... | ... |
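setFromExitStatus is the only platform-split piece of State: the linux/freebsd file above copies both the exit code and the OOM flag, while the Windows file records only the exit code. An illustrative fragment for the linux/freebsd build, with invented values, using only the fields that appear in the files above:

    st := execdriver.ExitStatus{ExitCode: 137, OOMKilled: true} // hypothetical exit status
    s := container.NewState()
    s.SetStoppedLocking(&st)
    // Here s.ExitCode == 137 and s.OOMKilled == true; the Windows variant of
    // setFromExitStatus (above) records only the exit code.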
@@ -8,6 +8,7 @@ import ( |
| 8 | 8 |
"strings" |
| 9 | 9 |
|
| 10 | 10 |
"github.com/docker/docker/api/types" |
| 11 |
+ "github.com/docker/docker/container" |
|
| 11 | 12 |
"github.com/docker/docker/pkg/archive" |
| 12 | 13 |
"github.com/docker/docker/pkg/chrootarchive" |
| 13 | 14 |
"github.com/docker/docker/pkg/ioutils" |
| ... | ... |
@@ -71,69 +72,9 @@ func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNon |
| 71 | 71 |
return daemon.containerExtractToDir(container, path, noOverwriteDirNonDir, content) |
| 72 | 72 |
} |
| 73 | 73 |
|
| 74 |
-// resolvePath resolves the given path in the container to a resource on the |
|
| 75 |
-// host. Returns a resolved path (absolute path to the resource on the host), |
|
| 76 |
-// the absolute path to the resource relative to the container's rootfs, and |
|
| 77 |
-// a error if the path points to outside the container's rootfs. |
|
| 78 |
-func (container *Container) resolvePath(path string) (resolvedPath, absPath string, err error) {
|
|
| 79 |
- // Consider the given path as an absolute path in the container. |
|
| 80 |
- absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) |
|
| 81 |
- |
|
| 82 |
- // Split the absPath into its Directory and Base components. We will |
|
| 83 |
- // resolve the dir in the scope of the container then append the base. |
|
| 84 |
- dirPath, basePath := filepath.Split(absPath) |
|
| 85 |
- |
|
| 86 |
- resolvedDirPath, err := container.GetResourcePath(dirPath) |
|
| 87 |
- if err != nil {
|
|
| 88 |
- return "", "", err |
|
| 89 |
- } |
|
| 90 |
- |
|
| 91 |
- // resolvedDirPath will have been cleaned (no trailing path separators) so |
|
| 92 |
- // we can manually join it with the base path element. |
|
| 93 |
- resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath |
|
| 94 |
- |
|
| 95 |
- return resolvedPath, absPath, nil |
|
| 96 |
-} |
|
| 97 |
- |
|
| 98 |
-// statPath is the unexported version of StatPath. Locks and mounts should |
|
| 99 |
-// be acquired before calling this method and the given path should be fully |
|
| 100 |
-// resolved to a path on the host corresponding to the given absolute path |
|
| 101 |
-// inside the container. |
|
| 102 |
-func (container *Container) statPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) {
|
|
| 103 |
- lstat, err := os.Lstat(resolvedPath) |
|
| 104 |
- if err != nil {
|
|
| 105 |
- return nil, err |
|
| 106 |
- } |
|
| 107 |
- |
|
| 108 |
- var linkTarget string |
|
| 109 |
- if lstat.Mode()&os.ModeSymlink != 0 {
|
|
| 110 |
- // Fully evaluate the symlink in the scope of the container rootfs. |
|
| 111 |
- hostPath, err := container.GetResourcePath(absPath) |
|
| 112 |
- if err != nil {
|
|
| 113 |
- return nil, err |
|
| 114 |
- } |
|
| 115 |
- |
|
| 116 |
- linkTarget, err = filepath.Rel(container.basefs, hostPath) |
|
| 117 |
- if err != nil {
|
|
| 118 |
- return nil, err |
|
| 119 |
- } |
|
| 120 |
- |
|
| 121 |
- // Make it an absolute path. |
|
| 122 |
- linkTarget = filepath.Join(string(filepath.Separator), linkTarget) |
|
| 123 |
- } |
|
| 124 |
- |
|
| 125 |
- return &types.ContainerPathStat{
|
|
| 126 |
- Name: filepath.Base(absPath), |
|
| 127 |
- Size: lstat.Size(), |
|
| 128 |
- Mode: lstat.Mode(), |
|
| 129 |
- Mtime: lstat.ModTime(), |
|
| 130 |
- LinkTarget: linkTarget, |
|
| 131 |
- }, nil |
|
| 132 |
-} |
|
| 133 |
- |
|
| 134 | 74 |
// containerStatPath stats the filesystem resource at the specified path in this |
| 135 | 75 |
// container. Returns stat info about the resource. |
| 136 |
-func (daemon *Daemon) containerStatPath(container *Container, path string) (stat *types.ContainerPathStat, err error) {
|
|
| 76 |
+func (daemon *Daemon) containerStatPath(container *container.Container, path string) (stat *types.ContainerPathStat, err error) {
|
|
| 137 | 77 |
container.Lock() |
| 138 | 78 |
defer container.Unlock() |
| 139 | 79 |
|
| ... | ... |
@@ -143,23 +84,23 @@ func (daemon *Daemon) containerStatPath(container *Container, path string) (stat |
| 143 | 143 |
defer daemon.Unmount(container) |
| 144 | 144 |
|
| 145 | 145 |
err = daemon.mountVolumes(container) |
| 146 |
- defer container.unmountVolumes(true) |
|
| 146 |
+ defer container.UnmountVolumes(true) |
|
| 147 | 147 |
if err != nil {
|
| 148 | 148 |
return nil, err |
| 149 | 149 |
} |
| 150 | 150 |
|
| 151 |
- resolvedPath, absPath, err := container.resolvePath(path) |
|
| 151 |
+ resolvedPath, absPath, err := container.ResolvePath(path) |
|
| 152 | 152 |
if err != nil {
|
| 153 | 153 |
return nil, err |
| 154 | 154 |
} |
| 155 | 155 |
|
| 156 |
- return container.statPath(resolvedPath, absPath) |
|
| 156 |
+ return container.StatPath(resolvedPath, absPath) |
|
| 157 | 157 |
} |
| 158 | 158 |
|
| 159 | 159 |
// containerArchivePath creates an archive of the filesystem resource at the specified |
| 160 | 160 |
// path in this container. Returns a tar archive of the resource and stat info |
| 161 | 161 |
// about the resource. |
| 162 |
-func (daemon *Daemon) containerArchivePath(container *Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
|
|
| 162 |
+func (daemon *Daemon) containerArchivePath(container *container.Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
|
|
| 163 | 163 |
container.Lock() |
| 164 | 164 |
|
| 165 | 165 |
defer func() {
|
| ... | ... |
@@ -178,7 +119,7 @@ func (daemon *Daemon) containerArchivePath(container *Container, path string) (c |
| 178 | 178 |
defer func() {
|
| 179 | 179 |
if err != nil {
|
| 180 | 180 |
// unmount any volumes |
| 181 |
- container.unmountVolumes(true) |
|
| 181 |
+ container.UnmountVolumes(true) |
|
| 182 | 182 |
// unmount the container's rootfs |
| 183 | 183 |
daemon.Unmount(container) |
| 184 | 184 |
} |
| ... | ... |
@@ -188,12 +129,12 @@ func (daemon *Daemon) containerArchivePath(container *Container, path string) (c |
| 188 | 188 |
return nil, nil, err |
| 189 | 189 |
} |
| 190 | 190 |
|
| 191 |
- resolvedPath, absPath, err := container.resolvePath(path) |
|
| 191 |
+ resolvedPath, absPath, err := container.ResolvePath(path) |
|
| 192 | 192 |
if err != nil {
|
| 193 | 193 |
return nil, nil, err |
| 194 | 194 |
} |
| 195 | 195 |
|
| 196 |
- stat, err = container.statPath(resolvedPath, absPath) |
|
| 196 |
+ stat, err = container.StatPath(resolvedPath, absPath) |
|
| 197 | 197 |
if err != nil {
|
| 198 | 198 |
return nil, nil, err |
| 199 | 199 |
} |
| ... | ... |
@@ -213,7 +154,7 @@ func (daemon *Daemon) containerArchivePath(container *Container, path string) (c |
| 213 | 213 |
|
| 214 | 214 |
content = ioutils.NewReadCloserWrapper(data, func() error {
|
| 215 | 215 |
err := data.Close() |
| 216 |
- container.unmountVolumes(true) |
|
| 216 |
+ container.UnmountVolumes(true) |
|
| 217 | 217 |
daemon.Unmount(container) |
| 218 | 218 |
container.Unlock() |
| 219 | 219 |
return err |
| ... | ... |
@@ -230,7 +171,7 @@ func (daemon *Daemon) containerArchivePath(container *Container, path string) (c |
| 230 | 230 |
// noOverwriteDirNonDir is true then it will be an error if unpacking the |
| 231 | 231 |
// given content would cause an existing directory to be replaced with a non- |
| 232 | 232 |
// directory and vice versa. |
| 233 |
-func (daemon *Daemon) containerExtractToDir(container *Container, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) {
|
|
| 233 |
+func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) {
|
|
| 234 | 234 |
container.Lock() |
| 235 | 235 |
defer container.Unlock() |
| 236 | 236 |
|
| ... | ... |
@@ -240,14 +181,14 @@ func (daemon *Daemon) containerExtractToDir(container *Container, path string, n |
| 240 | 240 |
defer daemon.Unmount(container) |
| 241 | 241 |
|
| 242 | 242 |
err = daemon.mountVolumes(container) |
| 243 |
- defer container.unmountVolumes(true) |
|
| 243 |
+ defer container.UnmountVolumes(true) |
|
| 244 | 244 |
if err != nil {
|
| 245 | 245 |
return err |
| 246 | 246 |
} |
| 247 | 247 |
|
| 248 | 248 |
// The destination path needs to be resolved to a host path, with all |
| 249 | 249 |
// symbolic links followed in the scope of the container's rootfs. Note |
| 250 |
- // that we do not use `container.resolvePath(path)` here because we need |
|
| 250 |
+ // that we do not use `container.ResolvePath(path)` here because we need |
|
| 251 | 251 |
// to also evaluate the last path element if it is a symlink. This is so |
| 252 | 252 |
// that you can extract an archive to a symlink that points to a directory. |
| 253 | 253 |
|
| ... | ... |
@@ -283,14 +224,14 @@ func (daemon *Daemon) containerExtractToDir(container *Container, path string, n |
| 283 | 283 |
// a volume file path. |
| 284 | 284 |
var baseRel string |
| 285 | 285 |
if strings.HasPrefix(resolvedPath, `\\?\Volume{`) {
|
| 286 |
- if strings.HasPrefix(resolvedPath, container.basefs) {
|
|
| 287 |
- baseRel = resolvedPath[len(container.basefs):] |
|
| 286 |
+ if strings.HasPrefix(resolvedPath, container.BaseFS) {
|
|
| 287 |
+ baseRel = resolvedPath[len(container.BaseFS):] |
|
| 288 | 288 |
if baseRel[:1] == `\` {
|
| 289 | 289 |
baseRel = baseRel[1:] |
| 290 | 290 |
} |
| 291 | 291 |
} |
| 292 | 292 |
} else {
|
| 293 |
- baseRel, err = filepath.Rel(container.basefs, resolvedPath) |
|
| 293 |
+ baseRel, err = filepath.Rel(container.BaseFS, resolvedPath) |
|
| 294 | 294 |
} |
| 295 | 295 |
if err != nil {
|
| 296 | 296 |
return err |
| ... | ... |
@@ -303,7 +244,7 @@ func (daemon *Daemon) containerExtractToDir(container *Container, path string, n |
| 303 | 303 |
return err |
| 304 | 304 |
} |
| 305 | 305 |
|
| 306 |
- if !toVolume && container.hostConfig.ReadonlyRootfs {
|
|
| 306 |
+ if !toVolume && container.HostConfig.ReadonlyRootfs {
|
|
| 307 | 307 |
return ErrRootFSReadOnly |
| 308 | 308 |
} |
| 309 | 309 |
|
| ... | ... |
@@ -323,7 +264,7 @@ func (daemon *Daemon) containerExtractToDir(container *Container, path string, n |
| 323 | 323 |
return nil |
| 324 | 324 |
} |
| 325 | 325 |
|
| 326 |
-func (daemon *Daemon) containerCopy(container *Container, resource string) (rc io.ReadCloser, err error) {
|
|
| 326 |
+func (daemon *Daemon) containerCopy(container *container.Container, resource string) (rc io.ReadCloser, err error) {
|
|
| 327 | 327 |
container.Lock() |
| 328 | 328 |
|
| 329 | 329 |
defer func() {
|
| ... | ... |
@@ -342,7 +283,7 @@ func (daemon *Daemon) containerCopy(container *Container, resource string) (rc i |
| 342 | 342 |
defer func() {
|
| 343 | 343 |
if err != nil {
|
| 344 | 344 |
// unmount any volumes |
| 345 |
- container.unmountVolumes(true) |
|
| 345 |
+ container.UnmountVolumes(true) |
|
| 346 | 346 |
// unmount the container's rootfs |
| 347 | 347 |
daemon.Unmount(container) |
| 348 | 348 |
} |
| ... | ... |
@@ -379,7 +320,7 @@ func (daemon *Daemon) containerCopy(container *Container, resource string) (rc i |
| 379 | 379 |
|
| 380 | 380 |
reader := ioutils.NewReadCloserWrapper(archive, func() error {
|
| 381 | 381 |
err := archive.Close() |
| 382 |
- container.unmountVolumes(true) |
|
| 382 |
+ container.UnmountVolumes(true) |
|
| 383 | 383 |
daemon.Unmount(container) |
| 384 | 384 |
container.Unlock() |
| 385 | 385 |
return err |
| ... | ... |
@@ -2,10 +2,12 @@ |
| 2 | 2 |
|
| 3 | 3 |
package daemon |
| 4 | 4 |
|
| 5 |
+import "github.com/docker/docker/container" |
|
| 6 |
+ |
|
| 5 | 7 |
// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it |
| 6 | 8 |
// cannot be in a read-only volume. If it is not in a volume, the container |
| 7 | 9 |
// cannot be configured with a read-only rootfs. |
| 8 |
-func checkIfPathIsInAVolume(container *Container, absPath string) (bool, error) {
|
|
| 10 |
+func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) {
|
|
| 9 | 11 |
var toVolume bool |
| 10 | 12 |
for _, mnt := range container.MountPoints {
|
| 11 | 13 |
if toVolume = mnt.HasResource(absPath); toVolume {
|
| ... | ... |
@@ -1,11 +1,13 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 |
+import "github.com/docker/docker/container" |
|
| 4 |
+ |
|
| 3 | 5 |
// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it |
| 4 | 6 |
// cannot be in a read-only volume. If it is not in a volume, the container |
| 5 | 7 |
// cannot be configured with a read-only rootfs. |
| 6 | 8 |
// |
| 7 | 9 |
// This is a no-op on Windows which does not support read-only volumes, or |
| 8 | 10 |
// extracting to a mount point inside a volume. TODO Windows: FIXME Post-TP4 |
| 9 |
-func checkIfPathIsInAVolume(container *Container, absPath string) (bool, error) {
|
|
| 11 |
+func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) {
|
|
| 10 | 12 |
return false, nil |
| 11 | 13 |
} |
| ... | ... |
@@ -5,6 +5,7 @@ import ( |
| 5 | 5 |
"time" |
| 6 | 6 |
|
| 7 | 7 |
"github.com/Sirupsen/logrus" |
| 8 |
+ "github.com/docker/docker/container" |
|
| 8 | 9 |
"github.com/docker/docker/daemon/logger" |
| 9 | 10 |
"github.com/docker/docker/pkg/stdcopy" |
| 10 | 11 |
) |
| ... | ... |
@@ -66,7 +67,7 @@ func (daemon *Daemon) ContainerWsAttachWithLogs(prefixOrName string, c *Containe |
| 66 | 66 |
return daemon.attachWithLogs(container, c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream) |
| 67 | 67 |
} |
| 68 | 68 |
|
| 69 |
-func (daemon *Daemon) attachWithLogs(container *Container, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
|
|
| 69 |
+func (daemon *Daemon) attachWithLogs(container *container.Container, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
|
|
| 70 | 70 |
if logs {
|
| 71 | 71 |
logDriver, err := daemon.getLogger(container) |
| 72 | 72 |
if err != nil {
|
| ... | ... |
@@ -8,6 +8,7 @@ import ( |
| 8 | 8 |
"time" |
| 9 | 9 |
|
| 10 | 10 |
"github.com/docker/distribution/reference" |
| 11 |
+ "github.com/docker/docker/container" |
|
| 11 | 12 |
"github.com/docker/docker/dockerversion" |
| 12 | 13 |
"github.com/docker/docker/image" |
| 13 | 14 |
"github.com/docker/docker/layer" |
| ... | ... |
@@ -42,7 +43,7 @@ func (daemon *Daemon) Commit(name string, c *ContainerCommitConfig) (string, err |
| 42 | 42 |
return "", fmt.Errorf("Windows does not support commit of a running container")
|
| 43 | 43 |
} |
| 44 | 44 |
|
| 45 |
- if c.Pause && !container.isPaused() {
|
|
| 45 |
+ if c.Pause && !container.IsPaused() {
|
|
| 46 | 46 |
daemon.containerPause(container) |
| 47 | 47 |
defer daemon.containerUnpause(container) |
| 48 | 48 |
} |
| ... | ... |
@@ -145,12 +146,12 @@ func (daemon *Daemon) Commit(name string, c *ContainerCommitConfig) (string, err |
| 145 | 145 |
return id.String(), nil |
| 146 | 146 |
} |
| 147 | 147 |
|
| 148 |
-func (daemon *Daemon) exportContainerRw(container *Container) (archive.Archive, error) {
|
|
| 148 |
+func (daemon *Daemon) exportContainerRw(container *container.Container) (archive.Archive, error) {
|
|
| 149 | 149 |
if err := daemon.Mount(container); err != nil {
|
| 150 | 150 |
return nil, err |
| 151 | 151 |
} |
| 152 | 152 |
|
| 153 |
- archive, err := container.rwlayer.TarStream() |
|
| 153 |
+ archive, err := container.RWLayer.TarStream() |
|
| 154 | 154 |
if err != nil {
|
| 155 | 155 |
return nil, err |
| 156 | 156 |
} |
| 157 | 157 |
deleted file mode 100644 |
| ... | ... |
@@ -1,552 +0,0 @@ |
| 1 |
-package daemon |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "encoding/json" |
|
| 5 |
- "errors" |
|
| 6 |
- "fmt" |
|
| 7 |
- "io" |
|
| 8 |
- "os" |
|
| 9 |
- "path/filepath" |
|
| 10 |
- "sync" |
|
| 11 |
- "syscall" |
|
| 12 |
- "time" |
|
| 13 |
- |
|
| 14 |
- "github.com/opencontainers/runc/libcontainer/label" |
|
| 15 |
- |
|
| 16 |
- "github.com/Sirupsen/logrus" |
|
| 17 |
- "github.com/docker/docker/daemon/exec" |
|
| 18 |
- "github.com/docker/docker/daemon/execdriver" |
|
| 19 |
- "github.com/docker/docker/daemon/logger" |
|
| 20 |
- "github.com/docker/docker/daemon/logger/jsonfilelog" |
|
| 21 |
- "github.com/docker/docker/daemon/network" |
|
| 22 |
- derr "github.com/docker/docker/errors" |
|
| 23 |
- "github.com/docker/docker/image" |
|
| 24 |
- "github.com/docker/docker/layer" |
|
| 25 |
- "github.com/docker/docker/pkg/nat" |
|
| 26 |
- "github.com/docker/docker/pkg/promise" |
|
| 27 |
- "github.com/docker/docker/pkg/signal" |
|
| 28 |
- "github.com/docker/docker/pkg/symlink" |
|
| 29 |
- |
|
| 30 |
- "github.com/docker/docker/runconfig" |
|
| 31 |
- "github.com/docker/docker/volume" |
|
| 32 |
-) |
|
| 33 |
- |
|
| 34 |
-const configFileName = "config.v2.json" |
|
| 35 |
- |
|
| 36 |
-var ( |
|
| 37 |
- // ErrRootFSReadOnly is returned when a container |
|
| 38 |
- // rootfs is marked readonly. |
|
| 39 |
- ErrRootFSReadOnly = errors.New("container rootfs is marked read-only")
|
|
| 40 |
-) |
|
| 41 |
- |
|
| 42 |
-// CommonContainer holds the fields for a container which are |
|
| 43 |
-// applicable across all platforms supported by the daemon. |
|
| 44 |
-type CommonContainer struct {
|
|
| 45 |
- *runconfig.StreamConfig |
|
| 46 |
- // embed for Container to support states directly. |
|
| 47 |
- *State `json:"State"` // Needed for remote api version <= 1.11 |
|
| 48 |
- root string // Path to the "home" of the container, including metadata. |
|
| 49 |
- basefs string // Path to the graphdriver mountpoint |
|
| 50 |
- rwlayer layer.RWLayer |
|
| 51 |
- ID string |
|
| 52 |
- Created time.Time |
|
| 53 |
- Path string |
|
| 54 |
- Args []string |
|
| 55 |
- Config *runconfig.Config |
|
| 56 |
- ImageID image.ID `json:"Image"` |
|
| 57 |
- NetworkSettings *network.Settings |
|
| 58 |
- LogPath string |
|
| 59 |
- Name string |
|
| 60 |
- Driver string |
|
| 61 |
- // MountLabel contains the options for the 'mount' command |
|
| 62 |
- MountLabel string |
|
| 63 |
- ProcessLabel string |
|
| 64 |
- RestartCount int |
|
| 65 |
- HasBeenStartedBefore bool |
|
| 66 |
- HasBeenManuallyStopped bool // used for unless-stopped restart policy |
|
| 67 |
- MountPoints map[string]*volume.MountPoint |
|
| 68 |
- hostConfig *runconfig.HostConfig |
|
| 69 |
- command *execdriver.Command |
|
| 70 |
- monitor *containerMonitor |
|
| 71 |
- execCommands *exec.Store |
|
| 72 |
- // logDriver for closing |
|
| 73 |
- logDriver logger.Logger |
|
| 74 |
- logCopier *logger.Copier |
|
| 75 |
-} |
|
| 76 |
- |
|
| 77 |
-// newBaseContainer creates a new container with its |
|
| 78 |
-// basic configuration. |
|
| 79 |
-func newBaseContainer(id, root string) *Container {
|
|
| 80 |
- return &Container{
|
|
| 81 |
- CommonContainer: CommonContainer{
|
|
| 82 |
- ID: id, |
|
| 83 |
- State: NewState(), |
|
| 84 |
- execCommands: exec.NewStore(), |
|
| 85 |
- root: root, |
|
| 86 |
- MountPoints: make(map[string]*volume.MountPoint), |
|
| 87 |
- StreamConfig: runconfig.NewStreamConfig(), |
|
| 88 |
- }, |
|
| 89 |
- } |
|
| 90 |
-} |
|
| 91 |
- |
|
| 92 |
-func (container *Container) fromDisk() error {
|
|
| 93 |
- pth, err := container.jsonPath() |
|
| 94 |
- if err != nil {
|
|
| 95 |
- return err |
|
| 96 |
- } |
|
| 97 |
- |
|
| 98 |
- jsonSource, err := os.Open(pth) |
|
| 99 |
- if err != nil {
|
|
| 100 |
- return err |
|
| 101 |
- } |
|
| 102 |
- defer jsonSource.Close() |
|
| 103 |
- |
|
| 104 |
- dec := json.NewDecoder(jsonSource) |
|
| 105 |
- |
|
| 106 |
- // Load container settings |
|
| 107 |
- if err := dec.Decode(container); err != nil {
|
|
| 108 |
- return err |
|
| 109 |
- } |
|
| 110 |
- |
|
| 111 |
- if err := label.ReserveLabel(container.ProcessLabel); err != nil {
|
|
| 112 |
- return err |
|
| 113 |
- } |
|
| 114 |
- return container.readHostConfig() |
|
| 115 |
-} |
|
| 116 |
- |
|
| 117 |
-func (container *Container) toDisk() error {
|
|
| 118 |
- pth, err := container.jsonPath() |
|
| 119 |
- if err != nil {
|
|
| 120 |
- return err |
|
| 121 |
- } |
|
| 122 |
- |
|
| 123 |
- jsonSource, err := os.Create(pth) |
|
| 124 |
- if err != nil {
|
|
| 125 |
- return err |
|
| 126 |
- } |
|
| 127 |
- defer jsonSource.Close() |
|
| 128 |
- |
|
| 129 |
- enc := json.NewEncoder(jsonSource) |
|
| 130 |
- |
|
| 131 |
- // Save container settings |
|
| 132 |
- if err := enc.Encode(container); err != nil {
|
|
| 133 |
- return err |
|
| 134 |
- } |
|
| 135 |
- |
|
| 136 |
- return container.writeHostConfig() |
|
| 137 |
-} |
|
| 138 |
- |
|
| 139 |
-func (container *Container) toDiskLocking() error {
|
|
| 140 |
- container.Lock() |
|
| 141 |
- err := container.toDisk() |
|
| 142 |
- container.Unlock() |
|
| 143 |
- return err |
|
| 144 |
-} |
|
| 145 |
- |
|
| 146 |
-func (container *Container) readHostConfig() error {
|
|
| 147 |
- container.hostConfig = &runconfig.HostConfig{}
|
|
| 148 |
- // If the hostconfig file does not exist, do not read it. |
|
| 149 |
- // (We still have to initialize container.hostConfig, |
|
| 150 |
- // but that's OK, since we just did that above.) |
|
| 151 |
- pth, err := container.hostConfigPath() |
|
| 152 |
- if err != nil {
|
|
| 153 |
- return err |
|
| 154 |
- } |
|
| 155 |
- |
|
| 156 |
- f, err := os.Open(pth) |
|
| 157 |
- if err != nil {
|
|
| 158 |
- if os.IsNotExist(err) {
|
|
| 159 |
- return nil |
|
| 160 |
- } |
|
| 161 |
- return err |
|
| 162 |
- } |
|
| 163 |
- defer f.Close() |
|
| 164 |
- |
|
| 165 |
- if err := json.NewDecoder(f).Decode(&container.hostConfig); err != nil {
|
|
| 166 |
- return err |
|
| 167 |
- } |
|
| 168 |
- |
|
| 169 |
- initDNSHostConfig(container) |
|
| 170 |
- |
|
| 171 |
- return nil |
|
| 172 |
-} |
|
| 173 |
- |
|
| 174 |
-func (container *Container) writeHostConfig() error {
|
|
| 175 |
- pth, err := container.hostConfigPath() |
|
| 176 |
- if err != nil {
|
|
| 177 |
- return err |
|
| 178 |
- } |
|
| 179 |
- |
|
| 180 |
- f, err := os.Create(pth) |
|
| 181 |
- if err != nil {
|
|
| 182 |
- return err |
|
| 183 |
- } |
|
| 184 |
- defer f.Close() |
|
| 185 |
- |
|
| 186 |
- return json.NewEncoder(f).Encode(&container.hostConfig) |
|
| 187 |
-} |
|
| 188 |
- |
|
| 189 |
-// GetResourcePath evaluates `path` in the scope of the container's basefs, with proper path |
|
| 190 |
-// sanitisation. Symlinks are all scoped to the basefs of the container, as |
|
| 191 |
-// though the container's basefs was `/`. |
|
| 192 |
-// |
|
| 193 |
-// The basefs of a container is the host-facing path which is bind-mounted as |
|
| 194 |
-// `/` inside the container. This method is essentially used to access a |
|
| 195 |
-// particular path inside the container as though you were a process in that |
|
| 196 |
-// container. |
|
| 197 |
-// |
|
| 198 |
-// NOTE: The returned path is *only* safely scoped inside the container's basefs |
|
| 199 |
-// if no component of the returned path changes (such as a component |
|
| 200 |
-// symlinking to a different path) between using this method and using the |
|
| 201 |
-// path. See symlink.FollowSymlinkInScope for more details. |
|
| 202 |
-func (container *Container) GetResourcePath(path string) (string, error) {
|
|
| 203 |
- // IMPORTANT - These are paths on the OS where the daemon is running, hence |
|
| 204 |
- // any filepath operations must be done in an OS agnostic way. |
|
| 205 |
- cleanPath := filepath.Join(string(os.PathSeparator), path) |
|
| 206 |
- r, e := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, cleanPath), container.basefs) |
|
| 207 |
- return r, e |
|
| 208 |
-} |
|
| 209 |
- |
|
| 210 |
-// Evaluates `path` in the scope of the container's root, with proper path |
|
| 211 |
-// sanitisation. Symlinks are all scoped to the root of the container, as |
|
| 212 |
-// though the container's root was `/`. |
|
| 213 |
-// |
|
| 214 |
-// The root of a container is the host-facing configuration metadata directory. |
|
| 215 |
-// Only use this method to safely access the container's `container.json` or |
|
| 216 |
-// other metadata files. If in doubt, use container.GetResourcePath. |
|
| 217 |
-// |
|
| 218 |
-// NOTE: The returned path is *only* safely scoped inside the container's root |
|
| 219 |
-// if no component of the returned path changes (such as a component |
|
| 220 |
-// symlinking to a different path) between using this method and using the |
|
| 221 |
-// path. See symlink.FollowSymlinkInScope for more details. |
|
| 222 |
-func (container *Container) getRootResourcePath(path string) (string, error) {
|
|
| 223 |
- // IMPORTANT - These are paths on the OS where the daemon is running, hence |
|
| 224 |
- // any filepath operations must be done in an OS agnostic way. |
|
| 225 |
- cleanPath := filepath.Join(string(os.PathSeparator), path) |
|
| 226 |
- return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root) |
|
| 227 |
-} |
|
| 228 |
- |
|
| 229 |
-// ExitOnNext signals to the monitor that it should not restart the container |
|
| 230 |
-// after we send the kill signal. |
|
| 231 |
-func (container *Container) ExitOnNext() {
|
|
| 232 |
- container.monitor.ExitOnNext() |
|
| 233 |
-} |
|
| 234 |
- |
|
| 235 |
-// Resize changes the TTY of the process running inside the container |
|
| 236 |
-// to the given height and width. The container must be running. |
|
| 237 |
-func (container *Container) Resize(h, w int) error {
|
|
| 238 |
- if err := container.command.ProcessConfig.Terminal.Resize(h, w); err != nil {
|
|
| 239 |
- return err |
|
| 240 |
- } |
|
| 241 |
- return nil |
|
| 242 |
-} |
|
| 243 |
- |
|
| 244 |
-func (container *Container) hostConfigPath() (string, error) {
|
|
| 245 |
- return container.getRootResourcePath("hostconfig.json")
|
|
| 246 |
-} |
|
| 247 |
- |
|
| 248 |
-func (container *Container) jsonPath() (string, error) {
|
|
| 249 |
- return container.getRootResourcePath(configFileName) |
|
| 250 |
-} |
|
| 251 |
- |
|
| 252 |
-// This directory is only usable when the container is running |
|
| 253 |
-func (container *Container) rootfsPath() string {
|
|
| 254 |
- return container.basefs |
|
| 255 |
-} |
|
| 256 |
- |
|
| 257 |
-func validateID(id string) error {
|
|
| 258 |
- if id == "" {
|
|
| 259 |
- return derr.ErrorCodeEmptyID |
|
| 260 |
- } |
|
| 261 |
- return nil |
|
| 262 |
-} |
|
| 263 |
- |
|
| 264 |
-// Returns true if the container exposes a certain port |
|
| 265 |
-func (container *Container) exposes(p nat.Port) bool {
|
|
| 266 |
- _, exists := container.Config.ExposedPorts[p] |
|
| 267 |
- return exists |
|
| 268 |
-} |
|
| 269 |
- |
|
| 270 |
-func (container *Container) getLogConfig(defaultConfig runconfig.LogConfig) runconfig.LogConfig {
|
|
| 271 |
- cfg := container.hostConfig.LogConfig |
|
| 272 |
- if cfg.Type != "" || len(cfg.Config) > 0 { // container has log driver configured
|
|
| 273 |
- if cfg.Type == "" {
|
|
| 274 |
- cfg.Type = jsonfilelog.Name |
|
| 275 |
- } |
|
| 276 |
- return cfg |
|
| 277 |
- } |
|
| 278 |
- // Use daemon's default log config for containers |
|
| 279 |
- return defaultConfig |
|
| 280 |
-} |
|
| 281 |
- |
|
| 282 |
-// StartLogger starts a new logger driver for the container. |
|
| 283 |
-func (container *Container) StartLogger(cfg runconfig.LogConfig) (logger.Logger, error) {
|
|
| 284 |
- c, err := logger.GetLogDriver(cfg.Type) |
|
| 285 |
- if err != nil {
|
|
| 286 |
- return nil, derr.ErrorCodeLoggingFactory.WithArgs(err) |
|
| 287 |
- } |
|
| 288 |
- ctx := logger.Context{
|
|
| 289 |
- Config: cfg.Config, |
|
| 290 |
- ContainerID: container.ID, |
|
| 291 |
- ContainerName: container.Name, |
|
| 292 |
- ContainerEntrypoint: container.Path, |
|
| 293 |
- ContainerArgs: container.Args, |
|
| 294 |
- ContainerImageID: container.ImageID.String(), |
|
| 295 |
- ContainerImageName: container.Config.Image, |
|
| 296 |
- ContainerCreated: container.Created, |
|
| 297 |
- ContainerEnv: container.Config.Env, |
|
| 298 |
- ContainerLabels: container.Config.Labels, |
|
| 299 |
- } |
|
| 300 |
- |
|
| 301 |
- // Set logging file for "json-logger" |
|
| 302 |
- if cfg.Type == jsonfilelog.Name {
|
|
| 303 |
- ctx.LogPath, err = container.getRootResourcePath(fmt.Sprintf("%s-json.log", container.ID))
|
|
| 304 |
- if err != nil {
|
|
| 305 |
- return nil, err |
|
| 306 |
- } |
|
| 307 |
- } |
|
| 308 |
- return c(ctx) |
|
| 309 |
-} |
|
| 310 |
- |
|
| 311 |
-func (container *Container) getProcessLabel() string {
|
|
| 312 |
- // even if we have a process label return "" if we are running |
|
| 313 |
- // in privileged mode |
|
| 314 |
- if container.hostConfig.Privileged {
|
|
| 315 |
- return "" |
|
| 316 |
- } |
|
| 317 |
- return container.ProcessLabel |
|
| 318 |
-} |
|
| 319 |
- |
|
| 320 |
-func (container *Container) getMountLabel() string {
|
|
| 321 |
- if container.hostConfig.Privileged {
|
|
| 322 |
- return "" |
|
| 323 |
- } |
|
| 324 |
- return container.MountLabel |
|
| 325 |
-} |
|
| 326 |
- |
|
| 327 |
-func (container *Container) getExecIDs() []string {
|
|
| 328 |
- return container.execCommands.List() |
|
| 329 |
-} |
|
| 330 |
- |
|
| 331 |
-// Attach connects to the container's TTY, delegating to standard |
|
| 332 |
-// streams or websockets depending on the configuration. |
|
| 333 |
-func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
|
|
| 334 |
- return attach(container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr) |
|
| 335 |
-} |
|
| 336 |
- |
|
| 337 |
-func attach(streamConfig *runconfig.StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
|
|
| 338 |
- var ( |
|
| 339 |
- cStdout, cStderr io.ReadCloser |
|
| 340 |
- cStdin io.WriteCloser |
|
| 341 |
- wg sync.WaitGroup |
|
| 342 |
- errors = make(chan error, 3) |
|
| 343 |
- ) |
|
| 344 |
- |
|
| 345 |
- if stdin != nil && openStdin {
|
|
| 346 |
- cStdin = streamConfig.StdinPipe() |
|
| 347 |
- wg.Add(1) |
|
| 348 |
- } |
|
| 349 |
- |
|
| 350 |
- if stdout != nil {
|
|
| 351 |
- cStdout = streamConfig.StdoutPipe() |
|
| 352 |
- wg.Add(1) |
|
| 353 |
- } |
|
| 354 |
- |
|
| 355 |
- if stderr != nil {
|
|
| 356 |
- cStderr = streamConfig.StderrPipe() |
|
| 357 |
- wg.Add(1) |
|
| 358 |
- } |
|
| 359 |
- |
|
| 360 |
- // Connect stdin of container to the http conn. |
|
| 361 |
- go func() {
|
|
| 362 |
- if stdin == nil || !openStdin {
|
|
| 363 |
- return |
|
| 364 |
- } |
|
| 365 |
- logrus.Debugf("attach: stdin: begin")
|
|
| 366 |
- defer func() {
|
|
| 367 |
- if stdinOnce && !tty {
|
|
| 368 |
- cStdin.Close() |
|
| 369 |
- } else {
|
|
| 370 |
- // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr |
|
| 371 |
- if cStdout != nil {
|
|
| 372 |
- cStdout.Close() |
|
| 373 |
- } |
|
| 374 |
- if cStderr != nil {
|
|
| 375 |
- cStderr.Close() |
|
| 376 |
- } |
|
| 377 |
- } |
|
| 378 |
- wg.Done() |
|
| 379 |
- logrus.Debugf("attach: stdin: end")
|
|
| 380 |
- }() |
|
| 381 |
- |
|
| 382 |
- var err error |
|
| 383 |
- if tty {
|
|
| 384 |
- _, err = copyEscapable(cStdin, stdin) |
|
| 385 |
- } else {
|
|
| 386 |
- _, err = io.Copy(cStdin, stdin) |
|
| 387 |
- |
|
| 388 |
- } |
|
| 389 |
- if err == io.ErrClosedPipe {
|
|
| 390 |
- err = nil |
|
| 391 |
- } |
|
| 392 |
- if err != nil {
|
|
| 393 |
- logrus.Errorf("attach: stdin: %s", err)
|
|
| 394 |
- errors <- err |
|
| 395 |
- return |
|
| 396 |
- } |
|
| 397 |
- }() |
|
| 398 |
- |
|
| 399 |
- attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) {
|
|
| 400 |
- if stream == nil {
|
|
| 401 |
- return |
|
| 402 |
- } |
|
| 403 |
- defer func() {
|
|
| 404 |
- // Make sure stdin gets closed |
|
| 405 |
- if stdin != nil {
|
|
| 406 |
- stdin.Close() |
|
| 407 |
- } |
|
| 408 |
- streamPipe.Close() |
|
| 409 |
- wg.Done() |
|
| 410 |
- logrus.Debugf("attach: %s: end", name)
|
|
| 411 |
- }() |
|
| 412 |
- |
|
| 413 |
- logrus.Debugf("attach: %s: begin", name)
|
|
| 414 |
- _, err := io.Copy(stream, streamPipe) |
|
| 415 |
- if err == io.ErrClosedPipe {
|
|
| 416 |
- err = nil |
|
| 417 |
- } |
|
| 418 |
- if err != nil {
|
|
| 419 |
- logrus.Errorf("attach: %s: %v", name, err)
|
|
| 420 |
- errors <- err |
|
| 421 |
- } |
|
| 422 |
- } |
|
| 423 |
- |
|
| 424 |
- go attachStream("stdout", stdout, cStdout)
|
|
| 425 |
- go attachStream("stderr", stderr, cStderr)
|
|
| 426 |
- |
|
| 427 |
- return promise.Go(func() error {
|
|
| 428 |
- wg.Wait() |
|
| 429 |
- close(errors) |
|
| 430 |
- for err := range errors {
|
|
| 431 |
- if err != nil {
|
|
| 432 |
- return err |
|
| 433 |
- } |
|
| 434 |
- } |
|
| 435 |
- return nil |
|
| 436 |
- }) |
|
| 437 |
-} |
|
| 438 |
- |
|
| 439 |
-// Code c/c from io.Copy() modified to handle escape sequence |
|
| 440 |
-func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
|
|
| 441 |
- buf := make([]byte, 32*1024) |
|
| 442 |
- for {
|
|
| 443 |
- nr, er := src.Read(buf) |
|
| 444 |
- if nr > 0 {
|
|
| 445 |
- // ---- Docker addition |
|
| 446 |
- // char 16 is C-p |
|
| 447 |
- if nr == 1 && buf[0] == 16 {
|
|
| 448 |
- nr, er = src.Read(buf) |
|
| 449 |
- // char 17 is C-q |
|
| 450 |
- if nr == 1 && buf[0] == 17 {
|
|
| 451 |
- if err := src.Close(); err != nil {
|
|
| 452 |
- return 0, err |
|
| 453 |
- } |
|
| 454 |
- return 0, nil |
|
| 455 |
- } |
|
| 456 |
- } |
|
| 457 |
- // ---- End of docker |
|
| 458 |
- nw, ew := dst.Write(buf[0:nr]) |
|
| 459 |
- if nw > 0 {
|
|
| 460 |
- written += int64(nw) |
|
| 461 |
- } |
|
| 462 |
- if ew != nil {
|
|
| 463 |
- err = ew |
|
| 464 |
- break |
|
| 465 |
- } |
|
| 466 |
- if nr != nw {
|
|
| 467 |
- err = io.ErrShortWrite |
|
| 468 |
- break |
|
| 469 |
- } |
|
| 470 |
- } |
|
| 471 |
- if er == io.EOF {
|
|
| 472 |
- break |
|
| 473 |
- } |
|
| 474 |
- if er != nil {
|
|
| 475 |
- err = er |
|
| 476 |
- break |
|
| 477 |
- } |
|
| 478 |
- } |
|
| 479 |
- return written, err |
|
| 480 |
-} |
|
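For context on the copy loop being removed above: `copyEscapable` is essentially `io.Copy` plus a check for the detach sequence C-p C-q (byte 16 followed by byte 17) arriving as two consecutive single-byte reads. A rough, self-contained sketch of that idea — `copyUntilDetach` is a hypothetical helper, not the function that replaces this code:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// copyUntilDetach copies src to dst until EOF or until C-p (16) and
// C-q (17) arrive as two consecutive single-byte reads, at which point
// copying stops and the stream is considered detached.
func copyUntilDetach(dst io.Writer, src io.Reader) (int64, error) {
	buf := make([]byte, 32*1024)
	var written int64
	for {
		nr, er := src.Read(buf)
		if nr == 1 && buf[0] == 16 { // C-p: peek at the next byte
			nr, er = src.Read(buf)
			if nr == 1 && buf[0] == 17 { // C-q: detach
				return written, nil
			}
		}
		if nr > 0 {
			nw, ew := dst.Write(buf[:nr])
			written += int64(nw)
			if ew != nil {
				return written, ew
			}
		}
		if er == io.EOF {
			return written, nil
		}
		if er != nil {
			return written, er
		}
	}
}

func main() {
	// A buffered reader never delivers the sequence as 1-byte reads,
	// so the whole payload is copied through.
	n, err := copyUntilDetach(os.Stdout, bytes.NewReader([]byte("hello\n")))
	fmt.Println(n, err)
}
```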
| 481 |
- |
|
| 482 |
-func (container *Container) shouldRestart() bool {
|
|
| 483 |
- return container.hostConfig.RestartPolicy.Name == "always" || |
|
| 484 |
- (container.hostConfig.RestartPolicy.Name == "unless-stopped" && !container.HasBeenManuallyStopped) || |
|
| 485 |
- (container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0) |
|
| 486 |
-} |
|
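The restart decision just above is a pure predicate over the policy name, the manual-stop flag, and the exit code. A simplified sketch of the same shape, using plain parameters instead of the real host config (hypothetical `shouldRestart` free function):

```go
package main

import "fmt"

// shouldRestart mirrors the decision above: "always" always restarts,
// "unless-stopped" restarts unless the user stopped the container, and
// "on-failure" restarts only when the exit code is non-zero.
func shouldRestart(policy string, manuallyStopped bool, exitCode int) bool {
	switch policy {
	case "always":
		return true
	case "unless-stopped":
		return !manuallyStopped
	case "on-failure":
		return exitCode != 0
	default: // "no" or unset
		return false
	}
}

func main() {
	fmt.Println(shouldRestart("on-failure", false, 1))    // true
	fmt.Println(shouldRestart("on-failure", false, 0))    // false
	fmt.Println(shouldRestart("unless-stopped", true, 1)) // false
}
```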
| 487 |
- |
|
| 488 |
-func (container *Container) addBindMountPoint(name, source, destination string, rw bool) {
|
|
| 489 |
- container.MountPoints[destination] = &volume.MountPoint{
|
|
| 490 |
- Name: name, |
|
| 491 |
- Source: source, |
|
| 492 |
- Destination: destination, |
|
| 493 |
- RW: rw, |
|
| 494 |
- } |
|
| 495 |
-} |
|
| 496 |
- |
|
| 497 |
-func (container *Container) addLocalMountPoint(name, destination string, rw bool) {
|
|
| 498 |
- container.MountPoints[destination] = &volume.MountPoint{
|
|
| 499 |
- Name: name, |
|
| 500 |
- Driver: volume.DefaultDriverName, |
|
| 501 |
- Destination: destination, |
|
| 502 |
- RW: rw, |
|
| 503 |
- } |
|
| 504 |
-} |
|
| 505 |
- |
|
| 506 |
-func (container *Container) addMountPointWithVolume(destination string, vol volume.Volume, rw bool) {
|
|
| 507 |
- container.MountPoints[destination] = &volume.MountPoint{
|
|
| 508 |
- Name: vol.Name(), |
|
| 509 |
- Driver: vol.DriverName(), |
|
| 510 |
- Destination: destination, |
|
| 511 |
- RW: rw, |
|
| 512 |
- Volume: vol, |
|
| 513 |
- } |
|
| 514 |
-} |
|
| 515 |
- |
|
| 516 |
-func (container *Container) isDestinationMounted(destination string) bool {
|
|
| 517 |
- return container.MountPoints[destination] != nil |
|
| 518 |
-} |
|
| 519 |
- |
|
| 520 |
-func (container *Container) stopSignal() int {
|
|
| 521 |
- var stopSignal syscall.Signal |
|
| 522 |
- if container.Config.StopSignal != "" {
|
|
| 523 |
- stopSignal, _ = signal.ParseSignal(container.Config.StopSignal) |
|
| 524 |
- } |
|
| 525 |
- |
|
| 526 |
- if int(stopSignal) == 0 {
|
|
| 527 |
- stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal) |
|
| 528 |
- } |
|
| 529 |
- return int(stopSignal) |
|
| 530 |
-} |
|
| 531 |
- |
|
| 532 |
-// initDNSHostConfig ensures that the dns fields are never nil. |
|
| 533 |
-// New containers don't ever have those fields nil, |
|
| 534 |
-// but pre created containers can still have those nil values. |
|
| 535 |
-// The non-recommended host configuration in the start api can |
|
| 536 |
-// make these fields nil again, this corrects that issue until |
|
| 537 |
-// we remove that behavior for good. |
|
| 538 |
-// See https://github.com/docker/docker/pull/17779 |
|
| 539 |
-// for a more detailed explanation on why we don't want that. |
|
| 540 |
-func initDNSHostConfig(container *Container) {
|
|
| 541 |
- if container.hostConfig.DNS == nil {
|
|
| 542 |
- container.hostConfig.DNS = make([]string, 0) |
|
| 543 |
- } |
|
| 544 |
- |
|
| 545 |
- if container.hostConfig.DNSSearch == nil {
|
|
| 546 |
- container.hostConfig.DNSSearch = make([]string, 0) |
|
| 547 |
- } |
|
| 548 |
- |
|
| 549 |
- if container.hostConfig.DNSOptions == nil {
|
|
| 550 |
- container.hostConfig.DNSOptions = make([]string, 0) |
|
| 551 |
- } |
|
| 552 |
-} |
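That closes the removed daemon/container.go. One helper worth keeping in mind as it moves is `stopSignal()`: it parses `Config.StopSignal` and falls back to the default stop signal when the field is empty or cannot be parsed. A minimal sketch of that fallback using only the standard library — the real code resolves names through pkg/signal and `signal.DefaultStopSignal`; the `signalMap` below is a tiny illustrative subset:

```go
package main

import (
	"fmt"
	"syscall"
)

// signalMap is a small stand-in for the full name-to-signal table that
// pkg/signal provides in the real code.
var signalMap = map[string]syscall.Signal{
	"TERM": syscall.SIGTERM,
	"INT":  syscall.SIGINT,
	"KILL": syscall.SIGKILL,
}

// stopSignal resolves the configured stop signal, falling back to
// SIGTERM when the name is empty or unknown.
func stopSignal(configured string) int {
	if sig, ok := signalMap[configured]; ok {
		return int(sig)
	}
	return int(syscall.SIGTERM)
}

func main() {
	fmt.Println(stopSignal(""))     // 15 on Linux
	fmt.Println(stopSignal("KILL")) // 9
}
```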
| 0 | 9 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,928 @@ |
| 0 |
+// +build linux freebsd |
|
| 1 |
+ |
|
| 2 |
+package daemon |
|
| 3 |
+ |
|
| 4 |
+import ( |
|
| 5 |
+ "fmt" |
|
| 6 |
+ "os" |
|
| 7 |
+ "path" |
|
| 8 |
+ "path/filepath" |
|
| 9 |
+ "strconv" |
|
| 10 |
+ "strings" |
|
| 11 |
+ "syscall" |
|
| 12 |
+ "time" |
|
| 13 |
+ |
|
| 14 |
+ "github.com/Sirupsen/logrus" |
|
| 15 |
+ "github.com/docker/docker/container" |
|
| 16 |
+ "github.com/docker/docker/daemon/execdriver" |
|
| 17 |
+ "github.com/docker/docker/daemon/links" |
|
| 18 |
+ "github.com/docker/docker/daemon/network" |
|
| 19 |
+ derr "github.com/docker/docker/errors" |
|
| 20 |
+ "github.com/docker/docker/pkg/fileutils" |
|
| 21 |
+ "github.com/docker/docker/pkg/idtools" |
|
| 22 |
+ "github.com/docker/docker/pkg/mount" |
|
| 23 |
+ "github.com/docker/docker/pkg/stringid" |
|
| 24 |
+ "github.com/docker/docker/pkg/ulimit" |
|
| 25 |
+ "github.com/docker/docker/runconfig" |
|
| 26 |
+ "github.com/docker/libnetwork" |
|
| 27 |
+ "github.com/docker/libnetwork/netlabel" |
|
| 28 |
+ "github.com/docker/libnetwork/options" |
|
| 29 |
+ "github.com/opencontainers/runc/libcontainer/configs" |
|
| 30 |
+ "github.com/opencontainers/runc/libcontainer/devices" |
|
| 31 |
+ "github.com/opencontainers/runc/libcontainer/label" |
|
| 32 |
+) |
|
| 33 |
+ |
|
| 34 |
+func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) {
|
|
| 35 |
+ var env []string |
|
| 36 |
+ children, err := daemon.children(container.Name) |
|
| 37 |
+ if err != nil {
|
|
| 38 |
+ return nil, err |
|
| 39 |
+ } |
|
| 40 |
+ |
|
| 41 |
+ bridgeSettings := container.NetworkSettings.Networks["bridge"] |
|
| 42 |
+ if bridgeSettings == nil {
|
|
| 43 |
+ return nil, nil |
|
| 44 |
+ } |
|
| 45 |
+ |
|
| 46 |
+ if len(children) > 0 {
|
|
| 47 |
+ for linkAlias, child := range children {
|
|
| 48 |
+ if !child.IsRunning() {
|
|
| 49 |
+ return nil, derr.ErrorCodeLinkNotRunning.WithArgs(child.Name, linkAlias) |
|
| 50 |
+ } |
|
| 51 |
+ |
|
| 52 |
+ childBridgeSettings := child.NetworkSettings.Networks["bridge"] |
|
| 53 |
+ if childBridgeSettings == nil {
|
|
| 54 |
+ return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID)
|
|
| 55 |
+ } |
|
| 56 |
+ |
|
| 57 |
+ link := links.NewLink( |
|
| 58 |
+ bridgeSettings.IPAddress, |
|
| 59 |
+ childBridgeSettings.IPAddress, |
|
| 60 |
+ linkAlias, |
|
| 61 |
+ child.Config.Env, |
|
| 62 |
+ child.Config.ExposedPorts, |
|
| 63 |
+ ) |
|
| 64 |
+ |
|
| 65 |
+ for _, envVar := range link.ToEnv() {
|
|
| 66 |
+ env = append(env, envVar) |
|
| 67 |
+ } |
|
| 68 |
+ } |
|
| 69 |
+ } |
|
| 70 |
+ return env, nil |
|
| 71 |
+} |
|
| 72 |
+ |
|
| 73 |
+func (daemon *Daemon) populateCommand(c *container.Container, env []string) error {
|
|
| 74 |
+ var en *execdriver.Network |
|
| 75 |
+ if !c.Config.NetworkDisabled {
|
|
| 76 |
+ en = &execdriver.Network{}
|
|
| 77 |
+ if !daemon.execDriver.SupportsHooks() || c.HostConfig.NetworkMode.IsHost() {
|
|
| 78 |
+ en.NamespacePath = c.NetworkSettings.SandboxKey |
|
| 79 |
+ } |
|
| 80 |
+ |
|
| 81 |
+ if c.HostConfig.NetworkMode.IsContainer() {
|
|
| 82 |
+ nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer()) |
|
| 83 |
+ if err != nil {
|
|
| 84 |
+ return err |
|
| 85 |
+ } |
|
| 86 |
+ en.ContainerID = nc.ID |
|
| 87 |
+ } |
|
| 88 |
+ } |
|
| 89 |
+ |
|
| 90 |
+ ipc := &execdriver.Ipc{}
|
|
| 91 |
+ var err error |
|
| 92 |
+ c.ShmPath, err = c.ShmResourcePath() |
|
| 93 |
+ if err != nil {
|
|
| 94 |
+ return err |
|
| 95 |
+ } |
|
| 96 |
+ |
|
| 97 |
+ c.MqueuePath, err = c.MqueueResourcePath() |
|
| 98 |
+ if err != nil {
|
|
| 99 |
+ return err |
|
| 100 |
+ } |
|
| 101 |
+ |
|
| 102 |
+ if c.HostConfig.IpcMode.IsContainer() {
|
|
| 103 |
+ ic, err := daemon.getIpcContainer(c) |
|
| 104 |
+ if err != nil {
|
|
| 105 |
+ return err |
|
| 106 |
+ } |
|
| 107 |
+ ipc.ContainerID = ic.ID |
|
| 108 |
+ c.ShmPath = ic.ShmPath |
|
| 109 |
+ c.MqueuePath = ic.MqueuePath |
|
| 110 |
+ } else {
|
|
| 111 |
+ ipc.HostIpc = c.HostConfig.IpcMode.IsHost() |
|
| 112 |
+ if ipc.HostIpc {
|
|
| 113 |
+ if _, err := os.Stat("/dev/shm"); err != nil {
|
|
| 114 |
+ return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host")
|
|
| 115 |
+ } |
|
| 116 |
+ if _, err := os.Stat("/dev/mqueue"); err != nil {
|
|
| 117 |
+ return fmt.Errorf("/dev/mqueue is not mounted, but must be for --ipc=host")
|
|
| 118 |
+ } |
|
| 119 |
+ c.ShmPath = "/dev/shm" |
|
| 120 |
+ c.MqueuePath = "/dev/mqueue" |
|
| 121 |
+ } |
|
| 122 |
+ } |
|
| 123 |
+ |
|
| 124 |
+ pid := &execdriver.Pid{}
|
|
| 125 |
+ pid.HostPid = c.HostConfig.PidMode.IsHost() |
|
| 126 |
+ |
|
| 127 |
+ uts := &execdriver.UTS{
|
|
| 128 |
+ HostUTS: c.HostConfig.UTSMode.IsHost(), |
|
| 129 |
+ } |
|
| 130 |
+ |
|
| 131 |
+ // Build lists of devices allowed and created within the container. |
|
| 132 |
+ var userSpecifiedDevices []*configs.Device |
|
| 133 |
+ for _, deviceMapping := range c.HostConfig.Devices {
|
|
| 134 |
+ devs, err := getDevicesFromPath(deviceMapping) |
|
| 135 |
+ if err != nil {
|
|
| 136 |
+ return err |
|
| 137 |
+ } |
|
| 138 |
+ |
|
| 139 |
+ userSpecifiedDevices = append(userSpecifiedDevices, devs...) |
|
| 140 |
+ } |
|
| 141 |
+ |
|
| 142 |
+ allowedDevices := mergeDevices(configs.DefaultAllowedDevices, userSpecifiedDevices) |
|
| 143 |
+ |
|
| 144 |
+ autoCreatedDevices := mergeDevices(configs.DefaultAutoCreatedDevices, userSpecifiedDevices) |
|
| 145 |
+ |
|
| 146 |
+ var rlimits []*ulimit.Rlimit |
|
| 147 |
+ ulimits := c.HostConfig.Ulimits |
|
| 148 |
+ |
|
| 149 |
+ // Merge ulimits with daemon defaults |
|
| 150 |
+ ulIdx := make(map[string]*ulimit.Ulimit) |
|
| 151 |
+ for _, ul := range ulimits {
|
|
| 152 |
+ ulIdx[ul.Name] = ul |
|
| 153 |
+ } |
|
| 154 |
+ for name, ul := range daemon.configStore.Ulimits {
|
|
| 155 |
+ if _, exists := ulIdx[name]; !exists {
|
|
| 156 |
+ ulimits = append(ulimits, ul) |
|
| 157 |
+ } |
|
| 158 |
+ } |
|
| 159 |
+ |
|
| 160 |
+ weightDevices, err := getBlkioWeightDevices(c.HostConfig) |
|
| 161 |
+ if err != nil {
|
|
| 162 |
+ return err |
|
| 163 |
+ } |
|
| 164 |
+ |
|
| 165 |
+ for _, limit := range ulimits {
|
|
| 166 |
+ rl, err := limit.GetRlimit() |
|
| 167 |
+ if err != nil {
|
|
| 168 |
+ return err |
|
| 169 |
+ } |
|
| 170 |
+ rlimits = append(rlimits, rl) |
|
| 171 |
+ } |
|
| 172 |
+ |
|
| 173 |
+ resources := &execdriver.Resources{
|
|
| 174 |
+ CommonResources: execdriver.CommonResources{
|
|
| 175 |
+ Memory: c.HostConfig.Memory, |
|
| 176 |
+ MemoryReservation: c.HostConfig.MemoryReservation, |
|
| 177 |
+ CPUShares: c.HostConfig.CPUShares, |
|
| 178 |
+ BlkioWeight: c.HostConfig.BlkioWeight, |
|
| 179 |
+ }, |
|
| 180 |
+ MemorySwap: c.HostConfig.MemorySwap, |
|
| 181 |
+ KernelMemory: c.HostConfig.KernelMemory, |
|
| 182 |
+ CpusetCpus: c.HostConfig.CpusetCpus, |
|
| 183 |
+ CpusetMems: c.HostConfig.CpusetMems, |
|
| 184 |
+ CPUPeriod: c.HostConfig.CPUPeriod, |
|
| 185 |
+ CPUQuota: c.HostConfig.CPUQuota, |
|
| 186 |
+ Rlimits: rlimits, |
|
| 187 |
+ BlkioWeightDevice: weightDevices, |
|
| 188 |
+ OomKillDisable: c.HostConfig.OomKillDisable, |
|
| 189 |
+ MemorySwappiness: *c.HostConfig.MemorySwappiness, |
|
| 190 |
+ } |
|
| 191 |
+ |
|
| 192 |
+ processConfig := execdriver.ProcessConfig{
|
|
| 193 |
+ CommonProcessConfig: execdriver.CommonProcessConfig{
|
|
| 194 |
+ Entrypoint: c.Path, |
|
| 195 |
+ Arguments: c.Args, |
|
| 196 |
+ Tty: c.Config.Tty, |
|
| 197 |
+ }, |
|
| 198 |
+ Privileged: c.HostConfig.Privileged, |
|
| 199 |
+ User: c.Config.User, |
|
| 200 |
+ } |
|
| 201 |
+ |
|
| 202 |
+ processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
|
|
| 203 |
+ processConfig.Env = env |
|
| 204 |
+ |
|
| 205 |
+ remappedRoot := &execdriver.User{}
|
|
| 206 |
+ rootUID, rootGID := daemon.GetRemappedUIDGID() |
|
| 207 |
+ if rootUID != 0 {
|
|
| 208 |
+ remappedRoot.UID = rootUID |
|
| 209 |
+ remappedRoot.GID = rootGID |
|
| 210 |
+ } |
|
| 211 |
+ uidMap, gidMap := daemon.GetUIDGIDMaps() |
|
| 212 |
+ |
|
| 213 |
+ c.Command = &execdriver.Command{
|
|
| 214 |
+ CommonCommand: execdriver.CommonCommand{
|
|
| 215 |
+ ID: c.ID, |
|
| 216 |
+ InitPath: "/.dockerinit", |
|
| 217 |
+ MountLabel: c.GetMountLabel(), |
|
| 218 |
+ Network: en, |
|
| 219 |
+ ProcessConfig: processConfig, |
|
| 220 |
+ ProcessLabel: c.GetProcessLabel(), |
|
| 221 |
+ Rootfs: c.BaseFS, |
|
| 222 |
+ Resources: resources, |
|
| 223 |
+ WorkingDir: c.Config.WorkingDir, |
|
| 224 |
+ }, |
|
| 225 |
+ AllowedDevices: allowedDevices, |
|
| 226 |
+ AppArmorProfile: c.AppArmorProfile, |
|
| 227 |
+ AutoCreatedDevices: autoCreatedDevices, |
|
| 228 |
+ CapAdd: c.HostConfig.CapAdd.Slice(), |
|
| 229 |
+ CapDrop: c.HostConfig.CapDrop.Slice(), |
|
| 230 |
+ CgroupParent: c.HostConfig.CgroupParent, |
|
| 231 |
+ GIDMapping: gidMap, |
|
| 232 |
+ GroupAdd: c.HostConfig.GroupAdd, |
|
| 233 |
+ Ipc: ipc, |
|
| 234 |
+ OomScoreAdj: c.HostConfig.OomScoreAdj, |
|
| 235 |
+ Pid: pid, |
|
| 236 |
+ ReadonlyRootfs: c.HostConfig.ReadonlyRootfs, |
|
| 237 |
+ RemappedRoot: remappedRoot, |
|
| 238 |
+ UIDMapping: uidMap, |
|
| 239 |
+ UTS: uts, |
|
| 240 |
+ } |
|
| 241 |
+ |
|
| 242 |
+ return nil |
|
| 243 |
+} |
|
| 244 |
+ |
|
| 245 |
+// getSize returns the real size & virtual size of the container. |
|
| 246 |
+func (daemon *Daemon) getSize(container *container.Container) (int64, int64) {
|
|
| 247 |
+ var ( |
|
| 248 |
+ sizeRw, sizeRootfs int64 |
|
| 249 |
+ err error |
|
| 250 |
+ ) |
|
| 251 |
+ |
|
| 252 |
+ if err := daemon.Mount(container); err != nil {
|
|
| 253 |
+ logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err)
|
|
| 254 |
+ return sizeRw, sizeRootfs |
|
| 255 |
+ } |
|
| 256 |
+ defer daemon.Unmount(container) |
|
| 257 |
+ |
|
| 258 |
+ sizeRw, err = container.RWLayer.Size() |
|
| 259 |
+ if err != nil {
|
|
| 260 |
+ logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", daemon.driver, container.ID, err)
|
|
| 261 |
+ // FIXME: GetSize should return an error. Not changing it now in case |
|
| 262 |
+ // there is a side-effect. |
|
| 263 |
+ sizeRw = -1 |
|
| 264 |
+ } |
|
| 265 |
+ |
|
| 266 |
+ if parent := container.RWLayer.Parent(); parent != nil {
|
|
| 267 |
+ sizeRootfs, err = parent.Size() |
|
| 268 |
+ if err != nil {
|
|
| 269 |
+ sizeRootfs = -1 |
|
| 270 |
+ } else if sizeRw != -1 {
|
|
| 271 |
+ sizeRootfs += sizeRw |
|
| 272 |
+ } |
|
| 273 |
+ } |
|
| 274 |
+ return sizeRw, sizeRootfs |
|
| 275 |
+} |
|
| 276 |
+ |
|
| 277 |
+func (daemon *Daemon) buildSandboxOptions(container *container.Container, n libnetwork.Network) ([]libnetwork.SandboxOption, error) {
|
|
| 278 |
+ var ( |
|
| 279 |
+ sboxOptions []libnetwork.SandboxOption |
|
| 280 |
+ err error |
|
| 281 |
+ dns []string |
|
| 282 |
+ dnsSearch []string |
|
| 283 |
+ dnsOptions []string |
|
| 284 |
+ ) |
|
| 285 |
+ |
|
| 286 |
+ sboxOptions = append(sboxOptions, libnetwork.OptionHostname(container.Config.Hostname), |
|
| 287 |
+ libnetwork.OptionDomainname(container.Config.Domainname)) |
|
| 288 |
+ |
|
| 289 |
+ if container.HostConfig.NetworkMode.IsHost() {
|
|
| 290 |
+ sboxOptions = append(sboxOptions, libnetwork.OptionUseDefaultSandbox()) |
|
| 291 |
+ sboxOptions = append(sboxOptions, libnetwork.OptionOriginHostsPath("/etc/hosts"))
|
|
| 292 |
+ sboxOptions = append(sboxOptions, libnetwork.OptionOriginResolvConfPath("/etc/resolv.conf"))
|
|
| 293 |
+ } else if daemon.execDriver.SupportsHooks() {
|
|
| 294 |
+ // OptionUseExternalKey is mandatory for userns support. |
|
| 295 |
+ // But optional for non-userns support |
|
| 296 |
+ sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey()) |
|
| 297 |
+ } |
|
| 298 |
+ |
|
| 299 |
+ container.HostsPath, err = container.GetRootResourcePath("hosts")
|
|
| 300 |
+ if err != nil {
|
|
| 301 |
+ return nil, err |
|
| 302 |
+ } |
|
| 303 |
+ sboxOptions = append(sboxOptions, libnetwork.OptionHostsPath(container.HostsPath)) |
|
| 304 |
+ |
|
| 305 |
+ container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf")
|
|
| 306 |
+ if err != nil {
|
|
| 307 |
+ return nil, err |
|
| 308 |
+ } |
|
| 309 |
+ sboxOptions = append(sboxOptions, libnetwork.OptionResolvConfPath(container.ResolvConfPath)) |
|
| 310 |
+ |
|
| 311 |
+ if len(container.HostConfig.DNS) > 0 {
|
|
| 312 |
+ dns = container.HostConfig.DNS |
|
| 313 |
+ } else if len(daemon.configStore.DNS) > 0 {
|
|
| 314 |
+ dns = daemon.configStore.DNS |
|
| 315 |
+ } |
|
| 316 |
+ |
|
| 317 |
+ for _, d := range dns {
|
|
| 318 |
+ sboxOptions = append(sboxOptions, libnetwork.OptionDNS(d)) |
|
| 319 |
+ } |
|
| 320 |
+ |
|
| 321 |
+ if len(container.HostConfig.DNSSearch) > 0 {
|
|
| 322 |
+ dnsSearch = container.HostConfig.DNSSearch |
|
| 323 |
+ } else if len(daemon.configStore.DNSSearch) > 0 {
|
|
| 324 |
+ dnsSearch = daemon.configStore.DNSSearch |
|
| 325 |
+ } |
|
| 326 |
+ |
|
| 327 |
+ for _, ds := range dnsSearch {
|
|
| 328 |
+ sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(ds)) |
|
| 329 |
+ } |
|
| 330 |
+ |
|
| 331 |
+ if len(container.HostConfig.DNSOptions) > 0 {
|
|
| 332 |
+ dnsOptions = container.HostConfig.DNSOptions |
|
| 333 |
+ } else if len(daemon.configStore.DNSOptions) > 0 {
|
|
| 334 |
+ dnsOptions = daemon.configStore.DNSOptions |
|
| 335 |
+ } |
|
| 336 |
+ |
|
| 337 |
+ for _, ds := range dnsOptions {
|
|
| 338 |
+ sboxOptions = append(sboxOptions, libnetwork.OptionDNSOptions(ds)) |
|
| 339 |
+ } |
|
| 340 |
+ |
|
| 341 |
+ if container.NetworkSettings.SecondaryIPAddresses != nil {
|
|
| 342 |
+ name := container.Config.Hostname |
|
| 343 |
+ if container.Config.Domainname != "" {
|
|
| 344 |
+ name = name + "." + container.Config.Domainname |
|
| 345 |
+ } |
|
| 346 |
+ |
|
| 347 |
+ for _, a := range container.NetworkSettings.SecondaryIPAddresses {
|
|
| 348 |
+ sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(name, a.Addr)) |
|
| 349 |
+ } |
|
| 350 |
+ } |
|
| 351 |
+ |
|
| 352 |
+ for _, extraHost := range container.HostConfig.ExtraHosts {
|
|
| 353 |
+ // allow IPv6 addresses in extra hosts; only split on first ":" |
|
| 354 |
+ parts := strings.SplitN(extraHost, ":", 2) |
|
| 355 |
+ sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(parts[0], parts[1])) |
|
| 356 |
+ } |
|
| 357 |
+ |
|
| 358 |
+ // Link feature is supported only for the default bridge network. |
|
| 359 |
+ // return if this call to build join options is not for default bridge network |
|
| 360 |
+ if n.Name() != "bridge" {
|
|
| 361 |
+ return sboxOptions, nil |
|
| 362 |
+ } |
|
| 363 |
+ |
|
| 364 |
+ ep, _ := container.GetEndpointInNetwork(n) |
|
| 365 |
+ if ep == nil {
|
|
| 366 |
+ return sboxOptions, nil |
|
| 367 |
+ } |
|
| 368 |
+ |
|
| 369 |
+ var childEndpoints, parentEndpoints []string |
|
| 370 |
+ |
|
| 371 |
+ children, err := daemon.children(container.Name) |
|
| 372 |
+ if err != nil {
|
|
| 373 |
+ return nil, err |
|
| 374 |
+ } |
|
| 375 |
+ |
|
| 376 |
+ for linkAlias, child := range children {
|
|
| 377 |
+ if !isLinkable(child) {
|
|
| 378 |
+ return nil, fmt.Errorf("Cannot link to %s, as it does not belong to the default network", child.Name)
|
|
| 379 |
+ } |
|
| 380 |
+ _, alias := path.Split(linkAlias) |
|
| 381 |
+ // allow access to the linked container via the alias, real name, and container hostname |
|
| 382 |
+ aliasList := alias + " " + child.Config.Hostname |
|
| 383 |
+ // only add the name if alias isn't equal to the name |
|
| 384 |
+ if alias != child.Name[1:] {
|
|
| 385 |
+ aliasList = aliasList + " " + child.Name[1:] |
|
| 386 |
+ } |
|
| 387 |
+ sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(aliasList, child.NetworkSettings.Networks["bridge"].IPAddress)) |
|
| 388 |
+ cEndpoint, _ := child.GetEndpointInNetwork(n) |
|
| 389 |
+ if cEndpoint != nil && cEndpoint.ID() != "" {
|
|
| 390 |
+ childEndpoints = append(childEndpoints, cEndpoint.ID()) |
|
| 391 |
+ } |
|
| 392 |
+ } |
|
| 393 |
+ |
|
| 394 |
+ bridgeSettings := container.NetworkSettings.Networks["bridge"] |
|
| 395 |
+ refs := daemon.containerGraph().RefPaths(container.ID) |
|
| 396 |
+ for _, ref := range refs {
|
|
| 397 |
+ if ref.ParentID == "0" {
|
|
| 398 |
+ continue |
|
| 399 |
+ } |
|
| 400 |
+ |
|
| 401 |
+ c, err := daemon.Get(ref.ParentID) |
|
| 402 |
+ if err != nil {
|
|
| 403 |
+ logrus.Error(err) |
|
| 404 |
+ } |
|
| 405 |
+ |
|
| 406 |
+ if c != nil && !daemon.configStore.DisableBridge && container.HostConfig.NetworkMode.IsPrivate() {
|
|
| 407 |
+ logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, bridgeSettings.IPAddress)
|
|
| 408 |
+ sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate(c.ID, ref.Name, bridgeSettings.IPAddress)) |
|
| 409 |
+ if ep.ID() != "" {
|
|
| 410 |
+ parentEndpoints = append(parentEndpoints, ep.ID()) |
|
| 411 |
+ } |
|
| 412 |
+ } |
|
| 413 |
+ } |
|
| 414 |
+ |
|
| 415 |
+ linkOptions := options.Generic{
|
|
| 416 |
+ netlabel.GenericData: options.Generic{
|
|
| 417 |
+ "ParentEndpoints": parentEndpoints, |
|
| 418 |
+ "ChildEndpoints": childEndpoints, |
|
| 419 |
+ }, |
|
| 420 |
+ } |
|
| 421 |
+ |
|
| 422 |
+ sboxOptions = append(sboxOptions, libnetwork.OptionGeneric(linkOptions)) |
|
| 423 |
+ |
|
| 424 |
+ return sboxOptions, nil |
|
| 425 |
+} |
|
| 426 |
+ |
|
| 427 |
+func (daemon *Daemon) updateNetworkSettings(container *container.Container, n libnetwork.Network) error {
|
|
| 428 |
+ if container.NetworkSettings == nil {
|
|
| 429 |
+ container.NetworkSettings = &network.Settings{Networks: make(map[string]*network.EndpointSettings)}
|
|
| 430 |
+ } |
|
| 431 |
+ |
|
| 432 |
+ if !container.HostConfig.NetworkMode.IsHost() && runconfig.NetworkMode(n.Type()).IsHost() {
|
|
| 433 |
+ return runconfig.ErrConflictHostNetwork |
|
| 434 |
+ } |
|
| 435 |
+ |
|
| 436 |
+ for s := range container.NetworkSettings.Networks {
|
|
| 437 |
+ sn, err := daemon.FindNetwork(s) |
|
| 438 |
+ if err != nil {
|
|
| 439 |
+ continue |
|
| 440 |
+ } |
|
| 441 |
+ |
|
| 442 |
+ if sn.Name() == n.Name() {
|
|
| 443 |
+ // Avoid duplicate config |
|
| 444 |
+ return nil |
|
| 445 |
+ } |
|
| 446 |
+ if !runconfig.NetworkMode(sn.Type()).IsPrivate() || |
|
| 447 |
+ !runconfig.NetworkMode(n.Type()).IsPrivate() {
|
|
| 448 |
+ return runconfig.ErrConflictSharedNetwork |
|
| 449 |
+ } |
|
| 450 |
+ if runconfig.NetworkMode(sn.Name()).IsNone() || |
|
| 451 |
+ runconfig.NetworkMode(n.Name()).IsNone() {
|
|
| 452 |
+ return runconfig.ErrConflictNoNetwork |
|
| 453 |
+ } |
|
| 454 |
+ } |
|
| 455 |
+ container.NetworkSettings.Networks[n.Name()] = new(network.EndpointSettings) |
|
| 456 |
+ |
|
| 457 |
+ return nil |
|
| 458 |
+} |
|
| 459 |
+ |
|
| 460 |
+func (daemon *Daemon) updateEndpointNetworkSettings(container *container.Container, n libnetwork.Network, ep libnetwork.Endpoint) error {
|
|
| 461 |
+ if err := container.BuildEndpointInfo(n, ep); err != nil {
|
|
| 462 |
+ return err |
|
| 463 |
+ } |
|
| 464 |
+ |
|
| 465 |
+ if container.HostConfig.NetworkMode == runconfig.NetworkMode("bridge") {
|
|
| 466 |
+ container.NetworkSettings.Bridge = daemon.configStore.Bridge.Iface |
|
| 467 |
+ } |
|
| 468 |
+ |
|
| 469 |
+ return nil |
|
| 470 |
+} |
|
| 471 |
+ |
|
| 472 |
+// UpdateNetwork is used to update the container's network (e.g. when linked containers |
|
| 473 |
+// get removed/unlinked). |
|
| 474 |
+func (daemon *Daemon) updateNetwork(container *container.Container) error {
|
|
| 475 |
+ ctrl := daemon.netController |
|
| 476 |
+ sid := container.NetworkSettings.SandboxID |
|
| 477 |
+ |
|
| 478 |
+ sb, err := ctrl.SandboxByID(sid) |
|
| 479 |
+ if err != nil {
|
|
| 480 |
+ return derr.ErrorCodeNoSandbox.WithArgs(sid, err) |
|
| 481 |
+ } |
|
| 482 |
+ |
|
| 483 |
+ // Find if container is connected to the default bridge network |
|
| 484 |
+ var n libnetwork.Network |
|
| 485 |
+ for name := range container.NetworkSettings.Networks {
|
|
| 486 |
+ sn, err := daemon.FindNetwork(name) |
|
| 487 |
+ if err != nil {
|
|
| 488 |
+ continue |
|
| 489 |
+ } |
|
| 490 |
+ if sn.Name() == "bridge" {
|
|
| 491 |
+ n = sn |
|
| 492 |
+ break |
|
| 493 |
+ } |
|
| 494 |
+ } |
|
| 495 |
+ |
|
| 496 |
+ if n == nil {
|
|
| 497 |
+ // Not connected to the default bridge network; Nothing to do |
|
| 498 |
+ return nil |
|
| 499 |
+ } |
|
| 500 |
+ |
|
| 501 |
+ options, err := daemon.buildSandboxOptions(container, n) |
|
| 502 |
+ if err != nil {
|
|
| 503 |
+ return derr.ErrorCodeNetworkUpdate.WithArgs(err) |
|
| 504 |
+ } |
|
| 505 |
+ |
|
| 506 |
+ if err := sb.Refresh(options...); err != nil {
|
|
| 507 |
+ return derr.ErrorCodeNetworkRefresh.WithArgs(sid, err) |
|
| 508 |
+ } |
|
| 509 |
+ |
|
| 510 |
+ return nil |
|
| 511 |
+} |
|
| 512 |
+ |
|
| 513 |
+func (daemon *Daemon) allocateNetwork(container *container.Container) error {
|
|
| 514 |
+ controller := daemon.netController |
|
| 515 |
+ |
|
| 516 |
+ // Cleanup any stale sandbox left over due to ungraceful daemon shutdown |
|
| 517 |
+ if err := controller.SandboxDestroy(container.ID); err != nil {
|
|
| 518 |
+ logrus.Errorf("failed to cleanup up stale network sandbox for container %s", container.ID)
|
|
| 519 |
+ } |
|
| 520 |
+ |
|
| 521 |
+ updateSettings := false |
|
| 522 |
+ if len(container.NetworkSettings.Networks) == 0 {
|
|
| 523 |
+ mode := container.HostConfig.NetworkMode |
|
| 524 |
+ if container.Config.NetworkDisabled || mode.IsContainer() {
|
|
| 525 |
+ return nil |
|
| 526 |
+ } |
|
| 527 |
+ |
|
| 528 |
+ networkName := mode.NetworkName() |
|
| 529 |
+ if mode.IsDefault() {
|
|
| 530 |
+ networkName = controller.Config().Daemon.DefaultNetwork |
|
| 531 |
+ } |
|
| 532 |
+ if mode.IsUserDefined() {
|
|
| 533 |
+ n, err := daemon.FindNetwork(networkName) |
|
| 534 |
+ if err != nil {
|
|
| 535 |
+ return err |
|
| 536 |
+ } |
|
| 537 |
+ networkName = n.Name() |
|
| 538 |
+ } |
|
| 539 |
+ container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings) |
|
| 540 |
+ container.NetworkSettings.Networks[networkName] = new(network.EndpointSettings) |
|
| 541 |
+ updateSettings = true |
|
| 542 |
+ } |
|
| 543 |
+ |
|
| 544 |
+ for n := range container.NetworkSettings.Networks {
|
|
| 545 |
+ if err := daemon.connectToNetwork(container, n, updateSettings); err != nil {
|
|
| 546 |
+ return err |
|
| 547 |
+ } |
|
| 548 |
+ } |
|
| 549 |
+ |
|
| 550 |
+ return container.WriteHostConfig() |
|
| 551 |
+} |
|
| 552 |
+ |
|
| 553 |
+func (daemon *Daemon) getNetworkSandbox(container *container.Container) libnetwork.Sandbox {
|
|
| 554 |
+ var sb libnetwork.Sandbox |
|
| 555 |
+ daemon.netController.WalkSandboxes(func(s libnetwork.Sandbox) bool {
|
|
| 556 |
+ if s.ContainerID() == container.ID {
|
|
| 557 |
+ sb = s |
|
| 558 |
+ return true |
|
| 559 |
+ } |
|
| 560 |
+ return false |
|
| 561 |
+ }) |
|
| 562 |
+ return sb |
|
| 563 |
+} |
|
| 564 |
+ |
|
| 565 |
+// ConnectToNetwork connects a container to a network |
|
| 566 |
+func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string) error {
|
|
| 567 |
+ if !container.Running {
|
|
| 568 |
+ return derr.ErrorCodeNotRunning.WithArgs(container.ID) |
|
| 569 |
+ } |
|
| 570 |
+ if err := daemon.connectToNetwork(container, idOrName, true); err != nil {
|
|
| 571 |
+ return err |
|
| 572 |
+ } |
|
| 573 |
+ if err := container.ToDiskLocking(); err != nil {
|
|
| 574 |
+ return fmt.Errorf("Error saving container to disk: %v", err)
|
|
| 575 |
+ } |
|
| 576 |
+ return nil |
|
| 577 |
+} |
|
| 578 |
+ |
|
| 579 |
+func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName string, updateSettings bool) (err error) {
|
|
| 580 |
+ if container.HostConfig.NetworkMode.IsContainer() {
|
|
| 581 |
+ return runconfig.ErrConflictSharedNetwork |
|
| 582 |
+ } |
|
| 583 |
+ |
|
| 584 |
+ if runconfig.NetworkMode(idOrName).IsBridge() && |
|
| 585 |
+ daemon.configStore.DisableBridge {
|
|
| 586 |
+ container.Config.NetworkDisabled = true |
|
| 587 |
+ return nil |
|
| 588 |
+ } |
|
| 589 |
+ |
|
| 590 |
+ controller := daemon.netController |
|
| 591 |
+ |
|
| 592 |
+ n, err := daemon.FindNetwork(idOrName) |
|
| 593 |
+ if err != nil {
|
|
| 594 |
+ return err |
|
| 595 |
+ } |
|
| 596 |
+ |
|
| 597 |
+ if updateSettings {
|
|
| 598 |
+ if err := daemon.updateNetworkSettings(container, n); err != nil {
|
|
| 599 |
+ return err |
|
| 600 |
+ } |
|
| 601 |
+ } |
|
| 602 |
+ |
|
| 603 |
+ ep, err := container.GetEndpointInNetwork(n) |
|
| 604 |
+ if err == nil {
|
|
| 605 |
+ return fmt.Errorf("container already connected to network %s", idOrName)
|
|
| 606 |
+ } |
|
| 607 |
+ |
|
| 608 |
+ if _, ok := err.(libnetwork.ErrNoSuchEndpoint); !ok {
|
|
| 609 |
+ return err |
|
| 610 |
+ } |
|
| 611 |
+ |
|
| 612 |
+ createOptions, err := container.BuildCreateEndpointOptions(n) |
|
| 613 |
+ if err != nil {
|
|
| 614 |
+ return err |
|
| 615 |
+ } |
|
| 616 |
+ |
|
| 617 |
+ endpointName := strings.TrimPrefix(container.Name, "/") |
|
| 618 |
+ ep, err = n.CreateEndpoint(endpointName, createOptions...) |
|
| 619 |
+ if err != nil {
|
|
| 620 |
+ return err |
|
| 621 |
+ } |
|
| 622 |
+ defer func() {
|
|
| 623 |
+ if err != nil {
|
|
| 624 |
+ if e := ep.Delete(); e != nil {
|
|
| 625 |
+ logrus.Warnf("Could not rollback container connection to network %s", idOrName)
|
|
| 626 |
+ } |
|
| 627 |
+ } |
|
| 628 |
+ }() |
|
| 629 |
+ |
|
| 630 |
+ if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil {
|
|
| 631 |
+ return err |
|
| 632 |
+ } |
|
| 633 |
+ |
|
| 634 |
+ sb := daemon.getNetworkSandbox(container) |
|
| 635 |
+ if sb == nil {
|
|
| 636 |
+ options, err := daemon.buildSandboxOptions(container, n) |
|
| 637 |
+ if err != nil {
|
|
| 638 |
+ return err |
|
| 639 |
+ } |
|
| 640 |
+ sb, err = controller.NewSandbox(container.ID, options...) |
|
| 641 |
+ if err != nil {
|
|
| 642 |
+ return err |
|
| 643 |
+ } |
|
| 644 |
+ |
|
| 645 |
+ container.UpdateSandboxNetworkSettings(sb) |
|
| 646 |
+ } |
|
| 647 |
+ |
|
| 648 |
+ if err := ep.Join(sb); err != nil {
|
|
| 649 |
+ return err |
|
| 650 |
+ } |
|
| 651 |
+ |
|
| 652 |
+ if err := container.UpdateJoinInfo(n, ep); err != nil {
|
|
| 653 |
+ return derr.ErrorCodeJoinInfo.WithArgs(err) |
|
| 654 |
+ } |
|
| 655 |
+ |
|
| 656 |
+ return nil |
|
| 657 |
+} |
|
| 658 |
+ |
|
| 659 |
+func (daemon *Daemon) initializeNetworking(container *container.Container) error {
|
|
| 660 |
+ var err error |
|
| 661 |
+ |
|
| 662 |
+ if container.HostConfig.NetworkMode.IsContainer() {
|
|
| 663 |
+ // we need to get the hosts files from the container to join |
|
| 664 |
+ nc, err := daemon.getNetworkedContainer(container.ID, container.HostConfig.NetworkMode.ConnectedContainer()) |
|
| 665 |
+ if err != nil {
|
|
| 666 |
+ return err |
|
| 667 |
+ } |
|
| 668 |
+ container.HostnamePath = nc.HostnamePath |
|
| 669 |
+ container.HostsPath = nc.HostsPath |
|
| 670 |
+ container.ResolvConfPath = nc.ResolvConfPath |
|
| 671 |
+ container.Config.Hostname = nc.Config.Hostname |
|
| 672 |
+ container.Config.Domainname = nc.Config.Domainname |
|
| 673 |
+ return nil |
|
| 674 |
+ } |
|
| 675 |
+ |
|
| 676 |
+ if container.HostConfig.NetworkMode.IsHost() {
|
|
| 677 |
+ container.Config.Hostname, err = os.Hostname() |
|
| 678 |
+ if err != nil {
|
|
| 679 |
+ return err |
|
| 680 |
+ } |
|
| 681 |
+ |
|
| 682 |
+ parts := strings.SplitN(container.Config.Hostname, ".", 2) |
|
| 683 |
+ if len(parts) > 1 {
|
|
| 684 |
+ container.Config.Hostname = parts[0] |
|
| 685 |
+ container.Config.Domainname = parts[1] |
|
| 686 |
+ } |
|
| 687 |
+ |
|
| 688 |
+ } |
|
| 689 |
+ |
|
| 690 |
+ if err := daemon.allocateNetwork(container); err != nil {
|
|
| 691 |
+ return err |
|
| 692 |
+ } |
|
| 693 |
+ |
|
| 694 |
+ return container.BuildHostnameFile() |
|
| 695 |
+} |
|
| 696 |
+ |
|
| 697 |
+// called from the libcontainer pre-start hook to set the network |
|
| 698 |
+// namespace configuration linkage to the libnetwork "sandbox" entity |
|
| 699 |
+func (daemon *Daemon) setNetworkNamespaceKey(containerID string, pid int) error {
|
|
| 700 |
+ path := fmt.Sprintf("/proc/%d/ns/net", pid)
|
|
| 701 |
+ var sandbox libnetwork.Sandbox |
|
| 702 |
+ search := libnetwork.SandboxContainerWalker(&sandbox, containerID) |
|
| 703 |
+ daemon.netController.WalkSandboxes(search) |
|
| 704 |
+ if sandbox == nil {
|
|
| 705 |
+ return derr.ErrorCodeNoSandbox.WithArgs(containerID, "no sandbox found") |
|
| 706 |
+ } |
|
| 707 |
+ |
|
| 708 |
+ return sandbox.SetKey(path) |
|
| 709 |
+} |
|
| 710 |
+ |
|
| 711 |
+func (daemon *Daemon) getIpcContainer(container *container.Container) (*container.Container, error) {
|
|
| 712 |
+ containerID := container.HostConfig.IpcMode.Container() |
|
| 713 |
+ c, err := daemon.Get(containerID) |
|
| 714 |
+ if err != nil {
|
|
| 715 |
+ return nil, err |
|
| 716 |
+ } |
|
| 717 |
+ if !c.IsRunning() {
|
|
| 718 |
+ return nil, derr.ErrorCodeIPCRunning |
|
| 719 |
+ } |
|
| 720 |
+ return c, nil |
|
| 721 |
+} |
|
| 722 |
+ |
|
| 723 |
+func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*container.Container, error) {
|
|
| 724 |
+ nc, err := daemon.Get(connectedContainerID) |
|
| 725 |
+ if err != nil {
|
|
| 726 |
+ return nil, err |
|
| 727 |
+ } |
|
| 728 |
+ if containerID == nc.ID {
|
|
| 729 |
+ return nil, derr.ErrorCodeJoinSelf |
|
| 730 |
+ } |
|
| 731 |
+ if !nc.IsRunning() {
|
|
| 732 |
+ return nil, derr.ErrorCodeJoinRunning.WithArgs(connectedContainerID) |
|
| 733 |
+ } |
|
| 734 |
+ return nc, nil |
|
| 735 |
+} |
|
| 736 |
+ |
|
| 737 |
+func (daemon *Daemon) releaseNetwork(container *container.Container) {
|
|
| 738 |
+ if container.HostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled {
|
|
| 739 |
+ return |
|
| 740 |
+ } |
|
| 741 |
+ |
|
| 742 |
+ sid := container.NetworkSettings.SandboxID |
|
| 743 |
+ networks := container.NetworkSettings.Networks |
|
| 744 |
+ for n := range networks {
|
|
| 745 |
+ networks[n] = &network.EndpointSettings{}
|
|
| 746 |
+ } |
|
| 747 |
+ |
|
| 748 |
+ container.NetworkSettings = &network.Settings{Networks: networks}
|
|
| 749 |
+ |
|
| 750 |
+ if sid == "" || len(networks) == 0 {
|
|
| 751 |
+ return |
|
| 752 |
+ } |
|
| 753 |
+ |
|
| 754 |
+ sb, err := daemon.netController.SandboxByID(sid) |
|
| 755 |
+ if err != nil {
|
|
| 756 |
+ logrus.Errorf("error locating sandbox id %s: %v", sid, err)
|
|
| 757 |
+ return |
|
| 758 |
+ } |
|
| 759 |
+ |
|
| 760 |
+ if err := sb.Delete(); err != nil {
|
|
| 761 |
+ logrus.Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err)
|
|
| 762 |
+ } |
|
| 763 |
+} |
|
| 764 |
+ |
|
| 765 |
+func (daemon *Daemon) setupIpcDirs(c *container.Container) error {
|
|
| 766 |
+ rootUID, rootGID := daemon.GetRemappedUIDGID() |
|
| 767 |
+ if !c.HasMountFor("/dev/shm") {
|
|
| 768 |
+ shmPath, err := c.ShmResourcePath() |
|
| 769 |
+ if err != nil {
|
|
| 770 |
+ return err |
|
| 771 |
+ } |
|
| 772 |
+ |
|
| 773 |
+ if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil {
|
|
| 774 |
+ return err |
|
| 775 |
+ } |
|
| 776 |
+ |
|
| 777 |
+ shmSize := container.DefaultSHMSize |
|
| 778 |
+ if c.HostConfig.ShmSize != nil {
|
|
| 779 |
+ shmSize = *c.HostConfig.ShmSize |
|
| 780 |
+ } |
|
| 781 |
+ shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10) |
|
| 782 |
+ if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil {
|
|
| 783 |
+ return fmt.Errorf("mounting shm tmpfs: %s", err)
|
|
| 784 |
+ } |
|
| 785 |
+ if err := os.Chown(shmPath, rootUID, rootGID); err != nil {
|
|
| 786 |
+ return err |
|
| 787 |
+ } |
|
| 788 |
+ } |
|
| 789 |
+ |
|
| 790 |
+ if !c.HasMountFor("/dev/mqueue") {
|
|
| 791 |
+ mqueuePath, err := c.MqueueResourcePath() |
|
| 792 |
+ if err != nil {
|
|
| 793 |
+ return err |
|
| 794 |
+ } |
|
| 795 |
+ |
|
| 796 |
+ if err := idtools.MkdirAllAs(mqueuePath, 0700, rootUID, rootGID); err != nil {
|
|
| 797 |
+ return err |
|
| 798 |
+ } |
|
| 799 |
+ |
|
| 800 |
+ if err := syscall.Mount("mqueue", mqueuePath, "mqueue", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), ""); err != nil {
|
|
| 801 |
+ return fmt.Errorf("mounting mqueue mqueue : %s", err)
|
|
| 802 |
+ } |
|
| 803 |
+ if err := os.Chown(mqueuePath, rootUID, rootGID); err != nil {
|
|
| 804 |
+ return err |
|
| 805 |
+ } |
|
| 806 |
+ } |
|
| 807 |
+ |
|
| 808 |
+ return nil |
|
| 809 |
+} |
|
| 810 |
+ |
|
| 811 |
+func (daemon *Daemon) mountVolumes(container *container.Container) error {
|
|
| 812 |
+ mounts, err := daemon.setupMounts(container) |
|
| 813 |
+ if err != nil {
|
|
| 814 |
+ return err |
|
| 815 |
+ } |
|
| 816 |
+ |
|
| 817 |
+ for _, m := range mounts {
|
|
| 818 |
+ dest, err := container.GetResourcePath(m.Destination) |
|
| 819 |
+ if err != nil {
|
|
| 820 |
+ return err |
|
| 821 |
+ } |
|
| 822 |
+ |
|
| 823 |
+ var stat os.FileInfo |
|
| 824 |
+ stat, err = os.Stat(m.Source) |
|
| 825 |
+ if err != nil {
|
|
| 826 |
+ return err |
|
| 827 |
+ } |
|
| 828 |
+ if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil {
|
|
| 829 |
+ return err |
|
| 830 |
+ } |
|
| 831 |
+ |
|
| 832 |
+ opts := "rbind,ro" |
|
| 833 |
+ if m.Writable {
|
|
| 834 |
+ opts = "rbind,rw" |
|
| 835 |
+ } |
|
| 836 |
+ |
|
| 837 |
+ if err := mount.Mount(m.Source, dest, "bind", opts); err != nil {
|
|
| 838 |
+ return err |
|
| 839 |
+ } |
|
| 840 |
+ } |
|
| 841 |
+ |
|
| 842 |
+ return nil |
|
| 843 |
+} |
|
| 844 |
+ |
|
| 845 |
+func killProcessDirectly(container *container.Container) error {
|
|
| 846 |
+ if _, err := container.WaitStop(10 * time.Second); err != nil {
|
|
| 847 |
+ // Ensure that we don't kill ourselves |
|
| 848 |
+ if pid := container.GetPID(); pid != 0 {
|
|
| 849 |
+ logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID))
|
|
| 850 |
+ if err := syscall.Kill(pid, 9); err != nil {
|
|
| 851 |
+ if err != syscall.ESRCH {
|
|
| 852 |
+ return err |
|
| 853 |
+ } |
|
| 854 |
+ logrus.Debugf("Cannot kill process (pid=%d) with signal 9: no such process.", pid)
|
|
| 855 |
+ } |
|
| 856 |
+ } |
|
| 857 |
+ } |
|
| 858 |
+ return nil |
|
| 859 |
+} |
|
| 860 |
+ |
|
| 861 |
+func getDevicesFromPath(deviceMapping runconfig.DeviceMapping) (devs []*configs.Device, err error) {
|
|
| 862 |
+ device, err := devices.DeviceFromPath(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions) |
|
| 863 |
+ // if there was no error, return the device |
|
| 864 |
+ if err == nil {
|
|
| 865 |
+ device.Path = deviceMapping.PathInContainer |
|
| 866 |
+ return append(devs, device), nil |
|
| 867 |
+ } |
|
| 868 |
+ |
|
| 869 |
+ // if the device is not a device node |
|
| 870 |
+ // try to see if it's a directory holding many devices |
|
| 871 |
+ if err == devices.ErrNotADevice {
|
|
| 872 |
+ |
|
| 873 |
+ // check if it is a directory |
|
| 874 |
+ if src, e := os.Stat(deviceMapping.PathOnHost); e == nil && src.IsDir() {
|
|
| 875 |
+ |
|
| 876 |
+ // mount the internal devices recursively |
|
| 877 |
+ filepath.Walk(deviceMapping.PathOnHost, func(dpath string, f os.FileInfo, e error) error {
|
|
| 878 |
+ childDevice, e := devices.DeviceFromPath(dpath, deviceMapping.CgroupPermissions) |
|
| 879 |
+ if e != nil {
|
|
| 880 |
+ // ignore the device |
|
| 881 |
+ return nil |
|
| 882 |
+ } |
|
| 883 |
+ |
|
| 884 |
+ // add the device to userSpecified devices |
|
| 885 |
+ childDevice.Path = strings.Replace(dpath, deviceMapping.PathOnHost, deviceMapping.PathInContainer, 1) |
|
| 886 |
+ devs = append(devs, childDevice) |
|
| 887 |
+ |
|
| 888 |
+ return nil |
|
| 889 |
+ }) |
|
| 890 |
+ } |
|
| 891 |
+ } |
|
| 892 |
+ |
|
| 893 |
+ if len(devs) > 0 {
|
|
| 894 |
+ return devs, nil |
|
| 895 |
+ } |
|
| 896 |
+ |
|
| 897 |
+ return devs, derr.ErrorCodeDeviceInfo.WithArgs(deviceMapping.PathOnHost, err) |
|
| 898 |
+} |
|
| 899 |
+ |
|
| 900 |
+func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Device {
|
|
| 901 |
+ if len(userDevices) == 0 {
|
|
| 902 |
+ return defaultDevices |
|
| 903 |
+ } |
|
| 904 |
+ |
|
| 905 |
+ paths := map[string]*configs.Device{}
|
|
| 906 |
+ for _, d := range userDevices {
|
|
| 907 |
+ paths[d.Path] = d |
|
| 908 |
+ } |
|
| 909 |
+ |
|
| 910 |
+ var devs []*configs.Device |
|
| 911 |
+ for _, d := range defaultDevices {
|
|
| 912 |
+ if _, defined := paths[d.Path]; !defined {
|
|
| 913 |
+ devs = append(devs, d) |
|
| 914 |
+ } |
|
| 915 |
+ } |
|
| 916 |
+ return append(devs, userDevices...) |
|
| 917 |
+} |
|
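`mergeDevices` above keeps every user-specified device and adds only those defaults whose in-container path the user has not overridden. A standalone illustration of that path-keyed merge — the `device` struct and `merge` function are simplified stand-ins, not libcontainer's `configs.Device`:

```go
package main

import "fmt"

// device is a minimal stand-in for configs.Device; only the
// in-container path matters for the merge.
type device struct {
	Path string
}

// merge returns the defaults the user did not override, followed by
// all user devices, matching the shape of mergeDevices above.
func merge(defaults, user []device) []device {
	if len(user) == 0 {
		return defaults
	}
	overridden := make(map[string]bool, len(user))
	for _, d := range user {
		overridden[d.Path] = true
	}
	var out []device
	for _, d := range defaults {
		if !overridden[d.Path] {
			out = append(out, d)
		}
	}
	return append(out, user...)
}

func main() {
	defaults := []device{{Path: "/dev/null"}, {Path: "/dev/zero"}}
	user := []device{{Path: "/dev/zero"}, {Path: "/dev/fuse"}}
	fmt.Println(merge(defaults, user)) // [{/dev/null} {/dev/zero} {/dev/fuse}]
}
```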
| 918 |
+ |
|
| 919 |
+func detachMounted(path string) error {
|
|
| 920 |
+ return syscall.Unmount(path, syscall.MNT_DETACH) |
|
| 921 |
+} |
|
| 922 |
+ |
|
| 923 |
+func isLinkable(child *container.Container) bool {
|
|
| 924 |
+ // A container is linkable only if it belongs to the default network |
|
| 925 |
+ _, ok := child.NetworkSettings.Networks["bridge"] |
|
| 926 |
+ return ok |
|
| 927 |
+} |
| 0 | 928 |
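That is the end of the new unix file. The pattern in `killProcessDirectly` above — wait up to ten seconds for the container to exit, then SIGKILL its pid while treating ESRCH (process already gone) as success — is sketched in isolation below. `forceKill` is a hypothetical helper, and like the file above the sketch is linux/freebsd only:

```go
// +build linux freebsd

package main

import (
	"fmt"
	"os/exec"
	"syscall"
)

// forceKill sends SIGKILL to pid, treating ESRCH (no such process) as
// success: a process that is already gone is the outcome we wanted.
func forceKill(pid int) error {
	if pid == 0 {
		return nil // nothing to kill
	}
	if err := syscall.Kill(pid, syscall.SIGKILL); err != nil && err != syscall.ESRCH {
		return err
	}
	return nil
}

func main() {
	// Demonstrate on a child process we own.
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		fmt.Println("start:", err)
		return
	}
	fmt.Println(forceKill(cmd.Process.Pid)) // <nil>
	_ = cmd.Wait()                          // reap the killed child
}
```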
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,165 @@ |
| 0 |
+// +build windows |
|
| 1 |
+ |
|
| 2 |
+package daemon |
|
| 3 |
+ |
|
| 4 |
+import ( |
|
| 5 |
+ "strings" |
|
| 6 |
+ |
|
| 7 |
+ "github.com/docker/docker/container" |
|
| 8 |
+ "github.com/docker/docker/daemon/execdriver" |
|
| 9 |
+ derr "github.com/docker/docker/errors" |
|
| 10 |
+ "github.com/docker/docker/layer" |
|
| 11 |
+) |
|
| 12 |
+ |
|
| 13 |
+func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) {
|
|
| 14 |
+ return nil, nil |
|
| 15 |
+} |
|
| 16 |
+ |
|
| 17 |
+func (daemon *Daemon) initializeNetworking(container *container.Container) error {
|
|
| 18 |
+ return nil |
|
| 19 |
+} |
|
| 20 |
+ |
|
| 21 |
+// ConnectToNetwork connects a container to the network |
|
| 22 |
+func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string) error {
|
|
| 23 |
+ return nil |
|
| 24 |
+} |
|
| 25 |
+ |
|
| 26 |
+func (daemon *Daemon) populateCommand(c *container.Container, env []string) error {
|
|
| 27 |
+ en := &execdriver.Network{
|
|
| 28 |
+ Interface: nil, |
|
| 29 |
+ } |
|
| 30 |
+ |
|
| 31 |
+ parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2) |
|
| 32 |
+ switch parts[0] {
|
|
| 33 |
+ case "none": |
|
| 34 |
+ case "default", "": // empty string to support existing containers |
|
| 35 |
+ if !c.Config.NetworkDisabled {
|
|
| 36 |
+ en.Interface = &execdriver.NetworkInterface{
|
|
| 37 |
+ MacAddress: c.Config.MacAddress, |
|
| 38 |
+ Bridge: daemon.configStore.Bridge.VirtualSwitchName, |
|
| 39 |
+ PortBindings: c.HostConfig.PortBindings, |
|
| 40 |
+ |
|
| 41 |
+ // TODO Windows. Include IPAddress. There already is a |
|
| 42 |
+ // property IPAddress on execdriver.CommonNetworkInterface, |
|
| 43 |
+ // but there is no CLI option in docker to pass through |
|
| 44 |
+ // an IPAddress on docker run. |
|
| 45 |
+ } |
|
| 46 |
+ } |
|
| 47 |
+ default: |
|
| 48 |
+ return derr.ErrorCodeInvalidNetworkMode.WithArgs(c.HostConfig.NetworkMode) |
|
| 49 |
+ } |
|
| 50 |
+ |
|
| 51 |
+ // TODO Windows. More resource controls to be implemented later. |
|
| 52 |
+ resources := &execdriver.Resources{
|
|
| 53 |
+ CommonResources: execdriver.CommonResources{
|
|
| 54 |
+ CPUShares: c.HostConfig.CPUShares, |
|
| 55 |
+ }, |
|
| 56 |
+ } |
|
| 57 |
+ |
|
| 58 |
+ processConfig := execdriver.ProcessConfig{
|
|
| 59 |
+ CommonProcessConfig: execdriver.CommonProcessConfig{
|
|
| 60 |
+ Entrypoint: c.Path, |
|
| 61 |
+ Arguments: c.Args, |
|
| 62 |
+ Tty: c.Config.Tty, |
|
| 63 |
+ }, |
|
| 64 |
+ ConsoleSize: c.HostConfig.ConsoleSize, |
|
| 65 |
+ } |
|
| 66 |
+ |
|
| 67 |
+ processConfig.Env = env |
|
| 68 |
+ |
|
| 69 |
+ var layerPaths []string |
|
| 70 |
+ img, err := daemon.imageStore.Get(c.ImageID) |
|
| 71 |
+ if err != nil {
|
|
| 72 |
+ return derr.ErrorCodeGetGraph.WithArgs(c.ImageID, err) |
|
| 73 |
+ } |
|
| 74 |
+ |
|
| 75 |
+ if img.RootFS != nil && img.RootFS.Type == "layers+base" {
|
|
| 76 |
+ max := len(img.RootFS.DiffIDs) |
|
| 77 |
+ for i := 0; i <= max; i++ {
|
|
| 78 |
+ img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i] |
|
| 79 |
+ path, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID()) |
|
| 80 |
+ if err != nil {
|
|
| 81 |
+ return derr.ErrorCodeGetLayer.WithArgs(err) |
|
| 82 |
+ } |
|
| 83 |
+ // Reverse order, expecting parent most first |
|
| 84 |
+ layerPaths = append([]string{path}, layerPaths...)
|
|
| 85 |
+ } |
|
| 86 |
+ } |
|
| 87 |
+ |
|
| 88 |
+ m, err := layer.RWLayerMetadata(daemon.layerStore, c.ID) |
|
| 89 |
+ if err != nil {
|
|
| 90 |
+ return derr.ErrorCodeGetLayerMetadata.WithArgs(err) |
|
| 91 |
+ } |
|
| 92 |
+ layerFolder := m["dir"] |
|
| 93 |
+ |
|
| 94 |
+ c.Command = &execdriver.Command{
|
|
| 95 |
+ CommonCommand: execdriver.CommonCommand{
|
|
| 96 |
+ ID: c.ID, |
|
| 97 |
+ Rootfs: c.BaseFS, |
|
| 98 |
+ InitPath: "/.dockerinit", |
|
| 99 |
+ WorkingDir: c.Config.WorkingDir, |
|
| 100 |
+ Network: en, |
|
| 101 |
+ MountLabel: c.GetMountLabel(), |
|
| 102 |
+ Resources: resources, |
|
| 103 |
+ ProcessConfig: processConfig, |
|
| 104 |
+ ProcessLabel: c.GetProcessLabel(), |
|
| 105 |
+ }, |
|
| 106 |
+ FirstStart: !c.HasBeenStartedBefore, |
|
| 107 |
+ LayerFolder: layerFolder, |
|
| 108 |
+ LayerPaths: layerPaths, |
|
| 109 |
+ Hostname: c.Config.Hostname, |
|
| 110 |
+ Isolation: c.HostConfig.Isolation, |
|
| 111 |
+ ArgsEscaped: c.Config.ArgsEscaped, |
|
| 112 |
+ } |
|
| 113 |
+ |
|
| 114 |
+ return nil |
|
| 115 |
+} |
|
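
In the Windows populateCommand above, each loop iteration resolves the layer path for a progressively longer DiffID chain and prepends it, so the path resolved last ends up first in LayerPaths. A toy sketch of just that ordering, with a hypothetical resolvePath standing in for layer.GetLayerPath:

    package main

    import "fmt"

    func main() {
        diffIDs := []string{"base", "mid", "top"}
        resolvePath := func(chain []string) string {
            return fmt.Sprintf("/layers/%d", len(chain))
        }

        var layerPaths []string
        for i := 0; i <= len(diffIDs); i++ {
            path := resolvePath(diffIDs[:i])
            layerPaths = append([]string{path}, layerPaths...) // prepend reverses the order
        }
        fmt.Println(layerPaths) // [/layers/3 /layers/2 /layers/1 /layers/0]
    }
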
| 116 |
+ |
|
| 117 |
+// getSize returns the real size & virtual size of the container |
|
| 118 |
+func (daemon *Daemon) getSize(container *container.Container) (int64, int64) {
|
|
| 119 |
+ // TODO Windows |
|
| 120 |
+ return 0, 0 |
|
| 121 |
+} |
|
| 122 |
+ |
|
| 123 |
+// setNetworkNamespaceKey is a no-op on Windows. |
|
| 124 |
+func (daemon *Daemon) setNetworkNamespaceKey(containerID string, pid int) error {
|
|
| 125 |
+ return nil |
|
| 126 |
+} |
|
| 127 |
+ |
|
| 128 |
+// allocateNetwork is a no-op on Windows. |
|
| 129 |
+func (daemon *Daemon) allocateNetwork(container *container.Container) error {
|
|
| 130 |
+ return nil |
|
| 131 |
+} |
|
| 132 |
+ |
|
| 133 |
+func (daemon *Daemon) updateNetwork(container *container.Container) error {
|
|
| 134 |
+ return nil |
|
| 135 |
+} |
|
| 136 |
+ |
|
| 137 |
+func (daemon *Daemon) releaseNetwork(container *container.Container) {
|
|
| 138 |
+} |
|
| 139 |
+ |
|
| 140 |
+func (daemon *Daemon) setupIpcDirs(container *container.Container) error {
|
|
| 141 |
+ return nil |
|
| 142 |
+} |
|
| 143 |
+ |
|
| 144 |
+// TODO Windows: Fix Post-TP4. This is a hack to allow docker cp to work |
|
| 145 |
+// against containers which have volumes. You will still be able to cp |
|
| 146 |
+// to somewhere on the container drive, but not to any mounted volumes |
|
| 147 |
+// inside the container. Without this fix, docker cp is broken to any |
|
| 148 |
+// container which has a volume, regardless of where the file is inside the |
|
| 149 |
+// container. |
|
| 150 |
+func (daemon *Daemon) mountVolumes(container *container.Container) error {
|
|
| 151 |
+ return nil |
|
| 152 |
+} |
|
| 153 |
+ |
|
| 154 |
+func detachMounted(path string) error {
|
|
| 155 |
+ return nil |
|
| 156 |
+} |
|
| 157 |
+ |
|
| 158 |
+func getDefaultRouteMtu() (int, error) {
|
|
| 159 |
+ return -1, errSystemNotSupported |
|
| 160 |
+} |
|
| 161 |
+ |
|
| 162 |
+func killProcessDirectly(container *container.Container) error {
|
|
| 163 |
+ return nil |
|
| 164 |
+} |
| 0 | 165 |
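
The file above is compiled only on Windows via its "// +build windows" constraint, while the "+build linux freebsd" file removed further down carries the Unix implementations. A reduced example of that pattern, using two hypothetical files in one package:

    // stub_windows.go (hypothetical)
    // +build windows

    package platform

    // detach is a no-op on Windows, mirroring the stubs above.
    func detach(path string) error { return nil }

    // stub_unix.go (hypothetical)
    // +build linux freebsd

    package platform

    import "syscall"

    // detach lazily unmounts the path, as in the Unix detachMounted above.
    func detach(path string) error {
        return syscall.Unmount(path, syscall.MNT_DETACH)
    }
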
deleted file mode 100644 |
| ... | ... |
@@ -1,136 +0,0 @@ |
| 1 |
-package daemon |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "io/ioutil" |
|
| 5 |
- "os" |
|
| 6 |
- "path/filepath" |
|
| 7 |
- "testing" |
|
| 8 |
- |
|
| 9 |
- "github.com/docker/docker/pkg/signal" |
|
| 10 |
- "github.com/docker/docker/runconfig" |
|
| 11 |
- "github.com/docker/docker/volume" |
|
| 12 |
- "github.com/docker/docker/volume/drivers" |
|
| 13 |
-) |
|
| 14 |
- |
|
| 15 |
-func TestGetFullName(t *testing.T) {
|
|
| 16 |
- name, err := GetFullContainerName("testing")
|
|
| 17 |
- if err != nil {
|
|
| 18 |
- t.Fatal(err) |
|
| 19 |
- } |
|
| 20 |
- if name != "/testing" {
|
|
| 21 |
- t.Fatalf("Expected /testing got %s", name)
|
|
| 22 |
- } |
|
| 23 |
- if _, err := GetFullContainerName(""); err == nil {
|
|
| 24 |
- t.Fatal("Error should not be nil")
|
|
| 25 |
- } |
|
| 26 |
-} |
|
| 27 |
- |
|
| 28 |
-func TestValidContainerNames(t *testing.T) {
|
|
| 29 |
- invalidNames := []string{"-rm", "&sdfsfd", "safd%sd"}
|
|
| 30 |
- validNames := []string{"word-word", "word_word", "1weoid"}
|
|
| 31 |
- |
|
| 32 |
- for _, name := range invalidNames {
|
|
| 33 |
- if validContainerNamePattern.MatchString(name) {
|
|
| 34 |
- t.Fatalf("%q is not a valid container name and was returned as valid.", name)
|
|
| 35 |
- } |
|
| 36 |
- } |
|
| 37 |
- |
|
| 38 |
- for _, name := range validNames {
|
|
| 39 |
- if !validContainerNamePattern.MatchString(name) {
|
|
| 40 |
- t.Fatalf("%q is a valid container name and was returned as invalid.", name)
|
|
| 41 |
- } |
|
| 42 |
- } |
|
| 43 |
-} |
|
| 44 |
- |
|
| 45 |
-func TestContainerStopSignal(t *testing.T) {
|
|
| 46 |
- c := &Container{
|
|
| 47 |
- CommonContainer: CommonContainer{
|
|
| 48 |
- Config: &runconfig.Config{},
|
|
| 49 |
- }, |
|
| 50 |
- } |
|
| 51 |
- |
|
| 52 |
- def, err := signal.ParseSignal(signal.DefaultStopSignal) |
|
| 53 |
- if err != nil {
|
|
| 54 |
- t.Fatal(err) |
|
| 55 |
- } |
|
| 56 |
- |
|
| 57 |
- s := c.stopSignal() |
|
| 58 |
- if s != int(def) {
|
|
| 59 |
- t.Fatalf("Expected %v, got %v", def, s)
|
|
| 60 |
- } |
|
| 61 |
- |
|
| 62 |
- c = &Container{
|
|
| 63 |
- CommonContainer: CommonContainer{
|
|
| 64 |
- Config: &runconfig.Config{StopSignal: "SIGKILL"},
|
|
| 65 |
- }, |
|
| 66 |
- } |
|
| 67 |
- s = c.stopSignal() |
|
| 68 |
- if s != 9 {
|
|
| 69 |
- t.Fatalf("Expected 9, got %v", s)
|
|
| 70 |
- } |
|
| 71 |
-} |
|
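
The test above exercises the default-versus-configured stop-signal lookup. A minimal stand-in for that lookup, assuming the default stop signal is SIGTERM (15 on Linux) and using a small hypothetical signal table rather than pkg/signal:

    package main

    import (
        "fmt"
        "syscall"
    )

    var signalMap = map[string]syscall.Signal{
        "SIGTERM": syscall.SIGTERM, // 15, the default stop signal
        "SIGKILL": syscall.SIGKILL, // 9
    }

    func stopSignal(configured string) int {
        if configured == "" {
            configured = "SIGTERM"
        }
        return int(signalMap[configured])
    }

    func main() {
        fmt.Println(stopSignal(""))        // 15
        fmt.Println(stopSignal("SIGKILL")) // 9
    }
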
| 72 |
- |
|
| 73 |
-func TestContainerInitDNS(t *testing.T) {
|
|
| 74 |
- tmp, err := ioutil.TempDir("", "docker-container-test-")
|
|
| 75 |
- if err != nil {
|
|
| 76 |
- t.Fatal(err) |
|
| 77 |
- } |
|
| 78 |
- defer os.RemoveAll(tmp) |
|
| 79 |
- |
|
| 80 |
- containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" |
|
| 81 |
- containerPath := filepath.Join(tmp, containerID) |
|
| 82 |
- if err := os.MkdirAll(containerPath, 0755); err != nil {
|
|
| 83 |
- t.Fatal(err) |
|
| 84 |
- } |
|
| 85 |
- |
|
| 86 |
- config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0,
|
|
| 87 |
-"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"}, |
|
| 88 |
-"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top", |
|
| 89 |
-"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"",
|
|
| 90 |
-"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true, |
|
| 91 |
-"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null, |
|
| 92 |
-"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95",
|
|
| 93 |
-"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1",
|
|
| 94 |
-"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","Ports":{}},
|
|
| 95 |
-"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf", |
|
| 96 |
-"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname", |
|
| 97 |
-"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts", |
|
| 98 |
-"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log", |
|
| 99 |
-"Name":"/ubuntu","Driver":"aufs","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, |
|
| 100 |
-"UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}`
|
|
| 101 |
- |
|
| 102 |
- if err = ioutil.WriteFile(filepath.Join(containerPath, configFileName), []byte(config), 0644); err != nil {
|
|
| 103 |
- t.Fatal(err) |
|
| 104 |
- } |
|
| 105 |
- |
|
| 106 |
- hostConfig := `{"Binds":[],"ContainerIDFile":"","Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"",
|
|
| 107 |
-"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsOptions":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null,
|
|
| 108 |
-"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0},
|
|
| 109 |
-"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}`
|
|
| 110 |
- if err = ioutil.WriteFile(filepath.Join(containerPath, "hostconfig.json"), []byte(hostConfig), 0644); err != nil {
|
|
| 111 |
- t.Fatal(err) |
|
| 112 |
- } |
|
| 113 |
- |
|
| 114 |
- daemon, err := initDaemonWithVolumeStore(tmp) |
|
| 115 |
- if err != nil {
|
|
| 116 |
- t.Fatal(err) |
|
| 117 |
- } |
|
| 118 |
- defer volumedrivers.Unregister(volume.DefaultDriverName) |
|
| 119 |
- |
|
| 120 |
- c, err := daemon.load(containerID) |
|
| 121 |
- if err != nil {
|
|
| 122 |
- t.Fatal(err) |
|
| 123 |
- } |
|
| 124 |
- |
|
| 125 |
- if c.hostConfig.DNS == nil {
|
|
| 126 |
- t.Fatal("Expected container DNS to not be nil")
|
|
| 127 |
- } |
|
| 128 |
- |
|
| 129 |
- if c.hostConfig.DNSSearch == nil {
|
|
| 130 |
- t.Fatal("Expected container DNSSearch to not be nil")
|
|
| 131 |
- } |
|
| 132 |
- |
|
| 133 |
- if c.hostConfig.DNSOptions == nil {
|
|
| 134 |
- t.Fatal("Expected container DNSOptions to not be nil")
|
|
| 135 |
- } |
|
| 136 |
-} |
| 137 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,1545 +0,0 @@ |
| 1 |
-// +build linux freebsd |
|
| 2 |
- |
|
| 3 |
-package daemon |
|
| 4 |
- |
|
| 5 |
-import ( |
|
| 6 |
- "fmt" |
|
| 7 |
- "io/ioutil" |
|
| 8 |
- "net" |
|
| 9 |
- "os" |
|
| 10 |
- "path" |
|
| 11 |
- "path/filepath" |
|
| 12 |
- "strconv" |
|
| 13 |
- "strings" |
|
| 14 |
- "syscall" |
|
| 15 |
- "time" |
|
| 16 |
- |
|
| 17 |
- "github.com/Sirupsen/logrus" |
|
| 18 |
- "github.com/docker/docker/daemon/execdriver" |
|
| 19 |
- "github.com/docker/docker/daemon/links" |
|
| 20 |
- "github.com/docker/docker/daemon/network" |
|
| 21 |
- derr "github.com/docker/docker/errors" |
|
| 22 |
- "github.com/docker/docker/pkg/fileutils" |
|
| 23 |
- "github.com/docker/docker/pkg/idtools" |
|
| 24 |
- "github.com/docker/docker/pkg/mount" |
|
| 25 |
- "github.com/docker/docker/pkg/nat" |
|
| 26 |
- "github.com/docker/docker/pkg/stringid" |
|
| 27 |
- "github.com/docker/docker/pkg/symlink" |
|
| 28 |
- "github.com/docker/docker/pkg/system" |
|
| 29 |
- "github.com/docker/docker/pkg/ulimit" |
|
| 30 |
- "github.com/docker/docker/runconfig" |
|
| 31 |
- "github.com/docker/docker/utils" |
|
| 32 |
- "github.com/docker/docker/volume" |
|
| 33 |
- "github.com/docker/libnetwork" |
|
| 34 |
- "github.com/docker/libnetwork/netlabel" |
|
| 35 |
- "github.com/docker/libnetwork/options" |
|
| 36 |
- "github.com/docker/libnetwork/types" |
|
| 37 |
- "github.com/opencontainers/runc/libcontainer/configs" |
|
| 38 |
- "github.com/opencontainers/runc/libcontainer/devices" |
|
| 39 |
- "github.com/opencontainers/runc/libcontainer/label" |
|
| 40 |
-) |
|
| 41 |
- |
|
| 42 |
-const ( |
|
| 43 |
- // DefaultPathEnv is unix style list of directories to search for |
|
| 44 |
- // executables. Each directory is separated from the next by a colon |
|
| 45 |
- // ':' character . |
|
| 46 |
- DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" |
|
| 47 |
- |
|
| 48 |
- // DefaultSHMSize is the default size (64MB) of the SHM which will be mounted in the container |
|
| 49 |
- DefaultSHMSize int64 = 67108864 |
|
| 50 |
-) |
|
| 51 |
- |
|
| 52 |
-// Container holds the fields specific to unixen implementations. See |
|
| 53 |
-// CommonContainer for standard fields common to all containers. |
|
| 54 |
-type Container struct {
|
|
| 55 |
- CommonContainer |
|
| 56 |
- |
|
| 57 |
- // Fields below here are platform specific. |
|
| 58 |
- activeLinks map[string]*links.Link |
|
| 59 |
- AppArmorProfile string |
|
| 60 |
- HostnamePath string |
|
| 61 |
- HostsPath string |
|
| 62 |
- ShmPath string |
|
| 63 |
- MqueuePath string |
|
| 64 |
- ResolvConfPath string |
|
| 65 |
-} |
|
| 66 |
- |
|
| 67 |
-func killProcessDirectly(container *Container) error {
|
|
| 68 |
- if _, err := container.WaitStop(10 * time.Second); err != nil {
|
|
| 69 |
- // Ensure that we don't kill ourselves |
|
| 70 |
- if pid := container.GetPID(); pid != 0 {
|
|
| 71 |
- logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID))
|
|
| 72 |
- if err := syscall.Kill(pid, 9); err != nil {
|
|
| 73 |
- if err != syscall.ESRCH {
|
|
| 74 |
- return err |
|
| 75 |
- } |
|
| 76 |
- logrus.Debugf("Cannot kill process (pid=%d) with signal 9: no such process.", pid)
|
|
| 77 |
- } |
|
| 78 |
- } |
|
| 79 |
- } |
|
| 80 |
- return nil |
|
| 81 |
-} |
|
| 82 |
- |
|
| 83 |
-func (daemon *Daemon) setupLinkedContainers(container *Container) ([]string, error) {
|
|
| 84 |
- var env []string |
|
| 85 |
- children, err := daemon.children(container.Name) |
|
| 86 |
- if err != nil {
|
|
| 87 |
- return nil, err |
|
| 88 |
- } |
|
| 89 |
- |
|
| 90 |
- bridgeSettings := container.NetworkSettings.Networks["bridge"] |
|
| 91 |
- if bridgeSettings == nil {
|
|
| 92 |
- return nil, nil |
|
| 93 |
- } |
|
| 94 |
- |
|
| 95 |
- if len(children) > 0 {
|
|
| 96 |
- for linkAlias, child := range children {
|
|
| 97 |
- if !child.IsRunning() {
|
|
| 98 |
- return nil, derr.ErrorCodeLinkNotRunning.WithArgs(child.Name, linkAlias) |
|
| 99 |
- } |
|
| 100 |
- |
|
| 101 |
- childBridgeSettings := child.NetworkSettings.Networks["bridge"] |
|
| 102 |
- if childBridgeSettings == nil {
|
|
| 103 |
- return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID)
|
|
| 104 |
- } |
|
| 105 |
- |
|
| 106 |
- link := links.NewLink( |
|
| 107 |
- bridgeSettings.IPAddress, |
|
| 108 |
- childBridgeSettings.IPAddress, |
|
| 109 |
- linkAlias, |
|
| 110 |
- child.Config.Env, |
|
| 111 |
- child.Config.ExposedPorts, |
|
| 112 |
- ) |
|
| 113 |
- |
|
| 114 |
- for _, envVar := range link.ToEnv() {
|
|
| 115 |
- env = append(env, envVar) |
|
| 116 |
- } |
|
| 117 |
- } |
|
| 118 |
- } |
|
| 119 |
- return env, nil |
|
| 120 |
-} |
|
| 121 |
- |
|
| 122 |
-func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
|
|
| 123 |
- // if a domain name was specified, append it to the hostname (see #7851) |
|
| 124 |
- fullHostname := container.Config.Hostname |
|
| 125 |
- if container.Config.Domainname != "" {
|
|
| 126 |
- fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname)
|
|
| 127 |
- } |
|
| 128 |
- // Setup environment |
|
| 129 |
- env := []string{
|
|
| 130 |
- "PATH=" + DefaultPathEnv, |
|
| 131 |
- "HOSTNAME=" + fullHostname, |
|
| 132 |
- // Note: we don't set HOME here because it'll get autoset intelligently |
|
| 133 |
- // based on the value of USER inside dockerinit, but only if it isn't |
|
| 134 |
- // set already (ie, that can be overridden by setting HOME via -e or ENV |
|
| 135 |
- // in a Dockerfile). |
|
| 136 |
- } |
|
| 137 |
- if container.Config.Tty {
|
|
| 138 |
- env = append(env, "TERM=xterm") |
|
| 139 |
- } |
|
| 140 |
- env = append(env, linkedEnv...) |
|
| 141 |
- // because the env on the container can override certain default values |
|
| 142 |
- // we need to replace the 'env' keys where they match and append anything |
|
| 143 |
- // else. |
|
| 144 |
- env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env) |
|
| 145 |
- |
|
| 146 |
- return env |
|
| 147 |
-} |
|
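
createDaemonEnvironment above relies on replace-or-append semantics for the container's Env: keys that match a default override it, new keys are appended. A generic reimplementation of that idea (not the utils.ReplaceOrAppendEnvValues code itself):

    package main

    import (
        "fmt"
        "strings"
    )

    func replaceOrAppend(defaults, overrides []string) []string {
        out := append([]string{}, defaults...)
        for _, o := range overrides {
            key := strings.SplitN(o, "=", 2)[0] + "="
            replaced := false
            for i, d := range out {
                if strings.HasPrefix(d, key) {
                    out[i] = o // same key: override the default
                    replaced = true
                    break
                }
            }
            if !replaced {
                out = append(out, o) // new key: append
            }
        }
        return out
    }

    func main() {
        defaults := []string{"PATH=/usr/bin", "TERM=xterm"}
        overrides := []string{"TERM=vt100", "LANG=C"}
        fmt.Println(replaceOrAppend(defaults, overrides))
        // [PATH=/usr/bin TERM=vt100 LANG=C]
    }
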
| 148 |
- |
|
| 149 |
-func getDevicesFromPath(deviceMapping runconfig.DeviceMapping) (devs []*configs.Device, err error) {
|
|
| 150 |
- device, err := devices.DeviceFromPath(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions) |
|
| 151 |
- // if there was no error, return the device |
|
| 152 |
- if err == nil {
|
|
| 153 |
- device.Path = deviceMapping.PathInContainer |
|
| 154 |
- return append(devs, device), nil |
|
| 155 |
- } |
|
| 156 |
- |
|
| 157 |
- // if the device is not a device node |
|
| 158 |
- // try to see if it's a directory holding many devices |
|
| 159 |
- if err == devices.ErrNotADevice {
|
|
| 160 |
- |
|
| 161 |
- // check if it is a directory |
|
| 162 |
- if src, e := os.Stat(deviceMapping.PathOnHost); e == nil && src.IsDir() {
|
|
| 163 |
- |
|
| 164 |
- // mount the internal devices recursively |
|
| 165 |
- filepath.Walk(deviceMapping.PathOnHost, func(dpath string, f os.FileInfo, e error) error {
|
|
| 166 |
- childDevice, e := devices.DeviceFromPath(dpath, deviceMapping.CgroupPermissions) |
|
| 167 |
- if e != nil {
|
|
| 168 |
- // ignore the device |
|
| 169 |
- return nil |
|
| 170 |
- } |
|
| 171 |
- |
|
| 172 |
- // add the device to userSpecified devices |
|
| 173 |
- childDevice.Path = strings.Replace(dpath, deviceMapping.PathOnHost, deviceMapping.PathInContainer, 1) |
|
| 174 |
- devs = append(devs, childDevice) |
|
| 175 |
- |
|
| 176 |
- return nil |
|
| 177 |
- }) |
|
| 178 |
- } |
|
| 179 |
- } |
|
| 180 |
- |
|
| 181 |
- if len(devs) > 0 {
|
|
| 182 |
- return devs, nil |
|
| 183 |
- } |
|
| 184 |
- |
|
| 185 |
- return devs, derr.ErrorCodeDeviceInfo.WithArgs(deviceMapping.PathOnHost, err) |
|
| 186 |
-} |
|
| 187 |
- |
|
| 188 |
-func (daemon *Daemon) populateCommand(c *Container, env []string) error {
|
|
| 189 |
- var en *execdriver.Network |
|
| 190 |
- if !c.Config.NetworkDisabled {
|
|
| 191 |
- en = &execdriver.Network{}
|
|
| 192 |
- if !daemon.execDriver.SupportsHooks() || c.hostConfig.NetworkMode.IsHost() {
|
|
| 193 |
- en.NamespacePath = c.NetworkSettings.SandboxKey |
|
| 194 |
- } |
|
| 195 |
- |
|
| 196 |
- if c.hostConfig.NetworkMode.IsContainer() {
|
|
| 197 |
- nc, err := daemon.getNetworkedContainer(c.ID, c.hostConfig.NetworkMode.ConnectedContainer()) |
|
| 198 |
- if err != nil {
|
|
| 199 |
- return err |
|
| 200 |
- } |
|
| 201 |
- en.ContainerID = nc.ID |
|
| 202 |
- } |
|
| 203 |
- } |
|
| 204 |
- |
|
| 205 |
- ipc := &execdriver.Ipc{}
|
|
| 206 |
- var err error |
|
| 207 |
- c.ShmPath, err = c.shmPath() |
|
| 208 |
- if err != nil {
|
|
| 209 |
- return err |
|
| 210 |
- } |
|
| 211 |
- |
|
| 212 |
- c.MqueuePath, err = c.mqueuePath() |
|
| 213 |
- if err != nil {
|
|
| 214 |
- return err |
|
| 215 |
- } |
|
| 216 |
- |
|
| 217 |
- if c.hostConfig.IpcMode.IsContainer() {
|
|
| 218 |
- ic, err := daemon.getIpcContainer(c) |
|
| 219 |
- if err != nil {
|
|
| 220 |
- return err |
|
| 221 |
- } |
|
| 222 |
- ipc.ContainerID = ic.ID |
|
| 223 |
- c.ShmPath = ic.ShmPath |
|
| 224 |
- c.MqueuePath = ic.MqueuePath |
|
| 225 |
- } else {
|
|
| 226 |
- ipc.HostIpc = c.hostConfig.IpcMode.IsHost() |
|
| 227 |
- if ipc.HostIpc {
|
|
| 228 |
- if _, err := os.Stat("/dev/shm"); err != nil {
|
|
| 229 |
- return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host")
|
|
| 230 |
- } |
|
| 231 |
- if _, err := os.Stat("/dev/mqueue"); err != nil {
|
|
| 232 |
- return fmt.Errorf("/dev/mqueue is not mounted, but must be for --ipc=host")
|
|
| 233 |
- } |
|
| 234 |
- c.ShmPath = "/dev/shm" |
|
| 235 |
- c.MqueuePath = "/dev/mqueue" |
|
| 236 |
- } |
|
| 237 |
- } |
|
| 238 |
- |
|
| 239 |
- pid := &execdriver.Pid{}
|
|
| 240 |
- pid.HostPid = c.hostConfig.PidMode.IsHost() |
|
| 241 |
- |
|
| 242 |
- uts := &execdriver.UTS{
|
|
| 243 |
- HostUTS: c.hostConfig.UTSMode.IsHost(), |
|
| 244 |
- } |
|
| 245 |
- |
|
| 246 |
- // Build lists of devices allowed and created within the container. |
|
| 247 |
- var userSpecifiedDevices []*configs.Device |
|
| 248 |
- for _, deviceMapping := range c.hostConfig.Devices {
|
|
| 249 |
- devs, err := getDevicesFromPath(deviceMapping) |
|
| 250 |
- if err != nil {
|
|
| 251 |
- return err |
|
| 252 |
- } |
|
| 253 |
- |
|
| 254 |
- userSpecifiedDevices = append(userSpecifiedDevices, devs...) |
|
| 255 |
- } |
|
| 256 |
- |
|
| 257 |
- allowedDevices := mergeDevices(configs.DefaultAllowedDevices, userSpecifiedDevices) |
|
| 258 |
- |
|
| 259 |
- autoCreatedDevices := mergeDevices(configs.DefaultAutoCreatedDevices, userSpecifiedDevices) |
|
| 260 |
- |
|
| 261 |
- var rlimits []*ulimit.Rlimit |
|
| 262 |
- ulimits := c.hostConfig.Ulimits |
|
| 263 |
- |
|
| 264 |
- // Merge ulimits with daemon defaults |
|
| 265 |
- ulIdx := make(map[string]*ulimit.Ulimit) |
|
| 266 |
- for _, ul := range ulimits {
|
|
| 267 |
- ulIdx[ul.Name] = ul |
|
| 268 |
- } |
|
| 269 |
- for name, ul := range daemon.configStore.Ulimits {
|
|
| 270 |
- if _, exists := ulIdx[name]; !exists {
|
|
| 271 |
- ulimits = append(ulimits, ul) |
|
| 272 |
- } |
|
| 273 |
- } |
|
| 274 |
- |
|
| 275 |
- weightDevices, err := getBlkioWeightDevices(c.hostConfig) |
|
| 276 |
- if err != nil {
|
|
| 277 |
- return err |
|
| 278 |
- } |
|
| 279 |
- |
|
| 280 |
- for _, limit := range ulimits {
|
|
| 281 |
- rl, err := limit.GetRlimit() |
|
| 282 |
- if err != nil {
|
|
| 283 |
- return err |
|
| 284 |
- } |
|
| 285 |
- rlimits = append(rlimits, rl) |
|
| 286 |
- } |
|
| 287 |
- |
|
| 288 |
- resources := &execdriver.Resources{
|
|
| 289 |
- CommonResources: execdriver.CommonResources{
|
|
| 290 |
- Memory: c.hostConfig.Memory, |
|
| 291 |
- MemoryReservation: c.hostConfig.MemoryReservation, |
|
| 292 |
- CPUShares: c.hostConfig.CPUShares, |
|
| 293 |
- BlkioWeight: c.hostConfig.BlkioWeight, |
|
| 294 |
- }, |
|
| 295 |
- MemorySwap: c.hostConfig.MemorySwap, |
|
| 296 |
- KernelMemory: c.hostConfig.KernelMemory, |
|
| 297 |
- CpusetCpus: c.hostConfig.CpusetCpus, |
|
| 298 |
- CpusetMems: c.hostConfig.CpusetMems, |
|
| 299 |
- CPUPeriod: c.hostConfig.CPUPeriod, |
|
| 300 |
- CPUQuota: c.hostConfig.CPUQuota, |
|
| 301 |
- Rlimits: rlimits, |
|
| 302 |
- BlkioWeightDevice: weightDevices, |
|
| 303 |
- OomKillDisable: c.hostConfig.OomKillDisable, |
|
| 304 |
- MemorySwappiness: *c.hostConfig.MemorySwappiness, |
|
| 305 |
- } |
|
| 306 |
- |
|
| 307 |
- processConfig := execdriver.ProcessConfig{
|
|
| 308 |
- CommonProcessConfig: execdriver.CommonProcessConfig{
|
|
| 309 |
- Entrypoint: c.Path, |
|
| 310 |
- Arguments: c.Args, |
|
| 311 |
- Tty: c.Config.Tty, |
|
| 312 |
- }, |
|
| 313 |
- Privileged: c.hostConfig.Privileged, |
|
| 314 |
- User: c.Config.User, |
|
| 315 |
- } |
|
| 316 |
- |
|
| 317 |
- processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
|
|
| 318 |
- processConfig.Env = env |
|
| 319 |
- |
|
| 320 |
- remappedRoot := &execdriver.User{}
|
|
| 321 |
- rootUID, rootGID := daemon.GetRemappedUIDGID() |
|
| 322 |
- if rootUID != 0 {
|
|
| 323 |
- remappedRoot.UID = rootUID |
|
| 324 |
- remappedRoot.GID = rootGID |
|
| 325 |
- } |
|
| 326 |
- uidMap, gidMap := daemon.GetUIDGIDMaps() |
|
| 327 |
- |
|
| 328 |
- c.command = &execdriver.Command{
|
|
| 329 |
- CommonCommand: execdriver.CommonCommand{
|
|
| 330 |
- ID: c.ID, |
|
| 331 |
- InitPath: "/.dockerinit", |
|
| 332 |
- MountLabel: c.getMountLabel(), |
|
| 333 |
- Network: en, |
|
| 334 |
- ProcessConfig: processConfig, |
|
| 335 |
- ProcessLabel: c.getProcessLabel(), |
|
| 336 |
- Rootfs: c.rootfsPath(), |
|
| 337 |
- Resources: resources, |
|
| 338 |
- WorkingDir: c.Config.WorkingDir, |
|
| 339 |
- }, |
|
| 340 |
- AllowedDevices: allowedDevices, |
|
| 341 |
- AppArmorProfile: c.AppArmorProfile, |
|
| 342 |
- AutoCreatedDevices: autoCreatedDevices, |
|
| 343 |
- CapAdd: c.hostConfig.CapAdd.Slice(), |
|
| 344 |
- CapDrop: c.hostConfig.CapDrop.Slice(), |
|
| 345 |
- CgroupParent: c.hostConfig.CgroupParent, |
|
| 346 |
- GIDMapping: gidMap, |
|
| 347 |
- GroupAdd: c.hostConfig.GroupAdd, |
|
| 348 |
- Ipc: ipc, |
|
| 349 |
- OomScoreAdj: c.hostConfig.OomScoreAdj, |
|
| 350 |
- Pid: pid, |
|
| 351 |
- ReadonlyRootfs: c.hostConfig.ReadonlyRootfs, |
|
| 352 |
- RemappedRoot: remappedRoot, |
|
| 353 |
- UIDMapping: uidMap, |
|
| 354 |
- UTS: uts, |
|
| 355 |
- } |
|
| 356 |
- |
|
| 357 |
- return nil |
|
| 358 |
-} |
|
| 359 |
- |
|
| 360 |
-func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Device {
|
|
| 361 |
- if len(userDevices) == 0 {
|
|
| 362 |
- return defaultDevices |
|
| 363 |
- } |
|
| 364 |
- |
|
| 365 |
- paths := map[string]*configs.Device{}
|
|
| 366 |
- for _, d := range userDevices {
|
|
| 367 |
- paths[d.Path] = d |
|
| 368 |
- } |
|
| 369 |
- |
|
| 370 |
- var devs []*configs.Device |
|
| 371 |
- for _, d := range defaultDevices {
|
|
| 372 |
- if _, defined := paths[d.Path]; !defined {
|
|
| 373 |
- devs = append(devs, d) |
|
| 374 |
- } |
|
| 375 |
- } |
|
| 376 |
- return append(devs, userDevices...) |
|
| 377 |
-} |
|
| 378 |
- |
|
| 379 |
-// getSize returns the real size & virtual size of the container. |
|
| 380 |
-func (daemon *Daemon) getSize(container *Container) (int64, int64) {
|
|
| 381 |
- var ( |
|
| 382 |
- sizeRw, sizeRootfs int64 |
|
| 383 |
- err error |
|
| 384 |
- ) |
|
| 385 |
- |
|
| 386 |
- if err := daemon.Mount(container); err != nil {
|
|
| 387 |
- logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err)
|
|
| 388 |
- return sizeRw, sizeRootfs |
|
| 389 |
- } |
|
| 390 |
- defer daemon.Unmount(container) |
|
| 391 |
- |
|
| 392 |
- sizeRw, err = container.rwlayer.Size() |
|
| 393 |
- if err != nil {
|
|
| 394 |
- logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", daemon.driver, container.ID, err)
|
|
| 395 |
- // FIXME: GetSize should return an error. Not changing it now in case |
|
| 396 |
- // there is a side-effect. |
|
| 397 |
- sizeRw = -1 |
|
| 398 |
- } |
|
| 399 |
- |
|
| 400 |
- if parent := container.rwlayer.Parent(); parent != nil {
|
|
| 401 |
- sizeRootfs, err = parent.Size() |
|
| 402 |
- if err != nil {
|
|
| 403 |
- sizeRootfs = -1 |
|
| 404 |
- } else if sizeRw != -1 {
|
|
| 405 |
- sizeRootfs += sizeRw |
|
| 406 |
- } |
|
| 407 |
- } |
|
| 408 |
- return sizeRw, sizeRootfs |
|
| 409 |
-} |
|
| 410 |
- |
|
| 411 |
-// Attempt to set the network mounts given a provided destination and |
|
| 412 |
-// the path to use for it; return true if the given destination was a |
|
| 413 |
-// network mount file |
|
| 414 |
-func (container *Container) trySetNetworkMount(destination string, path string) bool {
|
|
| 415 |
- if destination == "/etc/resolv.conf" {
|
|
| 416 |
- container.ResolvConfPath = path |
|
| 417 |
- return true |
|
| 418 |
- } |
|
| 419 |
- if destination == "/etc/hostname" {
|
|
| 420 |
- container.HostnamePath = path |
|
| 421 |
- return true |
|
| 422 |
- } |
|
| 423 |
- if destination == "/etc/hosts" {
|
|
| 424 |
- container.HostsPath = path |
|
| 425 |
- return true |
|
| 426 |
- } |
|
| 427 |
- |
|
| 428 |
- return false |
|
| 429 |
-} |
|
| 430 |
- |
|
| 431 |
-func (container *Container) buildHostnameFile() error {
|
|
| 432 |
- hostnamePath, err := container.getRootResourcePath("hostname")
|
|
| 433 |
- if err != nil {
|
|
| 434 |
- return err |
|
| 435 |
- } |
|
| 436 |
- container.HostnamePath = hostnamePath |
|
| 437 |
- |
|
| 438 |
- if container.Config.Domainname != "" {
|
|
| 439 |
- return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644)
|
|
| 440 |
- } |
|
| 441 |
- return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) |
|
| 442 |
-} |
|
| 443 |
- |
|
| 444 |
-func (daemon *Daemon) buildSandboxOptions(container *Container, n libnetwork.Network) ([]libnetwork.SandboxOption, error) {
|
|
| 445 |
- var ( |
|
| 446 |
- sboxOptions []libnetwork.SandboxOption |
|
| 447 |
- err error |
|
| 448 |
- dns []string |
|
| 449 |
- dnsSearch []string |
|
| 450 |
- dnsOptions []string |
|
| 451 |
- ) |
|
| 452 |
- |
|
| 453 |
- sboxOptions = append(sboxOptions, libnetwork.OptionHostname(container.Config.Hostname), |
|
| 454 |
- libnetwork.OptionDomainname(container.Config.Domainname)) |
|
| 455 |
- |
|
| 456 |
- if container.hostConfig.NetworkMode.IsHost() {
|
|
| 457 |
- sboxOptions = append(sboxOptions, libnetwork.OptionUseDefaultSandbox()) |
|
| 458 |
- sboxOptions = append(sboxOptions, libnetwork.OptionOriginHostsPath("/etc/hosts"))
|
|
| 459 |
- sboxOptions = append(sboxOptions, libnetwork.OptionOriginResolvConfPath("/etc/resolv.conf"))
|
|
| 460 |
- } else if daemon.execDriver.SupportsHooks() {
|
|
| 461 |
- // OptionUseExternalKey is mandatory for userns support. |
|
| 462 |
- // But optional for non-userns support |
|
| 463 |
- sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey()) |
|
| 464 |
- } |
|
| 465 |
- |
|
| 466 |
- container.HostsPath, err = container.getRootResourcePath("hosts")
|
|
| 467 |
- if err != nil {
|
|
| 468 |
- return nil, err |
|
| 469 |
- } |
|
| 470 |
- sboxOptions = append(sboxOptions, libnetwork.OptionHostsPath(container.HostsPath)) |
|
| 471 |
- |
|
| 472 |
- container.ResolvConfPath, err = container.getRootResourcePath("resolv.conf")
|
|
| 473 |
- if err != nil {
|
|
| 474 |
- return nil, err |
|
| 475 |
- } |
|
| 476 |
- sboxOptions = append(sboxOptions, libnetwork.OptionResolvConfPath(container.ResolvConfPath)) |
|
| 477 |
- |
|
| 478 |
- if len(container.hostConfig.DNS) > 0 {
|
|
| 479 |
- dns = container.hostConfig.DNS |
|
| 480 |
- } else if len(daemon.configStore.DNS) > 0 {
|
|
| 481 |
- dns = daemon.configStore.DNS |
|
| 482 |
- } |
|
| 483 |
- |
|
| 484 |
- for _, d := range dns {
|
|
| 485 |
- sboxOptions = append(sboxOptions, libnetwork.OptionDNS(d)) |
|
| 486 |
- } |
|
| 487 |
- |
|
| 488 |
- if len(container.hostConfig.DNSSearch) > 0 {
|
|
| 489 |
- dnsSearch = container.hostConfig.DNSSearch |
|
| 490 |
- } else if len(daemon.configStore.DNSSearch) > 0 {
|
|
| 491 |
- dnsSearch = daemon.configStore.DNSSearch |
|
| 492 |
- } |
|
| 493 |
- |
|
| 494 |
- for _, ds := range dnsSearch {
|
|
| 495 |
- sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(ds)) |
|
| 496 |
- } |
|
| 497 |
- |
|
| 498 |
- if len(container.hostConfig.DNSOptions) > 0 {
|
|
| 499 |
- dnsOptions = container.hostConfig.DNSOptions |
|
| 500 |
- } else if len(daemon.configStore.DNSOptions) > 0 {
|
|
| 501 |
- dnsOptions = daemon.configStore.DNSOptions |
|
| 502 |
- } |
|
| 503 |
- |
|
| 504 |
- for _, ds := range dnsOptions {
|
|
| 505 |
- sboxOptions = append(sboxOptions, libnetwork.OptionDNSOptions(ds)) |
|
| 506 |
- } |
|
| 507 |
- |
|
| 508 |
- if container.NetworkSettings.SecondaryIPAddresses != nil {
|
|
| 509 |
- name := container.Config.Hostname |
|
| 510 |
- if container.Config.Domainname != "" {
|
|
| 511 |
- name = name + "." + container.Config.Domainname |
|
| 512 |
- } |
|
| 513 |
- |
|
| 514 |
- for _, a := range container.NetworkSettings.SecondaryIPAddresses {
|
|
| 515 |
- sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(name, a.Addr)) |
|
| 516 |
- } |
|
| 517 |
- } |
|
| 518 |
- |
|
| 519 |
- for _, extraHost := range container.hostConfig.ExtraHosts {
|
|
| 520 |
- // allow IPv6 addresses in extra hosts; only split on first ":" |
|
| 521 |
- parts := strings.SplitN(extraHost, ":", 2) |
|
| 522 |
- sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(parts[0], parts[1])) |
|
| 523 |
- } |
|
| 524 |
- |
|
| 525 |
- // Link feature is supported only for the default bridge network. |
|
| 526 |
- // return if this call to build join options is not for default bridge network |
|
| 527 |
- if n.Name() != "bridge" {
|
|
| 528 |
- return sboxOptions, nil |
|
| 529 |
- } |
|
| 530 |
- |
|
| 531 |
- ep, _ := container.getEndpointInNetwork(n) |
|
| 532 |
- if ep == nil {
|
|
| 533 |
- return sboxOptions, nil |
|
| 534 |
- } |
|
| 535 |
- |
|
| 536 |
- var childEndpoints, parentEndpoints []string |
|
| 537 |
- |
|
| 538 |
- children, err := daemon.children(container.Name) |
|
| 539 |
- if err != nil {
|
|
| 540 |
- return nil, err |
|
| 541 |
- } |
|
| 542 |
- |
|
| 543 |
- for linkAlias, child := range children {
|
|
| 544 |
- if !isLinkable(child) {
|
|
| 545 |
- return nil, fmt.Errorf("Cannot link to %s, as it does not belong to the default network", child.Name)
|
|
| 546 |
- } |
|
| 547 |
- _, alias := path.Split(linkAlias) |
|
| 548 |
- // allow access to the linked container via the alias, real name, and container hostname |
|
| 549 |
- aliasList := alias + " " + child.Config.Hostname |
|
| 550 |
- // only add the name if alias isn't equal to the name |
|
| 551 |
- if alias != child.Name[1:] {
|
|
| 552 |
- aliasList = aliasList + " " + child.Name[1:] |
|
| 553 |
- } |
|
| 554 |
- sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(aliasList, child.NetworkSettings.Networks["bridge"].IPAddress)) |
|
| 555 |
- cEndpoint, _ := child.getEndpointInNetwork(n) |
|
| 556 |
- if cEndpoint != nil && cEndpoint.ID() != "" {
|
|
| 557 |
- childEndpoints = append(childEndpoints, cEndpoint.ID()) |
|
| 558 |
- } |
|
| 559 |
- } |
|
| 560 |
- |
|
| 561 |
- bridgeSettings := container.NetworkSettings.Networks["bridge"] |
|
| 562 |
- refs := daemon.containerGraph().RefPaths(container.ID) |
|
| 563 |
- for _, ref := range refs {
|
|
| 564 |
- if ref.ParentID == "0" {
|
|
| 565 |
- continue |
|
| 566 |
- } |
|
| 567 |
- |
|
| 568 |
- c, err := daemon.Get(ref.ParentID) |
|
| 569 |
- if err != nil {
|
|
| 570 |
- logrus.Error(err) |
|
| 571 |
- } |
|
| 572 |
- |
|
| 573 |
- if c != nil && !daemon.configStore.DisableBridge && container.hostConfig.NetworkMode.IsPrivate() {
|
|
| 574 |
- logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, bridgeSettings.IPAddress)
|
|
| 575 |
- sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate(c.ID, ref.Name, bridgeSettings.IPAddress)) |
|
| 576 |
- if ep.ID() != "" {
|
|
| 577 |
- parentEndpoints = append(parentEndpoints, ep.ID()) |
|
| 578 |
- } |
|
| 579 |
- } |
|
| 580 |
- } |
|
| 581 |
- |
|
| 582 |
- linkOptions := options.Generic{
|
|
| 583 |
- netlabel.GenericData: options.Generic{
|
|
| 584 |
- "ParentEndpoints": parentEndpoints, |
|
| 585 |
- "ChildEndpoints": childEndpoints, |
|
| 586 |
- }, |
|
| 587 |
- } |
|
| 588 |
- |
|
| 589 |
- sboxOptions = append(sboxOptions, libnetwork.OptionGeneric(linkOptions)) |
|
| 590 |
- |
|
| 591 |
- return sboxOptions, nil |
|
| 592 |
-} |
|
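
The extra-host handling in buildSandboxOptions above splits on the first ":" only, so IPv6 addresses keep their internal colons. A tiny example with a made-up entry:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        extraHost := "db:2001:db8::10"
        parts := strings.SplitN(extraHost, ":", 2)
        fmt.Println(parts[0]) // db
        fmt.Println(parts[1]) // 2001:db8::10
    }
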
| 593 |
- |
|
| 594 |
-func isLinkable(child *Container) bool {
|
|
| 595 |
- // A container is linkable only if it belongs to the default network |
|
| 596 |
- _, ok := child.NetworkSettings.Networks["bridge"] |
|
| 597 |
- return ok |
|
| 598 |
-} |
|
| 599 |
- |
|
| 600 |
-func (container *Container) getEndpointInNetwork(n libnetwork.Network) (libnetwork.Endpoint, error) {
|
|
| 601 |
- endpointName := strings.TrimPrefix(container.Name, "/") |
|
| 602 |
- return n.EndpointByName(endpointName) |
|
| 603 |
-} |
|
| 604 |
- |
|
| 605 |
-func (container *Container) buildPortMapInfo(ep libnetwork.Endpoint, networkSettings *network.Settings) (*network.Settings, error) {
|
|
| 606 |
- if ep == nil {
|
|
| 607 |
- return nil, derr.ErrorCodeEmptyEndpoint |
|
| 608 |
- } |
|
| 609 |
- |
|
| 610 |
- if networkSettings == nil {
|
|
| 611 |
- return nil, derr.ErrorCodeEmptyNetwork |
|
| 612 |
- } |
|
| 613 |
- |
|
| 614 |
- driverInfo, err := ep.DriverInfo() |
|
| 615 |
- if err != nil {
|
|
| 616 |
- return nil, err |
|
| 617 |
- } |
|
| 618 |
- |
|
| 619 |
- if driverInfo == nil {
|
|
| 620 |
- // It is not an error for epInfo to be nil |
|
| 621 |
- return networkSettings, nil |
|
| 622 |
- } |
|
| 623 |
- |
|
| 624 |
- if networkSettings.Ports == nil {
|
|
| 625 |
- networkSettings.Ports = nat.PortMap{}
|
|
| 626 |
- } |
|
| 627 |
- |
|
| 628 |
- if expData, ok := driverInfo[netlabel.ExposedPorts]; ok {
|
|
| 629 |
- if exposedPorts, ok := expData.([]types.TransportPort); ok {
|
|
| 630 |
- for _, tp := range exposedPorts {
|
|
| 631 |
- natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port))) |
|
| 632 |
- if err != nil {
|
|
| 633 |
- return nil, derr.ErrorCodeParsingPort.WithArgs(tp.Port, err) |
|
| 634 |
- } |
|
| 635 |
- networkSettings.Ports[natPort] = nil |
|
| 636 |
- } |
|
| 637 |
- } |
|
| 638 |
- } |
|
| 639 |
- |
|
| 640 |
- mapData, ok := driverInfo[netlabel.PortMap] |
|
| 641 |
- if !ok {
|
|
| 642 |
- return networkSettings, nil |
|
| 643 |
- } |
|
| 644 |
- |
|
| 645 |
- if portMapping, ok := mapData.([]types.PortBinding); ok {
|
|
| 646 |
- for _, pp := range portMapping {
|
|
| 647 |
- natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port))) |
|
| 648 |
- if err != nil {
|
|
| 649 |
- return nil, err |
|
| 650 |
- } |
|
| 651 |
- natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))}
|
|
| 652 |
- networkSettings.Ports[natPort] = append(networkSettings.Ports[natPort], natBndg) |
|
| 653 |
- } |
|
| 654 |
- } |
|
| 655 |
- |
|
| 656 |
- return networkSettings, nil |
|
| 657 |
-} |
|
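
buildPortMapInfo above converts libnetwork's exposed ports and port bindings into a map keyed by "port/proto". A simplified illustration of that shape, with stand-in types instead of nat.PortMap and types.PortBinding:

    package main

    import "fmt"

    type binding struct {
        HostIP   string
        HostPort string
    }

    func main() {
        ports := map[string][]binding{}

        // exposed but unbound port
        ports["53/udp"] = nil

        // published port
        ports["80/tcp"] = append(ports["80/tcp"], binding{"0.0.0.0", "8080"})

        fmt.Println(ports) // map[53/udp:[] 80/tcp:[{0.0.0.0 8080}]]
    }
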
| 658 |
- |
|
| 659 |
-func (container *Container) buildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint, networkSettings *network.Settings) (*network.Settings, error) {
|
|
| 660 |
- if ep == nil {
|
|
| 661 |
- return nil, derr.ErrorCodeEmptyEndpoint |
|
| 662 |
- } |
|
| 663 |
- |
|
| 664 |
- if networkSettings == nil {
|
|
| 665 |
- return nil, derr.ErrorCodeEmptyNetwork |
|
| 666 |
- } |
|
| 667 |
- |
|
| 668 |
- epInfo := ep.Info() |
|
| 669 |
- if epInfo == nil {
|
|
| 670 |
- // It is not an error to get an empty endpoint info |
|
| 671 |
- return networkSettings, nil |
|
| 672 |
- } |
|
| 673 |
- |
|
| 674 |
- if _, ok := networkSettings.Networks[n.Name()]; !ok {
|
|
| 675 |
- networkSettings.Networks[n.Name()] = new(network.EndpointSettings) |
|
| 676 |
- } |
|
| 677 |
- networkSettings.Networks[n.Name()].EndpointID = ep.ID() |
|
| 678 |
- |
|
| 679 |
- iface := epInfo.Iface() |
|
| 680 |
- if iface == nil {
|
|
| 681 |
- return networkSettings, nil |
|
| 682 |
- } |
|
| 683 |
- |
|
| 684 |
- if iface.MacAddress() != nil {
|
|
| 685 |
- networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String() |
|
| 686 |
- } |
|
| 687 |
- |
|
| 688 |
- if iface.Address() != nil {
|
|
| 689 |
- ones, _ := iface.Address().Mask.Size() |
|
| 690 |
- networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String() |
|
| 691 |
- networkSettings.Networks[n.Name()].IPPrefixLen = ones |
|
| 692 |
- } |
|
| 693 |
- |
|
| 694 |
- if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil {
|
|
| 695 |
- onesv6, _ := iface.AddressIPv6().Mask.Size() |
|
| 696 |
- networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String() |
|
| 697 |
- networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6 |
|
| 698 |
- } |
|
| 699 |
- |
|
| 700 |
- return networkSettings, nil |
|
| 701 |
-} |
|
| 702 |
- |
|
| 703 |
-func (container *Container) updateJoinInfo(n libnetwork.Network, ep libnetwork.Endpoint) error {
|
|
| 704 |
- if _, err := container.buildPortMapInfo(ep, container.NetworkSettings); err != nil {
|
|
| 705 |
- return err |
|
| 706 |
- } |
|
| 707 |
- |
|
| 708 |
- epInfo := ep.Info() |
|
| 709 |
- if epInfo == nil {
|
|
| 710 |
- // It is not an error to get an empty endpoint info |
|
| 711 |
- return nil |
|
| 712 |
- } |
|
| 713 |
- if epInfo.Gateway() != nil {
|
|
| 714 |
- container.NetworkSettings.Networks[n.Name()].Gateway = epInfo.Gateway().String() |
|
| 715 |
- } |
|
| 716 |
- if epInfo.GatewayIPv6().To16() != nil {
|
|
| 717 |
- container.NetworkSettings.Networks[n.Name()].IPv6Gateway = epInfo.GatewayIPv6().String() |
|
| 718 |
- } |
|
| 719 |
- |
|
| 720 |
- return nil |
|
| 721 |
-} |
|
| 722 |
- |
|
| 723 |
-func (daemon *Daemon) updateNetworkSettings(container *Container, n libnetwork.Network) error {
|
|
| 724 |
- if container.NetworkSettings == nil {
|
|
| 725 |
- container.NetworkSettings = &network.Settings{Networks: make(map[string]*network.EndpointSettings)}
|
|
| 726 |
- } |
|
| 727 |
- |
|
| 728 |
- if !container.hostConfig.NetworkMode.IsHost() && runconfig.NetworkMode(n.Type()).IsHost() {
|
|
| 729 |
- return runconfig.ErrConflictHostNetwork |
|
| 730 |
- } |
|
| 731 |
- |
|
| 732 |
- for s := range container.NetworkSettings.Networks {
|
|
| 733 |
- sn, err := daemon.FindNetwork(s) |
|
| 734 |
- if err != nil {
|
|
| 735 |
- continue |
|
| 736 |
- } |
|
| 737 |
- |
|
| 738 |
- if sn.Name() == n.Name() {
|
|
| 739 |
- // Avoid duplicate config |
|
| 740 |
- return nil |
|
| 741 |
- } |
|
| 742 |
- if !runconfig.NetworkMode(sn.Type()).IsPrivate() || |
|
| 743 |
- !runconfig.NetworkMode(n.Type()).IsPrivate() {
|
|
| 744 |
- return runconfig.ErrConflictSharedNetwork |
|
| 745 |
- } |
|
| 746 |
- if runconfig.NetworkMode(sn.Name()).IsNone() || |
|
| 747 |
- runconfig.NetworkMode(n.Name()).IsNone() {
|
|
| 748 |
- return runconfig.ErrConflictNoNetwork |
|
| 749 |
- } |
|
| 750 |
- } |
|
| 751 |
- container.NetworkSettings.Networks[n.Name()] = new(network.EndpointSettings) |
|
| 752 |
- |
|
| 753 |
- return nil |
|
| 754 |
-} |
|
| 755 |
- |
|
| 756 |
-func (daemon *Daemon) updateEndpointNetworkSettings(container *Container, n libnetwork.Network, ep libnetwork.Endpoint) error {
|
|
| 757 |
- networkSettings, err := container.buildEndpointInfo(n, ep, container.NetworkSettings) |
|
| 758 |
- if err != nil {
|
|
| 759 |
- return err |
|
| 760 |
- } |
|
| 761 |
- |
|
| 762 |
- if container.hostConfig.NetworkMode == runconfig.NetworkMode("bridge") {
|
|
| 763 |
- networkSettings.Bridge = daemon.configStore.Bridge.Iface |
|
| 764 |
- } |
|
| 765 |
- |
|
| 766 |
- return nil |
|
| 767 |
-} |
|
| 768 |
- |
|
| 769 |
-func (container *Container) updateSandboxNetworkSettings(sb libnetwork.Sandbox) error {
|
|
| 770 |
- container.NetworkSettings.SandboxID = sb.ID() |
|
| 771 |
- container.NetworkSettings.SandboxKey = sb.Key() |
|
| 772 |
- return nil |
|
| 773 |
-} |
|
| 774 |
- |
|
| 775 |
-// UpdateNetwork is used to update the container's network (e.g. when linked containers |
|
| 776 |
-// get removed/unlinked). |
|
| 777 |
-func (daemon *Daemon) updateNetwork(container *Container) error {
|
|
| 778 |
- ctrl := daemon.netController |
|
| 779 |
- sid := container.NetworkSettings.SandboxID |
|
| 780 |
- |
|
| 781 |
- sb, err := ctrl.SandboxByID(sid) |
|
| 782 |
- if err != nil {
|
|
| 783 |
- return derr.ErrorCodeNoSandbox.WithArgs(sid, err) |
|
| 784 |
- } |
|
| 785 |
- |
|
| 786 |
- // Find if container is connected to the default bridge network |
|
| 787 |
- var n libnetwork.Network |
|
| 788 |
- for name := range container.NetworkSettings.Networks {
|
|
| 789 |
- sn, err := daemon.FindNetwork(name) |
|
| 790 |
- if err != nil {
|
|
| 791 |
- continue |
|
| 792 |
- } |
|
| 793 |
- if sn.Name() == "bridge" {
|
|
| 794 |
- n = sn |
|
| 795 |
- break |
|
| 796 |
- } |
|
| 797 |
- } |
|
| 798 |
- |
|
| 799 |
- if n == nil {
|
|
| 800 |
- // Not connected to the default bridge network; Nothing to do |
|
| 801 |
- return nil |
|
| 802 |
- } |
|
| 803 |
- |
|
| 804 |
- options, err := daemon.buildSandboxOptions(container, n) |
|
| 805 |
- if err != nil {
|
|
| 806 |
- return derr.ErrorCodeNetworkUpdate.WithArgs(err) |
|
| 807 |
- } |
|
| 808 |
- |
|
| 809 |
- if err := sb.Refresh(options...); err != nil {
|
|
| 810 |
- return derr.ErrorCodeNetworkRefresh.WithArgs(sid, err) |
|
| 811 |
- } |
|
| 812 |
- |
|
| 813 |
- return nil |
|
| 814 |
-} |
|
| 815 |
- |
|
| 816 |
-func (container *Container) buildCreateEndpointOptions(n libnetwork.Network) ([]libnetwork.EndpointOption, error) {
|
|
| 817 |
- var ( |
|
| 818 |
- portSpecs = make(nat.PortSet) |
|
| 819 |
- bindings = make(nat.PortMap) |
|
| 820 |
- pbList []types.PortBinding |
|
| 821 |
- exposeList []types.TransportPort |
|
| 822 |
- createOptions []libnetwork.EndpointOption |
|
| 823 |
- ) |
|
| 824 |
- |
|
| 825 |
- if n.Name() == "bridge" || container.NetworkSettings.IsAnonymousEndpoint {
|
|
| 826 |
- createOptions = append(createOptions, libnetwork.CreateOptionAnonymous()) |
|
| 827 |
- } |
|
| 828 |
- |
|
| 829 |
- // Other configs are applicable only for the endpoint in the network |
|
| 830 |
- // to which container was connected to on docker run. |
|
| 831 |
- if n.Name() != container.hostConfig.NetworkMode.NetworkName() && |
|
| 832 |
- !(n.Name() == "bridge" && container.hostConfig.NetworkMode.IsDefault()) {
|
|
| 833 |
- return createOptions, nil |
|
| 834 |
- } |
|
| 835 |
- |
|
| 836 |
- if container.Config.ExposedPorts != nil {
|
|
| 837 |
- portSpecs = container.Config.ExposedPorts |
|
| 838 |
- } |
|
| 839 |
- |
|
| 840 |
- if container.hostConfig.PortBindings != nil {
|
|
| 841 |
- for p, b := range container.hostConfig.PortBindings {
|
|
| 842 |
- bindings[p] = []nat.PortBinding{}
|
|
| 843 |
- for _, bb := range b {
|
|
| 844 |
- bindings[p] = append(bindings[p], nat.PortBinding{
|
|
| 845 |
- HostIP: bb.HostIP, |
|
| 846 |
- HostPort: bb.HostPort, |
|
| 847 |
- }) |
|
| 848 |
- } |
|
| 849 |
- } |
|
| 850 |
- } |
|
| 851 |
- |
|
| 852 |
- ports := make([]nat.Port, len(portSpecs)) |
|
| 853 |
- var i int |
|
| 854 |
- for p := range portSpecs {
|
|
| 855 |
- ports[i] = p |
|
| 856 |
- i++ |
|
| 857 |
- } |
|
| 858 |
- nat.SortPortMap(ports, bindings) |
|
| 859 |
- for _, port := range ports {
|
|
| 860 |
- expose := types.TransportPort{}
|
|
| 861 |
- expose.Proto = types.ParseProtocol(port.Proto()) |
|
| 862 |
- expose.Port = uint16(port.Int()) |
|
| 863 |
- exposeList = append(exposeList, expose) |
|
| 864 |
- |
|
| 865 |
- pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto}
|
|
| 866 |
- binding := bindings[port] |
|
| 867 |
- for i := 0; i < len(binding); i++ {
|
|
| 868 |
- pbCopy := pb.GetCopy() |
|
| 869 |
- newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) |
|
| 870 |
- var portStart, portEnd int |
|
| 871 |
- if err == nil {
|
|
| 872 |
- portStart, portEnd, err = newP.Range() |
|
| 873 |
- } |
|
| 874 |
- if err != nil {
|
|
| 875 |
- return nil, derr.ErrorCodeHostPort.WithArgs(binding[i].HostPort, err) |
|
| 876 |
- } |
|
| 877 |
- pbCopy.HostPort = uint16(portStart) |
|
| 878 |
- pbCopy.HostPortEnd = uint16(portEnd) |
|
| 879 |
- pbCopy.HostIP = net.ParseIP(binding[i].HostIP) |
|
| 880 |
- pbList = append(pbList, pbCopy) |
|
| 881 |
- } |
|
| 882 |
- |
|
| 883 |
- if container.hostConfig.PublishAllPorts && len(binding) == 0 {
|
|
| 884 |
- pbList = append(pbList, pb) |
|
| 885 |
- } |
|
| 886 |
- } |
|
| 887 |
- |
|
| 888 |
- createOptions = append(createOptions, |
|
| 889 |
- libnetwork.CreateOptionPortMapping(pbList), |
|
| 890 |
- libnetwork.CreateOptionExposedPorts(exposeList)) |
|
| 891 |
- |
|
| 892 |
- if container.Config.MacAddress != "" {
|
|
| 893 |
- mac, err := net.ParseMAC(container.Config.MacAddress) |
|
| 894 |
- if err != nil {
|
|
| 895 |
- return nil, err |
|
| 896 |
- } |
|
| 897 |
- |
|
| 898 |
- genericOption := options.Generic{
|
|
| 899 |
- netlabel.MacAddress: mac, |
|
| 900 |
- } |
|
| 901 |
- |
|
| 902 |
- createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption)) |
|
| 903 |
- } |
|
| 904 |
- |
|
| 905 |
- return createOptions, nil |
|
| 906 |
-} |
|
| 907 |
- |
|
| 908 |
-func (daemon *Daemon) allocateNetwork(container *Container) error {
|
|
| 909 |
- controller := daemon.netController |
|
| 910 |
- |
|
| 911 |
- // Cleanup any stale sandbox left over due to ungraceful daemon shutdown |
|
| 912 |
- if err := controller.SandboxDestroy(container.ID); err != nil {
|
|
| 913 |
- logrus.Errorf("failed to cleanup up stale network sandbox for container %s", container.ID)
|
|
| 914 |
- } |
|
| 915 |
- |
|
| 916 |
- updateSettings := false |
|
| 917 |
- if len(container.NetworkSettings.Networks) == 0 {
|
|
| 918 |
- mode := container.hostConfig.NetworkMode |
|
| 919 |
- if container.Config.NetworkDisabled || mode.IsContainer() {
|
|
| 920 |
- return nil |
|
| 921 |
- } |
|
| 922 |
- |
|
| 923 |
- networkName := mode.NetworkName() |
|
| 924 |
- if mode.IsDefault() {
|
|
| 925 |
- networkName = controller.Config().Daemon.DefaultNetwork |
|
| 926 |
- } |
|
| 927 |
- if mode.IsUserDefined() {
|
|
| 928 |
- n, err := daemon.FindNetwork(networkName) |
|
| 929 |
- if err != nil {
|
|
| 930 |
- return err |
|
| 931 |
- } |
|
| 932 |
- networkName = n.Name() |
|
| 933 |
- } |
|
| 934 |
- container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings) |
|
| 935 |
- container.NetworkSettings.Networks[networkName] = new(network.EndpointSettings) |
|
| 936 |
- updateSettings = true |
|
| 937 |
- } |
|
| 938 |
- |
|
| 939 |
- for n := range container.NetworkSettings.Networks {
|
|
| 940 |
- if err := daemon.connectToNetwork(container, n, updateSettings); err != nil {
|
|
| 941 |
- return err |
|
| 942 |
- } |
|
| 943 |
- } |
|
| 944 |
- |
|
| 945 |
- return container.writeHostConfig() |
|
| 946 |
-} |
|
| 947 |
- |
|
| 948 |
-func (daemon *Daemon) getNetworkSandbox(container *Container) libnetwork.Sandbox {
|
|
| 949 |
- var sb libnetwork.Sandbox |
|
| 950 |
- daemon.netController.WalkSandboxes(func(s libnetwork.Sandbox) bool {
|
|
| 951 |
- if s.ContainerID() == container.ID {
|
|
| 952 |
- sb = s |
|
| 953 |
- return true |
|
| 954 |
- } |
|
| 955 |
- return false |
|
| 956 |
- }) |
|
| 957 |
- return sb |
|
| 958 |
-} |
|
| 959 |
- |
|
| 960 |
-// ConnectToNetwork connects a container to a network |
|
| 961 |
-func (daemon *Daemon) ConnectToNetwork(container *Container, idOrName string) error {
|
|
| 962 |
- if !container.Running {
|
|
| 963 |
- return derr.ErrorCodeNotRunning.WithArgs(container.ID) |
|
| 964 |
- } |
|
| 965 |
- if err := daemon.connectToNetwork(container, idOrName, true); err != nil {
|
|
| 966 |
- return err |
|
| 967 |
- } |
|
| 968 |
- if err := container.toDiskLocking(); err != nil {
|
|
| 969 |
- return fmt.Errorf("Error saving container to disk: %v", err)
|
|
| 970 |
- } |
|
| 971 |
- return nil |
|
| 972 |
-} |
|
| 973 |
- |
|
| 974 |
-func (daemon *Daemon) connectToNetwork(container *Container, idOrName string, updateSettings bool) (err error) {
|
|
| 975 |
- if container.hostConfig.NetworkMode.IsContainer() {
|
|
| 976 |
- return runconfig.ErrConflictSharedNetwork |
|
| 977 |
- } |
|
| 978 |
- |
|
| 979 |
- if runconfig.NetworkMode(idOrName).IsBridge() && |
|
| 980 |
- daemon.configStore.DisableBridge {
|
|
| 981 |
- container.Config.NetworkDisabled = true |
|
| 982 |
- return nil |
|
| 983 |
- } |
|
| 984 |
- |
|
| 985 |
- controller := daemon.netController |
|
| 986 |
- |
|
| 987 |
- n, err := daemon.FindNetwork(idOrName) |
|
| 988 |
- if err != nil {
|
|
| 989 |
- return err |
|
| 990 |
- } |
|
| 991 |
- |
|
| 992 |
- if updateSettings {
|
|
| 993 |
- if err := daemon.updateNetworkSettings(container, n); err != nil {
|
|
| 994 |
- return err |
|
| 995 |
- } |
|
| 996 |
- } |
|
| 997 |
- |
|
| 998 |
- ep, err := container.getEndpointInNetwork(n) |
|
| 999 |
- if err == nil {
|
|
| 1000 |
- return fmt.Errorf("container already connected to network %s", idOrName)
|
|
| 1001 |
- } |
|
| 1002 |
- |
|
| 1003 |
- if _, ok := err.(libnetwork.ErrNoSuchEndpoint); !ok {
|
|
| 1004 |
- return err |
|
| 1005 |
- } |
|
| 1006 |
- |
|
| 1007 |
- createOptions, err := container.buildCreateEndpointOptions(n) |
|
| 1008 |
- if err != nil {
|
|
| 1009 |
- return err |
|
| 1010 |
- } |
|
| 1011 |
- |
|
| 1012 |
- endpointName := strings.TrimPrefix(container.Name, "/") |
|
| 1013 |
- ep, err = n.CreateEndpoint(endpointName, createOptions...) |
|
| 1014 |
- if err != nil {
|
|
| 1015 |
- return err |
|
| 1016 |
- } |
|
| 1017 |
- defer func() {
|
|
| 1018 |
- if err != nil {
|
|
| 1019 |
- if e := ep.Delete(); e != nil {
|
|
| 1020 |
- logrus.Warnf("Could not rollback container connection to network %s", idOrName)
|
|
| 1021 |
- } |
|
| 1022 |
- } |
|
| 1023 |
- }() |
|
| 1024 |
- |
|
| 1025 |
- if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil {
|
|
| 1026 |
- return err |
|
| 1027 |
- } |
|
| 1028 |
- |
|
| 1029 |
- sb := daemon.getNetworkSandbox(container) |
|
| 1030 |
- if sb == nil {
|
|
| 1031 |
- options, err := daemon.buildSandboxOptions(container, n) |
|
| 1032 |
- if err != nil {
|
|
| 1033 |
- return err |
|
| 1034 |
- } |
|
| 1035 |
- sb, err = controller.NewSandbox(container.ID, options...) |
|
| 1036 |
- if err != nil {
|
|
| 1037 |
- return err |
|
| 1038 |
- } |
|
| 1039 |
- |
|
| 1040 |
- container.updateSandboxNetworkSettings(sb) |
|
| 1041 |
- } |
|
| 1042 |
- |
|
| 1043 |
- if err := ep.Join(sb); err != nil {
|
|
| 1044 |
- return err |
|
| 1045 |
- } |
|
| 1046 |
- |
|
| 1047 |
- if err := container.updateJoinInfo(n, ep); err != nil {
|
|
| 1048 |
- return derr.ErrorCodeJoinInfo.WithArgs(err) |
|
| 1049 |
- } |
|
| 1050 |
- |
|
| 1051 |
- return nil |
|
| 1052 |
-} |
|
| 1053 |
- |
|
| 1054 |
-func (daemon *Daemon) initializeNetworking(container *Container) error {
|
|
| 1055 |
- var err error |
|
| 1056 |
- |
|
| 1057 |
- if container.hostConfig.NetworkMode.IsContainer() {
|
|
| 1058 |
- // we need to get the hosts files from the container to join |
|
| 1059 |
- nc, err := daemon.getNetworkedContainer(container.ID, container.hostConfig.NetworkMode.ConnectedContainer()) |
|
| 1060 |
- if err != nil {
|
|
| 1061 |
- return err |
|
| 1062 |
- } |
|
| 1063 |
- container.HostnamePath = nc.HostnamePath |
|
| 1064 |
- container.HostsPath = nc.HostsPath |
|
| 1065 |
- container.ResolvConfPath = nc.ResolvConfPath |
|
| 1066 |
- container.Config.Hostname = nc.Config.Hostname |
|
| 1067 |
- container.Config.Domainname = nc.Config.Domainname |
|
| 1068 |
- return nil |
|
| 1069 |
- } |
|
| 1070 |
- |
|
| 1071 |
- if container.hostConfig.NetworkMode.IsHost() {
|
|
| 1072 |
- container.Config.Hostname, err = os.Hostname() |
|
| 1073 |
- if err != nil {
|
|
| 1074 |
- return err |
|
| 1075 |
- } |
|
| 1076 |
- |
|
| 1077 |
- parts := strings.SplitN(container.Config.Hostname, ".", 2) |
|
| 1078 |
- if len(parts) > 1 {
|
|
| 1079 |
- container.Config.Hostname = parts[0] |
|
| 1080 |
- container.Config.Domainname = parts[1] |
|
| 1081 |
- } |
|
| 1082 |
- |
|
| 1083 |
- } |
|
| 1084 |
- |
|
| 1085 |
- if err := daemon.allocateNetwork(container); err != nil {
|
|
| 1086 |
- return err |
|
| 1087 |
- } |
|
| 1088 |
- |
|
| 1089 |
- return container.buildHostnameFile() |
|
| 1090 |
-} |
|
| 1091 |
- |
|
| 1092 |
-// called from the libcontainer pre-start hook to set the network |
|
| 1093 |
-// namespace configuration linkage to the libnetwork "sandbox" entity |
|
| 1094 |
-func (daemon *Daemon) setNetworkNamespaceKey(containerID string, pid int) error {
|
|
| 1095 |
- path := fmt.Sprintf("/proc/%d/ns/net", pid)
|
|
| 1096 |
- var sandbox libnetwork.Sandbox |
|
| 1097 |
- search := libnetwork.SandboxContainerWalker(&sandbox, containerID) |
|
| 1098 |
- daemon.netController.WalkSandboxes(search) |
|
| 1099 |
- if sandbox == nil {
|
|
| 1100 |
- return derr.ErrorCodeNoSandbox.WithArgs(containerID, "no sandbox found") |
|
| 1101 |
- } |
|
| 1102 |
- |
|
| 1103 |
- return sandbox.SetKey(path) |
|
| 1104 |
-} |
|
| 1105 |
- |
|
| 1106 |
-func (daemon *Daemon) getIpcContainer(container *Container) (*Container, error) {
|
|
| 1107 |
- containerID := container.hostConfig.IpcMode.Container() |
|
| 1108 |
- c, err := daemon.Get(containerID) |
|
| 1109 |
- if err != nil {
|
|
| 1110 |
- return nil, err |
|
| 1111 |
- } |
|
| 1112 |
- if !c.IsRunning() {
|
|
| 1113 |
- return nil, derr.ErrorCodeIPCRunning |
|
| 1114 |
- } |
|
| 1115 |
- return c, nil |
|
| 1116 |
-} |
|
| 1117 |
- |
|
| 1118 |
-func (container *Container) setupWorkingDirectory() error {
|
|
| 1119 |
- if container.Config.WorkingDir == "" {
|
|
| 1120 |
- return nil |
|
| 1121 |
- } |
|
| 1122 |
- container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) |
|
| 1123 |
- |
|
| 1124 |
- pth, err := container.GetResourcePath(container.Config.WorkingDir) |
|
| 1125 |
- if err != nil {
|
|
| 1126 |
- return err |
|
| 1127 |
- } |
|
| 1128 |
- |
|
| 1129 |
- pthInfo, err := os.Stat(pth) |
|
| 1130 |
- if err != nil {
|
|
| 1131 |
- if !os.IsNotExist(err) {
|
|
| 1132 |
- return err |
|
| 1133 |
- } |
|
| 1134 |
- |
|
| 1135 |
- if err := system.MkdirAll(pth, 0755); err != nil {
|
|
| 1136 |
- return err |
|
| 1137 |
- } |
|
| 1138 |
- } |
|
| 1139 |
- if pthInfo != nil && !pthInfo.IsDir() {
|
|
| 1140 |
- return derr.ErrorCodeNotADir.WithArgs(container.Config.WorkingDir) |
|
| 1141 |
- } |
|
| 1142 |
- return nil |
|
| 1143 |
-} |
|
| 1144 |
- |
|
| 1145 |
-func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*Container, error) {
|
|
| 1146 |
- nc, err := daemon.Get(connectedContainerID) |
|
| 1147 |
- if err != nil {
|
|
| 1148 |
- return nil, err |
|
| 1149 |
- } |
|
| 1150 |
- if containerID == nc.ID {
|
|
| 1151 |
- return nil, derr.ErrorCodeJoinSelf |
|
| 1152 |
- } |
|
| 1153 |
- if !nc.IsRunning() {
|
|
| 1154 |
- return nil, derr.ErrorCodeJoinRunning.WithArgs(connectedContainerID) |
|
| 1155 |
- } |
|
| 1156 |
- return nc, nil |
|
| 1157 |
-} |
|
| 1158 |
- |
|
| 1159 |
-func (daemon *Daemon) releaseNetwork(container *Container) {
|
|
| 1160 |
- if container.hostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled {
|
|
| 1161 |
- return |
|
| 1162 |
- } |
|
| 1163 |
- |
|
| 1164 |
- sid := container.NetworkSettings.SandboxID |
|
| 1165 |
- networks := container.NetworkSettings.Networks |
|
| 1166 |
- for n := range networks {
|
|
| 1167 |
- networks[n] = &network.EndpointSettings{}
|
|
| 1168 |
- } |
|
| 1169 |
- |
|
| 1170 |
- container.NetworkSettings = &network.Settings{Networks: networks}
|
|
| 1171 |
- |
|
| 1172 |
- if sid == "" || len(networks) == 0 {
|
|
| 1173 |
- return |
|
| 1174 |
- } |
|
| 1175 |
- |
|
| 1176 |
- sb, err := daemon.netController.SandboxByID(sid) |
|
| 1177 |
- if err != nil {
|
|
| 1178 |
- logrus.Errorf("error locating sandbox id %s: %v", sid, err)
|
|
| 1179 |
- return |
|
| 1180 |
- } |
|
| 1181 |
- |
|
| 1182 |
- if err := sb.Delete(); err != nil {
|
|
| 1183 |
- logrus.Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err)
|
|
| 1184 |
- } |
|
| 1185 |
-} |
|
| 1186 |
- |
|
| 1187 |
-// DisconnectFromNetwork disconnects a container from a network |
|
| 1188 |
-func (container *Container) DisconnectFromNetwork(n libnetwork.Network) error {
|
|
| 1189 |
- if !container.Running {
|
|
| 1190 |
- return derr.ErrorCodeNotRunning.WithArgs(container.ID) |
|
| 1191 |
- } |
|
| 1192 |
- |
|
| 1193 |
- if container.hostConfig.NetworkMode.IsHost() && runconfig.NetworkMode(n.Type()).IsHost() {
|
|
| 1194 |
- return runconfig.ErrConflictHostNetwork |
|
| 1195 |
- } |
|
| 1196 |
- |
|
| 1197 |
- if err := container.disconnectFromNetwork(n); err != nil {
|
|
| 1198 |
- return err |
|
| 1199 |
- } |
|
| 1200 |
- |
|
| 1201 |
- if err := container.toDiskLocking(); err != nil {
|
|
| 1202 |
- return fmt.Errorf("Error saving container to disk: %v", err)
|
|
| 1203 |
- } |
|
| 1204 |
- return nil |
|
| 1205 |
-} |
|
| 1206 |
- |
|
| 1207 |
-func (container *Container) disconnectFromNetwork(n libnetwork.Network) error {
|
|
| 1208 |
- var ( |
|
| 1209 |
- ep libnetwork.Endpoint |
|
| 1210 |
- sbox libnetwork.Sandbox |
|
| 1211 |
- ) |
|
| 1212 |
- |
|
| 1213 |
- s := func(current libnetwork.Endpoint) bool {
|
|
| 1214 |
- epInfo := current.Info() |
|
| 1215 |
- if epInfo == nil {
|
|
| 1216 |
- return false |
|
| 1217 |
- } |
|
| 1218 |
- if sb := epInfo.Sandbox(); sb != nil {
|
|
| 1219 |
- if sb.ContainerID() == container.ID {
|
|
| 1220 |
- ep = current |
|
| 1221 |
- sbox = sb |
|
| 1222 |
- return true |
|
| 1223 |
- } |
|
| 1224 |
- } |
|
| 1225 |
- return false |
|
| 1226 |
- } |
|
| 1227 |
- n.WalkEndpoints(s) |
|
| 1228 |
- |
|
| 1229 |
- if ep == nil {
|
|
| 1230 |
- return fmt.Errorf("container %s is not connected to the network", container.ID)
|
|
| 1231 |
- } |
|
| 1232 |
- |
|
| 1233 |
- if err := ep.Leave(sbox); err != nil {
|
|
| 1234 |
- return fmt.Errorf("container %s failed to leave network %s: %v", container.ID, n.Name(), err)
|
|
| 1235 |
- } |
|
| 1236 |
- |
|
| 1237 |
- if err := ep.Delete(); err != nil {
|
|
| 1238 |
- return fmt.Errorf("endpoint delete failed for container %s on network %s: %v", container.ID, n.Name(), err)
|
|
| 1239 |
- } |
|
| 1240 |
- |
|
| 1241 |
- delete(container.NetworkSettings.Networks, n.Name()) |
|
| 1242 |
- return nil |
|
| 1243 |
-} |
|
| 1244 |
- |
|
| 1245 |
-// appendNetworkMounts appends any network mounts to the array of mount points passed in |
|
| 1246 |
-func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) {
|
|
| 1247 |
- for _, mnt := range container.networkMounts() {
|
|
| 1248 |
- dest, err := container.GetResourcePath(mnt.Destination) |
|
| 1249 |
- if err != nil {
|
|
| 1250 |
- return nil, err |
|
| 1251 |
- } |
|
| 1252 |
- volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest})
|
|
| 1253 |
- } |
|
| 1254 |
- return volumeMounts, nil |
|
| 1255 |
-} |
|
| 1256 |
- |
|
| 1257 |
-func (container *Container) networkMounts() []execdriver.Mount {
|
|
| 1258 |
- var mounts []execdriver.Mount |
|
| 1259 |
- shared := container.hostConfig.NetworkMode.IsContainer() |
|
| 1260 |
- if container.ResolvConfPath != "" {
|
|
| 1261 |
- if _, err := os.Stat(container.ResolvConfPath); err != nil {
|
|
| 1262 |
- logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err)
|
|
| 1263 |
- } else {
|
|
| 1264 |
- label.Relabel(container.ResolvConfPath, container.MountLabel, shared) |
|
| 1265 |
- writable := !container.hostConfig.ReadonlyRootfs |
|
| 1266 |
- if m, exists := container.MountPoints["/etc/resolv.conf"]; exists {
|
|
| 1267 |
- writable = m.RW |
|
| 1268 |
- } |
|
| 1269 |
- mounts = append(mounts, execdriver.Mount{
|
|
| 1270 |
- Source: container.ResolvConfPath, |
|
| 1271 |
- Destination: "/etc/resolv.conf", |
|
| 1272 |
- Writable: writable, |
|
| 1273 |
- Private: true, |
|
| 1274 |
- }) |
|
| 1275 |
- } |
|
| 1276 |
- } |
|
| 1277 |
- if container.HostnamePath != "" {
|
|
| 1278 |
- if _, err := os.Stat(container.HostnamePath); err != nil {
|
|
| 1279 |
- logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err)
|
|
| 1280 |
- } else {
|
|
| 1281 |
- label.Relabel(container.HostnamePath, container.MountLabel, shared) |
|
| 1282 |
- writable := !container.hostConfig.ReadonlyRootfs |
|
| 1283 |
- if m, exists := container.MountPoints["/etc/hostname"]; exists {
|
|
| 1284 |
- writable = m.RW |
|
| 1285 |
- } |
|
| 1286 |
- mounts = append(mounts, execdriver.Mount{
|
|
| 1287 |
- Source: container.HostnamePath, |
|
| 1288 |
- Destination: "/etc/hostname", |
|
| 1289 |
- Writable: writable, |
|
| 1290 |
- Private: true, |
|
| 1291 |
- }) |
|
| 1292 |
- } |
|
| 1293 |
- } |
|
| 1294 |
- if container.HostsPath != "" {
|
|
| 1295 |
- if _, err := os.Stat(container.HostsPath); err != nil {
|
|
| 1296 |
- logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err)
|
|
| 1297 |
- } else {
|
|
| 1298 |
- label.Relabel(container.HostsPath, container.MountLabel, shared) |
|
| 1299 |
- writable := !container.hostConfig.ReadonlyRootfs |
|
| 1300 |
- if m, exists := container.MountPoints["/etc/hosts"]; exists {
|
|
| 1301 |
- writable = m.RW |
|
| 1302 |
- } |
|
| 1303 |
- mounts = append(mounts, execdriver.Mount{
|
|
| 1304 |
- Source: container.HostsPath, |
|
| 1305 |
- Destination: "/etc/hosts", |
|
| 1306 |
- Writable: writable, |
|
| 1307 |
- Private: true, |
|
| 1308 |
- }) |
|
| 1309 |
- } |
|
| 1310 |
- } |
|
| 1311 |
- return mounts |
|
| 1312 |
-} |
|
| 1313 |
- |
|
| 1314 |
-func (container *Container) copyImagePathContent(v volume.Volume, destination string) error {
|
|
| 1315 |
- rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, destination), container.basefs) |
|
| 1316 |
- if err != nil {
|
|
| 1317 |
- return err |
|
| 1318 |
- } |
|
| 1319 |
- |
|
| 1320 |
- if _, err = ioutil.ReadDir(rootfs); err != nil {
|
|
| 1321 |
- if os.IsNotExist(err) {
|
|
| 1322 |
- return nil |
|
| 1323 |
- } |
|
| 1324 |
- return err |
|
| 1325 |
- } |
|
| 1326 |
- |
|
| 1327 |
- path, err := v.Mount() |
|
| 1328 |
- if err != nil {
|
|
| 1329 |
- return err |
|
| 1330 |
- } |
|
| 1331 |
- |
|
| 1332 |
- if err := copyExistingContents(rootfs, path); err != nil {
|
|
| 1333 |
- return err |
|
| 1334 |
- } |
|
| 1335 |
- |
|
| 1336 |
- return v.Unmount() |
|
| 1337 |
-} |
|
| 1338 |
- |
|
| 1339 |
-func (container *Container) shmPath() (string, error) {
|
|
| 1340 |
- return container.getRootResourcePath("shm")
|
|
| 1341 |
-} |
|
| 1342 |
-func (container *Container) mqueuePath() (string, error) {
|
|
| 1343 |
- return container.getRootResourcePath("mqueue")
|
|
| 1344 |
-} |
|
| 1345 |
- |
|
| 1346 |
-func (container *Container) hasMountFor(path string) bool {
|
|
| 1347 |
- _, exists := container.MountPoints[path] |
|
| 1348 |
- return exists |
|
| 1349 |
-} |
|
| 1350 |
- |
|
| 1351 |
-func (daemon *Daemon) setupIpcDirs(container *Container) error {
|
|
| 1352 |
- rootUID, rootGID := daemon.GetRemappedUIDGID() |
|
| 1353 |
- if !container.hasMountFor("/dev/shm") {
|
|
| 1354 |
- shmPath, err := container.shmPath() |
|
| 1355 |
- if err != nil {
|
|
| 1356 |
- return err |
|
| 1357 |
- } |
|
| 1358 |
- |
|
| 1359 |
- if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil {
|
|
| 1360 |
- return err |
|
| 1361 |
- } |
|
| 1362 |
- |
|
| 1363 |
- shmSize := DefaultSHMSize |
|
| 1364 |
- if container.hostConfig.ShmSize != nil {
|
|
| 1365 |
- shmSize = *container.hostConfig.ShmSize |
|
| 1366 |
- } |
|
| 1367 |
- |
|
| 1368 |
- shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10) |
|
| 1369 |
- if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, container.getMountLabel())); err != nil {
|
|
| 1370 |
- return fmt.Errorf("mounting shm tmpfs: %s", err)
|
|
| 1371 |
- } |
|
| 1372 |
- if err := os.Chown(shmPath, rootUID, rootGID); err != nil {
|
|
| 1373 |
- return err |
|
| 1374 |
- } |
|
| 1375 |
- } |
|
| 1376 |
- |
|
| 1377 |
- if !container.hasMountFor("/dev/mqueue") {
|
|
| 1378 |
- mqueuePath, err := container.mqueuePath() |
|
| 1379 |
- if err != nil {
|
|
| 1380 |
- return err |
|
| 1381 |
- } |
|
| 1382 |
- |
|
| 1383 |
- if err := idtools.MkdirAllAs(mqueuePath, 0700, rootUID, rootGID); err != nil {
|
|
| 1384 |
- return err |
|
| 1385 |
- } |
|
| 1386 |
- |
|
| 1387 |
- if err := syscall.Mount("mqueue", mqueuePath, "mqueue", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), ""); err != nil {
|
|
| 1388 |
- return fmt.Errorf("mounting mqueue mqueue : %s", err)
|
|
| 1389 |
- } |
|
| 1390 |
- if err := os.Chown(mqueuePath, rootUID, rootGID); err != nil {
|
|
| 1391 |
- return err |
|
| 1392 |
- } |
|
| 1393 |
- } |
|
| 1394 |
- |
|
| 1395 |
- return nil |
|
| 1396 |
-} |
|
| 1397 |
- |
|
| 1398 |
-func (container *Container) unmountIpcMounts(unmount func(pth string) error) {
|
|
| 1399 |
- if container.hostConfig.IpcMode.IsContainer() || container.hostConfig.IpcMode.IsHost() {
|
|
| 1400 |
- return |
|
| 1401 |
- } |
|
| 1402 |
- |
|
| 1403 |
- var warnings []string |
|
| 1404 |
- |
|
| 1405 |
- if !container.hasMountFor("/dev/shm") {
|
|
| 1406 |
- shmPath, err := container.shmPath() |
|
| 1407 |
- if err != nil {
|
|
| 1408 |
- logrus.Error(err) |
|
| 1409 |
- warnings = append(warnings, err.Error()) |
|
| 1410 |
- } else if shmPath != "" {
|
|
| 1411 |
- if err := unmount(shmPath); err != nil {
|
|
| 1412 |
- warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", shmPath, err))
|
|
| 1413 |
- } |
|
| 1414 |
- |
|
| 1415 |
- } |
|
| 1416 |
- } |
|
| 1417 |
- |
|
| 1418 |
- if !container.hasMountFor("/dev/mqueue") {
|
|
| 1419 |
- mqueuePath, err := container.mqueuePath() |
|
| 1420 |
- if err != nil {
|
|
| 1421 |
- logrus.Error(err) |
|
| 1422 |
- warnings = append(warnings, err.Error()) |
|
| 1423 |
- } else if mqueuePath != "" {
|
|
| 1424 |
- if err := unmount(mqueuePath); err != nil {
|
|
| 1425 |
- warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", mqueuePath, err))
|
|
| 1426 |
- } |
|
| 1427 |
- } |
|
| 1428 |
- } |
|
| 1429 |
- |
|
| 1430 |
- if len(warnings) > 0 {
|
|
| 1431 |
- logrus.Warnf("failed to cleanup ipc mounts:\n%v", strings.Join(warnings, "\n"))
|
|
| 1432 |
- } |
|
| 1433 |
-} |
|
| 1434 |
- |
|
| 1435 |
-func (container *Container) ipcMounts() []execdriver.Mount {
|
|
| 1436 |
- var mounts []execdriver.Mount |
|
| 1437 |
- |
|
| 1438 |
- if !container.hasMountFor("/dev/shm") {
|
|
| 1439 |
- label.SetFileLabel(container.ShmPath, container.MountLabel) |
|
| 1440 |
- mounts = append(mounts, execdriver.Mount{
|
|
| 1441 |
- Source: container.ShmPath, |
|
| 1442 |
- Destination: "/dev/shm", |
|
| 1443 |
- Writable: true, |
|
| 1444 |
- Private: true, |
|
| 1445 |
- }) |
|
| 1446 |
- } |
|
| 1447 |
- |
|
| 1448 |
- if !container.hasMountFor("/dev/mqueue") {
|
|
| 1449 |
- label.SetFileLabel(container.MqueuePath, container.MountLabel) |
|
| 1450 |
- mounts = append(mounts, execdriver.Mount{
|
|
| 1451 |
- Source: container.MqueuePath, |
|
| 1452 |
- Destination: "/dev/mqueue", |
|
| 1453 |
- Writable: true, |
|
| 1454 |
- Private: true, |
|
| 1455 |
- }) |
|
| 1456 |
- } |
|
| 1457 |
- return mounts |
|
| 1458 |
-} |
|
| 1459 |
- |
|
| 1460 |
-func detachMounted(path string) error {
|
|
| 1461 |
- return syscall.Unmount(path, syscall.MNT_DETACH) |
|
| 1462 |
-} |
|
| 1463 |
- |
|
| 1464 |
-func (daemon *Daemon) mountVolumes(container *Container) error {
|
|
| 1465 |
- mounts, err := daemon.setupMounts(container) |
|
| 1466 |
- if err != nil {
|
|
| 1467 |
- return err |
|
| 1468 |
- } |
|
| 1469 |
- |
|
| 1470 |
- for _, m := range mounts {
|
|
| 1471 |
- dest, err := container.GetResourcePath(m.Destination) |
|
| 1472 |
- if err != nil {
|
|
| 1473 |
- return err |
|
| 1474 |
- } |
|
| 1475 |
- |
|
| 1476 |
- var stat os.FileInfo |
|
| 1477 |
- stat, err = os.Stat(m.Source) |
|
| 1478 |
- if err != nil {
|
|
| 1479 |
- return err |
|
| 1480 |
- } |
|
| 1481 |
- if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil {
|
|
| 1482 |
- return err |
|
| 1483 |
- } |
|
| 1484 |
- |
|
| 1485 |
- opts := "rbind,ro" |
|
| 1486 |
- if m.Writable {
|
|
| 1487 |
- opts = "rbind,rw" |
|
| 1488 |
- } |
|
| 1489 |
- |
|
| 1490 |
- if err := mount.Mount(m.Source, dest, "bind", opts); err != nil {
|
|
| 1491 |
- return err |
|
| 1492 |
- } |
|
| 1493 |
- } |
|
| 1494 |
- |
|
| 1495 |
- return nil |
|
| 1496 |
-} |
|
| 1497 |
- |
|
| 1498 |
-func (container *Container) unmountVolumes(forceSyscall bool) error {
|
|
| 1499 |
- var ( |
|
| 1500 |
- volumeMounts []volume.MountPoint |
|
| 1501 |
- err error |
|
| 1502 |
- ) |
|
| 1503 |
- |
|
| 1504 |
- for _, mntPoint := range container.MountPoints {
|
|
| 1505 |
- dest, err := container.GetResourcePath(mntPoint.Destination) |
|
| 1506 |
- if err != nil {
|
|
| 1507 |
- return err |
|
| 1508 |
- } |
|
| 1509 |
- |
|
| 1510 |
- volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest, Volume: mntPoint.Volume})
|
|
| 1511 |
- } |
|
| 1512 |
- |
|
| 1513 |
- // Append any network mounts to the list (this is a no-op on Windows) |
|
| 1514 |
- if volumeMounts, err = appendNetworkMounts(container, volumeMounts); err != nil {
|
|
| 1515 |
- return err |
|
| 1516 |
- } |
|
| 1517 |
- |
|
| 1518 |
- for _, volumeMount := range volumeMounts {
|
|
| 1519 |
- if forceSyscall {
|
|
| 1520 |
- if err := detachMounted(volumeMount.Destination); err != nil {
|
|
| 1521 |
- logrus.Warnf("%s unmountVolumes: Failed to do lazy umount %v", container.ID, err)
|
|
| 1522 |
- } |
|
| 1523 |
- } |
|
| 1524 |
- |
|
| 1525 |
- if volumeMount.Volume != nil {
|
|
| 1526 |
- if err := volumeMount.Volume.Unmount(); err != nil {
|
|
| 1527 |
- return err |
|
| 1528 |
- } |
|
| 1529 |
- } |
|
| 1530 |
- } |
|
| 1531 |
- |
|
| 1532 |
- return nil |
|
| 1533 |
-} |
|
| 1534 |
- |
|
| 1535 |
-func (container *Container) tmpfsMounts() []execdriver.Mount {
|
|
| 1536 |
- var mounts []execdriver.Mount |
|
| 1537 |
- for dest, data := range container.hostConfig.Tmpfs {
|
|
| 1538 |
- mounts = append(mounts, execdriver.Mount{
|
|
| 1539 |
- Source: "tmpfs", |
|
| 1540 |
- Destination: dest, |
|
| 1541 |
- Data: data, |
|
| 1542 |
- }) |
|
| 1543 |
- } |
|
| 1544 |
- return mounts |
|
| 1545 |
-} |
| 1546 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,213 +0,0 @@ |
| 1 |
-// +build windows |
|
| 2 |
- |
|
| 3 |
-package daemon |
|
| 4 |
- |
|
| 5 |
-import ( |
|
| 6 |
- "strings" |
|
| 7 |
- |
|
| 8 |
- "github.com/docker/docker/daemon/execdriver" |
|
| 9 |
- derr "github.com/docker/docker/errors" |
|
| 10 |
- "github.com/docker/docker/layer" |
|
| 11 |
- "github.com/docker/docker/volume" |
|
| 12 |
- "github.com/docker/libnetwork" |
|
| 13 |
-) |
|
| 14 |
- |
|
| 15 |
-// DefaultPathEnv is deliberately empty on Windows as the default path will be set by |
|
| 16 |
-// the container. Docker has no context of what the default path should be. |
|
| 17 |
-const DefaultPathEnv = "" |
|
| 18 |
- |
|
| 19 |
-// Container holds fields specific to the Windows implementation. See |
|
| 20 |
-// CommonContainer for standard fields common to all containers. |
|
| 21 |
-type Container struct {
|
|
| 22 |
- CommonContainer |
|
| 23 |
- |
|
| 24 |
- // Fields below here are platform specific. |
|
| 25 |
-} |
|
| 26 |
- |
|
| 27 |
-func killProcessDirectly(container *Container) error {
|
|
| 28 |
- return nil |
|
| 29 |
-} |
|
| 30 |
- |
|
| 31 |
-func (daemon *Daemon) setupLinkedContainers(container *Container) ([]string, error) {
|
|
| 32 |
- return nil, nil |
|
| 33 |
-} |
|
| 34 |
- |
|
| 35 |
-func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
|
|
| 36 |
- // On Windows, nothing to link. Just return the container environment. |
|
| 37 |
- return container.Config.Env |
|
| 38 |
-} |
|
| 39 |
- |
|
| 40 |
-func (daemon *Daemon) initializeNetworking(container *Container) error {
|
|
| 41 |
- return nil |
|
| 42 |
-} |
|
| 43 |
- |
|
| 44 |
-// ConnectToNetwork connects a container to the network |
|
| 45 |
-func (daemon *Daemon) ConnectToNetwork(container *Container, idOrName string) error {
|
|
| 46 |
- return nil |
|
| 47 |
-} |
|
| 48 |
- |
|
| 49 |
-// DisconnectFromNetwork disconnects a container from the network |
|
| 50 |
-func (container *Container) DisconnectFromNetwork(n libnetwork.Network) error {
|
|
| 51 |
- return nil |
|
| 52 |
-} |
|
| 53 |
- |
|
| 54 |
-func (container *Container) setupWorkingDirectory() error {
|
|
| 55 |
- return nil |
|
| 56 |
-} |
|
| 57 |
- |
|
| 58 |
-func (daemon *Daemon) populateCommand(c *Container, env []string) error {
|
|
| 59 |
- en := &execdriver.Network{
|
|
| 60 |
- Interface: nil, |
|
| 61 |
- } |
|
| 62 |
- |
|
| 63 |
- parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2) |
|
| 64 |
- switch parts[0] {
|
|
| 65 |
- case "none": |
|
| 66 |
- case "default", "": // empty string to support existing containers |
|
| 67 |
- if !c.Config.NetworkDisabled {
|
|
| 68 |
- en.Interface = &execdriver.NetworkInterface{
|
|
| 69 |
- MacAddress: c.Config.MacAddress, |
|
| 70 |
- Bridge: daemon.configStore.Bridge.VirtualSwitchName, |
|
| 71 |
- PortBindings: c.hostConfig.PortBindings, |
|
| 72 |
- |
|
| 73 |
- // TODO Windows. Include IPAddress. There already is a |
|
| 74 |
- // property IPAddress on execDrive.CommonNetworkInterface, |
|
| 75 |
- // but there is no CLI option in docker to pass through |
|
| 76 |
- // an IPAddress on docker run. |
|
| 77 |
- } |
|
| 78 |
- } |
|
| 79 |
- default: |
|
| 80 |
- return derr.ErrorCodeInvalidNetworkMode.WithArgs(c.hostConfig.NetworkMode) |
|
| 81 |
- } |
|
| 82 |
- |
|
| 83 |
- // TODO Windows. More resource controls to be implemented later. |
|
| 84 |
- resources := &execdriver.Resources{
|
|
| 85 |
- CommonResources: execdriver.CommonResources{
|
|
| 86 |
- CPUShares: c.hostConfig.CPUShares, |
|
| 87 |
- }, |
|
| 88 |
- } |
|
| 89 |
- |
|
| 90 |
- processConfig := execdriver.ProcessConfig{
|
|
| 91 |
- CommonProcessConfig: execdriver.CommonProcessConfig{
|
|
| 92 |
- Entrypoint: c.Path, |
|
| 93 |
- Arguments: c.Args, |
|
| 94 |
- Tty: c.Config.Tty, |
|
| 95 |
- }, |
|
| 96 |
- ConsoleSize: c.hostConfig.ConsoleSize, |
|
| 97 |
- } |
|
| 98 |
- |
|
| 99 |
- processConfig.Env = env |
|
| 100 |
- |
|
| 101 |
- var layerPaths []string |
|
| 102 |
- img, err := daemon.imageStore.Get(c.ImageID) |
|
| 103 |
- if err != nil {
|
|
| 104 |
- return derr.ErrorCodeGetGraph.WithArgs(c.ImageID, err) |
|
| 105 |
- } |
|
| 106 |
- |
|
| 107 |
- if img.RootFS != nil && img.RootFS.Type == "layers+base" {
|
|
| 108 |
- max := len(img.RootFS.DiffIDs) |
|
| 109 |
- for i := 0; i <= max; i++ {
|
|
| 110 |
- img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i] |
|
| 111 |
- path, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID()) |
|
| 112 |
- if err != nil {
|
|
| 113 |
- return derr.ErrorCodeGetLayer.WithArgs(err) |
|
| 114 |
- } |
|
| 115 |
- // Reverse order, expecting parent most first |
|
| 116 |
- layerPaths = append([]string{path}, layerPaths...)
|
|
| 117 |
- } |
|
| 118 |
- } |
|
| 119 |
- |
|
| 120 |
- m, err := layer.RWLayerMetadata(daemon.layerStore, c.ID) |
|
| 121 |
- if err != nil {
|
|
| 122 |
- return derr.ErrorCodeGetLayerMetadata.WithArgs(err) |
|
| 123 |
- } |
|
| 124 |
- layerFolder := m["dir"] |
|
| 125 |
- |
|
| 126 |
- c.command = &execdriver.Command{
|
|
| 127 |
- CommonCommand: execdriver.CommonCommand{
|
|
| 128 |
- ID: c.ID, |
|
| 129 |
- Rootfs: c.rootfsPath(), |
|
| 130 |
- InitPath: "/.dockerinit", |
|
| 131 |
- WorkingDir: c.Config.WorkingDir, |
|
| 132 |
- Network: en, |
|
| 133 |
- MountLabel: c.getMountLabel(), |
|
| 134 |
- Resources: resources, |
|
| 135 |
- ProcessConfig: processConfig, |
|
| 136 |
- ProcessLabel: c.getProcessLabel(), |
|
| 137 |
- }, |
|
| 138 |
- FirstStart: !c.HasBeenStartedBefore, |
|
| 139 |
- LayerFolder: layerFolder, |
|
| 140 |
- LayerPaths: layerPaths, |
|
| 141 |
- Hostname: c.Config.Hostname, |
|
| 142 |
- Isolation: c.hostConfig.Isolation, |
|
| 143 |
- ArgsEscaped: c.Config.ArgsEscaped, |
|
| 144 |
- } |
|
| 145 |
- |
|
| 146 |
- return nil |
|
| 147 |
-} |
|
| 148 |
- |
|
| 149 |
-// getSize returns real size & virtual size |
|
| 150 |
-func (daemon *Daemon) getSize(container *Container) (int64, int64) {
|
|
| 151 |
- // TODO Windows |
|
| 152 |
- return 0, 0 |
|
| 153 |
-} |
|
| 154 |
- |
|
| 155 |
-// setNetworkNamespaceKey is a no-op on Windows. |
|
| 156 |
-func (daemon *Daemon) setNetworkNamespaceKey(containerID string, pid int) error {
|
|
| 157 |
- return nil |
|
| 158 |
-} |
|
| 159 |
- |
|
| 160 |
-// allocateNetwork is a no-op on Windows. |
|
| 161 |
-func (daemon *Daemon) allocateNetwork(container *Container) error {
|
|
| 162 |
- return nil |
|
| 163 |
-} |
|
| 164 |
- |
|
| 165 |
-func (daemon *Daemon) updateNetwork(container *Container) error {
|
|
| 166 |
- return nil |
|
| 167 |
-} |
|
| 168 |
- |
|
| 169 |
-func (daemon *Daemon) releaseNetwork(container *Container) {
|
|
| 170 |
-} |
|
| 171 |
- |
|
| 172 |
-// appendNetworkMounts appends any network mounts to the array of mount points passed in. |
|
| 173 |
-// Windows does not support network mounts (not to be confused with SMB network mounts), so |
|
| 174 |
-// this is a no-op. |
|
| 175 |
-func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) {
|
|
| 176 |
- return volumeMounts, nil |
|
| 177 |
-} |
|
| 178 |
- |
|
| 179 |
-func (daemon *Daemon) setupIpcDirs(container *Container) error {
|
|
| 180 |
- return nil |
|
| 181 |
-} |
|
| 182 |
- |
|
| 183 |
-func (container *Container) unmountIpcMounts(unmount func(pth string) error) {
|
|
| 184 |
-} |
|
| 185 |
- |
|
| 186 |
-func detachMounted(path string) error {
|
|
| 187 |
- return nil |
|
| 188 |
-} |
|
| 189 |
- |
|
| 190 |
-func (container *Container) ipcMounts() []execdriver.Mount {
|
|
| 191 |
- return nil |
|
| 192 |
-} |
|
| 193 |
- |
|
| 194 |
-func (container *Container) tmpfsMounts() []execdriver.Mount {
|
|
| 195 |
- return nil |
|
| 196 |
-} |
|
| 197 |
- |
|
| 198 |
-func getDefaultRouteMtu() (int, error) {
|
|
| 199 |
- return -1, errSystemNotSupported |
|
| 200 |
-} |
|
| 201 |
- |
|
| 202 |
-// TODO Windows: Fix Post-TP4. This is a hack to allow docker cp to work |
|
| 203 |
-// against containers which have volumes. You will still be able to cp |
|
| 204 |
-// to somewhere on the container drive, but not to any mounted volumes |
|
| 205 |
-// inside the container. Without this fix, docker cp is broken to any |
|
| 206 |
-// container which has a volume, regardless of where the file is inside the |
|
| 207 |
-// container. |
|
| 208 |
-func (daemon *Daemon) mountVolumes(container *Container) error {
|
|
| 209 |
- return nil |
|
| 210 |
-} |
|
| 211 |
-func (container *Container) unmountVolumes(forceSyscall bool) error {
|
|
| 212 |
- return nil |
|
| 213 |
-} |
| ... | ... |
@@ -3,6 +3,7 @@ package daemon |
| 3 | 3 |
import ( |
| 4 | 4 |
"github.com/Sirupsen/logrus" |
| 5 | 5 |
"github.com/docker/docker/api/types" |
| 6 |
+ "github.com/docker/docker/container" |
|
| 6 | 7 |
derr "github.com/docker/docker/errors" |
| 7 | 8 |
"github.com/docker/docker/image" |
| 8 | 9 |
"github.com/docker/docker/pkg/idtools" |
| ... | ... |
@@ -48,9 +49,9 @@ func (daemon *Daemon) ContainerCreate(params *ContainerCreateConfig) (types.Cont |
| 48 | 48 |
} |
| 49 | 49 |
|
| 50 | 50 |
// Create creates a new container from the given configuration with a given name. |
| 51 |
-func (daemon *Daemon) create(params *ContainerCreateConfig) (retC *Container, retErr error) {
|
|
| 51 |
+func (daemon *Daemon) create(params *ContainerCreateConfig) (retC *container.Container, retErr error) {
|
|
| 52 | 52 |
var ( |
| 53 |
- container *Container |
|
| 53 |
+ container *container.Container |
|
| 54 | 54 |
img *image.Image |
| 55 | 55 |
imgID image.ID |
| 56 | 56 |
err error |
| ... | ... |
@@ -86,7 +87,7 @@ func (daemon *Daemon) create(params *ContainerCreateConfig) (retC *Container, re |
| 86 | 86 |
if err != nil {
|
| 87 | 87 |
return nil, err |
| 88 | 88 |
} |
| 89 |
- if err := idtools.MkdirAs(container.root, 0700, rootUID, rootGID); err != nil {
|
|
| 89 |
+ if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil {
|
|
| 90 | 90 |
return nil, err |
| 91 | 91 |
} |
| 92 | 92 |
|
| ... | ... |
@@ -105,7 +106,7 @@ func (daemon *Daemon) create(params *ContainerCreateConfig) (retC *Container, re |
| 105 | 105 |
return nil, err |
| 106 | 106 |
} |
| 107 | 107 |
|
| 108 |
- if err := container.toDiskLocking(); err != nil {
|
|
| 108 |
+ if err := container.ToDiskLocking(); err != nil {
|
|
| 109 | 109 |
logrus.Errorf("Error saving new container to disk: %v", err)
|
| 110 | 110 |
return nil, err |
| 111 | 111 |
} |
| ... | ... |
@@ -6,6 +6,7 @@ import ( |
| 6 | 6 |
"os" |
| 7 | 7 |
"path/filepath" |
| 8 | 8 |
|
| 9 |
+ "github.com/docker/docker/container" |
|
| 9 | 10 |
derr "github.com/docker/docker/errors" |
| 10 | 11 |
"github.com/docker/docker/image" |
| 11 | 12 |
"github.com/docker/docker/pkg/stringid" |
| ... | ... |
@@ -15,7 +16,7 @@ import ( |
| 15 | 15 |
) |
| 16 | 16 |
|
| 17 | 17 |
// createContainerPlatformSpecificSettings performs platform specific container create functionality |
| 18 |
-func (daemon *Daemon) createContainerPlatformSpecificSettings(container *Container, config *runconfig.Config, hostConfig *runconfig.HostConfig, img *image.Image) error {
|
|
| 18 |
+func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *runconfig.Config, hostConfig *runconfig.HostConfig, img *image.Image) error {
|
|
| 19 | 19 |
if err := daemon.Mount(container); err != nil {
|
| 20 | 20 |
return err |
| 21 | 21 |
} |
| ... | ... |
@@ -27,7 +28,7 @@ func (daemon *Daemon) createContainerPlatformSpecificSettings(container *Contain |
| 27 | 27 |
|
| 28 | 28 |
// Skip volumes for which we already have something mounted on that |
| 29 | 29 |
// destination because of a --volume-from. |
| 30 |
- if container.isDestinationMounted(destination) {
|
|
| 30 |
+ if container.IsDestinationMounted(destination) {
|
|
| 31 | 31 |
continue |
| 32 | 32 |
} |
| 33 | 33 |
path, err := container.GetResourcePath(destination) |
| ... | ... |
@@ -61,12 +62,12 @@ func (daemon *Daemon) createContainerPlatformSpecificSettings(container *Contain |
| 61 | 61 |
|
| 62 | 62 |
// never attempt to copy existing content in a container FS to a shared volume |
| 63 | 63 |
if v.DriverName() == volume.DefaultDriverName {
|
| 64 |
- if err := container.copyImagePathContent(v, destination); err != nil {
|
|
| 64 |
+ if err := container.CopyImagePathContent(v, destination); err != nil {
|
|
| 65 | 65 |
return err |
| 66 | 66 |
} |
| 67 | 67 |
} |
| 68 | 68 |
|
| 69 |
- container.addMountPointWithVolume(destination, v, true) |
|
| 69 |
+ container.AddMountPointWithVolume(destination, v, true) |
|
| 70 | 70 |
} |
| 71 | 71 |
return nil |
| 72 | 72 |
} |
| ... | ... |
@@ -3,6 +3,7 @@ package daemon |
| 3 | 3 |
import ( |
| 4 | 4 |
"fmt" |
| 5 | 5 |
|
| 6 |
+ "github.com/docker/docker/container" |
|
| 6 | 7 |
"github.com/docker/docker/image" |
| 7 | 8 |
"github.com/docker/docker/pkg/stringid" |
| 8 | 9 |
"github.com/docker/docker/runconfig" |
| ... | ... |
@@ -10,7 +11,7 @@ import ( |
| 10 | 10 |
) |
| 11 | 11 |
|
| 12 | 12 |
// createContainerPlatformSpecificSettings performs platform specific container create functionality |
| 13 |
-func (daemon *Daemon) createContainerPlatformSpecificSettings(container *Container, config *runconfig.Config, hostConfig *runconfig.HostConfig, img *image.Image) error {
|
|
| 13 |
+func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *runconfig.Config, hostConfig *runconfig.HostConfig, img *image.Image) error {
|
|
| 14 | 14 |
for spec := range config.Volumes {
|
| 15 | 15 |
|
| 16 | 16 |
mp, err := volume.ParseMountSpec(spec, hostConfig.VolumeDriver) |
| ... | ... |
@@ -25,7 +26,7 @@ func (daemon *Daemon) createContainerPlatformSpecificSettings(container *Contain |
| 25 | 25 |
|
| 26 | 26 |
// Skip volumes for which we already have something mounted on that |
| 27 | 27 |
// destination because of a --volume-from. |
| 28 |
- if container.isDestinationMounted(mp.Destination) {
|
|
| 28 |
+ if container.IsDestinationMounted(mp.Destination) {
|
|
| 29 | 29 |
continue |
| 30 | 30 |
} |
| 31 | 31 |
|
| ... | ... |
@@ -71,13 +72,13 @@ func (daemon *Daemon) createContainerPlatformSpecificSettings(container *Contain |
| 71 | 71 |
// |
| 72 | 72 |
// // never attempt to copy existing content in a container FS to a shared volume |
| 73 | 73 |
// if v.DriverName() == volume.DefaultDriverName {
|
| 74 |
- // if err := container.copyImagePathContent(v, mp.Destination); err != nil {
|
|
| 74 |
+ // if err := container.CopyImagePathContent(v, mp.Destination); err != nil {
|
|
| 75 | 75 |
// return err |
| 76 | 76 |
// } |
| 77 | 77 |
// } |
| 78 | 78 |
|
| 79 | 79 |
// Add it to container.MountPoints |
| 80 |
- container.addMountPointWithVolume(mp.Destination, v, mp.RW) |
|
| 80 |
+ container.AddMountPointWithVolume(mp.Destination, v, mp.RW) |
|
| 81 | 81 |
} |
| 82 | 82 |
return nil |
| 83 | 83 |
} |
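
The method renames above (isDestinationMounted to IsDestinationMounted, copyImagePathContent to CopyImagePathContent, addMountPointWithVolume to AddMountPointWithVolume) are forced by Go's export rule: once Container lives in its own package, the daemon can only reach identifiers that start with an upper-case letter. A minimal two-file sketch of that situation, using a hypothetical module path and a trimmed-down Container rather than the real docker/docker types:

// container/container.go -- hypothetical layout for illustration only
package container

// Container is a trimmed-down stand-in for the real type.
type Container struct {
	mountPoints map[string]bool // unexported field: only this package can touch it
}

// IsDestinationMounted is exported (upper-case), so other packages may call it.
func (c *Container) IsDestinationMounted(destination string) bool {
	return c.mountPoints[destination]
}

// isDestinationMounted (lower-case) would still compile, but would be
// invisible to the daemon package.

// daemon/create.go -- hypothetical
package daemon

import "example.com/demo/container" // assumed module path

func skipIfMounted(c *container.Container, destination string) bool {
	return c.IsDestinationMounted(destination) // only the exported form is reachable here
}
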
| ... | ... |
@@ -23,6 +23,7 @@ import ( |
| 23 | 23 |
"github.com/docker/docker/api" |
| 24 | 24 |
"github.com/docker/docker/api/types" |
| 25 | 25 |
"github.com/docker/docker/cliconfig" |
| 26 |
+ "github.com/docker/docker/container" |
|
| 26 | 27 |
"github.com/docker/docker/daemon/events" |
| 27 | 28 |
"github.com/docker/docker/daemon/exec" |
| 28 | 29 |
"github.com/docker/docker/daemon/execdriver" |
| ... | ... |
@@ -84,17 +85,17 @@ func (e ErrImageDoesNotExist) Error() string {
|
| 84 | 84 |
} |
| 85 | 85 |
|
| 86 | 86 |
type contStore struct {
|
| 87 |
- s map[string]*Container |
|
| 87 |
+ s map[string]*container.Container |
|
| 88 | 88 |
sync.Mutex |
| 89 | 89 |
} |
| 90 | 90 |
|
| 91 |
-func (c *contStore) Add(id string, cont *Container) {
|
|
| 91 |
+func (c *contStore) Add(id string, cont *container.Container) {
|
|
| 92 | 92 |
c.Lock() |
| 93 | 93 |
c.s[id] = cont |
| 94 | 94 |
c.Unlock() |
| 95 | 95 |
} |
| 96 | 96 |
|
| 97 |
-func (c *contStore) Get(id string) *Container {
|
|
| 97 |
+func (c *contStore) Get(id string) *container.Container {
|
|
| 98 | 98 |
c.Lock() |
| 99 | 99 |
res := c.s[id] |
| 100 | 100 |
c.Unlock() |
| ... | ... |
@@ -107,7 +108,7 @@ func (c *contStore) Delete(id string) {
|
| 107 | 107 |
c.Unlock() |
| 108 | 108 |
} |
| 109 | 109 |
|
| 110 |
-func (c *contStore) List() []*Container {
|
|
| 110 |
+func (c *contStore) List() []*container.Container {
|
|
| 111 | 111 |
containers := new(History) |
| 112 | 112 |
c.Lock() |
| 113 | 113 |
for _, cont := range c.s {
|
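
contStore, as changed above, is a mutex-guarded map from container ID to *container.Container. A standalone sketch of the same pattern, with a stand-in Container struct instead of the real docker/docker/container type (the real List collects into the daemon's History type; this version simply returns a slice):

package main

import (
	"fmt"
	"sync"
)

// Container stands in for the real *container.Container.
type Container struct {
	ID   string
	Name string
}

// contStore guards a map of containers with a mutex, mirroring the hunk above.
type contStore struct {
	s map[string]*Container
	sync.Mutex
}

func (c *contStore) Add(id string, cont *Container) {
	c.Lock()
	c.s[id] = cont
	c.Unlock()
}

func (c *contStore) Get(id string) *Container {
	c.Lock()
	res := c.s[id]
	c.Unlock()
	return res
}

func (c *contStore) List() []*Container {
	c.Lock()
	defer c.Unlock()
	list := make([]*Container, 0, len(c.s))
	for _, cont := range c.s {
		list = append(list, cont)
	}
	return list
}

func main() {
	store := &contStore{s: make(map[string]*Container)}
	store.Add("abc123", &Container{ID: "abc123", Name: "tender_bardeen"})
	fmt.Println(store.Get("abc123").Name, len(store.List()))
}
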
| ... | ... |
@@ -155,7 +156,7 @@ type Daemon struct {
|
| 155 | 155 |
// - A partial container ID prefix (e.g. short ID) of any length that is |
| 156 | 156 |
// unique enough to only return a single container object |
| 157 | 157 |
// If none of these searches succeed, an error is returned |
| 158 |
-func (daemon *Daemon) Get(prefixOrName string) (*Container, error) {
|
|
| 158 |
+func (daemon *Daemon) Get(prefixOrName string) (*container.Container, error) {
|
|
| 159 | 159 |
if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil {
|
| 160 | 160 |
// prefix is an exact match to a full container ID |
| 161 | 161 |
return containerByID, nil |
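
The comment on Get describes the three lookup forms it accepts: an exact container ID, a container name, or an ID prefix that is unique across all containers. A rough sketch of that resolution order; the real daemon resolves prefixes through its truncindex package, while this stand-in just scans a map:

package main

import (
	"errors"
	"fmt"
	"strings"
)

var (
	errNotFound  = errors.New("no such container")
	errAmbiguous = errors.New("prefix matches more than one container")
)

// resolve mimics the lookup order described above: exact ID, exact name,
// then a unique ID prefix. ids maps full container IDs to names.
func resolve(ids map[string]string, prefixOrName string) (string, error) {
	if _, ok := ids[prefixOrName]; ok {
		return prefixOrName, nil // exact full-ID match
	}
	for id, name := range ids {
		if name == prefixOrName {
			return id, nil // exact name match
		}
	}
	var match string
	for id := range ids {
		if strings.HasPrefix(id, prefixOrName) {
			if match != "" {
				return "", errAmbiguous
			}
			match = id
		}
	}
	if match == "" {
		return "", errNotFound
	}
	return match, nil // unique short-ID match
}

func main() {
	ids := map[string]string{
		"5a4ff6a163ad4533": "tender_bardeen",
		"3cdbd1aa394fd685": "drunk_hawking",
	}
	fmt.Println(resolve(ids, "5a4"))            // unique prefix -> full ID
	fmt.Println(resolve(ids, "drunk_hawking")) // name -> full ID
}
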
| ... | ... |
@@ -188,7 +189,7 @@ func (daemon *Daemon) Exists(id string) bool {
|
| 188 | 188 |
// IsPaused returns a bool indicating if the specified container is paused. |
| 189 | 189 |
func (daemon *Daemon) IsPaused(id string) bool {
|
| 190 | 190 |
c, _ := daemon.Get(id) |
| 191 |
- return c.State.isPaused() |
|
| 191 |
+ return c.State.IsPaused() |
|
| 192 | 192 |
} |
| 193 | 193 |
|
| 194 | 194 |
func (daemon *Daemon) containerRoot(id string) string {
|
| ... | ... |
@@ -197,10 +198,10 @@ func (daemon *Daemon) containerRoot(id string) string {
|
| 197 | 197 |
|
| 198 | 198 |
// Load reads the contents of a container from disk |
| 199 | 199 |
// This is typically done at startup. |
| 200 |
-func (daemon *Daemon) load(id string) (*Container, error) {
|
|
| 200 |
+func (daemon *Daemon) load(id string) (*container.Container, error) {
|
|
| 201 | 201 |
container := daemon.newBaseContainer(id) |
| 202 | 202 |
|
| 203 |
- if err := container.fromDisk(); err != nil {
|
|
| 203 |
+ if err := container.FromDisk(); err != nil {
|
|
| 204 | 204 |
return nil, err |
| 205 | 205 |
} |
| 206 | 206 |
|
| ... | ... |
@@ -212,7 +213,7 @@ func (daemon *Daemon) load(id string) (*Container, error) {
|
| 212 | 212 |
} |
| 213 | 213 |
|
| 214 | 214 |
// Register makes a container object usable by the daemon as <container.ID> |
| 215 |
-func (daemon *Daemon) Register(container *Container) error {
|
|
| 215 |
+func (daemon *Daemon) Register(container *container.Container) error {
|
|
| 216 | 216 |
if daemon.Exists(container.ID) {
|
| 217 | 217 |
return fmt.Errorf("Container is already loaded")
|
| 218 | 218 |
} |
| ... | ... |
@@ -238,7 +239,7 @@ func (daemon *Daemon) Register(container *Container) error {
|
| 238 | 238 |
if container.IsRunning() {
|
| 239 | 239 |
logrus.Debugf("killing old running container %s", container.ID)
|
| 240 | 240 |
// Set exit code to 128 + SIGKILL (9) to properly represent unsuccessful exit |
| 241 |
- container.setStoppedLocking(&execdriver.ExitStatus{ExitCode: 137})
|
|
| 241 |
+ container.SetStoppedLocking(&execdriver.ExitStatus{ExitCode: 137})
|
|
| 242 | 242 |
// use the current driver and ensure that the container is dead x.x |
| 243 | 243 |
cmd := &execdriver.Command{
|
| 244 | 244 |
CommonCommand: execdriver.CommonCommand{
|
| ... | ... |
@@ -247,10 +248,10 @@ func (daemon *Daemon) Register(container *Container) error {
|
| 247 | 247 |
} |
| 248 | 248 |
daemon.execDriver.Terminate(cmd) |
| 249 | 249 |
|
| 250 |
- container.unmountIpcMounts(mount.Unmount) |
|
| 250 |
+ container.UnmountIpcMounts(mount.Unmount) |
|
| 251 | 251 |
|
| 252 | 252 |
daemon.Unmount(container) |
| 253 |
- if err := container.toDiskLocking(); err != nil {
|
|
| 253 |
+ if err := container.ToDiskLocking(); err != nil {
|
|
| 254 | 254 |
logrus.Errorf("Error saving stopped state to disk: %v", err)
|
| 255 | 255 |
} |
| 256 | 256 |
} |
| ... | ... |
@@ -262,7 +263,7 @@ func (daemon *Daemon) Register(container *Container) error {
|
| 262 | 262 |
return nil |
| 263 | 263 |
} |
| 264 | 264 |
|
| 265 |
-func (daemon *Daemon) ensureName(container *Container) error {
|
|
| 265 |
+func (daemon *Daemon) ensureName(container *container.Container) error {
|
|
| 266 | 266 |
if container.Name == "" {
|
| 267 | 267 |
name, err := daemon.generateNewName(container.ID) |
| 268 | 268 |
if err != nil {
|
| ... | ... |
@@ -270,7 +271,7 @@ func (daemon *Daemon) ensureName(container *Container) error {
|
| 270 | 270 |
} |
| 271 | 271 |
container.Name = name |
| 272 | 272 |
|
| 273 |
- if err := container.toDiskLocking(); err != nil {
|
|
| 273 |
+ if err := container.ToDiskLocking(); err != nil {
|
|
| 274 | 274 |
logrus.Errorf("Error saving container name to disk: %v", err)
|
| 275 | 275 |
} |
| 276 | 276 |
} |
| ... | ... |
@@ -279,7 +280,7 @@ func (daemon *Daemon) ensureName(container *Container) error {
|
| 279 | 279 |
|
| 280 | 280 |
func (daemon *Daemon) restore() error {
|
| 281 | 281 |
type cr struct {
|
| 282 |
- container *Container |
|
| 282 |
+ container *container.Container |
|
| 283 | 283 |
registered bool |
| 284 | 284 |
} |
| 285 | 285 |
|
| ... | ... |
@@ -336,7 +337,7 @@ func (daemon *Daemon) restore() error {
|
| 336 | 336 |
for _, c := range containers {
|
| 337 | 337 |
group.Add(1) |
| 338 | 338 |
|
| 339 |
- go func(container *Container, registered bool) {
|
|
| 339 |
+ go func(container *container.Container, registered bool) {
|
|
| 340 | 340 |
defer group.Done() |
| 341 | 341 |
|
| 342 | 342 |
if !registered {
|
| ... | ... |
@@ -355,7 +356,7 @@ func (daemon *Daemon) restore() error {
|
| 355 | 355 |
|
| 356 | 356 |
// check the restart policy on the containers and restart any container with |
| 357 | 357 |
// the restart policy of "always" |
| 358 |
- if daemon.configStore.AutoRestart && container.shouldRestart() {
|
|
| 358 |
+ if daemon.configStore.AutoRestart && container.ShouldRestart() {
|
|
| 359 | 359 |
logrus.Debugf("Starting container %s", container.ID)
|
| 360 | 360 |
|
| 361 | 361 |
if err := daemon.containerStart(container); err != nil {
|
| ... | ... |
@@ -474,7 +475,7 @@ func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint *stringutils.StrSlic |
| 474 | 474 |
return cmdSlice[0], cmdSlice[1:] |
| 475 | 475 |
} |
| 476 | 476 |
|
| 477 |
-func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID image.ID) (*Container, error) {
|
|
| 477 |
+func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID image.ID) (*container.Container, error) {
|
|
| 478 | 478 |
var ( |
| 479 | 479 |
id string |
| 480 | 480 |
err error |
| ... | ... |
@@ -493,7 +494,7 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID |
| 493 | 493 |
base.Path = entrypoint |
| 494 | 494 |
base.Args = args //FIXME: de-duplicate from config |
| 495 | 495 |
base.Config = config |
| 496 |
- base.hostConfig = &runconfig.HostConfig{}
|
|
| 496 |
+ base.HostConfig = &runconfig.HostConfig{}
|
|
| 497 | 497 |
base.ImageID = imgID |
| 498 | 498 |
base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName}
|
| 499 | 499 |
base.Name = name |
| ... | ... |
@@ -516,7 +517,7 @@ func GetFullContainerName(name string) (string, error) {
|
| 516 | 516 |
} |
| 517 | 517 |
|
| 518 | 518 |
// GetByName returns a container given a name. |
| 519 |
-func (daemon *Daemon) GetByName(name string) (*Container, error) {
|
|
| 519 |
+func (daemon *Daemon) GetByName(name string) (*container.Container, error) {
|
|
| 520 | 520 |
fullName, err := GetFullContainerName(name) |
| 521 | 521 |
if err != nil {
|
| 522 | 522 |
return nil, err |
| ... | ... |
@@ -576,12 +577,12 @@ func (daemon *Daemon) GetLabels(id string) map[string]string {
|
| 576 | 576 |
// children returns all child containers of the container with the |
| 577 | 577 |
// given name. The containers are returned as a map from the container |
| 578 | 578 |
// name to a pointer to Container. |
| 579 |
-func (daemon *Daemon) children(name string) (map[string]*Container, error) {
|
|
| 579 |
+func (daemon *Daemon) children(name string) (map[string]*container.Container, error) {
|
|
| 580 | 580 |
name, err := GetFullContainerName(name) |
| 581 | 581 |
if err != nil {
|
| 582 | 582 |
return nil, err |
| 583 | 583 |
} |
| 584 |
- children := make(map[string]*Container) |
|
| 584 |
+ children := make(map[string]*container.Container) |
|
| 585 | 585 |
|
| 586 | 586 |
err = daemon.containerGraphDB.Walk(name, func(p string, e *graphdb.Entity) error {
|
| 587 | 587 |
c, err := daemon.Get(e.ID()) |
| ... | ... |
@@ -609,7 +610,7 @@ func (daemon *Daemon) parents(name string) ([]string, error) {
|
| 609 | 609 |
return daemon.containerGraphDB.Parents(name) |
| 610 | 610 |
} |
| 611 | 611 |
|
| 612 |
-func (daemon *Daemon) registerLink(parent, child *Container, alias string) error {
|
|
| 612 |
+func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
|
|
| 613 | 613 |
fullName := filepath.Join(parent.Name, alias) |
| 614 | 614 |
if !daemon.containerGraphDB.Exists(fullName) {
|
| 615 | 615 |
_, err := daemon.containerGraphDB.Set(fullName, child.ID) |
| ... | ... |
@@ -830,7 +831,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo |
| 830 | 830 |
|
| 831 | 831 |
d.ID = trustKey.PublicKey().KeyID() |
| 832 | 832 |
d.repository = daemonRepo |
| 833 |
- d.containers = &contStore{s: make(map[string]*Container)}
|
|
| 833 |
+ d.containers = &contStore{s: make(map[string]*container.Container)}
|
|
| 834 | 834 |
d.execCommands = exec.NewStore() |
| 835 | 835 |
d.tagStore = tagStore |
| 836 | 836 |
d.distributionPool = distributionPool |
| ... | ... |
@@ -861,9 +862,9 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo |
| 861 | 861 |
return d, nil |
| 862 | 862 |
} |
| 863 | 863 |
|
| 864 |
-func (daemon *Daemon) shutdownContainer(c *Container) error {
|
|
| 864 |
+func (daemon *Daemon) shutdownContainer(c *container.Container) error {
|
|
| 865 | 865 |
// TODO(windows): Handle docker restart with paused containers |
| 866 |
- if c.isPaused() {
|
|
| 866 |
+ if c.IsPaused() {
|
|
| 867 | 867 |
// To terminate a process in freezer cgroup, we should send |
| 868 | 868 |
// SIGTERM to this process then unfreeze it, and the process will |
| 869 | 869 |
// force to terminate immediately. |
| ... | ... |
@@ -906,20 +907,20 @@ func (daemon *Daemon) Shutdown() error {
|
| 906 | 906 |
if daemon.containers != nil {
|
| 907 | 907 |
group := sync.WaitGroup{}
|
| 908 | 908 |
logrus.Debug("starting clean shutdown of all containers...")
|
| 909 |
- for _, container := range daemon.List() {
|
|
| 910 |
- if !container.IsRunning() {
|
|
| 909 |
+ for _, cont := range daemon.List() {
|
|
| 910 |
+ if !cont.IsRunning() {
|
|
| 911 | 911 |
continue |
| 912 | 912 |
} |
| 913 |
- logrus.Debugf("stopping %s", container.ID)
|
|
| 913 |
+ logrus.Debugf("stopping %s", cont.ID)
|
|
| 914 | 914 |
group.Add(1) |
| 915 |
- go func(c *Container) {
|
|
| 915 |
+ go func(c *container.Container) {
|
|
| 916 | 916 |
defer group.Done() |
| 917 | 917 |
if err := daemon.shutdownContainer(c); err != nil {
|
| 918 | 918 |
logrus.Errorf("Stop container error: %v", err)
|
| 919 | 919 |
return |
| 920 | 920 |
} |
| 921 | 921 |
logrus.Debugf("container stopped %s", c.ID)
|
| 922 |
- }(container) |
|
| 922 |
+ }(cont) |
|
| 923 | 923 |
} |
| 924 | 924 |
group.Wait() |
| 925 | 925 |
} |
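
One small detail in the Shutdown hunk above: the loop variable is renamed from container to cont, presumably because container now refers to the imported package, and each value is still handed to the goroutine as an argument so every goroutine stops the container it was given. A minimal sketch of the pattern, with a stand-in stop function instead of the real shutdownContainer:

package main

import (
	"fmt"
	"sync"
)

// Container stands in for the real *container.Container.
type Container struct{ ID string }

func shutdownContainer(c *Container) error {
	fmt.Println("stopping", c.ID)
	return nil
}

func main() {
	containers := []*Container{{ID: "aaa"}, {ID: "bbb"}, {ID: "ccc"}}

	group := sync.WaitGroup{}
	for _, cont := range containers { // "cont", not "container": that name is taken by the package
		group.Add(1)
		go func(c *Container) { // pass the value in so each goroutine works on its own container
			defer group.Done()
			if err := shutdownContainer(c); err != nil {
				fmt.Println("stop container error:", err)
				return
			}
			fmt.Println("container stopped", c.ID)
		}(cont)
	}
	group.Wait()
}
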
| ... | ... |
@@ -948,9 +949,9 @@ func (daemon *Daemon) Shutdown() error {
|
| 948 | 948 |
return nil |
| 949 | 949 |
} |
| 950 | 950 |
|
| 951 |
-// Mount sets container.basefs |
|
| 951 |
+// Mount sets container.BaseFS |
|
| 952 | 952 |
// (is it not set coming in? why is it unset?) |
| 953 |
-func (daemon *Daemon) Mount(container *Container) error {
|
|
| 953 |
+func (daemon *Daemon) Mount(container *container.Container) error {
|
|
| 954 | 954 |
var layerID layer.ChainID |
| 955 | 955 |
if container.ImageID != "" {
|
| 956 | 956 |
img, err := daemon.imageStore.Get(container.ImageID) |
| ... | ... |
@@ -959,7 +960,7 @@ func (daemon *Daemon) Mount(container *Container) error {
|
| 959 | 959 |
} |
| 960 | 960 |
layerID = img.RootFS.ChainID() |
| 961 | 961 |
} |
| 962 |
- rwlayer, err := daemon.layerStore.Mount(container.ID, layerID, container.getMountLabel(), daemon.setupInitLayer) |
|
| 962 |
+ rwlayer, err := daemon.layerStore.Mount(container.ID, layerID, container.GetMountLabel(), daemon.setupInitLayer) |
|
| 963 | 963 |
if err != nil {
|
| 964 | 964 |
return err |
| 965 | 965 |
} |
| ... | ... |
@@ -969,56 +970,56 @@ func (daemon *Daemon) Mount(container *Container) error {
|
| 969 | 969 |
} |
| 970 | 970 |
logrus.Debugf("container mounted via layerStore: %v", dir)
|
| 971 | 971 |
|
| 972 |
- if container.basefs != dir {
|
|
| 972 |
+ if container.BaseFS != dir {
|
|
| 973 | 973 |
// The mount path reported by the graph driver should always be trusted on Windows, since the |
| 974 | 974 |
// volume path for a given mounted layer may change over time. This should only be an error |
| 975 | 975 |
// on non-Windows operating systems. |
| 976 |
- if container.basefs != "" && runtime.GOOS != "windows" {
|
|
| 976 |
+ if container.BaseFS != "" && runtime.GOOS != "windows" {
|
|
| 977 | 977 |
daemon.Unmount(container) |
| 978 | 978 |
return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
|
| 979 |
- daemon.driver, container.ID, container.basefs, dir) |
|
| 979 |
+ daemon.driver, container.ID, container.BaseFS, dir) |
|
| 980 | 980 |
} |
| 981 | 981 |
} |
| 982 |
- container.basefs = dir // TODO: combine these fields |
|
| 983 |
- container.rwlayer = rwlayer |
|
| 982 |
+ container.BaseFS = dir // TODO: combine these fields |
|
| 983 |
+ container.RWLayer = rwlayer |
|
| 984 | 984 |
return nil |
| 985 | 985 |
} |
| 986 | 986 |
|
| 987 | 987 |
// Unmount unsets the container base filesystem |
| 988 |
-func (daemon *Daemon) Unmount(container *Container) {
|
|
| 988 |
+func (daemon *Daemon) Unmount(container *container.Container) {
|
|
| 989 | 989 |
if err := daemon.layerStore.Unmount(container.ID); err != nil {
|
| 990 | 990 |
logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
|
| 991 | 991 |
} |
| 992 | 992 |
} |
| 993 | 993 |
|
| 994 | 994 |
// Run uses the execution driver to run a given container |
| 995 |
-func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
|
|
| 995 |
+func (daemon *Daemon) Run(c *container.Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
|
|
| 996 | 996 |
hooks := execdriver.Hooks{
|
| 997 | 997 |
Start: startCallback, |
| 998 | 998 |
} |
| 999 | 999 |
hooks.PreStart = append(hooks.PreStart, func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
|
| 1000 | 1000 |
return daemon.setNetworkNamespaceKey(c.ID, pid) |
| 1001 | 1001 |
}) |
| 1002 |
- return daemon.execDriver.Run(c.command, pipes, hooks) |
|
| 1002 |
+ return daemon.execDriver.Run(c.Command, pipes, hooks) |
|
| 1003 | 1003 |
} |
| 1004 | 1004 |
|
| 1005 |
-func (daemon *Daemon) kill(c *Container, sig int) error {
|
|
| 1006 |
- return daemon.execDriver.Kill(c.command, sig) |
|
| 1005 |
+func (daemon *Daemon) kill(c *container.Container, sig int) error {
|
|
| 1006 |
+ return daemon.execDriver.Kill(c.Command, sig) |
|
| 1007 | 1007 |
} |
| 1008 | 1008 |
|
| 1009 |
-func (daemon *Daemon) stats(c *Container) (*execdriver.ResourceStats, error) {
|
|
| 1009 |
+func (daemon *Daemon) stats(c *container.Container) (*execdriver.ResourceStats, error) {
|
|
| 1010 | 1010 |
return daemon.execDriver.Stats(c.ID) |
| 1011 | 1011 |
} |
| 1012 | 1012 |
|
| 1013 |
-func (daemon *Daemon) subscribeToContainerStats(c *Container) chan interface{} {
|
|
| 1013 |
+func (daemon *Daemon) subscribeToContainerStats(c *container.Container) chan interface{} {
|
|
| 1014 | 1014 |
return daemon.statsCollector.collect(c) |
| 1015 | 1015 |
} |
| 1016 | 1016 |
|
| 1017 |
-func (daemon *Daemon) unsubscribeToContainerStats(c *Container, ch chan interface{}) {
|
|
| 1017 |
+func (daemon *Daemon) unsubscribeToContainerStats(c *container.Container, ch chan interface{}) {
|
|
| 1018 | 1018 |
daemon.statsCollector.unsubscribe(c, ch) |
| 1019 | 1019 |
} |
| 1020 | 1020 |
|
| 1021 |
-func (daemon *Daemon) changes(container *Container) ([]archive.Change, error) {
|
|
| 1021 |
+func (daemon *Daemon) changes(container *container.Container) ([]archive.Change, error) {
|
|
| 1022 | 1022 |
return daemon.layerStore.Changes(container.ID) |
| 1023 | 1023 |
} |
| 1024 | 1024 |
|
| ... | ... |
@@ -1347,7 +1348,7 @@ func tempDir(rootDir string, rootUID, rootGID int) (string, error) {
|
| 1347 | 1347 |
return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID) |
| 1348 | 1348 |
} |
| 1349 | 1349 |
|
| 1350 |
-func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
|
|
| 1350 |
+func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *runconfig.HostConfig) error {
|
|
| 1351 | 1351 |
container.Lock() |
| 1352 | 1352 |
if err := parseSecurityOpt(container, hostConfig); err != nil {
|
| 1353 | 1353 |
container.Unlock() |
| ... | ... |
@@ -1369,8 +1370,8 @@ func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig. |
| 1369 | 1369 |
return err |
| 1370 | 1370 |
} |
| 1371 | 1371 |
|
| 1372 |
- container.hostConfig = hostConfig |
|
| 1373 |
- container.toDisk() |
|
| 1372 |
+ container.HostConfig = hostConfig |
|
| 1373 |
+ container.ToDisk() |
|
| 1374 | 1374 |
return nil |
| 1375 | 1375 |
} |
| 1376 | 1376 |
|
| ... | ... |
@@ -1466,7 +1467,7 @@ func (daemon *Daemon) IsShuttingDown() bool {
|
| 1466 | 1466 |
} |
| 1467 | 1467 |
|
| 1468 | 1468 |
// GetContainerStats collects all the stats published by a container |
| 1469 |
-func (daemon *Daemon) GetContainerStats(container *Container) (*execdriver.ResourceStats, error) {
|
|
| 1469 |
+func (daemon *Daemon) GetContainerStats(container *container.Container) (*execdriver.ResourceStats, error) {
|
|
| 1470 | 1470 |
stats, err := daemon.stats(container) |
| 1471 | 1471 |
if err != nil {
|
| 1472 | 1472 |
return nil, err |
| ... | ... |
@@ -1482,7 +1483,7 @@ func (daemon *Daemon) GetContainerStats(container *Container) (*execdriver.Resou |
| 1482 | 1482 |
return stats, nil |
| 1483 | 1483 |
} |
| 1484 | 1484 |
|
| 1485 |
-func (daemon *Daemon) getNetworkStats(c *Container) ([]*libcontainer.NetworkInterface, error) {
|
|
| 1485 |
+func (daemon *Daemon) getNetworkStats(c *container.Container) ([]*libcontainer.NetworkInterface, error) {
|
|
| 1486 | 1486 |
var list []*libcontainer.NetworkInterface |
| 1487 | 1487 |
|
| 1488 | 1488 |
sb, err := daemon.netController.SandboxByID(c.NetworkSettings.SandboxID) |
| ... | ... |
@@ -1505,8 +1506,8 @@ func (daemon *Daemon) getNetworkStats(c *Container) ([]*libcontainer.NetworkInte |
| 1505 | 1505 |
|
| 1506 | 1506 |
// newBaseContainer creates a new container with its initial |
| 1507 | 1507 |
// configuration based on the root storage from the daemon. |
| 1508 |
-func (daemon *Daemon) newBaseContainer(id string) *Container {
|
|
| 1509 |
- return newBaseContainer(id, daemon.containerRoot(id)) |
|
| 1508 |
+func (daemon *Daemon) newBaseContainer(id string) *container.Container {
|
|
| 1509 |
+ return container.NewBaseContainer(id, daemon.containerRoot(id)) |
|
| 1510 | 1510 |
} |
| 1511 | 1511 |
|
| 1512 | 1512 |
func convertLnNetworkStats(name string, stats *lntypes.InterfaceStatistics) *libcontainer.NetworkInterface {
|
| ... | ... |
@@ -1521,3 +1522,10 @@ func convertLnNetworkStats(name string, stats *lntypes.InterfaceStatistics) *lib |
| 1521 | 1521 |
n.TxDropped = stats.TxDropped |
| 1522 | 1522 |
return n |
| 1523 | 1523 |
} |
| 1524 |
+ |
|
| 1525 |
+func validateID(id string) error {
|
|
| 1526 |
+ if id == "" {
|
|
| 1527 |
+ return derr.ErrorCodeEmptyID |
|
| 1528 |
+ } |
|
| 1529 |
+ return nil |
|
| 1530 |
+} |
| ... | ... |
@@ -1,13 +1,17 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "io/ioutil" |
|
| 4 | 5 |
"os" |
| 5 | 6 |
"path" |
| 7 |
+ "path/filepath" |
|
| 6 | 8 |
"testing" |
| 7 | 9 |
|
| 10 |
+ "github.com/docker/docker/container" |
|
| 8 | 11 |
"github.com/docker/docker/pkg/graphdb" |
| 9 | 12 |
"github.com/docker/docker/pkg/truncindex" |
| 10 | 13 |
"github.com/docker/docker/runconfig" |
| 14 |
+ "github.com/docker/docker/volume" |
|
| 11 | 15 |
volumedrivers "github.com/docker/docker/volume/drivers" |
| 12 | 16 |
"github.com/docker/docker/volume/local" |
| 13 | 17 |
"github.com/docker/docker/volume/store" |
| ... | ... |
@@ -18,43 +22,43 @@ import ( |
| 18 | 18 |
// |
| 19 | 19 |
|
| 20 | 20 |
func TestGet(t *testing.T) {
|
| 21 |
- c1 := &Container{
|
|
| 22 |
- CommonContainer: CommonContainer{
|
|
| 21 |
+ c1 := &container.Container{
|
|
| 22 |
+ CommonContainer: container.CommonContainer{
|
|
| 23 | 23 |
ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", |
| 24 | 24 |
Name: "tender_bardeen", |
| 25 | 25 |
}, |
| 26 | 26 |
} |
| 27 | 27 |
|
| 28 |
- c2 := &Container{
|
|
| 29 |
- CommonContainer: CommonContainer{
|
|
| 28 |
+ c2 := &container.Container{
|
|
| 29 |
+ CommonContainer: container.CommonContainer{
|
|
| 30 | 30 |
ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de", |
| 31 | 31 |
Name: "drunk_hawking", |
| 32 | 32 |
}, |
| 33 | 33 |
} |
| 34 | 34 |
|
| 35 |
- c3 := &Container{
|
|
| 36 |
- CommonContainer: CommonContainer{
|
|
| 35 |
+ c3 := &container.Container{
|
|
| 36 |
+ CommonContainer: container.CommonContainer{
|
|
| 37 | 37 |
ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57", |
| 38 | 38 |
Name: "3cdbd1aa", |
| 39 | 39 |
}, |
| 40 | 40 |
} |
| 41 | 41 |
|
| 42 |
- c4 := &Container{
|
|
| 43 |
- CommonContainer: CommonContainer{
|
|
| 42 |
+ c4 := &container.Container{
|
|
| 43 |
+ CommonContainer: container.CommonContainer{
|
|
| 44 | 44 |
ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5", |
| 45 | 45 |
Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", |
| 46 | 46 |
}, |
| 47 | 47 |
} |
| 48 | 48 |
|
| 49 |
- c5 := &Container{
|
|
| 50 |
- CommonContainer: CommonContainer{
|
|
| 49 |
+ c5 := &container.Container{
|
|
| 50 |
+ CommonContainer: container.CommonContainer{
|
|
| 51 | 51 |
ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b", |
| 52 | 52 |
Name: "d22d69a2b896", |
| 53 | 53 |
}, |
| 54 | 54 |
} |
| 55 | 55 |
|
| 56 | 56 |
store := &contStore{
|
| 57 |
- s: map[string]*Container{
|
|
| 57 |
+ s: map[string]*container.Container{
|
|
| 58 | 58 |
c1.ID: c1, |
| 59 | 59 |
c2.ID: c2, |
| 60 | 60 |
c3.ID: c3, |
| ... | ... |
@@ -136,7 +140,7 @@ func initDaemonWithVolumeStore(tmp string) (*Daemon, error) {
|
| 136 | 136 |
} |
| 137 | 137 |
|
| 138 | 138 |
func TestParseSecurityOpt(t *testing.T) {
|
| 139 |
- container := &Container{}
|
|
| 139 |
+ container := &container.Container{}
|
|
| 140 | 140 |
config := &runconfig.HostConfig{}
|
| 141 | 141 |
|
| 142 | 142 |
// test apparmor |
| ... | ... |
@@ -190,3 +194,109 @@ func TestNetworkOptions(t *testing.T) {
|
| 190 | 190 |
t.Fatalf("Expected networkOptions error, got nil")
|
| 191 | 191 |
} |
| 192 | 192 |
} |
| 193 |
+ |
|
| 194 |
+func TestGetFullName(t *testing.T) {
|
|
| 195 |
+ name, err := GetFullContainerName("testing")
|
|
| 196 |
+ if err != nil {
|
|
| 197 |
+ t.Fatal(err) |
|
| 198 |
+ } |
|
| 199 |
+ if name != "/testing" {
|
|
| 200 |
+ t.Fatalf("Expected /testing got %s", name)
|
|
| 201 |
+ } |
|
| 202 |
+ if _, err := GetFullContainerName(""); err == nil {
|
|
| 203 |
+ t.Fatal("Error should not be nil")
|
|
| 204 |
+ } |
|
| 205 |
+} |
|
| 206 |
+ |
|
| 207 |
+func TestValidContainerNames(t *testing.T) {
|
|
| 208 |
+ invalidNames := []string{"-rm", "&sdfsfd", "safd%sd"}
|
|
| 209 |
+ validNames := []string{"word-word", "word_word", "1weoid"}
|
|
| 210 |
+ |
|
| 211 |
+ for _, name := range invalidNames {
|
|
| 212 |
+ if validContainerNamePattern.MatchString(name) {
|
|
| 213 |
+ t.Fatalf("%q is not a valid container name and was returned as valid.", name)
|
|
| 214 |
+ } |
|
| 215 |
+ } |
|
| 216 |
+ |
|
| 217 |
+ for _, name := range validNames {
|
|
| 218 |
+ if !validContainerNamePattern.MatchString(name) {
|
|
| 219 |
+ t.Fatalf("%q is a valid container name and was returned as invalid.", name)
|
|
| 220 |
+ } |
|
| 221 |
+ } |
|
| 222 |
+} |
|
| 223 |
+ |
|
| 224 |
+func TestContainerInitDNS(t *testing.T) {
|
|
| 225 |
+ tmp, err := ioutil.TempDir("", "docker-container-test-")
|
|
| 226 |
+ if err != nil {
|
|
| 227 |
+ t.Fatal(err) |
|
| 228 |
+ } |
|
| 229 |
+ defer os.RemoveAll(tmp) |
|
| 230 |
+ |
|
| 231 |
+ containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" |
|
| 232 |
+ containerPath := filepath.Join(tmp, containerID) |
|
| 233 |
+ if err := os.MkdirAll(containerPath, 0755); err != nil {
|
|
| 234 |
+ t.Fatal(err) |
|
| 235 |
+ } |
|
| 236 |
+ |
|
| 237 |
+ config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0,
|
|
| 238 |
+"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"}, |
|
| 239 |
+"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top", |
|
| 240 |
+"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"",
|
|
| 241 |
+"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true, |
|
| 242 |
+"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null, |
|
| 243 |
+"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95",
|
|
| 244 |
+"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1",
|
|
| 245 |
+"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","Ports":{}},
|
|
| 246 |
+"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf", |
|
| 247 |
+"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname", |
|
| 248 |
+"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts", |
|
| 249 |
+"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log", |
|
| 250 |
+"Name":"/ubuntu","Driver":"aufs","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, |
|
| 251 |
+"UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}`
|
|
| 252 |
+ |
|
| 253 |
+ // Container struct only used to retrieve path to config file |
|
| 254 |
+ container := &container.Container{CommonContainer: container.CommonContainer{Root: containerPath}}
|
|
| 255 |
+ configPath, err := container.ConfigPath() |
|
| 256 |
+ if err != nil {
|
|
| 257 |
+ t.Fatal(err) |
|
| 258 |
+ } |
|
| 259 |
+ if err = ioutil.WriteFile(configPath, []byte(config), 0644); err != nil {
|
|
| 260 |
+ t.Fatal(err) |
|
| 261 |
+ } |
|
| 262 |
+ |
|
| 263 |
+ hostConfig := `{"Binds":[],"ContainerIDFile":"","Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"",
|
|
| 264 |
+"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsOptions":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null,
|
|
| 265 |
+"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0},
|
|
| 266 |
+"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}`
|
|
| 267 |
+ |
|
| 268 |
+ hostConfigPath, err := container.HostConfigPath() |
|
| 269 |
+ if err != nil {
|
|
| 270 |
+ t.Fatal(err) |
|
| 271 |
+ } |
|
| 272 |
+ if err = ioutil.WriteFile(hostConfigPath, []byte(hostConfig), 0644); err != nil {
|
|
| 273 |
+ t.Fatal(err) |
|
| 274 |
+ } |
|
| 275 |
+ |
|
| 276 |
+ daemon, err := initDaemonWithVolumeStore(tmp) |
|
| 277 |
+ if err != nil {
|
|
| 278 |
+ t.Fatal(err) |
|
| 279 |
+ } |
|
| 280 |
+ defer volumedrivers.Unregister(volume.DefaultDriverName) |
|
| 281 |
+ |
|
| 282 |
+ c, err := daemon.load(containerID) |
|
| 283 |
+ if err != nil {
|
|
| 284 |
+ t.Fatal(err) |
|
| 285 |
+ } |
|
| 286 |
+ |
|
| 287 |
+ if c.HostConfig.DNS == nil {
|
|
| 288 |
+ t.Fatal("Expected container DNS to not be nil")
|
|
| 289 |
+ } |
|
| 290 |
+ |
|
| 291 |
+ if c.HostConfig.DNSSearch == nil {
|
|
| 292 |
+ t.Fatal("Expected container DNSSearch to not be nil")
|
|
| 293 |
+ } |
|
| 294 |
+ |
|
| 295 |
+ if c.HostConfig.DNSOptions == nil {
|
|
| 296 |
+ t.Fatal("Expected container DNSOptions to not be nil")
|
|
| 297 |
+ } |
|
| 298 |
+} |
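The added TestValidContainerNames exercises validContainerNamePattern without showing the expression itself. A hedged, standalone approximation of that pattern (the exact regexp used by the daemon may differ) that accepts the same valid and invalid samples the test uses:

package main

import (
	"fmt"
	"regexp"
)

// namePattern approximates validContainerNamePattern: one leading
// alphanumeric character followed by alphanumerics, underscores,
// periods, or hyphens. Illustrative only, not the daemon's
// authoritative expression.
var namePattern = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.-]+$`)

func main() {
	invalid := []string{"-rm", "&sdfsfd", "safd%sd"}
	valid := []string{"word-word", "word_word", "1weoid"}

	for _, name := range invalid {
		fmt.Printf("%-10q matches=%v (expected false)\n", name, namePattern.MatchString(name))
	}
	for _, name := range valid {
		fmt.Printf("%-10q matches=%v (expected true)\n", name, namePattern.MatchString(name))
	}
}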
| ... | ... |
@@ -12,6 +12,7 @@ import ( |
| 12 | 12 |
"syscall" |
| 13 | 13 |
|
| 14 | 14 |
"github.com/Sirupsen/logrus" |
| 15 |
+ "github.com/docker/docker/container" |
|
| 15 | 16 |
"github.com/docker/docker/daemon/graphdriver" |
| 16 | 17 |
derr "github.com/docker/docker/errors" |
| 17 | 18 |
"github.com/docker/docker/image" |
| ... | ... |
@@ -57,7 +58,7 @@ func getBlkioWeightDevices(config *runconfig.HostConfig) ([]*blkiodev.WeightDevi |
| 57 | 57 |
return BlkioWeightDevices, nil |
| 58 | 58 |
} |
| 59 | 59 |
|
| 60 |
-func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error {
|
|
| 60 |
+func parseSecurityOpt(container *container.Container, config *runconfig.HostConfig) error {
|
|
| 61 | 61 |
var ( |
| 62 | 62 |
labelOpts []string |
| 63 | 63 |
err error |
| ... | ... |
@@ -128,7 +129,7 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *runconfig.HostConfig, a |
| 128 | 128 |
hostConfig.MemorySwap = hostConfig.Memory * 2 |
| 129 | 129 |
} |
| 130 | 130 |
if hostConfig.ShmSize == nil {
|
| 131 |
- shmSize := DefaultSHMSize |
|
| 131 |
+ shmSize := container.DefaultSHMSize |
|
| 132 | 132 |
hostConfig.ShmSize = &shmSize |
| 133 | 133 |
} |
| 134 | 134 |
var err error |
| ... | ... |
@@ -575,7 +576,7 @@ func setupInitLayer(initLayer string, rootUID, rootGID int) error {
|
| 575 | 575 |
} |
| 576 | 576 |
|
| 577 | 577 |
// registerLinks writes the links to a file. |
| 578 |
-func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error {
|
|
| 578 |
+func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *runconfig.HostConfig) error {
|
|
| 579 | 579 |
if hostConfig == nil || hostConfig.Links == nil {
|
| 580 | 580 |
return nil |
| 581 | 581 |
} |
| ... | ... |
@@ -590,14 +591,14 @@ func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig. |
| 590 | 590 |
//An error from daemon.Get() means this name could not be found |
| 591 | 591 |
return fmt.Errorf("Could not get container for %s", name)
|
| 592 | 592 |
} |
| 593 |
- for child.hostConfig.NetworkMode.IsContainer() {
|
|
| 594 |
- parts := strings.SplitN(string(child.hostConfig.NetworkMode), ":", 2) |
|
| 593 |
+ for child.HostConfig.NetworkMode.IsContainer() {
|
|
| 594 |
+ parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) |
|
| 595 | 595 |
child, err = daemon.Get(parts[1]) |
| 596 | 596 |
if err != nil {
|
| 597 | 597 |
return fmt.Errorf("Could not get container for %s", parts[1])
|
| 598 | 598 |
} |
| 599 | 599 |
} |
| 600 |
- if child.hostConfig.NetworkMode.IsHost() {
|
|
| 600 |
+ if child.HostConfig.NetworkMode.IsHost() {
|
|
| 601 | 601 |
return runconfig.ErrConflictHostNetworkAndLinks |
| 602 | 602 |
} |
| 603 | 603 |
if err := daemon.registerLink(container, child, alias); err != nil {
|
| ... | ... |
@@ -608,7 +609,7 @@ func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig. |
| 608 | 608 |
// After we load all the links into the daemon |
| 609 | 609 |
// set them to nil on the hostconfig |
| 610 | 610 |
hostConfig.Links = nil |
| 611 |
- if err := container.writeHostConfig(); err != nil {
|
|
| 611 |
+ if err := container.WriteHostConfig(); err != nil {
|
|
| 612 | 612 |
return err |
| 613 | 613 |
} |
| 614 | 614 |
|
| ... | ... |
@@ -617,13 +618,13 @@ func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig. |
| 617 | 617 |
|
| 618 | 618 |
// conditionalMountOnStart is a platform specific helper function during the |
| 619 | 619 |
// container start to call mount. |
| 620 |
-func (daemon *Daemon) conditionalMountOnStart(container *Container) error {
|
|
| 620 |
+func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
|
|
| 621 | 621 |
return daemon.Mount(container) |
| 622 | 622 |
} |
| 623 | 623 |
|
| 624 | 624 |
// conditionalUnmountOnCleanup is a platform specific helper function called |
| 625 | 625 |
// during the cleanup of a container to unmount. |
| 626 |
-func (daemon *Daemon) conditionalUnmountOnCleanup(container *Container) {
|
|
| 626 |
+func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) {
|
|
| 627 | 627 |
daemon.Unmount(container) |
| 628 | 628 |
} |
| 629 | 629 |
|
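Most of the churn in these hunks comes from fields and methods that had to be exported once Container left the daemon package: hostConfig becomes HostConfig, writeHostConfig becomes WriteHostConfig, and so on. A minimal two-type sketch of that package boundary, with names trimmed down for illustration:

package main

import "fmt"

// HostConfig is a trimmed stand-in for runconfig.HostConfig; only the
// field used below is included.
type HostConfig struct {
	NetworkMode string
}

// Container sketches the exported surface the daemon now depends on.
// Once the type lives in its own package, the daemon can no longer read
// an unexported hostConfig field, so the field itself is exported.
type Container struct {
	ID         string
	HostConfig *HostConfig
}

// registerLinks imitates the access pattern rewritten above:
// child.hostConfig.NetworkMode became child.HostConfig.NetworkMode.
func registerLinks(child *Container) string {
	return child.HostConfig.NetworkMode
}

func main() {
	c := &Container{ID: "abc123", HostConfig: &HostConfig{NetworkMode: "bridge"}}
	fmt.Println(c.ID, registerLinks(c))
}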
| ... | ... |
@@ -10,6 +10,7 @@ import ( |
| 10 | 10 |
|
| 11 | 11 |
"github.com/Sirupsen/logrus" |
| 12 | 12 |
"github.com/docker/distribution/reference" |
| 13 |
+ "github.com/docker/docker/container" |
|
| 13 | 14 |
"github.com/docker/docker/daemon/graphdriver" |
| 14 | 15 |
"github.com/docker/docker/dockerversion" |
| 15 | 16 |
"github.com/docker/docker/image" |
| ... | ... |
@@ -34,7 +35,7 @@ func getBlkioWeightDevices(config *runconfig.HostConfig) ([]*blkiodev.WeightDevi |
| 34 | 34 |
return nil, nil |
| 35 | 35 |
} |
| 36 | 36 |
|
| 37 |
-func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error {
|
|
| 37 |
+func parseSecurityOpt(container *container.Container, config *runconfig.HostConfig) error {
|
|
| 38 | 38 |
return nil |
| 39 | 39 |
} |
| 40 | 40 |
|
| ... | ... |
@@ -115,7 +116,7 @@ func (daemon *Daemon) initNetworkController(config *Config) (libnetwork.NetworkC |
| 115 | 115 |
|
| 116 | 116 |
// registerLinks sets up links between containers and writes the |
| 117 | 117 |
// configuration out for persistence. As of Windows TP4, links are not supported. |
| 118 |
-func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error {
|
|
| 118 |
+func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *runconfig.HostConfig) error {
|
|
| 119 | 119 |
return nil |
| 120 | 120 |
} |
| 121 | 121 |
|
| ... | ... |
@@ -125,9 +126,9 @@ func (daemon *Daemon) cleanupMounts() error {
|
| 125 | 125 |
|
| 126 | 126 |
// conditionalMountOnStart is a platform specific helper function during the |
| 127 | 127 |
// container start to call mount. |
| 128 |
-func (daemon *Daemon) conditionalMountOnStart(container *Container) error {
|
|
| 128 |
+func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
|
|
| 129 | 129 |
// We do not mount if a Hyper-V container |
| 130 |
- if !container.hostConfig.Isolation.IsHyperV() {
|
|
| 130 |
+ if !container.HostConfig.Isolation.IsHyperV() {
|
|
| 131 | 131 |
if err := daemon.Mount(container); err != nil {
|
| 132 | 132 |
return err |
| 133 | 133 |
} |
| ... | ... |
@@ -137,9 +138,9 @@ func (daemon *Daemon) conditionalMountOnStart(container *Container) error {
|
| 137 | 137 |
|
| 138 | 138 |
// conditionalUnmountOnCleanup is a platform specific helper function called |
| 139 | 139 |
// during the cleanup of a container to unmount. |
| 140 |
-func (daemon *Daemon) conditionalUnmountOnCleanup(container *Container) {
|
|
| 140 |
+func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) {
|
|
| 141 | 141 |
// We do not unmount if a Hyper-V container |
| 142 |
- if !container.hostConfig.Isolation.IsHyperV() {
|
|
| 142 |
+ if !container.HostConfig.Isolation.IsHyperV() {
|
|
| 143 | 143 |
daemon.Unmount(container) |
| 144 | 144 |
} |
| 145 | 145 |
} |
| ... | ... |
@@ -13,6 +13,7 @@ import ( |
| 13 | 13 |
"github.com/docker/docker/api" |
| 14 | 14 |
"github.com/docker/docker/builder" |
| 15 | 15 |
"github.com/docker/docker/cliconfig" |
| 16 |
+ "github.com/docker/docker/container" |
|
| 16 | 17 |
"github.com/docker/docker/daemon" |
| 17 | 18 |
"github.com/docker/docker/image" |
| 18 | 19 |
"github.com/docker/docker/pkg/archive" |
| ... | ... |
@@ -80,12 +81,12 @@ func (d Docker) Pull(name string) (*image.Image, error) {
|
| 80 | 80 |
} |
| 81 | 81 |
|
| 82 | 82 |
// Container looks up a Docker container referenced by `id`. |
| 83 |
-func (d Docker) Container(id string) (*daemon.Container, error) {
|
|
| 83 |
+func (d Docker) Container(id string) (*container.Container, error) {
|
|
| 84 | 84 |
return d.Daemon.Get(id) |
| 85 | 85 |
} |
| 86 | 86 |
|
| 87 | 87 |
// Create creates a new Docker container and returns potential warnings |
| 88 |
-func (d Docker) Create(cfg *runconfig.Config, hostCfg *runconfig.HostConfig) (*daemon.Container, []string, error) {
|
|
| 88 |
+func (d Docker) Create(cfg *runconfig.Config, hostCfg *runconfig.HostConfig) (*container.Container, []string, error) {
|
|
| 89 | 89 |
ccr, err := d.Daemon.ContainerCreate(&daemon.ContainerCreateConfig{
|
| 90 | 90 |
Name: "", |
| 91 | 91 |
Config: cfg, |
| ... | ... |
@@ -129,7 +130,7 @@ func (d Docker) Release(sessionID string, activeImages []string) {
|
| 129 | 129 |
// specified by a container object. |
| 130 | 130 |
// TODO: make sure callers don't unnecessarily convert destPath with filepath.FromSlash (Copy does it already). |
| 131 | 131 |
// Copy should take in abstract paths (with slashes) and the implementation should convert it to OS-specific paths. |
| 132 |
-func (d Docker) Copy(c *daemon.Container, destPath string, src builder.FileInfo, decompress bool) error {
|
|
| 132 |
+func (d Docker) Copy(c *container.Container, destPath string, src builder.FileInfo, decompress bool) error {
|
|
| 133 | 133 |
srcPath := src.Path() |
| 134 | 134 |
destExists := true |
| 135 | 135 |
rootUID, rootGID := d.Daemon.GetRemappedUIDGID() |
| ... | ... |
@@ -212,23 +213,23 @@ func (d Docker) GetCachedImage(imgID string, cfg *runconfig.Config) (string, err |
| 212 | 212 |
} |
| 213 | 213 |
|
| 214 | 214 |
// Kill stops the container execution abruptly. |
| 215 |
-func (d Docker) Kill(container *daemon.Container) error {
|
|
| 215 |
+func (d Docker) Kill(container *container.Container) error {
|
|
| 216 | 216 |
return d.Daemon.Kill(container) |
| 217 | 217 |
} |
| 218 | 218 |
|
| 219 | 219 |
// Mount mounts the root filesystem for the container. |
| 220 |
-func (d Docker) Mount(c *daemon.Container) error {
|
|
| 220 |
+func (d Docker) Mount(c *container.Container) error {
|
|
| 221 | 221 |
return d.Daemon.Mount(c) |
| 222 | 222 |
} |
| 223 | 223 |
|
| 224 | 224 |
// Unmount unmounts the root filesystem for the container. |
| 225 |
-func (d Docker) Unmount(c *daemon.Container) error {
|
|
| 225 |
+func (d Docker) Unmount(c *container.Container) error {
|
|
| 226 | 226 |
d.Daemon.Unmount(c) |
| 227 | 227 |
return nil |
| 228 | 228 |
} |
| 229 | 229 |
|
| 230 | 230 |
// Start starts a container |
| 231 |
-func (d Docker) Start(c *daemon.Container) error {
|
|
| 231 |
+func (d Docker) Start(c *container.Container) error {
|
|
| 232 | 232 |
return d.Daemon.Start(c) |
| 233 | 233 |
} |
| 234 | 234 |
|
| ... | ... |
@@ -5,6 +5,7 @@ import ( |
| 5 | 5 |
"path" |
| 6 | 6 |
|
| 7 | 7 |
"github.com/Sirupsen/logrus" |
| 8 |
+ "github.com/docker/docker/container" |
|
| 8 | 9 |
derr "github.com/docker/docker/errors" |
| 9 | 10 |
"github.com/docker/docker/layer" |
| 10 | 11 |
volumestore "github.com/docker/docker/volume/store" |
| ... | ... |
@@ -26,14 +27,14 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error |
| 26 | 26 |
} |
| 27 | 27 |
|
| 28 | 28 |
// Container state RemovalInProgress should be used to avoid races. |
| 29 |
- if err = container.setRemovalInProgress(); err != nil {
|
|
| 29 |
+ if err = container.SetRemovalInProgress(); err != nil {
|
|
| 30 | 30 |
if err == derr.ErrorCodeAlreadyRemoving {
|
| 31 | 31 |
// do not fail when the removal is in progress started by other request. |
| 32 | 32 |
return nil |
| 33 | 33 |
} |
| 34 | 34 |
return derr.ErrorCodeRmState.WithArgs(err) |
| 35 | 35 |
} |
| 36 |
- defer container.resetRemovalInProgress() |
|
| 36 |
+ defer container.ResetRemovalInProgress() |
|
| 37 | 37 |
|
| 38 | 38 |
// check if container wasn't deregistered by previous rm since Get |
| 39 | 39 |
if c := daemon.containers.Get(container.ID); c == nil {
|
| ... | ... |
@@ -87,7 +88,7 @@ func (daemon *Daemon) rmLink(name string) error {
|
| 87 | 87 |
|
| 88 | 88 |
// cleanupContainer unregisters a container from the daemon, stops stats |
| 89 | 89 |
// collection and cleanly removes contents and metadata from the filesystem. |
| 90 |
-func (daemon *Daemon) cleanupContainer(container *Container, forceRemove bool) (err error) {
|
|
| 90 |
+func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove bool) (err error) {
|
|
| 91 | 91 |
if container.IsRunning() {
|
| 92 | 92 |
if !forceRemove {
|
| 93 | 93 |
return derr.ErrorCodeRmRunning |
| ... | ... |
@@ -106,12 +107,12 @@ func (daemon *Daemon) cleanupContainer(container *Container, forceRemove bool) ( |
| 106 | 106 |
} |
| 107 | 107 |
|
| 108 | 108 |
// Mark container dead. We don't want anybody to be restarting it. |
| 109 |
- container.setDead() |
|
| 109 |
+ container.SetDead() |
|
| 110 | 110 |
|
| 111 | 111 |
// Save container state to disk. So that if error happens before |
| 112 | 112 |
// container meta file got removed from disk, then a restart of |
| 113 | 113 |
// docker should not make a dead container alive. |
| 114 |
- if err := container.toDiskLocking(); err != nil {
|
|
| 114 |
+ if err := container.ToDiskLocking(); err != nil {
|
|
| 115 | 115 |
logrus.Errorf("Error saving dying container to disk: %v", err)
|
| 116 | 116 |
} |
| 117 | 117 |
|
| ... | ... |
@@ -129,7 +130,7 @@ func (daemon *Daemon) cleanupContainer(container *Container, forceRemove bool) ( |
| 129 | 129 |
} |
| 130 | 130 |
}() |
| 131 | 131 |
|
| 132 |
- if err = os.RemoveAll(container.root); err != nil {
|
|
| 132 |
+ if err = os.RemoveAll(container.Root); err != nil {
|
|
| 133 | 133 |
return derr.ErrorCodeRmFS.WithArgs(container.ID, err) |
| 134 | 134 |
} |
| 135 | 135 |
|
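cleanupContainer relies on three State methods that are now exported: SetRemovalInProgress, ResetRemovalInProgress, and SetDead. A self-contained sketch of the guard they implement, assuming a mutex-protected flag; the real container.State also carries run state, exit codes, and wait channels not shown here:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// errAlreadyRemoving stands in for derr.ErrorCodeAlreadyRemoving.
var errAlreadyRemoving = errors.New("removal already in progress")

// State sketches the part of container state the rm path relies on: a
// flag that guards against two concurrent removals of the same container.
type State struct {
	mu                sync.Mutex
	RemovalInProgress bool
	Dead              bool
}

// SetRemovalInProgress fails if another removal already holds the flag,
// mirroring the SetRemovalInProgress call in the hunk above.
func (s *State) SetRemovalInProgress() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.RemovalInProgress {
		return errAlreadyRemoving
	}
	s.RemovalInProgress = true
	return nil
}

// ResetRemovalInProgress releases the flag once removal finishes.
func (s *State) ResetRemovalInProgress() {
	s.mu.Lock()
	s.RemovalInProgress = false
	s.mu.Unlock()
}

// SetDead marks the container so nothing can restart it mid-removal.
func (s *State) SetDead() {
	s.mu.Lock()
	s.Dead = true
	s.mu.Unlock()
}

func main() {
	st := &State{}
	fmt.Println(st.SetRemovalInProgress()) // nil: first removal wins
	fmt.Println(st.SetRemovalInProgress()) // removal already in progress
	st.ResetRemovalInProgress()
	st.SetDead()
	fmt.Println(st.Dead)
}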
| ... | ... |
@@ -5,6 +5,7 @@ import ( |
| 5 | 5 |
"os" |
| 6 | 6 |
"testing" |
| 7 | 7 |
|
| 8 |
+ "github.com/docker/docker/container" |
|
| 8 | 9 |
"github.com/docker/docker/runconfig" |
| 9 | 10 |
) |
| 10 | 11 |
|
| ... | ... |
@@ -18,19 +19,19 @@ func TestContainerDoubleDelete(t *testing.T) {
|
| 18 | 18 |
repository: tmp, |
| 19 | 19 |
root: tmp, |
| 20 | 20 |
} |
| 21 |
- daemon.containers = &contStore{s: make(map[string]*Container)}
|
|
| 21 |
+ daemon.containers = &contStore{s: make(map[string]*container.Container)}
|
|
| 22 | 22 |
|
| 23 |
- container := &Container{
|
|
| 24 |
- CommonContainer: CommonContainer{
|
|
| 23 |
+ container := &container.Container{
|
|
| 24 |
+ CommonContainer: container.CommonContainer{
|
|
| 25 | 25 |
ID: "test", |
| 26 |
- State: NewState(), |
|
| 26 |
+ State: container.NewState(), |
|
| 27 | 27 |
Config: &runconfig.Config{},
|
| 28 | 28 |
}, |
| 29 | 29 |
} |
| 30 | 30 |
daemon.containers.Add(container.ID, container) |
| 31 | 31 |
|
| 32 | 32 |
// Mark the container as having a delete in progress |
| 33 |
- if err := container.setRemovalInProgress(); err != nil {
|
|
| 33 |
+ if err := container.SetRemovalInProgress(); err != nil {
|
|
| 34 | 34 |
t.Fatal(err) |
| 35 | 35 |
} |
| 36 | 36 |
|
| ... | ... |
@@ -1,7 +1,11 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 |
+import ( |
|
| 4 |
+ "github.com/docker/docker/container" |
|
| 5 |
+) |
|
| 6 |
+ |
|
| 3 | 7 |
// LogContainerEvent generates an event related to a container. |
| 4 |
-func (daemon *Daemon) LogContainerEvent(container *Container, action string) {
|
|
| 8 |
+func (daemon *Daemon) LogContainerEvent(container *container.Container, action string) {
|
|
| 5 | 9 |
daemon.EventsService.Log( |
| 6 | 10 |
action, |
| 7 | 11 |
container.ID, |
| ... | ... |
@@ -6,6 +6,7 @@ import ( |
| 6 | 6 |
"time" |
| 7 | 7 |
|
| 8 | 8 |
"github.com/Sirupsen/logrus" |
| 9 |
+ "github.com/docker/docker/container" |
|
| 9 | 10 |
"github.com/docker/docker/daemon/exec" |
| 10 | 11 |
"github.com/docker/docker/daemon/execdriver" |
| 11 | 12 |
derr "github.com/docker/docker/errors" |
| ... | ... |
@@ -15,15 +16,15 @@ import ( |
| 15 | 15 |
"github.com/docker/docker/runconfig" |
| 16 | 16 |
) |
| 17 | 17 |
|
| 18 |
-func (d *Daemon) registerExecCommand(container *Container, config *exec.Config) {
|
|
| 18 |
+func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) {
|
|
| 19 | 19 |
// Storing execs in container in order to kill them gracefully whenever the container is stopped or removed. |
| 20 |
- container.execCommands.Add(config.ID, config) |
|
| 20 |
+ container.ExecCommands.Add(config.ID, config) |
|
| 21 | 21 |
// Storing execs in daemon for easy access via remote API. |
| 22 | 22 |
d.execCommands.Add(config.ID, config) |
| 23 | 23 |
} |
| 24 | 24 |
|
| 25 | 25 |
// ExecExists looks up the exec instance and returns a bool if it exists or not. |
| 26 |
-// It will also return the error produced by `getExecConfig` |
|
| 26 |
+// It will also return the error produced by `getConfig` |
|
| 27 | 27 |
func (d *Daemon) ExecExists(name string) (bool, error) {
|
| 28 | 28 |
if _, err := d.getExecConfig(name); err != nil {
|
| 29 | 29 |
return false, err |
| ... | ... |
@@ -47,7 +48,7 @@ func (d *Daemon) getExecConfig(name string) (*exec.Config, error) {
|
| 47 | 47 |
if !container.IsRunning() {
|
| 48 | 48 |
return nil, derr.ErrorCodeContainerNotRunning.WithArgs(container.ID, container.State.String()) |
| 49 | 49 |
} |
| 50 |
- if container.isPaused() {
|
|
| 50 |
+ if container.IsPaused() {
|
|
| 51 | 51 |
return nil, derr.ErrorCodeExecPaused.WithArgs(container.ID) |
| 52 | 52 |
} |
| 53 | 53 |
return ec, nil |
| ... | ... |
@@ -57,12 +58,12 @@ func (d *Daemon) getExecConfig(name string) (*exec.Config, error) {
|
| 57 | 57 |
return nil, derr.ErrorCodeNoExecID.WithArgs(name) |
| 58 | 58 |
} |
| 59 | 59 |
|
| 60 |
-func (d *Daemon) unregisterExecCommand(container *Container, execConfig *exec.Config) {
|
|
| 61 |
- container.execCommands.Delete(execConfig.ID) |
|
| 60 |
+func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) {
|
|
| 61 |
+ container.ExecCommands.Delete(execConfig.ID) |
|
| 62 | 62 |
d.execCommands.Delete(execConfig.ID) |
| 63 | 63 |
} |
| 64 | 64 |
|
| 65 |
-func (d *Daemon) getActiveContainer(name string) (*Container, error) {
|
|
| 65 |
+func (d *Daemon) getActiveContainer(name string) (*container.Container, error) {
|
|
| 66 | 66 |
container, err := d.Get(name) |
| 67 | 67 |
if err != nil {
|
| 68 | 68 |
return nil, err |
| ... | ... |
@@ -71,7 +72,7 @@ func (d *Daemon) getActiveContainer(name string) (*Container, error) {
|
| 71 | 71 |
if !container.IsRunning() {
|
| 72 | 72 |
return nil, derr.ErrorCodeNotRunning.WithArgs(name) |
| 73 | 73 |
} |
| 74 |
- if container.isPaused() {
|
|
| 74 |
+ if container.IsPaused() {
|
|
| 75 | 75 |
return nil, derr.ErrorCodeExecPaused.WithArgs(name) |
| 76 | 76 |
} |
| 77 | 77 |
return container, nil |
| ... | ... |
@@ -131,9 +132,9 @@ func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io. |
| 131 | 131 |
ec.Running = true |
| 132 | 132 |
ec.Unlock() |
| 133 | 133 |
|
| 134 |
- container := d.containers.Get(ec.ContainerID) |
|
| 135 |
- logrus.Debugf("starting exec command %s in container %s", ec.ID, container.ID)
|
|
| 136 |
- d.LogContainerEvent(container, "exec_start: "+ec.ProcessConfig.Entrypoint+" "+strings.Join(ec.ProcessConfig.Arguments, " ")) |
|
| 134 |
+ c := d.containers.Get(ec.ContainerID) |
|
| 135 |
+ logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID)
|
|
| 136 |
+ d.LogContainerEvent(c, "exec_start: "+ec.ProcessConfig.Entrypoint+" "+strings.Join(ec.ProcessConfig.Arguments, " ")) |
|
| 137 | 137 |
|
| 138 | 138 |
if ec.OpenStdin {
|
| 139 | 139 |
r, w := io.Pipe() |
| ... | ... |
@@ -157,8 +158,7 @@ func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io. |
| 157 | 157 |
ec.NewNopInputPipe() |
| 158 | 158 |
} |
| 159 | 159 |
|
| 160 |
- attachErr := attach(ec.StreamConfig, ec.OpenStdin, true, ec.ProcessConfig.Tty, cStdin, cStdout, cStderr) |
|
| 161 |
- |
|
| 160 |
+ attachErr := container.AttachStreams(ec.StreamConfig, ec.OpenStdin, true, ec.ProcessConfig.Tty, cStdin, cStdout, cStderr) |
|
| 162 | 161 |
execErr := make(chan error) |
| 163 | 162 |
|
| 164 | 163 |
// Note, the ExecConfig data will be removed when the container |
| ... | ... |
@@ -166,7 +166,7 @@ func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io. |
| 166 | 166 |
// the exitStatus) even after the cmd is done running. |
| 167 | 167 |
|
| 168 | 168 |
go func() {
|
| 169 |
- execErr <- d.containerExec(container, ec) |
|
| 169 |
+ execErr <- d.containerExec(c, ec) |
|
| 170 | 170 |
}() |
| 171 | 171 |
|
| 172 | 172 |
select {
|
| ... | ... |
@@ -184,19 +184,19 @@ func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io. |
| 184 | 184 |
} |
| 185 | 185 |
|
| 186 | 186 |
// Maybe the container stopped while we were trying to exec |
| 187 |
- if !container.IsRunning() {
|
|
| 187 |
+ if !c.IsRunning() {
|
|
| 188 | 188 |
return derr.ErrorCodeExecContainerStopped |
| 189 | 189 |
} |
| 190 |
- return derr.ErrorCodeExecCantRun.WithArgs(ec.ID, container.ID, err) |
|
| 190 |
+ return derr.ErrorCodeExecCantRun.WithArgs(ec.ID, c.ID, err) |
|
| 191 | 191 |
} |
| 192 | 192 |
} |
| 193 | 193 |
|
| 194 | 194 |
// Exec calls the underlying exec driver to run |
| 195 |
-func (d *Daemon) Exec(c *Container, execConfig *exec.Config, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (int, error) {
|
|
| 195 |
+func (d *Daemon) Exec(c *container.Container, execConfig *exec.Config, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (int, error) {
|
|
| 196 | 196 |
hooks := execdriver.Hooks{
|
| 197 | 197 |
Start: startCallback, |
| 198 | 198 |
} |
| 199 |
- exitStatus, err := d.execDriver.Exec(c.command, execConfig.ProcessConfig, pipes, hooks) |
|
| 199 |
+ exitStatus, err := d.execDriver.Exec(c.Command, execConfig.ProcessConfig, pipes, hooks) |
|
| 200 | 200 |
|
| 201 | 201 |
// On err, make sure we don't leave ExitCode at zero |
| 202 | 202 |
if err != nil && exitStatus == 0 {
|
| ... | ... |
@@ -238,14 +238,14 @@ func (d *Daemon) execCommandGC() {
|
| 238 | 238 |
func (d *Daemon) containerExecIds() map[string]struct{} {
|
| 239 | 239 |
ids := map[string]struct{}{}
|
| 240 | 240 |
for _, c := range d.containers.List() {
|
| 241 |
- for _, id := range c.execCommands.List() {
|
|
| 241 |
+ for _, id := range c.ExecCommands.List() {
|
|
| 242 | 242 |
ids[id] = struct{}{}
|
| 243 | 243 |
} |
| 244 | 244 |
} |
| 245 | 245 |
return ids |
| 246 | 246 |
} |
| 247 | 247 |
|
| 248 |
-func (d *Daemon) containerExec(container *Container, ec *exec.Config) error {
|
|
| 248 |
+func (d *Daemon) containerExec(container *container.Container, ec *exec.Config) error {
|
|
| 249 | 249 |
container.Lock() |
| 250 | 250 |
defer container.Unlock() |
| 251 | 251 |
|
| ... | ... |
@@ -268,7 +268,7 @@ func (d *Daemon) containerExec(container *Container, ec *exec.Config) error {
|
| 268 | 268 |
return ec.Wait(cErr) |
| 269 | 269 |
} |
| 270 | 270 |
|
| 271 |
-func (d *Daemon) monitorExec(container *Container, execConfig *exec.Config, callback execdriver.DriverCallback) error {
|
|
| 271 |
+func (d *Daemon) monitorExec(container *container.Container, execConfig *exec.Config, callback execdriver.DriverCallback) error {
|
|
| 272 | 272 |
pipes := execdriver.NewPipes(execConfig.Stdin(), execConfig.Stdout(), execConfig.Stderr(), execConfig.OpenStdin) |
| 273 | 273 |
exitCode, err := d.Exec(container, execConfig, pipes, callback) |
| 274 | 274 |
if err != nil {
|
| ... | ... |
@@ -287,6 +287,6 @@ func (d *Daemon) monitorExec(container *Container, execConfig *exec.Config, call |
| 287 | 287 |
} |
| 288 | 288 |
// remove the exec command from the container's store only and not the |
| 289 | 289 |
// daemon's store so that the exec command can be inspected. |
| 290 |
- container.execCommands.Delete(execConfig.ID) |
|
| 290 |
+ container.ExecCommands.Delete(execConfig.ID) |
|
| 291 | 291 |
return err |
| 292 | 292 |
} |
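The exec hunks replace the unexported execCommands field with an exported ExecCommands store offering Add, Delete, and List. A minimal mutex-backed sketch of that store shape; the real store in daemon/exec holds *exec.Config values rather than the bare IDs used here:

package main

import (
	"fmt"
	"sync"
)

// execStore is a trimmed sketch of the exec store kept on both the
// container (ExecCommands) and the daemon, as used in the hunks above.
type execStore struct {
	mu   sync.Mutex
	cmds map[string]struct{}
}

func newExecStore() *execStore { return &execStore{cmds: map[string]struct{}{}} }

// Add registers an exec command ID.
func (s *execStore) Add(id string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.cmds[id] = struct{}{}
}

// Delete removes an exec command ID.
func (s *execStore) Delete(id string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.cmds, id)
}

// List returns the currently registered exec command IDs.
func (s *execStore) List() []string {
	s.mu.Lock()
	defer s.mu.Unlock()
	ids := make([]string, 0, len(s.cmds))
	for id := range s.cmds {
		ids = append(ids, id)
	}
	return ids
}

func main() {
	s := newExecStore()
	s.Add("exec-1")
	s.Add("exec-2")
	s.Delete("exec-1")
	fmt.Println(s.List()) // [exec-2]
}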
| ... | ... |
@@ -3,13 +3,14 @@ |
| 3 | 3 |
package daemon |
| 4 | 4 |
|
| 5 | 5 |
import ( |
| 6 |
+ "github.com/docker/docker/container" |
|
| 6 | 7 |
"github.com/docker/docker/daemon/execdriver" |
| 7 | 8 |
"github.com/docker/docker/runconfig" |
| 8 | 9 |
) |
| 9 | 10 |
|
| 10 | 11 |
// setPlatformSpecificExecProcessConfig sets platform-specific fields in the |
| 11 | 12 |
// ProcessConfig structure. |
| 12 |
-func setPlatformSpecificExecProcessConfig(config *runconfig.ExecConfig, container *Container, pc *execdriver.ProcessConfig) {
|
|
| 13 |
+func setPlatformSpecificExecProcessConfig(config *runconfig.ExecConfig, container *container.Container, pc *execdriver.ProcessConfig) {
|
|
| 13 | 14 |
user := config.User |
| 14 | 15 |
if len(user) == 0 {
|
| 15 | 16 |
user = container.Config.User |
| ... | ... |
@@ -1,11 +1,12 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "github.com/docker/docker/container" |
|
| 4 | 5 |
"github.com/docker/docker/daemon/execdriver" |
| 5 | 6 |
"github.com/docker/docker/runconfig" |
| 6 | 7 |
) |
| 7 | 8 |
|
| 8 | 9 |
// setPlatformSpecificExecProcessConfig sets platform-specific fields in the |
| 9 | 10 |
// ProcessConfig structure. This is a no-op on Windows |
| 10 |
-func setPlatformSpecificExecProcessConfig(config *runconfig.ExecConfig, container *Container, pc *execdriver.ProcessConfig) {
|
|
| 11 |
+func setPlatformSpecificExecProcessConfig(config *runconfig.ExecConfig, container *container.Container, pc *execdriver.ProcessConfig) {
|
|
| 11 | 12 |
} |
| ... | ... |
@@ -3,6 +3,7 @@ package daemon |
| 3 | 3 |
import ( |
| 4 | 4 |
"io" |
| 5 | 5 |
|
| 6 |
+ "github.com/docker/docker/container" |
|
| 6 | 7 |
derr "github.com/docker/docker/errors" |
| 7 | 8 |
"github.com/docker/docker/pkg/archive" |
| 8 | 9 |
"github.com/docker/docker/pkg/ioutils" |
| ... | ... |
@@ -29,13 +30,13 @@ func (daemon *Daemon) ContainerExport(name string, out io.Writer) error {
|
| 29 | 29 |
return nil |
| 30 | 30 |
} |
| 31 | 31 |
|
| 32 |
-func (daemon *Daemon) containerExport(container *Container) (archive.Archive, error) {
|
|
| 32 |
+func (daemon *Daemon) containerExport(container *container.Container) (archive.Archive, error) {
|
|
| 33 | 33 |
if err := daemon.Mount(container); err != nil {
|
| 34 | 34 |
return nil, err |
| 35 | 35 |
} |
| 36 | 36 |
|
| 37 | 37 |
uidMaps, gidMaps := daemon.GetUIDGIDMaps() |
| 38 |
- archive, err := archive.TarWithOptions(container.basefs, &archive.TarOptions{
|
|
| 38 |
+ archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{
|
|
| 39 | 39 |
Compression: archive.Uncompressed, |
| 40 | 40 |
UIDMaps: uidMaps, |
| 41 | 41 |
GIDMaps: gidMaps, |
| ... | ... |
@@ -2,11 +2,13 @@ package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 | 4 |
"sort" |
| 5 |
+ |
|
| 6 |
+ "github.com/docker/docker/container" |
|
| 5 | 7 |
) |
| 6 | 8 |
|
| 7 | 9 |
// History is a convenience type for storing a list of containers, |
| 8 | 10 |
// ordered by creation date. |
| 9 |
-type History []*Container |
|
| 11 |
+type History []*container.Container |
|
| 10 | 12 |
|
| 11 | 13 |
func (history *History) Len() int {
|
| 12 | 14 |
return len(*history) |
| ... | ... |
@@ -23,7 +25,7 @@ func (history *History) Swap(i, j int) {
|
| 23 | 23 |
} |
| 24 | 24 |
|
| 25 | 25 |
// Add the given container to history. |
| 26 |
-func (history *History) Add(container *Container) {
|
|
| 26 |
+func (history *History) Add(container *container.Container) {
|
|
| 27 | 27 |
*history = append(*history, container) |
| 28 | 28 |
} |
| 29 | 29 |
|
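History is now []*container.Container but keeps its sort.Interface behavior. A standalone sketch with a trimmed Container type; the Less direction (newest first) is an assumption, since the hunk above does not show that method:

package main

import (
	"fmt"
	"sort"
	"time"
)

// Container is a minimal stand-in for container.Container; only the
// field History sorts on is included.
type Container struct {
	ID      string
	Created time.Time
}

// History mirrors the daemon type: a slice of containers ordered by
// creation date.
type History []*Container

func (h History) Len() int           { return len(h) }
func (h History) Less(i, j int) bool { return h[j].Created.Before(h[i].Created) }
func (h History) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func main() {
	h := History{
		{ID: "old", Created: time.Now().Add(-time.Hour)},
		{ID: "new", Created: time.Now()},
	}
	sort.Sort(h)
	fmt.Println(h[0].ID) // "new" comes first under this ordering
}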
| ... | ... |
@@ -6,6 +6,7 @@ import ( |
| 6 | 6 |
|
| 7 | 7 |
"github.com/docker/distribution/reference" |
| 8 | 8 |
"github.com/docker/docker/api/types" |
| 9 |
+ "github.com/docker/docker/container" |
|
| 9 | 10 |
derr "github.com/docker/docker/errors" |
| 10 | 11 |
"github.com/docker/docker/image" |
| 11 | 12 |
"github.com/docker/docker/pkg/stringid" |
| ... | ... |
@@ -133,7 +134,7 @@ func isImageIDPrefix(imageID, possiblePrefix string) bool {
|
| 133 | 133 |
|
| 134 | 134 |
// getContainerUsingImage returns a container that was created using the given |
| 135 | 135 |
// imageID. Returns nil if there is no such container. |
| 136 |
-func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *Container {
|
|
| 136 |
+func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Container {
|
|
| 137 | 137 |
for _, container := range daemon.List() {
|
| 138 | 138 |
if container.ImageID == imageID {
|
| 139 | 139 |
return container |
| ... | ... |
@@ -6,6 +6,7 @@ import ( |
| 6 | 6 |
|
| 7 | 7 |
"github.com/docker/docker/api/types" |
| 8 | 8 |
"github.com/docker/docker/api/types/versions/v1p20" |
| 9 |
+ "github.com/docker/docker/container" |
|
| 9 | 10 |
"github.com/docker/docker/daemon/exec" |
| 10 | 11 |
"github.com/docker/docker/daemon/network" |
| 11 | 12 |
"github.com/docker/docker/layer" |
| ... | ... |
@@ -85,7 +86,7 @@ func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, er |
| 85 | 85 |
MacAddress: container.Config.MacAddress, |
| 86 | 86 |
NetworkDisabled: container.Config.NetworkDisabled, |
| 87 | 87 |
ExposedPorts: container.Config.ExposedPorts, |
| 88 |
- VolumeDriver: container.hostConfig.VolumeDriver, |
|
| 88 |
+ VolumeDriver: container.HostConfig.VolumeDriver, |
|
| 89 | 89 |
} |
| 90 | 90 |
networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) |
| 91 | 91 |
|
| ... | ... |
@@ -97,9 +98,9 @@ func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, er |
| 97 | 97 |
}, nil |
| 98 | 98 |
} |
| 99 | 99 |
|
| 100 |
-func (daemon *Daemon) getInspectData(container *Container, size bool) (*types.ContainerJSONBase, error) {
|
|
| 100 |
+func (daemon *Daemon) getInspectData(container *container.Container, size bool) (*types.ContainerJSONBase, error) {
|
|
| 101 | 101 |
// make a copy to play with |
| 102 |
- hostConfig := *container.hostConfig |
|
| 102 |
+ hostConfig := *container.HostConfig |
|
| 103 | 103 |
|
| 104 | 104 |
if children, err := daemon.children(container.Name); err == nil {
|
| 105 | 105 |
for linkAlias, child := range children {
|
| ... | ... |
@@ -143,7 +144,7 @@ func (daemon *Daemon) getInspectData(container *Container, size bool) (*types.Co |
| 143 | 143 |
Driver: container.Driver, |
| 144 | 144 |
MountLabel: container.MountLabel, |
| 145 | 145 |
ProcessLabel: container.ProcessLabel, |
| 146 |
- ExecIDs: container.getExecIDs(), |
|
| 146 |
+ ExecIDs: container.GetExecIDs(), |
|
| 147 | 147 |
HostConfig: &hostConfig, |
| 148 | 148 |
} |
| 149 | 149 |
|
| ... | ... |
@@ -5,10 +5,11 @@ package daemon |
| 5 | 5 |
import ( |
| 6 | 6 |
"github.com/docker/docker/api/types" |
| 7 | 7 |
"github.com/docker/docker/api/types/versions/v1p19" |
| 8 |
+ "github.com/docker/docker/container" |
|
| 8 | 9 |
) |
| 9 | 10 |
|
| 10 | 11 |
// This sets platform-specific fields |
| 11 |
-func setPlatformSpecificContainerFields(container *Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
|
|
| 12 |
+func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
|
|
| 12 | 13 |
contJSONBase.AppArmorProfile = container.AppArmorProfile |
| 13 | 14 |
contJSONBase.ResolvConfPath = container.ResolvConfPath |
| 14 | 15 |
contJSONBase.HostnamePath = container.HostnamePath |
| ... | ... |
@@ -44,11 +45,11 @@ func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, |
| 44 | 44 |
MacAddress: container.Config.MacAddress, |
| 45 | 45 |
NetworkDisabled: container.Config.NetworkDisabled, |
| 46 | 46 |
ExposedPorts: container.Config.ExposedPorts, |
| 47 |
- VolumeDriver: container.hostConfig.VolumeDriver, |
|
| 48 |
- Memory: container.hostConfig.Memory, |
|
| 49 |
- MemorySwap: container.hostConfig.MemorySwap, |
|
| 50 |
- CPUShares: container.hostConfig.CPUShares, |
|
| 51 |
- CPUSet: container.hostConfig.CpusetCpus, |
|
| 47 |
+ VolumeDriver: container.HostConfig.VolumeDriver, |
|
| 48 |
+ Memory: container.HostConfig.Memory, |
|
| 49 |
+ MemorySwap: container.HostConfig.MemorySwap, |
|
| 50 |
+ CPUShares: container.HostConfig.CPUShares, |
|
| 51 |
+ CPUSet: container.HostConfig.CpusetCpus, |
|
| 52 | 52 |
} |
| 53 | 53 |
networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) |
| 54 | 54 |
|
| ... | ... |
@@ -61,7 +62,7 @@ func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, |
| 61 | 61 |
}, nil |
| 62 | 62 |
} |
| 63 | 63 |
|
| 64 |
-func addMountPoints(container *Container) []types.MountPoint {
|
|
| 64 |
+func addMountPoints(container *container.Container) []types.MountPoint {
|
|
| 65 | 65 |
mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) |
| 66 | 66 |
for _, m := range container.MountPoints {
|
| 67 | 67 |
mountPoints = append(mountPoints, types.MountPoint{
|
| ... | ... |
@@ -1,13 +1,16 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 |
-import "github.com/docker/docker/api/types" |
|
| 3 |
+import ( |
|
| 4 |
+ "github.com/docker/docker/api/types" |
|
| 5 |
+ "github.com/docker/docker/container" |
|
| 6 |
+) |
|
| 4 | 7 |
|
| 5 | 8 |
// This sets platform-specific fields |
| 6 |
-func setPlatformSpecificContainerFields(container *Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
|
|
| 9 |
+func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
|
|
| 7 | 10 |
return contJSONBase |
| 8 | 11 |
} |
| 9 | 12 |
|
| 10 |
-func addMountPoints(container *Container) []types.MountPoint {
|
|
| 13 |
+func addMountPoints(container *container.Container) []types.MountPoint {
|
|
| 11 | 14 |
mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) |
| 12 | 15 |
for _, m := range container.MountPoints {
|
| 13 | 16 |
mountPoints = append(mountPoints, types.MountPoint{
|
| ... | ... |
@@ -7,6 +7,7 @@ import ( |
| 7 | 7 |
"time" |
| 8 | 8 |
|
| 9 | 9 |
"github.com/Sirupsen/logrus" |
| 10 |
+ "github.com/docker/docker/container" |
|
| 10 | 11 |
derr "github.com/docker/docker/errors" |
| 11 | 12 |
"github.com/docker/docker/pkg/signal" |
| 12 | 13 |
) |
| ... | ... |
@@ -37,7 +38,7 @@ func (daemon *Daemon) ContainerKill(name string, sig uint64) error {
|
| 37 | 37 |
// to send the signal. An error is returned if the container is paused |
| 38 | 38 |
// or not running, or if there is a problem returned from the |
| 39 | 39 |
// underlying kill command. |
| 40 |
-func (daemon *Daemon) killWithSignal(container *Container, sig int) error {
|
|
| 40 |
+func (daemon *Daemon) killWithSignal(container *container.Container, sig int) error {
|
|
| 41 | 41 |
logrus.Debugf("Sending %d to %s", sig, container.ID)
|
| 42 | 42 |
container.Lock() |
| 43 | 43 |
defer container.Unlock() |
| ... | ... |
@@ -69,7 +70,7 @@ func (daemon *Daemon) killWithSignal(container *Container, sig int) error {
|
| 69 | 69 |
} |
| 70 | 70 |
|
| 71 | 71 |
// Kill forcefully terminates a container. |
| 72 |
-func (daemon *Daemon) Kill(container *Container) error {
|
|
| 72 |
+func (daemon *Daemon) Kill(container *container.Container) error {
|
|
| 73 | 73 |
if !container.IsRunning() {
|
| 74 | 74 |
return derr.ErrorCodeNotRunning.WithArgs(container.ID) |
| 75 | 75 |
} |
| ... | ... |
@@ -105,7 +106,7 @@ func (daemon *Daemon) Kill(container *Container) error {
|
| 105 | 105 |
} |
| 106 | 106 |
|
| 107 | 107 |
// killPossibleDeadProcess is a wrapper around killSig() suppressing "no such process" error. |

| 108 |
-func (daemon *Daemon) killPossiblyDeadProcess(container *Container, sig int) error {
|
|
| 108 |
+func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, sig int) error {
|
|
| 109 | 109 |
err := daemon.killWithSignal(container, sig) |
| 110 | 110 |
if err == syscall.ESRCH {
|
| 111 | 111 |
logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.GetPID(), sig)
|
| ... | ... |
@@ -8,6 +8,7 @@ import ( |
| 8 | 8 |
|
| 9 | 9 |
"github.com/Sirupsen/logrus" |
| 10 | 10 |
"github.com/docker/docker/api/types" |
| 11 |
+ "github.com/docker/docker/container" |
|
| 11 | 12 |
"github.com/docker/docker/image" |
| 12 | 13 |
"github.com/docker/docker/pkg/graphdb" |
| 13 | 14 |
"github.com/docker/docker/pkg/nat" |
| ... | ... |
@@ -19,7 +20,7 @@ type iterationAction int |
| 19 | 19 |
|
| 20 | 20 |
// containerReducer represents a reducer for a container. |
| 21 | 21 |
// Returns the object to serialize by the api. |
| 22 |
-type containerReducer func(*Container, *listContext) (*types.Container, error) |
|
| 22 |
+type containerReducer func(*container.Container, *listContext) (*types.Container, error) |
|
| 23 | 23 |
|
| 24 | 24 |
const ( |
| 25 | 25 |
// includeContainer is the action to include a container in the reducer. |
| ... | ... |
@@ -34,7 +35,7 @@ const ( |
| 34 | 34 |
var errStopIteration = errors.New("container list iteration stopped")
|
| 35 | 35 |
|
| 36 | 36 |
// List returns an array of all containers registered in the daemon. |
| 37 |
-func (daemon *Daemon) List() []*Container {
|
|
| 37 |
+func (daemon *Daemon) List() []*container.Container {
|
|
| 38 | 38 |
return daemon.containers.List() |
| 39 | 39 |
} |
| 40 | 40 |
|
| ... | ... |
@@ -71,10 +72,10 @@ type listContext struct {
|
| 71 | 71 |
exitAllowed []int |
| 72 | 72 |
// beforeFilter is a filter to ignore containers that appear before the one given |
| 73 | 73 |
// this is used for --filter=before= and --before=, the latter is deprecated. |
| 74 |
- beforeFilter *Container |
|
| 74 |
+ beforeFilter *container.Container |
|
| 75 | 75 |
// sinceFilter is a filter to stop the filtering when the iterator arrive to the given container |
| 76 | 76 |
// this is used for --filter=since= and --since=, the latter is deprecated. |
| 77 |
- sinceFilter *Container |
|
| 77 |
+ sinceFilter *container.Container |
|
| 78 | 78 |
// ContainersConfig is the filters set by the user |
| 79 | 79 |
*ContainersConfig |
| 80 | 80 |
} |
| ... | ... |
@@ -110,7 +111,7 @@ func (daemon *Daemon) reduceContainers(config *ContainersConfig, reducer contain |
| 110 | 110 |
} |
| 111 | 111 |
|
| 112 | 112 |
// reducePsContainer is the basic representation for a container as expected by the ps command. |
| 113 |
-func (daemon *Daemon) reducePsContainer(container *Container, ctx *listContext, reducer containerReducer) (*types.Container, error) {
|
|
| 113 |
+func (daemon *Daemon) reducePsContainer(container *container.Container, ctx *listContext, reducer containerReducer) (*types.Container, error) {
|
|
| 114 | 114 |
container.Lock() |
| 115 | 115 |
defer container.Unlock() |
| 116 | 116 |
|
| ... | ... |
@@ -148,7 +149,7 @@ func (daemon *Daemon) foldFilter(config *ContainersConfig) (*listContext, error) |
| 148 | 148 |
} |
| 149 | 149 |
|
| 150 | 150 |
err = psFilters.WalkValues("status", func(value string) error {
|
| 151 |
- if !isValidStateString(value) {
|
|
| 151 |
+ if !container.IsValidStateString(value) {
|
|
| 152 | 152 |
return fmt.Errorf("Unrecognised filter value for status: %s", value)
|
| 153 | 153 |
} |
| 154 | 154 |
|
| ... | ... |
@@ -159,7 +160,7 @@ func (daemon *Daemon) foldFilter(config *ContainersConfig) (*listContext, error) |
| 159 | 159 |
return nil, err |
| 160 | 160 |
} |
| 161 | 161 |
|
| 162 |
- var beforeContFilter, sinceContFilter *Container |
|
| 162 |
+ var beforeContFilter, sinceContFilter *container.Container |
|
| 163 | 163 |
err = psFilters.WalkValues("before", func(value string) error {
|
| 164 | 164 |
beforeContFilter, err = daemon.Get(value) |
| 165 | 165 |
return err |
| ... | ... |
@@ -230,7 +231,7 @@ func (daemon *Daemon) foldFilter(config *ContainersConfig) (*listContext, error) |
| 230 | 230 |
|
| 231 | 231 |
// includeContainerInList decides whether a containers should be include in the output or not based in the filter. |
| 232 | 232 |
// It also decides if the iteration should be stopped or not. |
| 233 |
-func includeContainerInList(container *Container, ctx *listContext) iterationAction {
|
|
| 233 |
+func includeContainerInList(container *container.Container, ctx *listContext) iterationAction {
|
|
| 234 | 234 |
// Do not include container if it's stopped and we're not filters |
| 235 | 235 |
if !container.Running && !ctx.All && ctx.Limit <= 0 && ctx.beforeFilter == nil && ctx.sinceFilter == nil {
|
| 236 | 236 |
return excludeContainer |
| ... | ... |
@@ -309,7 +310,7 @@ func includeContainerInList(container *Container, ctx *listContext) iterationAct |
| 309 | 309 |
} |
| 310 | 310 |
|
| 311 | 311 |
// transformContainer generates the container type expected by the docker ps command. |
| 312 |
-func (daemon *Daemon) transformContainer(container *Container, ctx *listContext) (*types.Container, error) {
|
|
| 312 |
+func (daemon *Daemon) transformContainer(container *container.Container, ctx *listContext) (*types.Container, error) {
|
|
| 313 | 313 |
newC := &types.Container{
|
| 314 | 314 |
ID: container.ID, |
| 315 | 315 |
Names: ctx.names[container.ID], |
| ... | ... |
@@ -349,7 +350,7 @@ func (daemon *Daemon) transformContainer(container *Container, ctx *listContext) |
| 349 | 349 |
} |
| 350 | 350 |
newC.Created = container.Created.Unix() |
| 351 | 351 |
newC.Status = container.State.String() |
| 352 |
- newC.HostConfig.NetworkMode = string(container.hostConfig.NetworkMode) |
|
| 352 |
+ newC.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode) |
|
| 353 | 353 |
|
| 354 | 354 |
newC.Ports = []types.Port{}
|
| 355 | 355 |
for port, bindings := range container.NetworkSettings.Ports {
|
| ... | ... |
@@ -2,8 +2,10 @@ |
| 2 | 2 |
|
| 3 | 3 |
package daemon |
| 4 | 4 |
|
| 5 |
+import "github.com/docker/docker/container" |
|
| 6 |
+ |
|
| 5 | 7 |
// excludeByIsolation is a platform specific helper function to support PS |
| 6 | 8 |
// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. |
| 7 |
-func excludeByIsolation(container *Container, ctx *listContext) iterationAction {
|
|
| 9 |
+func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction {
|
|
| 8 | 10 |
return includeContainer |
| 9 | 11 |
} |
| ... | ... |
@@ -1,11 +1,15 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 |
-import "strings" |
|
| 3 |
+import ( |
|
| 4 |
+ "strings" |
|
| 5 |
+ |
|
| 6 |
+ "github.com/docker/docker/container" |
|
| 7 |
+) |
|
| 4 | 8 |
|
| 5 | 9 |
// excludeByIsolation is a platform specific helper function to support PS |
| 6 | 10 |
// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. |
| 7 |
-func excludeByIsolation(container *Container, ctx *listContext) iterationAction {
|
|
| 8 |
- i := strings.ToLower(string(container.hostConfig.Isolation)) |
|
| 11 |
+func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction {
|
|
| 12 |
+ i := strings.ToLower(string(container.HostConfig.Isolation)) |
|
| 9 | 13 |
if i == "" {
|
| 10 | 14 |
i = "default" |
| 11 | 15 |
} |
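excludeByIsolation now reads the exported HostConfig.Isolation and normalizes an empty value to "default" before matching the ps filter. A trimmed sketch of that decision; the iterationAction values and the filter lookup are simplified here to a plain string comparison:

package main

import (
	"fmt"
	"strings"
)

// excludeByIsolation sketches the Windows-only ps filter above: an empty
// isolation value is treated as "default", then compared against the
// value requested with --filter isolation=<value>.
// It returns true when the container should be excluded from the listing.
func excludeByIsolation(isolation, filter string) bool {
	i := strings.ToLower(isolation)
	if i == "" {
		i = "default"
	}
	return filter != "" && filter != i
}

func main() {
	fmt.Println(excludeByIsolation("", "default"))       // false: kept
	fmt.Println(excludeByIsolation("hyperv", "default")) // true: excluded
}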
| ... | ... |
@@ -6,6 +6,7 @@ import ( |
| 6 | 6 |
"time" |
| 7 | 7 |
|
| 8 | 8 |
"github.com/Sirupsen/logrus" |
| 9 |
+ "github.com/docker/docker/container" |
|
| 9 | 10 |
"github.com/docker/docker/daemon/logger" |
| 10 | 11 |
"github.com/docker/docker/daemon/logger/jsonfilelog" |
| 11 | 12 |
derr "github.com/docker/docker/errors" |
| ... | ... |
@@ -99,11 +100,11 @@ func (daemon *Daemon) ContainerLogs(containerName string, config *ContainerLogsC |
| 99 | 99 |
} |
| 100 | 100 |
} |
| 101 | 101 |
|
| 102 |
-func (daemon *Daemon) getLogger(container *Container) (logger.Logger, error) {
|
|
| 103 |
- if container.logDriver != nil && container.IsRunning() {
|
|
| 104 |
- return container.logDriver, nil |
|
| 102 |
+func (daemon *Daemon) getLogger(container *container.Container) (logger.Logger, error) {
|
|
| 103 |
+ if container.LogDriver != nil && container.IsRunning() {
|
|
| 104 |
+ return container.LogDriver, nil |
|
| 105 | 105 |
} |
| 106 |
- cfg := container.getLogConfig(daemon.defaultLogConfig) |
|
| 106 |
+ cfg := container.GetLogConfig(daemon.defaultLogConfig) |
|
| 107 | 107 |
if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil {
|
| 108 | 108 |
return nil, err |
| 109 | 109 |
} |
| ... | ... |
@@ -111,8 +112,8 @@ func (daemon *Daemon) getLogger(container *Container) (logger.Logger, error) {
|
| 111 | 111 |
} |
| 112 | 112 |
|
| 113 | 113 |
// StartLogging initializes and starts the container logging stream. |
| 114 |
-func (daemon *Daemon) StartLogging(container *Container) error {
|
|
| 115 |
- cfg := container.getLogConfig(daemon.defaultLogConfig) |
|
| 114 |
+func (daemon *Daemon) StartLogging(container *container.Container) error {
|
|
| 115 |
+ cfg := container.GetLogConfig(daemon.defaultLogConfig) |
|
| 116 | 116 |
if cfg.Type == "none" {
|
| 117 | 117 |
return nil // do not start logging routines |
| 118 | 118 |
} |
| ... | ... |
@@ -126,9 +127,9 @@ func (daemon *Daemon) StartLogging(container *Container) error {
|
| 126 | 126 |
} |
| 127 | 127 |
|
| 128 | 128 |
copier := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l)
|
| 129 |
- container.logCopier = copier |
|
| 129 |
+ container.LogCopier = copier |
|
| 130 | 130 |
copier.Run() |
| 131 |
- container.logDriver = l |
|
| 131 |
+ container.LogDriver = l |
|
| 132 | 132 |
|
| 133 | 133 |
// set LogPath field only for json-file logdriver |
| 134 | 134 |
if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok {
|
| 135 | 135 |
deleted file mode 100644 |
| ... | ... |
@@ -1,382 +0,0 @@ |
| 1 |
-package daemon |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "io" |
|
| 5 |
- "os/exec" |
|
| 6 |
- "strings" |
|
| 7 |
- "sync" |
|
| 8 |
- "syscall" |
|
| 9 |
- "time" |
|
| 10 |
- |
|
| 11 |
- "github.com/Sirupsen/logrus" |
|
| 12 |
- "github.com/docker/docker/daemon/execdriver" |
|
| 13 |
- derr "github.com/docker/docker/errors" |
|
| 14 |
- "github.com/docker/docker/pkg/stringid" |
|
| 15 |
- "github.com/docker/docker/runconfig" |
|
| 16 |
- "github.com/docker/docker/utils" |
|
| 17 |
-) |
|
| 18 |
- |
|
| 19 |
-const ( |
|
| 20 |
- defaultTimeIncrement = 100 |
|
| 21 |
- loggerCloseTimeout = 10 * time.Second |
|
| 22 |
-) |
|
| 23 |
- |
|
| 24 |
-// containerSupervisor defines the interface that a supervisor must implement |
|
| 25 |
-type containerSupervisor interface {
|
|
| 26 |
- // LogContainerEvent generates events related to a given container |
|
| 27 |
- LogContainerEvent(*Container, string) |
|
| 28 |
- // Cleanup ensures that the container is properly unmounted |
|
| 29 |
- Cleanup(*Container) |
|
| 30 |
- // StartLogging starts the logging driver for the container |
|
| 31 |
- StartLogging(*Container) error |
|
| 32 |
- // Run starts a container |
|
| 33 |
- Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) |
|
| 34 |
- // IsShuttingDown tells whether the supervisor is shutting down or not |
|
| 35 |
- IsShuttingDown() bool |
|
| 36 |
-} |
|
| 37 |
- |
|
| 38 |
-// containerMonitor monitors the execution of a container's main process. |
|
| 39 |
-// If a restart policy is specified for the container the monitor will ensure that the |
|
| 40 |
-// process is restarted based on the rules of the policy. When the container is finally stopped |
|
| 41 |
-// the monitor will reset and cleanup any of the container resources such as networking allocations |
|
| 42 |
-// and the rootfs |
|
| 43 |
-type containerMonitor struct {
|
|
| 44 |
- mux sync.Mutex |
|
| 45 |
- |
|
| 46 |
- // supervisor keeps track of the container and the events it generates |
|
| 47 |
- supervisor containerSupervisor |
|
| 48 |
- |
|
| 49 |
- // container is the container being monitored |
|
| 50 |
- container *Container |
|
| 51 |
- |
|
| 52 |
- // restartPolicy is the current policy being applied to the container monitor |
|
| 53 |
- restartPolicy runconfig.RestartPolicy |
|
| 54 |
- |
|
| 55 |
- // failureCount is the number of times the container has failed to |
|
| 56 |
- // start in a row |
|
| 57 |
- failureCount int |
|
| 58 |
- |
|
| 59 |
- // shouldStop signals the monitor that the next time the container exits it is |
|
| 60 |
- // either because docker or the user asked for the container to be stopped |
|
| 61 |
- shouldStop bool |
|
| 62 |
- |
|
| 63 |
- // startSignal is a channel that is closes after the container initially starts |
|
| 64 |
- startSignal chan struct{}
|
|
| 65 |
- |
|
| 66 |
- // stopChan is used to signal to the monitor whenever there is a wait for the |
|
| 67 |
- // next restart so that the timeIncrement is not honored and the user is not |
|
| 68 |
- // left waiting for nothing to happen during this time |
|
| 69 |
- stopChan chan struct{}
|
|
| 70 |
- |
|
| 71 |
- // timeIncrement is the amount of time to wait between restarts |
|
| 72 |
- // this is in milliseconds |
|
| 73 |
- timeIncrement int |
|
| 74 |
- |
|
| 75 |
- // lastStartTime is the time which the monitor last exec'd the container's process |
|
| 76 |
- lastStartTime time.Time |
|
| 77 |
-} |
|
| 78 |
- |
|
| 79 |
-// newContainerMonitor returns an initialized containerMonitor for the provided container |
|
| 80 |
-// honoring the provided restart policy |
|
| 81 |
-func (daemon *Daemon) newContainerMonitor(container *Container, policy runconfig.RestartPolicy) *containerMonitor {
|
|
| 82 |
- return &containerMonitor{
|
|
| 83 |
- supervisor: daemon, |
|
| 84 |
- container: container, |
|
| 85 |
- restartPolicy: policy, |
|
| 86 |
- timeIncrement: defaultTimeIncrement, |
|
| 87 |
- stopChan: make(chan struct{}),
|
|
| 88 |
- startSignal: make(chan struct{}),
|
|
| 89 |
- } |
|
| 90 |
-} |
|
| 91 |
- |
|
| 92 |
-// Stop signals to the container monitor that it should stop monitoring the container |
|
| 93 |
-// for exits the next time the process dies |
|
| 94 |
-func (m *containerMonitor) ExitOnNext() {
|
|
| 95 |
- m.mux.Lock() |
|
| 96 |
- |
|
| 97 |
- // we need to protect against a double close of the channel when stop is called |
|
| 98 |
- // twice or else we will get a panic |
|
| 99 |
- if !m.shouldStop {
|
|
| 100 |
- m.shouldStop = true |
|
| 101 |
- close(m.stopChan) |
|
| 102 |
- } |
|
| 103 |
- |
|
| 104 |
- m.mux.Unlock() |
|
| 105 |
-} |
|
| 106 |
- |
|
| 107 |
-// Close closes the container's resources such as networking allocations and |
|
| 108 |
-// unmounts the container's root filesystem |
|
| 109 |
-func (m *containerMonitor) Close() error {
|
|
| 110 |
- // Cleanup networking and mounts |
|
| 111 |
- m.supervisor.Cleanup(m.container) |
|
| 112 |
- |
|
| 113 |
- // FIXME: here is a race condition between two RUN instructions in Dockerfile |
|
| 114 |
- // because they share the same runconfig and change the image. Must be fixed |
|
| 115 |
- // in builder/builder.go |
|
| 116 |
- if err := m.container.toDisk(); err != nil {
|
|
| 117 |
- logrus.Errorf("Error dumping container %s state to disk: %s", m.container.ID, err)
|
|
| 118 |
- |
|
| 119 |
- return err |
|
| 120 |
- } |
|
| 121 |
- |
|
| 122 |
- return nil |
|
| 123 |
-} |
|
| 124 |
- |
|
| 125 |
-// Start starts the containers process and monitors it according to the restart policy |
|
| 126 |
-func (m *containerMonitor) Start() error {
|
|
| 127 |
- var ( |
|
| 128 |
- err error |
|
| 129 |
- exitStatus execdriver.ExitStatus |
|
| 130 |
- // this variable indicates where we are in the execution flow: |
|
| 131 |
- // before Run or after |
|
| 132 |
- afterRun bool |
|
| 133 |
- ) |
|
| 134 |
- |
|
| 135 |
- // ensure that when the monitor finally exits we release the networking and unmount the rootfs |
|
| 136 |
- defer func() {
|
|
| 137 |
- if afterRun {
|
|
| 138 |
- m.container.Lock() |
|
| 139 |
- defer m.container.Unlock() |
|
| 140 |
- m.container.setStopped(&exitStatus) |
|
| 141 |
- } |
|
| 142 |
- m.Close() |
|
| 143 |
- }() |
|
| 144 |
- // reset stopped flag |
|
| 145 |
- if m.container.HasBeenManuallyStopped {
|
|
| 146 |
- m.container.HasBeenManuallyStopped = false |
|
| 147 |
- } |
|
| 148 |
- |
|
| 149 |
- // reset the restart count |
|
| 150 |
- m.container.RestartCount = -1 |
|
| 151 |
- |
|
| 152 |
- for {
|
|
| 153 |
- m.container.RestartCount++ |
|
| 154 |
- |
|
| 155 |
- if err := m.supervisor.StartLogging(m.container); err != nil {
|
|
| 156 |
- m.resetContainer(false) |
|
| 157 |
- |
|
| 158 |
- return err |
|
| 159 |
- } |
|
| 160 |
- |
|
| 161 |
- pipes := execdriver.NewPipes(m.container.Stdin(), m.container.Stdout(), m.container.Stderr(), m.container.Config.OpenStdin) |
|
| 162 |
- |
|
| 163 |
- m.logEvent("start")
|
|
| 164 |
- |
|
| 165 |
- m.lastStartTime = time.Now() |
|
| 166 |
- |
|
| 167 |
- if exitStatus, err = m.supervisor.Run(m.container, pipes, m.callback); err != nil {
|
|
| 168 |
- // if we receive an internal error from the initial start of a container then let's |
|
| 169 |
- // return it instead of entering the restart loop |
|
| 170 |
- // set to 127 for container cmd not found/does not exist |
|
| 171 |
- if strings.Contains(err.Error(), "executable file not found") || |
|
| 172 |
- strings.Contains(err.Error(), "no such file or directory") || |
|
| 173 |
- strings.Contains(err.Error(), "system cannot find the file specified") {
|
|
| 174 |
- if m.container.RestartCount == 0 {
|
|
| 175 |
- m.container.ExitCode = 127 |
|
| 176 |
- m.resetContainer(false) |
|
| 177 |
- return derr.ErrorCodeCmdNotFound |
|
| 178 |
- } |
|
| 179 |
- } |
|
| 180 |
- // set to 126 for container cmd can't be invoked errors |
|
| 181 |
- if strings.Contains(err.Error(), syscall.EACCES.Error()) {
|
|
| 182 |
- if m.container.RestartCount == 0 {
|
|
| 183 |
- m.container.ExitCode = 126 |
|
| 184 |
- m.resetContainer(false) |
|
| 185 |
- return derr.ErrorCodeCmdCouldNotBeInvoked |
|
| 186 |
- } |
|
| 187 |
- } |
|
| 188 |
- |
|
| 189 |
- if m.container.RestartCount == 0 {
|
|
| 190 |
- m.container.ExitCode = -1 |
|
| 191 |
- m.resetContainer(false) |
|
| 192 |
- |
|
| 193 |
- return derr.ErrorCodeCantStart.WithArgs(m.container.ID, utils.GetErrorMessage(err)) |
|
| 194 |
- } |
|
| 195 |
- |
|
| 196 |
- logrus.Errorf("Error running container: %s", err)
|
|
| 197 |
- } |
|
| 198 |
- |
|
| 199 |
- // here container.Lock is already lost |
|
| 200 |
- afterRun = true |
|
| 201 |
- |
|
| 202 |
- m.resetMonitor(err == nil && exitStatus.ExitCode == 0) |
|
| 203 |
- |
|
| 204 |
- if m.shouldRestart(exitStatus.ExitCode) {
|
|
| 205 |
- m.container.setRestarting(&exitStatus) |
|
| 206 |
- m.logEvent("die")
|
|
| 207 |
- m.resetContainer(true) |
|
| 208 |
- |
|
| 209 |
- // sleep with a small time increment between each restart to help avoid issues caused by quickly |
|
| 210 |
- // restarting the container because of some types of errors ( networking cut out, etc... ) |
|
| 211 |
- m.waitForNextRestart() |
|
| 212 |
- |
|
| 213 |
- // we need to check this before reentering the loop because the waitForNextRestart could have |
|
| 214 |
- // been terminated by a request from a user |
|
| 215 |
- if m.shouldStop {
|
|
| 216 |
- return err |
|
| 217 |
- } |
|
| 218 |
- continue |
|
| 219 |
- } |
|
| 220 |
- |
|
| 221 |
- m.logEvent("die")
|
|
| 222 |
- m.resetContainer(true) |
|
| 223 |
- return err |
|
| 224 |
- } |
|
| 225 |
-} |
|
| 226 |
- |
|
| 227 |
-// resetMonitor resets the stateful fields on the containerMonitor based on the |
|
| 228 |
-// previous runs success or failure. Regardless of success, if the container had |
|
| 229 |
-// an execution time of more than 10s then reset the timer back to the default |
|
| 230 |
-func (m *containerMonitor) resetMonitor(successful bool) {
|
|
| 231 |
- executionTime := time.Now().Sub(m.lastStartTime).Seconds() |
|
| 232 |
- |
|
| 233 |
- if executionTime > 10 {
|
|
| 234 |
- m.timeIncrement = defaultTimeIncrement |
|
| 235 |
- } else {
|
|
| 236 |
- // otherwise we need to increment the amount of time we wait before restarting |
|
| 237 |
- // the process. We will build up by multiplying the increment by 2 |
|
| 238 |
- m.timeIncrement *= 2 |
|
| 239 |
- } |
|
| 240 |
- |
|
| 241 |
- // the container exited successfully so we need to reset the failure counter |
|
| 242 |
- if successful {
|
|
| 243 |
- m.failureCount = 0 |
|
| 244 |
- } else {
|
|
| 245 |
- m.failureCount++ |
|
| 246 |
- } |
|
| 247 |
-} |
|
| 248 |
- |
|
| 249 |
-// waitForNextRestart waits with the default time increment to restart the container unless |
|
| 250 |
-// a user or docker asks for the container to be stopped |
|
| 251 |
-func (m *containerMonitor) waitForNextRestart() {
|
|
| 252 |
- select {
|
|
| 253 |
- case <-time.After(time.Duration(m.timeIncrement) * time.Millisecond): |
|
| 254 |
- case <-m.stopChan: |
|
| 255 |
- } |
|
| 256 |
-} |
|
| 257 |
- |
|
| 258 |
-// shouldRestart checks the restart policy and applies the rules to determine if |
|
| 259 |
-// the container's process should be restarted |
|
| 260 |
-func (m *containerMonitor) shouldRestart(exitCode int) bool {
|
|
| 261 |
- m.mux.Lock() |
|
| 262 |
- defer m.mux.Unlock() |
|
| 263 |
- |
|
| 264 |
- // do not restart if the user or docker has requested that this container be stopped |
|
| 265 |
- if m.shouldStop {
|
|
| 266 |
- m.container.HasBeenManuallyStopped = !m.supervisor.IsShuttingDown() |
|
| 267 |
- return false |
|
| 268 |
- } |
|
| 269 |
- |
|
| 270 |
- switch {
|
|
| 271 |
- case m.restartPolicy.IsAlways(), m.restartPolicy.IsUnlessStopped(): |
|
| 272 |
- return true |
|
| 273 |
- case m.restartPolicy.IsOnFailure(): |
|
| 274 |
- // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count |
|
| 275 |
- if max := m.restartPolicy.MaximumRetryCount; max != 0 && m.failureCount > max {
|
|
| 276 |
- logrus.Debugf("stopping restart of container %s because maximum failure count of %d has been reached",
|
|
| 277 |
- stringid.TruncateID(m.container.ID), max) |
|
| 278 |
- return false |
|
| 279 |
- } |
|
| 280 |
- |
|
| 281 |
- return exitCode != 0 |
|
| 282 |
- } |
|
| 283 |
- |
|
| 284 |
- return false |
|
| 285 |
-} |
|
| 286 |
- |
|
| 287 |
-// callback ensures that the container's state is properly updated after we |
|
| 288 |
-// receive an ack from the execution drivers |
|
| 289 |
-func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
|
|
| 290 |
- go func() {
|
|
| 291 |
- _, ok := <-chOOM |
|
| 292 |
- if ok {
|
|
| 293 |
- m.logEvent("oom")
|
|
| 294 |
- } |
|
| 295 |
- }() |
|
| 296 |
- |
|
| 297 |
- if processConfig.Tty {
|
|
| 298 |
- // The callback is called after the process Start() |
|
| 299 |
- // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave |
|
| 300 |
- // which we close here. |
|
| 301 |
- if c, ok := processConfig.Stdout.(io.Closer); ok {
|
|
| 302 |
- c.Close() |
|
| 303 |
- } |
|
| 304 |
- } |
|
| 305 |
- |
|
| 306 |
- m.container.setRunning(pid) |
|
| 307 |
- |
|
| 308 |
- // signal that the process has started |
|
| 309 |
- // close channel only if not closed |
|
| 310 |
- select {
|
|
| 311 |
- case <-m.startSignal: |
|
| 312 |
- default: |
|
| 313 |
- close(m.startSignal) |
|
| 314 |
- } |
|
| 315 |
- |
|
| 316 |
- if err := m.container.toDiskLocking(); err != nil {
|
|
| 317 |
- logrus.Errorf("Error saving container to disk: %v", err)
|
|
| 318 |
- } |
|
| 319 |
- return nil |
|
| 320 |
-} |
|
| 321 |
- |
|
| 322 |
-// resetContainer resets the container's IO and ensures that the command is able to be executed again |
|
| 323 |
-// by copying the data into a new struct |
|
| 324 |
-// if lock is true, then the container is locked during the reset |
|
| 325 |
-func (m *containerMonitor) resetContainer(lock bool) {
|
|
| 326 |
- container := m.container |
|
| 327 |
- if lock {
|
|
| 328 |
- container.Lock() |
|
| 329 |
- defer container.Unlock() |
|
| 330 |
- } |
|
| 331 |
- |
|
| 332 |
- if err := container.CloseStreams(); err != nil {
|
|
| 333 |
- logrus.Errorf("%s: %s", container.ID, err)
|
|
| 334 |
- } |
|
| 335 |
- |
|
| 336 |
- if container.command != nil && container.command.ProcessConfig.Terminal != nil {
|
|
| 337 |
- if err := container.command.ProcessConfig.Terminal.Close(); err != nil {
|
|
| 338 |
- logrus.Errorf("%s: Error closing terminal: %s", container.ID, err)
|
|
| 339 |
- } |
|
| 340 |
- } |
|
| 341 |
- |
|
| 342 |
- // Re-create a brand new stdin pipe once the container has exited |
|
| 343 |
- if container.Config.OpenStdin {
|
|
| 344 |
- container.NewInputPipes() |
|
| 345 |
- } |
|
| 346 |
- |
|
| 347 |
- if container.logDriver != nil {
|
|
| 348 |
- if container.logCopier != nil {
|
|
| 349 |
- exit := make(chan struct{})
|
|
| 350 |
- go func() {
|
|
| 351 |
- container.logCopier.Wait() |
|
| 352 |
- close(exit) |
|
| 353 |
- }() |
|
| 354 |
- select {
|
|
| 355 |
- case <-time.After(loggerCloseTimeout): |
|
| 356 |
- logrus.Warnf("Logger didn't exit in time: logs may be truncated")
|
|
| 357 |
- case <-exit: |
|
| 358 |
- } |
|
| 359 |
- } |
|
| 360 |
- container.logDriver.Close() |
|
| 361 |
- container.logCopier = nil |
|
| 362 |
- container.logDriver = nil |
|
| 363 |
- } |
|
| 364 |
- |
|
| 365 |
- c := container.command.ProcessConfig.Cmd |
|
| 366 |
- |
|
| 367 |
- container.command.ProcessConfig.Cmd = exec.Cmd{
|
|
| 368 |
- Stdin: c.Stdin, |
|
| 369 |
- Stdout: c.Stdout, |
|
| 370 |
- Stderr: c.Stderr, |
|
| 371 |
- Path: c.Path, |
|
| 372 |
- Env: c.Env, |
|
| 373 |
- ExtraFiles: c.ExtraFiles, |
|
| 374 |
- Args: c.Args, |
|
| 375 |
- Dir: c.Dir, |
|
| 376 |
- SysProcAttr: c.SysProcAttr, |
|
| 377 |
- } |
|
| 378 |
-} |
|
| 379 |
- |
|
| 380 |
-func (m *containerMonitor) logEvent(action string) {
|
|
| 381 |
- m.supervisor.LogContainerEvent(m.container, action) |
|
| 382 |
-} |
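The monitor deleted above implements the restart policy with an exponential backoff: resetMonitor doubles the wait between attempts after every short-lived run and snaps back to the default increment once a run lasts longer than ten seconds, while waitForNextRestart sleeps for that increment unless stopChan is closed first. A minimal, self-contained Go sketch of just that backoff rule follows; the names and the 100ms default are illustrative, not the relocated implementation.

package main

import (
	"fmt"
	"time"
)

const defaultDelay = 100 * time.Millisecond // assumed default increment

// backoff mirrors the delay bookkeeping described above: double after a
// short-lived run, reset once the container stayed up for over 10 seconds.
type backoff struct {
	delay time.Duration
}

func (b *backoff) next(executionTime time.Duration) time.Duration {
	if executionTime > 10*time.Second {
		b.delay = defaultDelay
	} else {
		b.delay *= 2
	}
	return b.delay
}

func main() {
	b := &backoff{delay: defaultDelay}
	fmt.Println(b.next(1 * time.Second))  // 200ms: failed quickly, delay doubles
	fmt.Println(b.next(2 * time.Second))  // 400ms: still failing quickly
	fmt.Println(b.next(30 * time.Second)) // 100ms: a long run resets the delay
}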
| ... | ... |
@@ -3,11 +3,12 @@ package daemon |
| 3 | 3 |
import ( |
| 4 | 4 |
"strings" |
| 5 | 5 |
|
| 6 |
+ "github.com/docker/docker/container" |
|
| 6 | 7 |
derr "github.com/docker/docker/errors" |
| 7 | 8 |
volumestore "github.com/docker/docker/volume/store" |
| 8 | 9 |
) |
| 9 | 10 |
|
| 10 |
-func (daemon *Daemon) prepareMountPoints(container *Container) error {
|
|
| 11 |
+func (daemon *Daemon) prepareMountPoints(container *container.Container) error {
|
|
| 11 | 12 |
for _, config := range container.MountPoints {
|
| 12 | 13 |
if len(config.Driver) > 0 {
|
| 13 | 14 |
v, err := daemon.createVolume(config.Name, config.Driver, nil) |
| ... | ... |
@@ -20,7 +21,7 @@ func (daemon *Daemon) prepareMountPoints(container *Container) error {
|
| 20 | 20 |
return nil |
| 21 | 21 |
} |
| 22 | 22 |
|
| 23 |
-func (daemon *Daemon) removeMountPoints(container *Container, rm bool) error {
|
|
| 23 |
+func (daemon *Daemon) removeMountPoints(container *container.Container, rm bool) error {
|
|
| 24 | 24 |
var rmErrors []string |
| 25 | 25 |
for _, m := range container.MountPoints {
|
| 26 | 26 |
if m.Volume == nil {
|
| ... | ... |
@@ -1,6 +1,7 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "github.com/docker/docker/container" |
|
| 4 | 5 |
derr "github.com/docker/docker/errors" |
| 5 | 6 |
) |
| 6 | 7 |
|
| ... | ... |
@@ -20,7 +21,7 @@ func (daemon *Daemon) ContainerPause(name string) error {
|
| 20 | 20 |
|
| 21 | 21 |
// containerPause pauses the container execution without stopping the process. |
| 22 | 22 |
// The execution can be resumed by calling containerUnpause. |
| 23 |
-func (daemon *Daemon) containerPause(container *Container) error {
|
|
| 23 |
+func (daemon *Daemon) containerPause(container *container.Container) error {
|
|
| 24 | 24 |
container.Lock() |
| 25 | 25 |
defer container.Unlock() |
| 26 | 26 |
|
| ... | ... |
@@ -34,7 +35,7 @@ func (daemon *Daemon) containerPause(container *Container) error {
|
| 34 | 34 |
return derr.ErrorCodeAlreadyPaused.WithArgs(container.ID) |
| 35 | 35 |
} |
| 36 | 36 |
|
| 37 |
- if err := daemon.execDriver.Pause(container.command); err != nil {
|
|
| 37 |
+ if err := daemon.execDriver.Pause(container.Command); err != nil {
|
|
| 38 | 38 |
return err |
| 39 | 39 |
} |
| 40 | 40 |
container.Paused = true |
| ... | ... |
@@ -13,17 +13,15 @@ import ( |
| 13 | 13 |
// reserved. |
| 14 | 14 |
func (daemon *Daemon) ContainerRename(oldName, newName string) error {
|
| 15 | 15 |
var ( |
| 16 |
- err error |
|
| 17 |
- sid string |
|
| 18 |
- sb libnetwork.Sandbox |
|
| 19 |
- container *Container |
|
| 16 |
+ sid string |
|
| 17 |
+ sb libnetwork.Sandbox |
|
| 20 | 18 |
) |
| 21 | 19 |
|
| 22 | 20 |
if oldName == "" || newName == "" {
|
| 23 | 21 |
return derr.ErrorCodeEmptyRename |
| 24 | 22 |
} |
| 25 | 23 |
|
| 26 |
- container, err = daemon.Get(oldName) |
|
| 24 |
+ container, err := daemon.Get(oldName) |
|
| 27 | 25 |
if err != nil {
|
| 28 | 26 |
return err |
| 29 | 27 |
} |
| ... | ... |
@@ -50,7 +48,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
|
| 50 | 50 |
return derr.ErrorCodeRenameDelete.WithArgs(oldName, err) |
| 51 | 51 |
} |
| 52 | 52 |
|
| 53 |
- if err = container.toDisk(); err != nil {
|
|
| 53 |
+ if err = container.ToDisk(); err != nil {
|
|
| 54 | 54 |
return err |
| 55 | 55 |
} |
| 56 | 56 |
|
| ... | ... |
@@ -62,7 +60,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
|
| 62 | 62 |
defer func() {
|
| 63 | 63 |
if err != nil {
|
| 64 | 64 |
container.Name = oldName |
| 65 |
- if e := container.toDisk(); e != nil {
|
|
| 65 |
+ if e := container.ToDisk(); e != nil {
|
|
| 66 | 66 |
logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e)
|
| 67 | 67 |
} |
| 68 | 68 |
} |
| ... | ... |
@@ -1,6 +1,7 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "github.com/docker/docker/container" |
|
| 4 | 5 |
derr "github.com/docker/docker/errors" |
| 5 | 6 |
) |
| 6 | 7 |
|
| ... | ... |
@@ -25,7 +26,7 @@ func (daemon *Daemon) ContainerRestart(name string, seconds int) error {
|
| 25 | 25 |
// container. When stopping, wait for the given duration in seconds to |
| 26 | 26 |
// gracefully stop, before forcefully terminating the container. If |
| 27 | 27 |
// given a negative duration, wait forever for a graceful stop. |
| 28 |
-func (daemon *Daemon) containerRestart(container *Container, seconds int) error {
|
|
| 28 |
+func (daemon *Daemon) containerRestart(container *container.Container, seconds int) error {
|
|
| 29 | 29 |
// Avoid unnecessarily unmounting and then directly mounting |
| 30 | 30 |
// the container when the container stops and then starts |
| 31 | 31 |
// again |
| ... | ... |
@@ -4,8 +4,8 @@ import ( |
| 4 | 4 |
"runtime" |
| 5 | 5 |
|
| 6 | 6 |
"github.com/Sirupsen/logrus" |
| 7 |
+ "github.com/docker/docker/container" |
|
| 7 | 8 |
derr "github.com/docker/docker/errors" |
| 8 |
- "github.com/docker/docker/pkg/promise" |
|
| 9 | 9 |
"github.com/docker/docker/runconfig" |
| 10 | 10 |
) |
| 11 | 11 |
|
| ... | ... |
@@ -16,7 +16,7 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConf |
| 16 | 16 |
return err |
| 17 | 17 |
} |
| 18 | 18 |
|
| 19 |
- if container.isPaused() {
|
|
| 19 |
+ if container.IsPaused() {
|
|
| 20 | 20 |
return derr.ErrorCodeStartPaused |
| 21 | 21 |
} |
| 22 | 22 |
|
| ... | ... |
@@ -42,7 +42,7 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConf |
| 42 | 42 |
if err := daemon.setHostConfig(container, hostConfig); err != nil {
|
| 43 | 43 |
return err |
| 44 | 44 |
} |
| 45 |
- initDNSHostConfig(container) |
|
| 45 |
+ container.InitDNSHostConfig() |
|
| 46 | 46 |
} |
| 47 | 47 |
} else {
|
| 48 | 48 |
if hostConfig != nil {
|
| ... | ... |
@@ -52,7 +52,7 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConf |
| 52 | 52 |
|
| 53 | 53 |
// check if hostConfig is in line with the current system settings. |
| 54 | 54 |
// It may happen cgroups are umounted or the like. |
| 55 |
- if _, err = daemon.verifyContainerSettings(container.hostConfig, nil); err != nil {
|
|
| 55 |
+ if _, err = daemon.verifyContainerSettings(container.HostConfig, nil); err != nil {
|
|
| 56 | 56 |
return err |
| 57 | 57 |
} |
| 58 | 58 |
|
| ... | ... |
@@ -64,7 +64,7 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConf |
| 64 | 64 |
} |
| 65 | 65 |
|
| 66 | 66 |
// Start starts a container |
| 67 |
-func (daemon *Daemon) Start(container *Container) error {
|
|
| 67 |
+func (daemon *Daemon) Start(container *container.Container) error {
|
|
| 68 | 68 |
return daemon.containerStart(container) |
| 69 | 69 |
} |
| 70 | 70 |
|
| ... | ... |
@@ -72,7 +72,7 @@ func (daemon *Daemon) Start(container *Container) error {
|
| 72 | 72 |
// container needs, such as storage and networking, as well as links |
| 73 | 73 |
// between containers. The container is left waiting for a signal to |
| 74 | 74 |
// begin running. |
| 75 |
-func (daemon *Daemon) containerStart(container *Container) (err error) {
|
|
| 75 |
+func (daemon *Daemon) containerStart(container *container.Container) (err error) {
|
|
| 76 | 76 |
container.Lock() |
| 77 | 77 |
defer container.Unlock() |
| 78 | 78 |
|
| ... | ... |
@@ -80,7 +80,7 @@ func (daemon *Daemon) containerStart(container *Container) (err error) {
|
| 80 | 80 |
return nil |
| 81 | 81 |
} |
| 82 | 82 |
|
| 83 |
- if container.removalInProgress || container.Dead {
|
|
| 83 |
+ if container.RemovalInProgress || container.Dead {
|
|
| 84 | 84 |
return derr.ErrorCodeContainerBeingRemoved |
| 85 | 85 |
} |
| 86 | 86 |
|
| ... | ... |
@@ -88,12 +88,12 @@ func (daemon *Daemon) containerStart(container *Container) (err error) {
|
| 88 | 88 |
// setup has been cleaned up properly |
| 89 | 89 |
defer func() {
|
| 90 | 90 |
if err != nil {
|
| 91 |
- container.setError(err) |
|
| 91 |
+ container.SetError(err) |
|
| 92 | 92 |
// if no one else has set it, make sure we don't leave it at zero |
| 93 | 93 |
if container.ExitCode == 0 {
|
| 94 | 94 |
container.ExitCode = 128 |
| 95 | 95 |
} |
| 96 |
- container.toDisk() |
|
| 96 |
+ container.ToDisk() |
|
| 97 | 97 |
daemon.Cleanup(container) |
| 98 | 98 |
daemon.LogContainerEvent(container, "die") |
| 99 | 99 |
} |
| ... | ... |
@@ -105,7 +105,7 @@ func (daemon *Daemon) containerStart(container *Container) (err error) {
|
| 105 | 105 |
|
| 106 | 106 |
// Make sure NetworkMode has an acceptable value. We do this to ensure |
| 107 | 107 |
// backwards API compatibility. |
| 108 |
- container.hostConfig = runconfig.SetDefaultNetModeIfBlank(container.hostConfig) |
|
| 108 |
+ container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig) |
|
| 109 | 109 |
|
| 110 | 110 |
if err := daemon.initializeNetworking(container); err != nil {
|
| 111 | 111 |
return err |
| ... | ... |
@@ -114,15 +114,15 @@ func (daemon *Daemon) containerStart(container *Container) (err error) {
|
| 114 | 114 |
if err != nil {
|
| 115 | 115 |
return err |
| 116 | 116 |
} |
| 117 |
- if err := container.setupWorkingDirectory(); err != nil {
|
|
| 117 |
+ if err := container.SetupWorkingDirectory(); err != nil {
|
|
| 118 | 118 |
return err |
| 119 | 119 |
} |
| 120 |
- env := container.createDaemonEnvironment(linkedEnv) |
|
| 120 |
+ env := container.CreateDaemonEnvironment(linkedEnv) |
|
| 121 | 121 |
if err := daemon.populateCommand(container, env); err != nil {
|
| 122 | 122 |
return err |
| 123 | 123 |
} |
| 124 | 124 |
|
| 125 |
- if !container.hostConfig.IpcMode.IsContainer() && !container.hostConfig.IpcMode.IsHost() {
|
|
| 125 |
+ if !container.HostConfig.IpcMode.IsContainer() && !container.HostConfig.IpcMode.IsHost() {
|
|
| 126 | 126 |
if err := daemon.setupIpcDirs(container); err != nil {
|
| 127 | 127 |
return err |
| 128 | 128 |
} |
| ... | ... |
@@ -132,10 +132,10 @@ func (daemon *Daemon) containerStart(container *Container) (err error) {
|
| 132 | 132 |
if err != nil {
|
| 133 | 133 |
return err |
| 134 | 134 |
} |
| 135 |
- mounts = append(mounts, container.ipcMounts()...) |
|
| 136 |
- mounts = append(mounts, container.tmpfsMounts()...) |
|
| 135 |
+ mounts = append(mounts, container.IpcMounts()...) |
|
| 136 |
+ mounts = append(mounts, container.TmpfsMounts()...) |
|
| 137 | 137 |
|
| 138 |
- container.command.Mounts = mounts |
|
| 138 |
+ container.Command.Mounts = mounts |
|
| 139 | 139 |
if err := daemon.waitForStart(container); err != nil {
|
| 140 | 140 |
return err |
| 141 | 141 |
} |
| ... | ... |
@@ -143,34 +143,24 @@ func (daemon *Daemon) containerStart(container *Container) (err error) {
|
| 143 | 143 |
return nil |
| 144 | 144 |
} |
| 145 | 145 |
|
| 146 |
-func (daemon *Daemon) waitForStart(container *Container) error {
|
|
| 147 |
- container.monitor = daemon.newContainerMonitor(container, container.hostConfig.RestartPolicy) |
|
| 148 |
- |
|
| 149 |
- // block until we either receive an error from the initial start of the container's |
|
| 150 |
- // process or until the process is running in the container |
|
| 151 |
- select {
|
|
| 152 |
- case <-container.monitor.startSignal: |
|
| 153 |
- case err := <-promise.Go(container.monitor.Start): |
|
| 154 |
- return err |
|
| 155 |
- } |
|
| 156 |
- |
|
| 157 |
- return nil |
|
| 146 |
+func (daemon *Daemon) waitForStart(container *container.Container) error {
|
|
| 147 |
+ return container.StartMonitor(daemon, container.HostConfig.RestartPolicy) |
|
| 158 | 148 |
} |
| 159 | 149 |
|
| 160 | 150 |
// Cleanup releases any network resources allocated to the container along with any rules |
| 161 | 151 |
// around how containers are linked together. It also unmounts the container's root filesystem. |
| 162 |
-func (daemon *Daemon) Cleanup(container *Container) {
|
|
| 152 |
+func (daemon *Daemon) Cleanup(container *container.Container) {
|
|
| 163 | 153 |
daemon.releaseNetwork(container) |
| 164 | 154 |
|
| 165 |
- container.unmountIpcMounts(detachMounted) |
|
| 155 |
+ container.UnmountIpcMounts(detachMounted) |
|
| 166 | 156 |
|
| 167 | 157 |
daemon.conditionalUnmountOnCleanup(container) |
| 168 | 158 |
|
| 169 |
- for _, eConfig := range container.execCommands.Commands() {
|
|
| 159 |
+ for _, eConfig := range container.ExecCommands.Commands() {
|
|
| 170 | 160 |
daemon.unregisterExecCommand(container, eConfig) |
| 171 | 161 |
} |
| 172 | 162 |
|
| 173 |
- if err := container.unmountVolumes(false); err != nil {
|
|
| 163 |
+ if err := container.UnmountVolumes(false); err != nil {
|
|
| 174 | 164 |
logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err)
|
| 175 | 165 |
} |
| 176 | 166 |
} |
| 177 | 167 |
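With waitForStart reduced to a single call to container.StartMonitor(daemon, container.HostConfig.RestartPolicy), the monitor now lives with the container, and the daemon only has to satisfy the supervisor-style callbacks it passes itself in as. A rough Go sketch of that inversion, using hypothetical interface, type, and method names rather than the real ones exported by the container package:

package main

import "fmt"

// supervisor stands in for the callback interface the daemon satisfies when
// it hands itself to the container's monitor; the real method set is larger.
type supervisor interface {
	StartLogging(id string) error
	Cleanup(id string)
}

// fakeDaemon is a hypothetical supervisor used only to show the wiring.
type fakeDaemon struct{}

func (fakeDaemon) StartLogging(id string) error {
	fmt.Println("start logging for", id)
	return nil
}

func (fakeDaemon) Cleanup(id string) {
	fmt.Println("cleanup", id)
}

// startMonitor mimics the shape of container.StartMonitor: container-side
// code drives the lifecycle and calls back into whatever supervisor it got.
func startMonitor(s supervisor, id string) error {
	if err := s.StartLogging(id); err != nil {
		return err
	}
	defer s.Cleanup(id)
	fmt.Println("running", id)
	return nil
}

func main() {
	_ = startMonitor(fakeDaemon{}, "abc123")
}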
deleted file mode 100644 |
| ... | ... |
@@ -1,263 +0,0 @@ |
| 1 |
-package daemon |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "fmt" |
|
| 5 |
- "sync" |
|
| 6 |
- "time" |
|
| 7 |
- |
|
| 8 |
- "github.com/docker/docker/daemon/execdriver" |
|
| 9 |
- derr "github.com/docker/docker/errors" |
|
| 10 |
- "github.com/docker/docker/pkg/units" |
|
| 11 |
-) |
|
| 12 |
- |
|
| 13 |
-// State holds the current container state, and has methods to get and |
|
| 14 |
-// set the state. Container embeds State, which allows all of the |
|
| 15 |
-// functions defined against State to run against Container. |
|
| 16 |
-type State struct {
|
|
| 17 |
- sync.Mutex |
|
| 18 |
- // FIXME: Why do we have both paused and running if a |
|
| 19 |
- // container cannot be paused and running at the same time? |
|
| 20 |
- Running bool |
|
| 21 |
- Paused bool |
|
| 22 |
- Restarting bool |
|
| 23 |
- OOMKilled bool |
|
| 24 |
- removalInProgress bool // No need for this to be persistent on disk. |
|
| 25 |
- Dead bool |
|
| 26 |
- Pid int |
|
| 27 |
- ExitCode int |
|
| 28 |
- Error string // contains last known error when starting the container |
|
| 29 |
- StartedAt time.Time |
|
| 30 |
- FinishedAt time.Time |
|
| 31 |
- waitChan chan struct{}
|
|
| 32 |
-} |
|
| 33 |
- |
|
| 34 |
-// NewState creates a default state object with a fresh channel for state changes. |
|
| 35 |
-func NewState() *State {
|
|
| 36 |
- return &State{
|
|
| 37 |
- waitChan: make(chan struct{}),
|
|
| 38 |
- } |
|
| 39 |
-} |
|
| 40 |
- |
|
| 41 |
-// String returns a human-readable description of the state |
|
| 42 |
-func (s *State) String() string {
|
|
| 43 |
- if s.Running {
|
|
| 44 |
- if s.Paused {
|
|
| 45 |
- return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
|
|
| 46 |
- } |
|
| 47 |
- if s.Restarting {
|
|
| 48 |
- return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
|
|
| 49 |
- } |
|
| 50 |
- |
|
| 51 |
- return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
|
|
| 52 |
- } |
|
| 53 |
- |
|
| 54 |
- if s.removalInProgress {
|
|
| 55 |
- return "Removal In Progress" |
|
| 56 |
- } |
|
| 57 |
- |
|
| 58 |
- if s.Dead {
|
|
| 59 |
- return "Dead" |
|
| 60 |
- } |
|
| 61 |
- |
|
| 62 |
- if s.StartedAt.IsZero() {
|
|
| 63 |
- return "Created" |
|
| 64 |
- } |
|
| 65 |
- |
|
| 66 |
- if s.FinishedAt.IsZero() {
|
|
| 67 |
- return "" |
|
| 68 |
- } |
|
| 69 |
- |
|
| 70 |
- return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
|
|
| 71 |
-} |
|
| 72 |
- |
|
| 73 |
-// StateString returns a single string to describe state |
|
| 74 |
-func (s *State) StateString() string {
|
|
| 75 |
- if s.Running {
|
|
| 76 |
- if s.Paused {
|
|
| 77 |
- return "paused" |
|
| 78 |
- } |
|
| 79 |
- if s.Restarting {
|
|
| 80 |
- return "restarting" |
|
| 81 |
- } |
|
| 82 |
- return "running" |
|
| 83 |
- } |
|
| 84 |
- |
|
| 85 |
- if s.Dead {
|
|
| 86 |
- return "dead" |
|
| 87 |
- } |
|
| 88 |
- |
|
| 89 |
- if s.StartedAt.IsZero() {
|
|
| 90 |
- return "created" |
|
| 91 |
- } |
|
| 92 |
- |
|
| 93 |
- return "exited" |
|
| 94 |
-} |
|
| 95 |
- |
|
| 96 |
-func isValidStateString(s string) bool {
|
|
| 97 |
- if s != "paused" && |
|
| 98 |
- s != "restarting" && |
|
| 99 |
- s != "running" && |
|
| 100 |
- s != "dead" && |
|
| 101 |
- s != "created" && |
|
| 102 |
- s != "exited" {
|
|
| 103 |
- return false |
|
| 104 |
- } |
|
| 105 |
- return true |
|
| 106 |
-} |
|
| 107 |
- |
|
| 108 |
-func wait(waitChan <-chan struct{}, timeout time.Duration) error {
|
|
| 109 |
- if timeout < 0 {
|
|
| 110 |
- <-waitChan |
|
| 111 |
- return nil |
|
| 112 |
- } |
|
| 113 |
- select {
|
|
| 114 |
- case <-time.After(timeout): |
|
| 115 |
- return derr.ErrorCodeTimedOut.WithArgs(timeout) |
|
| 116 |
- case <-waitChan: |
|
| 117 |
- return nil |
|
| 118 |
- } |
|
| 119 |
-} |
|
| 120 |
- |
|
| 121 |
-// waitRunning waits until state is running. If state is already |
|
| 122 |
-// running it returns immediately. If you want to wait forever you must |
|
| 123 |
-// supply a negative timeout. Returns the pid that was passed to |
|
| 124 |
-// setRunning. |
|
| 125 |
-func (s *State) waitRunning(timeout time.Duration) (int, error) {
|
|
| 126 |
- s.Lock() |
|
| 127 |
- if s.Running {
|
|
| 128 |
- pid := s.Pid |
|
| 129 |
- s.Unlock() |
|
| 130 |
- return pid, nil |
|
| 131 |
- } |
|
| 132 |
- waitChan := s.waitChan |
|
| 133 |
- s.Unlock() |
|
| 134 |
- if err := wait(waitChan, timeout); err != nil {
|
|
| 135 |
- return -1, err |
|
| 136 |
- } |
|
| 137 |
- return s.GetPID(), nil |
|
| 138 |
-} |
|
| 139 |
- |
|
| 140 |
-// WaitStop waits until state is stopped. If state already stopped it returns |
|
| 141 |
-// immediately. If you want to wait forever you must supply a negative timeout. |
|
| 142 |
-// Returns the exit code that was passed to setStoppedLocking |
|
| 143 |
-func (s *State) WaitStop(timeout time.Duration) (int, error) {
|
|
| 144 |
- s.Lock() |
|
| 145 |
- if !s.Running {
|
|
| 146 |
- exitCode := s.ExitCode |
|
| 147 |
- s.Unlock() |
|
| 148 |
- return exitCode, nil |
|
| 149 |
- } |
|
| 150 |
- waitChan := s.waitChan |
|
| 151 |
- s.Unlock() |
|
| 152 |
- if err := wait(waitChan, timeout); err != nil {
|
|
| 153 |
- return -1, err |
|
| 154 |
- } |
|
| 155 |
- return s.getExitCode(), nil |
|
| 156 |
-} |
|
| 157 |
- |
|
| 158 |
-// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running. |
|
| 159 |
-func (s *State) IsRunning() bool {
|
|
| 160 |
- s.Lock() |
|
| 161 |
- res := s.Running |
|
| 162 |
- s.Unlock() |
|
| 163 |
- return res |
|
| 164 |
-} |
|
| 165 |
- |
|
| 166 |
-// GetPID returns the process id of the container. |
|
| 167 |
-func (s *State) GetPID() int {
|
|
| 168 |
- s.Lock() |
|
| 169 |
- res := s.Pid |
|
| 170 |
- s.Unlock() |
|
| 171 |
- return res |
|
| 172 |
-} |
|
| 173 |
- |
|
| 174 |
-func (s *State) getExitCode() int {
|
|
| 175 |
- s.Lock() |
|
| 176 |
- res := s.ExitCode |
|
| 177 |
- s.Unlock() |
|
| 178 |
- return res |
|
| 179 |
-} |
|
| 180 |
- |
|
| 181 |
-func (s *State) setRunning(pid int) {
|
|
| 182 |
- s.Error = "" |
|
| 183 |
- s.Running = true |
|
| 184 |
- s.Paused = false |
|
| 185 |
- s.Restarting = false |
|
| 186 |
- s.ExitCode = 0 |
|
| 187 |
- s.Pid = pid |
|
| 188 |
- s.StartedAt = time.Now().UTC() |
|
| 189 |
- close(s.waitChan) // fire waiters for start |
|
| 190 |
- s.waitChan = make(chan struct{})
|
|
| 191 |
-} |
|
| 192 |
- |
|
| 193 |
-func (s *State) setStoppedLocking(exitStatus *execdriver.ExitStatus) {
|
|
| 194 |
- s.Lock() |
|
| 195 |
- s.setStopped(exitStatus) |
|
| 196 |
- s.Unlock() |
|
| 197 |
-} |
|
| 198 |
- |
|
| 199 |
-func (s *State) setStopped(exitStatus *execdriver.ExitStatus) {
|
|
| 200 |
- s.Running = false |
|
| 201 |
- s.Restarting = false |
|
| 202 |
- s.Pid = 0 |
|
| 203 |
- s.FinishedAt = time.Now().UTC() |
|
| 204 |
- s.setFromExitStatus(exitStatus) |
|
| 205 |
- close(s.waitChan) // fire waiters for stop |
|
| 206 |
- s.waitChan = make(chan struct{})
|
|
| 207 |
-} |
|
| 208 |
- |
|
| 209 |
-// setRestarting is used when docker handles the auto restart of containers that are |
|
| 210 |
-// in the middle of a stop and being restarted again |
|
| 211 |
-func (s *State) setRestartingLocking(exitStatus *execdriver.ExitStatus) {
|
|
| 212 |
- s.Lock() |
|
| 213 |
- s.setRestarting(exitStatus) |
|
| 214 |
- s.Unlock() |
|
| 215 |
-} |
|
| 216 |
- |
|
| 217 |
-func (s *State) setRestarting(exitStatus *execdriver.ExitStatus) {
|
|
| 218 |
- // we should consider the container running when it is restarting because of |
|
| 219 |
- // all the checks in docker around rm/stop/etc |
|
| 220 |
- s.Running = true |
|
| 221 |
- s.Restarting = true |
|
| 222 |
- s.Pid = 0 |
|
| 223 |
- s.FinishedAt = time.Now().UTC() |
|
| 224 |
- s.setFromExitStatus(exitStatus) |
|
| 225 |
- close(s.waitChan) // fire waiters for stop |
|
| 226 |
- s.waitChan = make(chan struct{})
|
|
| 227 |
-} |
|
| 228 |
- |
|
| 229 |
-// setError sets the container's error state. This is useful when we want to |
|
| 230 |
-// know the error that occurred when the container transitions to another state |
|
| 231 |
-// when inspecting it |
|
| 232 |
-func (s *State) setError(err error) {
|
|
| 233 |
- s.Error = err.Error() |
|
| 234 |
-} |
|
| 235 |
- |
|
| 236 |
-func (s *State) isPaused() bool {
|
|
| 237 |
- s.Lock() |
|
| 238 |
- res := s.Paused |
|
| 239 |
- s.Unlock() |
|
| 240 |
- return res |
|
| 241 |
-} |
|
| 242 |
- |
|
| 243 |
-func (s *State) setRemovalInProgress() error {
|
|
| 244 |
- s.Lock() |
|
| 245 |
- defer s.Unlock() |
|
| 246 |
- if s.removalInProgress {
|
|
| 247 |
- return derr.ErrorCodeAlreadyRemoving |
|
| 248 |
- } |
|
| 249 |
- s.removalInProgress = true |
|
| 250 |
- return nil |
|
| 251 |
-} |
|
| 252 |
- |
|
| 253 |
-func (s *State) resetRemovalInProgress() {
|
|
| 254 |
- s.Lock() |
|
| 255 |
- s.removalInProgress = false |
|
| 256 |
- s.Unlock() |
|
| 257 |
-} |
|
| 258 |
- |
|
| 259 |
-func (s *State) setDead() {
|
|
| 260 |
- s.Lock() |
|
| 261 |
- s.Dead = true |
|
| 262 |
- s.Unlock() |
|
| 263 |
-} |
| 264 | 1 |
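The State type deleted above relies on one idiom throughout: a waitChan that every waiter blocks on, which each transition closes (waking all waiters at once) and immediately replaces with a fresh channel for the next transition. A small, self-contained Go sketch of that broadcast pattern, with illustrative names only:

package main

import (
	"fmt"
	"sync"
	"time"
)

// notifier holds the current wait channel; waiters block on it and every
// state change closes it and installs a new one, much like State.waitChan.
type notifier struct {
	mu sync.Mutex
	ch chan struct{}
}

func newNotifier() *notifier { return &notifier{ch: make(chan struct{})} }

// wait returns the channel that the next call to signal will close.
func (n *notifier) wait() <-chan struct{} {
	n.mu.Lock()
	defer n.mu.Unlock()
	return n.ch
}

// signal wakes every current waiter and resets the channel.
func (n *notifier) signal() {
	n.mu.Lock()
	close(n.ch)
	n.ch = make(chan struct{})
	n.mu.Unlock()
}

func main() {
	n := newNotifier()
	done := n.wait()
	go func() {
		time.Sleep(50 * time.Millisecond)
		n.signal() // e.g. setRunning or setStopped firing its waiters
	}()
	<-done
	fmt.Println("state changed")
}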
deleted file mode 100644 |
| ... | ... |
@@ -1,111 +0,0 @@ |
| 1 |
-package daemon |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "sync/atomic" |
|
| 5 |
- "testing" |
|
| 6 |
- "time" |
|
| 7 |
- |
|
| 8 |
- "github.com/docker/docker/daemon/execdriver" |
|
| 9 |
-) |
|
| 10 |
- |
|
| 11 |
-func TestStateRunStop(t *testing.T) {
|
|
| 12 |
- s := NewState() |
|
| 13 |
- for i := 1; i < 3; i++ { // full lifecycle two times
|
|
| 14 |
- started := make(chan struct{})
|
|
| 15 |
- var pid int64 |
|
| 16 |
- go func() {
|
|
| 17 |
- runPid, _ := s.waitRunning(-1 * time.Second) |
|
| 18 |
- atomic.StoreInt64(&pid, int64(runPid)) |
|
| 19 |
- close(started) |
|
| 20 |
- }() |
|
| 21 |
- s.Lock() |
|
| 22 |
- s.setRunning(i + 100) |
|
| 23 |
- s.Unlock() |
|
| 24 |
- |
|
| 25 |
- if !s.IsRunning() {
|
|
| 26 |
- t.Fatal("State not running")
|
|
| 27 |
- } |
|
| 28 |
- if s.Pid != i+100 {
|
|
| 29 |
- t.Fatalf("Pid %v, expected %v", s.Pid, i+100)
|
|
| 30 |
- } |
|
| 31 |
- if s.ExitCode != 0 {
|
|
| 32 |
- t.Fatalf("ExitCode %v, expected 0", s.ExitCode)
|
|
| 33 |
- } |
|
| 34 |
- select {
|
|
| 35 |
- case <-time.After(100 * time.Millisecond): |
|
| 36 |
- t.Fatal("Start callback doesn't fire in 100 milliseconds")
|
|
| 37 |
- case <-started: |
|
| 38 |
- t.Log("Start callback fired")
|
|
| 39 |
- } |
|
| 40 |
- runPid := int(atomic.LoadInt64(&pid)) |
|
| 41 |
- if runPid != i+100 {
|
|
| 42 |
- t.Fatalf("Pid %v, expected %v", runPid, i+100)
|
|
| 43 |
- } |
|
| 44 |
- if pid, err := s.waitRunning(-1 * time.Second); err != nil || pid != i+100 {
|
|
| 45 |
- t.Fatalf("waitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil)
|
|
| 46 |
- } |
|
| 47 |
- |
|
| 48 |
- stopped := make(chan struct{})
|
|
| 49 |
- var exit int64 |
|
| 50 |
- go func() {
|
|
| 51 |
- exitCode, _ := s.WaitStop(-1 * time.Second) |
|
| 52 |
- atomic.StoreInt64(&exit, int64(exitCode)) |
|
| 53 |
- close(stopped) |
|
| 54 |
- }() |
|
| 55 |
- s.setStoppedLocking(&execdriver.ExitStatus{ExitCode: i})
|
|
| 56 |
- if s.IsRunning() {
|
|
| 57 |
- t.Fatal("State is running")
|
|
| 58 |
- } |
|
| 59 |
- if s.ExitCode != i {
|
|
| 60 |
- t.Fatalf("ExitCode %v, expected %v", s.ExitCode, i)
|
|
| 61 |
- } |
|
| 62 |
- if s.Pid != 0 {
|
|
| 63 |
- t.Fatalf("Pid %v, expected 0", s.Pid)
|
|
| 64 |
- } |
|
| 65 |
- select {
|
|
| 66 |
- case <-time.After(100 * time.Millisecond): |
|
| 67 |
- t.Fatal("Stop callback doesn't fire in 100 milliseconds")
|
|
| 68 |
- case <-stopped: |
|
| 69 |
- t.Log("Stop callback fired")
|
|
| 70 |
- } |
|
| 71 |
- exitCode := int(atomic.LoadInt64(&exit)) |
|
| 72 |
- if exitCode != i {
|
|
| 73 |
- t.Fatalf("ExitCode %v, expected %v", exitCode, i)
|
|
| 74 |
- } |
|
| 75 |
- if exitCode, err := s.WaitStop(-1 * time.Second); err != nil || exitCode != i {
|
|
| 76 |
- t.Fatalf("WaitStop returned exitCode: %v, err: %v, expected exitCode: %v, err: %v", exitCode, err, i, nil)
|
|
| 77 |
- } |
|
| 78 |
- } |
|
| 79 |
-} |
|
| 80 |
- |
|
| 81 |
-func TestStateTimeoutWait(t *testing.T) {
|
|
| 82 |
- s := NewState() |
|
| 83 |
- started := make(chan struct{})
|
|
| 84 |
- go func() {
|
|
| 85 |
- s.waitRunning(100 * time.Millisecond) |
|
| 86 |
- close(started) |
|
| 87 |
- }() |
|
| 88 |
- select {
|
|
| 89 |
- case <-time.After(200 * time.Millisecond): |
|
| 90 |
- t.Fatal("Start callback doesn't fire in 100 milliseconds")
|
|
| 91 |
- case <-started: |
|
| 92 |
- t.Log("Start callback fired")
|
|
| 93 |
- } |
|
| 94 |
- |
|
| 95 |
- s.Lock() |
|
| 96 |
- s.setRunning(49) |
|
| 97 |
- s.Unlock() |
|
| 98 |
- |
|
| 99 |
- stopped := make(chan struct{})
|
|
| 100 |
- go func() {
|
|
| 101 |
- s.waitRunning(100 * time.Millisecond) |
|
| 102 |
- close(stopped) |
|
| 103 |
- }() |
|
| 104 |
- select {
|
|
| 105 |
- case <-time.After(200 * time.Millisecond): |
|
| 106 |
- t.Fatal("Start callback doesn't fire in 100 milliseconds")
|
|
| 107 |
- case <-stopped: |
|
| 108 |
- t.Log("Start callback fired")
|
|
| 109 |
- } |
|
| 110 |
- |
|
| 111 |
-} |
| 112 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,12 +0,0 @@ |
| 1 |
-// +build linux freebsd |
|
| 2 |
- |
|
| 3 |
-package daemon |
|
| 4 |
- |
|
| 5 |
-import "github.com/docker/docker/daemon/execdriver" |
|
| 6 |
- |
|
| 7 |
-// setFromExitStatus is a platform specific helper function to set the state |
|
| 8 |
-// based on the ExitStatus structure. |
|
| 9 |
-func (s *State) setFromExitStatus(exitStatus *execdriver.ExitStatus) {
|
|
| 10 |
- s.ExitCode = exitStatus.ExitCode |
|
| 11 |
- s.OOMKilled = exitStatus.OOMKilled |
|
| 12 |
-} |
| 13 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,9 +0,0 @@ |
| 1 |
-package daemon |
|
| 2 |
- |
|
| 3 |
-import "github.com/docker/docker/daemon/execdriver" |
|
| 4 |
- |
|
| 5 |
-// setFromExitStatus is a platform specific helper function to set the state |
|
| 6 |
-// based on the ExitStatus structure. |
|
| 7 |
-func (s *State) setFromExitStatus(exitStatus *execdriver.ExitStatus) {
|
|
| 8 |
- s.ExitCode = exitStatus.ExitCode |
|
| 9 |
-} |
| ... | ... |
@@ -11,6 +11,7 @@ import ( |
| 11 | 11 |
"time" |
| 12 | 12 |
|
| 13 | 13 |
"github.com/Sirupsen/logrus" |
| 14 |
+ "github.com/docker/docker/container" |
|
| 14 | 15 |
"github.com/docker/docker/daemon/execdriver" |
| 15 | 16 |
derr "github.com/docker/docker/errors" |
| 16 | 17 |
"github.com/docker/docker/pkg/pubsub" |
| ... | ... |
@@ -19,7 +20,7 @@ import ( |
| 19 | 19 |
|
| 20 | 20 |
type statsSupervisor interface {
|
| 21 | 21 |
// GetContainerStats collects all the stats related to a container |
| 22 |
- GetContainerStats(container *Container) (*execdriver.ResourceStats, error) |
|
| 22 |
+ GetContainerStats(container *container.Container) (*execdriver.ResourceStats, error) |
|
| 23 | 23 |
} |
| 24 | 24 |
|
| 25 | 25 |
// newStatsCollector returns a new statsCollector that collects |
| ... | ... |
@@ -30,7 +31,7 @@ func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector |
| 30 | 30 |
s := &statsCollector{
|
| 31 | 31 |
interval: interval, |
| 32 | 32 |
supervisor: daemon, |
| 33 |
- publishers: make(map[*Container]*pubsub.Publisher), |
|
| 33 |
+ publishers: make(map[*container.Container]*pubsub.Publisher), |
|
| 34 | 34 |
clockTicksPerSecond: uint64(system.GetClockTicks()), |
| 35 | 35 |
bufReader: bufio.NewReaderSize(nil, 128), |
| 36 | 36 |
} |
| ... | ... |
@@ -44,14 +45,14 @@ type statsCollector struct {
|
| 44 | 44 |
supervisor statsSupervisor |
| 45 | 45 |
interval time.Duration |
| 46 | 46 |
clockTicksPerSecond uint64 |
| 47 |
- publishers map[*Container]*pubsub.Publisher |
|
| 47 |
+ publishers map[*container.Container]*pubsub.Publisher |
|
| 48 | 48 |
bufReader *bufio.Reader |
| 49 | 49 |
} |
| 50 | 50 |
|
| 51 | 51 |
// collect registers the container with the collector and adds it to |
| 52 | 52 |
// the event loop for collection on the specified interval returning |
| 53 | 53 |
// a channel for the subscriber to receive on. |
| 54 |
-func (s *statsCollector) collect(c *Container) chan interface{} {
|
|
| 54 |
+func (s *statsCollector) collect(c *container.Container) chan interface{} {
|
|
| 55 | 55 |
s.m.Lock() |
| 56 | 56 |
defer s.m.Unlock() |
| 57 | 57 |
publisher, exists := s.publishers[c] |
| ... | ... |
@@ -64,7 +65,7 @@ func (s *statsCollector) collect(c *Container) chan interface{} {
|
| 64 | 64 |
|
| 65 | 65 |
// stopCollection closes the channels for all subscribers and removes |
| 66 | 66 |
// the container from metrics collection. |
| 67 |
-func (s *statsCollector) stopCollection(c *Container) {
|
|
| 67 |
+func (s *statsCollector) stopCollection(c *container.Container) {
|
|
| 68 | 68 |
s.m.Lock() |
| 69 | 69 |
if publisher, exists := s.publishers[c]; exists {
|
| 70 | 70 |
publisher.Close() |
| ... | ... |
@@ -74,7 +75,7 @@ func (s *statsCollector) stopCollection(c *Container) {
|
| 74 | 74 |
} |
| 75 | 75 |
|
| 76 | 76 |
// unsubscribe removes a specific subscriber from receiving updates for a container's stats. |
| 77 |
-func (s *statsCollector) unsubscribe(c *Container, ch chan interface{}) {
|
|
| 77 |
+func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) {
|
|
| 78 | 78 |
s.m.Lock() |
| 79 | 79 |
publisher := s.publishers[c] |
| 80 | 80 |
if publisher != nil {
|
| ... | ... |
@@ -88,7 +89,7 @@ func (s *statsCollector) unsubscribe(c *Container, ch chan interface{}) {
|
| 88 | 88 |
|
| 89 | 89 |
func (s *statsCollector) run() {
|
| 90 | 90 |
type publishersPair struct {
|
| 91 |
- container *Container |
|
| 91 |
+ container *container.Container |
|
| 92 | 92 |
publisher *pubsub.Publisher |
| 93 | 93 |
} |
| 94 | 94 |
// we cannot determine the capacity here. |
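The collector keeps one publisher per *container.Container so that every subscriber of a container receives each sample published for it. A simplified Go sketch of that fan-out, keyed by container ID and using plain channels instead of the pubsub package the daemon actually uses:

package main

import (
	"fmt"
	"sync"
)

// statsHub is an illustrative stand-in for the collector's publishers map.
type statsHub struct {
	mu   sync.Mutex
	subs map[string][]chan string // subscriber channels keyed by container ID
}

func newStatsHub() *statsHub {
	return &statsHub{subs: make(map[string][]chan string)}
}

// collect registers a new subscriber for a container and returns its channel.
func (h *statsHub) collect(id string) chan string {
	h.mu.Lock()
	defer h.mu.Unlock()
	ch := make(chan string, 1)
	h.subs[id] = append(h.subs[id], ch)
	return ch
}

// publish delivers one sample to every subscriber of the container.
func (h *statsHub) publish(id, sample string) {
	h.mu.Lock()
	defer h.mu.Unlock()
	for _, ch := range h.subs[id] {
		ch <- sample
	}
}

func main() {
	h := newStatsHub()
	ch := h.collect("abc123")
	h.publish("abc123", "cpu=12% mem=64MiB")
	fmt.Println(<-ch)
}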
| ... | ... |
@@ -1,6 +1,10 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 |
-import "time" |
|
| 3 |
+import ( |
|
| 4 |
+ "time" |
|
| 5 |
+ |
|
| 6 |
+ "github.com/docker/docker/container" |
|
| 7 |
+) |
|
| 4 | 8 |
|
| 5 | 9 |
// newStatsCollector returns a new statsCollector for collecting stats |
| 6 | 10 |
// for a registered container at the specified interval. The collector allows |
| ... | ... |
@@ -17,15 +21,15 @@ type statsCollector struct {
|
| 17 | 17 |
// collect registers the container with the collector and adds it to |
| 18 | 18 |
// the event loop for collection on the specified interval returning |
| 19 | 19 |
// a channel for the subscriber to receive on. |
| 20 |
-func (s *statsCollector) collect(c *Container) chan interface{} {
|
|
| 20 |
+func (s *statsCollector) collect(c *container.Container) chan interface{} {
|
|
| 21 | 21 |
return nil |
| 22 | 22 |
} |
| 23 | 23 |
|
| 24 | 24 |
// stopCollection closes the channels for all subscribers and removes |
| 25 | 25 |
// the container from metrics collection. |
| 26 |
-func (s *statsCollector) stopCollection(c *Container) {
|
|
| 26 |
+func (s *statsCollector) stopCollection(c *container.Container) {
|
|
| 27 | 27 |
} |
| 28 | 28 |
|
| 29 | 29 |
// unsubscribe removes a specific subscriber from receiving updates for a container's stats. |
| 30 |
-func (s *statsCollector) unsubscribe(c *Container, ch chan interface{}) {
|
|
| 30 |
+func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) {
|
|
| 31 | 31 |
} |
| ... | ... |
@@ -4,6 +4,7 @@ import ( |
| 4 | 4 |
"time" |
| 5 | 5 |
|
| 6 | 6 |
"github.com/Sirupsen/logrus" |
| 7 |
+ "github.com/docker/docker/container" |
|
| 7 | 8 |
derr "github.com/docker/docker/errors" |
| 8 | 9 |
) |
| 9 | 10 |
|
| ... | ... |
@@ -32,13 +33,13 @@ func (daemon *Daemon) ContainerStop(name string, seconds int) error {
|
| 32 | 32 |
// process to exit. If a negative duration is given, Stop will wait |
| 33 | 33 |
// for the initial signal forever. If the container is not running Stop returns |
| 34 | 34 |
// immediately. |
| 35 |
-func (daemon *Daemon) containerStop(container *Container, seconds int) error {
|
|
| 35 |
+func (daemon *Daemon) containerStop(container *container.Container, seconds int) error {
|
|
| 36 | 36 |
if !container.IsRunning() {
|
| 37 | 37 |
return nil |
| 38 | 38 |
} |
| 39 | 39 |
|
| 40 | 40 |
// 1. Send a SIGTERM |
| 41 |
- if err := daemon.killPossiblyDeadProcess(container, container.stopSignal()); err != nil {
|
|
| 41 |
+ if err := daemon.killPossiblyDeadProcess(container, container.StopSignal()); err != nil {
|
|
| 42 | 42 |
logrus.Infof("Failed to send SIGTERM to the process, force killing")
|
| 43 | 43 |
if err := daemon.killPossiblyDeadProcess(container, 9); err != nil {
|
| 44 | 44 |
return err |
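containerStop follows the usual two-stage shutdown visible above: deliver the container's stop signal (SIGTERM by default), give the process a grace period to exit, and fall back to SIGKILL if it does not. A loose Go sketch of that flow against a hypothetical stopper interface; it is not the daemon's API:

package main

import (
	"errors"
	"fmt"
	"time"
)

// stopper abstracts the two calls the flow relies on: deliver a signal and
// wait for exit. The interface and the fake below are illustrative only.
type stopper interface {
	Signal(sig int) error
	WaitStop(timeout time.Duration) error
}

// gracefulStop sends the stop signal, waits out the grace period, and
// escalates to SIGKILL (signal 9) when the process is still running.
func gracefulStop(c stopper, stopSignal int, grace time.Duration) error {
	if err := c.Signal(stopSignal); err != nil {
		fmt.Println("failed to send stop signal, force killing")
		if err := c.Signal(9); err != nil {
			return err
		}
	}
	if err := c.WaitStop(grace); err != nil {
		fmt.Println("grace period expired, sending SIGKILL")
		if err := c.Signal(9); err != nil {
			return err
		}
	}
	return c.WaitStop(-1) // negative timeout: wait forever for the final exit
}

// slowProc is a fake process that only exits once it has been force killed.
type slowProc struct{ killed bool }

func (p *slowProc) Signal(sig int) error {
	if sig == 9 {
		p.killed = true
	}
	return nil
}

func (p *slowProc) WaitStop(timeout time.Duration) error {
	if p.killed || timeout < 0 {
		return nil
	}
	return errors.New("timed out waiting for exit")
}

func main() {
	fmt.Println(gracefulStop(&slowProc{}, 15, 10*time.Second)) // prints <nil>
}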
| ... | ... |
@@ -1,6 +1,7 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "github.com/docker/docker/container" |
|
| 4 | 5 |
derr "github.com/docker/docker/errors" |
| 5 | 6 |
) |
| 6 | 7 |
|
| ... | ... |
@@ -19,7 +20,7 @@ func (daemon *Daemon) ContainerUnpause(name string) error {
|
| 19 | 19 |
} |
| 20 | 20 |
|
| 21 | 21 |
// containerUnpause resumes the container execution after the container is paused. |
| 22 |
-func (daemon *Daemon) containerUnpause(container *Container) error {
|
|
| 22 |
+func (daemon *Daemon) containerUnpause(container *container.Container) error {
|
|
| 23 | 23 |
container.Lock() |
| 24 | 24 |
defer container.Unlock() |
| 25 | 25 |
|
| ... | ... |
@@ -33,7 +34,7 @@ func (daemon *Daemon) containerUnpause(container *Container) error {
|
| 33 | 33 |
return derr.ErrorCodeNotPaused.WithArgs(container.ID) |
| 34 | 34 |
} |
| 35 | 35 |
|
| 36 |
- if err := daemon.execDriver.Unpause(container.command); err != nil {
|
|
| 36 |
+ if err := daemon.execDriver.Unpause(container.Command); err != nil {
|
|
| 37 | 37 |
return err |
| 38 | 38 |
} |
| 39 | 39 |
|
| ... | ... |
@@ -7,6 +7,7 @@ import ( |
| 7 | 7 |
"strings" |
| 8 | 8 |
|
| 9 | 9 |
"github.com/docker/docker/api/types" |
| 10 |
+ "github.com/docker/docker/container" |
|
| 10 | 11 |
"github.com/docker/docker/daemon/execdriver" |
| 11 | 12 |
derr "github.com/docker/docker/errors" |
| 12 | 13 |
"github.com/docker/docker/runconfig" |
| ... | ... |
@@ -70,7 +71,7 @@ func (m mounts) parts(i int) int {
|
| 70 | 70 |
// 2. Select the volumes mounted from other containers. Overrides previously configured mount point destination. |
| 71 | 71 |
// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations. |
| 72 | 72 |
// 4. Clean up old volumes that are about to be reassigned. |
| 73 |
-func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error {
|
|
| 73 |
+func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *runconfig.HostConfig) error {
|
|
| 74 | 74 |
binds := map[string]bool{}
|
| 75 | 75 |
mountPoints := map[string]*volume.MountPoint{}
|
| 76 | 76 |
|
| ... | ... |
@@ -3,66 +3,27 @@ |
| 3 | 3 |
package daemon |
| 4 | 4 |
|
| 5 | 5 |
import ( |
| 6 |
- "io/ioutil" |
|
| 7 | 6 |
"os" |
| 8 | 7 |
"sort" |
| 9 | 8 |
|
| 9 |
+ "github.com/docker/docker/container" |
|
| 10 | 10 |
"github.com/docker/docker/daemon/execdriver" |
| 11 |
- "github.com/docker/docker/pkg/chrootarchive" |
|
| 12 |
- "github.com/docker/docker/pkg/system" |
|
| 13 | 11 |
"github.com/docker/docker/volume" |
| 14 | 12 |
volumedrivers "github.com/docker/docker/volume/drivers" |
| 15 | 13 |
"github.com/docker/docker/volume/local" |
| 16 | 14 |
) |
| 17 | 15 |
|
| 18 |
-// copyExistingContents copies from the source to the destination and |
|
| 19 |
-// ensures the ownership is appropriately set. |
|
| 20 |
-func copyExistingContents(source, destination string) error {
|
|
| 21 |
- volList, err := ioutil.ReadDir(source) |
|
| 22 |
- if err != nil {
|
|
| 23 |
- return err |
|
| 24 |
- } |
|
| 25 |
- if len(volList) > 0 {
|
|
| 26 |
- srcList, err := ioutil.ReadDir(destination) |
|
| 27 |
- if err != nil {
|
|
| 28 |
- return err |
|
| 29 |
- } |
|
| 30 |
- if len(srcList) == 0 {
|
|
| 31 |
- // If the source volume is empty copy files from the root into the volume |
|
| 32 |
- if err := chrootarchive.CopyWithTar(source, destination); err != nil {
|
|
| 33 |
- return err |
|
| 34 |
- } |
|
| 35 |
- } |
|
| 36 |
- } |
|
| 37 |
- return copyOwnership(source, destination) |
|
| 38 |
-} |
|
| 39 |
- |
|
| 40 |
-// copyOwnership copies the permissions and uid:gid of the source file |
|
| 41 |
-// to the destination file |
|
| 42 |
-func copyOwnership(source, destination string) error {
|
|
| 43 |
- stat, err := system.Stat(source) |
|
| 44 |
- if err != nil {
|
|
| 45 |
- return err |
|
| 46 |
- } |
|
| 47 |
- |
|
| 48 |
- if err := os.Chown(destination, int(stat.UID()), int(stat.GID())); err != nil {
|
|
| 49 |
- return err |
|
| 50 |
- } |
|
| 51 |
- |
|
| 52 |
- return os.Chmod(destination, os.FileMode(stat.Mode())) |
|
| 53 |
-} |
|
| 54 |
- |
|
| 55 | 16 |
// setupMounts iterates through each of the mount points for a container and |
| 56 | 17 |
// calls Setup() on each. It also looks to see if it is a network mount such as |
| 57 | 18 |
// /etc/resolv.conf, and if it is not, appends it to the array of mounts. |
| 58 |
-func (daemon *Daemon) setupMounts(container *Container) ([]execdriver.Mount, error) {
|
|
| 19 |
+func (daemon *Daemon) setupMounts(container *container.Container) ([]execdriver.Mount, error) {
|
|
| 59 | 20 |
var mounts []execdriver.Mount |
| 60 | 21 |
for _, m := range container.MountPoints {
|
| 61 | 22 |
path, err := m.Setup() |
| 62 | 23 |
if err != nil {
|
| 63 | 24 |
return nil, err |
| 64 | 25 |
} |
| 65 |
- if !container.trySetNetworkMount(m.Destination, path) {
|
|
| 26 |
+ if !container.TrySetNetworkMount(m.Destination, path) {
|
|
| 66 | 27 |
mounts = append(mounts, execdriver.Mount{
|
| 67 | 28 |
Source: path, |
| 68 | 29 |
Destination: m.Destination, |
| ... | ... |
@@ -72,7 +33,7 @@ func (daemon *Daemon) setupMounts(container *Container) ([]execdriver.Mount, err |
| 72 | 72 |
} |
| 73 | 73 |
|
| 74 | 74 |
mounts = sortMounts(mounts) |
| 75 |
- netMounts := container.networkMounts() |
|
| 75 |
+ netMounts := container.NetworkMounts() |
|
| 76 | 76 |
// if we are going to mount any of the network files from container |
| 77 | 77 |
// metadata, the ownership must be set properly for potential container |
| 78 | 78 |
// remapped root (user namespaces) |
| ... | ... |
@@ -5,6 +5,7 @@ package daemon |
| 5 | 5 |
import ( |
| 6 | 6 |
"sort" |
| 7 | 7 |
|
| 8 |
+ "github.com/docker/docker/container" |
|
| 8 | 9 |
"github.com/docker/docker/daemon/execdriver" |
| 9 | 10 |
derr "github.com/docker/docker/errors" |
| 10 | 11 |
"github.com/docker/docker/volume" |
| ... | ... |
@@ -14,7 +15,7 @@ import ( |
| 14 | 14 |
// of the configured mounts on the container to the execdriver mount structure |
| 15 | 15 |
// which will ultimately be passed into the exec driver during container creation. |
| 16 | 16 |
// It also ensures each of the mounts is lexicographically sorted. |
| 17 |
-func (daemon *Daemon) setupMounts(container *Container) ([]execdriver.Mount, error) {
|
|
| 17 |
+func (daemon *Daemon) setupMounts(container *container.Container) ([]execdriver.Mount, error) {
|
|
| 18 | 18 |
var mnts []execdriver.Mount |
| 19 | 19 |
for _, mount := range container.MountPoints { // type is volume.MountPoint
|
| 20 | 20 |
// If there is no source, take it from the volume path |
| ... | ... |
@@ -22,7 +22,7 @@ package opts |
| 22 | 22 |
// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" |
| 23 | 23 |
// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" |
| 24 | 24 |
// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." |
| 25 |
-// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=toDiskLocking.... |
|
| 25 |
+// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... |
|
| 26 | 26 |
// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." |
| 27 | 27 |
// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." |
| 28 | 28 |
// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" |