Browse code

Merge branch 'master' into b

Nirmal Mehta authored on 2016/05/25 10:43:45
Showing 184 changed files
... ...
@@ -168,7 +168,7 @@ RUN useradd --create-home --gid docker unprivilegeduser
168 168
 
169 169
 VOLUME /var/lib/docker
170 170
 WORKDIR /go/src/github.com/docker/docker
171
-ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux
171
+ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux
172 172
 
173 173
 # Let us use a .bashrc file
174 174
 RUN ln -sfv $PWD/.bashrc ~/.bashrc
... ...
@@ -35,6 +35,7 @@
35 35
 			"estesp",
36 36
 			"icecrime",
37 37
 			"jhowardmsft",
38
+			"justincormack",
38 39
 			"lk4d4",
39 40
 			"mavenugo",
40 41
 			"mhbauer",
... ...
@@ -204,6 +205,11 @@
204 204
 	Email = "jess@linux.com"
205 205
 	GitHub = "jfrazelle"
206 206
 
207
+	[people.justincormack]
208
+	Name = "Justin Cormack"
209
+	Email = "justin.cormack@docker.com"
210
+	GitHub = "justincormack"
211
+
207 212
 	[people.lk4d4]
208 213
 	Name = "Alexander Morozov"
209 214
 	Email = "lk4d4@docker.com"
... ...
@@ -27,7 +27,9 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
27 27
 
28 28
 	cmd.ParseFlags(args, true)
29 29
 
30
-	c, err := cli.client.ContainerInspect(context.Background(), cmd.Arg(0))
30
+	ctx := context.Background()
31
+
32
+	c, err := cli.client.ContainerInspect(ctx, cmd.Arg(0))
31 33
 	if err != nil {
32 34
 		return err
33 35
 	}
... ...
@@ -64,11 +66,11 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
64 64
 	}
65 65
 
66 66
 	if *proxy && !c.Config.Tty {
67
-		sigc := cli.forwardAllSignals(container)
67
+		sigc := cli.forwardAllSignals(ctx, container)
68 68
 		defer signal.StopCatch(sigc)
69 69
 	}
70 70
 
71
-	resp, errAttach := cli.client.ContainerAttach(context.Background(), container, options)
71
+	resp, errAttach := cli.client.ContainerAttach(ctx, container, options)
72 72
 	if errAttach != nil && errAttach != httputil.ErrPersistEOF {
73 73
 		// ContainerAttach returns an ErrPersistEOF (connection closed)
74 74
 		// means server met an error and put it in Hijacked connection
... ...
@@ -83,15 +85,15 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
83 83
 		// terminal, the only way to get the shell prompt to display for attaches 2+ is to artificially
84 84
 		// resize it, then go back to normal. Without this, every attach after the first will
85 85
 		// require the user to manually resize or hit enter.
86
-		cli.resizeTtyTo(cmd.Arg(0), height+1, width+1, false)
86
+		cli.resizeTtyTo(ctx, cmd.Arg(0), height+1, width+1, false)
87 87
 
88 88
 		// After the above resizing occurs, the call to monitorTtySize below will handle resetting back
89 89
 		// to the actual size.
90
-		if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil {
90
+		if err := cli.monitorTtySize(ctx, cmd.Arg(0), false); err != nil {
91 91
 			logrus.Debugf("Error monitoring TTY size: %s", err)
92 92
 		}
93 93
 	}
94
-	if err := cli.holdHijackedConnection(context.Background(), c.Config.Tty, in, cli.out, cli.err, resp); err != nil {
94
+	if err := cli.holdHijackedConnection(ctx, c.Config.Tty, in, cli.out, cli.err, resp); err != nil {
95 95
 		return err
96 96
 	}
97 97
 
... ...
@@ -99,7 +101,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
99 99
 		return errAttach
100 100
 	}
101 101
 
102
-	_, status, err := getExitCode(cli, container)
102
+	_, status, err := cli.getExitCode(ctx, container)
103 103
 	if err != nil {
104 104
 		return err
105 105
 	}
... ...
@@ -32,7 +32,7 @@ import (
32 32
 	"github.com/docker/go-units"
33 33
 )
34 34
 
35
-type translatorFunc func(reference.NamedTagged) (reference.Canonical, error)
35
+type translatorFunc func(context.Context, reference.NamedTagged) (reference.Canonical, error)
36 36
 
37 37
 // CmdBuild builds a new image from the source code at a given path.
38 38
 //
... ...
@@ -77,8 +77,8 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
77 77
 	cmd.ParseFlags(args, true)
78 78
 
79 79
 	var (
80
-		ctx io.ReadCloser
81
-		err error
80
+		buildCtx io.ReadCloser
81
+		err      error
82 82
 	)
83 83
 
84 84
 	specifiedContext := cmd.Arg(0)
... ...
@@ -100,11 +100,11 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
100 100
 
101 101
 	switch {
102 102
 	case specifiedContext == "-":
103
-		ctx, relDockerfile, err = builder.GetContextFromReader(cli.in, *dockerfileName)
103
+		buildCtx, relDockerfile, err = builder.GetContextFromReader(cli.in, *dockerfileName)
104 104
 	case urlutil.IsGitURL(specifiedContext):
105 105
 		tempDir, relDockerfile, err = builder.GetContextFromGitURL(specifiedContext, *dockerfileName)
106 106
 	case urlutil.IsURL(specifiedContext):
107
-		ctx, relDockerfile, err = builder.GetContextFromURL(progBuff, specifiedContext, *dockerfileName)
107
+		buildCtx, relDockerfile, err = builder.GetContextFromURL(progBuff, specifiedContext, *dockerfileName)
108 108
 	default:
109 109
 		contextDir, relDockerfile, err = builder.GetContextFromLocalDir(specifiedContext, *dockerfileName)
110 110
 	}
... ...
@@ -121,7 +121,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
121 121
 		contextDir = tempDir
122 122
 	}
123 123
 
124
-	if ctx == nil {
124
+	if buildCtx == nil {
125 125
 		// And canonicalize dockerfile name to a platform-independent one
126 126
 		relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
127 127
 		if err != nil {
... ...
@@ -159,7 +159,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
159 159
 			includes = append(includes, ".dockerignore", relDockerfile)
160 160
 		}
161 161
 
162
-		ctx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
162
+		buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
163 163
 			Compression:     archive.Uncompressed,
164 164
 			ExcludePatterns: excludes,
165 165
 			IncludeFiles:    includes,
... ...
@@ -169,17 +169,19 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
169 169
 		}
170 170
 	}
171 171
 
172
+	ctx := context.Background()
173
+
172 174
 	var resolvedTags []*resolvedTag
173 175
 	if isTrusted() {
174 176
 		// Wrap the tar archive to replace the Dockerfile entry with the rewritten
175 177
 		// Dockerfile which uses trusted pulls.
176
-		ctx = replaceDockerfileTarWrapper(ctx, relDockerfile, cli.trustedReference, &resolvedTags)
178
+		buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, cli.trustedReference, &resolvedTags)
177 179
 	}
178 180
 
179 181
 	// Setup an upload progress bar
180 182
 	progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true)
181 183
 
182
-	var body io.Reader = progress.NewProgressReader(ctx, progressOutput, 0, "", "Sending build context to Docker daemon")
184
+	var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon")
183 185
 
184 186
 	var memory int64
185 187
 	if *flMemoryString != "" {
... ...
@@ -235,7 +237,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
235 235
 		Labels:         runconfigopts.ConvertKVStringsToMap(flLabels.GetAll()),
236 236
 	}
237 237
 
238
-	response, err := cli.client.ImageBuild(context.Background(), body, options)
238
+	response, err := cli.client.ImageBuild(ctx, body, options)
239 239
 	if err != nil {
240 240
 		return err
241 241
 	}
... ...
@@ -271,7 +273,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
271 271
 		// Since the build was successful, now we must tag any of the resolved
272 272
 		// images from the above Dockerfile rewrite.
273 273
 		for _, resolved := range resolvedTags {
274
-			if err := cli.tagTrusted(resolved.digestRef, resolved.tagRef); err != nil {
274
+			if err := cli.tagTrusted(ctx, resolved.digestRef, resolved.tagRef); err != nil {
275 275
 				return err
276 276
 			}
277 277
 		}
... ...
@@ -303,7 +305,7 @@ type resolvedTag struct {
303 303
 // "FROM <image>" instructions to a digest reference. `translator` is a
304 304
 // function that takes a repository name and tag reference and returns a
305 305
 // trusted digest reference.
306
-func rewriteDockerfileFrom(dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) {
306
+func rewriteDockerfileFrom(ctx context.Context, dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) {
307 307
 	scanner := bufio.NewScanner(dockerfile)
308 308
 	buf := bytes.NewBuffer(nil)
309 309
 
... ...
@@ -320,7 +322,7 @@ func rewriteDockerfileFrom(dockerfile io.Reader, translator translatorFunc) (new
320 320
 			}
321 321
 			ref = reference.WithDefaultTag(ref)
322 322
 			if ref, ok := ref.(reference.NamedTagged); ok && isTrusted() {
323
-				trustedRef, err := translator(ref)
323
+				trustedRef, err := translator(ctx, ref)
324 324
 				if err != nil {
325 325
 					return nil, nil, err
326 326
 				}
... ...
@@ -346,7 +348,7 @@ func rewriteDockerfileFrom(dockerfile io.Reader, translator translatorFunc) (new
346 346
 // replaces the entry with the given Dockerfile name with the contents of the
347 347
 // new Dockerfile. Returns a new tar archive stream with the replaced
348 348
 // Dockerfile.
349
-func replaceDockerfileTarWrapper(inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser {
349
+func replaceDockerfileTarWrapper(ctx context.Context, inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser {
350 350
 	pipeReader, pipeWriter := io.Pipe()
351 351
 	go func() {
352 352
 		tarReader := tar.NewReader(inputTarStream)
... ...
@@ -373,7 +375,7 @@ func replaceDockerfileTarWrapper(inputTarStream io.ReadCloser, dockerfileName st
373 373
 				// generated from a directory on the local filesystem, the
374 374
 				// Dockerfile will only appear once in the archive.
375 375
 				var newDockerfile []byte
376
-				newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(content, translator)
376
+				newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(ctx, content, translator)
377 377
 				if err != nil {
378 378
 					pipeWriter.CloseWithError(err)
379 379
 					return
... ...
@@ -81,11 +81,13 @@ func (cli *DockerCli) CmdCp(args ...string) error {
81 81
 		followLink: *followLink,
82 82
 	}
83 83
 
84
+	ctx := context.Background()
85
+
84 86
 	switch direction {
85 87
 	case fromContainer:
86
-		return cli.copyFromContainer(srcContainer, srcPath, dstPath, cpParam)
88
+		return cli.copyFromContainer(ctx, srcContainer, srcPath, dstPath, cpParam)
87 89
 	case toContainer:
88
-		return cli.copyToContainer(srcPath, dstContainer, dstPath, cpParam)
90
+		return cli.copyToContainer(ctx, srcPath, dstContainer, dstPath, cpParam)
89 91
 	case acrossContainers:
90 92
 		// Copying between containers isn't supported.
91 93
 		return fmt.Errorf("copying between containers is not supported")
... ...
@@ -126,8 +128,8 @@ func splitCpArg(arg string) (container, path string) {
126 126
 	return parts[0], parts[1]
127 127
 }
128 128
 
129
-func (cli *DockerCli) statContainerPath(containerName, path string) (types.ContainerPathStat, error) {
130
-	return cli.client.ContainerStatPath(context.Background(), containerName, path)
129
+func (cli *DockerCli) statContainerPath(ctx context.Context, containerName, path string) (types.ContainerPathStat, error) {
130
+	return cli.client.ContainerStatPath(ctx, containerName, path)
131 131
 }
132 132
 
133 133
 func resolveLocalPath(localPath string) (absPath string, err error) {
... ...
@@ -138,7 +140,7 @@ func resolveLocalPath(localPath string) (absPath string, err error) {
138 138
 	return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil
139 139
 }
140 140
 
141
-func (cli *DockerCli) copyFromContainer(srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) {
141
+func (cli *DockerCli) copyFromContainer(ctx context.Context, srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) {
142 142
 	if dstPath != "-" {
143 143
 		// Get an absolute destination path.
144 144
 		dstPath, err = resolveLocalPath(dstPath)
... ...
@@ -150,7 +152,7 @@ func (cli *DockerCli) copyFromContainer(srcContainer, srcPath, dstPath string, c
150 150
 	// if client requests to follow symbol link, then must decide target file to be copied
151 151
 	var rebaseName string
152 152
 	if cpParam.followLink {
153
-		srcStat, err := cli.statContainerPath(srcContainer, srcPath)
153
+		srcStat, err := cli.statContainerPath(ctx, srcContainer, srcPath)
154 154
 
155 155
 		// If the destination is a symbolic link, we should follow it.
156 156
 		if err == nil && srcStat.Mode&os.ModeSymlink != 0 {
... ...
@@ -167,7 +169,7 @@ func (cli *DockerCli) copyFromContainer(srcContainer, srcPath, dstPath string, c
167 167
 
168 168
 	}
169 169
 
170
-	content, stat, err := cli.client.CopyFromContainer(context.Background(), srcContainer, srcPath)
170
+	content, stat, err := cli.client.CopyFromContainer(ctx, srcContainer, srcPath)
171 171
 	if err != nil {
172 172
 		return err
173 173
 	}
... ...
@@ -199,7 +201,7 @@ func (cli *DockerCli) copyFromContainer(srcContainer, srcPath, dstPath string, c
199 199
 	return archive.CopyTo(preArchive, srcInfo, dstPath)
200 200
 }
201 201
 
202
-func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpParam *cpConfig) (err error) {
202
+func (cli *DockerCli) copyToContainer(ctx context.Context, srcPath, dstContainer, dstPath string, cpParam *cpConfig) (err error) {
203 203
 	if srcPath != "-" {
204 204
 		// Get an absolute source path.
205 205
 		srcPath, err = resolveLocalPath(srcPath)
... ...
@@ -215,7 +217,7 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpP
215 215
 
216 216
 	// Prepare destination copy info by stat-ing the container path.
217 217
 	dstInfo := archive.CopyInfo{Path: dstPath}
218
-	dstStat, err := cli.statContainerPath(dstContainer, dstPath)
218
+	dstStat, err := cli.statContainerPath(ctx, dstContainer, dstPath)
219 219
 
220 220
 	// If the destination is a symbolic link, we should evaluate it.
221 221
 	if err == nil && dstStat.Mode&os.ModeSymlink != 0 {
... ...
@@ -227,7 +229,7 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpP
227 227
 		}
228 228
 
229 229
 		dstInfo.Path = linkTarget
230
-		dstStat, err = cli.statContainerPath(dstContainer, linkTarget)
230
+		dstStat, err = cli.statContainerPath(ctx, dstContainer, linkTarget)
231 231
 	}
232 232
 
233 233
 	// Ignore any error and assume that the parent directory of the destination
... ...
@@ -291,5 +293,5 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpP
291 291
 		AllowOverwriteDirWithFile: false,
292 292
 	}
293 293
 
294
-	return cli.client.CopyToContainer(context.Background(), dstContainer, resolvedDstPath, content, options)
294
+	return cli.client.CopyToContainer(ctx, dstContainer, resolvedDstPath, content, options)
295 295
 }
... ...
@@ -19,7 +19,7 @@ import (
19 19
 	networktypes "github.com/docker/engine-api/types/network"
20 20
 )
21 21
 
22
-func (cli *DockerCli) pullImage(image string, out io.Writer) error {
22
+func (cli *DockerCli) pullImage(ctx context.Context, image string, out io.Writer) error {
23 23
 	ref, err := reference.ParseNamed(image)
24 24
 	if err != nil {
25 25
 		return err
... ...
@@ -31,7 +31,7 @@ func (cli *DockerCli) pullImage(image string, out io.Writer) error {
31 31
 		return err
32 32
 	}
33 33
 
34
-	authConfig := cli.resolveAuthConfig(repoInfo.Index)
34
+	authConfig := cli.resolveAuthConfig(ctx, repoInfo.Index)
35 35
 	encodedAuth, err := encodeAuthToBase64(authConfig)
36 36
 	if err != nil {
37 37
 		return err
... ...
@@ -41,7 +41,7 @@ func (cli *DockerCli) pullImage(image string, out io.Writer) error {
41 41
 		RegistryAuth: encodedAuth,
42 42
 	}
43 43
 
44
-	responseBody, err := cli.client.ImageCreate(context.Background(), image, options)
44
+	responseBody, err := cli.client.ImageCreate(ctx, image, options)
45 45
 	if err != nil {
46 46
 		return err
47 47
 	}
... ...
@@ -69,7 +69,7 @@ func newCIDFile(path string) (*cidFile, error) {
69 69
 	return &cidFile{path: path, file: f}, nil
70 70
 }
71 71
 
72
-func (cli *DockerCli) createContainer(config *container.Config, hostConfig *container.HostConfig, networkingConfig *networktypes.NetworkingConfig, cidfile, name string) (*types.ContainerCreateResponse, error) {
72
+func (cli *DockerCli) createContainer(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *networktypes.NetworkingConfig, cidfile, name string) (*types.ContainerCreateResponse, error) {
73 73
 	var containerIDFile *cidFile
74 74
 	if cidfile != "" {
75 75
 		var err error
... ...
@@ -89,7 +89,7 @@ func (cli *DockerCli) createContainer(config *container.Config, hostConfig *cont
89 89
 
90 90
 		if ref, ok := ref.(reference.NamedTagged); ok && isTrusted() {
91 91
 			var err error
92
-			trustedRef, err = cli.trustedReference(ref)
92
+			trustedRef, err = cli.trustedReference(ctx, ref)
93 93
 			if err != nil {
94 94
 				return nil, err
95 95
 			}
... ...
@@ -98,7 +98,7 @@ func (cli *DockerCli) createContainer(config *container.Config, hostConfig *cont
98 98
 	}
99 99
 
100 100
 	//create the container
101
-	response, err := cli.client.ContainerCreate(context.Background(), config, hostConfig, networkingConfig, name)
101
+	response, err := cli.client.ContainerCreate(ctx, config, hostConfig, networkingConfig, name)
102 102
 
103 103
 	//if image not found try to pull it
104 104
 	if err != nil {
... ...
@@ -106,17 +106,17 @@ func (cli *DockerCli) createContainer(config *container.Config, hostConfig *cont
106 106
 			fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", ref.String())
107 107
 
108 108
 			// we don't want to write to stdout anything apart from container.ID
109
-			if err = cli.pullImage(config.Image, cli.err); err != nil {
109
+			if err = cli.pullImage(ctx, config.Image, cli.err); err != nil {
110 110
 				return nil, err
111 111
 			}
112 112
 			if ref, ok := ref.(reference.NamedTagged); ok && trustedRef != nil {
113
-				if err := cli.tagTrusted(trustedRef, ref); err != nil {
113
+				if err := cli.tagTrusted(ctx, trustedRef, ref); err != nil {
114 114
 					return nil, err
115 115
 				}
116 116
 			}
117 117
 			// Retry
118 118
 			var retryErr error
119
-			response, retryErr = cli.client.ContainerCreate(context.Background(), config, hostConfig, networkingConfig, name)
119
+			response, retryErr = cli.client.ContainerCreate(ctx, config, hostConfig, networkingConfig, name)
120 120
 			if retryErr != nil {
121 121
 				return nil, retryErr
122 122
 			}
... ...
@@ -158,7 +158,7 @@ func (cli *DockerCli) CmdCreate(args ...string) error {
158 158
 		cmd.Usage()
159 159
 		return nil
160 160
 	}
161
-	response, err := cli.createContainer(config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName)
161
+	response, err := cli.createContainer(context.Background(), config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName)
162 162
 	if err != nil {
163 163
 		return err
164 164
 	}
... ...
@@ -34,7 +34,9 @@ func (cli *DockerCli) CmdExec(args ...string) error {
34 34
 	// Send client escape keys
35 35
 	execConfig.DetachKeys = cli.configFile.DetachKeys
36 36
 
37
-	response, err := cli.client.ContainerExecCreate(context.Background(), container, *execConfig)
37
+	ctx := context.Background()
38
+
39
+	response, err := cli.client.ContainerExecCreate(ctx, container, *execConfig)
38 40
 	if err != nil {
39 41
 		return err
40 42
 	}
... ...
@@ -56,7 +58,7 @@ func (cli *DockerCli) CmdExec(args ...string) error {
56 56
 			Tty:    execConfig.Tty,
57 57
 		}
58 58
 
59
-		if err := cli.client.ContainerExecStart(context.Background(), execID, execStartCheck); err != nil {
59
+		if err := cli.client.ContainerExecStart(ctx, execID, execStartCheck); err != nil {
60 60
 			return err
61 61
 		}
62 62
 		// For now don't print this - wait for when we support exec wait()
... ...
@@ -85,17 +87,17 @@ func (cli *DockerCli) CmdExec(args ...string) error {
85 85
 		}
86 86
 	}
87 87
 
88
-	resp, err := cli.client.ContainerExecAttach(context.Background(), execID, *execConfig)
88
+	resp, err := cli.client.ContainerExecAttach(ctx, execID, *execConfig)
89 89
 	if err != nil {
90 90
 		return err
91 91
 	}
92 92
 	defer resp.Close()
93 93
 	errCh = promise.Go(func() error {
94
-		return cli.holdHijackedConnection(context.Background(), execConfig.Tty, in, out, stderr, resp)
94
+		return cli.holdHijackedConnection(ctx, execConfig.Tty, in, out, stderr, resp)
95 95
 	})
96 96
 
97 97
 	if execConfig.Tty && cli.isTerminalIn {
98
-		if err := cli.monitorTtySize(execID, true); err != nil {
98
+		if err := cli.monitorTtySize(ctx, execID, true); err != nil {
99 99
 			fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)
100 100
 		}
101 101
 	}
... ...
@@ -106,7 +108,7 @@ func (cli *DockerCli) CmdExec(args ...string) error {
106 106
 	}
107 107
 
108 108
 	var status int
109
-	if _, status, err = getExecExitCode(cli, execID); err != nil {
109
+	if _, status, err = cli.getExecExitCode(ctx, execID); err != nil {
110 110
 		return err
111 111
 	}
112 112
 
... ...
@@ -38,5 +38,4 @@ func (cli *DockerCli) CmdExport(args ...string) error {
38 38
 	}
39 39
 
40 40
 	return copyToFile(*outfile, responseBody)
41
-
42 41
 }
... ...
@@ -28,38 +28,40 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
28 28
 		return fmt.Errorf("%q is not a valid value for --type", *inspectType)
29 29
 	}
30 30
 
31
+	ctx := context.Background()
32
+
31 33
 	var elementSearcher inspectSearcher
32 34
 	switch *inspectType {
33 35
 	case "container":
34
-		elementSearcher = cli.inspectContainers(*size)
36
+		elementSearcher = cli.inspectContainers(ctx, *size)
35 37
 	case "image":
36
-		elementSearcher = cli.inspectImages(*size)
38
+		elementSearcher = cli.inspectImages(ctx, *size)
37 39
 	default:
38
-		elementSearcher = cli.inspectAll(*size)
40
+		elementSearcher = cli.inspectAll(ctx, *size)
39 41
 	}
40 42
 
41 43
 	return cli.inspectElements(*tmplStr, cmd.Args(), elementSearcher)
42 44
 }
43 45
 
44
-func (cli *DockerCli) inspectContainers(getSize bool) inspectSearcher {
46
+func (cli *DockerCli) inspectContainers(ctx context.Context, getSize bool) inspectSearcher {
45 47
 	return func(ref string) (interface{}, []byte, error) {
46
-		return cli.client.ContainerInspectWithRaw(context.Background(), ref, getSize)
48
+		return cli.client.ContainerInspectWithRaw(ctx, ref, getSize)
47 49
 	}
48 50
 }
49 51
 
50
-func (cli *DockerCli) inspectImages(getSize bool) inspectSearcher {
52
+func (cli *DockerCli) inspectImages(ctx context.Context, getSize bool) inspectSearcher {
51 53
 	return func(ref string) (interface{}, []byte, error) {
52
-		return cli.client.ImageInspectWithRaw(context.Background(), ref, getSize)
54
+		return cli.client.ImageInspectWithRaw(ctx, ref, getSize)
53 55
 	}
54 56
 }
55 57
 
56
-func (cli *DockerCli) inspectAll(getSize bool) inspectSearcher {
58
+func (cli *DockerCli) inspectAll(ctx context.Context, getSize bool) inspectSearcher {
57 59
 	return func(ref string) (interface{}, []byte, error) {
58
-		c, rawContainer, err := cli.client.ContainerInspectWithRaw(context.Background(), ref, getSize)
60
+		c, rawContainer, err := cli.client.ContainerInspectWithRaw(ctx, ref, getSize)
59 61
 		if err != nil {
60 62
 			// Search for image with that id if a container doesn't exist.
61 63
 			if client.IsErrContainerNotFound(err) {
62
-				i, rawImage, err := cli.client.ImageInspectWithRaw(context.Background(), ref, getSize)
64
+				i, rawImage, err := cli.client.ImageInspectWithRaw(ctx, ref, getSize)
63 65
 				if err != nil {
64 66
 					if client.IsErrImageNotFound(err) {
65 67
 						return nil, nil, fmt.Errorf("Error: No such image or container: %s", ref)
... ...
@@ -40,12 +40,14 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
40 40
 		cli.in = os.Stdin
41 41
 	}
42 42
 
43
+	ctx := context.Background()
44
+
43 45
 	var serverAddress string
44 46
 	var isDefaultRegistry bool
45 47
 	if len(cmd.Args()) > 0 {
46 48
 		serverAddress = cmd.Arg(0)
47 49
 	} else {
48
-		serverAddress = cli.electAuthServer()
50
+		serverAddress = cli.electAuthServer(ctx)
49 51
 		isDefaultRegistry = true
50 52
 	}
51 53
 
... ...
@@ -54,7 +56,7 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
54 54
 		return err
55 55
 	}
56 56
 
57
-	response, err := cli.client.RegistryLogin(context.Background(), authConfig)
57
+	response, err := cli.client.RegistryLogin(ctx, authConfig)
58 58
 	if err != nil {
59 59
 		return err
60 60
 	}
... ...
@@ -3,6 +3,8 @@ package client
3 3
 import (
4 4
 	"fmt"
5 5
 
6
+	"golang.org/x/net/context"
7
+
6 8
 	Cli "github.com/docker/docker/cli"
7 9
 	flag "github.com/docker/docker/pkg/mflag"
8 10
 )
... ...
@@ -22,7 +24,7 @@ func (cli *DockerCli) CmdLogout(args ...string) error {
22 22
 	if len(cmd.Args()) > 0 {
23 23
 		serverAddress = cmd.Arg(0)
24 24
 	} else {
25
-		serverAddress = cli.electAuthServer()
25
+		serverAddress = cli.electAuthServer(context.Background())
26 26
 	}
27 27
 
28 28
 	// check if we're logged in based on the records in the config file
... ...
@@ -33,7 +33,9 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
33 33
 
34 34
 	name := cmd.Arg(0)
35 35
 
36
-	c, err := cli.client.ContainerInspect(context.Background(), name)
36
+	ctx := context.Background()
37
+
38
+	c, err := cli.client.ContainerInspect(ctx, name)
37 39
 	if err != nil {
38 40
 		return err
39 41
 	}
... ...
@@ -51,7 +53,7 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
51 51
 		Tail:       *tail,
52 52
 		Details:    *details,
53 53
 	}
54
-	responseBody, err := cli.client.ContainerLogs(context.Background(), name, options)
54
+	responseBody, err := cli.client.ContainerLogs(ctx, name, options)
55 55
 	if err != nil {
56 56
 		return err
57 57
 	}
... ...
@@ -104,9 +104,11 @@ func (cli *DockerCli) CmdNetworkRm(args ...string) error {
104 104
 		return err
105 105
 	}
106 106
 
107
+	ctx := context.Background()
108
+
107 109
 	status := 0
108 110
 	for _, net := range cmd.Args() {
109
-		if err := cli.client.NetworkRemove(context.Background(), net); err != nil {
111
+		if err := cli.client.NetworkRemove(ctx, net); err != nil {
110 112
 			fmt.Fprintf(cli.err, "%s\n", err)
111 113
 			status = 1
112 114
 			continue
... ...
@@ -239,8 +241,10 @@ func (cli *DockerCli) CmdNetworkInspect(args ...string) error {
239 239
 		return err
240 240
 	}
241 241
 
242
+	ctx := context.Background()
243
+
242 244
 	inspectSearcher := func(name string) (interface{}, []byte, error) {
243
-		i, err := cli.client.NetworkInspect(context.Background(), name)
245
+		i, err := cli.client.NetworkInspect(ctx, name)
244 246
 		return i, nil, err
245 247
 	}
246 248
 
... ...
@@ -19,9 +19,11 @@ func (cli *DockerCli) CmdPause(args ...string) error {
19 19
 
20 20
 	cmd.ParseFlags(args, true)
21 21
 
22
+	ctx := context.Background()
23
+
22 24
 	var errs []string
23 25
 	for _, name := range cmd.Args() {
24
-		if err := cli.client.ContainerPause(context.Background(), name); err != nil {
26
+		if err := cli.client.ContainerPause(ctx, name); err != nil {
25 27
 			errs = append(errs, err.Error())
26 28
 		} else {
27 29
 			fmt.Fprintf(cli.out, "%s\n", name)
... ...
@@ -55,18 +55,20 @@ func (cli *DockerCli) CmdPull(args ...string) error {
55 55
 		return err
56 56
 	}
57 57
 
58
-	authConfig := cli.resolveAuthConfig(repoInfo.Index)
58
+	ctx := context.Background()
59
+
60
+	authConfig := cli.resolveAuthConfig(ctx, repoInfo.Index)
59 61
 	requestPrivilege := cli.registryAuthenticationPrivilegedFunc(repoInfo.Index, "pull")
60 62
 
61 63
 	if isTrusted() && !registryRef.HasDigest() {
62 64
 		// Check if tag is digest
63
-		return cli.trustedPull(repoInfo, registryRef, authConfig, requestPrivilege)
65
+		return cli.trustedPull(ctx, repoInfo, registryRef, authConfig, requestPrivilege)
64 66
 	}
65 67
 
66
-	return cli.imagePullPrivileged(authConfig, distributionRef.String(), requestPrivilege, *allTags)
68
+	return cli.imagePullPrivileged(ctx, authConfig, distributionRef.String(), requestPrivilege, *allTags)
67 69
 }
68 70
 
69
-func (cli *DockerCli) imagePullPrivileged(authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc, all bool) error {
71
+func (cli *DockerCli) imagePullPrivileged(ctx context.Context, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc, all bool) error {
70 72
 
71 73
 	encodedAuth, err := encodeAuthToBase64(authConfig)
72 74
 	if err != nil {
... ...
@@ -78,7 +80,7 @@ func (cli *DockerCli) imagePullPrivileged(authConfig types.AuthConfig, ref strin
78 78
 		All:           all,
79 79
 	}
80 80
 
81
-	responseBody, err := cli.client.ImagePull(context.Background(), ref, options)
81
+	responseBody, err := cli.client.ImagePull(ctx, ref, options)
82 82
 	if err != nil {
83 83
 		return err
84 84
 	}
... ...
@@ -33,15 +33,18 @@ func (cli *DockerCli) CmdPush(args ...string) error {
33 33
 	if err != nil {
34 34
 		return err
35 35
 	}
36
-	// Resolve the Auth config relevant for this server
37
-	authConfig := cli.resolveAuthConfig(repoInfo.Index)
38 36
 
37
+	ctx := context.Background()
38
+
39
+	// Resolve the Auth config relevant for this server
40
+	authConfig := cli.resolveAuthConfig(ctx, repoInfo.Index)
39 41
 	requestPrivilege := cli.registryAuthenticationPrivilegedFunc(repoInfo.Index, "push")
42
+
40 43
 	if isTrusted() {
41
-		return cli.trustedPush(repoInfo, ref, authConfig, requestPrivilege)
44
+		return cli.trustedPush(ctx, repoInfo, ref, authConfig, requestPrivilege)
42 45
 	}
43 46
 
44
-	responseBody, err := cli.imagePushPrivileged(authConfig, ref.String(), requestPrivilege)
47
+	responseBody, err := cli.imagePushPrivileged(ctx, authConfig, ref.String(), requestPrivilege)
45 48
 	if err != nil {
46 49
 		return err
47 50
 	}
... ...
@@ -51,7 +54,7 @@ func (cli *DockerCli) CmdPush(args ...string) error {
51 51
 	return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil)
52 52
 }
53 53
 
54
-func (cli *DockerCli) imagePushPrivileged(authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc) (io.ReadCloser, error) {
54
+func (cli *DockerCli) imagePushPrivileged(ctx context.Context, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc) (io.ReadCloser, error) {
55 55
 	encodedAuth, err := encodeAuthToBase64(authConfig)
56 56
 	if err != nil {
57 57
 		return nil, err
... ...
@@ -61,5 +64,5 @@ func (cli *DockerCli) imagePushPrivileged(authConfig types.AuthConfig, ref strin
61 61
 		PrivilegeFunc: requestPrivilege,
62 62
 	}
63 63
 
64
-	return cli.client.ImagePush(context.Background(), ref, options)
64
+	return cli.client.ImagePush(ctx, ref, options)
65 65
 }
... ...
@@ -23,6 +23,8 @@ func (cli *DockerCli) CmdRm(args ...string) error {
23 23
 
24 24
 	cmd.ParseFlags(args, true)
25 25
 
26
+	ctx := context.Background()
27
+
26 28
 	var errs []string
27 29
 	for _, name := range cmd.Args() {
28 30
 		if name == "" {
... ...
@@ -30,7 +32,7 @@ func (cli *DockerCli) CmdRm(args ...string) error {
30 30
 		}
31 31
 		name = strings.Trim(name, "/")
32 32
 
33
-		if err := cli.removeContainer(name, *v, *link, *force); err != nil {
33
+		if err := cli.removeContainer(ctx, name, *v, *link, *force); err != nil {
34 34
 			errs = append(errs, err.Error())
35 35
 		} else {
36 36
 			fmt.Fprintf(cli.out, "%s\n", name)
... ...
@@ -42,13 +44,13 @@ func (cli *DockerCli) CmdRm(args ...string) error {
42 42
 	return nil
43 43
 }
44 44
 
45
-func (cli *DockerCli) removeContainer(container string, removeVolumes, removeLinks, force bool) error {
45
+func (cli *DockerCli) removeContainer(ctx context.Context, container string, removeVolumes, removeLinks, force bool) error {
46 46
 	options := types.ContainerRemoveOptions{
47 47
 		RemoveVolumes: removeVolumes,
48 48
 		RemoveLinks:   removeLinks,
49 49
 		Force:         force,
50 50
 	}
51
-	if err := cli.client.ContainerRemove(context.Background(), container, options); err != nil {
51
+	if err := cli.client.ContainerRemove(ctx, container, options); err != nil {
52 52
 		return err
53 53
 	}
54 54
 	return nil
... ...
@@ -31,6 +31,8 @@ func (cli *DockerCli) CmdRmi(args ...string) error {
31 31
 		v.Set("noprune", "1")
32 32
 	}
33 33
 
34
+	ctx := context.Background()
35
+
34 36
 	var errs []string
35 37
 	for _, image := range cmd.Args() {
36 38
 		options := types.ImageRemoveOptions{
... ...
@@ -38,7 +40,7 @@ func (cli *DockerCli) CmdRmi(args ...string) error {
38 38
 			PruneChildren: !*noprune,
39 39
 		}
40 40
 
41
-		dels, err := cli.client.ImageRemove(context.Background(), image, options)
41
+		dels, err := cli.client.ImageRemove(ctx, image, options)
42 42
 		if err != nil {
43 43
 			errs = append(errs, err.Error())
44 44
 		} else {
... ...
@@ -147,20 +147,20 @@ func (cli *DockerCli) CmdRun(args ...string) error {
147 147
 		hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = cli.getTtySize()
148 148
 	}
149 149
 
150
-	createResponse, err := cli.createContainer(config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName)
150
+	ctx, cancelFun := context.WithCancel(context.Background())
151
+
152
+	createResponse, err := cli.createContainer(ctx, config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName)
151 153
 	if err != nil {
152 154
 		cmd.ReportError(err.Error(), true)
153 155
 		return runStartContainerErr(err)
154 156
 	}
155 157
 	if sigProxy {
156
-		sigc := cli.forwardAllSignals(createResponse.ID)
158
+		sigc := cli.forwardAllSignals(ctx, createResponse.ID)
157 159
 		defer signal.StopCatch(sigc)
158 160
 	}
159 161
 	var (
160 162
 		waitDisplayID chan struct{}
161 163
 		errCh         chan error
162
-		cancelFun     context.CancelFunc
163
-		ctx           context.Context
164 164
 	)
165 165
 	if !config.AttachStdout && !config.AttachStderr {
166 166
 		// Make this asynchronous to allow the client to write to stdin before having to read the ID
... ...
@@ -205,7 +205,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
205 205
 			DetachKeys: cli.configFile.DetachKeys,
206 206
 		}
207 207
 
208
-		resp, errAttach := cli.client.ContainerAttach(context.Background(), createResponse.ID, options)
208
+		resp, errAttach := cli.client.ContainerAttach(ctx, createResponse.ID, options)
209 209
 		if errAttach != nil && errAttach != httputil.ErrPersistEOF {
210 210
 			// ContainerAttach returns an ErrPersistEOF (connection closed)
211 211
 			// means server met an error and put it in Hijacked connection
... ...
@@ -214,7 +214,6 @@ func (cli *DockerCli) CmdRun(args ...string) error {
214 214
 		}
215 215
 		defer resp.Close()
216 216
 
217
-		ctx, cancelFun = context.WithCancel(context.Background())
218 217
 		errCh = promise.Go(func() error {
219 218
 			errHijack := cli.holdHijackedConnection(ctx, config.Tty, in, out, stderr, resp)
220 219
 			if errHijack == nil {
... ...
@@ -226,14 +225,16 @@ func (cli *DockerCli) CmdRun(args ...string) error {
226 226
 
227 227
 	if *flAutoRemove {
228 228
 		defer func() {
229
-			if err := cli.removeContainer(createResponse.ID, true, false, true); err != nil {
229
+			// Explicitly not sharing the context as it could be "Done" (by calling cancelFun)
230
+			// and thus the container would not be removed.
231
+			if err := cli.removeContainer(context.Background(), createResponse.ID, true, false, true); err != nil {
230 232
 				fmt.Fprintf(cli.err, "%v\n", err)
231 233
 			}
232 234
 		}()
233 235
 	}
234 236
 
235 237
 	//start the container
236
-	if err := cli.client.ContainerStart(context.Background(), createResponse.ID); err != nil {
238
+	if err := cli.client.ContainerStart(ctx, createResponse.ID); err != nil {
237 239
 		// If we have holdHijackedConnection, we should notify
238 240
 		// holdHijackedConnection we are going to exit and wait
239 241
 		// to avoid the terminal are not restored.
... ...
@@ -247,7 +248,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
247 247
 	}
248 248
 
249 249
 	if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut {
250
-		if err := cli.monitorTtySize(createResponse.ID, false); err != nil {
250
+		if err := cli.monitorTtySize(ctx, createResponse.ID, false); err != nil {
251 251
 			fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)
252 252
 		}
253 253
 	}
... ...
@@ -272,23 +273,23 @@ func (cli *DockerCli) CmdRun(args ...string) error {
272 272
 	if *flAutoRemove {
273 273
 		// Autoremove: wait for the container to finish, retrieve
274 274
 		// the exit code and remove the container
275
-		if status, err = cli.client.ContainerWait(context.Background(), createResponse.ID); err != nil {
275
+		if status, err = cli.client.ContainerWait(ctx, createResponse.ID); err != nil {
276 276
 			return runStartContainerErr(err)
277 277
 		}
278
-		if _, status, err = getExitCode(cli, createResponse.ID); err != nil {
278
+		if _, status, err = cli.getExitCode(ctx, createResponse.ID); err != nil {
279 279
 			return err
280 280
 		}
281 281
 	} else {
282 282
 		// No Autoremove: Simply retrieve the exit code
283 283
 		if !config.Tty {
284 284
 			// In non-TTY mode, we can't detach, so we must wait for container exit
285
-			if status, err = cli.client.ContainerWait(context.Background(), createResponse.ID); err != nil {
285
+			if status, err = cli.client.ContainerWait(ctx, createResponse.ID); err != nil {
286 286
 				return err
287 287
 			}
288 288
 		} else {
289 289
 			// In TTY mode, there is a race: if the process dies too slowly, the state could
290 290
 			// be updated after the getExitCode call and result in the wrong exit code being reported
291
-			if _, status, err = getExitCode(cli, createResponse.ID); err != nil {
291
+			if _, status, err = cli.getExitCode(ctx, createResponse.ID); err != nil {
292 292
 				return err
293 293
 			}
294 294
 		}
... ...
@@ -10,10 +10,12 @@ import (
10 10
 	"golang.org/x/net/context"
11 11
 
12 12
 	Cli "github.com/docker/docker/cli"
13
+	"github.com/docker/docker/opts"
13 14
 	flag "github.com/docker/docker/pkg/mflag"
14 15
 	"github.com/docker/docker/pkg/stringutils"
15 16
 	"github.com/docker/docker/registry"
16 17
 	"github.com/docker/engine-api/types"
18
+	"github.com/docker/engine-api/types/filters"
17 19
 	registrytypes "github.com/docker/engine-api/types/registry"
18 20
 )
19 21
 
... ...
@@ -21,14 +23,32 @@ import (
21 21
 //
22 22
 // Usage: docker search [OPTIONS] TERM
23 23
 func (cli *DockerCli) CmdSearch(args ...string) error {
24
+	var (
25
+		err error
26
+
27
+		filterArgs = filters.NewArgs()
28
+
29
+		flFilter = opts.NewListOpts(nil)
30
+	)
31
+
24 32
 	cmd := Cli.Subcmd("search", []string{"TERM"}, Cli.DockerCommands["search"].Description, true)
25 33
 	noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output")
26
-	automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds")
27
-	stars := cmd.Uint([]string{"s", "-stars"}, 0, "Only displays with at least x stars")
34
+	cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
35
+
36
+	// Deprecated since Docker 1.12 in favor of "--filter"
37
+	automated := cmd.Bool([]string{"#-automated"}, false, "Only show automated builds - DEPRECATED")
38
+	stars := cmd.Uint([]string{"s", "#-stars"}, 0, "Only displays with at least x stars - DEPRECATED")
39
+
28 40
 	cmd.Require(flag.Exact, 1)
29 41
 
30 42
 	cmd.ParseFlags(args, true)
31 43
 
44
+	for _, f := range flFilter.GetAll() {
45
+		if filterArgs, err = filters.ParseFlag(f, filterArgs); err != nil {
46
+			return err
47
+		}
48
+	}
49
+
32 50
 	name := cmd.Arg(0)
33 51
 	v := url.Values{}
34 52
 	v.Set("term", name)
... ...
@@ -38,7 +58,9 @@ func (cli *DockerCli) CmdSearch(args ...string) error {
38 38
 		return err
39 39
 	}
40 40
 
41
-	authConfig := cli.resolveAuthConfig(indexInfo)
41
+	ctx := context.Background()
42
+
43
+	authConfig := cli.resolveAuthConfig(ctx, indexInfo)
42 44
 	requestPrivilege := cli.registryAuthenticationPrivilegedFunc(indexInfo, "search")
43 45
 
44 46
 	encodedAuth, err := encodeAuthToBase64(authConfig)
... ...
@@ -49,9 +71,10 @@ func (cli *DockerCli) CmdSearch(args ...string) error {
49 49
 	options := types.ImageSearchOptions{
50 50
 		RegistryAuth:  encodedAuth,
51 51
 		PrivilegeFunc: requestPrivilege,
52
+		Filters:       filterArgs,
52 53
 	}
53 54
 
54
-	unorderedResults, err := cli.client.ImageSearch(context.Background(), name, options)
55
+	unorderedResults, err := cli.client.ImageSearch(ctx, name, options)
55 56
 	if err != nil {
56 57
 		return err
57 58
 	}
... ...
@@ -62,6 +85,7 @@ func (cli *DockerCli) CmdSearch(args ...string) error {
62 62
 	w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0)
63 63
 	fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n")
64 64
 	for _, res := range results {
65
+		// --automated and -s, --stars are deprecated since Docker 1.12
65 66
 		if (*automated && !res.IsAutomated) || (int(*stars) > res.StarCount) {
66 67
 			continue
67 68
 		}
... ...
@@ -17,7 +17,7 @@ import (
17 17
 	"github.com/docker/engine-api/types"
18 18
 )
19 19
 
20
-func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
20
+func (cli *DockerCli) forwardAllSignals(ctx context.Context, cid string) chan os.Signal {
21 21
 	sigc := make(chan os.Signal, 128)
22 22
 	signal.CatchAll(sigc)
23 23
 	go func() {
... ...
@@ -37,7 +37,7 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
37 37
 				continue
38 38
 			}
39 39
 
40
-			if err := cli.client.ContainerKill(context.Background(), cid, sig); err != nil {
40
+			if err := cli.client.ContainerKill(ctx, cid, sig); err != nil {
41 41
 				logrus.Debugf("Error sending signal: %s", err)
42 42
 			}
43 43
 		}
... ...
@@ -57,6 +57,8 @@ func (cli *DockerCli) CmdStart(args ...string) error {
57 57
 
58 58
 	cmd.ParseFlags(args, true)
59 59
 
60
+	ctx, cancelFun := context.WithCancel(context.Background())
61
+
60 62
 	if *attach || *openStdin {
61 63
 		// We're going to attach to a container.
62 64
 		// 1. Ensure we only have one container.
... ...
@@ -66,13 +68,13 @@ func (cli *DockerCli) CmdStart(args ...string) error {
66 66
 
67 67
 		// 2. Attach to the container.
68 68
 		container := cmd.Arg(0)
69
-		c, err := cli.client.ContainerInspect(context.Background(), container)
69
+		c, err := cli.client.ContainerInspect(ctx, container)
70 70
 		if err != nil {
71 71
 			return err
72 72
 		}
73 73
 
74 74
 		if !c.Config.Tty {
75
-			sigc := cli.forwardAllSignals(container)
75
+			sigc := cli.forwardAllSignals(ctx, container)
76 76
 			defer signal.StopCatch(sigc)
77 77
 		}
78 78
 
... ...
@@ -94,7 +96,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
94 94
 			in = cli.in
95 95
 		}
96 96
 
97
-		resp, errAttach := cli.client.ContainerAttach(context.Background(), container, options)
97
+		resp, errAttach := cli.client.ContainerAttach(ctx, container, options)
98 98
 		if errAttach != nil && errAttach != httputil.ErrPersistEOF {
99 99
 			// ContainerAttach return an ErrPersistEOF (connection closed)
100 100
 			// means server met an error and put it in Hijacked connection
... ...
@@ -102,7 +104,6 @@ func (cli *DockerCli) CmdStart(args ...string) error {
102 102
 			return errAttach
103 103
 		}
104 104
 		defer resp.Close()
105
-		ctx, cancelFun := context.WithCancel(context.Background())
106 105
 		cErr := promise.Go(func() error {
107 106
 			errHijack := cli.holdHijackedConnection(ctx, c.Config.Tty, in, cli.out, cli.err, resp)
108 107
 			if errHijack == nil {
... ...
@@ -112,7 +113,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
112 112
 		})
113 113
 
114 114
 		// 3. Start the container.
115
-		if err := cli.client.ContainerStart(context.Background(), container); err != nil {
115
+		if err := cli.client.ContainerStart(ctx, container); err != nil {
116 116
 			cancelFun()
117 117
 			<-cErr
118 118
 			return err
... ...
@@ -120,14 +121,14 @@ func (cli *DockerCli) CmdStart(args ...string) error {
120 120
 
121 121
 		// 4. Wait for attachment to break.
122 122
 		if c.Config.Tty && cli.isTerminalOut {
123
-			if err := cli.monitorTtySize(container, false); err != nil {
123
+			if err := cli.monitorTtySize(ctx, container, false); err != nil {
124 124
 				fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)
125 125
 			}
126 126
 		}
127 127
 		if attchErr := <-cErr; attchErr != nil {
128 128
 			return attchErr
129 129
 		}
130
-		_, status, err := getExitCode(cli, container)
130
+		_, status, err := cli.getExitCode(ctx, container)
131 131
 		if err != nil {
132 132
 			return err
133 133
 		}
... ...
@@ -137,16 +138,16 @@ func (cli *DockerCli) CmdStart(args ...string) error {
137 137
 	} else {
138 138
 		// We're not going to attach to anything.
139 139
 		// Start as many containers as we want.
140
-		return cli.startContainersWithoutAttachments(cmd.Args())
140
+		return cli.startContainersWithoutAttachments(ctx, cmd.Args())
141 141
 	}
142 142
 
143 143
 	return nil
144 144
 }
145 145
 
146
-func (cli *DockerCli) startContainersWithoutAttachments(containers []string) error {
146
+func (cli *DockerCli) startContainersWithoutAttachments(ctx context.Context, containers []string) error {
147 147
 	var failedContainers []string
148 148
 	for _, container := range containers {
149
-		if err := cli.client.ContainerStart(context.Background(), container); err != nil {
149
+		if err := cli.client.ContainerStart(ctx, container); err != nil {
150 150
 			fmt.Fprintf(cli.err, "%s\n", err)
151 151
 			failedContainers = append(failedContainers, container)
152 152
 		} else {
... ...
@@ -33,6 +33,8 @@ func (cli *DockerCli) CmdStats(args ...string) error {
33 33
 	showAll := len(names) == 0
34 34
 	closeChan := make(chan error)
35 35
 
36
+	ctx := context.Background()
37
+
36 38
 	// monitorContainerEvents watches for container creation and removal (only
37 39
 	// used when calling `docker stats` without arguments).
38 40
 	monitorContainerEvents := func(started chan<- struct{}, c chan events.Message) {
... ...
@@ -41,7 +43,7 @@ func (cli *DockerCli) CmdStats(args ...string) error {
41 41
 		options := types.EventsOptions{
42 42
 			Filters: f,
43 43
 		}
44
-		resBody, err := cli.client.Events(context.Background(), options)
44
+		resBody, err := cli.client.Events(ctx, options)
45 45
 		// Whether we successfully subscribed to events or not, we can now
46 46
 		// unblock the main goroutine.
47 47
 		close(started)
... ...
@@ -71,7 +73,7 @@ func (cli *DockerCli) CmdStats(args ...string) error {
71 71
 		options := types.ContainerListOptions{
72 72
 			All: *all,
73 73
 		}
74
-		cs, err := cli.client.ContainerList(context.Background(), options)
74
+		cs, err := cli.client.ContainerList(ctx, options)
75 75
 		if err != nil {
76 76
 			closeChan <- err
77 77
 		}
... ...
@@ -79,7 +81,7 @@ func (cli *DockerCli) CmdStats(args ...string) error {
79 79
 			s := &containerStats{Name: container.ID[:12]}
80 80
 			if cStats.add(s) {
81 81
 				waitFirst.Add(1)
82
-				go s.Collect(cli.client, !*noStream, waitFirst)
82
+				go s.Collect(ctx, cli.client, !*noStream, waitFirst)
83 83
 			}
84 84
 		}
85 85
 	}
... ...
@@ -96,7 +98,7 @@ func (cli *DockerCli) CmdStats(args ...string) error {
96 96
 				s := &containerStats{Name: e.ID[:12]}
97 97
 				if cStats.add(s) {
98 98
 					waitFirst.Add(1)
99
-					go s.Collect(cli.client, !*noStream, waitFirst)
99
+					go s.Collect(ctx, cli.client, !*noStream, waitFirst)
100 100
 				}
101 101
 			}
102 102
 		})
... ...
@@ -105,7 +107,7 @@ func (cli *DockerCli) CmdStats(args ...string) error {
105 105
 			s := &containerStats{Name: e.ID[:12]}
106 106
 			if cStats.add(s) {
107 107
 				waitFirst.Add(1)
108
-				go s.Collect(cli.client, !*noStream, waitFirst)
108
+				go s.Collect(ctx, cli.client, !*noStream, waitFirst)
109 109
 			}
110 110
 		})
111 111
 
... ...
@@ -131,7 +133,7 @@ func (cli *DockerCli) CmdStats(args ...string) error {
131 131
 			s := &containerStats{Name: name}
132 132
 			if cStats.add(s) {
133 133
 				waitFirst.Add(1)
134
-				go s.Collect(cli.client, !*noStream, waitFirst)
134
+				go s.Collect(ctx, cli.client, !*noStream, waitFirst)
135 135
 			}
136 136
 		}
137 137
 
... ...
@@ -63,7 +63,7 @@ func (s *stats) isKnownContainer(cid string) (int, bool) {
63 63
 	return -1, false
64 64
 }
65 65
 
66
-func (s *containerStats) Collect(cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) {
66
+func (s *containerStats) Collect(ctx context.Context, cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) {
67 67
 	logrus.Debugf("collecting stats for %s", s.Name)
68 68
 	var (
69 69
 		getFirst       bool
... ...
@@ -80,7 +80,7 @@ func (s *containerStats) Collect(cli client.APIClient, streamStats bool, waitFir
80 80
 		}
81 81
 	}()
82 82
 
83
-	responseBody, err := cli.ContainerStats(context.Background(), s.Name, streamStats)
83
+	responseBody, err := cli.ContainerStats(ctx, s.Name, streamStats)
84 84
 	if err != nil {
85 85
 		s.mu.Lock()
86 86
 		s.err = err
... ...
@@ -22,9 +22,11 @@ func (cli *DockerCli) CmdStop(args ...string) error {
22 22
 
23 23
 	cmd.ParseFlags(args, true)
24 24
 
25
+	ctx := context.Background()
26
+
25 27
 	var errs []string
26 28
 	for _, name := range cmd.Args() {
27
-		if err := cli.client.ContainerStop(context.Background(), name, *nSeconds); err != nil {
29
+		if err := cli.client.ContainerStop(ctx, name, *nSeconds); err != nil {
28 30
 			errs = append(errs, err.Error())
29 31
 		} else {
30 32
 			fmt.Fprintf(cli.out, "%s\n", name)
... ...
@@ -229,14 +229,14 @@ func (cli *DockerCli) getPassphraseRetriever() passphrase.Retriever {
229 229
 	}
230 230
 }
231 231
 
232
-func (cli *DockerCli) trustedReference(ref reference.NamedTagged) (reference.Canonical, error) {
232
+func (cli *DockerCli) trustedReference(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) {
233 233
 	repoInfo, err := registry.ParseRepositoryInfo(ref)
234 234
 	if err != nil {
235 235
 		return nil, err
236 236
 	}
237 237
 
238 238
 	// Resolve the Auth config relevant for this server
239
-	authConfig := cli.resolveAuthConfig(repoInfo.Index)
239
+	authConfig := cli.resolveAuthConfig(ctx, repoInfo.Index)
240 240
 
241 241
 	notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig, "pull")
242 242
 	if err != nil {
... ...
@@ -262,14 +262,14 @@ func (cli *DockerCli) trustedReference(ref reference.NamedTagged) (reference.Can
262 262
 	return reference.WithDigest(ref, r.digest)
263 263
 }
264 264
 
265
-func (cli *DockerCli) tagTrusted(trustedRef reference.Canonical, ref reference.NamedTagged) error {
265
+func (cli *DockerCli) tagTrusted(ctx context.Context, trustedRef reference.Canonical, ref reference.NamedTagged) error {
266 266
 	fmt.Fprintf(cli.out, "Tagging %s as %s\n", trustedRef.String(), ref.String())
267 267
 
268 268
 	options := types.ImageTagOptions{
269 269
 		Force: true,
270 270
 	}
271 271
 
272
-	return cli.client.ImageTag(context.Background(), trustedRef.String(), ref.String(), options)
272
+	return cli.client.ImageTag(ctx, trustedRef.String(), ref.String(), options)
273 273
 }
274 274
 
275 275
 func notaryError(repoName string, err error) error {
... ...
@@ -302,7 +302,7 @@ func notaryError(repoName string, err error) error {
302 302
 	return err
303 303
 }
304 304
 
305
-func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registry.Reference, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error {
305
+func (cli *DockerCli) trustedPull(ctx context.Context, repoInfo *registry.RepositoryInfo, ref registry.Reference, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error {
306 306
 	var refs []target
307 307
 
308 308
 	notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig, "pull")
... ...
@@ -364,7 +364,7 @@ func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registr
364 364
 		if err != nil {
365 365
 			return err
366 366
 		}
367
-		if err := cli.imagePullPrivileged(authConfig, ref.String(), requestPrivilege, false); err != nil {
367
+		if err := cli.imagePullPrivileged(ctx, authConfig, ref.String(), requestPrivilege, false); err != nil {
368 368
 			return err
369 369
 		}
370 370
 
... ...
@@ -378,7 +378,7 @@ func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registr
378 378
 			if err != nil {
379 379
 				return err
380 380
 			}
381
-			if err := cli.tagTrusted(trustedRef, tagged); err != nil {
381
+			if err := cli.tagTrusted(ctx, trustedRef, tagged); err != nil {
382 382
 				return err
383 383
 			}
384 384
 		}
... ...
@@ -386,8 +386,8 @@ func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registr
386 386
 	return nil
387 387
 }
388 388
 
389
-func (cli *DockerCli) trustedPush(repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error {
390
-	responseBody, err := cli.imagePushPrivileged(authConfig, ref.String(), requestPrivilege)
389
+func (cli *DockerCli) trustedPush(ctx context.Context, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error {
390
+	responseBody, err := cli.imagePushPrivileged(ctx, authConfig, ref.String(), requestPrivilege)
391 391
 	if err != nil {
392 392
 		return err
393 393
 	}
... ...
@@ -19,9 +19,11 @@ func (cli *DockerCli) CmdUnpause(args ...string) error {
19 19
 
20 20
 	cmd.ParseFlags(args, true)
21 21
 
22
+	ctx := context.Background()
23
+
22 24
 	var errs []string
23 25
 	for _, name := range cmd.Args() {
24
-		if err := cli.client.ContainerUnpause(context.Background(), name); err != nil {
26
+		if err := cli.client.ContainerUnpause(ctx, name); err != nil {
25 27
 			errs = append(errs, err.Error())
26 28
 		} else {
27 29
 			fmt.Fprintf(cli.out, "%s\n", name)
... ...
@@ -99,10 +99,13 @@ func (cli *DockerCli) CmdUpdate(args ...string) error {
99 99
 		RestartPolicy: restartPolicy,
100 100
 	}
101 101
 
102
+	ctx := context.Background()
103
+
102 104
 	names := cmd.Args()
103 105
 	var errs []string
106
+
104 107
 	for _, name := range names {
105
-		if err := cli.client.ContainerUpdate(context.Background(), name, updateConfig); err != nil {
108
+		if err := cli.client.ContainerUpdate(ctx, name, updateConfig); err != nil {
106 109
 			errs = append(errs, err.Error())
107 110
 		} else {
108 111
 			fmt.Fprintf(cli.out, "%s\n", name)
... ...
@@ -23,13 +23,13 @@ import (
23 23
 	registrytypes "github.com/docker/engine-api/types/registry"
24 24
 )
25 25
 
26
-func (cli *DockerCli) electAuthServer() string {
26
+func (cli *DockerCli) electAuthServer(ctx context.Context) string {
27 27
 	// The daemon `/info` endpoint informs us of the default registry being
28 28
 	// used. This is essential in cross-platforms environment, where for
29 29
 	// example a Linux client might be interacting with a Windows daemon, hence
30 30
 	// the default registry URL might be Windows specific.
31 31
 	serverAddress := registry.IndexServer
32
-	if info, err := cli.client.Info(context.Background()); err != nil {
32
+	if info, err := cli.client.Info(ctx); err != nil {
33 33
 		fmt.Fprintf(cli.out, "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress)
34 34
 	} else {
35 35
 		serverAddress = info.IndexServerAddress
... ...
@@ -58,12 +58,12 @@ func (cli *DockerCli) registryAuthenticationPrivilegedFunc(index *registrytypes.
58 58
 	}
59 59
 }
60 60
 
61
-func (cli *DockerCli) resizeTty(id string, isExec bool) {
61
+func (cli *DockerCli) resizeTty(ctx context.Context, id string, isExec bool) {
62 62
 	height, width := cli.getTtySize()
63
-	cli.resizeTtyTo(id, height, width, isExec)
63
+	cli.resizeTtyTo(ctx, id, height, width, isExec)
64 64
 }
65 65
 
66
-func (cli *DockerCli) resizeTtyTo(id string, height, width int, isExec bool) {
66
+func (cli *DockerCli) resizeTtyTo(ctx context.Context, id string, height, width int, isExec bool) {
67 67
 	if height == 0 && width == 0 {
68 68
 		return
69 69
 	}
... ...
@@ -75,9 +75,9 @@ func (cli *DockerCli) resizeTtyTo(id string, height, width int, isExec bool) {
75 75
 
76 76
 	var err error
77 77
 	if isExec {
78
-		err = cli.client.ContainerExecResize(context.Background(), id, options)
78
+		err = cli.client.ContainerExecResize(ctx, id, options)
79 79
 	} else {
80
-		err = cli.client.ContainerResize(context.Background(), id, options)
80
+		err = cli.client.ContainerResize(ctx, id, options)
81 81
 	}
82 82
 
83 83
 	if err != nil {
... ...
@@ -87,8 +87,8 @@ func (cli *DockerCli) resizeTtyTo(id string, height, width int, isExec bool) {
87 87
 
88 88
 // getExitCode perform an inspect on the container. It returns
89 89
 // the running state and the exit code.
90
-func getExitCode(cli *DockerCli, containerID string) (bool, int, error) {
91
-	c, err := cli.client.ContainerInspect(context.Background(), containerID)
90
+func (cli *DockerCli) getExitCode(ctx context.Context, containerID string) (bool, int, error) {
91
+	c, err := cli.client.ContainerInspect(ctx, containerID)
92 92
 	if err != nil {
93 93
 		// If we can't connect, then the daemon probably died.
94 94
 		if err != client.ErrConnectionFailed {
... ...
@@ -102,8 +102,8 @@ func getExitCode(cli *DockerCli, containerID string) (bool, int, error) {
102 102
 
103 103
 // getExecExitCode perform an inspect on the exec command. It returns
104 104
 // the running state and the exit code.
105
-func getExecExitCode(cli *DockerCli, execID string) (bool, int, error) {
106
-	resp, err := cli.client.ContainerExecInspect(context.Background(), execID)
105
+func (cli *DockerCli) getExecExitCode(ctx context.Context, execID string) (bool, int, error) {
106
+	resp, err := cli.client.ContainerExecInspect(ctx, execID)
107 107
 	if err != nil {
108 108
 		// If we can't connect, then the daemon probably died.
109 109
 		if err != client.ErrConnectionFailed {
... ...
@@ -115,8 +115,8 @@ func getExecExitCode(cli *DockerCli, execID string) (bool, int, error) {
115 115
 	return resp.Running, resp.ExitCode, nil
116 116
 }
117 117
 
118
-func (cli *DockerCli) monitorTtySize(id string, isExec bool) error {
119
-	cli.resizeTty(id, isExec)
118
+func (cli *DockerCli) monitorTtySize(ctx context.Context, id string, isExec bool) error {
119
+	cli.resizeTty(ctx, id, isExec)
120 120
 
121 121
 	if runtime.GOOS == "windows" {
122 122
 		go func() {
... ...
@@ -126,7 +126,7 @@ func (cli *DockerCli) monitorTtySize(id string, isExec bool) error {
126 126
 				h, w := cli.getTtySize()
127 127
 
128 128
 				if prevW != w || prevH != h {
129
-					cli.resizeTty(id, isExec)
129
+					cli.resizeTty(ctx, id, isExec)
130 130
 				}
131 131
 				prevH = h
132 132
 				prevW = w
... ...
@@ -137,7 +137,7 @@ func (cli *DockerCli) monitorTtySize(id string, isExec bool) error {
137 137
 		gosignal.Notify(sigchan, signal.SIGWINCH)
138 138
 		go func() {
139 139
 			for range sigchan {
140
-				cli.resizeTty(id, isExec)
140
+				cli.resizeTty(ctx, id, isExec)
141 141
 			}
142 142
 		}()
143 143
 	}
... ...
@@ -185,10 +185,10 @@ func copyToFile(outfile string, r io.Reader) error {
185 185
 // resolveAuthConfig is like registry.ResolveAuthConfig, but if using the
186 186
 // default index, it uses the default index name for the daemon's platform,
187 187
 // not the client's platform.
188
-func (cli *DockerCli) resolveAuthConfig(index *registrytypes.IndexInfo) types.AuthConfig {
188
+func (cli *DockerCli) resolveAuthConfig(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
189 189
 	configKey := index.Name
190 190
 	if index.Official {
191
-		configKey = cli.electAuthServer()
191
+		configKey = cli.electAuthServer(ctx)
192 192
 	}
193 193
 
194 194
 	a, _ := getCredentials(cli.configFile, configKey)
... ...
@@ -110,8 +110,10 @@ func (cli *DockerCli) CmdVolumeInspect(args ...string) error {
110 110
 		return nil
111 111
 	}
112 112
 
113
+	ctx := context.Background()
114
+
113 115
 	inspectSearcher := func(name string) (interface{}, []byte, error) {
114
-		i, err := cli.client.VolumeInspect(context.Background(), name)
116
+		i, err := cli.client.VolumeInspect(ctx, name)
115 117
 		return i, nil, err
116 118
 	}
117 119
 
... ...
@@ -161,8 +163,10 @@ func (cli *DockerCli) CmdVolumeRm(args ...string) error {
161 161
 
162 162
 	var status = 0
163 163
 
164
+	ctx := context.Background()
165
+
164 166
 	for _, name := range cmd.Args() {
165
-		if err := cli.client.VolumeRemove(context.Background(), name); err != nil {
167
+		if err := cli.client.VolumeRemove(ctx, name); err != nil {
166 168
 			fmt.Fprintf(cli.err, "%s\n", err)
167 169
 			status = 1
168 170
 			continue
... ...
@@ -21,9 +21,11 @@ func (cli *DockerCli) CmdWait(args ...string) error {
21 21
 
22 22
 	cmd.ParseFlags(args, true)
23 23
 
24
+	ctx := context.Background()
25
+
24 26
 	var errs []string
25 27
 	for _, name := range cmd.Args() {
26
-		status, err := cli.client.ContainerWait(context.Background(), name)
28
+		status, err := cli.client.ContainerWait(ctx, name)
27 29
 		if err != nil {
28 30
 			errs = append(errs, err.Error())
29 31
 		} else {
... ...
@@ -39,5 +39,5 @@ type importExportBackend interface {
39 39
 type registryBackend interface {
40 40
 	PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
41 41
 	PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
42
-	SearchRegistryForImages(ctx context.Context, term string, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error)
42
+	SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error)
43 43
 }
... ...
@@ -301,7 +301,7 @@ func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter
301 301
 			headers[k] = v
302 302
 		}
303 303
 	}
304
-	query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("term"), config, headers)
304
+	query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("filters"), r.Form.Get("term"), config, headers)
305 305
 	if err != nil {
306 306
 		return err
307 307
 	}
... ...
@@ -266,7 +266,7 @@ func workdir(b *Builder, args []string, attributes map[string]bool, original str
266 266
 		return err
267 267
 	}
268 268
 
269
-	return b.commit("", b.runConfig.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
269
+	return b.commit("", b.runConfig.Cmd, fmt.Sprintf("WORKDIR %v", b.runConfig.WorkingDir))
270 270
 }
271 271
 
272 272
 // RUN some command yo
... ...
@@ -3,6 +3,7 @@ package dockerfile
3 3
 import (
4 4
 	"io/ioutil"
5 5
 	"os"
6
+	"path/filepath"
6 7
 	"strings"
7 8
 	"testing"
8 9
 
... ...
@@ -16,6 +17,7 @@ import (
16 16
 
17 17
 type dispatchTestCase struct {
18 18
 	name, dockerfile, expectedError string
19
+	files                           map[string]string
19 20
 }
20 21
 
21 22
 func init() {
... ...
@@ -34,21 +36,97 @@ func initDispatchTestCases() []dispatchTestCase {
34 34
 			name:          "ONBUILD forbidden FROM",
35 35
 			dockerfile:    "ONBUILD FROM scratch",
36 36
 			expectedError: "FROM isn't allowed as an ONBUILD trigger",
37
+			files:         nil,
37 38
 		},
38 39
 		{
39 40
 			name:          "ONBUILD forbidden MAINTAINER",
40 41
 			dockerfile:    "ONBUILD MAINTAINER docker.io",
41 42
 			expectedError: "MAINTAINER isn't allowed as an ONBUILD trigger",
43
+			files:         nil,
42 44
 		},
43 45
 		{
44 46
 			name:          "ARG two arguments",
45 47
 			dockerfile:    "ARG foo bar",
46 48
 			expectedError: "ARG requires exactly one argument definition",
49
+			files:         nil,
47 50
 		},
48 51
 		{
49 52
 			name:          "MAINTAINER unknown flag",
50 53
 			dockerfile:    "MAINTAINER --boo joe@example.com",
51 54
 			expectedError: "Unknown flag: boo",
55
+			files:         nil,
56
+		},
57
+		{
58
+			name:          "ADD multiple files to file",
59
+			dockerfile:    "ADD file1.txt file2.txt test",
60
+			expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
61
+			files:         map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
62
+		},
63
+		{
64
+			name:          "JSON ADD multiple files to file",
65
+			dockerfile:    `ADD ["file1.txt", "file2.txt", "test"]`,
66
+			expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
67
+			files:         map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
68
+		},
69
+		{
70
+			name:          "Wildcard ADD multiple files to file",
71
+			dockerfile:    "ADD file*.txt test",
72
+			expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
73
+			files:         map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
74
+		},
75
+		{
76
+			name:          "Wildcard JSON ADD multiple files to file",
77
+			dockerfile:    `ADD ["file*.txt", "test"]`,
78
+			expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
79
+			files:         map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
80
+		},
81
+		{
82
+			name:          "COPY multiple files to file",
83
+			dockerfile:    "COPY file1.txt file2.txt test",
84
+			expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /",
85
+			files:         map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
86
+		},
87
+		{
88
+			name:          "JSON COPY multiple files to file",
89
+			dockerfile:    `COPY ["file1.txt", "file2.txt", "test"]`,
90
+			expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /",
91
+			files:         map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
92
+		},
93
+		{
94
+			name:          "ADD multiple files to file with whitespace",
95
+			dockerfile:    `ADD [ "test file1.txt", "test file2.txt", "test" ]`,
96
+			expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
97
+			files:         map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"},
98
+		},
99
+		{
100
+			name:          "COPY multiple files to file with whitespace",
101
+			dockerfile:    `COPY [ "test file1.txt", "test file2.txt", "test" ]`,
102
+			expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /",
103
+			files:         map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"},
104
+		},
105
+		{
106
+			name:          "COPY wildcard no files",
107
+			dockerfile:    `COPY file*.txt /tmp/`,
108
+			expectedError: "No source files were specified",
109
+			files:         nil,
110
+		},
111
+		{
112
+			name:          "COPY url",
113
+			dockerfile:    `COPY https://index.docker.io/robots.txt /`,
114
+			expectedError: "Source can't be a URL for COPY",
115
+			files:         nil,
116
+		},
117
+		{
118
+			name:          "Chaining ONBUILD",
119
+			dockerfile:    `ONBUILD ONBUILD RUN touch foobar`,
120
+			expectedError: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed",
121
+			files:         nil,
122
+		},
123
+		{
124
+			name:          "Invalid instruction",
125
+			dockerfile:    `foo bar`,
126
+			expectedError: "Unknown instruction: FOO",
127
+			files:         nil,
52 128
 		}}
53 129
 
54 130
 	return dispatchTestCases
... ...
@@ -66,6 +144,10 @@ func executeTestCase(t *testing.T, testCase dispatchTestCase) {
66 66
 	contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test")
67 67
 	defer cleanup()
68 68
 
69
+	for filename, content := range testCase.files {
70
+		createTestTempFile(t, contextDir, filename, content, 0777)
71
+	}
72
+
69 73
 	tarStream, err := archive.Tar(contextDir, archive.Uncompressed)
70 74
 
71 75
 	if err != nil {
... ...
@@ -132,3 +214,16 @@ func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) {
132 132
 		}
133 133
 	}
134 134
 }
135
+
136
+// createTestTempFile creates a temporary file within dir with specific contents and permissions.
137
+// When an error occurs, it terminates the test
138
+func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string {
139
+	filePath := filepath.Join(dir, filename)
140
+	err := ioutil.WriteFile(filePath, []byte(contents), perm)
141
+
142
+	if err != nil {
143
+		t.Fatalf("Error when creating %s file: %s", filename, err)
144
+	}
145
+
146
+	return filePath
147
+}
... ...
@@ -618,6 +618,27 @@ func (b *Builder) readDockerfile() error {
618 618
 		}
619 619
 	}
620 620
 
621
+	err := b.parseDockerfile()
622
+
623
+	if err != nil {
624
+		return err
625
+	}
626
+
627
+	// After the Dockerfile has been parsed, we need to check the .dockerignore
628
+	// file for either "Dockerfile" or ".dockerignore", and if either are
629
+	// present then erase them from the build context. These files should never
630
+	// have been sent from the client but we did send them to make sure that
631
+	// we had the Dockerfile to actually parse, and then we also need the
632
+	// .dockerignore file to know whether either file should be removed.
633
+	// Note that this assumes the Dockerfile has been read into memory and
634
+	// is now safe to be removed.
635
+	if dockerIgnore, ok := b.context.(builder.DockerIgnoreContext); ok {
636
+		dockerIgnore.Process([]string{b.options.Dockerfile})
637
+	}
638
+	return nil
639
+}
640
+
641
+func (b *Builder) parseDockerfile() error {
621 642
 	f, err := b.context.Open(b.options.Dockerfile)
622 643
 	if err != nil {
623 644
 		if os.IsNotExist(err) {
... ...
@@ -625,6 +646,7 @@ func (b *Builder) readDockerfile() error {
625 625
 		}
626 626
 		return err
627 627
 	}
628
+	defer f.Close()
628 629
 	if f, ok := f.(*os.File); ok {
629 630
 		// ignoring error because Open already succeeded
630 631
 		fi, err := f.Stat()
... ...
@@ -636,22 +658,10 @@ func (b *Builder) readDockerfile() error {
636 636
 		}
637 637
 	}
638 638
 	b.dockerfile, err = parser.Parse(f)
639
-	f.Close()
640 639
 	if err != nil {
641 640
 		return err
642 641
 	}
643 642
 
644
-	// After the Dockerfile has been parsed, we need to check the .dockerignore
645
-	// file for either "Dockerfile" or ".dockerignore", and if either are
646
-	// present then erase them from the build context. These files should never
647
-	// have been sent from the client but we did send them to make sure that
648
-	// we had the Dockerfile to actually parse, and then we also need the
649
-	// .dockerignore file to know whether either file should be removed.
650
-	// Note that this assumes the Dockerfile has been read into memory and
651
-	// is now safe to be removed.
652
-	if dockerIgnore, ok := b.context.(builder.DockerIgnoreContext); ok {
653
-		dockerIgnore.Process([]string{b.options.Dockerfile})
654
-	}
655 643
 	return nil
656 644
 }
657 645
 
658 646
new file mode 100644
... ...
@@ -0,0 +1,55 @@
0
+package dockerfile
1
+
2
+import (
3
+	"strings"
4
+	"testing"
5
+
6
+	"github.com/docker/docker/builder"
7
+	"github.com/docker/docker/pkg/archive"
8
+	"github.com/docker/engine-api/types"
9
+)
10
+
11
+func TestEmptyDockerfile(t *testing.T) {
12
+	contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test")
13
+	defer cleanup()
14
+
15
+	createTestTempFile(t, contextDir, builder.DefaultDockerfileName, "", 0777)
16
+
17
+	tarStream, err := archive.Tar(contextDir, archive.Uncompressed)
18
+
19
+	if err != nil {
20
+		t.Fatalf("Error when creating tar stream: %s", err)
21
+	}
22
+
23
+	defer func() {
24
+		if err = tarStream.Close(); err != nil {
25
+			t.Fatalf("Error when closing tar stream: %s", err)
26
+		}
27
+	}()
28
+
29
+	context, err := builder.MakeTarSumContext(tarStream)
30
+
31
+	if err != nil {
32
+		t.Fatalf("Error when creating tar context: %s", err)
33
+	}
34
+
35
+	defer func() {
36
+		if err = context.Close(); err != nil {
37
+			t.Fatalf("Error when closing tar context: %s", err)
38
+		}
39
+	}()
40
+
41
+	options := &types.ImageBuildOptions{}
42
+
43
+	b := &Builder{options: options, context: context}
44
+
45
+	err = b.readDockerfile()
46
+
47
+	if err == nil {
48
+		t.Fatalf("No error when executing test for empty Dockerfile")
49
+	}
50
+
51
+	if !strings.Contains(err.Error(), "The Dockerfile (Dockerfile) cannot be empty") {
52
+		t.Fatalf("Wrong error message. Should be \"%s\". Got \"%s\"", "The Dockerfile (Dockerfile) cannot be empty", err.Error())
53
+	}
54
+}
... ...
@@ -39,12 +39,7 @@ func New(handlers ...Handler) *Cli {
39 39
 	return cli
40 40
 }
41 41
 
42
-// initErr is an error returned upon initialization of a handler implementing Initializer.
43
-type initErr struct{ error }
44
-
45
-func (err initErr) Error() string {
46
-	return err.Error()
47
-}
42
+var errCommandNotFound = errors.New("command not found")
48 43
 
49 44
 func (cli *Cli) command(args ...string) (func(...string) error, error) {
50 45
 	for _, c := range cli.handlers {
... ...
@@ -54,35 +49,36 @@ func (cli *Cli) command(args ...string) (func(...string) error, error) {
54 54
 		if cmd := c.Command(strings.Join(args, " ")); cmd != nil {
55 55
 			if ci, ok := c.(Initializer); ok {
56 56
 				if err := ci.Initialize(); err != nil {
57
-					return nil, initErr{err}
57
+					return nil, err
58 58
 				}
59 59
 			}
60 60
 			return cmd, nil
61 61
 		}
62 62
 	}
63
-	return nil, errors.New("command not found")
63
+	return nil, errCommandNotFound
64 64
 }
65 65
 
66 66
 // Run executes the specified command.
67 67
 func (cli *Cli) Run(args ...string) error {
68 68
 	if len(args) > 1 {
69 69
 		command, err := cli.command(args[:2]...)
70
-		switch err := err.(type) {
71
-		case nil:
70
+		if err == nil {
72 71
 			return command(args[2:]...)
73
-		case initErr:
74
-			return err.error
72
+		}
73
+		if err != errCommandNotFound {
74
+			return err
75 75
 		}
76 76
 	}
77 77
 	if len(args) > 0 {
78 78
 		command, err := cli.command(args[0])
79
-		switch err := err.(type) {
80
-		case nil:
81
-			return command(args[1:]...)
82
-		case initErr:
83
-			return err.error
79
+		if err != nil {
80
+			if err == errCommandNotFound {
81
+				cli.noSuchCommand(args[0])
82
+				return nil
83
+			}
84
+			return err
84 85
 		}
85
-		cli.noSuchCommand(args[0])
86
+		return command(args[1:]...)
86 87
 	}
87 88
 	return cli.CmdHelp()
88 89
 }
... ...
@@ -110,24 +106,25 @@ func (cli *Cli) Command(name string) func(...string) error {
110 110
 func (cli *Cli) CmdHelp(args ...string) error {
111 111
 	if len(args) > 1 {
112 112
 		command, err := cli.command(args[:2]...)
113
-		switch err := err.(type) {
114
-		case nil:
113
+		if err == nil {
115 114
 			command("--help")
116 115
 			return nil
117
-		case initErr:
118
-			return err.error
116
+		}
117
+		if err != errCommandNotFound {
118
+			return err
119 119
 		}
120 120
 	}
121 121
 	if len(args) > 0 {
122 122
 		command, err := cli.command(args[0])
123
-		switch err := err.(type) {
124
-		case nil:
125
-			command("--help")
126
-			return nil
127
-		case initErr:
128
-			return err.error
123
+		if err != nil {
124
+			if err == errCommandNotFound {
125
+				cli.noSuchCommand(args[0])
126
+				return nil
127
+			}
128
+			return err
129 129
 		}
130
-		cli.noSuchCommand(args[0])
130
+		command("--help")
131
+		return nil
131 132
 	}
132 133
 
133 134
 	if cli.Usage == nil {
... ...
@@ -16,11 +16,6 @@ const (
16 16
 	ConfigFileName = "config.json"
17 17
 	configFileDir  = ".docker"
18 18
 	oldConfigfile  = ".dockercfg"
19
-
20
-	// This constant is only used for really old config files when the
21
-	// URL wasn't saved as part of the config file and it was just
22
-	// assumed to be this value.
23
-	defaultIndexserver = "https://index.docker.io/v1/"
24 19
 )
25 20
 
26 21
 var (
... ...
@@ -228,10 +228,11 @@ func (cli *DaemonCli) start() (err error) {
228 228
 		if proto == "tcp" && (serverConfig.TLSConfig == nil || serverConfig.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert) {
229 229
 			logrus.Warn("[!] DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING [!]")
230 230
 		}
231
-		l, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig)
231
+		ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig)
232 232
 		if err != nil {
233 233
 			return err
234 234
 		}
235
+		ls = wrapListeners(proto, ls)
235 236
 		// If we're binding to a TCP port, make sure that a container doesn't try to use it.
236 237
 		if proto == "tcp" {
237 238
 			if err := allocateDaemonPort(addr); err != nil {
... ...
@@ -239,7 +240,7 @@ func (cli *DaemonCli) start() (err error) {
239 239
 			}
240 240
 		}
241 241
 		logrus.Debugf("Listener created for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1])
242
-		api.Accept(protoAddrParts[1], l...)
242
+		api.Accept(protoAddrParts[1], ls...)
243 243
 	}
244 244
 
245 245
 	if err := migrateKey(); err != nil {
246 246
new file mode 100644
... ...
@@ -0,0 +1,74 @@
0
+// +build solaris
1
+
2
+package main
3
+
4
+import (
5
+	"fmt"
6
+	"net"
7
+	"os"
8
+	"path/filepath"
9
+	"syscall"
10
+
11
+	"github.com/docker/docker/libcontainerd"
12
+	"github.com/docker/docker/pkg/system"
13
+)
14
+
15
+const defaultDaemonConfigFile = ""
16
+
17
+// currentUserIsOwner checks whether the current user is the owner of the given
18
+// file.
19
+func currentUserIsOwner(f string) bool {
20
+	if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil {
21
+		if int(fileInfo.UID()) == os.Getuid() {
22
+			return true
23
+		}
24
+	}
25
+	return false
26
+}
27
+
28
+// setDefaultUmask sets the umask to 0022 to avoid problems
29
+// caused by custom umask
30
+func setDefaultUmask() error {
31
+	desiredUmask := 0022
32
+	syscall.Umask(desiredUmask)
33
+	if umask := syscall.Umask(desiredUmask); umask != desiredUmask {
34
+		return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask)
35
+	}
36
+
37
+	return nil
38
+}
39
+
40
+func getDaemonConfDir() string {
41
+	return "/etc/docker"
42
+}
43
+
44
+// setupConfigReloadTrap configures the USR2 signal to reload the configuration.
45
+func (cli *DaemonCli) setupConfigReloadTrap() {
46
+}
47
+
48
+// notifySystem sends a message to the host when the server is ready to be used
49
+func notifySystem() {
50
+}
51
+
52
+func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption {
53
+	opts := []libcontainerd.RemoteOption{}
54
+	return opts
55
+}
56
+
57
+// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to
58
+// store their state.
59
+func (cli *DaemonCli) getLibcontainerdRoot() string {
60
+	return filepath.Join(cli.Config.ExecRoot, "libcontainerd")
61
+}
62
+
63
+func allocateDaemonPort(addr string) error {
64
+	return nil
65
+}
66
+
67
+// notifyShutdown is called after the daemon shuts down but before the process exits.
68
+func notifyShutdown(err error) {
69
+}
70
+
71
+func wrapListeners(proto string, ls []net.Listener) []net.Listener {
72
+	return ls
73
+}
... ...
@@ -1,4 +1,4 @@
1
-// +build !windows
1
+// +build !windows,!solaris
2 2
 
3 3
 package main
4 4
 
... ...
@@ -11,6 +11,7 @@ import (
11 11
 	"strconv"
12 12
 	"syscall"
13 13
 
14
+	"github.com/docker/docker/cmd/dockerd/hack"
14 15
 	"github.com/docker/docker/daemon"
15 16
 	"github.com/docker/docker/libcontainerd"
16 17
 	"github.com/docker/docker/pkg/system"
... ...
@@ -111,3 +112,17 @@ func allocateDaemonPort(addr string) error {
111 111
 // notifyShutdown is called after the daemon shuts down but before the process exits.
112 112
 func notifyShutdown(err error) {
113 113
 }
114
+
115
+func wrapListeners(proto string, ls []net.Listener) []net.Listener {
116
+	if os.Getenv("DOCKER_HTTP_HOST_COMPAT") != "" {
117
+		switch proto {
118
+		case "unix":
119
+			ls[0] = &hack.MalformedHostHeaderOverride{ls[0]}
120
+		case "fd":
121
+			for i := range ls {
122
+				ls[i] = &hack.MalformedHostHeaderOverride{ls[i]}
123
+			}
124
+		}
125
+	}
126
+	return ls
127
+}
... ...
@@ -2,6 +2,7 @@ package main
2 2
 
3 3
 import (
4 4
 	"fmt"
5
+	"net"
5 6
 	"os"
6 7
 	"syscall"
7 8
 
... ...
@@ -75,3 +76,7 @@ func (cli *DaemonCli) getLibcontainerdRoot() string {
75 75
 func allocateDaemonPort(addr string) error {
76 76
 	return nil
77 77
 }
78
+
79
+func wrapListeners(proto string, ls []net.Listener) []net.Listener {
80
+	return ls
81
+}
78 82
new file mode 100644
... ...
@@ -0,0 +1,116 @@
0
+// +build !windows
1
+
2
+package hack
3
+
4
+import "net"
5
+
6
+// MalformedHostHeaderOverride is a wrapper to be able
7
+// to overcome the 400 Bad request coming from old docker
8
+// clients that send an invalid Host header.
9
+type MalformedHostHeaderOverride struct {
10
+	net.Listener
11
+}
12
+
13
+// MalformedHostHeaderOverrideConn wraps the underlying unix
14
+// connection and keeps track of the first read from http.Server
15
+// which just reads the headers.
16
+type MalformedHostHeaderOverrideConn struct {
17
+	net.Conn
18
+	first bool
19
+}
20
+
21
+var closeConnHeader = []byte("\r\nConnection: close\r")
22
+
23
+// Read reads the first *read* request from http.Server to inspect
24
+// the Host header. If the Host starts with / then we're talking to
25
+// an old docker client which send an invalid Host header. To not
26
+// error out in http.Server we rewrite the first bytes of the request
27
+// to sanitize the Host header itself.
28
+// In case we're not dealing with old docker clients the data is just passed
29
+// to the server w/o modification.
30
+func (l *MalformedHostHeaderOverrideConn) Read(b []byte) (n int, err error) {
31
+	// http.Server uses a 4k buffer
32
+	if l.first && len(b) == 4096 {
33
+		// This keeps track of the first read from http.Server which just reads
34
+		// the headers
35
+		l.first = false
36
+		// The first read of the connection by http.Server is done limited to
37
+		// DefaultMaxHeaderBytes (usually 1 << 20) + 4096.
38
+		// Here we do the first read which gets us all the http headers to
39
+		// be inspected and modified below.
40
+		c, err := l.Conn.Read(b)
41
+		if err != nil {
42
+			return c, err
43
+		}
44
+
45
+		var (
46
+			start, end    int
47
+			firstLineFeed = -1
48
+			buf           []byte
49
+		)
50
+		for i, bb := range b[:c] {
51
+			if bb == '\n' && firstLineFeed == -1 {
52
+				firstLineFeed = i
53
+			}
54
+			if bb != '\n' {
55
+				continue
56
+			}
57
+			if b[i+1] != 'H' {
58
+				continue
59
+			}
60
+			if b[i+2] != 'o' {
61
+				continue
62
+			}
63
+			if b[i+3] != 's' {
64
+				continue
65
+			}
66
+			if b[i+4] != 't' {
67
+				continue
68
+			}
69
+			if b[i+5] != ':' {
70
+				continue
71
+			}
72
+			if b[i+6] != ' ' {
73
+				continue
74
+			}
75
+			if b[i+7] != '/' {
76
+				continue
77
+			}
78
+			// ensure clients other than the docker clients do not get this hack
79
+			if i != firstLineFeed {
80
+				return c, nil
81
+			}
82
+			start = i + 7
83
+			// now find where the value ends
84
+			for ii, bbb := range b[start:c] {
85
+				if bbb == '\n' {
86
+					end = start + ii
87
+					break
88
+				}
89
+			}
90
+			buf = make([]byte, 0, c+len(closeConnHeader)-(end-start))
91
+			// strip the value of the host header and
92
+			// inject `Connection: close` to ensure we don't reuse this connection
93
+			buf = append(buf, b[:start]...)
94
+			buf = append(buf, closeConnHeader...)
95
+			buf = append(buf, b[end:c]...)
96
+			copy(b, buf)
97
+			break
98
+		}
99
+		if len(buf) == 0 {
100
+			return c, nil
101
+		}
102
+		return len(buf), nil
103
+	}
104
+	return l.Conn.Read(b)
105
+}
106
+
107
+// Accept makes the listener accepts connections and wraps the connection
108
+// in a MalformedHostHeaderOverrideConn initilizing first to true.
109
+func (l *MalformedHostHeaderOverride) Accept() (net.Conn, error) {
110
+	c, err := l.Listener.Accept()
111
+	if err != nil {
112
+		return c, err
113
+	}
114
+	return &MalformedHostHeaderOverrideConn{c, true}, nil
115
+}
0 116
new file mode 100644
... ...
@@ -0,0 +1,115 @@
0
+// +build !windows
1
+
2
+package hack
3
+
4
+import (
5
+	"bytes"
6
+	"io"
7
+	"net"
8
+	"strings"
9
+	"testing"
10
+)
11
+
12
+func TestHeaderOverrideHack(t *testing.T) {
13
+	client, srv := net.Pipe()
14
+	tests := [][2][]byte{
15
+		{
16
+			[]byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"),
17
+			[]byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\n"),
18
+		},
19
+		{
20
+			[]byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\nFoo: Bar\r\n"),
21
+			[]byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\nFoo: Bar\r\n"),
22
+		},
23
+		{
24
+			[]byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something!"),
25
+			[]byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something!"),
26
+		},
27
+		{
28
+			[]byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something! " + strings.Repeat("test", 15000)),
29
+			[]byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something! " + strings.Repeat("test", 15000)),
30
+		},
31
+		{
32
+			[]byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"),
33
+			[]byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"),
34
+		},
35
+	}
36
+	l := MalformedHostHeaderOverrideConn{client, true}
37
+	read := make([]byte, 4096)
38
+
39
+	for _, pair := range tests {
40
+		go func(x []byte) {
41
+			srv.Write(x)
42
+		}(pair[0])
43
+		n, err := l.Read(read)
44
+		if err != nil && err != io.EOF {
45
+			t.Fatalf("read: %d - %d, err: %v\n%s", n, len(pair[0]), err, string(read[:n]))
46
+		}
47
+		if !bytes.Equal(read[:n], pair[1][:n]) {
48
+			t.Fatalf("\n%s\n%s\n", read[:n], pair[1][:n])
49
+		}
50
+		l.first = true
51
+		// clean out the slice
52
+		read = read[:0]
53
+	}
54
+	srv.Close()
55
+	l.Close()
56
+}
57
+
58
+func BenchmarkWithHack(b *testing.B) {
59
+	client, srv := net.Pipe()
60
+	done := make(chan struct{})
61
+	req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n")
62
+	read := make([]byte, 4096)
63
+	b.SetBytes(int64(len(req) * 30))
64
+
65
+	l := MalformedHostHeaderOverrideConn{client, true}
66
+	go func() {
67
+		for {
68
+			if _, err := srv.Write(req); err != nil {
69
+				srv.Close()
70
+				break
71
+			}
72
+			l.first = true // make sure each subsequent run uses the hack parsing
73
+		}
74
+		close(done)
75
+	}()
76
+
77
+	for i := 0; i < b.N; i++ {
78
+		for i := 0; i < 30; i++ {
79
+			if n, err := l.Read(read); err != nil && err != io.EOF {
80
+				b.Fatalf("read: %d - %d, err: %v\n%s", n, len(req), err, string(read[:n]))
81
+			}
82
+		}
83
+	}
84
+	l.Close()
85
+	<-done
86
+}
87
+
88
+func BenchmarkNoHack(b *testing.B) {
89
+	client, srv := net.Pipe()
90
+	done := make(chan struct{})
91
+	req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n")
92
+	read := make([]byte, 4096)
93
+	b.SetBytes(int64(len(req) * 30))
94
+
95
+	go func() {
96
+		for {
97
+			if _, err := srv.Write(req); err != nil {
98
+				srv.Close()
99
+				break
100
+			}
101
+		}
102
+		close(done)
103
+	}()
104
+
105
+	for i := 0; i < b.N; i++ {
106
+		for i := 0; i < 30; i++ {
107
+			if _, err := client.Read(read); err != nil && err != io.EOF {
108
+				b.Fatal(err)
109
+			}
110
+		}
111
+	}
112
+	client.Close()
113
+	<-done
114
+}
0 115
new file mode 100644
... ...
@@ -0,0 +1,95 @@
0
+// +build solaris
1
+
2
+package container
3
+
4
+import (
5
+	"os"
6
+	"path/filepath"
7
+
8
+	"github.com/docker/docker/volume"
9
+	"github.com/docker/engine-api/types/container"
10
+)
11
+
12
+// Container holds fields specific to the Solaris implementation. See
13
+// CommonContainer for standard fields common to all containers.
14
+type Container struct {
15
+	CommonContainer
16
+
17
+	// fields below here are platform specific.
18
+	HostnamePath   string
19
+	HostsPath      string
20
+	ResolvConfPath string
21
+}
22
+
23
+// ExitStatus provides exit reasons for a container.
24
+type ExitStatus struct {
25
+	// The exit code with which the container exited.
26
+	ExitCode int
27
+}
28
+
29
+// CreateDaemonEnvironment creates a new environment variable slice for this container.
30
+func (container *Container) CreateDaemonEnvironment(linkedEnv []string) []string {
31
+	return nil
32
+}
33
+
34
+func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) {
35
+	return volumeMounts, nil
36
+}
37
+
38
+// TrySetNetworkMount attempts to set the network mounts given a provided destination and
39
+// the path to use for it; return true if the given destination was a network mount file
40
+func (container *Container) TrySetNetworkMount(destination string, path string) bool {
41
+	return true
42
+}
43
+
44
+// NetworkMounts returns the list of network mounts.
45
+func (container *Container) NetworkMounts() []Mount {
46
+	var mount []Mount
47
+	return mount
48
+}
49
+
50
+// CopyImagePathContent copies files in destination to the volume.
51
+func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error {
52
+	return nil
53
+}
54
+
55
+// UnmountIpcMounts unmount Ipc related mounts.
56
+func (container *Container) UnmountIpcMounts(unmount func(pth string) error) {
57
+}
58
+
59
+// IpcMounts returns the list of Ipc related mounts.
60
+func (container *Container) IpcMounts() []Mount {
61
+	return nil
62
+}
63
+
64
+// UpdateContainer updates configuration of a container
65
+func (container *Container) UpdateContainer(hostConfig *container.HostConfig) error {
66
+	return nil
67
+}
68
+
69
+// UnmountVolumes explicitly unmounts volumes from the container.
70
+func (container *Container) UnmountVolumes(forceSyscall bool, volumeEventLog func(name, action string, attributes map[string]string)) error {
71
+	return nil
72
+}
73
+
74
+// TmpfsMounts returns the list of tmpfs mounts
75
+func (container *Container) TmpfsMounts() []Mount {
76
+	var mounts []Mount
77
+	return mounts
78
+}
79
+
80
+// cleanResourcePath cleans a resource path and prepares to combine with mnt path
81
+func cleanResourcePath(path string) string {
82
+	return filepath.Join(string(os.PathSeparator), path)
83
+}
84
+
85
+// BuildHostnameFile writes the container's hostname file.
86
+func (container *Container) BuildHostnameFile() error {
87
+	return nil
88
+}
89
+
90
+// canMountFS determines if the file system for the container
91
+// can be mounted locally. A no-op on non-Windows platforms
92
+func (container *Container) canMountFS() bool {
93
+	return true
94
+}
0 95
new file mode 100644
... ...
@@ -0,0 +1,7 @@
0
+package container
1
+
2
+// setFromExitStatus is a platform specific helper function to set the state
3
+// based on the ExitStatus structure.
4
+func (s *State) setFromExitStatus(exitStatus *ExitStatus) {
5
+	s.ExitCode = exitStatus.ExitCode
6
+}
... ...
@@ -966,6 +966,11 @@ _docker_events() {
966 966
 			__docker_complete_containers_all
967 967
 			return
968 968
 			;;
969
+		daemon)
970
+			local name=$(__docker_q info | sed -n 's/^\(ID\|Name\): //p')
971
+			COMPREPLY=( $( compgen -W "$name" -- "${cur##*=}" ) )
972
+			return
973
+			;;
969 974
 		event)
970 975
 			COMPREPLY=( $( compgen -W "
971 976
 				attach
... ...
@@ -987,6 +992,7 @@ _docker_events() {
987 987
 				pause
988 988
 				pull
989 989
 				push
990
+				reload
990 991
 				rename
991 992
 				resize
992 993
 				restart
... ...
@@ -1012,7 +1018,7 @@ _docker_events() {
1012 1012
 			return
1013 1013
 			;;
1014 1014
 		type)
1015
-			COMPREPLY=( $( compgen -W "container image network volume" -- "${cur##*=}" ) )
1015
+			COMPREPLY=( $( compgen -W "container daemon image network volume" -- "${cur##*=}" ) )
1016 1016
 			return
1017 1017
 			;;
1018 1018
 		volume)
... ...
@@ -1024,7 +1030,7 @@ _docker_events() {
1024 1024
 
1025 1025
 	case "$prev" in
1026 1026
 		--filter|-f)
1027
-			COMPREPLY=( $( compgen -S = -W "container event image label network type volume" -- "$cur" ) )
1027
+			COMPREPLY=( $( compgen -S = -W "container daemon event image label network type volume" -- "$cur" ) )
1028 1028
 			__docker_nospace
1029 1029
 			return
1030 1030
 			;;
... ...
@@ -1907,15 +1913,29 @@ _docker_save() {
1907 1907
 }
1908 1908
 
1909 1909
 _docker_search() {
1910
+	local key=$(__docker_map_key_of_current_option '--filter|-f')
1911
+	case "$key" in
1912
+		is-automated)
1913
+			COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) )
1914
+			return
1915
+			;;
1916
+		is-official)
1917
+			COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) )
1918
+			return
1919
+			;;
1920
+	esac
1921
+
1910 1922
 	case "$prev" in
1911
-		--stars|-s)
1923
+		--filter|-f)
1924
+			COMPREPLY=( $( compgen -S = -W "is-automated is-official stars" -- "$cur" ) )
1925
+			__docker_nospace
1912 1926
 			return
1913 1927
 			;;
1914 1928
 	esac
1915 1929
 
1916 1930
 	case "$cur" in
1917 1931
 		-*)
1918
-			COMPREPLY=( $( compgen -W "--automated --help --no-trunc --stars -s" -- "$cur" ) )
1932
+			COMPREPLY=( $( compgen -W "--filter --help --no-trunc" -- "$cur" ) )
1919 1933
 			;;
1920 1934
 	esac
1921 1935
 }
... ...
@@ -311,6 +311,54 @@ __docker_complete_ps_filters() {
311 311
     return ret
312 312
 }
313 313
 
314
+__docker_complete_search_filters() {
315
+    [[ $PREFIX = -* ]] && return 1
316
+    integer ret=1
317
+    declare -a boolean_opts opts
318
+
319
+    boolean_opts=('true' 'false')
320
+    opts=('is-automated' 'is-official' 'stars')
321
+
322
+    if compset -P '*='; then
323
+        case "${${words[-1]%=*}#*=}" in
324
+            (is-automated|is-official)
325
+                _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0
326
+                ;;
327
+            *)
328
+                _message 'value' && ret=0
329
+                ;;
330
+        esac
331
+    else
332
+        _describe -t filter-opts "filter options" opts -qS "=" && ret=0
333
+    fi
334
+
335
+    return ret
336
+}
337
+
338
+__docker_complete_images_filters() {
339
+    [[ $PREFIX = -* ]] && return 1
340
+    integer ret=1
341
+    declare -a boolean_opts opts
342
+
343
+    boolean_opts=('true' 'false')
344
+    opts=('dangling' 'label')
345
+
346
+    if compset -P '*='; then
347
+        case "${${words[-1]%=*}#*=}" in
348
+            (dangling)
349
+                _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0
350
+                ;;
351
+            *)
352
+                _message 'value' && ret=0
353
+                ;;
354
+        esac
355
+    else
356
+        _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0
357
+    fi
358
+
359
+    return ret
360
+}
361
+
314 362
 __docker_network_complete_ls_filters() {
315 363
     [[ $PREFIX = -* ]] && return 1
316 364
     integer ret=1
... ...
@@ -929,11 +977,17 @@ __docker_subcommand() {
929 929
                 $opts_help \
930 930
                 "($help -a --all)"{-a,--all}"[Show all images]" \
931 931
                 "($help)--digests[Show digests]" \
932
-                "($help)*"{-f=,--filter=}"[Filter values]:filter: " \
932
+                "($help)*"{-f=,--filter=}"[Filter values]:filter:->filter-options" \
933 933
                 "($help)--format[Pretty-print containers using a Go template]:format: " \
934 934
                 "($help)--no-trunc[Do not truncate output]" \
935 935
                 "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \
936 936
                 "($help -): :__docker_repositories" && ret=0
937
+
938
+            case $state in
939
+                (filter-options)
940
+                    __docker_complete_images_filters && ret=0
941
+                    ;;
942
+            esac
937 943
             ;;
938 944
         (import)
939 945
             _arguments $(__docker_arguments) \
... ...
@@ -1126,10 +1180,15 @@ __docker_subcommand() {
1126 1126
         (search)
1127 1127
             _arguments $(__docker_arguments) \
1128 1128
                 $opts_help \
1129
-                "($help)--automated[Only show automated builds]" \
1129
+                "($help)*"{-f=,--filter=}"[Filter values]:filter:->filter-options" \
1130 1130
                 "($help)--no-trunc[Do not truncate output]" \
1131
-                "($help -s --stars)"{-s=,--stars=}"[Only display with at least X stars]:stars:(0 10 100 1000)" \
1132 1131
                 "($help -):term: " && ret=0
1132
+
1133
+            case $state in
1134
+                (filter-options)
1135
+                    __docker_complete_search_filters && ret=0
1136
+                    ;;
1137
+            esac
1133 1138
             ;;
1134 1139
         (start)
1135 1140
             _arguments $(__docker_arguments) \
... ...
@@ -88,7 +88,7 @@ type CommonConfig struct {
88 88
 	Root                 string              `json:"graph,omitempty"`
89 89
 	SocketGroup          string              `json:"group,omitempty"`
90 90
 	TrustKeyPath         string              `json:"-"`
91
-	CorsHeaders          string              `json:"api-cors-headers,omitempty"`
91
+	CorsHeaders          string              `json:"api-cors-header,omitempty"`
92 92
 	EnableCors           bool                `json:"api-enable-cors,omitempty"`
93 93
 
94 94
 	// ClusterStore is the storage backend used for the cluster information. It is used by both
95 95
new file mode 100644
... ...
@@ -0,0 +1,39 @@
0
+package daemon
1
+
2
+import (
3
+	flag "github.com/docker/docker/pkg/mflag"
4
+)
5
+
6
+var (
7
+	defaultPidFile = "/var/run/docker.pid"
8
+	defaultGraph   = "/var/lib/docker"
9
+	defaultExec    = "zones"
10
+)
11
+
12
+// Config defines the configuration of a docker daemon.
13
+// These are the configuration settings that you pass
14
+// to the docker daemon when you launch it with say: `docker -d -e lxc`
15
+type Config struct {
16
+	CommonConfig
17
+
18
+	// Fields below here are platform specific.
19
+	ExecRoot string `json:"exec-root,omitempty"`
20
+}
21
+
22
+// bridgeConfig stores all the bridge driver specific
23
+// configuration.
24
+type bridgeConfig struct {
25
+	commonBridgeConfig
26
+}
27
+
28
+// InstallFlags adds command-line options to the top-level flag parser for
29
+// the current process.
30
+// Subsequent calls to `flag.Parse` will populate config with values parsed
31
+// from the command-line.
32
+func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) {
33
+	// First handle install flags which are consistent cross-platform
34
+	config.InstallCommonFlags(cmd, usageFn)
35
+
36
+	// Then platform-specific install flags
37
+	config.attachExperimentalFlags(cmd, usageFn)
38
+}
... ...
@@ -41,7 +41,7 @@ type bridgeConfig struct {
41 41
 	EnableIPv6                  bool   `json:"ipv6,omitempty"`
42 42
 	EnableIPTables              bool   `json:"iptables,omitempty"`
43 43
 	EnableIPForward             bool   `json:"ip-forward,omitempty"`
44
-	EnableIPMasq                bool   `json:"ip-mask,omitempty"`
44
+	EnableIPMasq                bool   `json:"ip-masq,omitempty"`
45 45
 	EnableUserlandProxy         bool   `json:"userland-proxy,omitempty"`
46 46
 	DefaultIP                   net.IP `json:"ip,omitempty"`
47 47
 	IP                          string `json:"bip,omitempty"`
48 48
new file mode 100644
... ...
@@ -0,0 +1,50 @@
0
+// +build solaris
1
+
2
+package daemon
3
+
4
+import (
5
+	"fmt"
6
+
7
+	"github.com/docker/docker/container"
8
+	networktypes "github.com/docker/engine-api/types/network"
9
+	"github.com/docker/libnetwork"
10
+)
11
+
12
+func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) {
13
+	return nil, nil
14
+}
15
+
16
+// ConnectToNetwork connects a container to a network
17
+func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error {
18
+	return fmt.Errorf("Solaris does not support connecting a running container to a network")
19
+}
20
+
21
+// getSize returns real size & virtual size
22
+func (daemon *Daemon) getSize(container *container.Container) (int64, int64) {
23
+	return 0, 0
24
+}
25
+
26
+// DisconnectFromNetwork disconnects a container from the network
27
+func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error {
28
+	return fmt.Errorf("Solaris does not support disconnecting a running container from a network")
29
+}
30
+
31
+func (daemon *Daemon) setupIpcDirs(container *container.Container) error {
32
+	return nil
33
+}
34
+
35
+func (daemon *Daemon) mountVolumes(container *container.Container) error {
36
+	return nil
37
+}
38
+
39
+func killProcessDirectly(container *container.Container) error {
40
+	return nil
41
+}
42
+
43
+func detachMounted(path string) error {
44
+	return nil
45
+}
46
+
47
+func isLinkable(child *container.Container) bool {
48
+	return false
49
+}
... ...
@@ -9,6 +9,7 @@ import (
9 9
 	"github.com/docker/docker/layer"
10 10
 	"github.com/docker/docker/pkg/idtools"
11 11
 	"github.com/docker/docker/pkg/stringid"
12
+	"github.com/docker/docker/runconfig"
12 13
 	volumestore "github.com/docker/docker/volume/store"
13 14
 	"github.com/docker/engine-api/types"
14 15
 	containertypes "github.com/docker/engine-api/types/container"
... ...
@@ -122,6 +123,9 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig) (retC *containe
122 122
 	if params.NetworkingConfig != nil {
123 123
 		endpointsConfigs = params.NetworkingConfig.EndpointsConfig
124 124
 	}
125
+	// Make sure NetworkMode has an acceptable value. We do this to ensure
126
+	// backwards API compatibility.
127
+	container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig)
125 128
 
126 129
 	if err := daemon.updateContainerNetworkSettings(container, endpointsConfigs); err != nil {
127 130
 		return nil, err
... ...
@@ -6,6 +6,7 @@
6 6
 package daemon
7 7
 
8 8
 import (
9
+	"encoding/json"
9 10
 	"fmt"
10 11
 	"io"
11 12
 	"io/ioutil"
... ...
@@ -15,6 +16,7 @@ import (
15 15
 	"path/filepath"
16 16
 	"regexp"
17 17
 	"runtime"
18
+	"strconv"
18 19
 	"strings"
19 20
 	"sync"
20 21
 	"syscall"
... ...
@@ -23,7 +25,6 @@ import (
23 23
 	"github.com/Sirupsen/logrus"
24 24
 	containerd "github.com/docker/containerd/api/grpc/types"
25 25
 	"github.com/docker/docker/api"
26
-	"github.com/docker/docker/builder"
27 26
 	"github.com/docker/docker/container"
28 27
 	"github.com/docker/docker/daemon/events"
29 28
 	"github.com/docker/docker/daemon/exec"
... ...
@@ -40,7 +41,6 @@ import (
40 40
 	"github.com/docker/docker/distribution/xfer"
41 41
 	"github.com/docker/docker/dockerversion"
42 42
 	"github.com/docker/docker/image"
43
-	"github.com/docker/docker/image/tarexport"
44 43
 	"github.com/docker/docker/layer"
45 44
 	"github.com/docker/docker/libcontainerd"
46 45
 	"github.com/docker/docker/migrate/v1"
... ...
@@ -64,6 +64,7 @@ import (
64 64
 	volumedrivers "github.com/docker/docker/volume/drivers"
65 65
 	"github.com/docker/docker/volume/local"
66 66
 	"github.com/docker/docker/volume/store"
67
+	"github.com/docker/engine-api/types/filters"
67 68
 	"github.com/docker/go-connections/nat"
68 69
 	"github.com/docker/libnetwork"
69 70
 	nwconfig "github.com/docker/libnetwork/config"
... ...
@@ -78,15 +79,6 @@ var (
78 78
 	errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.")
79 79
 )
80 80
 
81
-// ErrImageDoesNotExist is error returned when no image can be found for a reference.
82
-type ErrImageDoesNotExist struct {
83
-	RefOrID string
84
-}
85
-
86
-func (e ErrImageDoesNotExist) Error() string {
87
-	return fmt.Sprintf("no such id: %s", e.RefOrID)
88
-}
89
-
90 81
 // Daemon holds information about the Docker daemon.
91 82
 type Daemon struct {
92 83
 	ID                        string
... ...
@@ -286,11 +278,6 @@ func (daemon *Daemon) restore() error {
286 286
 			defer wg.Done()
287 287
 			rm := c.RestartManager(false)
288 288
 			if c.IsRunning() || c.IsPaused() {
289
-				// Fix activityCount such that graph mounts can be unmounted later
290
-				if err := daemon.layerStore.ReinitRWLayer(c.RWLayer); err != nil {
291
-					logrus.Errorf("Failed to ReinitRWLayer for %s due to %s", c.ID, err)
292
-					return
293
-				}
294 289
 				if err := daemon.containerd.Restore(c.ID, libcontainerd.WithRestartManager(rm)); err != nil {
295 290
 					logrus.Errorf("Failed to restore with containerd: %q", err)
296 291
 					return
... ...
@@ -808,7 +795,7 @@ func NewDaemon(config *Config, registryService *registry.Service, containerdRemo
808 808
 	sysInfo := sysinfo.New(false)
809 809
 	// Check if Devices cgroup is mounted, it is hard requirement for container security,
810 810
 	// on Linux/FreeBSD.
811
-	if runtime.GOOS != "windows" && !sysInfo.CgroupDevicesEnabled {
811
+	if runtime.GOOS != "windows" && runtime.GOOS != "solaris" && !sysInfo.CgroupDevicesEnabled {
812 812
 		return nil, fmt.Errorf("Devices cgroup isn't mounted")
813 813
 	}
814 814
 
... ...
@@ -1006,221 +993,6 @@ func isBrokenPipe(e error) bool {
1006 1006
 	return e == syscall.EPIPE
1007 1007
 }
1008 1008
 
1009
-// ExportImage exports a list of images to the given output stream. The
1010
-// exported images are archived into a tar when written to the output
1011
-// stream. All images with the given tag and all versions containing
1012
-// the same tag are exported. names is the set of tags to export, and
1013
-// outStream is the writer which the images are written to.
1014
-func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error {
1015
-	imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon)
1016
-	return imageExporter.Save(names, outStream)
1017
-}
1018
-
1019
-// LookupImage looks up an image by name and returns it as an ImageInspect
1020
-// structure.
1021
-func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
1022
-	img, err := daemon.GetImage(name)
1023
-	if err != nil {
1024
-		return nil, fmt.Errorf("No such image: %s", name)
1025
-	}
1026
-
1027
-	refs := daemon.referenceStore.References(img.ID())
1028
-	repoTags := []string{}
1029
-	repoDigests := []string{}
1030
-	for _, ref := range refs {
1031
-		switch ref.(type) {
1032
-		case reference.NamedTagged:
1033
-			repoTags = append(repoTags, ref.String())
1034
-		case reference.Canonical:
1035
-			repoDigests = append(repoDigests, ref.String())
1036
-		}
1037
-	}
1038
-
1039
-	var size int64
1040
-	var layerMetadata map[string]string
1041
-	layerID := img.RootFS.ChainID()
1042
-	if layerID != "" {
1043
-		l, err := daemon.layerStore.Get(layerID)
1044
-		if err != nil {
1045
-			return nil, err
1046
-		}
1047
-		defer layer.ReleaseAndLog(daemon.layerStore, l)
1048
-		size, err = l.Size()
1049
-		if err != nil {
1050
-			return nil, err
1051
-		}
1052
-
1053
-		layerMetadata, err = l.Metadata()
1054
-		if err != nil {
1055
-			return nil, err
1056
-		}
1057
-	}
1058
-
1059
-	comment := img.Comment
1060
-	if len(comment) == 0 && len(img.History) > 0 {
1061
-		comment = img.History[len(img.History)-1].Comment
1062
-	}
1063
-
1064
-	imageInspect := &types.ImageInspect{
1065
-		ID:              img.ID().String(),
1066
-		RepoTags:        repoTags,
1067
-		RepoDigests:     repoDigests,
1068
-		Parent:          img.Parent.String(),
1069
-		Comment:         comment,
1070
-		Created:         img.Created.Format(time.RFC3339Nano),
1071
-		Container:       img.Container,
1072
-		ContainerConfig: &img.ContainerConfig,
1073
-		DockerVersion:   img.DockerVersion,
1074
-		Author:          img.Author,
1075
-		Config:          img.Config,
1076
-		Architecture:    img.Architecture,
1077
-		Os:              img.OS,
1078
-		Size:            size,
1079
-		VirtualSize:     size, // TODO: field unused, deprecate
1080
-		RootFS:          rootFSToAPIType(img.RootFS),
1081
-	}
1082
-
1083
-	imageInspect.GraphDriver.Name = daemon.GraphDriverName()
1084
-
1085
-	imageInspect.GraphDriver.Data = layerMetadata
1086
-
1087
-	return imageInspect, nil
1088
-}
1089
-
1090
-// LoadImage uploads a set of images into the repository. This is the
1091
-// complement of ImageExport.  The input stream is an uncompressed tar
1092
-// ball containing images and metadata.
1093
-func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error {
1094
-	imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon)
1095
-	return imageExporter.Load(inTar, outStream, quiet)
1096
-}
1097
-
1098
-// ImageHistory returns a slice of ImageHistory structures for the specified image
1099
-// name by walking the image lineage.
1100
-func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) {
1101
-	img, err := daemon.GetImage(name)
1102
-	if err != nil {
1103
-		return nil, err
1104
-	}
1105
-
1106
-	history := []*types.ImageHistory{}
1107
-
1108
-	layerCounter := 0
1109
-	rootFS := *img.RootFS
1110
-	rootFS.DiffIDs = nil
1111
-
1112
-	for _, h := range img.History {
1113
-		var layerSize int64
1114
-
1115
-		if !h.EmptyLayer {
1116
-			if len(img.RootFS.DiffIDs) <= layerCounter {
1117
-				return nil, fmt.Errorf("too many non-empty layers in History section")
1118
-			}
1119
-
1120
-			rootFS.Append(img.RootFS.DiffIDs[layerCounter])
1121
-			l, err := daemon.layerStore.Get(rootFS.ChainID())
1122
-			if err != nil {
1123
-				return nil, err
1124
-			}
1125
-			layerSize, err = l.DiffSize()
1126
-			layer.ReleaseAndLog(daemon.layerStore, l)
1127
-			if err != nil {
1128
-				return nil, err
1129
-			}
1130
-
1131
-			layerCounter++
1132
-		}
1133
-
1134
-		history = append([]*types.ImageHistory{{
1135
-			ID:        "<missing>",
1136
-			Created:   h.Created.Unix(),
1137
-			CreatedBy: h.CreatedBy,
1138
-			Comment:   h.Comment,
1139
-			Size:      layerSize,
1140
-		}}, history...)
1141
-	}
1142
-
1143
-	// Fill in image IDs and tags
1144
-	histImg := img
1145
-	id := img.ID()
1146
-	for _, h := range history {
1147
-		h.ID = id.String()
1148
-
1149
-		var tags []string
1150
-		for _, r := range daemon.referenceStore.References(id) {
1151
-			if _, ok := r.(reference.NamedTagged); ok {
1152
-				tags = append(tags, r.String())
1153
-			}
1154
-		}
1155
-
1156
-		h.Tags = tags
1157
-
1158
-		id = histImg.Parent
1159
-		if id == "" {
1160
-			break
1161
-		}
1162
-		histImg, err = daemon.GetImage(id.String())
1163
-		if err != nil {
1164
-			break
1165
-		}
1166
-	}
1167
-
1168
-	return history, nil
1169
-}
1170
-
1171
-// GetImageID returns an image ID corresponding to the image referred to by
1172
-// refOrID.
1173
-func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) {
1174
-	id, ref, err := reference.ParseIDOrReference(refOrID)
1175
-	if err != nil {
1176
-		return "", err
1177
-	}
1178
-	if id != "" {
1179
-		if _, err := daemon.imageStore.Get(image.ID(id)); err != nil {
1180
-			return "", ErrImageDoesNotExist{refOrID}
1181
-		}
1182
-		return image.ID(id), nil
1183
-	}
1184
-
1185
-	if id, err := daemon.referenceStore.Get(ref); err == nil {
1186
-		return id, nil
1187
-	}
1188
-	if tagged, ok := ref.(reference.NamedTagged); ok {
1189
-		if id, err := daemon.imageStore.Search(tagged.Tag()); err == nil {
1190
-			for _, namedRef := range daemon.referenceStore.References(id) {
1191
-				if namedRef.Name() == ref.Name() {
1192
-					return id, nil
1193
-				}
1194
-			}
1195
-		}
1196
-	}
1197
-
1198
-	// Search based on ID
1199
-	if id, err := daemon.imageStore.Search(refOrID); err == nil {
1200
-		return id, nil
1201
-	}
1202
-
1203
-	return "", ErrImageDoesNotExist{refOrID}
1204
-}
1205
-
1206
-// GetImage returns an image corresponding to the image referred to by refOrID.
1207
-func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) {
1208
-	imgID, err := daemon.GetImageID(refOrID)
1209
-	if err != nil {
1210
-		return nil, err
1211
-	}
1212
-	return daemon.imageStore.Get(imgID)
1213
-}
1214
-
1215
-// GetImageOnBuild looks up a Docker image referenced by `name`.
1216
-func (daemon *Daemon) GetImageOnBuild(name string) (builder.Image, error) {
1217
-	img, err := daemon.GetImage(name)
1218
-	if err != nil {
1219
-		return nil, err
1220
-	}
1221
-	return img, nil
1222
-}
1223
-
1224 1009
 // GraphDriverName returns the name of the graph driver used by the layer.Store
1225 1010
 func (daemon *Daemon) GraphDriverName() string {
1226 1011
 	return daemon.layerStore.DriverName()
... ...
@@ -1241,57 +1013,6 @@ func (daemon *Daemon) GetRemappedUIDGID() (int, int) {
1241 1241
 	return uid, gid
1242 1242
 }
1243 1243
 
1244
-// GetCachedImage returns the most recent created image that is a child
1245
-// of the image with imgID, that had the same config when it was
1246
-// created. nil is returned if a child cannot be found. An error is
1247
-// returned if the parent image cannot be found.
1248
-func (daemon *Daemon) GetCachedImage(imgID image.ID, config *containertypes.Config) (*image.Image, error) {
1249
-	// Loop on the children of the given image and check the config
1250
-	getMatch := func(siblings []image.ID) (*image.Image, error) {
1251
-		var match *image.Image
1252
-		for _, id := range siblings {
1253
-			img, err := daemon.imageStore.Get(id)
1254
-			if err != nil {
1255
-				return nil, fmt.Errorf("unable to find image %q", id)
1256
-			}
1257
-
1258
-			if runconfig.Compare(&img.ContainerConfig, config) {
1259
-				// check for the most up to date match
1260
-				if match == nil || match.Created.Before(img.Created) {
1261
-					match = img
1262
-				}
1263
-			}
1264
-		}
1265
-		return match, nil
1266
-	}
1267
-
1268
-	// In this case, this is `FROM scratch`, which isn't an actual image.
1269
-	if imgID == "" {
1270
-		images := daemon.imageStore.Map()
1271
-		var siblings []image.ID
1272
-		for id, img := range images {
1273
-			if img.Parent == imgID {
1274
-				siblings = append(siblings, id)
1275
-			}
1276
-		}
1277
-		return getMatch(siblings)
1278
-	}
1279
-
1280
-	// find match from child images
1281
-	siblings := daemon.imageStore.Children(imgID)
1282
-	return getMatch(siblings)
1283
-}
1284
-
1285
-// GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent`
1286
-// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
1287
-func (daemon *Daemon) GetCachedImageOnBuild(imgID string, cfg *containertypes.Config) (string, error) {
1288
-	cache, err := daemon.GetCachedImage(image.ID(imgID), cfg)
1289
-	if cache == nil || err != nil {
1290
-		return "", err
1291
-	}
1292
-	return cache.ID().String(), nil
1293
-}
1294
-
1295 1244
 // tempDir returns the default directory to use for temporary files.
1296 1245
 func tempDir(rootDir string, rootUID, rootGID int) (string, error) {
1297 1246
 	var tmpDir string
... ...
@@ -1427,12 +1148,85 @@ func (daemon *Daemon) AuthenticateToRegistry(ctx context.Context, authConfig *ty
1427 1427
 	return daemon.RegistryService.Auth(authConfig, dockerversion.DockerUserAgent(ctx))
1428 1428
 }
1429 1429
 
1430
+var acceptedSearchFilterTags = map[string]bool{
1431
+	"is-automated": true,
1432
+	"is-official":  true,
1433
+	"stars":        true,
1434
+}
1435
+
1430 1436
 // SearchRegistryForImages queries the registry for images matching
1431 1437
 // term. authConfig is used to login.
1432
-func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, term string,
1438
+func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string,
1433 1439
 	authConfig *types.AuthConfig,
1434 1440
 	headers map[string][]string) (*registrytypes.SearchResults, error) {
1435
-	return daemon.RegistryService.Search(term, authConfig, dockerversion.DockerUserAgent(ctx), headers)
1441
+
1442
+	searchFilters, err := filters.FromParam(filtersArgs)
1443
+	if err != nil {
1444
+		return nil, err
1445
+	}
1446
+	if err := searchFilters.Validate(acceptedSearchFilterTags); err != nil {
1447
+		return nil, err
1448
+	}
1449
+
1450
+	unfilteredResult, err := daemon.RegistryService.Search(term, authConfig, dockerversion.DockerUserAgent(ctx), headers)
1451
+	if err != nil {
1452
+		return nil, err
1453
+	}
1454
+
1455
+	var isAutomated, isOfficial bool
1456
+	var hasStarFilter = 0
1457
+	if searchFilters.Include("is-automated") {
1458
+		if searchFilters.ExactMatch("is-automated", "true") {
1459
+			isAutomated = true
1460
+		} else if !searchFilters.ExactMatch("is-automated", "false") {
1461
+			return nil, fmt.Errorf("Invalid filter 'is-automated=%s'", searchFilters.Get("is-automated"))
1462
+		}
1463
+	}
1464
+	if searchFilters.Include("is-official") {
1465
+		if searchFilters.ExactMatch("is-official", "true") {
1466
+			isOfficial = true
1467
+		} else if !searchFilters.ExactMatch("is-official", "false") {
1468
+			return nil, fmt.Errorf("Invalid filter 'is-official=%s'", searchFilters.Get("is-official"))
1469
+		}
1470
+	}
1471
+	if searchFilters.Include("stars") {
1472
+		hasStars := searchFilters.Get("stars")
1473
+		for _, hasStar := range hasStars {
1474
+			iHasStar, err := strconv.Atoi(hasStar)
1475
+			if err != nil {
1476
+				return nil, fmt.Errorf("Invalid filter 'stars=%s'", hasStar)
1477
+			}
1478
+			if iHasStar > hasStarFilter {
1479
+				hasStarFilter = iHasStar
1480
+			}
1481
+		}
1482
+	}
1483
+
1484
+	filteredResults := []registrytypes.SearchResult{}
1485
+	for _, result := range unfilteredResult.Results {
1486
+		if searchFilters.Include("is-automated") {
1487
+			if isAutomated != result.IsAutomated {
1488
+				continue
1489
+			}
1490
+		}
1491
+		if searchFilters.Include("is-official") {
1492
+			if isOfficial != result.IsOfficial {
1493
+				continue
1494
+			}
1495
+		}
1496
+		if searchFilters.Include("stars") {
1497
+			if result.StarCount < hasStarFilter {
1498
+				continue
1499
+			}
1500
+		}
1501
+		filteredResults = append(filteredResults, result)
1502
+	}
1503
+
1504
+	return &registrytypes.SearchResults{
1505
+		Query:      unfilteredResult.Query,
1506
+		NumResults: len(filteredResults),
1507
+		Results:    filteredResults,
1508
+	}, nil
1436 1509
 }
1437 1510
 
1438 1511
 // IsShuttingDown tells whether the daemon is shutting down or not
... ...
@@ -1539,6 +1333,11 @@ func (daemon *Daemon) initDiscovery(config *Config) error {
1539 1539
 func (daemon *Daemon) Reload(config *Config) error {
1540 1540
 	daemon.configStore.reloadLock.Lock()
1541 1541
 	defer daemon.configStore.reloadLock.Unlock()
1542
+
1543
+	if err := daemon.reloadClusterDiscovery(config); err != nil {
1544
+		return err
1545
+	}
1546
+
1542 1547
 	if config.IsValueSet("labels") {
1543 1548
 		daemon.configStore.Labels = config.Labels
1544 1549
 	}
... ...
@@ -1572,7 +1371,28 @@ func (daemon *Daemon) Reload(config *Config) error {
1572 1572
 		daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads)
1573 1573
 	}
1574 1574
 
1575
-	return daemon.reloadClusterDiscovery(config)
1575
+	// We emit daemon reload event here with updatable configurations
1576
+	attributes := map[string]string{}
1577
+	attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug)
1578
+	attributes["cluster-store"] = daemon.configStore.ClusterStore
1579
+	if daemon.configStore.ClusterOpts != nil {
1580
+		opts, _ := json.Marshal(daemon.configStore.ClusterOpts)
1581
+		attributes["cluster-store-opts"] = string(opts)
1582
+	} else {
1583
+		attributes["cluster-store-opts"] = "{}"
1584
+	}
1585
+	attributes["cluster-advertise"] = daemon.configStore.ClusterAdvertise
1586
+	if daemon.configStore.Labels != nil {
1587
+		labels, _ := json.Marshal(daemon.configStore.Labels)
1588
+		attributes["labels"] = string(labels)
1589
+	} else {
1590
+		attributes["labels"] = "[]"
1591
+	}
1592
+	attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads)
1593
+	attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads)
1594
+	daemon.LogDaemonEventWithAttributes("reload", attributes)
1595
+
1596
+	return nil
1576 1597
 }
1577 1598
 
1578 1599
 func (daemon *Daemon) reloadClusterDiscovery(config *Config) error {
1579 1600
new file mode 100644
... ...
@@ -0,0 +1,159 @@
0
+// +build solaris,cgo
1
+
2
+package daemon
3
+
4
+import (
5
+	"fmt"
6
+
7
+	"github.com/docker/docker/container"
8
+	"github.com/docker/docker/image"
9
+	"github.com/docker/docker/layer"
10
+	"github.com/docker/docker/pkg/idtools"
11
+	"github.com/docker/docker/pkg/parsers/kernel"
12
+	"github.com/docker/docker/reference"
13
+	"github.com/docker/engine-api/types"
14
+	containertypes "github.com/docker/engine-api/types/container"
15
+	"github.com/docker/libnetwork"
16
+	nwconfig "github.com/docker/libnetwork/config"
17
+)
18
+
19
+//#include <zone.h>
20
+import "C"
21
+
22
+const (
23
+	defaultVirtualSwitch = "Virtual Switch"
24
+	platformSupported    = true
25
+	solarisMinCPUShares  = 1
26
+	solarisMaxCPUShares  = 65535
27
+)
28
+
29
+func (daemon *Daemon) cleanupMountsByID(id string) error {
30
+	return nil
31
+}
32
+
33
+func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error {
34
+	return nil
35
+}
36
+
37
+func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) {
38
+	return nil, nil, nil
39
+}
40
+
41
+func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error {
42
+	return nil
43
+}
44
+
45
+// setupInitLayer populates a directory with mountpoints suitable
46
+// for bind-mounting dockerinit into the container. The mountpoint is simply an
47
+// empty file at /.dockerinit
48
+//
49
+// This extra layer is used by all containers as the top-most ro layer. It protects
50
+// the container from unwanted side-effects on the rw layer.
51
+func setupInitLayer(initLayer string, rootUID, rootGID int) error {
52
+	return nil
53
+}
54
+
55
+func checkKernel() error {
56
+	// solaris can rely upon checkSystem() below, we don't skew kernel versions
57
+	return nil
58
+}
59
+
60
+func (daemon *Daemon) getCgroupDriver() string {
61
+	return ""
62
+}
63
+
64
+func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
65
+	return nil
66
+}
67
+
68
+// verifyPlatformContainerSettings performs platform-specific validation of the
69
+// hostconfig and config structures.
70
+func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) {
71
+	warnings := []string{}
72
+	return warnings, nil
73
+}
74
+
75
+// verifyDaemonSettings performs validation of daemon config struct
76
+func verifyDaemonSettings(config *Config) error {
77
+	// checkSystem validates platform-specific requirements
78
+	return nil
79
+}
80
+
81
+func checkSystem() error {
82
+	// check OS version for compatibility, ensure running in global zone
83
+	var err error
84
+	var id C.zoneid_t
85
+
86
+	if id, err = C.getzoneid(); err != nil {
87
+		return fmt.Errorf("Exiting. Error getting zone id: %+v", err)
88
+	}
89
+	if int(id) != 0 {
90
+		return fmt.Errorf("Exiting because the Docker daemon is not running in the global zone")
91
+	}
92
+
93
+	v, err := kernel.GetKernelVersion()
94
+	if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 5, Major: 12, Minor: 0}) < 0 {
95
+		return fmt.Errorf("Your Solaris kernel version: %s doesn't support Docker. Please upgrade to 5.12.0", v.String())
96
+	}
97
+	return err
98
+}
99
+
100
+// configureMaxThreads sets the Go runtime max threads threshold
101
+// which is 90% of the kernel setting from /proc/sys/kernel/threads-max
102
+func configureMaxThreads(config *Config) error {
103
+	return nil
104
+}
105
+
106
+// configureKernelSecuritySupport configures and validate security support for the kernel
107
+func configureKernelSecuritySupport(config *Config, driverName string) error {
108
+	return nil
109
+}
110
+
111
+func (daemon *Daemon) initNetworkController(config *Config) (libnetwork.NetworkController, error) {
112
+	return nil, nil
113
+}
114
+
115
+// registerLinks sets up links between containers and writes the
116
+// configuration out for persistence.
117
+func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error {
118
+	return nil
119
+}
120
+
121
+func (daemon *Daemon) cleanupMounts() error {
122
+	return nil
123
+}
124
+
125
+// conditionalMountOnStart is a platform specific helper function during the
126
+// container start to call mount.
127
+func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
128
+	return nil
129
+}
130
+
131
+// conditionalUnmountOnCleanup is a platform specific helper function called
132
+// during the cleanup of a container to unmount.
133
+func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error {
134
+	return daemon.Unmount(container)
135
+}
136
+
137
+func restoreCustomImage(is image.Store, ls layer.Store, rs reference.Store) error {
138
+	// Solaris has no custom images to register
139
+	return nil
140
+}
141
+
142
+func driverOptions(config *Config) []nwconfig.Option {
143
+	return []nwconfig.Option{}
144
+}
145
+
146
+func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
147
+	return nil, nil
148
+}
149
+
150
+// setDefaultIsolation determine the default isolation mode for the
151
+// daemon to run in. This is only applicable on Windows
152
+func (daemon *Daemon) setDefaultIsolation() error {
153
+	return nil
154
+}
155
+
156
+func rootFSToAPIType(rootfs *image.RootFS) types.RootFS {
157
+	return types.RootFS{}
158
+}
... ...
@@ -488,7 +488,9 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.
488 488
 	if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 {
489 489
 		return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj)
490 490
 	}
491
-	if sysInfo.IPv4ForwardingDisabled {
491
+
492
+	// ip-forwarding does not affect container with '--net=host' (or '--net=none')
493
+	if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) {
492 494
 		warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.")
493 495
 		logrus.Warnf("IPv4 forwarding is disabled. Networking will not work")
494 496
 	}
... ...
@@ -1,4 +1,4 @@
1
-// +build !linux,!freebsd,!windows
1
+// +build !linux,!freebsd,!windows,!solaris
2 2
 
3 3
 package daemon
4 4
 
... ...
@@ -470,6 +470,10 @@ func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
470 470
 // daemon to run in. This is only applicable on Windows
471 471
 func (daemon *Daemon) setDefaultIsolation() error {
472 472
 	daemon.defaultIsolation = containertypes.Isolation("process")
473
+	// On client SKUs, default to Hyper-V
474
+	if system.IsWindowsClient() {
475
+		daemon.defaultIsolation = containertypes.Isolation("hyperv")
476
+	}
473 477
 	for _, option := range daemon.configStore.ExecOptions {
474 478
 		key, val, err := parsers.ParseKeyValueOpt(option)
475 479
 		if err != nil {
... ...
@@ -485,6 +489,12 @@ func (daemon *Daemon) setDefaultIsolation() error {
485 485
 			if containertypes.Isolation(val).IsHyperV() {
486 486
 				daemon.defaultIsolation = containertypes.Isolation("hyperv")
487 487
 			}
488
+			if containertypes.Isolation(val).IsProcess() {
489
+				if system.IsWindowsClient() {
490
+					return fmt.Errorf("Windows client operating systems only support Hyper-V containers")
491
+				}
492
+				daemon.defaultIsolation = containertypes.Isolation("process")
493
+			}
488 494
 		default:
489 495
 			return fmt.Errorf("Unrecognised exec-opt '%s'\n", key)
490 496
 		}
... ...
@@ -1,4 +1,4 @@
1
-// +build !linux,!darwin,!freebsd,!windows
1
+// +build !linux,!darwin,!freebsd,!windows,!solaris
2 2
 
3 3
 package daemon
4 4
 
... ...
@@ -80,6 +80,20 @@ func (daemon *Daemon) LogNetworkEventWithAttributes(nw libnetwork.Network, actio
80 80
 	daemon.EventsService.Log(action, events.NetworkEventType, actor)
81 81
 }
82 82
 
83
+// LogDaemonEventWithAttributes generates an event related to the daemon itself with specific given attributes.
84
+func (daemon *Daemon) LogDaemonEventWithAttributes(action string, attributes map[string]string) {
85
+	if daemon.EventsService != nil {
86
+		if info, err := daemon.SystemInfo(); err == nil && info.Name != "" {
87
+			attributes["name"] = info.Name
88
+		}
89
+		actor := events.Actor{
90
+			ID:         daemon.ID,
91
+			Attributes: attributes,
92
+		}
93
+		daemon.EventsService.Log(action, events.DaemonEventType, actor)
94
+	}
95
+}
96
+
83 97
 // SubscribeToEvents returns the currently record of events, a channel to stream new events from, and a function to cancel the stream of events.
84 98
 func (daemon *Daemon) SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) {
85 99
 	ef := daemonevents.NewFilter(filter)
... ...
@@ -20,6 +20,7 @@ func NewFilter(filter filters.Args) *Filter {
20 20
 func (ef *Filter) Include(ev events.Message) bool {
21 21
 	return ef.filter.ExactMatch("event", ev.Action) &&
22 22
 		ef.filter.ExactMatch("type", ev.Type) &&
23
+		ef.matchDaemon(ev) &&
23 24
 		ef.matchContainer(ev) &&
24 25
 		ef.matchVolume(ev) &&
25 26
 		ef.matchNetwork(ev) &&
... ...
@@ -34,6 +35,10 @@ func (ef *Filter) matchLabels(attributes map[string]string) bool {
34 34
 	return ef.filter.MatchKVList("label", attributes)
35 35
 }
36 36
 
37
+func (ef *Filter) matchDaemon(ev events.Message) bool {
38
+	return ef.fuzzyMatchName(ev, events.DaemonEventType)
39
+}
40
+
37 41
 func (ef *Filter) matchContainer(ev events.Message) bool {
38 42
 	return ef.fuzzyMatchName(ev, events.ContainerEventType)
39 43
 }
40 44
new file mode 100644
... ...
@@ -0,0 +1,11 @@
0
+package daemon
1
+
2
+import (
3
+	"github.com/docker/docker/container"
4
+	"github.com/docker/docker/daemon/exec"
5
+	"github.com/docker/docker/libcontainerd"
6
+)
7
+
8
+func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error {
9
+	return nil
10
+}
... ...
@@ -70,6 +70,7 @@ type Driver struct {
70 70
 	root          string
71 71
 	uidMaps       []idtools.IDMap
72 72
 	gidMaps       []idtools.IDMap
73
+	ctr           *graphdriver.RefCounter
73 74
 	pathCacheLock sync.Mutex
74 75
 	pathCache     map[string]string
75 76
 }
... ...
@@ -108,6 +109,7 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
108 108
 		uidMaps:   uidMaps,
109 109
 		gidMaps:   gidMaps,
110 110
 		pathCache: make(map[string]string),
111
+		ctr:       graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)),
111 112
 	}
112 113
 
113 114
 	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
... ...
@@ -320,6 +322,9 @@ func (a *Driver) Get(id, mountLabel string) (string, error) {
320 320
 			m = a.getMountpoint(id)
321 321
 		}
322 322
 	}
323
+	if count := a.ctr.Increment(m); count > 1 {
324
+		return m, nil
325
+	}
323 326
 
324 327
 	// If a dir does not have a parent ( no layers )do not try to mount
325 328
 	// just return the diff path to the data
... ...
@@ -344,6 +349,9 @@ func (a *Driver) Put(id string) error {
344 344
 		a.pathCache[id] = m
345 345
 	}
346 346
 	a.pathCacheLock.Unlock()
347
+	if count := a.ctr.Decrement(m); count > 0 {
348
+		return nil
349
+	}
347 350
 
348 351
 	err := a.unmount(m)
349 352
 	if err != nil {
... ...
@@ -2,31 +2,66 @@ package graphdriver
2 2
 
3 3
 import "sync"
4 4
 
5
+type minfo struct {
6
+	check bool
7
+	count int
8
+}
9
+
5 10
 // RefCounter is a generic counter for use by graphdriver Get/Put calls
6 11
 type RefCounter struct {
7
-	counts map[string]int
8
-	mu     sync.Mutex
12
+	counts  map[string]*minfo
13
+	mu      sync.Mutex
14
+	checker Checker
9 15
 }
10 16
 
11 17
 // NewRefCounter returns a new RefCounter
12
-func NewRefCounter() *RefCounter {
13
-	return &RefCounter{counts: make(map[string]int)}
18
+func NewRefCounter(c Checker) *RefCounter {
19
+	return &RefCounter{
20
+		checker: c,
21
+		counts:  make(map[string]*minfo),
22
+	}
14 23
 }
15 24
 
16 25
 // Increment increases the ref count for the given path and returns the current count
17
-func (c *RefCounter) Increment(id string) int {
26
+func (c *RefCounter) Increment(path string) int {
18 27
 	c.mu.Lock()
19
-	c.counts[id]++
20
-	count := c.counts[id]
28
+	m := c.counts[path]
29
+	if m == nil {
30
+		m = &minfo{}
31
+		c.counts[path] = m
32
+	}
33
+	// if we are checking this path for the first time check to make sure
34
+	// if it was already mounted on the system and make sure we have a correct ref
35
+	// count if it is mounted as it is in use.
36
+	if !m.check {
37
+		m.check = true
38
+		if c.checker.IsMounted(path) {
39
+			m.count++
40
+		}
41
+	}
42
+	m.count++
21 43
 	c.mu.Unlock()
22
-	return count
44
+	return m.count
23 45
 }
24 46
 
25 47
 // Decrement decreases the ref count for the given id and returns the current count
26
-func (c *RefCounter) Decrement(id string) int {
48
+func (c *RefCounter) Decrement(path string) int {
27 49
 	c.mu.Lock()
28
-	c.counts[id]--
29
-	count := c.counts[id]
50
+	m := c.counts[path]
51
+	if m == nil {
52
+		m = &minfo{}
53
+		c.counts[path] = m
54
+	}
55
+	// if we are checking this path for the first time check to make sure
56
+	// if it was already mounted on the system and make sure we have a correct ref
57
+	// count if it is mounted as it is in use.
58
+	if !m.check {
59
+		m.check = true
60
+		if c.checker.IsMounted(path) {
61
+			m.count++
62
+		}
63
+	}
64
+	m.count--
30 65
 	c.mu.Unlock()
31
-	return count
66
+	return m.count
32 67
 }
... ...
@@ -47,7 +47,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
47 47
 		home:      home,
48 48
 		uidMaps:   uidMaps,
49 49
 		gidMaps:   gidMaps,
50
-		ctr:       graphdriver.NewRefCounter(),
50
+		ctr:       graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
51 51
 	}
52 52
 
53 53
 	return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
... ...
@@ -160,35 +160,35 @@ func (d *Driver) Remove(id string) error {
160 160
 // Get mounts a device with given id into the root filesystem
161 161
 func (d *Driver) Get(id, mountLabel string) (string, error) {
162 162
 	mp := path.Join(d.home, "mnt", id)
163
-	if count := d.ctr.Increment(id); count > 1 {
163
+	if count := d.ctr.Increment(mp); count > 1 {
164 164
 		return mp, nil
165 165
 	}
166 166
 
167 167
 	uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
168 168
 	if err != nil {
169
-		d.ctr.Decrement(id)
169
+		d.ctr.Decrement(mp)
170 170
 		return "", err
171 171
 	}
172 172
 
173 173
 	// Create the target directories if they don't exist
174 174
 	if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) {
175
-		d.ctr.Decrement(id)
175
+		d.ctr.Decrement(mp)
176 176
 		return "", err
177 177
 	}
178 178
 	if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) {
179
-		d.ctr.Decrement(id)
179
+		d.ctr.Decrement(mp)
180 180
 		return "", err
181 181
 	}
182 182
 
183 183
 	// Mount the device
184 184
 	if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil {
185
-		d.ctr.Decrement(id)
185
+		d.ctr.Decrement(mp)
186 186
 		return "", err
187 187
 	}
188 188
 
189 189
 	rootFs := path.Join(mp, "rootfs")
190 190
 	if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) {
191
-		d.ctr.Decrement(id)
191
+		d.ctr.Decrement(mp)
192 192
 		d.DeviceSet.UnmountDevice(id, mp)
193 193
 		return "", err
194 194
 	}
... ...
@@ -198,7 +198,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
198 198
 		// Create an "id" file with the container/image id in it to help reconstruct this in case
199 199
 		// of later problems
200 200
 		if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
201
-			d.ctr.Decrement(id)
201
+			d.ctr.Decrement(mp)
202 202
 			d.DeviceSet.UnmountDevice(id, mp)
203 203
 			return "", err
204 204
 		}
... ...
@@ -209,10 +209,10 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
209 209
 
210 210
 // Put unmounts a device and removes it.
211 211
 func (d *Driver) Put(id string) error {
212
-	if count := d.ctr.Decrement(id); count > 0 {
212
+	mp := path.Join(d.home, "mnt", id)
213
+	if count := d.ctr.Decrement(mp); count > 0 {
213 214
 		return nil
214 215
 	}
215
-	mp := path.Join(d.home, "mnt", id)
216 216
 	err := d.DeviceSet.UnmountDevice(id, mp)
217 217
 	if err != nil {
218 218
 		logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err)
... ...
@@ -113,6 +113,12 @@ type FileGetCloser interface {
113 113
 	Close() error
114 114
 }
115 115
 
116
+// Checker makes checks on specified filesystems.
117
+type Checker interface {
118
+	// IsMounted returns true if the provided path is mounted for the specific checker
119
+	IsMounted(path string) bool
120
+}
121
+
116 122
 func init() {
117 123
 	drivers = make(map[string]InitFunc)
118 124
 }
... ...
@@ -5,6 +5,8 @@ package graphdriver
5 5
 import (
6 6
 	"path/filepath"
7 7
 	"syscall"
8
+
9
+	"github.com/docker/docker/pkg/mount"
8 10
 )
9 11
 
10 12
 const (
... ...
@@ -89,6 +91,36 @@ func GetFSMagic(rootpath string) (FsMagic, error) {
89 89
 	return FsMagic(buf.Type), nil
90 90
 }
91 91
 
92
+// NewFsChecker returns a checker configured for the provided FsMagic
93
+func NewFsChecker(t FsMagic) Checker {
94
+	return &fsChecker{
95
+		t: t,
96
+	}
97
+}
98
+
99
+type fsChecker struct {
100
+	t FsMagic
101
+}
102
+
103
+func (c *fsChecker) IsMounted(path string) bool {
104
+	m, _ := Mounted(c.t, path)
105
+	return m
106
+}
107
+
108
+// NewDefaultChecker returns a check that parses /proc/mountinfo to check
109
+// if the specified path is mounted.
110
+func NewDefaultChecker() Checker {
111
+	return &defaultChecker{}
112
+}
113
+
114
+type defaultChecker struct {
115
+}
116
+
117
+func (c *defaultChecker) IsMounted(path string) bool {
118
+	m, _ := mount.Mounted(path)
119
+	return m
120
+}
121
+
92 122
 // Mounted checks if the given path is mounted as the fs type
93 123
 func Mounted(fsType FsMagic, mountPath string) (bool, error) {
94 124
 	var buf syscall.Statfs_t
95 125
new file mode 100644
... ...
@@ -0,0 +1,65 @@
0
+// +build solaris,cgo
1
+
2
+package graphdriver
3
+
4
+/*
5
+#include <sys/statvfs.h>
6
+#include <stdlib.h>
7
+
8
+static inline struct statvfs *getstatfs(char *s) {
9
+        struct statvfs *buf;
10
+        int err;
11
+        buf = (struct statvfs *)malloc(sizeof(struct statvfs));
12
+        err = statvfs(s, buf);
13
+        return buf;
14
+}
15
+*/
16
+import "C"
17
+import (
18
+	"path/filepath"
19
+	"unsafe"
20
+
21
+	log "github.com/Sirupsen/logrus"
22
+)
23
+
24
+const (
25
+	// FsMagicZfs filesystem id for Zfs
26
+	FsMagicZfs = FsMagic(0x2fc12fc1)
27
+)
28
+
29
+var (
30
+	// Slice of drivers that should be used in an order
31
+	priority = []string{
32
+		"zfs",
33
+	}
34
+
35
+	// FsNames maps filesystem id to name of the filesystem.
36
+	FsNames = map[FsMagic]string{
37
+		FsMagicZfs: "zfs",
38
+	}
39
+)
40
+
41
+// GetFSMagic returns the filesystem id given the path.
42
+func GetFSMagic(rootpath string) (FsMagic, error) {
43
+	return 0, nil
44
+}
45
+
46
+// Mounted checks if the given path is mounted as the fs type
47
+// Solaris supports only ZFS for now
48
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
49
+
50
+	cs := C.CString(filepath.Dir(mountPath))
51
+	buf := C.getstatfs(cs)
52
+
53
+	// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
54
+	if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
55
+		(buf.f_basetype[3] != 0) {
56
+		log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
57
+		C.free(unsafe.Pointer(buf))
58
+		return false, ErrPrerequisites
59
+	}
60
+
61
+	C.free(unsafe.Pointer(buf))
62
+	C.free(unsafe.Pointer(cs))
63
+	return true, nil
64
+}
... ...
@@ -1,4 +1,4 @@
1
-// +build !linux,!windows,!freebsd
1
+// +build !linux,!windows,!freebsd,!solaris
2 2
 
3 3
 package graphdriver
4 4
 
... ...
@@ -4,8 +4,6 @@ var (
4 4
 	// Slice of drivers that should be used in order
5 5
 	priority = []string{
6 6
 		"windowsfilter",
7
-		"windowsdiff",
8
-		"vfs",
9 7
 	}
10 8
 )
11 9
 
... ...
@@ -9,7 +9,6 @@ import (
9 9
 	"os"
10 10
 	"os/exec"
11 11
 	"path"
12
-	"sync"
13 12
 	"syscall"
14 13
 
15 14
 	"github.com/Sirupsen/logrus"
... ...
@@ -92,12 +91,10 @@ func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Rea
92 92
 
93 93
 // Driver contains information about the home directory and the list of active mounts that are created using this driver.
94 94
 type Driver struct {
95
-	home          string
96
-	pathCacheLock sync.Mutex
97
-	pathCache     map[string]string
98
-	uidMaps       []idtools.IDMap
99
-	gidMaps       []idtools.IDMap
100
-	ctr           *graphdriver.RefCounter
95
+	home    string
96
+	uidMaps []idtools.IDMap
97
+	gidMaps []idtools.IDMap
98
+	ctr     *graphdriver.RefCounter
101 99
 }
102 100
 
103 101
 func init() {
... ...
@@ -141,11 +138,10 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
141 141
 	}
142 142
 
143 143
 	d := &Driver{
144
-		home:      home,
145
-		pathCache: make(map[string]string),
146
-		uidMaps:   uidMaps,
147
-		gidMaps:   gidMaps,
148
-		ctr:       graphdriver.NewRefCounter(),
144
+		home:    home,
145
+		uidMaps: uidMaps,
146
+		gidMaps: gidMaps,
147
+		ctr:     graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
149 148
 	}
150 149
 
151 150
 	return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil
... ...
@@ -328,110 +324,64 @@ func (d *Driver) Remove(id string) error {
328 328
 	if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) {
329 329
 		return err
330 330
 	}
331
-	d.pathCacheLock.Lock()
332
-	delete(d.pathCache, id)
333
-	d.pathCacheLock.Unlock()
334 331
 	return nil
335 332
 }
336 333
 
337 334
 // Get creates and mounts the required file system for the given id and returns the mount path.
338
-func (d *Driver) Get(id string, mountLabel string) (string, error) {
335
+func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
339 336
 	dir := d.dir(id)
340 337
 	if _, err := os.Stat(dir); err != nil {
341 338
 		return "", err
342 339
 	}
343
-
344 340
 	// If id has a root, just return it
345 341
 	rootDir := path.Join(dir, "root")
346 342
 	if _, err := os.Stat(rootDir); err == nil {
347
-		d.pathCacheLock.Lock()
348
-		d.pathCache[id] = rootDir
349
-		d.pathCacheLock.Unlock()
350 343
 		return rootDir, nil
351 344
 	}
352
-
353
-	lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
354
-	if err != nil {
355
-		return "", err
356
-	}
357
-	lowerDir := path.Join(d.dir(string(lowerID)), "root")
358
-	upperDir := path.Join(dir, "upper")
359
-	workDir := path.Join(dir, "work")
360 345
 	mergedDir := path.Join(dir, "merged")
361
-
362
-	if count := d.ctr.Increment(id); count > 1 {
346
+	if count := d.ctr.Increment(mergedDir); count > 1 {
363 347
 		return mergedDir, nil
364 348
 	}
365
-
366
-	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
367
-
368
-	// if it's mounted already, just return
369
-	mounted, err := d.mounted(mergedDir)
349
+	defer func() {
350
+		if err != nil {
351
+			if c := d.ctr.Decrement(mergedDir); c <= 0 {
352
+				syscall.Unmount(mergedDir, 0)
353
+			}
354
+		}
355
+	}()
356
+	lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
370 357
 	if err != nil {
371
-		d.ctr.Decrement(id)
372 358
 		return "", err
373 359
 	}
374
-	if mounted {
375
-		d.ctr.Decrement(id)
376
-		return mergedDir, nil
377
-	}
378
-
360
+	var (
361
+		lowerDir = path.Join(d.dir(string(lowerID)), "root")
362
+		upperDir = path.Join(dir, "upper")
363
+		workDir  = path.Join(dir, "work")
364
+		opts     = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
365
+	)
379 366
 	if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
380
-		d.ctr.Decrement(id)
381 367
 		return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
382 368
 	}
383 369
 	// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
384 370
 	// user namespace requires this to move a directory from lower to upper.
385 371
 	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
386 372
 	if err != nil {
387
-		d.ctr.Decrement(id)
388
-		syscall.Unmount(mergedDir, 0)
389 373
 		return "", err
390 374
 	}
391
-
392 375
 	if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
393
-		d.ctr.Decrement(id)
394
-		syscall.Unmount(mergedDir, 0)
395 376
 		return "", err
396 377
 	}
397
-
398
-	d.pathCacheLock.Lock()
399
-	d.pathCache[id] = mergedDir
400
-	d.pathCacheLock.Unlock()
401
-
402 378
 	return mergedDir, nil
403 379
 }
404 380
 
405
-func (d *Driver) mounted(dir string) (bool, error) {
406
-	return graphdriver.Mounted(graphdriver.FsMagicOverlay, dir)
407
-}
408
-
409 381
 // Put unmounts the mount path created for the give id.
410 382
 func (d *Driver) Put(id string) error {
411
-	if count := d.ctr.Decrement(id); count > 0 {
383
+	mountpoint := path.Join(d.dir(id), "merged")
384
+	if count := d.ctr.Decrement(mountpoint); count > 0 {
412 385
 		return nil
413 386
 	}
414
-	d.pathCacheLock.Lock()
415
-	mountpoint, exists := d.pathCache[id]
416
-	d.pathCacheLock.Unlock()
417
-
418
-	if !exists {
419
-		logrus.Debugf("Put on a non-mounted device %s", id)
420
-		// but it might be still here
421
-		if d.Exists(id) {
422
-			mountpoint = path.Join(d.dir(id), "merged")
423
-		}
424
-
425
-		d.pathCacheLock.Lock()
426
-		d.pathCache[id] = mountpoint
427
-		d.pathCacheLock.Unlock()
428
-	}
429
-
430
-	if mounted, err := d.mounted(mountpoint); mounted || err != nil {
431
-		if err = syscall.Unmount(mountpoint, 0); err != nil {
432
-			logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
433
-		}
434
-		return err
387
+	if err := syscall.Unmount(mountpoint, 0); err != nil {
388
+		logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
435 389
 	}
436 390
 	return nil
437 391
 }
... ...
@@ -1,4 +1,4 @@
1
-// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd
1
+// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd !exclude_graphdriver_zfs,solaris
2 2
 
3 3
 package register
4 4
 
... ...
@@ -15,6 +15,7 @@ import (
15 15
 	"path/filepath"
16 16
 	"strconv"
17 17
 	"strings"
18
+	"sync"
18 19
 	"syscall"
19 20
 	"time"
20 21
 	"unsafe"
... ...
@@ -26,7 +27,6 @@ import (
26 26
 	"github.com/Sirupsen/logrus"
27 27
 	"github.com/docker/docker/daemon/graphdriver"
28 28
 	"github.com/docker/docker/pkg/archive"
29
-	"github.com/docker/docker/pkg/chrootarchive"
30 29
 	"github.com/docker/docker/pkg/idtools"
31 30
 	"github.com/docker/docker/pkg/ioutils"
32 31
 	"github.com/docker/docker/pkg/longpath"
... ...
@@ -35,28 +35,33 @@ import (
35 35
 	"github.com/vbatts/tar-split/tar/storage"
36 36
 )
37 37
 
38
+// filterDriver is an HCSShim driver type for the Windows Filter driver.
39
+const filterDriver = 1
40
+
38 41
 // init registers the windows graph drivers to the register.
39 42
 func init() {
40 43
 	graphdriver.Register("windowsfilter", InitFilter)
41
-	graphdriver.Register("windowsdiff", InitDiff)
42 44
 	reexec.Register("docker-windows-write-layer", writeLayer)
43 45
 }
44 46
 
45
-const (
46
-	// diffDriver is an hcsshim driver type
47
-	diffDriver = iota
48
-	// filterDriver is an hcsshim driver type
49
-	filterDriver
50
-)
47
+type checker struct {
48
+}
49
+
50
+func (c *checker) IsMounted(path string) bool {
51
+	return false
52
+}
51 53
 
52 54
 // Driver represents a windows graph driver.
53 55
 type Driver struct {
54 56
 	// info stores the shim driver information
55 57
 	info hcsshim.DriverInfo
58
+	ctr  *graphdriver.RefCounter
59
+	// it is safe for windows to use a cache here because it does not support
60
+	// restoring containers when the daemon dies.
61
+	cacheMu sync.Mutex
62
+	cache   map[string]string
56 63
 }
57 64
 
58
-var _ graphdriver.DiffGetterDriver = &Driver{}
59
-
60 65
 func isTP5OrOlder() bool {
61 66
 	return system.GetOSVersion().Build <= 14300
62 67
 }
... ...
@@ -69,32 +74,15 @@ func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap)
69 69
 			HomeDir: home,
70 70
 			Flavour: filterDriver,
71 71
 		},
72
-	}
73
-	return d, nil
74
-}
75
-
76
-// InitDiff returns a new Windows differencing disk driver.
77
-func InitDiff(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
78
-	logrus.Debugf("WindowsGraphDriver InitDiff at %s", home)
79
-	d := &Driver{
80
-		info: hcsshim.DriverInfo{
81
-			HomeDir: home,
82
-			Flavour: diffDriver,
83
-		},
72
+		cache: make(map[string]string),
73
+		ctr:   graphdriver.NewRefCounter(&checker{}),
84 74
 	}
85 75
 	return d, nil
86 76
 }
87 77
 
88 78
 // String returns the string representation of a driver.
89 79
 func (d *Driver) String() string {
90
-	switch d.info.Flavour {
91
-	case diffDriver:
92
-		return "windowsdiff"
93
-	case filterDriver:
94
-		return "windowsfilter"
95
-	default:
96
-		return "Unknown driver flavour"
97
-	}
80
+	return "Windows filter storage driver"
98 81
 }
99 82
 
100 83
 // Status returns the status of the driver.
... ...
@@ -238,17 +226,23 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
238 238
 	if err != nil {
239 239
 		return "", err
240 240
 	}
241
+	if count := d.ctr.Increment(rID); count > 1 {
242
+		return d.cache[rID], nil
243
+	}
241 244
 
242 245
 	// Getting the layer paths must be done outside of the lock.
243 246
 	layerChain, err := d.getLayerChain(rID)
244 247
 	if err != nil {
248
+		d.ctr.Decrement(rID)
245 249
 		return "", err
246 250
 	}
247 251
 
248 252
 	if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
253
+		d.ctr.Decrement(rID)
249 254
 		return "", err
250 255
 	}
251 256
 	if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
257
+		d.ctr.Decrement(rID)
252 258
 		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
253 259
 			logrus.Warnf("Failed to Deactivate %s: %s", id, err)
254 260
 		}
... ...
@@ -257,11 +251,15 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
257 257
 
258 258
 	mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
259 259
 	if err != nil {
260
+		d.ctr.Decrement(rID)
260 261
 		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
261 262
 			logrus.Warnf("Failed to Deactivate %s: %s", id, err)
262 263
 		}
263 264
 		return "", err
264 265
 	}
266
+	d.cacheMu.Lock()
267
+	d.cache[rID] = mountPath
268
+	d.cacheMu.Unlock()
265 269
 
266 270
 	// If the layer has a mount path, use that. Otherwise, use the
267 271
 	// folder path.
... ...
@@ -282,6 +280,12 @@ func (d *Driver) Put(id string) error {
282 282
 	if err != nil {
283 283
 		return err
284 284
 	}
285
+	if count := d.ctr.Decrement(rID); count > 0 {
286
+		return nil
287
+	}
288
+	d.cacheMu.Lock()
289
+	delete(d.cache, rID)
290
+	d.cacheMu.Unlock()
285 291
 
286 292
 	if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
287 293
 		return err
... ...
@@ -390,20 +394,6 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
390 390
 // new layer in bytes.
391 391
 // The layer should not be mounted when calling this function
392 392
 func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) {
393
-	if d.info.Flavour == diffDriver {
394
-		start := time.Now().UTC()
395
-		logrus.Debugf("WindowsGraphDriver ApplyDiff: Start untar layer")
396
-		destination := d.dir(id)
397
-		destination = filepath.Dir(destination)
398
-		size, err := chrootarchive.ApplyUncompressedLayer(destination, diff, nil)
399
-		if err != nil {
400
-			return 0, err
401
-		}
402
-		logrus.Debugf("WindowsGraphDriver ApplyDiff: Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
403
-
404
-		return size, nil
405
-	}
406
-
407 393
 	var layerChain []string
408 394
 	if parent != "" {
409 395
 		rPId, err := d.resolveID(parent)
... ...
@@ -1,4 +1,4 @@
1
-// +build linux freebsd
1
+// +build linux freebsd solaris
2 2
 
3 3
 package zfs
4 4
 
... ...
@@ -105,7 +105,7 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
105 105
 		filesystemsCache: filesystemsCache,
106 106
 		uidMaps:          uidMaps,
107 107
 		gidMaps:          gidMaps,
108
-		ctr:              graphdriver.NewRefCounter(),
108
+		ctr:              graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
109 109
 	}
110 110
 	return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
111 111
 }
... ...
@@ -307,7 +307,7 @@ func (d *Driver) Remove(id string) error {
307 307
 // Get returns the mountpoint for the given id after creating the target directories if necessary.
308 308
 func (d *Driver) Get(id, mountLabel string) (string, error) {
309 309
 	mountpoint := d.mountPath(id)
310
-	if count := d.ctr.Increment(id); count > 1 {
310
+	if count := d.ctr.Increment(mountpoint); count > 1 {
311 311
 		return mountpoint, nil
312 312
 	}
313 313
 
... ...
@@ -317,17 +317,17 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
317 317
 
318 318
 	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
319 319
 	if err != nil {
320
-		d.ctr.Decrement(id)
320
+		d.ctr.Decrement(mountpoint)
321 321
 		return "", err
322 322
 	}
323 323
 	// Create the target directories if they don't exist
324 324
 	if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil {
325
-		d.ctr.Decrement(id)
325
+		d.ctr.Decrement(mountpoint)
326 326
 		return "", err
327 327
 	}
328 328
 
329 329
 	if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil {
330
-		d.ctr.Decrement(id)
330
+		d.ctr.Decrement(mountpoint)
331 331
 		return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err)
332 332
 	}
333 333
 
... ...
@@ -335,7 +335,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
335 335
 	// permissions instead of the remapped root uid:gid (if user namespaces are enabled):
336 336
 	if err := os.Chown(mountpoint, rootUID, rootGID); err != nil {
337 337
 		mount.Unmount(mountpoint)
338
-		d.ctr.Decrement(id)
338
+		d.ctr.Decrement(mountpoint)
339 339
 		return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err)
340 340
 	}
341 341
 
... ...
@@ -344,10 +344,10 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
344 344
 
345 345
 // Put removes the existing mountpoint for the given id if it exists.
346 346
 func (d *Driver) Put(id string) error {
347
-	if count := d.ctr.Decrement(id); count > 0 {
347
+	mountpoint := d.mountPath(id)
348
+	if count := d.ctr.Decrement(mountpoint); count > 0 {
348 349
 		return nil
349 350
 	}
350
-	mountpoint := d.mountPath(id)
351 351
 	mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint)
352 352
 	if err != nil || !mounted {
353 353
 		return err
354 354
new file mode 100644
... ...
@@ -0,0 +1,59 @@
0
+// +build solaris,cgo
1
+
2
+package zfs
3
+
4
+/*
5
+#include <sys/statvfs.h>
6
+#include <stdlib.h>
7
+
8
+static inline struct statvfs *getstatfs(char *s) {
9
+        struct statvfs *buf;
10
+        int err;
11
+        buf = (struct statvfs *)malloc(sizeof(struct statvfs));
12
+        err = statvfs(s, buf);
13
+        return buf;
14
+}
15
+*/
16
+import "C"
17
+import (
18
+	"path/filepath"
19
+	"strings"
20
+	"unsafe"
21
+
22
+	log "github.com/Sirupsen/logrus"
23
+	"github.com/docker/docker/daemon/graphdriver"
24
+)
25
+
26
+func checkRootdirFs(rootdir string) error {
27
+
28
+	cs := C.CString(filepath.Dir(rootdir))
29
+	buf := C.getstatfs(cs)
30
+
31
+	// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
32
+	if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
33
+		(buf.f_basetype[3] != 0) {
34
+		log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
35
+		C.free(unsafe.Pointer(buf))
36
+		return graphdriver.ErrPrerequisites
37
+	}
38
+
39
+	C.free(unsafe.Pointer(buf))
40
+	C.free(unsafe.Pointer(cs))
41
+	return nil
42
+}
43
+
44
+/* rootfs is introduced to comply with the OCI spec
45
+which states that root filesystem must be mounted at <CID>/rootfs/ instead of <CID>/
46
+*/
47
+func getMountpoint(id string) string {
48
+	maxlen := 12
49
+
50
+	// we need to preserve filesystem suffix
51
+	suffix := strings.SplitN(id, "-", 2)
52
+
53
+	if len(suffix) > 1 {
54
+		return filepath.Join(id[:maxlen]+"-"+suffix[1], "rootfs", "root")
55
+	}
56
+
57
+	return filepath.Join(id[:maxlen], "rootfs", "root")
58
+}
... ...
@@ -1,4 +1,4 @@
1
-// +build !linux,!freebsd
1
+// +build !linux,!freebsd,!solaris
2 2
 
3 3
 package zfs
4 4
 
5 5
new file mode 100644
... ...
@@ -0,0 +1,124 @@
0
+package daemon
1
+
2
+import (
3
+	"fmt"
4
+
5
+	"github.com/docker/docker/builder"
6
+	"github.com/docker/docker/image"
7
+	"github.com/docker/docker/reference"
8
+	"github.com/docker/docker/runconfig"
9
+	containertypes "github.com/docker/engine-api/types/container"
10
+)
11
+
12
+// ErrImageDoesNotExist is error returned when no image can be found for a reference.
13
+type ErrImageDoesNotExist struct {
14
+	RefOrID string
15
+}
16
+
17
+func (e ErrImageDoesNotExist) Error() string {
18
+	return fmt.Sprintf("no such id: %s", e.RefOrID)
19
+}
20
+
21
+// GetImageID returns an image ID corresponding to the image referred to by
22
+// refOrID.
23
+func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) {
24
+	id, ref, err := reference.ParseIDOrReference(refOrID)
25
+	if err != nil {
26
+		return "", err
27
+	}
28
+	if id != "" {
29
+		if _, err := daemon.imageStore.Get(image.ID(id)); err != nil {
30
+			return "", ErrImageDoesNotExist{refOrID}
31
+		}
32
+		return image.ID(id), nil
33
+	}
34
+
35
+	if id, err := daemon.referenceStore.Get(ref); err == nil {
36
+		return id, nil
37
+	}
38
+	if tagged, ok := ref.(reference.NamedTagged); ok {
39
+		if id, err := daemon.imageStore.Search(tagged.Tag()); err == nil {
40
+			for _, namedRef := range daemon.referenceStore.References(id) {
41
+				if namedRef.Name() == ref.Name() {
42
+					return id, nil
43
+				}
44
+			}
45
+		}
46
+	}
47
+
48
+	// Search based on ID
49
+	if id, err := daemon.imageStore.Search(refOrID); err == nil {
50
+		return id, nil
51
+	}
52
+
53
+	return "", ErrImageDoesNotExist{refOrID}
54
+}
55
+
56
+// GetImage returns an image corresponding to the image referred to by refOrID.
57
+func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) {
58
+	imgID, err := daemon.GetImageID(refOrID)
59
+	if err != nil {
60
+		return nil, err
61
+	}
62
+	return daemon.imageStore.Get(imgID)
63
+}
64
+
65
+// GetImageOnBuild looks up a Docker image referenced by `name`.
66
+func (daemon *Daemon) GetImageOnBuild(name string) (builder.Image, error) {
67
+	img, err := daemon.GetImage(name)
68
+	if err != nil {
69
+		return nil, err
70
+	}
71
+	return img, nil
72
+}
73
+
74
+// GetCachedImage returns the most recent created image that is a child
75
+// of the image with imgID, that had the same config when it was
76
+// created. nil is returned if a child cannot be found. An error is
77
+// returned if the parent image cannot be found.
78
+func (daemon *Daemon) GetCachedImage(imgID image.ID, config *containertypes.Config) (*image.Image, error) {
79
+	// Loop on the children of the given image and check the config
80
+	getMatch := func(siblings []image.ID) (*image.Image, error) {
81
+		var match *image.Image
82
+		for _, id := range siblings {
83
+			img, err := daemon.imageStore.Get(id)
84
+			if err != nil {
85
+				return nil, fmt.Errorf("unable to find image %q", id)
86
+			}
87
+
88
+			if runconfig.Compare(&img.ContainerConfig, config) {
89
+				// check for the most up to date match
90
+				if match == nil || match.Created.Before(img.Created) {
91
+					match = img
92
+				}
93
+			}
94
+		}
95
+		return match, nil
96
+	}
97
+
98
+	// In this case, this is `FROM scratch`, which isn't an actual image.
99
+	if imgID == "" {
100
+		images := daemon.imageStore.Map()
101
+		var siblings []image.ID
102
+		for id, img := range images {
103
+			if img.Parent == imgID {
104
+				siblings = append(siblings, id)
105
+			}
106
+		}
107
+		return getMatch(siblings)
108
+	}
109
+
110
+	// find match from child images
111
+	siblings := daemon.imageStore.Children(imgID)
112
+	return getMatch(siblings)
113
+}
114
+
115
+// GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent`
116
+// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
117
+func (daemon *Daemon) GetCachedImageOnBuild(imgID string, cfg *containertypes.Config) (string, error) {
118
+	cache, err := daemon.GetCachedImage(image.ID(imgID), cfg)
119
+	if cache == nil || err != nil {
120
+		return "", err
121
+	}
122
+	return cache.ID().String(), nil
123
+}
0 124
new file mode 100644
... ...
@@ -0,0 +1,25 @@
0
+package daemon
1
+
2
+import (
3
+	"io"
4
+
5
+	"github.com/docker/docker/image/tarexport"
6
+)
7
+
8
+// ExportImage exports a list of images to the given output stream. The
9
+// exported images are archived into a tar when written to the output
10
+// stream. All images with the given tag and all versions containing
11
+// the same tag are exported. names is the set of tags to export, and
12
+// outStream is the writer which the images are written to.
13
+func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error {
14
+	imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon)
15
+	return imageExporter.Save(names, outStream)
16
+}
17
+
18
+// LoadImage uploads a set of images into the repository. This is the
19
+// complement of ImageExport.  The input stream is an uncompressed tar
20
+// ball containing images and metadata.
21
+func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error {
22
+	imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon)
23
+	return imageExporter.Load(inTar, outStream, quiet)
24
+}
0 25
new file mode 100644
... ...
@@ -0,0 +1,82 @@
0
+package daemon
1
+
2
+import (
3
+	"fmt"
4
+
5
+	"github.com/docker/docker/layer"
6
+	"github.com/docker/docker/reference"
7
+	"github.com/docker/engine-api/types"
8
+)
9
+
10
+// ImageHistory returns a slice of ImageHistory structures for the specified image
11
+// name by walking the image lineage.
12
+func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) {
13
+	img, err := daemon.GetImage(name)
14
+	if err != nil {
15
+		return nil, err
16
+	}
17
+
18
+	history := []*types.ImageHistory{}
19
+
20
+	layerCounter := 0
21
+	rootFS := *img.RootFS
22
+	rootFS.DiffIDs = nil
23
+
24
+	for _, h := range img.History {
25
+		var layerSize int64
26
+
27
+		if !h.EmptyLayer {
28
+			if len(img.RootFS.DiffIDs) <= layerCounter {
29
+				return nil, fmt.Errorf("too many non-empty layers in History section")
30
+			}
31
+
32
+			rootFS.Append(img.RootFS.DiffIDs[layerCounter])
33
+			l, err := daemon.layerStore.Get(rootFS.ChainID())
34
+			if err != nil {
35
+				return nil, err
36
+			}
37
+			layerSize, err = l.DiffSize()
38
+			layer.ReleaseAndLog(daemon.layerStore, l)
39
+			if err != nil {
40
+				return nil, err
41
+			}
42
+
43
+			layerCounter++
44
+		}
45
+
46
+		history = append([]*types.ImageHistory{{
47
+			ID:        "<missing>",
48
+			Created:   h.Created.Unix(),
49
+			CreatedBy: h.CreatedBy,
50
+			Comment:   h.Comment,
51
+			Size:      layerSize,
52
+		}}, history...)
53
+	}
54
+
55
+	// Fill in image IDs and tags
56
+	histImg := img
57
+	id := img.ID()
58
+	for _, h := range history {
59
+		h.ID = id.String()
60
+
61
+		var tags []string
62
+		for _, r := range daemon.referenceStore.References(id) {
63
+			if _, ok := r.(reference.NamedTagged); ok {
64
+				tags = append(tags, r.String())
65
+			}
66
+		}
67
+
68
+		h.Tags = tags
69
+
70
+		id = histImg.Parent
71
+		if id == "" {
72
+			break
73
+		}
74
+		histImg, err = daemon.GetImage(id.String())
75
+		if err != nil {
76
+			break
77
+		}
78
+	}
79
+
80
+	return history, nil
81
+}
0 82
new file mode 100644
... ...
@@ -0,0 +1,81 @@
0
+package daemon
1
+
2
+import (
3
+	"fmt"
4
+	"time"
5
+
6
+	"github.com/docker/docker/layer"
7
+	"github.com/docker/docker/reference"
8
+	"github.com/docker/engine-api/types"
9
+)
10
+
11
+// LookupImage looks up an image by name and returns it as an ImageInspect
12
+// structure.
13
+func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
14
+	img, err := daemon.GetImage(name)
15
+	if err != nil {
16
+		return nil, fmt.Errorf("No such image: %s", name)
17
+	}
18
+
19
+	refs := daemon.referenceStore.References(img.ID())
20
+	repoTags := []string{}
21
+	repoDigests := []string{}
22
+	for _, ref := range refs {
23
+		switch ref.(type) {
24
+		case reference.NamedTagged:
25
+			repoTags = append(repoTags, ref.String())
26
+		case reference.Canonical:
27
+			repoDigests = append(repoDigests, ref.String())
28
+		}
29
+	}
30
+
31
+	var size int64
32
+	var layerMetadata map[string]string
33
+	layerID := img.RootFS.ChainID()
34
+	if layerID != "" {
35
+		l, err := daemon.layerStore.Get(layerID)
36
+		if err != nil {
37
+			return nil, err
38
+		}
39
+		defer layer.ReleaseAndLog(daemon.layerStore, l)
40
+		size, err = l.Size()
41
+		if err != nil {
42
+			return nil, err
43
+		}
44
+
45
+		layerMetadata, err = l.Metadata()
46
+		if err != nil {
47
+			return nil, err
48
+		}
49
+	}
50
+
51
+	comment := img.Comment
52
+	if len(comment) == 0 && len(img.History) > 0 {
53
+		comment = img.History[len(img.History)-1].Comment
54
+	}
55
+
56
+	imageInspect := &types.ImageInspect{
57
+		ID:              img.ID().String(),
58
+		RepoTags:        repoTags,
59
+		RepoDigests:     repoDigests,
60
+		Parent:          img.Parent.String(),
61
+		Comment:         comment,
62
+		Created:         img.Created.Format(time.RFC3339Nano),
63
+		Container:       img.Container,
64
+		ContainerConfig: &img.ContainerConfig,
65
+		DockerVersion:   img.DockerVersion,
66
+		Author:          img.Author,
67
+		Config:          img.Config,
68
+		Architecture:    img.Architecture,
69
+		Os:              img.OS,
70
+		Size:            size,
71
+		VirtualSize:     size, // TODO: field unused, deprecate
72
+		RootFS:          rootFSToAPIType(img.RootFS),
73
+	}
74
+
75
+	imageInspect.GraphDriver.Name = daemon.GraphDriverName()
76
+
77
+	imageInspect.GraphDriver.Data = layerMetadata
78
+
79
+	return imageInspect, nil
80
+}
0 81
new file mode 100644
... ...
@@ -0,0 +1,40 @@
0
+package daemon
1
+
2
+import (
3
+	"github.com/docker/docker/api/types/backend"
4
+	"github.com/docker/docker/container"
5
+	"github.com/docker/docker/daemon/exec"
6
+	"github.com/docker/engine-api/types"
7
+)
8
+
9
+// This sets platform-specific fields
10
+func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
11
+	return contJSONBase
12
+}
13
+
14
+// containerInspectPre120 get containers for pre 1.20 APIs.
15
+func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) {
16
+	return daemon.containerInspectCurrent(name, false)
17
+}
18
+
19
+func addMountPoints(container *container.Container) []types.MountPoint {
20
+	mountPoints := make([]types.MountPoint, 0, len(container.MountPoints))
21
+	for _, m := range container.MountPoints {
22
+		mountPoints = append(mountPoints, types.MountPoint{
23
+			Name:        m.Name,
24
+			Source:      m.Path(),
25
+			Destination: m.Destination,
26
+			Driver:      m.Driver,
27
+			RW:          m.RW,
28
+		})
29
+	}
30
+	return mountPoints
31
+}
32
+
33
+func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig {
34
+	return &backend.ExecProcessConfig{
35
+		Tty:        e.Tty,
36
+		Entrypoint: e.Entrypoint,
37
+		Arguments:  e.Args,
38
+	}
39
+}
... ...
@@ -1,4 +1,4 @@
1
-// +build !windows
1
+// +build !windows,!solaris
2 2
 
3 3
 package daemon
4 4
 
... ...
@@ -1,4 +1,4 @@
1
-// +build linux freebsd
1
+// +build linux freebsd solaris
2 2
 
3 3
 package daemon
4 4
 
5 5
new file mode 100644
... ...
@@ -0,0 +1,18 @@
0
+package daemon
1
+
2
+import (
3
+	"github.com/docker/docker/container"
4
+	"github.com/docker/docker/libcontainerd"
5
+)
6
+
7
+// platformConstructExitStatus returns a platform specific exit status structure
8
+func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus {
9
+	return &container.ExitStatus{
10
+		ExitCode: int(e.ExitCode),
11
+	}
12
+}
13
+
14
+// postRunProcessing performs any processing needed on the container after it has stopped.
15
+func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error {
16
+	return nil
17
+}
0 18
new file mode 100644
... ...
@@ -0,0 +1,12 @@
0
+package daemon
1
+
2
+import (
3
+	"github.com/docker/docker/container"
4
+	"github.com/docker/docker/libcontainerd"
5
+	"github.com/docker/docker/oci"
6
+)
7
+
8
+func (daemon *Daemon) createSpec(c *container.Container) (*libcontainerd.Spec, error) {
9
+	s := oci.DefaultSpec()
10
+	return (*libcontainerd.Spec)(&s), nil
11
+}
... ...
@@ -35,7 +35,7 @@ func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error {
35 35
 			return err
36 36
 		}
37 37
 	} else {
38
-		profile, err = seccomp.GetDefaultProfile()
38
+		profile, err = seccomp.GetDefaultProfile(rs)
39 39
 		if err != nil {
40 40
 			return err
41 41
 		}
42 42
new file mode 100644
... ...
@@ -0,0 +1,34 @@
0
+package daemon
1
+
2
+import (
3
+	"github.com/docker/docker/container"
4
+	"time"
5
+)
6
+
7
+// newStatsCollector returns a new statsCollector for collecting stats
8
+// for a registered container at the specified interval. The collector allows
9
+// non-running containers to be added and will start processing stats when
10
+// they are started.
11
+func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector {
12
+	return &statsCollector{}
13
+}
14
+
15
+// statsCollector manages and provides container resource stats
16
+type statsCollector struct {
17
+}
18
+
19
+// collect registers the container with the collector and adds it to
20
+// the event loop for collection on the specified interval returning
21
+// a channel for the subscriber to receive on.
22
+func (s *statsCollector) collect(c *container.Container) chan interface{} {
23
+	return nil
24
+}
25
+
26
+// stopCollection closes the channels for all subscribers and removes
27
+// the container from metrics collection.
28
+func (s *statsCollector) stopCollection(c *container.Container) {
29
+}
30
+
31
+// unsubscribe removes a specific subscriber from receiving updates for a container's stats.
32
+func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) {
33
+}
... ...
@@ -1,4 +1,4 @@
1
-// +build !windows
1
+// +build !windows,!solaris
2 2
 
3 3
 package daemon
4 4
 
5 5
new file mode 100644
... ...
@@ -0,0 +1,11 @@
0
+package daemon
1
+
2
+import (
3
+	"github.com/docker/docker/libcontainerd"
4
+	"github.com/docker/engine-api/types/container"
5
+)
6
+
7
+func toContainerdResources(resources container.Resources) libcontainerd.Resources {
8
+	var r libcontainerd.Resources
9
+	return r
10
+}
... ...
@@ -121,10 +121,6 @@ func (ls *mockLayerStore) GetMountID(string) (string, error) {
121 121
 	return "", errors.New("not implemented")
122 122
 }
123 123
 
124
-func (ls *mockLayerStore) ReinitRWLayer(layer.RWLayer) error {
125
-	return errors.New("not implemented")
126
-}
127
-
128 124
 func (ls *mockLayerStore) Cleanup() error {
129 125
 	return nil
130 126
 }
... ...
@@ -23,9 +23,8 @@ HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER
23 23
 HUGO_BIND_IP=0.0.0.0
24 24
 
25 25
 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
26
-DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
27
-DOCKER_DOCS_IMAGE := docs-base$(if $(GIT_BRANCH),:$(GIT_BRANCH))
28
-
26
+GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
27
+DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
29 28
 
30 29
 DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE
31 30
 
... ...
@@ -22,6 +22,13 @@ Unfortunately, Docker is a fast moving project, and newly introduced features
22 22
 may sometime introduce breaking changes and/or incompatibilities. This page
23 23
 documents these by Engine version.
24 24
 
25
+# Engine 1.12
26
+
27
+Docker clients <= 1.9.2 used an invalid Host header when making request to the
28
+daemon. Docker 1.12 is built using golang 1.6 which is now checking the validity
29
+of the Host header and as such clients <= 1.9.2 can't talk anymore to the daemon. 
30
+[An environment variable was added to overcome this issue.](reference/commandline/dockerd.md#miscellaneous-options)
31
+
25 32
 # Engine 1.10
26 33
 
27 34
 There were two breaking changes in the 1.10 release.
... ...
@@ -58,6 +58,15 @@ defining it at container creation (`POST /containers/create`).
58 58
 The `docker ps --before` and `docker ps --since` options are deprecated.
59 59
 Use `docker ps --filter=before=...` and `docker ps --filter=since=...` instead.
60 60
 
61
+### Docker search 'automated' and 'stars' options
62
+
63
+**Deprecated in Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
64
+
65
+**Removed In Release: v1.14**
66
+
67
+The `docker search --automated` and `docker search --stars` options are deprecated.
68
+Use `docker search --filter=is-automated=...` and `docker search --filter=stars=...` instead.
69
+
61 70
 ### Command line short variant options
62 71
 **Deprecated In Release: v1.9**
63 72
 
... ...
@@ -118,6 +118,9 @@ This section lists each version from latest to oldest.  Each listing includes a
118 118
 * `POST /containers/create` now takes `MaximumIOps` and `MaximumIOBps` fields. Windows daemon only.
119 119
 * `POST /containers/create` now returns a HTTP 400 "bad parameter" message
120 120
   if no command is specified (instead of a HTTP 500 "server error")
121
+* `GET /images/search` now takes a `filters` query parameter.
122
+* `GET /events` now supports a `reload` event that is emitted when the daemon configuration is reloaded.
123
+* `GET /events` now supports filtering by daemon name or ID.
121 124
 
122 125
 ### v1.23 API changes
123 126
 
... ...
@@ -2133,6 +2133,10 @@ Search for an image on [Docker Hub](https://hub.docker.com).
2133 2133
 Query Parameters:
2134 2134
 
2135 2135
 -   **term** – term to search
2136
+-   **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters:
2137
+  -   `stars=<number>`
2138
+  -   `is-automated=(true|false)`
2139
+  -   `is-official=(true|false)`
2136 2140
 
2137 2141
 Status Codes:
2138 2142
 
... ...
@@ -2416,6 +2420,10 @@ Docker networks report the following events:
2416 2416
 
2417 2417
     create, connect, disconnect, destroy
2418 2418
 
2419
+The Docker daemon reports the following event:
2420
+
2421
+    reload
2422
+
2419 2423
 **Example request**:
2420 2424
 
2421 2425
     GET /events?since=1374067924
... ...
@@ -2585,9 +2593,10 @@ Query Parameters:
2585 2585
   -   `event=<string>`; -- event to filter
2586 2586
   -   `image=<string>`; -- image to filter
2587 2587
   -   `label=<string>`; -- image and container label to filter
2588
-  -   `type=<string>`; -- either `container` or `image` or `volume` or `network`
2588
+  -   `type=<string>`; -- either `container` or `image` or `volume` or `network` or `daemon`
2589 2589
   -   `volume=<string>`; -- volume to filter
2590 2590
   -   `network=<string>`; -- network to filter
2591
+  -   `daemon=<string>`; -- daemon name or id to filter
2591 2592
 
2592 2593
 Status Codes:
2593 2594
 
... ...
@@ -362,6 +362,15 @@ RUN /bin/bash -c 'source $HOME/.bashrc ; echo $HOME'
362 362
 > `RUN [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`.
363 363
 > If you want shell processing then either use the *shell* form or execute
364 364
 > a shell directly, for example: `RUN [ "sh", "-c", "echo $HOME" ]`.
365
+>
366
+> **Note**:
367
+> In the *JSON* form, it is necessary to escape backslashes. This is
368
+> particularly relevant on Windows where the backslash is the path separator.
369
+> The following line would otherwise be treated as *shell* form due to not
370
+> being valid JSON, and fail in an unexpected way:
371
+> `RUN ["c:\windows\system32\tasklist.exe"]`
372
+> The correct syntax for this example is:
373
+> `RUN ["c:\\windows\\system32\\tasklist.exe"]`
365 374
 
366 375
 The cache for `RUN` instructions isn't invalidated automatically during
367 376
 the next build. The cache for an instruction like
... ...
@@ -225,7 +225,8 @@ uploaded context. The builder reference contains detailed information on
225 225
     $ docker build -t vieux/apache:2.0 .
226 226
 
227 227
 This will build like the previous example, but it will then tag the resulting
228
-image. The repository name will be `vieux/apache` and the tag will be `2.0`
228
+image. The repository name will be `vieux/apache` and the tag will be `2.0`.
229
+[Read more about valid tags](tag.md).
229 230
 
230 231
 You can apply multiple tags to an image. For example, you can apply the `latest`
231 232
 tag to a newly built image and add another tag that references a specific
... ...
@@ -298,6 +299,9 @@ accessed like regular environment variables in the `RUN` instruction of the
298 298
 Dockerfile. Also, these values don't persist in the intermediate or final images
299 299
 like `ENV` values do.
300 300
 
301
+Using this flag will not alter the output you see when the `ARG` lines from the
302
+Dockerfile are echoed during the build process.
303
+
301 304
 For detailed information on using `ARG` and `ENV` instructions, see the
302 305
 [Dockerfile reference](../builder.md).
303 306
 
... ...
@@ -24,6 +24,7 @@ It can be useful to commit a container's file changes or settings into a new
24 24
 image. This allows you debug a container by running an interactive shell, or to
25 25
 export a working dataset to another server. Generally, it is better to use
26 26
 Dockerfiles to manage your images in a documented and maintainable way.
27
+[Read more about valid image names and tags](tag.md).
27 28
 
28 29
 The commit operation will not include any data contained in
29 30
 volumes mounted inside the container.
... ...
@@ -81,7 +81,17 @@ you must be explicit with a relative or absolute path, for example:
81 81
     `/path/to/file:name.txt` or `./file:name.txt`
82 82
 
83 83
 It is not possible to copy certain system files such as resources under
84
-`/proc`, `/sys`, `/dev`, and mounts created by the user in the container.
84
+`/proc`, `/sys`, `/dev`, [tmpfs](run.md#mount-tmpfs-tmpfs), and mounts created by
85
+the user in the container. However, you can still copy such files by manually
86
+running `tar` in `docker exec`. For example (consider `SRC_PATH` and `DEST_PATH`
87
+are directories):
88
+
89
+    $ docker exec foo tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH -
90
+
91
+or
92
+
93
+    $ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i foo tar Cxf DEST_PATH -
94
+
85 95
 
86 96
 Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive.
87 97
 The command extracts the content of the tar to the `DEST_PATH` in container's
... ...
@@ -164,7 +164,8 @@ Linux namespaces. On Microsoft Windows, you can specify these values:
164 164
 
165 165
 | Value     | Description                                                                                                                                                   |
166 166
 |-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------|
167
-| `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value.  |
167
+| `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value if the
168
+daemon is running on Windows server, or `hyperv` if running on Windows client.  |
168 169
 | `process` | Namespace isolation only.                                                                                                                                     |
169 170
 | `hyperv`   | Hyper-V hypervisor partition-based isolation.                                                                                                                  |
170 171
 
... ...
@@ -527,8 +527,9 @@ can specify default container isolation technology with this, for example:
527 527
 
528 528
     $ dockerd --exec-opt isolation=hyperv
529 529
 
530
-Will make `hyperv` the default isolation technology on Windows, without specifying
531
-isolation value on daemon start, Windows isolation technology will default to `process`.
530
+Will make `hyperv` the default isolation technology on Windows. If no isolation
531
+value is specified on daemon start, on Windows client, the default is
532
+`hyperv`, and on Windows server, the default is `process`. 
532 533
 
533 534
 ## Daemon DNS options
534 535
 
... ...
@@ -849,6 +850,19 @@ set like this:
849 849
     export DOCKER_TMPDIR=/mnt/disk2/tmp
850 850
     /usr/local/bin/dockerd -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1
851 851
 
852
+Docker clients <= 1.9.2 used an invalid Host header when making request to the
853
+daemon. Docker 1.12 is built using golang 1.6 which is now checking the validity
854
+of the Host header and as such clients <= 1.9.2 can't talk anymore to the daemon.
855
+Docker supports overcoming this issue via a Docker daemon
856
+environment variable. In case you are seeing this error when contacting the
857
+daemon:
858
+
859
+    Error response from daemon: 400 Bad Request: malformed Host header
860
+
861
+The `DOCKER_HTTP_HOST_COMPAT` can be set like this:
862
+
863
+    DOCKER_HTTP_HOST_COMPAT=1 /usr/local/bin/dockerd ...
864
+
852 865
 
853 866
 ## Default cgroup parent
854 867
 
... ...
@@ -925,7 +939,7 @@ This is a full example of the allowed configuration options in the file:
925 925
 	"tlscacert": "",
926 926
 	"tlscert": "",
927 927
 	"tlskey": "",
928
-	"api-cors-headers": "",
928
+	"api-cors-header": "",
929 929
 	"selinux-enabled": false,
930 930
 	"userns-remap": "",
931 931
 	"group": "",
... ...
@@ -934,7 +948,7 @@ This is a full example of the allowed configuration options in the file:
934 934
 	"ipv6": false,
935 935
 	"iptables": false,
936 936
 	"ip-forward": false,
937
-	"ip-mask": false,
937
+	"ip-masq": false,
938 938
 	"userland-proxy": false,
939 939
 	"ip": "0.0.0.0",
940 940
 	"bridge": "",
... ...
@@ -35,6 +35,10 @@ Docker networks report the following events:
35 35
 
36 36
     create, connect, disconnect, destroy
37 37
 
38
+The Docker daemon reports the following events:
39
+
40
+    reload
41
+
38 42
 The `--since` and `--until` parameters can be Unix timestamps, date formatted
39 43
 timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed
40 44
 relative to the client machine’s time. If you do not provide the `--since` option,
... ...
@@ -68,9 +72,10 @@ The currently supported filters are:
68 68
 * event (`event=<event action>`)
69 69
 * image (`image=<tag or id>`)
70 70
 * label (`label=<key>` or `label=<key>=<value>`)
71
-* type (`type=<container or image or volume or network>`)
71
+* type (`type=<container or image or volume or network or daemon>`)
72 72
 * volume (`volume=<name or id>`)
73 73
 * network (`network=<name or id>`)
74
+* daemon (`daemon=<name or id>`)
74 75
 
75 76
 ## Examples
76 77
 
... ...
@@ -19,6 +19,7 @@ parent = "smn_cli"
19 19
 
20 20
 Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com)
21 21
 registry or to a self-hosted one.
22
+[Read more about valid image names and tags](tag.md).
22 23
 
23 24
 Killing the `docker push` process, for example by pressing `CTRL-c` while it is
24 25
 running in a terminal, will terminate the push operation.
... ...
@@ -618,14 +618,16 @@ On Microsoft Windows, can take any of these values:
618 618
 | `process` | Namespace isolation only.                                                                                                                                     |
619 619
 | `hyperv`   | Hyper-V hypervisor partition-based isolation.                                                                                                                  |
620 620
 
621
-In practice, when running on Microsoft Windows without a `daemon` option set,  these two commands are equivalent:
622
-
621
+On Windows, the default isolation for client is `hyperv`, and for server is
622
+`process`. Therefore when running on Windows server without a `daemon` option 
623
+set, these two commands are equivalent:
623 624
 ```
624 625
 $ docker run -d --isolation default busybox top
625 626
 $ docker run -d --isolation process busybox top
626 627
 ```
627 628
 
628
-If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, any of these commands also result in `hyperv` isolation:
629
+If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, 
630
+if running on Windows server, any of these commands also result in `hyperv` isolation:
629 631
 
630 632
 ```
631 633
 $ docker run -d --isolation default busybox top
... ...
@@ -14,10 +14,12 @@ parent = "smn_cli"
14 14
 
15 15
     Search the Docker Hub for images
16 16
 
17
-      --automated          Only show automated builds
17
+      --filter=[]          Filter output based on these conditions:
18
+                           - is-automated=(true|false)
19
+                           - is-official=(true|false)
20
+                           - stars=<number> - image has at least 'number' stars
18 21
       --help               Print usage
19 22
       --no-trunc           Don't truncate output
20
-      -s, --stars=0        Only displays with at least x stars
21 23
 
22 24
 Search [Docker Hub](https://hub.docker.com) for images
23 25
 
... ...
@@ -61,37 +63,59 @@ This example displays images with a name containing 'busybox':
61 61
     scottabernethy/busybox                                                           0                    [OK]
62 62
     marclop/busybox-solr
63 63
 
64
-### Search images by name and number of stars (-s, --stars)
64
+### Display non-truncated description (--no-trunc)
65
+
66
+This example displays images with a name containing 'busybox',
67
+at least 3 stars and the description isn't truncated in the output:
68
+
69
+    $ docker search --stars=3 --no-trunc busybox
70
+    NAME                 DESCRIPTION                                                                               STARS     OFFICIAL   AUTOMATED
71
+    busybox              Busybox base image.                                                                       325       [OK]       
72
+    progrium/busybox                                                                                               50                   [OK]
73
+    radial/busyboxplus   Full-chain, Internet enabled, busybox made from scratch. Comes in git and cURL flavors.   8                    [OK]
74
+
75
+## Filtering
76
+
77
+The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more
78
+than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`)
79
+
80
+The currently supported filters are:
81
+
82
+* stars (int - number of stars the image has)
83
+* is-automated (true|false) - is the image automated or not
84
+* is-official (true|false) - is the image official or not
85
+
86
+
87
+### stars
65 88
 
66 89
 This example displays images with a name containing 'busybox' and at
67 90
 least 3 stars:
68 91
 
69
-    $ docker search --stars=3 busybox
92
+    $ docker search --filter stars=3 busybox
70 93
     NAME                 DESCRIPTION                                     STARS     OFFICIAL   AUTOMATED
71 94
     busybox              Busybox base image.                             325       [OK]       
72 95
     progrium/busybox                                                     50                   [OK]
73 96
     radial/busyboxplus   Full-chain, Internet enabled, busybox made...   8                    [OK]
74 97
 
75 98
 
76
-### Search automated images (--automated)
99
+### is-automated
77 100
 
78
-This example displays images with a name containing 'busybox', at
79
-least 3 stars and are automated builds:
101
+This example displays images with a name containing 'busybox'
102
+and are automated builds:
80 103
 
81
-    $ docker search --stars=3 --automated busybox
104
+    $ docker search --filter is-automated busybox
82 105
     NAME                 DESCRIPTION                                     STARS     OFFICIAL   AUTOMATED
83 106
     progrium/busybox                                                     50                   [OK]
84 107
     radial/busyboxplus   Full-chain, Internet enabled, busybox made...   8                    [OK]
85 108
 
109
+### is-official
86 110
 
87
-### Display non-truncated description (--no-trunc)
111
+This example displays images with a name containing 'busybox', at least
112
+3 stars and are official builds:
88 113
 
89
-This example displays images with a name containing 'busybox',
90
-at least 3 stars and the description isn't truncated in the output:
114
+    $ docker search --filter "is-automated=true" --filter "stars=3" busybox
115
+    NAME                 DESCRIPTION                                     STARS     OFFICIAL   AUTOMATED
116
+    progrium/busybox                                                     50                   [OK]
117
+    radial/busyboxplus   Full-chain, Internet enabled, busybox made...   8                    [OK]
91 118
 
92
-    $ docker search --stars=3 --no-trunc busybox
93
-    NAME                 DESCRIPTION                                                                               STARS     OFFICIAL   AUTOMATED
94
-    busybox              Busybox base image.                                                                       325       [OK]       
95
-    progrium/busybox                                                                                               50                   [OK]
96
-    radial/busyboxplus   Full-chain, Internet enabled, busybox made from scratch. Comes in git and cURL flavors.   8                    [OK]
97 119
 
... ...
@@ -10,11 +10,57 @@ parent = "smn_cli"
10 10
 
11 11
 # tag
12 12
 
13
-    Usage: docker tag [OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]
13
+    Usage: docker tag [OPTIONS] NAME[:TAG] NAME[:TAG]
14 14
 
15 15
     Tag an image into a repository
16 16
 
17 17
       --help               Print usage
18 18
 
19
+An image name is made up of slash-separated name components, optionally prefixed
20
+by a registry hostname. The hostname must comply with standard DNS rules, but
21
+may not contain underscores. If a hostname is present, it may optionally be
22
+followed by a port number in the format `:8080`. If not present, the command 
23
+uses Docker's public registry located at `registry-1.docker.io` by default. Name 
24
+components may contain lowercase characters, digits and separators. A separator 
25
+is defined as a period, one or two underscores, or one or more dashes. A name 
26
+component may not start or end with a separator.
27
+
28
+A tag name may contain lowercase and uppercase characters, digits, underscores,
29
+periods and dashes. A tag name may not start with a period or a dash and may
30
+contain a maximum of 128 characters.
31
+
19 32
 You can group your images together using names and tags, and then upload them
20 33
 to [*Share Images via Repositories*](../../userguide/containers/dockerrepos.md#contributing-to-docker-hub).
34
+
35
+# Examples
36
+
37
+## Tagging an image referenced by ID
38
+
39
+To tag a local image with ID "0e5574283393" into the "fedora" repository with 
40
+"version1.0":
41
+
42
+    docker tag 0e5574283393 fedora/httpd:version1.0
43
+
44
+## Tagging an image referenced by Name
45
+
46
+To tag a local image with name "httpd" into the "fedora" repository with 
47
+"version1.0":
48
+
49
+    docker tag httpd fedora/httpd:version1.0
50
+
51
+Note that since the tag name is not specified, the alias is created for an
52
+existing local version `httpd:latest`.
53
+
54
+## Tagging an image referenced by Name and Tag
55
+
56
+To tag a local image with name "httpd" and tag "test" into the "fedora"
57
+repository with "version1.0.test":
58
+
59
+    docker tag httpd:test fedora/httpd:version1.0.test
60
+
61
+## Tagging an image for a private repository
62
+
63
+To push an image to a private registry and not the central Docker
64
+registry you must tag it with the registry hostname and port (if needed).
65
+
66
+    docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0
... ...
@@ -630,15 +630,12 @@ with the same logic -- if the original volume was specified with a name it will
630 630
 
631 631
 
632 632
 You can override the default labeling scheme for each container by specifying
633
-the `--security-opt` flag. For example, you can specify the MCS/MLS level, a
634
-requirement for MLS systems. Specifying the level in the following command
633
+the `--security-opt` flag. Specifying the level in the following command
635 634
 allows you to share the same content between containers.
636 635
 
637 636
     $ docker run --security-opt label=level:s0:c100,c200 -it fedora bash
638 637
 
639
-An MLS example might be:
640
-
641
-    $ docker run --security-opt label=level:TopSecret -it rhel7 bash
638
+> **Note**: Automatic translation of MLS labels is not currently supported.
642 639
 
643 640
 To disable the security labeling for this container versus running with the
644 641
 `--permissive` flag, use the following command:
... ...
@@ -1089,14 +1086,6 @@ one can use this flag:
1089 1089
     --privileged=false: Give extended privileges to this container
1090 1090
     --device=[]: Allows you to run devices inside the container without the --privileged flag.
1091 1091
 
1092
-> **Note:**
1093
-> With Docker 1.10 and greater, the default seccomp profile will also block
1094
-> syscalls, regardless of `--cap-add` passed to the container. We recommend in
1095
-> these cases to create your own custom seccomp profile based off our
1096
-> [default](https://github.com/docker/docker/blob/master/profiles/seccomp/default.json).
1097
-> Or if you don't want to run with the default seccomp profile, you can pass
1098
-> `--security-opt=seccomp=unconfined` on run.
1099
-
1100 1092
 By default, Docker containers are "unprivileged" and cannot, for
1101 1093
 example, run a Docker daemon inside a Docker container. This is because
1102 1094
 by default a container is not allowed to access any devices, but a
... ...
@@ -1214,6 +1203,11 @@ To mount a FUSE based filesystem, you need to combine both `--cap-add` and
1214 1214
     -rw-rw-r-- 1 1000 1000    461 Dec  4 06:08 .gitignore
1215 1215
     ....
1216 1216
 
1217
+The default seccomp profile will adjust to the selected capabilities, in order to allow
1218
+use of facilities allowed by the capabilities, so you should not have to adjust this,
1219
+since Docker 1.12. In Docker 1.10 and 1.11 this did not happen and it may be necessary
1220
+to use a custom seccomp profile or use `--security-opt seccomp=unconfined` when adding
1221
+capabilities.
1217 1222
 
1218 1223
 ## Logging drivers (--log-driver)
1219 1224
 
... ...
@@ -1451,7 +1445,7 @@ The `host-src` can either be an absolute path or a `name` value. If you
1451 1451
 supply an absolute path for the `host-dir`, Docker bind-mounts to the path
1452 1452
 you specify. If you supply a `name`, Docker creates a named volume by that `name`.
1453 1453
 
1454
-A `name` value must start with start with an alphanumeric character,
1454
+A `name` value must start with an alphanumeric character,
1455 1455
 followed by `a-z0-9`, `_` (underscore), `.` (period) or `-` (hyphen).
1456 1456
 An absolute path starts with a `/` (forward slash).
1457 1457
 
... ...
@@ -37,6 +37,9 @@ or to turn it on manually:
37 37
   net.ipv4.conf.all.forwarding = 1
38 38
 ```
39 39
 
40
+> **Note**: this setting does not affect containers that use the host
41
+> network stack (`--net=host`).
42
+
40 43
 Many using Docker will want `ip_forward` to be on, to at least make
41 44
 communication _possible_ between containers and the wider world. May also be
42 45
 needed for inter-container communication if you are in a multiple bridge setup.
... ...
@@ -57,7 +57,7 @@ docker0   Link encap:Ethernet  HWaddr 02:42:47:bc:3a:eb
57 57
           RX bytes:1100 (1.1 KB)  TX bytes:648 (648.0 B)
58 58
 ```
59 59
 
60
-The `none` network adds a container to a container-specific network stack. That container lacks a network interface. Attaching to such a container and looking at it's stack you see this:
60
+The `none` network adds a container to a container-specific network stack. That container lacks a network interface. Attaching to such a container and looking at its stack you see this:
61 61
 
62 62
 ```
63 63
 $ docker attach nonenetcontainer
... ...
@@ -93,7 +93,7 @@ if command -v git &> /dev/null && [ -d .git ] && git rev-parse &> /dev/null; the
93 93
 		git status --porcelain --untracked-files=no
94 94
 		echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
95 95
 	fi
96
-	! BUILDTIME=$(date --rfc-3339 ns | sed -e 's/ /T/') &> /dev/null
96
+	! BUILDTIME=$(date --rfc-3339 ns 2> /dev/null | sed -e 's/ /T/') &> /dev/null
97 97
 	if [ -z $BUILDTIME ]; then
98 98
 		# If using bash 3.1 which doesn't support --rfc-3389, eg Windows CI
99 99
 		BUILDTIME=$(date -u)
... ...
@@ -113,6 +113,12 @@ if [ "$AUTO_GOPATH" ]; then
113 113
 	mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")"
114 114
 	ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}"
115 115
 	export GOPATH="${PWD}/.gopath:${PWD}/vendor"
116
+
117
+	if [ "$(go env GOOS)" = 'solaris' ]; then
118
+		# sys/unix is installed outside the standard library on solaris
119
+		# TODO need to allow for version change, need to get version from go
120
+		export GOPATH="${GOPATH}:/usr/lib/gocode/1.5"
121
+	fi
116 122
 fi
117 123
 
118 124
 if [ ! "$GOPATH" ]; then
... ...
@@ -1,10 +1,46 @@
1 1
 #!/usr/bin/env bash
2 2
 set -e
3 3
 
4
+# this script is used to update vendored dependencies
5
+#
6
+# Usage:
7
+# vendor.sh revendor all dependencies
8
+# vendor.sh github.com/docker/engine-api revendor only the engine-api dependency.
9
+# vendor.sh github.com/docker/engine-api v0.3.3 vendor only engine-api at the specified tag/commit.
10
+# vendor.sh git github.com/docker/engine-api v0.3.3 is the same but specifies the VCS for cases where the VCS is something else than git
11
+# vendor.sh git golang.org/x/sys eb2c74142fd19a79b3f237334c7384d5167b1b46 https://github.com/golang/sys.git vendor only golang.org/x/sys downloading from the specified URL
12
+
4 13
 cd "$(dirname "$BASH_SOURCE")/.."
5
-rm -rf vendor/
6 14
 source 'hack/.vendor-helpers.sh'
7 15
 
16
+case $# in
17
+0)
18
+	rm -rf vendor/
19
+	;;
20
+# If user passed arguments to the script
21
+1)
22
+	eval "$(grep -E "^clone [^ ]+ $1" "$0")"
23
+	clean
24
+	exit 0
25
+	;;
26
+2)
27
+	rm -rf "vendor/src/$1"
28
+	clone git "$1" "$2"
29
+	clean
30
+	exit 0
31
+	;;
32
+[34])
33
+	rm -rf "vendor/src/$2"
34
+	clone "$@"
35
+	clean
36
+	exit 0
37
+	;;
38
+*)
39
+	>&2 echo "error: unexpected parameters"
40
+	exit 1
41
+	;;
42
+esac
43
+
8 44
 # the following lines are in sorted order, FYI
9 45
 clone git github.com/Azure/go-ansiterm 388960b655244e76e24c75f48631564eaefade62
10 46
 clone git github.com/Microsoft/hcsshim v0.2.2
11 47
new file mode 100644
... ...
@@ -0,0 +1,639 @@
0
+# Docker Image Specification v1.1.0
1
+
2
+An *Image* is an ordered collection of root filesystem changes and the
3
+corresponding execution parameters for use within a container runtime. This
4
+specification outlines the format of these filesystem changes and corresponding
5
+parameters and describes how to create and use them for use with a container
6
+runtime and execution tool.
7
+
8
+This version of the image specification was adopted starting in Docker 1.10.
9
+
10
+## Terminology
11
+
12
+This specification uses the following terms:
13
+
14
+<dl>
15
+    <dt>
16
+        Layer
17
+    </dt>
18
+    <dd>
19
+        Images are composed of <i>layers</i>. Each layer is a set of filesystem
20
+        changes. Layers do not have configuration metadata such as environment
21
+        variables or default arguments - these are properties of the image as a
22
+        whole rather than any particular layer.
23
+    </dd>
24
+    <dt>
25
+        Image JSON
26
+    </dt>
27
+    <dd>
28
+        Each image has an associated JSON structure which describes some
29
+        basic information about the image such as date created, author, and the
30
+        ID of its parent image as well as execution/runtime configuration like
31
+        its entry point, default arguments, CPU/memory shares, networking, and
32
+        volumes. The JSON structure also references a cryptographic hash of
33
+        each layer used by the image, and provides history information for
34
+        those layers. This JSON is considered to be immutable, because changing
35
+        it would change the computed ImageID. Changing it means creating a new
36
+        derived image, instead of changing the existing image.
37
+    </dd>
38
+    <dt>
39
+        Image Filesystem Changeset
40
+    </dt>
41
+    <dd>
42
+        Each layer has an archive of the files which have been added, changed,
43
+        or deleted relative to its parent layer. Using a layer-based or union
44
+        filesystem such as AUFS, or by computing the diff from filesystem
45
+        snapshots, the filesystem changeset can be used to present a series of
46
+        image layers as if they were one cohesive filesystem.
47
+    </dd>
48
+    <dt>
49
+        Layer DiffID
50
+    </dt>
51
+    <dd>
52
+        Layers are referenced by cryptographic hashes of their serialized
53
+        representation. This is a SHA256 digest over the tar archive used to
54
+        transport the layer, represented as a hexadecimal encoding of 256 bits, e.g.,
55
+        <code>sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9</code>.
56
+        Layers must be packed and unpacked reproducibly to avoid changing the
57
+        layer ID, for example by using tar-split to save the tar headers. Note
58
+        that the digest used as the layer ID is taken over an uncompressed
59
+        version of the tar.
60
+    </dd>
61
+    <dt>
62
+        Layer ChainID
63
+    </dt>
64
+    <dd>
65
+        For convenience, it is sometimes useful to refer to a stack of layers
66
+        with a single identifier. This is called a <code>ChainID</code>. For a
67
+        single layer (or the layer at the bottom of a stack), the
68
+        <code>ChainID</code> is equal to the layer's <code>DiffID</code>.
69
+        Otherwise the <code>ChainID</code> is given by the formula:
70
+        <code>ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN))</code>.
71
+    </dd>
72
+    <dt>
73
+        ImageID <a name="id_desc"></a>
74
+    </dt>
75
+    <dd>
76
+        Each image's ID is given by the SHA256 hash of its configuration JSON. It is 
77
+        represented as a hexadecimal encoding of 256 bits, e.g.,
78
+        <code>sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9</code>.
79
+        Since the configuration JSON that gets hashed references hashes of each
80
+        layer in the image, this formulation of the ImageID makes images
81
+        content-addresable.
82
+    </dd>
83
+    <dt>
84
+        Tag
85
+    </dt>
86
+    <dd>
87
+        A tag serves to map a descriptive, user-given name to any single image
88
+        ID. Tag values are limited to the set of characters
89
+        <code>[a-zA-Z_0-9]</code>.
90
+    </dd>
91
+    <dt>
92
+        Repository
93
+    </dt>
94
+    <dd>
95
+        A collection of tags grouped under a common prefix (the name component
96
+        before <code>:</code>). For example, in an image tagged with the name
97
+        <code>my-app:3.1.4</code>, <code>my-app</code> is the <i>Repository</i>
98
+        component of the name. A repository name is made up of slash-separated
99
+        name components, optionally prefixed by a DNS hostname. The hostname
100
+        must follow comply with standard DNS rules, but may not contain
101
+        <code>_</code> characters. If a hostname is present, it may optionally
102
+        be followed by a port number in the format <code>:8080</code>.
103
+        Name components may contain lowercase characters, digits, and
104
+        separators. A separator is defined as a period, one or two underscores,
105
+        or one or more dashes. A name component may not start or end with
106
+        a separator.
107
+    </dd>
108
+</dl>
109
+
110
+## Image JSON Description
111
+
112
+Here is an example image JSON file:
113
+
114
+```
115
+{  
116
+    "created": "2015-10-31T22:22:56.015925234Z",
117
+    "author": "Alyssa P. Hacker &ltalyspdev@example.com&gt",
118
+    "architecture": "amd64",
119
+    "os": "linux",
120
+    "config": {
121
+        "User": "alice",
122
+        "Memory": 2048,
123
+        "MemorySwap": 4096,
124
+        "CpuShares": 8,
125
+        "ExposedPorts": {  
126
+            "8080/tcp": {}
127
+        },
128
+        "Env": [  
129
+            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
130
+            "FOO=docker_is_a_really",
131
+            "BAR=great_tool_you_know"
132
+        ],
133
+        "Entrypoint": [
134
+            "/bin/my-app-binary"
135
+        ],
136
+        "Cmd": [
137
+            "--foreground",
138
+            "--config",
139
+            "/etc/my-app.d/default.cfg"
140
+        ],
141
+        "Volumes": {
142
+            "/var/job-result-data": {},
143
+            "/var/log/my-app-logs": {},
144
+        },
145
+        "WorkingDir": "/home/alice",
146
+    },
147
+    "rootfs": {
148
+      "diff_ids": [
149
+        "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
150
+        "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
151
+      ],
152
+      "type": "layers"
153
+    },
154
+    "history": [
155
+      {
156
+        "created": "2015-10-31T22:22:54.690851953Z",
157
+        "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
158
+      },
159
+      {
160
+        "created": "2015-10-31T22:22:55.613815829Z",
161
+        "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
162
+        "empty_layer": true
163
+      }
164
+    ]
165
+}
166
+```
167
+
168
+Note that image JSON files produced by Docker don't contain formatting
169
+whitespace. It has been added to this example for clarity.
170
+
171
+### Image JSON Field Descriptions
172
+
173
+<dl>
174
+    <dt>
175
+        created <code>string</code>
176
+    </dt>
177
+    <dd>
178
+        ISO-8601 formatted combined date and time at which the image was
179
+        created.
180
+    </dd>
181
+    <dt>
182
+        author <code>string</code>
183
+    </dt>
184
+    <dd>
185
+        Gives the name and/or email address of the person or entity which
186
+        created and is responsible for maintaining the image.
187
+    </dd>
188
+    <dt>
189
+        architecture <code>string</code>
190
+    </dt>
191
+    <dd>
192
+        The CPU architecture which the binaries in this image are built to run
193
+        on. Possible values include:
194
+        <ul>
195
+            <li>386</li>
196
+            <li>amd64</li>
197
+            <li>arm</li>
198
+        </ul>
199
+        More values may be supported in the future and any of these may or may
200
+        not be supported by a given container runtime implementation.
201
+    </dd>
202
+    <dt>
203
+        os <code>string</code>
204
+    </dt>
205
+    <dd>
206
+        The name of the operating system which the image is built to run on.
207
+        Possible values include:
208
+        <ul>
209
+            <li>darwin</li>
210
+            <li>freebsd</li>
211
+            <li>linux</li>
212
+        </ul>
213
+        More values may be supported in the future and any of these may or may
214
+        not be supported by a given container runtime implementation.
215
+    </dd>
216
+    <dt>
217
+        config <code>struct</code>
218
+    </dt>
219
+    <dd>
220
+        The execution parameters which should be used as a base when running a
221
+        container using the image. This field can be <code>null</code>, in
222
+        which case any execution parameters should be specified at creation of
223
+        the container.
224
+
225
+        <h4>Container RunConfig Field Descriptions</h4>
226
+
227
+        <dl>
228
+            <dt>
229
+                User <code>string</code>
230
+            </dt>
231
+            <dd>
232
+                <p>The username or UID which the process in the container should
233
+                run as. This acts as a default value to use when the value is
234
+                not specified when creating a container.</p>
235
+
236
+                <p>All of the following are valid:</p>
237
+
238
+                <ul>
239
+                    <li><code>user</code></li>
240
+                    <li><code>uid</code></li>
241
+                    <li><code>user:group</code></li>
242
+                    <li><code>uid:gid</code></li>
243
+                    <li><code>uid:group</code></li>
244
+                    <li><code>user:gid</code></li>
245
+                </ul>
246
+
247
+                <p>If <code>group</code>/<code>gid</code> is not specified, the
248
+                default group and supplementary groups of the given
249
+                <code>user</code>/<code>uid</code> in <code>/etc/passwd</code>
250
+                from the container are applied.</p>
251
+            </dd>
252
+            <dt>
253
+                Memory <code>integer</code>
254
+            </dt>
255
+            <dd>
256
+                Memory limit (in bytes). This acts as a default value to use
257
+                when the value is not specified when creating a container.
258
+            </dd>
259
+            <dt>
260
+                MemorySwap <code>integer</code>
261
+            </dt>
262
+            <dd>
263
+                Total memory usage (memory + swap); set to <code>-1</code> to
264
+                disable swap. This acts as a default value to use when the
265
+                value is not specified when creating a container.
266
+            </dd>
267
+            <dt>
268
+                CpuShares <code>integer</code>
269
+            </dt>
270
+            <dd>
271
+                CPU shares (relative weight vs. other containers). This acts as
272
+                a default value to use when the value is not specified when
273
+                creating a container.
274
+            </dd>
275
+            <dt>
276
+                ExposedPorts <code>struct</code>
277
+            </dt>
278
+            <dd>
279
+                A set of ports to expose from a container running this image.
280
+                This JSON structure value is unusual because it is a direct
281
+                JSON serialization of the Go type
282
+                <code>map[string]struct{}</code> and is represented in JSON as
283
+                an object mapping its keys to an empty object. Here is an
284
+                example:
285
+
286
+<pre>{
287
+    "8080": {},
288
+    "53/udp": {},
289
+    "2356/tcp": {}
290
+}</pre>
291
+
292
+                Its keys can be in the format of:
293
+                <ul>
294
+                    <li>
295
+                        <code>"port/tcp"</code>
296
+                    </li>
297
+                    <li>
298
+                        <code>"port/udp"</code>
299
+                    </li>
300
+                    <li>
301
+                        <code>"port"</code>
302
+                    </li>
303
+                </ul>
304
+                with the default protocol being <code>"tcp"</code> if not
305
+                specified.
306
+
307
+                These values act as defaults and are merged with any specified
308
+                when creating a container.
309
+            </dd>
310
+            <dt>
311
+                Env <code>array of strings</code>
312
+            </dt>
313
+            <dd>
314
+                Entries are in the format of <code>VARNAME="var value"</code>.
315
+                These values act as defaults and are merged with any specified
316
+                when creating a container.
317
+            </dd>
318
+            <dt>
319
+                Entrypoint <code>array of strings</code>
320
+            </dt>
321
+            <dd>
322
+                A list of arguments to use as the command to execute when the
323
+                container starts. This value acts as a  default and is replaced
324
+                by an entrypoint specified when creating a container.
325
+            </dd>
326
+            <dt>
327
+                Cmd <code>array of strings</code>
328
+            </dt>
329
+            <dd>
330
+                Default arguments to the entry point of the container. These
331
+                values act as defaults and are replaced with any specified when
332
+                creating a container. If an <code>Entrypoint</code> value is
333
+                not specified, then the first entry of the <code>Cmd</code>
334
+                array should be interpreted as the executable to run.
335
+            </dd>
336
+            <dt>
337
+                Volumes <code>struct</code>
338
+            </dt>
339
+            <dd>
340
+                A set of directories which should be created as data volumes in
341
+                a container running this image. This JSON structure value is
342
+                unusual because it is a direct JSON serialization of the Go
343
+                type <code>map[string]struct{}</code> and is represented in
344
+                JSON as an object mapping its keys to an empty object. Here is
345
+                an example:
346
+<pre>{
347
+    "/var/my-app-data/": {},
348
+    "/etc/some-config.d/": {},
349
+}</pre>
350
+            </dd>
351
+            <dt>
352
+                WorkingDir <code>string</code>
353
+            </dt>
354
+            <dd>
355
+                Sets the current working directory of the entry point process
356
+                in the container. This value acts as a default and is replaced
357
+                by a working directory specified when creating a container.
358
+            </dd>
359
+        </dl>
360
+    </dd>
361
+    <dt>
362
+        rootfs <code>struct</code>
363
+    </dt>
364
+    <dd>
365
+        The rootfs key references the layer content addresses used by the
366
+        image. This makes the image config hash depend on the filesystem hash.
367
+        rootfs has two subkeys:
368
+
369
+        <ul>
370
+          <li>
371
+            <code>type</code> is usually set to <code>layers</code>. There is
372
+            also a Windows-specific value <code>layers+base</code> that allows
373
+            a base layer to be specified in a field of <code>rootfs</code>
374
+            called <code>base_layer</code>.
375
+          </li>
376
+          <li>
377
+            <code>diff_ids</code> is an array of layer content hashes (<code>DiffIDs</code>), in order from bottom-most to top-most.
378
+          </li>
379
+        </ul>
380
+
381
+
382
+        Here is an example rootfs section:
383
+
384
+<pre>"rootfs": {
385
+  "diff_ids": [
386
+    "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
387
+    "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
388
+    "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
389
+  ],
390
+  "type": "layers"
391
+}</pre>
392
+    </dd>
393
+    <dt>
394
+        history <code>struct</code>
395
+    </dt>
396
+    <dd>
397
+        <code>history</code> is an array of objects describing the history of
398
+        each layer. The array is ordered from bottom-most layer to top-most
399
+        layer. The object has the following fields.
400
+
401
+        <ul>
402
+          <li>
403
+            <code>created</code>: Creation time, expressed as a ISO-8601 formatted
404
+            combined date and time
405
+          </li>
406
+          <li>
407
+            <code>author</code>: The author of the build point
408
+          </li>
409
+          <li>
410
+            <code>created_by</code>: The command which created the layer
411
+          </li>
412
+          <li>
413
+            <code>comment</code>: A custom message set when creating the layer
414
+          </li>
415
+          <li>
416
+            <code>empty_layer</code>: This field is used to mark if the history
417
+            item created a filesystem diff. It is set to true if this history
418
+            item doesn't correspond to an actual layer in the rootfs section
419
+            (for example, a command like ENV which results in no change to the
420
+            filesystem).
421
+          </li>
422
+        </ul>
423
+
424
+Here is an example history section:
425
+
426
+<pre>"history": [
427
+  {
428
+    "created": "2015-10-31T22:22:54.690851953Z",
429
+    "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
430
+  },
431
+  {
432
+    "created": "2015-10-31T22:22:55.613815829Z",
433
+    "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
434
+    "empty_layer": true
435
+  }
436
+]</pre>
437
+    </dd>
438
+</dl>
439
+
440
+Any extra fields in the Image JSON struct are considered implementation
441
+specific and should be ignored by any implementations which are unable to
442
+interpret them.
443
+
444
+## Creating an Image Filesystem Changeset
445
+
446
+An example of creating an Image Filesystem Changeset follows.
447
+
448
+An image root filesystem is first created as an empty directory. Here is the
449
+initial empty directory structure for the a changeset using the
450
+randomly-generated directory name `c3167915dc9d` ([actual layer DiffIDs are
451
+generated based on the content](#id_desc)).
452
+
453
+```
454
+c3167915dc9d/
455
+```
456
+
457
+Files and directories are then created:
458
+
459
+```
460
+c3167915dc9d/
461
+    etc/
462
+        my-app-config
463
+    bin/
464
+        my-app-binary
465
+        my-app-tools
466
+```
467
+
468
+The `c3167915dc9d` directory is then committed as a plain Tar archive with
469
+entries for the following files:
470
+
471
+```
472
+etc/my-app-config
473
+bin/my-app-binary
474
+bin/my-app-tools
475
+```
476
+
477
+To make changes to the filesystem of this container image, create a new
478
+directory, such as `f60c56784b83`, and initialize it with a snapshot of the
479
+parent image's root filesystem, so that the directory is identical to that
480
+of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem can make this very
481
+efficient:
482
+
483
+```
484
+f60c56784b83/
485
+    etc/
486
+        my-app-config
487
+    bin/
488
+        my-app-binary
489
+        my-app-tools
490
+```
491
+
492
+This example change is going add a configuration directory at `/etc/my-app.d`
493
+which contains a default config file. There's also a change to the
494
+`my-app-tools` binary to handle the config layout change. The `f60c56784b83`
495
+directory then looks like this:
496
+
497
+```
498
+f60c56784b83/
499
+    etc/
500
+        my-app.d/
501
+            default.cfg
502
+    bin/
503
+        my-app-binary
504
+        my-app-tools
505
+```
506
+
507
+This reflects the removal of `/etc/my-app-config` and creation of a file and
508
+directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been
509
+replaced with an updated version. Before committing this directory to a
510
+changeset, because it has a parent image, it is first compared with the
511
+directory tree of the parent snapshot, `f60c56784b83`, looking for files and
512
+directories that have been added, modified, or removed. The following changeset
513
+is found:
514
+
515
+```
516
+Added:      /etc/my-app.d/default.cfg
517
+Modified:   /bin/my-app-tools
518
+Deleted:    /etc/my-app-config
519
+```
520
+
521
+A Tar Archive is then created which contains *only* this changeset: The added
522
+and modified files and directories in their entirety, and for each deleted item
523
+an entry for an empty file at the same location but with the basename of the
524
+deleted file or directory prefixed with `.wh.`. The filenames prefixed with
525
+`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible
526
+to create an image root filesystem which contains a file or directory with a
527
+name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has
528
+the following entries:
529
+
530
+```
531
+/etc/my-app.d/default.cfg
532
+/bin/my-app-tools
533
+/etc/.wh.my-app-config
534
+```
535
+
536
+Any given image is likely to be composed of several of these Image Filesystem
537
+Changeset tar archives.
538
+
539
+## Combined Image JSON + Filesystem Changeset Format
540
+
541
+There is also a format for a single archive which contains complete information
542
+about an image, including:
543
+
544
+ - repository names/tags
545
+ - image configuration JSON file
546
+ - all tar archives of each layer filesystem changesets
547
+
548
+For example, here's what the full archive of `library/busybox` is (displayed in
549
+`tree` format):
550
+
551
+```
552
+.
553
+├── 47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json
554
+├── 5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a
555
+│   ├── VERSION
556
+│   ├── json
557
+│   └── layer.tar
558
+├── a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198
559
+│   ├── VERSION
560
+│   ├── json
561
+│   └── layer.tar
562
+├── manifest.json
563
+└── repositories
564
+```
565
+
566
+There is a directory for each layer in the image. Each directory is named with
567
+a 64 character hex name that is deterministically generated from the layer
568
+information. These names are not necessarily layer DiffIDs or ChainIDs. Each of
569
+these directories contains 3 files:
570
+
571
+ * `VERSION` - The schema version of the `json` file
572
+ * `json` - The legacy JSON metadata for an image layer. In this version of
573
+    the image specification, layers don't have JSON metadata, but in
574
+    [version 1](v1.md), they did. A file is created for each layer in the
575
+    v1 format for backward compatiblity.
576
+ * `layer.tar` - The Tar archive of the filesystem changeset for an image
577
+   layer.
578
+
579
+Note that this directory layout is only important for backward compatibility.
580
+Current implementations use the paths specified in `manifest.json`.
581
+
582
+The content of the `VERSION` files is simply the semantic version of the JSON
583
+metadata schema:
584
+
585
+```
586
+1.0
587
+```
588
+
589
+The `repositories` file is another JSON file which describes names/tags:
590
+
591
+```
592
+{  
593
+    "busybox":{  
594
+        "latest":"5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a"
595
+    }
596
+}
597
+```
598
+
599
+Every key in this object is the name of a repository, and maps to a collection
600
+of tag suffixes. Each tag maps to the ID of the image represented by that tag.
601
+This file is only used for backwards compatibility. Current implementations use
602
+the `manifest.json` file instead.
603
+
604
+The `manifest.json` file provides the image JSON for the top-level image, and
605
+optionally for parent images that this image was derived from. It consists of
606
+an array of metadata entries:
607
+
608
+```
609
+[
610
+  {
611
+    "Config": "47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json",
612
+    "RepoTags": ["busybox:latest"],
613
+    "Layers": [
614
+      "a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198/layer.tar",
615
+      "5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a/layer.tar"
616
+    ]
617
+  }
618
+]
619
+```
620
+
621
+There is an entry in the array for each image.
622
+
623
+The `Config` field references another file in the tar which includes the image
624
+JSON for this image.
625
+
626
+The `RepoTags` field lists references pointing to this image.
627
+
628
+The `Layers` field points to the filesystem changeset tars.
629
+
630
+An optional `Parent` field references the imageID of the parent image. This
631
+parent must be part of the same `manifest.json` file.
632
+
633
+This file shouldn't be confused with the distribution manifest, used to push
634
+and pull images.
635
+
636
+Generally, implementations that support this version of the spec will use
637
+the `manifest.json` file if available, and older implementations will use the
638
+legacy `*/json` files and `repositories`.
... ...
@@ -486,3 +486,8 @@ func (d *Daemon) findContainerIP(id string) string {
486 486
 	}
487 487
 	return strings.Trim(out, " \r\n'")
488 488
 }
489
+
490
// buildImageWithOut builds dockerfile as image name against this daemon's
// socket (d.sock()) rather than the default host, and returns the combined
// stdout/stderr of the build command, its exit code, and any execution error.
// useCache and buildFlags are forwarded verbatim to the underlying
// "docker build" invocation.
func (d *Daemon) buildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, int, error) {
	buildCmd := buildImageCmdWithHost(name, dockerfile, d.sock(), useCache, buildFlags...)
	return runCommandWithOutput(buildCmd)
}
... ...
@@ -862,138 +862,6 @@ RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, true);
862 862
 	}
863 863
 }
864 864
 
865
-func (s *DockerSuite) TestBuildAddMultipleFilesToFile(c *check.C) {
866
-	name := "testaddmultiplefilestofile"
867
-
868
-	ctx, err := fakeContext(`FROM `+minimalBaseImage()+`
869
-	ADD file1.txt file2.txt test
870
-	`,
871
-		map[string]string{
872
-			"file1.txt": "test1",
873
-			"file2.txt": "test1",
874
-		})
875
-	if err != nil {
876
-		c.Fatal(err)
877
-	}
878
-	defer ctx.Close()
879
-
880
-	expected := "When using ADD with more than one source file, the destination must be a directory and end with a /"
881
-	if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) {
882
-		c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
883
-	}
884
-
885
-}
886
-
887
-func (s *DockerSuite) TestBuildJSONAddMultipleFilesToFile(c *check.C) {
888
-	name := "testjsonaddmultiplefilestofile"
889
-
890
-	ctx, err := fakeContext(`FROM `+minimalBaseImage()+`
891
-	ADD ["file1.txt", "file2.txt", "test"]
892
-	`,
893
-		map[string]string{
894
-			"file1.txt": "test1",
895
-			"file2.txt": "test1",
896
-		})
897
-	if err != nil {
898
-		c.Fatal(err)
899
-	}
900
-	defer ctx.Close()
901
-
902
-	expected := "When using ADD with more than one source file, the destination must be a directory and end with a /"
903
-	if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) {
904
-		c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
905
-	}
906
-
907
-}
908
-
909
-func (s *DockerSuite) TestBuildAddMultipleFilesToFileWild(c *check.C) {
910
-	name := "testaddmultiplefilestofilewild"
911
-
912
-	ctx, err := fakeContext(`FROM `+minimalBaseImage()+`
913
-	ADD file*.txt test
914
-	`,
915
-		map[string]string{
916
-			"file1.txt": "test1",
917
-			"file2.txt": "test1",
918
-		})
919
-	if err != nil {
920
-		c.Fatal(err)
921
-	}
922
-	defer ctx.Close()
923
-
924
-	expected := "When using ADD with more than one source file, the destination must be a directory and end with a /"
925
-	if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) {
926
-		c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
927
-	}
928
-
929
-}
930
-
931
-func (s *DockerSuite) TestBuildJSONAddMultipleFilesToFileWild(c *check.C) {
932
-	name := "testjsonaddmultiplefilestofilewild"
933
-
934
-	ctx, err := fakeContext(`FROM `+minimalBaseImage()+`
935
-	ADD ["file*.txt", "test"]
936
-	`,
937
-		map[string]string{
938
-			"file1.txt": "test1",
939
-			"file2.txt": "test1",
940
-		})
941
-	if err != nil {
942
-		c.Fatal(err)
943
-	}
944
-	defer ctx.Close()
945
-
946
-	expected := "When using ADD with more than one source file, the destination must be a directory and end with a /"
947
-	if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) {
948
-		c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
949
-	}
950
-
951
-}
952
-
953
-func (s *DockerSuite) TestBuildCopyMultipleFilesToFile(c *check.C) {
954
-	name := "testcopymultiplefilestofile"
955
-
956
-	ctx, err := fakeContext(`FROM `+minimalBaseImage()+`
957
-	COPY file1.txt file2.txt test
958
-	`,
959
-		map[string]string{
960
-			"file1.txt": "test1",
961
-			"file2.txt": "test1",
962
-		})
963
-	if err != nil {
964
-		c.Fatal(err)
965
-	}
966
-	defer ctx.Close()
967
-
968
-	expected := "When using COPY with more than one source file, the destination must be a directory and end with a /"
969
-	if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) {
970
-		c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
971
-	}
972
-
973
-}
974
-
975
-func (s *DockerSuite) TestBuildJSONCopyMultipleFilesToFile(c *check.C) {
976
-	name := "testjsoncopymultiplefilestofile"
977
-
978
-	ctx, err := fakeContext(`FROM `+minimalBaseImage()+`
979
-	COPY ["file1.txt", "file2.txt", "test"]
980
-	`,
981
-		map[string]string{
982
-			"file1.txt": "test1",
983
-			"file2.txt": "test1",
984
-		})
985
-	if err != nil {
986
-		c.Fatal(err)
987
-	}
988
-	defer ctx.Close()
989
-
990
-	expected := "When using COPY with more than one source file, the destination must be a directory and end with a /"
991
-	if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) {
992
-		c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
993
-	}
994
-
995
-}
996
-
997 865
 func (s *DockerSuite) TestBuildAddFileWithWhitespace(c *check.C) {
998 866
 	testRequires(c, DaemonIsLinux) // Not currently passing on Windows
999 867
 	name := "testaddfilewithwhitespace"
... ...
@@ -1066,48 +934,6 @@ RUN [ $(cat "/test dir/test_file6") = 'test6' ]`,
1066 1066
 	}
1067 1067
 }
1068 1068
 
1069
-func (s *DockerSuite) TestBuildAddMultipleFilesToFileWithWhitespace(c *check.C) {
1070
-	name := "testaddmultiplefilestofilewithwhitespace"
1071
-	ctx, err := fakeContext(`FROM busybox
1072
-	ADD [ "test file1", "test file2", "test" ]
1073
-    `,
1074
-		map[string]string{
1075
-			"test file1": "test1",
1076
-			"test file2": "test2",
1077
-		})
1078
-	if err != nil {
1079
-		c.Fatal(err)
1080
-	}
1081
-	defer ctx.Close()
1082
-
1083
-	expected := "When using ADD with more than one source file, the destination must be a directory and end with a /"
1084
-	if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) {
1085
-		c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
1086
-	}
1087
-
1088
-}
1089
-
1090
-func (s *DockerSuite) TestBuildCopyMultipleFilesToFileWithWhitespace(c *check.C) {
1091
-	name := "testcopymultiplefilestofilewithwhitespace"
1092
-	ctx, err := fakeContext(`FROM busybox
1093
-	COPY [ "test file1", "test file2", "test" ]
1094
-        `,
1095
-		map[string]string{
1096
-			"test file1": "test1",
1097
-			"test file2": "test2",
1098
-		})
1099
-	if err != nil {
1100
-		c.Fatal(err)
1101
-	}
1102
-	defer ctx.Close()
1103
-
1104
-	expected := "When using COPY with more than one source file, the destination must be a directory and end with a /"
1105
-	if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) {
1106
-		c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
1107
-	}
1108
-
1109
-}
1110
-
1111 1069
 func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) {
1112 1070
 	testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
1113 1071
 	name := "testcopywildcard"
... ...
@@ -1159,26 +985,6 @@ func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) {
1159 1159
 
1160 1160
 }
1161 1161
 
1162
-func (s *DockerSuite) TestBuildCopyWildcardNoFind(c *check.C) {
1163
-	name := "testcopywildcardnofind"
1164
-	ctx, err := fakeContext(`FROM busybox
1165
-	COPY file*.txt /tmp/
1166
-	`, nil)
1167
-	if err != nil {
1168
-		c.Fatal(err)
1169
-	}
1170
-	defer ctx.Close()
1171
-
1172
-	_, err = buildImageFromContext(name, ctx, true)
1173
-	if err == nil {
1174
-		c.Fatal("should have failed to find a file")
1175
-	}
1176
-	if !strings.Contains(err.Error(), "No source files were specified") {
1177
-		c.Fatalf("Wrong error %v, must be about no source files", err)
1178
-	}
1179
-
1180
-}
1181
-
1182 1162
 func (s *DockerSuite) TestBuildCopyWildcardInName(c *check.C) {
1183 1163
 	name := "testcopywildcardinname"
1184 1164
 	ctx, err := fakeContext(`FROM busybox
... ...
@@ -1580,17 +1386,6 @@ COPY . /`,
1580 1580
 	}
1581 1581
 }
1582 1582
 
1583
-func (s *DockerSuite) TestBuildCopyDisallowRemote(c *check.C) {
1584
-	name := "testcopydisallowremote"
1585
-
1586
-	_, out, err := buildImageWithOut(name, `FROM `+minimalBaseImage()+`
1587
-COPY https://index.docker.io/robots.txt /`,
1588
-		true)
1589
-	if err == nil || !strings.Contains(out, "Source can't be a URL for COPY") {
1590
-		c.Fatalf("Error should be about disallowed remote source, got err: %s, out: %q", err, out)
1591
-	}
1592
-}
1593
-
1594 1583
 func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) {
1595 1584
 	testRequires(c, DaemonIsLinux) // Not currently working on Windows
1596 1585
 
... ...
@@ -3289,18 +3084,6 @@ func (s *DockerSuite) TestBuildFails(c *check.C) {
3289 3289
 	}
3290 3290
 }
3291 3291
 
3292
-func (s *DockerSuite) TestBuildFailsDockerfileEmpty(c *check.C) {
3293
-	name := "testbuildfails"
3294
-	_, err := buildImage(name, ``, true)
3295
-	if err != nil {
3296
-		if !strings.Contains(err.Error(), "The Dockerfile (Dockerfile) cannot be empty") {
3297
-			c.Fatalf("Wrong error %v, must be about empty Dockerfile", err)
3298
-		}
3299
-	} else {
3300
-		c.Fatal("Error must not be nil")
3301
-	}
3302
-}
3303
-
3304 3292
 func (s *DockerSuite) TestBuildOnBuild(c *check.C) {
3305 3293
 	name := "testbuildonbuild"
3306 3294
 	_, err := buildImage(name,
... ...
@@ -3319,21 +3102,6 @@ func (s *DockerSuite) TestBuildOnBuild(c *check.C) {
3319 3319
 	}
3320 3320
 }
3321 3321
 
3322
-func (s *DockerSuite) TestBuildOnBuildForbiddenChained(c *check.C) {
3323
-	name := "testbuildonbuildforbiddenchained"
3324
-	_, err := buildImage(name,
3325
-		`FROM busybox
3326
-		ONBUILD ONBUILD RUN touch foobar`,
3327
-		true)
3328
-	if err != nil {
3329
-		if !strings.Contains(err.Error(), "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") {
3330
-			c.Fatalf("Wrong error %v, must be about chaining ONBUILD", err)
3331
-		}
3332
-	} else {
3333
-		c.Fatal("Error must not be nil")
3334
-	}
3335
-}
3336
-
3337 3322
 // gh #2446
3338 3323
 func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) {
3339 3324
 	testRequires(c, DaemonIsLinux)
... ...
@@ -4564,16 +4332,6 @@ func (s *DockerSuite) TestBuildCmdJSONNoShDashC(c *check.C) {
4564 4564
 
4565 4565
 }
4566 4566
 
4567
-func (s *DockerSuite) TestBuildErrorInvalidInstruction(c *check.C) {
4568
-	name := "testbuildignoreinvalidinstruction"
4569
-
4570
-	out, _, err := buildImageWithOut(name, "FROM busybox\nfoo bar", true)
4571
-	if err == nil {
4572
-		c.Fatalf("Should have failed: %s", out)
4573
-	}
4574
-
4575
-}
4576
-
4577 4567
 func (s *DockerSuite) TestBuildEntrypointInheritance(c *check.C) {
4578 4568
 
4579 4569
 	if _, err := buildImage("parent", `
... ...
@@ -1616,35 +1616,6 @@ func (s *DockerDaemonSuite) TestRunContainerWithBridgeNone(c *check.C) {
1616 1616
 		check.Commentf("The network interfaces in container should be the same with host when --net=host when bridge network is disabled: %s", out))
1617 1617
 }
1618 1618
 
1619
-// os.Kill should kill daemon ungracefully, leaving behind container mounts.
1620
-// A subsequent daemon restart should clean up said mounts.
1621
-func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonKill(c *check.C) {
1622
-	testRequires(c, NotExperimentalDaemon)
1623
-	c.Assert(s.d.StartWithBusybox(), check.IsNil)
1624
-
1625
-	out, err := s.d.Cmd("run", "-d", "busybox", "top")
1626
-	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
1627
-	id := strings.TrimSpace(out)
1628
-	c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil)
1629
-	mountOut, err := ioutil.ReadFile("/proc/self/mountinfo")
1630
-	c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
1631
-
1632
-	// container mounts should exist even after daemon has crashed.
1633
-	comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut)
1634
-	c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment)
1635
-
1636
-	// restart daemon.
1637
-	if err := s.d.Restart(); err != nil {
1638
-		c.Fatal(err)
1639
-	}
1640
-
1641
-	// Now, container mounts should be gone.
1642
-	mountOut, err = ioutil.ReadFile("/proc/self/mountinfo")
1643
-	c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
1644
-	comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut)
1645
-	c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment)
1646
-}
1647
-
1648 1619
 func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *check.C) {
1649 1620
 	if err := s.d.StartWithBusybox(); err != nil {
1650 1621
 		t.Fatal(err)
... ...
@@ -2349,3 +2320,15 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *chec
2349 2349
 	c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads)
2350 2350
 	c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads)
2351 2351
 }
2352
+
2353
+func (s *DockerDaemonSuite) TestBuildOnDisabledBridgeNetworkDaemon(c *check.C) {
2354
+	err := s.d.Start("-b=none", "--iptables=false")
2355
+	c.Assert(err, check.IsNil)
2356
+	s.d.c.Logf("dockerBinary %s", dockerBinary)
2357
+	out, code, err := s.d.buildImageWithOut("busyboxs",
2358
+		`FROM busybox
2359
+                RUN cat /etc/hosts`, false)
2360
+	comment := check.Commentf("Failed to build image. output %s, exitCode %d, err %v", out, code, err)
2361
+	c.Assert(err, check.IsNil, comment)
2362
+	c.Assert(code, check.Equals, 0, comment)
2363
+}
... ...
@@ -9,6 +9,7 @@ import (
9 9
 	"os"
10 10
 	"os/exec"
11 11
 	"strings"
12
+	"syscall"
12 13
 	"time"
13 14
 	"unicode"
14 15
 
... ...
@@ -366,3 +367,99 @@ func (s *DockerSuite) TestEventsFilterNetworkID(c *check.C) {
366 366
 	c.Assert(events[0], checker.Contains, "test-event-network-local")
367 367
 	c.Assert(events[0], checker.Contains, "type=bridge")
368 368
 }
369
+
370
// TestDaemonEvents checks that reloading the daemon configuration via SIGHUP
// emits a "daemon reload" event carrying the new configuration values.
func (s *DockerDaemonSuite) TestDaemonEvents(c *check.C) {
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	// Write an initial daemon config file and start the daemon with it.
	configFilePath := "test.json"
	configFile, err := os.Create(configFilePath)
	c.Assert(err, checker.IsNil)
	defer os.Remove(configFilePath)

	daemonConfig := `{"labels":["foo=bar"]}`
	fmt.Fprintf(configFile, "%s", daemonConfig)
	configFile.Close()
	c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil)

	// Extract the daemon's ID and Name from "docker info" output; the reload
	// event text is expected to reference both.
	out, err := s.d.Cmd("info")
	c.Assert(err, checker.IsNil)
	daemonID := ""
	daemonName := ""
	for _, line := range strings.Split(out, "\n") {
		if strings.HasPrefix(line, "ID: ") {
			daemonID = strings.TrimPrefix(line, "ID: ")
		} else if strings.HasPrefix(line, "Name: ") {
			daemonName = strings.TrimPrefix(line, "Name: ")
		}
	}
	c.Assert(daemonID, checker.Not(checker.Equals), "")

	// Rewrite the config file with changed values, then signal the daemon to
	// reload it.
	configFile, err = os.Create(configFilePath)
	c.Assert(err, checker.IsNil)
	daemonConfig = `{"max-concurrent-downloads":1,"labels":["bar=foo"]}`
	fmt.Fprintf(configFile, "%s", daemonConfig)
	configFile.Close()

	syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP)

	// Give the daemon time to process the reload before querying events.
	// NOTE(review): fixed sleep is potentially flaky; a poll would be sturdier.
	time.Sleep(3 * time.Second)

	out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c))
	c.Assert(err, checker.IsNil)
	c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s (cluster-advertise=, cluster-store=, cluster-store-opts={}, debug=true, labels=[\"bar=foo\"], max-concurrent-downloads=1, max-concurrent-uploads=5, name=%s)", daemonID, daemonName))
}
412
+
413
// TestDaemonEventsWithFilters checks that the "daemon reload" event emitted
// after a SIGHUP config reload can be selected (and excluded) via the
// daemon=<id>, daemon=<name>, and type= event filters.
func (s *DockerDaemonSuite) TestDaemonEventsWithFilters(c *check.C) {
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	// Write a daemon config file and start the daemon with it.
	configFilePath := "test.json"
	configFile, err := os.Create(configFilePath)
	c.Assert(err, checker.IsNil)
	defer os.Remove(configFilePath)

	daemonConfig := `{"labels":["foo=bar"]}`
	fmt.Fprintf(configFile, "%s", daemonConfig)
	configFile.Close()
	c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil)

	// Extract the daemon's ID and Name from "docker info" output so both can
	// be used as filter values below.
	out, err := s.d.Cmd("info")
	c.Assert(err, checker.IsNil)
	daemonID := ""
	daemonName := ""
	for _, line := range strings.Split(out, "\n") {
		if strings.HasPrefix(line, "ID: ") {
			daemonID = strings.TrimPrefix(line, "ID: ")
		} else if strings.HasPrefix(line, "Name: ") {
			daemonName = strings.TrimPrefix(line, "Name: ")
		}
	}
	c.Assert(daemonID, checker.Not(checker.Equals), "")

	// Trigger a config reload to generate the daemon event.
	syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP)

	// Give the daemon time to process the reload before querying events.
	// NOTE(review): fixed sleep is potentially flaky; a poll would be sturdier.
	time.Sleep(3 * time.Second)

	// Filtering by the daemon's ID must match the reload event.
	out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("daemon=%s", daemonID))
	c.Assert(err, checker.IsNil)
	c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID))

	// Filtering by the daemon's name must match as well.
	out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("daemon=%s", daemonName))
	c.Assert(err, checker.IsNil)
	c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID))

	// A non-matching daemon filter must exclude the event.
	out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "daemon=foo")
	c.Assert(err, checker.IsNil)
	c.Assert(out, checker.Not(checker.Contains), fmt.Sprintf("daemon reload %s", daemonID))

	// type=daemon must include the event; type=container must exclude it.
	out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "type=daemon")
	c.Assert(err, checker.IsNil)
	c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID))

	out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "type=container")
	c.Assert(err, checker.IsNil)
	c.Assert(out, checker.Not(checker.Contains), fmt.Sprintf("daemon reload %s", daemonID))
}
... ...
@@ -948,10 +948,10 @@ func (s *DockerSuite) TestRunSeccompDefaultProfile(c *check.C) {
948 948
 	testRequires(c, SameHostDaemon, seccompEnabled, NotUserNamespace)
949 949
 
950 950
 	var group sync.WaitGroup
951
-	group.Add(4)
952
-	errChan := make(chan error, 4)
951
+	group.Add(11)
952
+	errChan := make(chan error, 11)
953 953
 	go func() {
954
-		out, _, err := dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "acct-test")
954
+		out, _, err := dockerCmdWithError("run", "syscall-test", "acct-test")
955 955
 		if err == nil || !strings.Contains(out, "Operation not permitted") {
956 956
 			errChan <- fmt.Errorf("expected Operation not permitted, got: %s", out)
957 957
 		}
... ...
@@ -959,7 +959,7 @@ func (s *DockerSuite) TestRunSeccompDefaultProfile(c *check.C) {
959 959
 	}()
960 960
 
961 961
 	go func() {
962
-		out, _, err := dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "ns-test", "echo", "hello")
962
+		out, _, err := dockerCmdWithError("run", "--cap-add", "sys_admin", "syscall-test", "acct-test")
963 963
 		if err == nil || !strings.Contains(out, "Operation not permitted") {
964 964
 			errChan <- fmt.Errorf("expected Operation not permitted, got: %s", out)
965 965
 		}
... ...
@@ -967,6 +967,62 @@ func (s *DockerSuite) TestRunSeccompDefaultProfile(c *check.C) {
967 967
 	}()
968 968
 
969 969
 	go func() {
970
+		out, _, err := dockerCmdWithError("run", "--cap-add", "sys_pacct", "syscall-test", "acct-test")
971
+		if err == nil || !strings.Contains(out, "No such file or directory") {
972
+			errChan <- fmt.Errorf("expected No such file or directory, got: %s", out)
973
+		}
974
+		group.Done()
975
+	}()
976
+
977
+	go func() {
978
+		out, _, err := dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "acct-test")
979
+		if err == nil || !strings.Contains(out, "No such file or directory") {
980
+			errChan <- fmt.Errorf("expected No such file or directory, got: %s", out)
981
+		}
982
+		group.Done()
983
+	}()
984
+
985
+	go func() {
986
+		out, _, err := dockerCmdWithError("run", "--cap-drop", "ALL", "--cap-add", "sys_pacct", "syscall-test", "acct-test")
987
+		if err == nil || !strings.Contains(out, "No such file or directory") {
988
+			errChan <- fmt.Errorf("expected No such file or directory, got: %s", out)
989
+		}
990
+		group.Done()
991
+	}()
992
+
993
+	go func() {
994
+		out, _, err := dockerCmdWithError("run", "syscall-test", "ns-test", "echo", "hello0")
995
+		if err == nil || !strings.Contains(out, "Operation not permitted") {
996
+			errChan <- fmt.Errorf("expected Operation not permitted, got: %s", out)
997
+		}
998
+		group.Done()
999
+	}()
1000
+
1001
+	go func() {
1002
+		out, _, err := dockerCmdWithError("run", "--cap-add", "sys_admin", "syscall-test", "ns-test", "echo", "hello1")
1003
+		if err != nil || !strings.Contains(out, "hello1") {
1004
+			errChan <- fmt.Errorf("expected hello1, got: %s, %v", out, err)
1005
+		}
1006
+		group.Done()
1007
+	}()
1008
+
1009
+	go func() {
1010
+		out, _, err := dockerCmdWithError("run", "--cap-drop", "all", "--cap-add", "sys_admin", "syscall-test", "ns-test", "echo", "hello2")
1011
+		if err != nil || !strings.Contains(out, "hello2") {
1012
+			errChan <- fmt.Errorf("expected hello2, got: %s, %v", out, err)
1013
+		}
1014
+		group.Done()
1015
+	}()
1016
+
1017
+	go func() {
1018
+		out, _, err := dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "ns-test", "echo", "hello3")
1019
+		if err != nil || !strings.Contains(out, "hello3") {
1020
+			errChan <- fmt.Errorf("expected hello3, got: %s, %v", out, err)
1021
+		}
1022
+		group.Done()
1023
+	}()
1024
+
1025
+	go func() {
970 1026
 		out, _, err := dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp=unconfined", "syscall-test", "acct-test")
971 1027
 		if err == nil || !strings.Contains(out, "No such file or directory") {
972 1028
 			errChan <- fmt.Errorf("expected No such file or directory, got: %s", out)
... ...
@@ -975,9 +1031,9 @@ func (s *DockerSuite) TestRunSeccompDefaultProfile(c *check.C) {
975 975
 	}()
976 976
 
977 977
 	go func() {
978
-		out, _, err := dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp=unconfined", "syscall-test", "ns-test", "echo", "hello")
979
-		if err != nil || !strings.Contains(out, "hello") {
980
-			errChan <- fmt.Errorf("expected hello, got: %s, %v", out, err)
978
+		out, _, err := dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp=unconfined", "syscall-test", "ns-test", "echo", "hello4")
979
+		if err != nil || !strings.Contains(out, "hello4") {
980
+			errChan <- fmt.Errorf("expected hello4, got: %s, %v", out, err)
981 981
 		}
982 982
 		group.Done()
983 983
 	}()
... ...
@@ -16,34 +16,78 @@ func (s *DockerSuite) TestSearchOnCentralRegistry(c *check.C) {
16 16
 }
17 17
 
18 18
// TestSearchStarsOptionWithWrongParameter checks that invalid values supplied
// to the search filters (--filter/-f stars, is-automated, is-official) and to
// the deprecated -s/--stars flag are rejected with an error message rather
// than being silently accepted.
func (s *DockerSuite) TestSearchStarsOptionWithWrongParameter(c *check.C) {
	out, _, err := dockerCmdWithError("search", "--filter", "stars=a", "busybox")
	c.Assert(err, check.NotNil, check.Commentf(out))
	c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning"))

	out, _, err = dockerCmdWithError("search", "-f", "stars=a", "busybox")
	c.Assert(err, check.NotNil, check.Commentf(out))
	c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning"))

	out, _, err = dockerCmdWithError("search", "-f", "is-automated=a", "busybox")
	c.Assert(err, check.NotNil, check.Commentf(out))
	c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning"))

	out, _, err = dockerCmdWithError("search", "-f", "is-official=a", "busybox")
	c.Assert(err, check.NotNil, check.Commentf(out))
	c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning"))

	// -s --stars deprecated since Docker 1.13
	out, _, err = dockerCmdWithError("search", "--stars=a", "busybox")
	c.Assert(err, check.NotNil, check.Commentf(out))
	c.Assert(out, checker.Contains, "invalid value", check.Commentf("couldn't find the invalid value warning"))

	// -s --stars deprecated since Docker 1.13
	out, _, err = dockerCmdWithError("search", "-s=-1", "busybox")
	c.Assert(err, check.NotNil, check.Commentf(out))
	c.Assert(out, checker.Contains, "invalid value", check.Commentf("couldn't find the invalid value warning"))
}
27 45
 
28 46
 func (s *DockerSuite) TestSearchCmdOptions(c *check.C) {
29
-	testRequires(c, Network)
47
+	testRequires(c, Network, DaemonIsLinux)
30 48
 
31 49
 	out, _ := dockerCmd(c, "search", "--help")
32 50
 	c.Assert(out, checker.Contains, "Usage:\tdocker search [OPTIONS] TERM")
33 51
 
34 52
 	outSearchCmd, _ := dockerCmd(c, "search", "busybox")
35 53
 	outSearchCmdNotrunc, _ := dockerCmd(c, "search", "--no-trunc=true", "busybox")
54
+
36 55
 	c.Assert(len(outSearchCmd) > len(outSearchCmdNotrunc), check.Equals, false, check.Commentf("The no-trunc option can't take effect."))
37 56
 
38
-	outSearchCmdautomated, _ := dockerCmd(c, "search", "--automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image.
57
+	outSearchCmdautomated, _ := dockerCmd(c, "search", "--filter", "is-automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image.
39 58
 	outSearchCmdautomatedSlice := strings.Split(outSearchCmdautomated, "\n")
40 59
 	for i := range outSearchCmdautomatedSlice {
41
-		c.Assert(strings.HasPrefix(outSearchCmdautomatedSlice[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an AUTOMATED image: %s", out))
60
+		c.Assert(strings.HasPrefix(outSearchCmdautomatedSlice[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an AUTOMATED image: %s", outSearchCmdautomated))
61
+	}
62
+
63
+	outSearchCmdNotOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=false", "busybox") //The busybox is a busybox base image, official image.
64
+	outSearchCmdNotOfficialSlice := strings.Split(outSearchCmdNotOfficial, "\n")
65
+	for i := range outSearchCmdNotOfficialSlice {
66
+		c.Assert(strings.HasPrefix(outSearchCmdNotOfficialSlice[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an OFFICIAL image: %s", outSearchCmdNotOfficial))
42 67
 	}
43 68
 
44
-	outSearchCmdStars, _ := dockerCmd(c, "search", "-s=2", "busybox")
69
+	outSearchCmdOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=true", "busybox") //The busybox is a busybox base image, official image.
70
+	outSearchCmdOfficialSlice := strings.Split(outSearchCmdOfficial, "\n")
71
+	c.Assert(outSearchCmdOfficialSlice, checker.HasLen, 3) // 1 header, 1 line, 1 carriage return
72
+	c.Assert(strings.HasPrefix(outSearchCmdOfficialSlice[1], "busybox "), check.Equals, true, check.Commentf("The busybox is an OFFICIAL image: %s", outSearchCmdNotOfficial))
73
+
74
+	outSearchCmdStars, _ := dockerCmd(c, "search", "--filter", "stars=2", "busybox")
45 75
 	c.Assert(strings.Count(outSearchCmdStars, "[OK]") > strings.Count(outSearchCmd, "[OK]"), check.Equals, false, check.Commentf("The quantity of images with stars should be less than that of all images: %s", outSearchCmdStars))
46 76
 
77
+	dockerCmd(c, "search", "--filter", "is-automated=true", "--filter", "stars=2", "--no-trunc=true", "busybox")
78
+
79
+	// --automated deprecated since Docker 1.13
80
+	outSearchCmdautomated1, _ := dockerCmd(c, "search", "--automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image.
81
+	outSearchCmdautomatedSlice1 := strings.Split(outSearchCmdautomated1, "\n")
82
+	for i := range outSearchCmdautomatedSlice1 {
83
+		c.Assert(strings.HasPrefix(outSearchCmdautomatedSlice1[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an AUTOMATED image: %s", outSearchCmdautomated))
84
+	}
85
+
86
+	// -s --stars deprecated since Docker 1.13
87
+	outSearchCmdStars1, _ := dockerCmd(c, "search", "--stars=2", "busybox")
88
+	c.Assert(strings.Count(outSearchCmdStars1, "[OK]") > strings.Count(outSearchCmd, "[OK]"), check.Equals, false, check.Commentf("The quantity of images with stars should be less than that of all images: %s", outSearchCmdStars1))
89
+
90
+	// -s --stars deprecated since Docker 1.13
47 91
 	dockerCmd(c, "search", "--stars=2", "--automated=true", "--no-trunc=true", "busybox")
48 92
 }
49 93
 
... ...
@@ -930,7 +930,15 @@ func getContainerState(c *check.C, id string) (int, bool, error) {
930 930
 }
931 931
 
932 932
 func buildImageCmd(name, dockerfile string, useCache bool, buildFlags ...string) *exec.Cmd {
933
-	args := []string{"build", "-t", name}
933
+	return buildImageCmdWithHost(name, dockerfile, "", useCache, buildFlags...)
934
+}
935
+
936
+func buildImageCmdWithHost(name, dockerfile, host string, useCache bool, buildFlags ...string) *exec.Cmd {
937
+	args := []string{}
938
+	if host != "" {
939
+		args = append(args, "--host", host)
940
+	}
941
+	args = append(args, "build", "-t", name)
934 942
 	if !useCache {
935 943
 		args = append(args, "--no-cache")
936 944
 	}
... ...
@@ -174,7 +174,6 @@ type Store interface {
174 174
 	CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error)
175 175
 	GetRWLayer(id string) (RWLayer, error)
176 176
 	GetMountID(id string) (string, error)
177
-	ReinitRWLayer(l RWLayer) error
178 177
 	ReleaseRWLayer(RWLayer) ([]Metadata, error)
179 178
 
180 179
 	Cleanup() error
... ...
@@ -495,25 +495,6 @@ func (ls *layerStore) GetMountID(id string) (string, error) {
495 495
 	return mount.mountID, nil
496 496
 }
497 497
 
498
-// ReinitRWLayer reinitializes a given mount to the layerstore, specifically
499
-// initializing the usage count. It should strictly only be used in the
500
-// daemon's restore path to restore state of live containers.
501
-func (ls *layerStore) ReinitRWLayer(l RWLayer) error {
502
-	ls.mountL.Lock()
503
-	defer ls.mountL.Unlock()
504
-
505
-	m, ok := ls.mounts[l.Name()]
506
-	if !ok {
507
-		return ErrMountDoesNotExist
508
-	}
509
-
510
-	if err := m.incActivityCount(l); err != nil {
511
-		return err
512
-	}
513
-
514
-	return nil
515
-}
516
-
517 498
 func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) {
518 499
 	ls.mountL.Lock()
519 500
 	defer ls.mountL.Unlock()
... ...
@@ -174,10 +174,7 @@ func getCachedLayer(l Layer) *roLayer {
174 174
 }
175 175
 
176 176
 func getMountLayer(l RWLayer) *mountedLayer {
177
-	if rl, ok := l.(*referencedRWLayer); ok {
178
-		return rl.mountedLayer
179
-	}
180
-	return l.(*mountedLayer)
177
+	return l.(*referencedRWLayer).mountedLayer
181 178
 }
182 179
 
183 180
 func createMetadata(layers ...Layer) []Metadata {
... ...
@@ -400,14 +397,11 @@ func TestStoreRestore(t *testing.T) {
400 400
 	if err := ioutil.WriteFile(filepath.Join(path, "testfile.txt"), []byte("nothing here"), 0644); err != nil {
401 401
 		t.Fatal(err)
402 402
 	}
403
-	assertActivityCount(t, m, 1)
404 403
 
405 404
 	if err := m.Unmount(); err != nil {
406 405
 		t.Fatal(err)
407 406
 	}
408 407
 
409
-	assertActivityCount(t, m, 0)
410
-
411 408
 	ls2, err := NewStoreFromGraphDriver(ls.(*layerStore).store, ls.(*layerStore).driver)
412 409
 	if err != nil {
413 410
 		t.Fatal(err)
... ...
@@ -438,20 +432,15 @@ func TestStoreRestore(t *testing.T) {
438 438
 		t.Fatalf("Unexpected path %s, expected %s", mountPath, path)
439 439
 	}
440 440
 
441
-	assertActivityCount(t, m2, 1)
442
-
443 441
 	if mountPath, err := m2.Mount(""); err != nil {
444 442
 		t.Fatal(err)
445 443
 	} else if path != mountPath {
446 444
 		t.Fatalf("Unexpected path %s, expected %s", mountPath, path)
447 445
 	}
448
-	assertActivityCount(t, m2, 2)
449 446
 	if err := m2.Unmount(); err != nil {
450 447
 		t.Fatal(err)
451 448
 	}
452 449
 
453
-	assertActivityCount(t, m2, 1)
454
-
455 450
 	b, err := ioutil.ReadFile(filepath.Join(path, "testfile.txt"))
456 451
 	if err != nil {
457 452
 		t.Fatal(err)
... ...
@@ -464,8 +453,6 @@ func TestStoreRestore(t *testing.T) {
464 464
 		t.Fatal(err)
465 465
 	}
466 466
 
467
-	assertActivityCount(t, m2, 0)
468
-
469 467
 	if metadata, err := ls2.ReleaseRWLayer(m2); err != nil {
470 468
 		t.Fatal(err)
471 469
 	} else if len(metadata) != 0 {
... ...
@@ -674,13 +661,6 @@ func assertReferences(t *testing.T, references ...Layer) {
674 674
 	}
675 675
 }
676 676
 
677
-func assertActivityCount(t *testing.T, l RWLayer, expected int) {
678
-	rl := l.(*referencedRWLayer)
679
-	if rl.activityCount != expected {
680
-		t.Fatalf("Unexpected activity count %d, expected %d", rl.activityCount, expected)
681
-	}
682
-}
683
-
684 677
 func TestRegisterExistingLayer(t *testing.T) {
685 678
 	ls, _, cleanup := newTestStore(t)
686 679
 	defer cleanup()
... ...
@@ -1,4 +1,4 @@
1
-// +build linux freebsd darwin openbsd
1
+// +build linux freebsd darwin openbsd solaris
2 2
 
3 3
 package layer
4 4
 
... ...
@@ -380,8 +380,6 @@ func TestMountMigration(t *testing.T) {
380 380
 		Kind: archive.ChangeAdd,
381 381
 	})
382 382
 
383
-	assertActivityCount(t, rwLayer1, 1)
384
-
385 383
 	if _, err := ls.CreateRWLayer("migration-mount", layer1.ChainID(), "", nil, nil); err == nil {
386 384
 		t.Fatal("Expected error creating mount with same name")
387 385
 	} else if err != ErrMountNameConflict {
... ...
@@ -401,16 +399,10 @@ func TestMountMigration(t *testing.T) {
401 401
 		t.Fatal(err)
402 402
 	}
403 403
 
404
-	assertActivityCount(t, rwLayer2, 1)
405
-	assertActivityCount(t, rwLayer1, 1)
406
-
407 404
 	if _, err := rwLayer2.Mount(""); err != nil {
408 405
 		t.Fatal(err)
409 406
 	}
410 407
 
411
-	assertActivityCount(t, rwLayer2, 2)
412
-	assertActivityCount(t, rwLayer1, 1)
413
-
414 408
 	if metadata, err := ls.Release(layer1); err != nil {
415 409
 		t.Fatal(err)
416 410
 	} else if len(metadata) > 0 {
... ...
@@ -420,8 +412,6 @@ func TestMountMigration(t *testing.T) {
420 420
 	if err := rwLayer1.Unmount(); err != nil {
421 421
 		t.Fatal(err)
422 422
 	}
423
-	assertActivityCount(t, rwLayer2, 2)
424
-	assertActivityCount(t, rwLayer1, 0)
425 423
 
426 424
 	if _, err := ls.ReleaseRWLayer(rwLayer1); err != nil {
427 425
 		t.Fatal(err)
... ...
@@ -430,9 +420,6 @@ func TestMountMigration(t *testing.T) {
430 430
 	if err := rwLayer2.Unmount(); err != nil {
431 431
 		t.Fatal(err)
432 432
 	}
433
-	if _, err := ls.ReleaseRWLayer(rwLayer2); err == nil {
434
-		t.Fatal("Expected error deleting active mount")
435
-	}
436 433
 	if err := rwLayer2.Unmount(); err != nil {
437 434
 		t.Fatal(err)
438 435
 	}
... ...
@@ -2,7 +2,6 @@ package layer
2 2
 
3 3
 import (
4 4
 	"io"
5
-	"sync"
6 5
 
7 6
 	"github.com/docker/docker/pkg/archive"
8 7
 )
... ...
@@ -50,14 +49,6 @@ func (ml *mountedLayer) Parent() Layer {
50 50
 	return nil
51 51
 }
52 52
 
53
-func (ml *mountedLayer) Mount(mountLabel string) (string, error) {
54
-	return ml.layerStore.driver.Get(ml.mountID, mountLabel)
55
-}
56
-
57
-func (ml *mountedLayer) Unmount() error {
58
-	return ml.layerStore.driver.Put(ml.mountID)
59
-}
60
-
61 53
 func (ml *mountedLayer) Size() (int64, error) {
62 54
 	return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent())
63 55
 }
... ...
@@ -83,106 +74,30 @@ func (ml *mountedLayer) hasReferences() bool {
83 83
 	return len(ml.references) > 0
84 84
 }
85 85
 
86
-func (ml *mountedLayer) incActivityCount(ref RWLayer) error {
87
-	rl, ok := ml.references[ref]
88
-	if !ok {
89
-		return ErrLayerNotRetained
90
-	}
91
-
92
-	if err := rl.acquire(); err != nil {
93
-		return err
94
-	}
95
-	return nil
96
-}
97
-
98 86
 func (ml *mountedLayer) deleteReference(ref RWLayer) error {
99
-	rl, ok := ml.references[ref]
100
-	if !ok {
87
+	if _, ok := ml.references[ref]; !ok {
101 88
 		return ErrLayerNotRetained
102 89
 	}
103
-
104
-	if err := rl.release(); err != nil {
105
-		return err
106
-	}
107 90
 	delete(ml.references, ref)
108
-
109 91
 	return nil
110 92
 }
111 93
 
112 94
 func (ml *mountedLayer) retakeReference(r RWLayer) {
113 95
 	if ref, ok := r.(*referencedRWLayer); ok {
114
-		ref.activityCount = 0
115 96
 		ml.references[ref] = ref
116 97
 	}
117 98
 }
118 99
 
119 100
 type referencedRWLayer struct {
120 101
 	*mountedLayer
121
-
122
-	activityL     sync.Mutex
123
-	activityCount int
124
-}
125
-
126
-func (rl *referencedRWLayer) acquire() error {
127
-	rl.activityL.Lock()
128
-	defer rl.activityL.Unlock()
129
-
130
-	rl.activityCount++
131
-
132
-	return nil
133
-}
134
-
135
-func (rl *referencedRWLayer) release() error {
136
-	rl.activityL.Lock()
137
-	defer rl.activityL.Unlock()
138
-
139
-	if rl.activityCount > 0 {
140
-		return ErrActiveMount
141
-	}
142
-
143
-	rl.activityCount = -1
144
-
145
-	return nil
146 102
 }
147 103
 
148 104
 func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) {
149
-	rl.activityL.Lock()
150
-	defer rl.activityL.Unlock()
151
-
152
-	if rl.activityCount == -1 {
153
-		return "", ErrLayerNotRetained
154
-	}
155
-
156
-	if rl.activityCount > 0 {
157
-		rl.activityCount++
158
-		return rl.path, nil
159
-	}
160
-
161
-	m, err := rl.mountedLayer.Mount(mountLabel)
162
-	if err == nil {
163
-		rl.activityCount++
164
-		rl.path = m
165
-	}
166
-	return m, err
105
+	return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel)
167 106
 }
168 107
 
169 108
 // Unmount decrements the activity count and unmounts the underlying layer
170 109
 // Callers should only call `Unmount` once per call to `Mount`, even on error.
171 110
 func (rl *referencedRWLayer) Unmount() error {
172
-	rl.activityL.Lock()
173
-	defer rl.activityL.Unlock()
174
-
175
-	if rl.activityCount == 0 {
176
-		return ErrNotMounted
177
-	}
178
-	if rl.activityCount == -1 {
179
-		return ErrLayerNotRetained
180
-	}
181
-
182
-	rl.activityCount--
183
-	if rl.activityCount > 0 {
184
-		return nil
185
-	}
186
-
187
-	return rl.mountedLayer.Unmount()
111
+	return rl.layerStore.driver.Put(rl.mountedLayer.mountID)
188 112
 }
... ...
@@ -13,7 +13,7 @@ import (
13 13
 	containerd "github.com/docker/containerd/api/grpc/types"
14 14
 	"github.com/docker/docker/pkg/idtools"
15 15
 	"github.com/docker/docker/pkg/mount"
16
-	"github.com/opencontainers/specs/specs-go"
16
+	specs "github.com/opencontainers/specs/specs-go"
17 17
 	"golang.org/x/net/context"
18 18
 )
19 19
 
... ...
@@ -380,6 +380,81 @@ func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier {
380 380
 	return w
381 381
 }
382 382
 
383
+func (clnt *client) restore(cont *containerd.Container, options ...CreateOption) (err error) {
384
+	clnt.lock(cont.Id)
385
+	defer clnt.unlock(cont.Id)
386
+
387
+	logrus.Debugf("restore container %s state %s", cont.Id, cont.Status)
388
+
389
+	containerID := cont.Id
390
+	if _, err := clnt.getContainer(containerID); err == nil {
391
+		return fmt.Errorf("container %s is already active", containerID)
392
+	}
393
+
394
+	defer func() {
395
+		if err != nil {
396
+			clnt.deleteContainer(cont.Id)
397
+		}
398
+	}()
399
+
400
+	container := clnt.newContainer(cont.BundlePath, options...)
401
+	container.systemPid = systemPid(cont)
402
+
403
+	var terminal bool
404
+	for _, p := range cont.Processes {
405
+		if p.Pid == InitFriendlyName {
406
+			terminal = p.Terminal
407
+		}
408
+	}
409
+
410
+	iopipe, err := container.openFifos(terminal)
411
+	if err != nil {
412
+		return err
413
+	}
414
+
415
+	if err := clnt.backend.AttachStreams(containerID, *iopipe); err != nil {
416
+		return err
417
+	}
418
+
419
+	clnt.appendContainer(container)
420
+
421
+	err = clnt.backend.StateChanged(containerID, StateInfo{
422
+		CommonStateInfo: CommonStateInfo{
423
+			State: StateRestore,
424
+			Pid:   container.systemPid,
425
+		}})
426
+
427
+	if err != nil {
428
+		return err
429
+	}
430
+
431
+	if event, ok := clnt.remote.pastEvents[containerID]; ok {
432
+		// This should only be a pause or resume event
433
+		if event.Type == StatePause || event.Type == StateResume {
434
+			return clnt.backend.StateChanged(containerID, StateInfo{
435
+				CommonStateInfo: CommonStateInfo{
436
+					State: event.Type,
437
+					Pid:   container.systemPid,
438
+				}})
439
+		}
440
+
441
+		logrus.Warnf("unexpected backlog event: %#v", event)
442
+	}
443
+
444
+	return nil
445
+}
446
+
447
+func (clnt *client) Restore(containerID string, options ...CreateOption) error {
448
+	cont, err := clnt.getContainerdContainer(containerID)
449
+	if err == nil && cont.Status != "stopped" {
450
+		if err := clnt.restore(cont, options...); err != nil {
451
+			logrus.Errorf("error restoring %s: %v", containerID, err)
452
+		}
453
+		return nil
454
+	}
455
+	return clnt.setExited(containerID)
456
+}
457
+
383 458
 type exitNotifier struct {
384 459
 	id     string
385 460
 	client *client
386 461
deleted file mode 100644
... ...
@@ -1,85 +0,0 @@
1
-// +build experimental
2
-
3
-package libcontainerd
4
-
5
-import (
6
-	"fmt"
7
-
8
-	"github.com/Sirupsen/logrus"
9
-	containerd "github.com/docker/containerd/api/grpc/types"
10
-)
11
-
12
-func (clnt *client) restore(cont *containerd.Container, options ...CreateOption) (err error) {
13
-	clnt.lock(cont.Id)
14
-	defer clnt.unlock(cont.Id)
15
-
16
-	logrus.Debugf("restore container %s state %s", cont.Id, cont.Status)
17
-
18
-	containerID := cont.Id
19
-	if _, err := clnt.getContainer(containerID); err == nil {
20
-		return fmt.Errorf("container %s is already active", containerID)
21
-	}
22
-
23
-	defer func() {
24
-		if err != nil {
25
-			clnt.deleteContainer(cont.Id)
26
-		}
27
-	}()
28
-
29
-	container := clnt.newContainer(cont.BundlePath, options...)
30
-	container.systemPid = systemPid(cont)
31
-
32
-	var terminal bool
33
-	for _, p := range cont.Processes {
34
-		if p.Pid == InitFriendlyName {
35
-			terminal = p.Terminal
36
-		}
37
-	}
38
-
39
-	iopipe, err := container.openFifos(terminal)
40
-	if err != nil {
41
-		return err
42
-	}
43
-
44
-	if err := clnt.backend.AttachStreams(containerID, *iopipe); err != nil {
45
-		return err
46
-	}
47
-
48
-	clnt.appendContainer(container)
49
-
50
-	err = clnt.backend.StateChanged(containerID, StateInfo{
51
-		CommonStateInfo: CommonStateInfo{
52
-			State: StateRestore,
53
-			Pid:   container.systemPid,
54
-		}})
55
-
56
-	if err != nil {
57
-		return err
58
-	}
59
-
60
-	if event, ok := clnt.remote.pastEvents[containerID]; ok {
61
-		// This should only be a pause or resume event
62
-		if event.Type == StatePause || event.Type == StateResume {
63
-			return clnt.backend.StateChanged(containerID, StateInfo{
64
-				CommonStateInfo: CommonStateInfo{
65
-					State: event.Type,
66
-					Pid:   container.systemPid,
67
-				}})
68
-		}
69
-
70
-		logrus.Warnf("unexpected backlog event: %#v", event)
71
-	}
72
-
73
-	return nil
74
-}
75
-
76
-func (clnt *client) Restore(containerID string, options ...CreateOption) error {
77
-	cont, err := clnt.getContainerdContainer(containerID)
78
-	if err == nil && cont.Status != "stopped" {
79
-		if err := clnt.restore(cont, options...); err != nil {
80
-			logrus.Errorf("error restoring %s: %v", containerID, err)
81
-		}
82
-		return nil
83
-	}
84
-	return clnt.setExited(containerID)
85
-}
86 1
deleted file mode 100644
... ...
@@ -1,41 +0,0 @@
1
-// +build !experimental
2
-
3
-package libcontainerd
4
-
5
-import (
6
-	"syscall"
7
-	"time"
8
-
9
-	"github.com/Sirupsen/logrus"
10
-)
11
-
12
-func (clnt *client) Restore(containerID string, options ...CreateOption) error {
13
-	w := clnt.getOrCreateExitNotifier(containerID)
14
-	defer w.close()
15
-	cont, err := clnt.getContainerdContainer(containerID)
16
-	if err == nil && cont.Status != "stopped" {
17
-		clnt.lock(cont.Id)
18
-		container := clnt.newContainer(cont.BundlePath)
19
-		container.systemPid = systemPid(cont)
20
-		clnt.appendContainer(container)
21
-		clnt.unlock(cont.Id)
22
-
23
-		if err := clnt.Signal(containerID, int(syscall.SIGTERM)); err != nil {
24
-			logrus.Errorf("error sending sigterm to %v: %v", containerID, err)
25
-		}
26
-		select {
27
-		case <-time.After(10 * time.Second):
28
-			if err := clnt.Signal(containerID, int(syscall.SIGKILL)); err != nil {
29
-				logrus.Errorf("error sending sigkill to %v: %v", containerID, err)
30
-			}
31
-			select {
32
-			case <-time.After(2 * time.Second):
33
-			case <-w.wait():
34
-				return nil
35
-			}
36
-		case <-w.wait():
37
-			return nil
38
-		}
39
-	}
40
-	return clnt.setExited(containerID)
41
-}
42 1
new file mode 100644
... ...
@@ -0,0 +1,56 @@
0
+package libcontainerd
1
+
2
+type client struct {
3
+	clientCommon
4
+
5
+	// Platform specific properties below here.
6
+}
7
+
8
+func (clnt *client) AddProcess(containerID, processFriendlyName string, specp Process) error {
9
+	return nil
10
+}
11
+
12
+func (clnt *client) Create(containerID string, spec Spec, options ...CreateOption) (err error) {
13
+	return nil
14
+}
15
+
16
+func (clnt *client) Signal(containerID string, sig int) error {
17
+	return nil
18
+}
19
+
20
+func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error {
21
+	return nil
22
+}
23
+
24
+func (clnt *client) Pause(containerID string) error {
25
+	return nil
26
+}
27
+
28
+func (clnt *client) Resume(containerID string) error {
29
+	return nil
30
+}
31
+
32
+func (clnt *client) Stats(containerID string) (*Stats, error) {
33
+	return nil, nil
34
+}
35
+
36
+// Restore is the handler for restoring a container
37
+func (clnt *client) Restore(containerID string, unusedOnWindows ...CreateOption) error {
38
+	return nil
39
+}
40
+
41
+func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) {
42
+	return nil, nil
43
+}
44
+
45
+// Summary returns a summary of the processes running in a container.
46
+func (clnt *client) Summary(containerID string) ([]Summary, error) {
47
+	return nil, nil
48
+}
49
+
50
+// UpdateResources updates resources for a running container.
51
+func (clnt *client) UpdateResources(containerID string, resources Resources) error {
52
+	// Updating resource isn't supported on Solaris
53
+	// but we should return nil for enabling updating container
54
+	return nil
55
+}
... ...
@@ -291,6 +291,9 @@ func (clnt *client) AddProcess(containerID, processFriendlyName string, procToAd
291 291
 		return err
292 292
 	}
293 293
 
294
+	// TEMP: Work around Windows BS/DEL behavior.
295
+	iopipe.Stdin = fixStdinBackspaceBehavior(iopipe.Stdin, procToAdd.Terminal)
296
+
294 297
 	// Convert io.ReadClosers to io.Readers
295 298
 	if stdout != nil {
296 299
 		iopipe.Stdout = openReaderFromPipe(stdout)
297 300
new file mode 100644
... ...
@@ -0,0 +1,5 @@
0
+package libcontainerd
1
+
2
+type container struct {
3
+	containerCommon
4
+}
... ...
@@ -102,6 +102,9 @@ func (ctr *container) start() error {
102 102
 	}
103 103
 	ctr.startedAt = time.Now()
104 104
 
105
+	// TEMP: Work around Windows BS/DEL behavior.
106
+	iopipe.Stdin = fixStdinBackspaceBehavior(iopipe.Stdin, ctr.ociSpec.Process.Terminal)
107
+
105 108
 	// Convert io.ReadClosers to io.Readers
106 109
 	if stdout != nil {
107 110
 		iopipe.Stdout = openReaderFromPipe(stdout)
108 111
new file mode 100644
... ...
@@ -0,0 +1,6 @@
0
+package libcontainerd
1
+
2
+// process keeps the state for both main container process and exec process.
3
+type process struct {
4
+	processCommon
5
+}
... ...
@@ -2,6 +2,8 @@ package libcontainerd
2 2
 
3 3
 import (
4 4
 	"io"
5
+
6
+	"github.com/docker/docker/pkg/system"
5 7
 )
6 8
 
7 9
 // process keeps the state for both main container process and exec process.
... ...
@@ -25,3 +27,33 @@ func openReaderFromPipe(p io.ReadCloser) io.Reader {
25 25
 	}()
26 26
 	return r
27 27
 }
28
+
29
+// fixStdinBackspaceBehavior works around a bug in Windows before build 14350
30
+// where it interpreted DEL as VK_DELETE instead of as VK_BACK. This replaces
31
+// DEL with BS to work around this.
32
+func fixStdinBackspaceBehavior(w io.WriteCloser, tty bool) io.WriteCloser {
33
+	if !tty || system.GetOSVersion().Build >= 14350 {
34
+		return w
35
+	}
36
+	return &delToBsWriter{w}
37
+}
38
+
39
+type delToBsWriter struct {
40
+	io.WriteCloser
41
+}
42
+
43
+func (w *delToBsWriter) Write(b []byte) (int, error) {
44
+	const (
45
+		backspace = 0x8
46
+		del       = 0x7f
47
+	)
48
+	bc := make([]byte, len(b))
49
+	for i, c := range b {
50
+		if c == del {
51
+			bc[i] = backspace
52
+		} else {
53
+			bc[i] = c
54
+		}
55
+	}
56
+	return w.WriteCloser.Write(bc)
57
+}
28 58
new file mode 100644
... ...
@@ -0,0 +1,25 @@
0
+package libcontainerd
1
+
2
+import "github.com/docker/docker/pkg/locker"
3
+
4
+type remote struct {
5
+}
6
+
7
+func (r *remote) Client(b Backend) (Client, error) {
8
+	c := &client{
9
+		clientCommon: clientCommon{
10
+			backend:    b,
11
+			containers: make(map[string]*container),
12
+			locker:     locker.New(),
13
+		},
14
+	}
15
+	return c, nil
16
+}
17
+
18
+func (r *remote) Cleanup() {
19
+}
20
+
21
+// New creates a fresh instance of libcontainerd remote.
22
+func New(_ string, _ ...RemoteOption) (Remote, error) {
23
+	return &remote{}, nil
24
+}
0 25
new file mode 100644
... ...
@@ -0,0 +1,38 @@
0
+package libcontainerd
1
+
2
+import (
3
+	"github.com/opencontainers/specs/specs-go"
4
+)
5
+
6
+// Spec is the base configuration for the container.  It specifies platform
7
+// independent configuration. This information must be included when the
8
+// bundle is packaged for distribution.
9
+type Spec specs.Spec
10
+
11
+// Process contains information to start a specific application inside the container.
12
+type Process struct {
13
+	// Terminal creates an interactive terminal for the container.
14
+	Terminal bool `json:"terminal"`
15
+	// Args specifies the binary and arguments for the application to execute.
16
+	Args []string `json:"args"`
17
+}
18
+
19
+// Stats contains a stats properties from containerd.
20
+type Stats struct{}
21
+
22
+// Summary container a container summary from containerd
23
+type Summary struct{}
24
+
25
+// StateInfo contains description about the new state container has entered.
26
+type StateInfo struct {
27
+	CommonStateInfo
28
+
29
+	// Platform specific StateInfo
30
+}
31
+
32
+// User specifies Solaris specific user and group information for the container's
33
+// main process.
34
+type User specs.User
35
+
36
+// Resources defines updatable container resource values.
37
+type Resources struct{}
... ...
@@ -91,7 +91,9 @@ set as the **URL**, the repository is cloned locally and then sent as the contex
91 91
    Remove intermediate containers after a successful build. The default is *true*.
92 92
 
93 93
 **-t**, **--tag**=""
94
-   Repository names (and optionally with tags) to be applied to the resulting image in case of success.
94
+   Repository names (and optionally with tags) to be applied to the resulting 
95
+   image in case of success. Refer to **docker-tag(1)** for more information
96
+   about valid tag names.
95 97
 
96 98
 **-m**, **--memory**=*MEMORY*
97 99
   Memory limit
... ...
@@ -16,7 +16,8 @@ CONTAINER [REPOSITORY[:TAG]]
16 16
 # DESCRIPTION
17 17
 Create a new image from an existing container specified by name or
18 18
 container ID.  The new image will contain the contents of the
19
-container filesystem, *excluding* any data volumes.
19
+container filesystem, *excluding* any data volumes. Refer to **docker-tag(1)**
20
+for more information about valid image and tag names.
20 21
 
21 22
 While the `docker commit` command is a convenient way of extending an
22 23
 existing image, you should prefer the use of a Dockerfile and `docker
... ...
@@ -78,7 +78,16 @@ you must be explicit with a relative or absolute path, for example:
78 78
     `/path/to/file:name.txt` or `./file:name.txt`
79 79
 
80 80
 It is not possible to copy certain system files such as resources under
81
-`/proc`, `/sys`, `/dev`, and mounts created by the user in the container.
81
+`/proc`, `/sys`, `/dev`, tmpfs, and mounts created by the user in the container.
82
+However, you can still copy such files by manually running `tar` in `docker exec`.
83
+For example (consider `SRC_PATH` and `DEST_PATH` are directories):
84
+
85
+    $ docker exec foo tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH -
86
+
87
+or
88
+
89
+    $ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i foo tar Cxf DEST_PATH -
90
+
82 91
 
83 92
 Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive.
84 93
 The command extracts the content of the tar to the `DEST_PATH` in container's
... ...
@@ -197,7 +197,9 @@ two memory nodes.
197 197
                                'host': use the host shared memory,semaphores and message queues inside the container.  Note: the host mode gives the container full access to local shared memory and is therefore considered insecure.
198 198
 
199 199
 **--isolation**="*default*"
200
-   Isolation specifies the type of isolation technology used by containers. 
200
+   Isolation specifies the type of isolation technology used by containers. Note
201
+that the default on Windows server is `process`, and the default on Windows client
202
+is `hyperv`. Linux only supports `default`.
201 203
 
202 204
 **--kernel-memory**=""
203 205
    Kernel memory limit (format: `<number>[<unit>]`, where unit = b, k, m or g)
... ...
@@ -47,7 +47,7 @@ Docker networks report the following events:
47 47
 
48 48
 The `--since` and `--until` parameters can be Unix timestamps, date formatted
49 49
 timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed
50
-relative to the client machine’s time. If you do not provide the `--since` option,
50
+relative to the client machine's time. If you do not provide the `--since` option,
51 51
 the command returns only new and/or live events.  Supported formats for date
52 52
 formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
53 53
 `2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
... ...
@@ -21,7 +21,7 @@ any logs at the time you execute docker logs).
21 21
 
22 22
 The **docker logs --follow** command combines commands **docker logs** and
23 23
 **docker attach**. It will first return all logs from the beginning and
24
-then continue streaming new output from the container’s stdout and stderr.
24
+then continue streaming new output from the container's stdout and stderr.
25 25
 
26 26
 **Warning**: This command works only for the **json-file** or **journald**
27 27
 logging drivers.
... ...
@@ -46,7 +46,7 @@ logging drivers.
46 46
    Output the specified number of lines at the end of logs (defaults to all logs)
47 47
 
48 48
 The `--since` option can be Unix timestamps, date formatted timestamps, or Go
49
-duration strings (e.g. `10m`, `1h30m`) computed relative to the client machine’s
49
+duration strings (e.g. `10m`, `1h30m`) computed relative to the client machine's
50 50
 time. Supported formats for date formatted time stamps include RFC3339Nano,
51 51
 RFC3339, `2006-01-02T15:04:05`, `2006-01-02T15:04:05.999999999`,
52 52
 `2006-01-02Z07:00`, and `2006-01-02`. The local timezone on the client will be
... ...
@@ -13,7 +13,8 @@ NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG]
13 13
 
14 14
 This command pushes an image or a repository to a registry. If you do not
15 15
 specify a `REGISTRY_HOST`, the command uses Docker's public registry located at
16
-`registry-1.docker.io` by default. 
16
+`registry-1.docker.io` by default. Refer to **docker-tag(1)** for more 
17
+information about valid image and tag names.
17 18
 
18 19
 # OPTIONS
19 20
 **--help**
... ...
@@ -103,7 +103,7 @@ pull** IMAGE, before it starts the container from that image.
103 103
 
104 104
    In foreground mode (the default when **-d**
105 105
 is not specified), **docker run** can start the process in the container
106
-and attach the console to the process’s standard input, output, and standard
106
+and attach the console to the process's standard input, output, and standard
107 107
 error. It can even pretend to be a TTY (this is what most commandline
108 108
 executables expect) and pass along signals. The **-a** option can be set for
109 109
 each of stdin, stdout, and stderr.
... ...
@@ -297,7 +297,9 @@ redirection on the host system.
297 297
                                'host': use the host shared memory,semaphores and message queues inside the container.  Note: the host mode gives the container full access to local shared memory and is therefore considered insecure.
298 298
 
299 299
 **--isolation**="*default*"
300
-   Isolation specifies the type of isolation technology used by containers.
300
+   Isolation specifies the type of isolation technology used by containers. Note
301
+that the default on Windows server is `process`, and the default on Windows client
302
+is `hyperv`. Linux only supports `default`.
301 303
 
302 304
 **-l**, **--label**=[]
303 305
    Set metadata on the container (e.g., --label com.example.key=value)
... ...
@@ -735,7 +737,7 @@ This should list the message sent to logger.
735 735
 
736 736
 If you do not specify -a then Docker will attach everything (stdin,stdout,stderr)
737 737
 . You can specify to which of the three standard streams (stdin, stdout, stderr)
738
-you’d like to connect instead, as in:
738
+you'd like to connect instead, as in:
739 739
 
740 740
     # docker run -a stdin -a stdout -i -t fedora /bin/bash
741 741
 
... ...
@@ -849,7 +851,7 @@ If a container is connected to the default bridge network and `linked`
849 849
 with other containers, then the container's `/etc/hosts` file is updated
850 850
 with the linked container's name.
851 851
 
852
-> **Note** Since Docker may live update the container’s `/etc/hosts` file, there
852
+> **Note** Since Docker may live update the container's `/etc/hosts` file, there
853 853
 may be situations when processes inside the container can end up reading an
854 854
 empty or incomplete `/etc/hosts` file. In most cases, retrying the read again
855 855
 should fix the problem.
... ...
@@ -6,10 +6,9 @@ docker-search - Search the Docker Hub for images
6 6
 
7 7
 # SYNOPSIS
8 8
 **docker search**
9
-[**--automated**]
9
+[**-f**|**--filter**[=*[]*]]
10 10
 [**--help**]
11 11
 [**--no-trunc**]
12
-[**-s**|**--stars**[=*0*]]
13 12
 TERM
14 13
 
15 14
 # DESCRIPTION
... ...
@@ -21,8 +20,12 @@ of stars awarded, whether the image is official, and whether it is automated.
21 21
 *Note* - Search queries will only return up to 25 results
22 22
 
23 23
 # OPTIONS
24
-**--automated**=*true*|*false*
25
-   Only show automated builds. The default is *false*.
24
+
25
+**-f**, **--filter**=[]
26
+   Filter output based on these conditions:
27
+   - stars=<numberOfStar>
28
+   - is-automated=(true|false)
29
+   - is-official=(true|false)
26 30
 
27 31
 **--help**
28 32
   Print usage statement
... ...
@@ -30,9 +33,6 @@ of stars awarded, whether the image is official, and whether it is automated.
30 30
 **--no-trunc**=*true*|*false*
31 31
    Don't truncate output. The default is *false*.
32 32
 
33
-**-s**, **--stars**=*X*
34
-   Only displays with at least X stars. The default is zero.
35
-
36 33
 # EXAMPLES
37 34
 
38 35
 ## Search Docker Hub for ranked images
... ...
@@ -40,7 +40,7 @@ of stars awarded, whether the image is official, and whether it is automated.
40 40
 Search a registry for the term 'fedora' and only display those images
41 41
 ranked 3 or higher:
42 42
 
43
-    $ docker search -s 3 fedora
43
+    $ docker search --filter=stars=3 fedora
44 44
     NAME                  DESCRIPTION                                    STARS OFFICIAL  AUTOMATED
45 45
     mattdm/fedora         A basic Fedora image corresponding roughly...  50
46 46
     fedora                (Semi) Official Fedora base image.             38
... ...
@@ -52,7 +52,7 @@ ranked 3 or higher:
52 52
 Search Docker Hub for the term 'fedora' and only display automated images
53 53
 ranked 1 or higher:
54 54
 
55
-    $ docker search --automated -s 1 fedora
55
+    $ docker search --filter=is-automated=true --filter=stars=1 fedora
56 56
     NAME               DESCRIPTION                                     STARS OFFICIAL  AUTOMATED
57 57
     goldmann/wildfly   A WildFly application server running on a ...   3               [OK]
58 58
     tutum/fedora-20    Fedora 20 image with SSH access. For the r...   1               [OK]
... ...
@@ -62,4 +62,5 @@ April 2014, Originally compiled by William Henry (whenry at redhat dot com)
62 62
 based on docker.com source material and internal work.
63 63
 June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
64 64
 April 2015, updated by Mary Anthony for v2 <mary@docker.com>
65
+April 2016, updated by Vincent Demeester <vincent@sbr.pm>
65 66
 
... ...
@@ -7,44 +7,59 @@ docker-tag - Tag an image into a repository
7 7
 # SYNOPSIS
8 8
 **docker tag**
9 9
 [**--help**]
10
-IMAGE[:TAG] [REGISTRY_HOST/][USERNAME/]NAME[:TAG]
10
+NAME[:TAG] NAME[:TAG]
11 11
 
12 12
 # DESCRIPTION
13 13
 Assigns a new alias to an image in a registry. An alias refers to the
14 14
 entire image name including the optional `TAG` after the ':'. 
15 15
 
16
-If you do not specify a `REGISTRY_HOST`, the command uses Docker's public
17
-registry located at `registry-1.docker.io` by default. 
18
-
19 16
 # "OPTIONS"
20 17
 **--help**
21 18
    Print usage statement.
22 19
 
23
-**REGISTRY_HOST**
24
-   The hostname of the registry if required. This may also include the port
25
-separated by a ':'
26
-
27
-**USERNAME**
28
-   The username or other qualifying identifier for the image.
29
-
30 20
 **NAME**
31
-   The image name.
21
+   The image name which is made up of slash-separated name components, 
22
+   optionally prefixed by a registry hostname. The hostname must comply with 
23
+   standard DNS rules, but may not contain underscores. If a hostname is 
24
+   present, it may optionally be followed by a port number in the format 
25
+   `:8080`. If not present, the command uses Docker's public registry located at
26
+   `registry-1.docker.io` by default. Name components may contain lowercase 
27
+   characters, digits and separators. A separator is defined as a period, one or 
28
+   two underscores, or one or more dashes. A name component may not start or end 
29
+   with a separator.
32 30
 
33 31
 **TAG**
34
-   The tag you are assigning to the image.  Though this is arbitrary it is
35
-recommended to be used for a version to distinguish images with the same name.
36
-Also, for consistency tags should only include a-z0-9-_. .
37
-Note that here TAG is a part of the overall name or "tag".
32
+   The tag assigned to the image to version and distinguish images with the same
33
+   name. The tag name may contain lowercase and uppercase characters, digits, 
34
+   underscores, periods and dashes. A tag name may not start with a period or a 
35
+   dash and may contain a maximum of 128 characters.
38 36
 
39 37
 # EXAMPLES
40 38
 
41
-## Giving an image a new alias
39
+## Tagging an image referenced by ID
42 40
 
43
-Here is an example of aliasing an image (e.g., 0e5574283393) as "httpd" and 
44
-tagging it into the "fedora" repository with "version1.0":
41
+To tag a local image with ID "0e5574283393" into the "fedora" repository with 
42
+"version1.0":
45 43
 
46 44
     docker tag 0e5574283393 fedora/httpd:version1.0
47 45
 
46
+## Tagging an image referenced by Name
47
+
48
+To tag a local image with name "httpd" into the "fedora" repository with 
49
+"version1.0":
50
+
51
+    docker tag httpd fedora/httpd:version1.0
52
+
53
+Note that since the tag name is not specified, the alias is created for an
54
+existing local version `httpd:latest`.
55
+
56
+## Tagging an image referenced by Name and Tag
57
+
58
+To tag a local image with name "httpd" and tag "test" into the "fedora"
59
+repository with "version1.0.test":
60
+
61
+    docker tag httpd:test fedora/httpd:version1.0.test
62
+
48 63
 ## Tagging an image for a private repository
49 64
 
50 65
 To push an image to a private registry and not the central Docker
... ...
@@ -39,6 +39,7 @@ dockerd - Enable daemon mode
39 39
 [**--ip-masq**[=*true*]]
40 40
 [**--iptables**[=*true*]]
41 41
 [**--ipv6**]
42
+[**--isolation**[=*default*]]
42 43
 [**-l**|**--log-level**[=*info*]]
43 44
 [**--label**[=*[]*]]
44 45
 [**--log-driver**[=*json-file*]]
... ...
@@ -183,6 +184,11 @@ unix://[/path/to/socket] to use.
183 183
 **--ipv6**=*true*|*false*
184 184
   Enable IPv6 support. Default is false. Docker will create an IPv6-enabled bridge with address fe80::1 which will allow you to create IPv6-enabled containers. Use together with `--fixed-cidr-v6` to provide globally routable IPv6 addresses. IPv6 forwarding will be enabled if not used with `--ip-forward=false`. This may collide with your host's current IPv6 settings. For more information please consult the documentation about "Advanced Networking - IPv6".
185 185
 
186
+**--isolation**="*default*"
187
+   Isolation specifies the type of isolation technology used by containers. Note
188
+that the default on Windows server is `process`, and the default on Windows client
189
+is `hyperv`. Linux only supports `default`.
190
+
186 191
 **-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*"
187 192
   Set the logging level. Default is `info`.
188 193
 
189 194
new file mode 100644
... ...
@@ -0,0 +1,11 @@
0
+package oci
1
+
2
+import (
3
+	"github.com/opencontainers/specs/specs-go"
4
+)
5
+
6
+// DefaultSpec returns default oci spec used by docker.
7
+func DefaultSpec() specs.Spec {
8
+	s := specs.Spec{}
9
+	return s
10
+}
... ...
@@ -1,4 +1,4 @@
1
-// +build linux freebsd
1
+// +build linux freebsd solaris
2 2
 
3 3
 package directory
4 4
 
5 5
new file mode 100644
... ...
@@ -0,0 +1,7 @@
0
+package fileutils
1
+
2
+// GetTotalUsedFds Returns the number of used File Descriptors.
3
+// On Solaris these limits are per process and not systemwide
4
+func GetTotalUsedFds() int {
5
+	return -1
6
+}
... ...
@@ -14,30 +14,32 @@ import (
14 14
 )
15 15
 
16 16
 func TestIsKilledFalseWithNonKilledProcess(t *testing.T) {
17
-	// TODO Windows: Port this test
18
-	if runtime.GOOS == "windows" {
19
-		t.Skip("Needs porting to Windows")
17
+	var lsCmd *exec.Cmd
18
+	if runtime.GOOS != "windows" {
19
+		lsCmd = exec.Command("ls")
20
+	} else {
21
+		lsCmd = exec.Command("cmd", "/c", "dir")
20 22
 	}
21 23
 
22
-	lsCmd := exec.Command("ls")
23
-	lsCmd.Start()
24
-	// Wait for it to finish
25
-	err := lsCmd.Wait()
24
+	err := lsCmd.Run()
26 25
 	if IsKilled(err) {
27 26
 		t.Fatalf("Expected the ls command to not be killed, was.")
28 27
 	}
29 28
 }
30 29
 
31 30
 func TestIsKilledTrueWithKilledProcess(t *testing.T) {
32
-	// TODO Windows: Using golang 1.5.3, this seems to hit
33
-	// a bug in go where Process.Kill() causes a panic.
34
-	// Needs further investigation @jhowardmsft
35
-	if runtime.GOOS == "windows" {
36
-		t.SkipNow()
31
+	var longCmd *exec.Cmd
32
+	if runtime.GOOS != "windows" {
33
+		longCmd = exec.Command("top")
34
+	} else {
35
+		longCmd = exec.Command("powershell", "while ($true) { sleep 1 }")
37 36
 	}
38
-	longCmd := exec.Command("top")
37
+
39 38
 	// Start a command
40
-	longCmd.Start()
39
+	err := longCmd.Start()
40
+	if err != nil {
41
+		t.Fatal(err)
42
+	}
41 43
 	// Capture the error when *dying*
42 44
 	done := make(chan error, 1)
43 45
 	go func() {
... ...
@@ -46,7 +48,7 @@ func TestIsKilledTrueWithKilledProcess(t *testing.T) {
46 46
 	// Then kill it
47 47
 	longCmd.Process.Kill()
48 48
 	// Get the error
49
-	err := <-done
49
+	err = <-done
50 50
 	if !IsKilled(err) {
51 51
 		t.Fatalf("Expected the command to be killed, was not.")
52 52
 	}
53 53
new file mode 100644
... ...
@@ -0,0 +1,31 @@
0
+package listeners
1
+
2
+import (
3
+	"crypto/tls"
4
+	"fmt"
5
+	"net"
6
+
7
+	"github.com/docker/go-connections/sockets"
8
+)
9
+
10
+// Init creates new listeners for the server.
11
+func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) (ls []net.Listener, err error) {
12
+	switch proto {
13
+	case "tcp":
14
+		l, err := sockets.NewTCPSocket(addr, tlsConfig)
15
+		if err != nil {
16
+			return nil, err
17
+		}
18
+		ls = append(ls, l)
19
+	case "unix":
20
+		l, err := sockets.NewUnixSocket(addr, socketGroup)
21
+		if err != nil {
22
+			return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err)
23
+		}
24
+		ls = append(ls, l)
25
+	default:
26
+		return nil, fmt.Errorf("Invalid protocol format: %q", proto)
27
+	}
28
+
29
+	return
30
+}
... ...
@@ -1,4 +1,4 @@
1
-// +build !windows
1
+// +build !windows,!solaris
2 2
 
3 3
 package listeners
4 4
 
... ...
@@ -1,4 +1,4 @@
1
-// +build !linux,!freebsd freebsd,!cgo
1
+// +build !linux,!freebsd freebsd,!cgo solaris,!cgo
2 2
 
3 3
 package mount
4 4
 
... ...
@@ -9,8 +9,8 @@ func GetMounts() ([]*Info, error) {
9 9
 	return parseMountTable()
10 10
 }
11 11
 
12
-// Mounted looks at /proc/self/mountinfo to determine of the specified
13
-// mountpoint has been mounted
12
+// Mounted determines if a specified mountpoint has been mounted.
13
+// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab.
14 14
 func Mounted(mountpoint string) (bool, error) {
15 15
 	entries, err := parseMountTable()
16 16
 	if err != nil {
17 17
new file mode 100644
... ...
@@ -0,0 +1,33 @@
0
+// +build solaris,cgo
1
+
2
+package mount
3
+
4
+import (
5
+	"golang.org/x/sys/unix"
6
+	"unsafe"
7
+)
8
+
9
+// #include <stdlib.h>
10
+// #include <stdio.h>
11
+// #include <sys/mount.h>
12
+// int Mount(const char *spec, const char *dir, int mflag,
13
+// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) {
14
+//     return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen);
15
+// }
16
+import "C"
17
+
18
+func mount(device, target, mType string, flag uintptr, data string) error {
19
+	spec := C.CString(device)
20
+	dir := C.CString(target)
21
+	fstype := C.CString(mType)
22
+	_, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0)
23
+	C.free(unsafe.Pointer(spec))
24
+	C.free(unsafe.Pointer(dir))
25
+	C.free(unsafe.Pointer(fstype))
26
+	return err
27
+}
28
+
29
+func unmount(target string, flag int) error {
30
+	err := unix.Unmount(target, flag)
31
+	return err
32
+}
... ...
@@ -1,4 +1,4 @@
1
-// +build !linux,!freebsd freebsd,!cgo
1
+// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
2 2
 
3 3
 package mount
4 4
 
5 5
new file mode 100644
... ...
@@ -0,0 +1,37 @@
0
+// +build solaris,cgo
1
+
2
+package mount
3
+
4
+/*
5
+#include <stdio.h>
6
+#include <sys/mnttab.h>
7
+*/
8
+import "C"
9
+
10
+import (
11
+	"fmt"
12
+)
13
+
14
+func parseMountTable() ([]*Info, error) {
15
+	mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r"))
16
+	if mnttab == nil {
17
+		return nil, fmt.Errorf("Failed to open %s", C.MNTTAB)
18
+	}
19
+
20
+	var out []*Info
21
+	var mp C.struct_mnttab
22
+
23
+	ret := C.getmntent(mnttab, &mp)
24
+	for ret == 0 {
25
+		var mountinfo Info
26
+		mountinfo.Mountpoint = C.GoString(mp.mnt_mountp)
27
+		mountinfo.Source = C.GoString(mp.mnt_special)
28
+		mountinfo.Fstype = C.GoString(mp.mnt_fstype)
29
+		mountinfo.Opts = C.GoString(mp.mnt_mntopts)
30
+		out = append(out, &mountinfo)
31
+		ret = C.getmntent(mnttab, &mp)
32
+	}
33
+
34
+	C.fclose(mnttab)
35
+	return out, nil
36
+}
... ...
@@ -1,4 +1,4 @@
1
-// +build !windows,!linux,!freebsd freebsd,!cgo
1
+// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
2 2
 
3 3
 package mount
4 4
 
5 5
new file mode 100644
... ...
@@ -0,0 +1,14 @@
0
+package kernel
1
+
2
+import (
3
+	"golang.org/x/sys/unix"
4
+)
5
+
6
+func uname() (*unix.Utsname, error) {
7
+	uts := &unix.Utsname{}
8
+
9
+	if err := unix.Uname(uts); err != nil {
10
+		return nil, err
11
+	}
12
+	return uts, nil
13
+}
... ...
@@ -1,4 +1,4 @@
1
-// +build !linux
1
+// +build !linux,!solaris
2 2
 
3 3
 package kernel
4 4
 
5 5
new file mode 100644
... ...
@@ -0,0 +1,37 @@
0
+// +build solaris,cgo
1
+
2
+package operatingsystem
3
+
4
+/*
5
+#include <zone.h>
6
+*/
7
+import "C"
8
+
9
+import (
10
+	"bytes"
11
+	"errors"
12
+	"io/ioutil"
13
+)
14
+
15
+var etcOsRelease = "/etc/release"
16
+
17
+// GetOperatingSystem gets the name of the current operating system.
18
+func GetOperatingSystem() (string, error) {
19
+	b, err := ioutil.ReadFile(etcOsRelease)
20
+	if err != nil {
21
+		return "", err
22
+	}
23
+	if i := bytes.Index(b, []byte("\n")); i >= 0 {
24
+		b = bytes.Trim(b[:i], " ")
25
+		return string(b), nil
26
+	}
27
+	return "", errors.New("release not found")
28
+}
29
+
30
+// IsContainerized returns true if we are running inside a container.
31
+func IsContainerized() (bool, error) {
32
+	if C.getzoneid() != 0 {
33
+		return true, nil
34
+	}
35
+	return false, nil
36
+}
0 37
deleted file mode 100644
... ...
@@ -1,15 +0,0 @@
1
-package platform
2
-
3
-import (
4
-	"os/exec"
5
-)
6
-
7
-// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …)
8
-func runtimeArchitecture() (string, error) {
9
-	cmd := exec.Command("uname", "-m")
10
-	machine, err := cmd.Output()
11
-	if err != nil {
12
-		return "", err
13
-	}
14
-	return string(machine), nil
15
-}
16 1
new file mode 100644
... ...
@@ -0,0 +1,18 @@
0
+// +build freebsd solaris
1
+
2
+package platform
3
+
4
+import (
5
+	"os/exec"
6
+	"strings"
7
+)
8
+
9
+// runtimeArchitecture get the name of the current architecture (i86pc, sun4v)
10
+func runtimeArchitecture() (string, error) {
11
+	cmd := exec.Command("/usr/bin/uname", "-m")
12
+	machine, err := cmd.Output()
13
+	if err != nil {
14
+		return "", err
15
+	}
16
+	return strings.TrimSpace(string(machine)), nil
17
+}
0 18
deleted file mode 100644
... ...
@@ -1,23 +0,0 @@
1
-// +build freebsd
2
-
3
-package reexec
4
-
5
-import (
6
-	"os/exec"
7
-)
8
-
9
-// Self returns the path to the current process's binary.
10
-// Uses os.Args[0].
11
-func Self() string {
12
-	return naiveSelf()
13
-}
14
-
15
-// Command returns *exec.Cmd which have Path as current binary.
16
-// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
17
-// be set to "/usr/bin/docker".
18
-func Command(args ...string) *exec.Cmd {
19
-	return &exec.Cmd{
20
-		Path: Self(),
21
-		Args: args,
22
-	}
23
-}
24 1
new file mode 100644
... ...
@@ -0,0 +1,23 @@
0
+// +build freebsd solaris
1
+
2
+package reexec
3
+
4
+import (
5
+	"os/exec"
6
+)
7
+
8
+// Self returns the path to the current process's binary.
9
+// Uses os.Args[0].
10
+func Self() string {
11
+	return naiveSelf()
12
+}
13
+
14
+// Command returns *exec.Cmd which have Path as current binary.
15
+// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
16
+// be set to "/usr/bin/docker".
17
+func Command(args ...string) *exec.Cmd {
18
+	return &exec.Cmd{
19
+		Path: Self(),
20
+		Args: args,
21
+	}
22
+}
... ...
@@ -1,4 +1,4 @@
1
-// +build !linux,!windows,!freebsd
1
+// +build !linux,!windows,!freebsd,!solaris
2 2
 
3 3
 package reexec
4 4
 
5 5
new file mode 100644
... ...
@@ -0,0 +1,42 @@
0
+package signal
1
+
2
+import (
3
+	"syscall"
4
+)
5
+
6
+// SignalMap is a map of Solaris signals.
7
+// SIGINFO and SIGTHR not defined for Solaris
8
+var SignalMap = map[string]syscall.Signal{
9
+	"ABRT":   syscall.SIGABRT,
10
+	"ALRM":   syscall.SIGALRM,
11
+	"BUF":    syscall.SIGBUS,
12
+	"CHLD":   syscall.SIGCHLD,
13
+	"CONT":   syscall.SIGCONT,
14
+	"EMT":    syscall.SIGEMT,
15
+	"FPE":    syscall.SIGFPE,
16
+	"HUP":    syscall.SIGHUP,
17
+	"ILL":    syscall.SIGILL,
18
+	"INT":    syscall.SIGINT,
19
+	"IO":     syscall.SIGIO,
20
+	"IOT":    syscall.SIGIOT,
21
+	"KILL":   syscall.SIGKILL,
22
+	"LWP":    syscall.SIGLWP,
23
+	"PIPE":   syscall.SIGPIPE,
24
+	"PROF":   syscall.SIGPROF,
25
+	"QUIT":   syscall.SIGQUIT,
26
+	"SEGV":   syscall.SIGSEGV,
27
+	"STOP":   syscall.SIGSTOP,
28
+	"SYS":    syscall.SIGSYS,
29
+	"TERM":   syscall.SIGTERM,
30
+	"TRAP":   syscall.SIGTRAP,
31
+	"TSTP":   syscall.SIGTSTP,
32
+	"TTIN":   syscall.SIGTTIN,
33
+	"TTOU":   syscall.SIGTTOU,
34
+	"URG":    syscall.SIGURG,
35
+	"USR1":   syscall.SIGUSR1,
36
+	"USR2":   syscall.SIGUSR2,
37
+	"VTALRM": syscall.SIGVTALRM,
38
+	"WINCH":  syscall.SIGWINCH,
39
+	"XCPU":   syscall.SIGXCPU,
40
+	"XFSZ":   syscall.SIGXFSZ,
41
+}
... ...
@@ -1,4 +1,4 @@
1
-// +build !linux,!darwin,!freebsd,!windows
1
+// +build !linux,!darwin,!freebsd,!windows,!solaris
2 2
 
3 3
 package signal
4 4
 
5 5
new file mode 100644
... ...
@@ -0,0 +1,119 @@
0
+// +build solaris,cgo
1
+
2
+package sysinfo
3
+
4
+import (
5
+	"bytes"
6
+	"os/exec"
7
+	"strconv"
8
+	"strings"
9
+)
10
+
11
+/*
12
+#cgo LDFLAGS: -llgrp
13
+#include <unistd.h>
14
+#include <stdlib.h>
15
+#include <sys/lgrp_user.h>
16
+int getLgrpCount() {
17
+	lgrp_cookie_t lgrpcookie = LGRP_COOKIE_NONE;
18
+	uint_t nlgrps;
19
+
20
+	if ((lgrpcookie = lgrp_init(LGRP_VIEW_OS)) == LGRP_COOKIE_NONE) {
21
+		return -1;
22
+	}
23
+	nlgrps = lgrp_nlgrps(lgrpcookie);
24
+	return nlgrps;
25
+}
26
+*/
27
+import "C"
28
+
29
+// IsCPUSharesAvailable returns whether CPUShares setting is supported.
30
+// We need FSS to be set as default scheduling class to support CPU Shares
31
+func IsCPUSharesAvailable() bool {
32
+	cmd := exec.Command("/usr/sbin/dispadmin", "-d")
33
+	outBuf := new(bytes.Buffer)
34
+	errBuf := new(bytes.Buffer)
35
+	cmd.Stderr = errBuf
36
+	cmd.Stdout = outBuf
37
+
38
+	if err := cmd.Run(); err != nil {
39
+		return false
40
+	}
41
+	return (strings.Contains(outBuf.String(), "FSS"))
42
+}
43
+
44
+// New returns a new SysInfo, using the filesystem to detect which features
45
+// the kernel supports.
46
+//NOTE Solaris: If we change the below capabilities be sure
47
+// to update verifyPlatformContainerSettings() in daemon_solaris.go
48
+func New(quiet bool) *SysInfo {
49
+	sysInfo := &SysInfo{}
50
+	sysInfo.cgroupMemInfo = setCgroupMem(quiet)
51
+	sysInfo.cgroupCPUInfo = setCgroupCPU(quiet)
52
+	sysInfo.cgroupBlkioInfo = setCgroupBlkioInfo(quiet)
53
+	sysInfo.cgroupCpusetInfo = setCgroupCPUsetInfo(quiet)
54
+
55
+	sysInfo.IPv4ForwardingDisabled = false
56
+
57
+	sysInfo.AppArmor = false
58
+
59
+	return sysInfo
60
+}
61
+
62
+// setCgroupMem reads the memory information for Solaris.
63
+func setCgroupMem(quiet bool) cgroupMemInfo {
64
+
65
+	return cgroupMemInfo{
66
+		MemoryLimit:       true,
67
+		SwapLimit:         true,
68
+		MemoryReservation: false,
69
+		OomKillDisable:    false,
70
+		MemorySwappiness:  false,
71
+		KernelMemory:      false,
72
+	}
73
+}
74
+
75
+// setCgroupCPU reads the cpu information for Solaris.
76
+func setCgroupCPU(quiet bool) cgroupCPUInfo {
77
+
78
+	return cgroupCPUInfo{
79
+		CPUShares:    true,
80
+		CPUCfsPeriod: false,
81
+		CPUCfsQuota:  true,
82
+	}
83
+}
84
+
85
+// blkio switches are not supported in Solaris.
86
+func setCgroupBlkioInfo(quiet bool) cgroupBlkioInfo {
87
+
88
+	return cgroupBlkioInfo{
89
+		BlkioWeight:       false,
90
+		BlkioWeightDevice: false,
91
+	}
92
+}
93
+
94
+// setCgroupCPUsetInfo reads the cpuset information for Solaris.
95
+func setCgroupCPUsetInfo(quiet bool) cgroupCpusetInfo {
96
+
97
+	return cgroupCpusetInfo{
98
+		Cpuset: true,
99
+		Cpus:   getCPUCount(),
100
+		Mems:   getLgrpCount(),
101
+	}
102
+}
103
+
104
+func getCPUCount() string {
105
+	ncpus := C.sysconf(C._SC_NPROCESSORS_ONLN)
106
+	if ncpus <= 0 {
107
+		return ""
108
+	}
109
+	return strconv.FormatInt(int64(ncpus), 16)
110
+}
111
+
112
+func getLgrpCount() string {
113
+	nlgrps := C.getLgrpCount()
114
+	if nlgrps <= 0 {
115
+		return ""
116
+	}
117
+	return strconv.FormatInt(int64(nlgrps), 16)
118
+}
0 119
new file mode 100644
... ...
@@ -0,0 +1,128 @@
0
+// +build solaris,cgo
1
+
2
+package system
3
+
4
+import (
5
+	"fmt"
6
+	"unsafe"
7
+)
8
+
9
+// #cgo LDFLAGS: -lkstat
10
+// #include <unistd.h>
11
+// #include <stdlib.h>
12
+// #include <stdio.h>
13
+// #include <kstat.h>
14
+// #include <sys/swap.h>
15
+// #include <sys/param.h>
16
+// struct swaptable *allocSwaptable(int num) {
17
+//	struct swaptable *st;
18
+//	struct swapent *swapent;
19
+// 	st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
20
+//	swapent = st->swt_ent;
21
+//	for (int i = 0; i < num; i++,swapent++) {
22
+//		swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
23
+//	}
24
+//	st->swt_n = num;
25
+//	return st;
26
+//}
27
+// void freeSwaptable (struct swaptable *st) {
28
+//	struct swapent *swapent = st->swt_ent;
29
+//	for (int i = 0; i < st->swt_n; i++,swapent++) {
30
+//		free(swapent->ste_path);
31
+//	}
32
+//	free(st);
33
+// }
34
+// swapent_t getSwapEnt(swapent_t *ent, int i) {
35
+//	return ent[i];
36
+// }
37
+// int64_t getPpKernel() {
38
+//	int64_t pp_kernel = 0;
39
+//	kstat_ctl_t *ksc;
40
+//	kstat_t *ks;
41
+//	kstat_named_t *knp;
42
+//	kid_t kid;
43
+//
44
+//	if ((ksc = kstat_open()) == NULL) {
45
+//		return -1;
46
+//	}
47
+//	if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
48
+//		return -1;
49
+//	}
50
+//	if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
51
+//	    ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
52
+//		return -1;
53
+//	}
54
+//	switch (knp->data_type) {
55
+//	case KSTAT_DATA_UINT64:
56
+//		pp_kernel = knp->value.ui64;
57
+//		break;
58
+//	case KSTAT_DATA_UINT32:
59
+//		pp_kernel = knp->value.ui32;
60
+//		break;
61
+//	}
62
+//	pp_kernel *= sysconf(_SC_PAGESIZE);
63
+//	return (pp_kernel > 0 ? pp_kernel : -1);
64
+// }
65
+import "C"
66
+
67
+// Get the system memory info using sysconf same as prtconf
68
+func getTotalMem() int64 {
69
+	pagesize := C.sysconf(C._SC_PAGESIZE)
70
+	npages := C.sysconf(C._SC_PHYS_PAGES)
71
+	return int64(pagesize * npages)
72
+}
73
+
74
+func getFreeMem() int64 {
75
+	pagesize := C.sysconf(C._SC_PAGESIZE)
76
+	npages := C.sysconf(C._SC_AVPHYS_PAGES)
77
+	return int64(pagesize * npages)
78
+}
79
+
80
+// ReadMemInfo retrieves memory statistics of the host system and returns a
81
+//  MemInfo type.
82
+func ReadMemInfo() (*MemInfo, error) {
83
+
84
+	ppKernel := C.getPpKernel()
85
+	MemTotal := getTotalMem()
86
+	MemFree := getFreeMem()
87
+	SwapTotal, SwapFree, err := getSysSwap()
88
+
89
+	if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
90
+		SwapFree < 0 {
91
+		return nil, fmt.Errorf("Error getting system memory info %v\n", err)
92
+	}
93
+
94
+	meminfo := &MemInfo{}
95
+	// Total memory is total physical memory less than memory locked by kernel
96
+	meminfo.MemTotal = MemTotal - int64(ppKernel)
97
+	meminfo.MemFree = MemFree
98
+	meminfo.SwapTotal = SwapTotal
99
+	meminfo.SwapFree = SwapFree
100
+
101
+	return meminfo, nil
102
+}
103
+
104
+func getSysSwap() (int64, int64, error) {
105
+	var tSwap int64
106
+	var fSwap int64
107
+	var diskblksPerPage int64
108
+	num, err := C.swapctl(C.SC_GETNSWP, nil)
109
+	if err != nil {
110
+		return -1, -1, err
111
+	}
112
+	st := C.allocSwaptable(num)
113
+	_, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st))
114
+	if err != nil {
115
+		C.freeSwaptable(st)
116
+		return -1, -1, err
117
+	}
118
+
119
+	diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT)
120
+	for i := 0; i < int(num); i++ {
121
+		swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i))
122
+		tSwap += int64(swapent.ste_pages) * diskblksPerPage
123
+		fSwap += int64(swapent.ste_free) * diskblksPerPage
124
+	}
125
+	C.freeSwaptable(st)
126
+	return tSwap, fSwap, nil
127
+}
... ...
@@ -1,4 +1,4 @@
1
-// +build !linux,!windows
1
+// +build !linux,!windows,!solaris
2 2
 
3 3
 package system
4 4
 
... ...
@@ -15,3 +15,20 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
15 15
 		rdev: uint64(s.Rdev),
16 16
 		mtim: s.Mtim}, nil
17 17
 }
18
+
19
+// FromStatT loads a system.StatT from a syscal.Stat_t.
20
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
21
+	return fromStatT(s)
22
+}
23
+
24
+// Stat takes a path to a file and returns
25
+// a system.StatT type pertaining to that file.
26
+//
27
+// Throws an error if the file does not exist
28
+func Stat(path string) (*StatT, error) {
29
+	s := &syscall.Stat_t{}
30
+	if err := syscall.Stat(path, s); err != nil {
31
+		return nil, err
32
+	}
33
+	return fromStatT(s)
34
+}
... ...
@@ -3,10 +3,13 @@ package system
3 3
 import (
4 4
 	"syscall"
5 5
 	"unsafe"
6
+
7
+	"github.com/Sirupsen/logrus"
6 8
 )
7 9
 
8 10
 var (
9
-	ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0")
11
+	ntuserApiset      = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0")
12
+	procGetVersionExW = modkernel32.NewProc("GetVersionExW")
10 13
 )
11 14
 
12 15
 // OSVersion is a wrapper for Windows version information
... ...
@@ -18,6 +21,21 @@ type OSVersion struct {
18 18
 	Build        uint16
19 19
 }
20 20
 
21
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
22
+type osVersionInfoEx struct {
23
+	OSVersionInfoSize uint32
24
+	MajorVersion      uint32
25
+	MinorVersion      uint32
26
+	BuildNumber       uint32
27
+	PlatformID        uint32
28
+	CSDVersion        [128]uint16
29
+	ServicePackMajor  uint16
30
+	ServicePackMinor  uint16
31
+	SuiteMask         uint16
32
+	ProductType       byte
33
+	Reserve           byte
34
+}
35
+
21 36
 // GetOSVersion gets the operating system version on Windows. Note that
22 37
 // docker.exe must be manifested to get the correct version information.
23 38
 func GetOSVersion() OSVersion {
... ...
@@ -34,6 +52,18 @@ func GetOSVersion() OSVersion {
34 34
 	return osv
35 35
 }
36 36
 
37
+// IsWindowsClient returns true if the SKU is client
38
+func IsWindowsClient() bool {
39
+	osviex := &osVersionInfoEx{OSVersionInfoSize: 284}
40
+	r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex)))
41
+	if r1 == 0 {
42
+		logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err)
43
+		return false
44
+	}
45
+	const verNTWorkstation = 0x00000001
46
+	return osviex.ProductType == verNTWorkstation
47
+}
48
+
37 49
 // Unmount is a platform-specific helper function to call
38 50
 // the unmount syscall. Not supported on Windows
39 51
 func Unmount(dest string) error {
... ...
@@ -83,11 +83,13 @@ func useNativeConsole() bool {
83 83
 		return false
84 84
 	}
85 85
 
86
-	// TODO Windows. The native emulator still has issues which
87
-	// mean it shouldn't be enabled for everyone. Change this next line to true
88
-	// to change the default to "enable if available". In the meantime, users
89
-	// can still try it out by using USE_NATIVE_CONSOLE env variable.
90
-	return false
86
+	// Must have a post-TP5 RS1 build of Windows Server 2016/Windows 10 for
87
+	// the native console to be usable.
88
+	if osv.Build < 14350 {
89
+		return false
90
+	}
91
+
92
+	return true
91 93
 }
92 94
 
93 95
 // getNativeConsole returns the console modes ('state') for the native Windows console
... ...
@@ -27,11 +27,6 @@
27 27
 			"args": []
28 28
 		},
29 29
 		{
30
-			"name": "arch_prctl",
31
-			"action": "SCMP_ACT_ALLOW",
32
-			"args": []
33
-		},
34
-		{
35 30
 			"name": "bind",
36 31
 			"action": "SCMP_ACT_ALLOW",
37 32
 			"args": []
... ...
@@ -62,21 +57,6 @@
62 62
 			"args": []
63 63
 		},
64 64
 		{
65
-			"name": "chown",
66
-			"action": "SCMP_ACT_ALLOW",
67
-			"args": []
68
-		},
69
-		{
70
-			"name": "chown32",
71
-			"action": "SCMP_ACT_ALLOW",
72
-			"args": []
73
-		},
74
-		{
75
-			"name": "chroot",
76
-			"action": "SCMP_ACT_ALLOW",
77
-			"args": []
78
-		},
79
-		{
80 65
 			"name": "clock_getres",
81 66
 			"action": "SCMP_ACT_ALLOW",
82 67
 			"args": []
... ...
@@ -92,18 +72,6 @@
92 92
 			"args": []
93 93
 		},
94 94
 		{
95
-			"name": "clone",
96
-			"action": "SCMP_ACT_ALLOW",
97
-			"args": [
98
-				{
99
-					"index": 0,
100
-					"value": 2080505856,
101
-					"valueTwo": 0,
102
-					"op": "SCMP_CMP_MASKED_EQ"
103
-				}
104
-			]
105
-		},
106
-		{
107 95
 			"name": "close",
108 96
 			"action": "SCMP_ACT_ALLOW",
109 97
 			"args": []
... ...
@@ -224,11 +192,6 @@
224 224
 			"args": []
225 225
 		},
226 226
 		{
227
-			"name": "fanotify_init",
228
-			"action": "SCMP_ACT_ALLOW",
229
-			"args": []
230
-		},
231
-		{
232 227
 			"name": "fanotify_mark",
233 228
 			"action": "SCMP_ACT_ALLOW",
234 229
 			"args": []
... ...
@@ -249,21 +212,6 @@
249 249
 			"args": []
250 250
 		},
251 251
 		{
252
-			"name": "fchown",
253
-			"action": "SCMP_ACT_ALLOW",
254
-			"args": []
255
-		},
256
-		{
257
-			"name": "fchown32",
258
-			"action": "SCMP_ACT_ALLOW",
259
-			"args": []
260
-		},
261
-		{
262
-			"name": "fchownat",
263
-			"action": "SCMP_ACT_ALLOW",
264
-			"args": []
265
-		},
266
-		{
267 252
 			"name": "fcntl",
268 253
 			"action": "SCMP_ACT_ALLOW",
269 254
 			"args": []
... ...
@@ -609,16 +557,6 @@
609 609
 			"args": []
610 610
 		},
611 611
 		{
612
-			"name": "lchown",
613
-			"action": "SCMP_ACT_ALLOW",
614
-			"args": []
615
-		},
616
-		{
617
-			"name": "lchown32",
618
-			"action": "SCMP_ACT_ALLOW",
619
-			"args": []
620
-		},
621
-		{
622 612
 			"name": "lgetxattr",
623 613
 			"action": "SCMP_ACT_ALLOW",
624 614
 			"args": []
... ...
@@ -1165,11 +1103,6 @@
1165 1165
 			"args": []
1166 1166
 		},
1167 1167
 		{
1168
-			"name": "setdomainname",
1169
-			"action": "SCMP_ACT_ALLOW",
1170
-			"args": []
1171
-		},
1172
-		{
1173 1168
 			"name": "setfsgid",
1174 1169
 			"action": "SCMP_ACT_ALLOW",
1175 1170
 			"args": []
... ...
@@ -1210,11 +1143,6 @@
1210 1210
 			"args": []
1211 1211
 		},
1212 1212
 		{
1213
-			"name": "sethostname",
1214
-			"action": "SCMP_ACT_ALLOW",
1215
-			"args": []
1216
-		},
1217
-		{
1218 1213
 			"name": "setitimer",
1219 1214
 			"action": "SCMP_ACT_ALLOW",
1220 1215
 			"args": []
... ...
@@ -1365,6 +1293,11 @@
1365 1365
 			"args": []
1366 1366
 		},
1367 1367
 		{
1368
+			"name": "socketcall",
1369
+			"action": "SCMP_ACT_ALLOW",
1370
+			"args": []
1371
+		},
1372
+		{
1368 1373
 			"name": "socketpair",
1369 1374
 			"action": "SCMP_ACT_ALLOW",
1370 1375
 			"args": []
... ...
@@ -1580,22 +1513,69 @@
1580 1580
 			"args": []
1581 1581
 		},
1582 1582
 		{
1583
+			"name": "arch_prctl",
1584
+			"action": "SCMP_ACT_ALLOW",
1585
+			"args": []
1586
+		},
1587
+		{
1583 1588
 			"name": "modify_ldt",
1584 1589
 			"action": "SCMP_ACT_ALLOW",
1585 1590
 			"args": []
1586 1591
 		},
1587 1592
 		{
1588
-			"name": "breakpoint",
1593
+			"name": "chown",
1594
+			"action": "SCMP_ACT_ALLOW",
1595
+			"args": []
1596
+		},
1597
+		{
1598
+			"name": "chown32",
1589 1599
 			"action": "SCMP_ACT_ALLOW",
1590 1600
 			"args": []
1591 1601
 		},
1592 1602
 		{
1593
-			"name": "cacheflush",
1603
+			"name": "fchown",
1594 1604
 			"action": "SCMP_ACT_ALLOW",
1595 1605
 			"args": []
1596 1606
 		},
1597 1607
 		{
1598
-			"name": "set_tls",
1608
+			"name": "fchown32",
1609
+			"action": "SCMP_ACT_ALLOW",
1610
+			"args": []
1611
+		},
1612
+		{
1613
+			"name": "fchownat",
1614
+			"action": "SCMP_ACT_ALLOW",
1615
+			"args": []
1616
+		},
1617
+		{
1618
+			"name": "lchown",
1619
+			"action": "SCMP_ACT_ALLOW",
1620
+			"args": []
1621
+		},
1622
+		{
1623
+			"name": "lchown32",
1624
+			"action": "SCMP_ACT_ALLOW",
1625
+			"args": []
1626
+		},
1627
+		{
1628
+			"name": "chroot",
1629
+			"action": "SCMP_ACT_ALLOW",
1630
+			"args": []
1631
+		},
1632
+		{
1633
+			"name": "clone",
1634
+			"action": "SCMP_ACT_ALLOW",
1635
+			"args": [
1636
+				{
1637
+					"index": 0,
1638
+					"value": 2080505856,
1639
+					"valueTwo": 0,
1640
+					"op": "SCMP_CMP_MASKED_EQ"
1641
+				}
1642
+			]
1643
+		},
1644
+		{
1645
+			"name": "fchown",
1599 1646
 			"action": "SCMP_ACT_ALLOW",
1600 1647
 			"args": []
1601 1648
 		}
... ...
@@ -8,6 +8,7 @@ import (
8 8
 	"os"
9 9
 	"path/filepath"
10 10
 
11
+	"github.com/docker/docker/oci"
11 12
 	"github.com/docker/docker/profiles/seccomp"
12 13
 )
13 14
 
... ...
@@ -20,8 +21,10 @@ func main() {
20 20
 	}
21 21
 	f := filepath.Join(wd, "default.json")
22 22
 
23
+	rs := oci.DefaultSpec()
24
+
23 25
 	// write the default profile to the file
24
-	b, err := json.MarshalIndent(seccomp.DefaultProfile, "", "\t")
26
+	b, err := json.MarshalIndent(seccomp.DefaultProfile(&rs), "", "\t")
25 27
 	if err != nil {
26 28
 		panic(err)
27 29
 	}
... ...
@@ -13,8 +13,8 @@ import (
13 13
 //go:generate go run -tags 'seccomp' generate.go
14 14
 
15 15
 // GetDefaultProfile returns the default seccomp profile.
16
-func GetDefaultProfile() (*specs.Seccomp, error) {
17
-	return setupSeccomp(DefaultProfile)
16
+func GetDefaultProfile(rs *specs.Spec) (*specs.Seccomp, error) {
17
+	return setupSeccomp(DefaultProfile(rs))
18 18
 }
19 19
 
20 20
 // LoadProfile takes a file path and decodes the seccomp profile.
... ...
@@ -6,6 +6,7 @@ import (
6 6
 	"syscall"
7 7
 
8 8
 	"github.com/docker/engine-api/types"
9
+	"github.com/opencontainers/specs/specs-go"
9 10
 	libseccomp "github.com/seccomp/libseccomp-golang"
10 11
 )
11 12
 
... ...
@@ -34,10 +35,9 @@ func arches() []types.Arch {
34 34
 }
35 35
 
36 36
 // DefaultProfile defines the whitelist for the default seccomp profile.
37
-var DefaultProfile = &types.Seccomp{
38
-	DefaultAction: types.ActErrno,
39
-	Architectures: arches(),
40
-	Syscalls: []*types.Syscall{
37
+func DefaultProfile(rs *specs.Spec) *types.Seccomp {
38
+
39
+	syscalls := []*types.Syscall{
41 40
 		{
42 41
 			Name:   "accept",
43 42
 			Action: types.ActAllow,
... ...
@@ -59,11 +59,6 @@ var DefaultProfile = &types.Seccomp{
59 59
 			Args:   []*types.Arg{},
60 60
 		},
61 61
 		{
62
-			Name:   "arch_prctl",
63
-			Action: types.ActAllow,
64
-			Args:   []*types.Arg{},
65
-		},
66
-		{
67 62
 			Name:   "bind",
68 63
 			Action: types.ActAllow,
69 64
 			Args:   []*types.Arg{},
... ...
@@ -94,21 +89,6 @@ var DefaultProfile = &types.Seccomp{
94 94
 			Args:   []*types.Arg{},
95 95
 		},
96 96
 		{
97
-			Name:   "chown",
98
-			Action: types.ActAllow,
99
-			Args:   []*types.Arg{},
100
-		},
101
-		{
102
-			Name:   "chown32",
103
-			Action: types.ActAllow,
104
-			Args:   []*types.Arg{},
105
-		},
106
-		{
107
-			Name:   "chroot",
108
-			Action: types.ActAllow,
109
-			Args:   []*types.Arg{},
110
-		},
111
-		{
112 97
 			Name:   "clock_getres",
113 98
 			Action: types.ActAllow,
114 99
 			Args:   []*types.Arg{},
... ...
@@ -124,18 +104,6 @@ var DefaultProfile = &types.Seccomp{
124 124
 			Args:   []*types.Arg{},
125 125
 		},
126 126
 		{
127
-			Name:   "clone",
128
-			Action: types.ActAllow,
129
-			Args: []*types.Arg{
130
-				{
131
-					Index:    0,
132
-					Value:    syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET,
133
-					ValueTwo: 0,
134
-					Op:       types.OpMaskedEqual,
135
-				},
136
-			},
137
-		},
138
-		{
139 127
 			Name:   "close",
140 128
 			Action: types.ActAllow,
141 129
 			Args:   []*types.Arg{},
... ...
@@ -256,11 +224,6 @@ var DefaultProfile = &types.Seccomp{
256 256
 			Args:   []*types.Arg{},
257 257
 		},
258 258
 		{
259
-			Name:   "fanotify_init",
260
-			Action: types.ActAllow,
261
-			Args:   []*types.Arg{},
262
-		},
263
-		{
264 259
 			Name:   "fanotify_mark",
265 260
 			Action: types.ActAllow,
266 261
 			Args:   []*types.Arg{},
... ...
@@ -281,21 +244,6 @@ var DefaultProfile = &types.Seccomp{
281 281
 			Args:   []*types.Arg{},
282 282
 		},
283 283
 		{
284
-			Name:   "fchown",
285
-			Action: types.ActAllow,
286
-			Args:   []*types.Arg{},
287
-		},
288
-		{
289
-			Name:   "fchown32",
290
-			Action: types.ActAllow,
291
-			Args:   []*types.Arg{},
292
-		},
293
-		{
294
-			Name:   "fchownat",
295
-			Action: types.ActAllow,
296
-			Args:   []*types.Arg{},
297
-		},
298
-		{
299 284
 			Name:   "fcntl",
300 285
 			Action: types.ActAllow,
301 286
 			Args:   []*types.Arg{},
... ...
@@ -641,16 +589,6 @@ var DefaultProfile = &types.Seccomp{
641 641
 			Args:   []*types.Arg{},
642 642
 		},
643 643
 		{
644
-			Name:   "lchown",
645
-			Action: types.ActAllow,
646
-			Args:   []*types.Arg{},
647
-		},
648
-		{
649
-			Name:   "lchown32",
650
-			Action: types.ActAllow,
651
-			Args:   []*types.Arg{},
652
-		},
653
-		{
654 644
 			Name:   "lgetxattr",
655 645
 			Action: types.ActAllow,
656 646
 			Args:   []*types.Arg{},
... ...
@@ -1194,11 +1132,6 @@ var DefaultProfile = &types.Seccomp{
1194 1194
 			Args:   []*types.Arg{},
1195 1195
 		},
1196 1196
 		{
1197
-			Name:   "setdomainname",
1198
-			Action: types.ActAllow,
1199
-			Args:   []*types.Arg{},
1200
-		},
1201
-		{
1202 1197
 			Name:   "setfsgid",
1203 1198
 			Action: types.ActAllow,
1204 1199
 			Args:   []*types.Arg{},
... ...
@@ -1239,11 +1172,6 @@ var DefaultProfile = &types.Seccomp{
1239 1239
 			Args:   []*types.Arg{},
1240 1240
 		},
1241 1241
 		{
1242
-			Name:   "sethostname",
1243
-			Action: types.ActAllow,
1244
-			Args:   []*types.Arg{},
1245
-		},
1246
-		{
1247 1242
 			Name:   "setitimer",
1248 1243
 			Action: types.ActAllow,
1249 1244
 			Args:   []*types.Arg{},
... ...
@@ -1394,6 +1322,11 @@ var DefaultProfile = &types.Seccomp{
1394 1394
 			Args:   []*types.Arg{},
1395 1395
 		},
1396 1396
 		{
1397
+			Name:   "socketcall",
1398
+			Action: types.ActAllow,
1399
+			Args:   []*types.Arg{},
1400
+		},
1401
+		{
1397 1402
 			Name:   "socketpair",
1398 1403
 			Action: types.ActAllow,
1399 1404
 			Args:   []*types.Arg{},
... ...
@@ -1608,27 +1541,332 @@ var DefaultProfile = &types.Seccomp{
1608 1608
 			Action: types.ActAllow,
1609 1609
 			Args:   []*types.Arg{},
1610 1610
 		},
1611
-		// i386 specific syscalls
1612
-		{
1613
-			Name:   "modify_ldt",
1614
-			Action: types.ActAllow,
1615
-			Args:   []*types.Arg{},
1616
-		},
1617
-		// arm specific syscalls
1618
-		{
1619
-			Name:   "breakpoint",
1620
-			Action: types.ActAllow,
1621
-			Args:   []*types.Arg{},
1622
-		},
1623
-		{
1624
-			Name:   "cacheflush",
1625
-			Action: types.ActAllow,
1626
-			Args:   []*types.Arg{},
1627
-		},
1628
-		{
1629
-			Name:   "set_tls",
1630
-			Action: types.ActAllow,
1631
-			Args:   []*types.Arg{},
1632
-		},
1633
-	},
1611
+	}
1612
+
1613
+	var arch string
1614
+	var native, err = libseccomp.GetNativeArch()
1615
+	if err == nil {
1616
+		arch = native.String()
1617
+	}
1618
+	switch arch {
1619
+	case "arm", "arm64":
1620
+		syscalls = append(syscalls, []*types.Syscall{
1621
+			{
1622
+				Name:   "breakpoint",
1623
+				Action: types.ActAllow,
1624
+				Args:   []*types.Arg{},
1625
+			},
1626
+			{
1627
+				Name:   "cacheflush",
1628
+				Action: types.ActAllow,
1629
+				Args:   []*types.Arg{},
1630
+			},
1631
+			{
1632
+				Name:   "set_tls",
1633
+				Action: types.ActAllow,
1634
+				Args:   []*types.Arg{},
1635
+			},
1636
+		}...)
1637
+	case "amd64", "x32":
1638
+		syscalls = append(syscalls, []*types.Syscall{
1639
+			{
1640
+				Name:   "arch_prctl",
1641
+				Action: types.ActAllow,
1642
+				Args:   []*types.Arg{},
1643
+			},
1644
+		}...)
1645
+		fallthrough
1646
+	case "x86":
1647
+		syscalls = append(syscalls, []*types.Syscall{
1648
+			{
1649
+				Name:   "modify_ldt",
1650
+				Action: types.ActAllow,
1651
+				Args:   []*types.Arg{},
1652
+			},
1653
+		}...)
1654
+	}
1655
+
1656
+	capSysAdmin := false
1657
+
1658
+	var cap string
1659
+	for _, cap = range rs.Process.Capabilities {
1660
+		switch cap {
1661
+		case "CAP_CHOWN":
1662
+			syscalls = append(syscalls, []*types.Syscall{
1663
+				{
1664
+					Name:   "chown",
1665
+					Action: types.ActAllow,
1666
+					Args:   []*types.Arg{},
1667
+				},
1668
+				{
1669
+					Name:   "chown32",
1670
+					Action: types.ActAllow,
1671
+					Args:   []*types.Arg{},
1672
+				},
1673
+				{
1674
+					Name:   "fchown",
1675
+					Action: types.ActAllow,
1676
+					Args:   []*types.Arg{},
1677
+				},
1678
+				{
1679
+					Name:   "fchown32",
1680
+					Action: types.ActAllow,
1681
+					Args:   []*types.Arg{},
1682
+				},
1683
+				{
1684
+					Name:   "fchownat",
1685
+					Action: types.ActAllow,
1686
+					Args:   []*types.Arg{},
1687
+				},
1688
+				{
1689
+					Name:   "lchown",
1690
+					Action: types.ActAllow,
1691
+					Args:   []*types.Arg{},
1692
+				},
1693
+				{
1694
+					Name:   "lchown32",
1695
+					Action: types.ActAllow,
1696
+					Args:   []*types.Arg{},
1697
+				},
1698
+			}...)
1699
+		case "CAP_DAC_READ_SEARCH":
1700
+			syscalls = append(syscalls, []*types.Syscall{
1701
+				{
1702
+					Name:   "name_to_handle_at",
1703
+					Action: types.ActAllow,
1704
+					Args:   []*types.Arg{},
1705
+				},
1706
+				{
1707
+					Name:   "open_by_handle_at",
1708
+					Action: types.ActAllow,
1709
+					Args:   []*types.Arg{},
1710
+				},
1711
+			}...)
1712
+		case "CAP_IPC_LOCK":
1713
+			syscalls = append(syscalls, []*types.Syscall{
1714
+				{
1715
+					Name:   "mlock",
1716
+					Action: types.ActAllow,
1717
+					Args:   []*types.Arg{},
1718
+				},
1719
+				{
1720
+					Name:   "mlock2",
1721
+					Action: types.ActAllow,
1722
+					Args:   []*types.Arg{},
1723
+				},
1724
+				{
1725
+					Name:   "mlockall",
1726
+					Action: types.ActAllow,
1727
+					Args:   []*types.Arg{},
1728
+				},
1729
+			}...)
1730
+		case "CAP_SYS_ADMIN":
1731
+			capSysAdmin = true
1732
+			syscalls = append(syscalls, []*types.Syscall{
1733
+				{
1734
+					Name:   "bpf",
1735
+					Action: types.ActAllow,
1736
+					Args:   []*types.Arg{},
1737
+				},
1738
+				{
1739
+					Name:   "clone",
1740
+					Action: types.ActAllow,
1741
+					Args:   []*types.Arg{},
1742
+				},
1743
+				{
1744
+					Name:   "fanotify_init",
1745
+					Action: types.ActAllow,
1746
+					Args:   []*types.Arg{},
1747
+				},
1748
+				{
1749
+					Name:   "lookup_dcookie",
1750
+					Action: types.ActAllow,
1751
+					Args:   []*types.Arg{},
1752
+				},
1753
+				{
1754
+					Name:   "mount",
1755
+					Action: types.ActAllow,
1756
+					Args:   []*types.Arg{},
1757
+				},
1758
+				{
1759
+					Name:   "perf_event_open",
1760
+					Action: types.ActAllow,
1761
+					Args:   []*types.Arg{},
1762
+				},
1763
+				{
1764
+					Name:   "setdomainname",
1765
+					Action: types.ActAllow,
1766
+					Args:   []*types.Arg{},
1767
+				},
1768
+				{
1769
+					Name:   "sethostname",
1770
+					Action: types.ActAllow,
1771
+					Args:   []*types.Arg{},
1772
+				},
1773
+				{
1774
+					Name:   "setns",
1775
+					Action: types.ActAllow,
1776
+					Args:   []*types.Arg{},
1777
+				},
1778
+				{
1779
+					Name:   "umount",
1780
+					Action: types.ActAllow,
1781
+					Args:   []*types.Arg{},
1782
+				},
1783
+				{
1784
+					Name:   "umount2",
1785
+					Action: types.ActAllow,
1786
+					Args:   []*types.Arg{},
1787
+				},
1788
+				{
1789
+					Name:   "unshare",
1790
+					Action: types.ActAllow,
1791
+					Args:   []*types.Arg{},
1792
+				},
1793
+			}...)
1794
+		case "CAP_SYS_BOOT":
1795
+			syscalls = append(syscalls, []*types.Syscall{
1796
+				{
1797
+					Name:   "reboot",
1798
+					Action: types.ActAllow,
1799
+					Args:   []*types.Arg{},
1800
+				},
1801
+			}...)
1802
+		case "CAP_SYS_CHROOT":
1803
+			syscalls = append(syscalls, []*types.Syscall{
1804
+				{
1805
+					Name:   "chroot",
1806
+					Action: types.ActAllow,
1807
+					Args:   []*types.Arg{},
1808
+				},
1809
+			}...)
1810
+		case "CAP_SYS_MODULE":
1811
+			syscalls = append(syscalls, []*types.Syscall{
1812
+				{
1813
+					Name:   "delete_module",
1814
+					Action: types.ActAllow,
1815
+					Args:   []*types.Arg{},
1816
+				},
1817
+				{
1818
+					Name:   "init_module",
1819
+					Action: types.ActAllow,
1820
+					Args:   []*types.Arg{},
1821
+				},
1822
+				{
1823
+					Name:   "finit_module",
1824
+					Action: types.ActAllow,
1825
+					Args:   []*types.Arg{},
1826
+				},
1827
+				{
1828
+					Name:   "query_module",
1829
+					Action: types.ActAllow,
1830
+					Args:   []*types.Arg{},
1831
+				},
1832
+			}...)
1833
+		case "CAP_SYS_PACCT":
1834
+			syscalls = append(syscalls, []*types.Syscall{
1835
+				{
1836
+					Name:   "acct",
1837
+					Action: types.ActAllow,
1838
+					Args:   []*types.Arg{},
1839
+				},
1840
+			}...)
1841
+		case "CAP_SYS_PTRACE":
1842
+			syscalls = append(syscalls, []*types.Syscall{
1843
+				{
1844
+					Name:   "kcmp",
1845
+					Action: types.ActAllow,
1846
+					Args:   []*types.Arg{},
1847
+				},
1848
+				{
1849
+					Name:   "process_vm_readv",
1850
+					Action: types.ActAllow,
1851
+					Args:   []*types.Arg{},
1852
+				},
1853
+				{
1854
+					Name:   "process_vm_writev",
1855
+					Action: types.ActAllow,
1856
+					Args:   []*types.Arg{},
1857
+				},
1858
+				{
1859
+					Name:   "ptrace",
1860
+					Action: types.ActAllow,
1861
+					Args:   []*types.Arg{},
1862
+				},
1863
+			}...)
1864
+		case "CAP_SYS_RAWIO":
1865
+			syscalls = append(syscalls, []*types.Syscall{
1866
+				{
1867
+					Name:   "iopl",
1868
+					Action: types.ActAllow,
1869
+					Args:   []*types.Arg{},
1870
+				},
1871
+				{
1872
+					Name:   "ioperm",
1873
+					Action: types.ActAllow,
1874
+					Args:   []*types.Arg{},
1875
+				},
1876
+			}...)
1877
+		case "CAP_SYS_TIME":
1878
+			syscalls = append(syscalls, []*types.Syscall{
1879
+				{
1880
+					Name:   "settimeofday",
1881
+					Action: types.ActAllow,
1882
+					Args:   []*types.Arg{},
1883
+				},
1884
+				{
1885
+					Name:   "stime",
1886
+					Action: types.ActAllow,
1887
+					Args:   []*types.Arg{},
1888
+				},
1889
+				{
1890
+					Name:   "adjtimex",
1891
+					Action: types.ActAllow,
1892
+					Args:   []*types.Arg{},
1893
+				},
1894
+			}...)
1895
+		case "CAP_SYS_TTY_CONFIG":
1896
+			syscalls = append(syscalls, []*types.Syscall{
1897
+				{
1898
+					Name:   "vhangup",
1899
+					Action: types.ActAllow,
1900
+					Args:   []*types.Arg{},
1901
+				},
1902
+			}...)
1903
+		}
1904
+	}
1905
+
1906
+	if !capSysAdmin {
1907
+		syscalls = append(syscalls, []*types.Syscall{
1908
+			{
1909
+				Name:   "clone",
1910
+				Action: types.ActAllow,
1911
+				Args: []*types.Arg{
1912
+					{
1913
+						Index:    0,
1914
+						Value:    syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET,
1915
+						ValueTwo: 0,
1916
+						Op:       types.OpMaskedEqual,
1917
+					},
1918
+				},
1919
+			},
1920
+		}...)
1921
+	}
1922
+
1923
+	// We need some additional syscalls in this case see #22252
1924
+	if !rs.Process.NoNewPrivileges {
1925
+		syscalls = append(syscalls, []*types.Syscall{
1926
+			{
1927
+				Name:   "fchown",
1928
+				Action: types.ActAllow,
1929
+				Args:   []*types.Arg{},
1930
+			},
1931
+		}...)
1932
+	}
1933
+
1934
+	return &types.Seccomp{
1935
+		DefaultAction: types.ActErrno,
1936
+		Architectures: arches(),
1937
+		Syscalls:      syscalls,
1938
+	}
1634 1939
 }
... ...
@@ -2,9 +2,12 @@
2 2
 
3 3
 package seccomp
4 4
 
5
-import "github.com/docker/engine-api/types"
6
-
7
-var (
8
-	// DefaultProfile is a nil pointer on unsupported systems.
9
-	DefaultProfile *types.Seccomp
5
+import (
6
+	"github.com/docker/engine-api/types"
7
+	"github.com/opencontainers/specs/specs-go"
10 8
 )
9
+
10
+// DefaultProfile returns a nil pointer on unsupported systems.
11
+func DefaultProfile(rs *specs.Spec) *types.Seccomp {
12
+	return nil
13
+}
11 14
new file mode 100644
... ...
@@ -0,0 +1,48 @@
0
+package runconfig
1
+
2
+import (
3
+	"fmt"
4
+	"strings"
5
+
6
+	"github.com/docker/engine-api/types/container"
7
+)
8
+
9
+// DefaultDaemonNetworkMode returns the default network stack the daemon should
10
+// use.
11
+func DefaultDaemonNetworkMode() container.NetworkMode {
12
+	return container.NetworkMode("default")
13
+}
14
+
15
+// IsPreDefinedNetwork indicates if a network is predefined by the daemon
16
+func IsPreDefinedNetwork(network string) bool {
17
+	return false
18
+}
19
+
20
+// ValidateNetMode ensures that the various combinations of requested
21
+// network settings are valid.
22
+func ValidateNetMode(c *container.Config, hc *container.HostConfig) error {
23
+	// We may not be passed a host config, such as in the case of docker commit
24
+	if hc == nil {
25
+		return nil
26
+	}
27
+	parts := strings.Split(string(hc.NetworkMode), ":")
28
+	switch mode := parts[0]; mode {
29
+	case "default", "none":
30
+	default:
31
+		return fmt.Errorf("invalid --net: %s", hc.NetworkMode)
32
+	}
33
+	return nil
34
+}
35
+
36
+// ValidateIsolation performs platform specific validation of the
37
+// isolation level in the hostconfig structure.
38
+// This setting is currently discarded for Solaris so this is a no-op.
39
+func ValidateIsolation(hc *container.HostConfig) error {
40
+	return nil
41
+}
42
+
43
+// ValidateQoS performs platform specific validation of the QoS settings
44
+// a disk can be limited by either Bps or IOps, but not both.
45
+func ValidateQoS(hc *container.HostConfig) error {
46
+	return nil
47
+}
... ...
@@ -1,4 +1,4 @@
1
-// +build !windows
1
+// +build !windows,!solaris
2 2
 
3 3
 package runconfig
4 4
 
... ...
@@ -1,4 +1,4 @@
1
-// +build linux freebsd
1
+// +build linux freebsd solaris
2 2
 
3 3
 // Package local provides the default implementation for volumes. It
4 4
 // is used to mount data volume containers and directories local to
... ...
@@ -1,4 +1,4 @@
1
-// +build linux freebsd
1
+// +build linux freebsd solaris
2 2
 
3 3
 package store
4 4