
Merge branch 'master' of github.com:docker/docker into debug

Docker-DCO-1.1-Signed-off-by: Dan Walsh <dwalsh@redhat.com> (github: rhatdan)

Dan Walsh authored on 2014/11/26 04:09:19
Showing 115 changed files
... ...
@@ -1,5 +1,21 @@
1 1
 # Changelog
2 2
 
3
+## 1.3.2 (2014-11-20)
4
+
5
+#### Security
6
+- Fix tar breakout vulnerability
7
+* Extractions are now sandboxed chroot
8
+- Security options are no longer committed to images
9
+
10
+#### Runtime
11
+- Fix deadlock in `docker ps -f exited=1`
12
+- Fix a bug when `--volumes-from` references a container that failed to start
13
+
14
+#### Registry
15
++ `--insecure-registry` now accepts CIDR notation such as 10.1.0.0/16
16
+* Private registries whose IPs fall in the 127.0.0.0/8 range do not need the `--insecure-registry` flag
17
+- Skip the experimental registry v2 API when mirroring is enabled
18
+
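For the registry change above, the new CIDR form of `--insecure-registry` is passed to the daemon; a rough sketch of the 1.3.x daemon invocation (the address range is only an example):

```bash
# Treat every registry in the 10.1.0.0/16 range as insecure (no TLS verification).
# Daemon syntax as of Docker 1.3.x; the CIDR value here is illustrative.
docker -d --insecure-registry 10.1.0.0/16
```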
3 19
 ## 1.3.1 (2014-10-28)
4 20
 
5 21
 #### Security
... ...
@@ -172,7 +172,7 @@ component affected. For example, if a change affects `docs/` and `registry/`, it
172 172
 needs an absolute majority from the maintainers of `docs/` AND, separately, an
173 173
 absolute majority of the maintainers of `registry/`.
174 174
 
175
-For more details see [MAINTAINERS.md](hack/MAINTAINERS.md)
175
+For more details see [MAINTAINERS.md](project/MAINTAINERS.md)
176 176
 
177 177
 ### Sign your work
178 178
 
... ...
@@ -1,23 +1,39 @@
1 1
 .PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli validate
2 2
 
3
+# env vars passed through directly to Docker's build scripts
4
+# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily
5
+# `docs/sources/contributing/devenvironment.md` and `project/PACKAGERS.md` have some limited documentation of these
6
+DOCKER_ENVS := \
7
+	-e BUILDFLAGS \
8
+	-e DOCKER_CLIENTONLY \
9
+	-e DOCKER_EXECDRIVER \
10
+	-e DOCKER_GRAPHDRIVER \
11
+	-e TESTDIRS \
12
+	-e TESTFLAGS \
13
+	-e TIMEOUT
14
+# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
15
+
3 16
 # to allow `make BINDDIR=. shell` or `make BINDDIR= test`
4 17
 # (default to no bind mount if DOCKER_HOST is set)
5 18
 BINDDIR := $(if $(DOCKER_HOST),,bundles)
19
+DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/docker/docker/$(BINDDIR)")
20
+
21
+# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs)
22
+DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR))
23
+
6 24
 # to allow `make DOCSPORT=9000 docs`
7 25
 DOCSPORT := 8000
8 26
 
9 27
 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
10
-GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
11 28
 DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
12 29
 DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH))
13
-DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/docker/docker/$(BINDDIR)")
14 30
 
15
-DOCKER_ENVS := -e TIMEOUT -e BUILDFLAGS -e TESTFLAGS \
16
-  -e TESTDIRS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER \
17
-  -e DOCKER_CLIENTONLY
18 31
 DOCKER_RUN_DOCKER := docker run --rm -it --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) "$(DOCKER_IMAGE)"
19
-# to allow `make DOCSDIR=docs docs-shell`
20
-DOCKER_RUN_DOCS := docker run --rm -it $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) -e AWS_S3_BUCKET
32
+
33
+DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET
34
+
35
+# for some docs workarounds (see below in "docs-build" target)
36
+GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
21 37
 
22 38
 default: binary
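The variables whitelisted in `DOCKER_ENVS` above are forwarded into the build container, so they can be set directly on the `make` command line. A couple of illustrative invocations (targets and variable names come from this Makefile; the values are examples):

```bash
# Build only the client binary; DOCKER_CLIENTONLY is forwarded via DOCKER_ENVS
make DOCKER_CLIENTONLY=1 binary

# Run unit tests with extra go test flags; TESTDIRS and TESTFLAGS are forwarded too
make TESTDIRS='./pkg/...' TESTFLAGS='-v' test-unit
```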
23 39
 
... ...
@@ -1 +1 @@
1
-1.3.1-dev
1
+1.3.2-dev
... ...
@@ -38,6 +38,7 @@ import (
38 38
 	"github.com/docker/docker/pkg/term"
39 39
 	"github.com/docker/docker/pkg/timeutils"
40 40
 	"github.com/docker/docker/pkg/units"
41
+	"github.com/docker/docker/pkg/urlutil"
41 42
 	"github.com/docker/docker/registry"
42 43
 	"github.com/docker/docker/runconfig"
43 44
 	"github.com/docker/docker/utils"
... ...
@@ -47,6 +48,10 @@ const (
47 47
 	tarHeaderSize = 512
48 48
 )
49 49
 
50
+var (
51
+	acceptedImageFilterTags = map[string]struct{}{"dangling": {}}
52
+)
53
+
50 54
 func (cli *DockerCli) CmdHelp(args ...string) error {
51 55
 	if len(args) > 1 {
52 56
 		method, exists := cli.getMethod(args[:2]...)
... ...
@@ -77,6 +82,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
77 77
 	noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
78 78
 	rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build")
79 79
 	forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers, even after unsuccessful builds")
80
+	pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
80 81
 	if err := cmd.Parse(args); err != nil {
81 82
 		return nil
82 83
 	}
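The new `--pull` flag wired up above maps onto the `pull` query parameter handled later in this change set; typical client usage would be:

```bash
# Always attempt to pull a newer version of the base image before building
# (the tag name is only an example)
docker build --pull -t myorg/myimage .
```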
... ...
@@ -110,13 +116,13 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
110 110
 		} else {
111 111
 			context = ioutil.NopCloser(buf)
112 112
 		}
113
-	} else if utils.IsURL(cmd.Arg(0)) && (!utils.IsGIT(cmd.Arg(0)) || !hasGit) {
113
+	} else if urlutil.IsURL(cmd.Arg(0)) && (!urlutil.IsGitURL(cmd.Arg(0)) || !hasGit) {
114 114
 		isRemote = true
115 115
 	} else {
116 116
 		root := cmd.Arg(0)
117
-		if utils.IsGIT(root) {
117
+		if urlutil.IsGitURL(root) {
118 118
 			remoteURL := cmd.Arg(0)
119
-			if !utils.ValidGitTransport(remoteURL) {
119
+			if !urlutil.IsGitTransport(remoteURL) {
120 120
 				remoteURL = "https://" + remoteURL
121 121
 			}
122 122
 
... ...
@@ -213,6 +219,9 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
213 213
 		v.Set("forcerm", "1")
214 214
 	}
215 215
 
216
+	if *pull {
217
+		v.Set("pull", "1")
218
+	}
216 219
 	cli.LoadConfigFile()
217 220
 
218 221
 	headers := http.Header(make(map[string][]string))
... ...
@@ -508,6 +517,12 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
508 508
 	if remoteInfo.Exists("MemTotal") {
509 509
 		fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(remoteInfo.GetInt64("MemTotal"))))
510 510
 	}
511
+	if remoteInfo.Exists("Name") {
512
+		fmt.Fprintf(cli.out, "Name: %s\n", remoteInfo.Get("Name"))
513
+	}
514
+	if remoteInfo.Exists("ID") {
515
+		fmt.Fprintf(cli.out, "ID: %s\n", remoteInfo.Get("ID"))
516
+	}
511 517
 
512 518
 	if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" {
513 519
 		if remoteInfo.Exists("Debug") {
... ...
@@ -548,6 +563,13 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
548 548
 	if remoteInfo.Exists("IPv4Forwarding") && !remoteInfo.GetBool("IPv4Forwarding") {
549 549
 		fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n")
550 550
 	}
551
+	if remoteInfo.Exists("Labels") {
552
+		fmt.Fprintln(cli.out, "Labels:")
553
+		for _, attribute := range remoteInfo.GetList("Labels") {
554
+			fmt.Fprintf(cli.out, " %s\n", attribute)
555
+		}
556
+	}
557
+
551 558
 	return nil
552 559
 }
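Combined with the daemon-side `--label` flag added elsewhere in this change set, the extra `docker info` output above surfaces roughly as follows (label values are hypothetical):

```bash
# Start the daemon with a couple of key=value labels (1.3.x daemon syntax)
docker -d --label storage=ssd --label region=us-east-1

# docker info now prints Name, ID and the Labels block added above
docker info
```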
553 560
 
... ...
@@ -1336,6 +1358,12 @@ func (cli *DockerCli) CmdImages(args ...string) error {
1336 1336
 		}
1337 1337
 	}
1338 1338
 
1339
+	for name := range imageFilterArgs {
1340
+		if _, ok := acceptedImageFilterTags[name]; !ok {
1341
+			return fmt.Errorf("Invalid filter '%s'", name)
1342
+		}
1343
+	}
1344
+
1339 1345
 	matchName := cmd.Arg(0)
1340 1346
 	// FIXME: --viz and --tree are deprecated. Remove them in a future version.
1341 1347
 	if *flViz || *flTree {
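With the `acceptedImageFilterTags` whitelist checked above, only the `dangling` filter is accepted by `docker images`; a quick sketch of the resulting behaviour:

```bash
# Accepted: list dangling (untagged) image layers
docker images --filter dangling=true

# Rejected by the new validation loop with: Invalid filter 'foo'
docker images --filter foo=bar
```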
... ...
@@ -2145,7 +2173,11 @@ func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runc
2145 2145
 	stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, false)
2146 2146
 	//if image not found try to pull it
2147 2147
 	if statusCode == 404 {
2148
-		fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", config.Image)
2148
+		repo, tag := parsers.ParseRepositoryTag(config.Image)
2149
+		if tag == "" {
2150
+			tag = graph.DEFAULTTAG
2151
+		}
2152
+		fmt.Fprintf(cli.err, "Unable to find image '%s:%s' locally\n", repo, tag)
2149 2153
 
2150 2154
 		// we don't want to write to stdout anything apart from container.ID
2151 2155
 		if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil {
... ...
@@ -3,12 +3,15 @@ package api
3 3
 import (
4 4
 	"fmt"
5 5
 	"mime"
6
+	"os"
7
+	"path"
6 8
 	"strings"
7 9
 
8 10
 	log "github.com/Sirupsen/logrus"
9 11
 	"github.com/docker/docker/engine"
10 12
 	"github.com/docker/docker/pkg/parsers"
11 13
 	"github.com/docker/docker/pkg/version"
14
+	"github.com/docker/docker/vendor/src/github.com/docker/libtrust"
12 15
 )
13 16
 
14 17
 const (
... ...
@@ -47,3 +50,25 @@ func MatchesContentType(contentType, expectedType string) bool {
47 47
 	}
48 48
 	return err == nil && mimetype == expectedType
49 49
 }
50
+
51
+// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
52
+// otherwise generates a new one
53
+func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
54
+	err := os.MkdirAll(path.Dir(trustKeyPath), 0700)
55
+	if err != nil {
56
+		return nil, err
57
+	}
58
+	trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
59
+	if err == libtrust.ErrKeyFileDoesNotExist {
60
+		trustKey, err = libtrust.GenerateECP256PrivateKey()
61
+		if err != nil {
62
+			return nil, fmt.Errorf("Error generating key: %s", err)
63
+		}
64
+		if err := libtrust.SaveKey(trustKeyPath, trustKey); err != nil {
65
+			return nil, fmt.Errorf("Error saving key file: %s", err)
66
+		}
67
+	} else if err != nil {
68
+		return nil, fmt.Errorf("Error loading key file: %s", err)
69
+	}
70
+	return trustKey, nil
71
+}
... ...
@@ -1016,6 +1016,9 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite
1016 1016
 	} else {
1017 1017
 		job.Setenv("rm", r.FormValue("rm"))
1018 1018
 	}
1019
+	if r.FormValue("pull") == "1" && version.GreaterThanOrEqualTo("1.16") {
1020
+		job.Setenv("pull", "1")
1021
+	}
1019 1022
 	job.Stdin.Add(r.Body)
1020 1023
 	job.Setenv("remote", r.FormValue("remote"))
1021 1024
 	job.Setenv("t", r.FormValue("t"))
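On the wire, the `pull` parameter handled above is only honoured for API version 1.16 and newer. An illustrative raw request, assuming the daemon listens on tcp://localhost:2375 and the build context is a local tar archive:

```bash
# Build from a tar context and force a pull of the base image
curl -X POST \
     -H "Content-Type: application/tar" \
     --data-binary @context.tar \
     "http://localhost:2375/v1.16/build?t=myimage&pull=1"
```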
... ...
@@ -31,21 +31,39 @@ func nullDispatch(b *Builder, args []string, attributes map[string]bool, origina
31 31
 // in the dockerfile available from the next statement on via ${foo}.
32 32
 //
33 33
 func env(b *Builder, args []string, attributes map[string]bool, original string) error {
34
-	if len(args) != 2 {
35
-		return fmt.Errorf("ENV accepts two arguments")
34
+	if len(args) == 0 {
35
+		return fmt.Errorf("ENV is missing arguments")
36
+	}
37
+
38
+	if len(args)%2 != 0 {
39
+		// should never get here, but just in case
40
+		return fmt.Errorf("Bad input to ENV, too many args")
36 41
 	}
37 42
 
38
-	fullEnv := fmt.Sprintf("%s=%s", args[0], args[1])
43
+	commitStr := "ENV"
39 44
 
40
-	for i, envVar := range b.Config.Env {
41
-		envParts := strings.SplitN(envVar, "=", 2)
42
-		if args[0] == envParts[0] {
43
-			b.Config.Env[i] = fullEnv
44
-			return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv))
45
+	for j := 0; j < len(args); j++ {
46
+		// name  ==> args[j]
47
+		// value ==> args[j+1]
48
+		newVar := args[j] + "=" + args[j+1]
49
+		commitStr += " " + newVar
50
+
51
+		gotOne := false
52
+		for i, envVar := range b.Config.Env {
53
+			envParts := strings.SplitN(envVar, "=", 2)
54
+			if envParts[0] == args[j] {
55
+				b.Config.Env[i] = newVar
56
+				gotOne = true
57
+				break
58
+			}
59
+		}
60
+		if !gotOne {
61
+			b.Config.Env = append(b.Config.Env, newVar)
45 62
 		}
63
+		j++
46 64
 	}
47
-	b.Config.Env = append(b.Config.Env, fullEnv)
48
-	return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv))
65
+
66
+	return b.commit("", b.Config.Cmd, commitStr)
49 67
 }
50 68
 
51 69
 // MAINTAINER some text <maybe@an.email.address>
... ...
@@ -97,6 +115,12 @@ func from(b *Builder, args []string, attributes map[string]bool, original string
97 97
 	name := args[0]
98 98
 
99 99
 	image, err := b.Daemon.Repositories().LookupImage(name)
100
+	if b.Pull {
101
+		image, err = b.pullImage(name)
102
+		if err != nil {
103
+			return err
104
+		}
105
+	}
100 106
 	if err != nil {
101 107
 		if b.Daemon.Graph().IsNotExist(err) {
102 108
 			image, err = b.pullImage(name)
... ...
@@ -90,6 +90,7 @@ type Builder struct {
90 90
 	// controls how images and containers are handled between steps.
91 91
 	Remove      bool
92 92
 	ForceRemove bool
93
+	Pull        bool
93 94
 
94 95
 	AuthConfig     *registry.AuthConfig
95 96
 	AuthConfigFile *registry.ConfigFile
... ...
@@ -24,10 +24,12 @@ import (
24 24
 	"github.com/docker/docker/daemon"
25 25
 	imagepkg "github.com/docker/docker/image"
26 26
 	"github.com/docker/docker/pkg/archive"
27
+	"github.com/docker/docker/pkg/chrootarchive"
27 28
 	"github.com/docker/docker/pkg/parsers"
28 29
 	"github.com/docker/docker/pkg/symlink"
29 30
 	"github.com/docker/docker/pkg/system"
30 31
 	"github.com/docker/docker/pkg/tarsum"
32
+	"github.com/docker/docker/pkg/urlutil"
31 33
 	"github.com/docker/docker/registry"
32 34
 	"github.com/docker/docker/utils"
33 35
 )
... ...
@@ -46,7 +48,8 @@ func (b *Builder) readContext(context io.Reader) error {
46 46
 	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil {
47 47
 		return err
48 48
 	}
49
-	if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
49
+
50
+	if err := chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
50 51
 		return err
51 52
 	}
52 53
 
... ...
@@ -215,7 +218,7 @@ func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath stri
215 215
 	origPath = strings.TrimPrefix(origPath, "./")
216 216
 
217 217
 	// In the remote/URL case, download it and gen its hashcode
218
-	if utils.IsURL(origPath) {
218
+	if urlutil.IsURL(origPath) {
219 219
 		if !allowRemote {
220 220
 			return fmt.Errorf("Source can't be a URL for %s", cmdName)
221 221
 		}
... ...
@@ -627,7 +630,7 @@ func (b *Builder) addContext(container *daemon.Container, orig, dest string, dec
627 627
 		}
628 628
 
629 629
 		// try to successfully untar the orig
630
-		if err := archive.UntarPath(origPath, tarDest); err == nil {
630
+		if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
631 631
 			return nil
632 632
 		} else if err != io.EOF {
633 633
 			log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
... ...
@@ -637,7 +640,7 @@ func (b *Builder) addContext(container *daemon.Container, orig, dest string, dec
637 637
 	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
638 638
 		return err
639 639
 	}
640
-	if err := archive.CopyWithTar(origPath, destPath); err != nil {
640
+	if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
641 641
 		return err
642 642
 	}
643 643
 
... ...
@@ -650,7 +653,7 @@ func (b *Builder) addContext(container *daemon.Container, orig, dest string, dec
650 650
 }
651 651
 
652 652
 func copyAsDirectory(source, destination string, destinationExists bool) error {
653
-	if err := archive.CopyWithTar(source, destination); err != nil {
653
+	if err := chrootarchive.CopyWithTar(source, destination); err != nil {
654 654
 		return err
655 655
 	}
656 656
 
... ...
@@ -11,6 +11,7 @@ import (
11 11
 	"github.com/docker/docker/graph"
12 12
 	"github.com/docker/docker/pkg/archive"
13 13
 	"github.com/docker/docker/pkg/parsers"
14
+	"github.com/docker/docker/pkg/urlutil"
14 15
 	"github.com/docker/docker/registry"
15 16
 	"github.com/docker/docker/utils"
16 17
 )
... ...
@@ -35,6 +36,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
35 35
 		noCache        = job.GetenvBool("nocache")
36 36
 		rm             = job.GetenvBool("rm")
37 37
 		forceRm        = job.GetenvBool("forcerm")
38
+		pull           = job.GetenvBool("pull")
38 39
 		authConfig     = &registry.AuthConfig{}
39 40
 		configFile     = &registry.ConfigFile{}
40 41
 		tag            string
... ...
@@ -57,8 +59,8 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
57 57
 
58 58
 	if remoteURL == "" {
59 59
 		context = ioutil.NopCloser(job.Stdin)
60
-	} else if utils.IsGIT(remoteURL) {
61
-		if !utils.ValidGitTransport(remoteURL) {
60
+	} else if urlutil.IsGitURL(remoteURL) {
61
+		if !urlutil.IsGitTransport(remoteURL) {
62 62
 			remoteURL = "https://" + remoteURL
63 63
 		}
64 64
 		root, err := ioutil.TempDir("", "docker-build-git")
... ...
@@ -76,7 +78,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
76 76
 			return job.Error(err)
77 77
 		}
78 78
 		context = c
79
-	} else if utils.IsURL(remoteURL) {
79
+	} else if urlutil.IsURL(remoteURL) {
80 80
 		f, err := utils.Download(remoteURL)
81 81
 		if err != nil {
82 82
 			return job.Error(err)
... ...
@@ -111,6 +113,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
111 111
 		UtilizeCache:    !noCache,
112 112
 		Remove:          rm,
113 113
 		ForceRemove:     forceRm,
114
+		Pull:            pull,
114 115
 		OutOld:          job.Stdout,
115 116
 		StreamFormatter: sf,
116 117
 		AuthConfig:      authConfig,
... ...
@@ -12,6 +12,7 @@ import (
12 12
 	"fmt"
13 13
 	"strconv"
14 14
 	"strings"
15
+	"unicode"
15 16
 )
16 17
 
17 18
 var (
... ...
@@ -41,17 +42,139 @@ func parseSubCommand(rest string) (*Node, map[string]bool, error) {
41 41
 // parse environment like statements. Note that this does *not* handle
42 42
 // variable interpolation, which will be handled in the evaluator.
43 43
 func parseEnv(rest string) (*Node, map[string]bool, error) {
44
-	node := &Node{}
45
-	rootnode := node
46
-	strs := TOKEN_WHITESPACE.Split(rest, 2)
44
+	// This is kind of tricky because we need to support the old
45
+	// variant:   ENV name value
46
+	// as well as the new one:    ENV name=value ...
47
+	// The trigger to know which one is being used will be whether we hit
48
+	// a space or = first.  space ==> old, "=" ==> new
49
+
50
+	const (
51
+		inSpaces = iota // looking for start of a word
52
+		inWord
53
+		inQuote
54
+	)
55
+
56
+	words := []string{}
57
+	phase := inSpaces
58
+	word := ""
59
+	quote := '\000'
60
+	blankOK := false
61
+	var ch rune
62
+
63
+	for pos := 0; pos <= len(rest); pos++ {
64
+		if pos != len(rest) {
65
+			ch = rune(rest[pos])
66
+		}
67
+
68
+		if phase == inSpaces { // Looking for start of word
69
+			if pos == len(rest) { // end of input
70
+				break
71
+			}
72
+			if unicode.IsSpace(ch) { // skip spaces
73
+				continue
74
+			}
75
+			phase = inWord // found it, fall thru
76
+		}
77
+		if (phase == inWord || phase == inQuote) && (pos == len(rest)) {
78
+			if blankOK || len(word) > 0 {
79
+				words = append(words, word)
80
+			}
81
+			break
82
+		}
83
+		if phase == inWord {
84
+			if unicode.IsSpace(ch) {
85
+				phase = inSpaces
86
+				if blankOK || len(word) > 0 {
87
+					words = append(words, word)
88
+
89
+					// Look for = and if not there, assume
90
+					// we're using the old format and
91
+					// just read the rest of the line
92
+					if !strings.Contains(word, "=") {
93
+						word = strings.TrimSpace(rest[pos:])
94
+						words = append(words, word)
95
+						break
96
+					}
97
+				}
98
+				word = ""
99
+				blankOK = false
100
+				continue
101
+			}
102
+			if ch == '\'' || ch == '"' {
103
+				quote = ch
104
+				blankOK = true
105
+				phase = inQuote
106
+				continue
107
+			}
108
+			if ch == '\\' {
109
+				if pos+1 == len(rest) {
110
+					continue // just skip \ at end
111
+				}
112
+				pos++
113
+				ch = rune(rest[pos])
114
+			}
115
+			word += string(ch)
116
+			continue
117
+		}
118
+		if phase == inQuote {
119
+			if ch == quote {
120
+				phase = inWord
121
+				continue
122
+			}
123
+			if ch == '\\' {
124
+				if pos+1 == len(rest) {
125
+					phase = inWord
126
+					continue // just skip \ at end
127
+				}
128
+				pos++
129
+				ch = rune(rest[pos])
130
+			}
131
+			word += string(ch)
132
+		}
133
+	}
47 134
 
48
-	if len(strs) < 2 {
49
-		return nil, nil, fmt.Errorf("ENV must have two arguments")
135
+	if len(words) == 0 {
136
+		return nil, nil, fmt.Errorf("ENV must have some arguments")
50 137
 	}
51 138
 
52
-	node.Value = strs[0]
53
-	node.Next = &Node{}
54
-	node.Next.Value = strs[1]
139
+	// Old format (ENV name value)
140
+	var rootnode *Node
141
+
142
+	if !strings.Contains(words[0], "=") {
143
+		node := &Node{}
144
+		rootnode = node
145
+		strs := TOKEN_WHITESPACE.Split(rest, 2)
146
+
147
+		if len(strs) < 2 {
148
+			return nil, nil, fmt.Errorf("ENV must have two arguments")
149
+		}
150
+
151
+		node.Value = strs[0]
152
+		node.Next = &Node{}
153
+		node.Next.Value = strs[1]
154
+	} else {
155
+		var prevNode *Node
156
+		for i, word := range words {
157
+			if !strings.Contains(word, "=") {
158
+				return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word)
159
+			}
160
+			parts := strings.SplitN(word, "=", 2)
161
+
162
+			name := &Node{}
163
+			value := &Node{}
164
+
165
+			name.Next = value
166
+			name.Value = parts[0]
167
+			value.Value = parts[1]
168
+
169
+			if i == 0 {
170
+				rootnode = name
171
+			} else {
172
+				prevNode.Next = name
173
+			}
174
+			prevNode = value
175
+		}
176
+	}
55 177
 
56 178
 	return rootnode, nil, nil
57 179
 }
... ...
@@ -125,6 +125,12 @@ func Parse(rwc io.Reader) (*Node, error) {
125 125
 					break
126 126
 				}
127 127
 			}
128
+			if child == nil && line != "" {
129
+				line, child, err = parseLine(line)
130
+				if err != nil {
131
+					return nil, err
132
+				}
133
+			}
128 134
 		}
129 135
 
130 136
 		if child != nil {
131 137
deleted file mode 100644
... ...
@@ -1,3 +0,0 @@
1
-FROM busybox
2
-
3
-ENV PATH=PATH
4 1
new file mode 100644
... ...
@@ -0,0 +1,3 @@
0
+FROM busybox
1
+
2
+ENV PATH
0 3
new file mode 100644
... ...
@@ -0,0 +1,15 @@
0
+FROM ubuntu
1
+ENV name value
2
+ENV name=value
3
+ENV name=value name2=value2
4
+ENV name="value value1"
5
+ENV name=value\ value2
6
+ENV name="value'quote space'value2"
7
+ENV name='value"double quote"value2'
8
+ENV name=value\ value2 name2=value2\ value3
9
+ENV name=value \
10
+    name1=value1 \
11
+    name2="value2a \
12
+           value2b" \
13
+    name3="value3a\n\"value3b\"" \
14
+	name4="value4a\\nvalue4b" \
0 15
new file mode 100644
... ...
@@ -0,0 +1,10 @@
0
+(from "ubuntu")
1
+(env "name" "value")
2
+(env "name" "value")
3
+(env "name" "value" "name2" "value2")
4
+(env "name" "value value1")
5
+(env "name" "value value2")
6
+(env "name" "value'quote space'value2")
7
+(env "name" "value\"double quote\"value2")
8
+(env "name" "value value2" "name2" "value2 value3")
9
+(env "name" "value" "name1" "value1" "name2" "value2a            value2b" "name3" "value3an\"value3b\"" "name4" "value4a\\nvalue4b")
... ...
@@ -1,8 +1,8 @@
1
-#!bash
1
+#!/bin/bash
2 2
 #
3 3
 # bash completion file for core docker commands
4 4
 #
5
-# This script provides supports completion of:
5
+# This script provides completion of:
6 6
 #  - commands and their options
7 7
 #  - container ids and names
8 8
 #  - image repos and tags
... ...
@@ -11,9 +11,9 @@
11 11
 # To enable the completions either:
12 12
 #  - place this file in /etc/bash_completion.d
13 13
 #  or
14
-#  - copy this file and add the line below to your .bashrc after
15
-#    bash completion features are loaded
16
-#     . docker.bash
14
+#  - copy this file to e.g. ~/.docker-completion.sh and add the line
15
+#    below to your .bashrc after bash completion features are loaded
16
+#    . ~/.docker-completion.sh
17 17
 #
18 18
 # Note:
19 19
 # Currently, the completions will not work if the docker daemon is not
... ...
@@ -99,13 +99,60 @@ __docker_pos_first_nonflag() {
99 99
 	echo $counter
100 100
 }
101 101
 
102
+__docker_resolve_hostname() {
103
+	command -v host >/dev/null 2>&1 || return
104
+	COMPREPLY=( $(host 2>/dev/null "${cur%:}" | awk '/has address/ {print $4}') )
105
+}
106
+
107
+__docker_capabilities() {
108
+	# The list of capabilities is defined in types.go, ALL was added manually.
109
+	COMPREPLY=( $( compgen -W "
110
+		ALL
111
+		AUDIT_CONTROL
112
+		AUDIT_WRITE
113
+		BLOCK_SUSPEND
114
+		CHOWN
115
+		DAC_OVERRIDE
116
+		DAC_READ_SEARCH
117
+		FOWNER
118
+		FSETID
119
+		IPC_LOCK
120
+		IPC_OWNER
121
+		KILL
122
+		LEASE
123
+		LINUX_IMMUTABLE
124
+		MAC_ADMIN
125
+		MAC_OVERRIDE
126
+		MKNOD
127
+		NET_ADMIN
128
+		NET_BIND_SERVICE
129
+		NET_BROADCAST
130
+		NET_RAW
131
+		SETFCAP
132
+		SETGID
133
+		SETPCAP
134
+		SETUID
135
+		SYS_ADMIN
136
+		SYS_BOOT
137
+		SYS_CHROOT
138
+		SYSLOG
139
+		SYS_MODULE
140
+		SYS_NICE
141
+		SYS_PACCT
142
+		SYS_PTRACE
143
+		SYS_RAWIO
144
+		SYS_RESOURCE
145
+		SYS_TIME
146
+		SYS_TTY_CONFIG
147
+		WAKE_ALARM
148
+	" -- "$cur" ) )
149
+}
150
+
102 151
 _docker_docker() {
103 152
 	case "$prev" in
104 153
 		-H)
105 154
 			return
106 155
 			;;
107
-		*)
108
-			;;
109 156
 	esac
110 157
 
111 158
 	case "$cur" in
... ...
@@ -138,8 +185,6 @@ _docker_build() {
138 138
 			__docker_image_repos_and_tags
139 139
 			return
140 140
 			;;
141
-		*)
142
-			;;
143 141
 	esac
144 142
 
145 143
 	case "$cur" in
... ...
@@ -160,8 +205,6 @@ _docker_commit() {
160 160
 		-m|--message|-a|--author|--run)
161 161
 			return
162 162
 			;;
163
-		*)
164
-			;;
165 163
 	esac
166 164
 
167 165
 	case "$cur" in
... ...
@@ -222,7 +265,7 @@ _docker_create() {
222 222
 			__docker_containers_all
223 223
 			return
224 224
 			;;
225
-		-v|--volume)
225
+		-v|--volume|--device)
226 226
 			case "$cur" in
227 227
 				*:*)
228 228
 					# TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
... ...
@@ -255,19 +298,72 @@ _docker_create() {
255 255
 			esac
256 256
 			return
257 257
 			;;
258
-		--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf)
258
+		--add-host)
259
+			case "$cur" in
260
+				*:)
261
+					__docker_resolve_hostname
262
+					return
263
+					;;
264
+			esac
265
+			;;
266
+		--cap-add|--cap-drop)
267
+			__docker_capabilities
259 268
 			return
260 269
 			;;
261
-		*)
270
+		--net)
271
+			case "$cur" in
272
+				container:*)
273
+					local cur=${cur#*:}
274
+					__docker_containers_all
275
+					;;
276
+				*)
277
+					COMPREPLY=( $( compgen -W "bridge none container: host" -- "$cur") )
278
+					if [ "${COMPREPLY[*]}" = "container:" ] ; then
279
+						compopt -o nospace
280
+					fi
281
+					;;
282
+			esac
283
+			return
284
+			;;
285
+		--restart)
286
+			case "$cur" in
287
+				on-failure:*)
288
+					;;
289
+				*)
290
+					COMPREPLY=( $( compgen -W "no on-failure on-failure: always" -- "$cur") )
291
+					;;
292
+			esac
293
+			return
294
+			;;
295
+		--security-opt)
296
+			case "$cur" in
297
+				label:*:*)
298
+					;;
299
+				label:*)
300
+					local cur=${cur##*:}
301
+					COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "$cur") )
302
+					if [ "${COMPREPLY[*]}" != "disable" ] ; then
303
+						compopt -o nospace
304
+					fi
305
+					;;
306
+				*)
307
+					COMPREPLY=( $( compgen -W "label apparmor" -S ":" -- "$cur") )
308
+					compopt -o nospace
309
+					;;
310
+			esac
311
+			return
312
+			;;
313
+		--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf|--dns-search)
314
+			return
262 315
 			;;
263 316
 	esac
264 317
 
265 318
 	case "$cur" in
266 319
 		-*)
267
-			COMPREPLY=( $( compgen -W "-n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir -c --cpu-shares --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf" -- "$cur" ) )
320
+			COMPREPLY=( $( compgen -W "--privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt --add-host --cap-add --cap-drop --device --dns-search --net --restart" -- "$cur" ) )
268 321
 			;;
269 322
 		*)
270
-			local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf')
323
+			local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt|--add-host|--cap-add|--cap-drop|--device|--dns-search|--net|--restart')
271 324
 
272 325
 			if [ $cword -eq $counter ]; then
273 326
 				__docker_image_repos_and_tags_and_ids
... ...
@@ -288,16 +384,12 @@ _docker_events() {
288 288
 		--since)
289 289
 			return
290 290
 			;;
291
-		*)
292
-			;;
293 291
 	esac
294 292
 
295 293
 	case "$cur" in
296 294
 		-*)
297 295
 			COMPREPLY=( $( compgen -W "--since" -- "$cur" ) )
298 296
 			;;
299
-		*)
300
-			;;
301 297
 	esac
302 298
 }
303 299
 
... ...
@@ -376,8 +468,6 @@ _docker_inspect() {
376 376
 		-f|--format)
377 377
 			return
378 378
 			;;
379
-		*)
380
-			;;
381 379
 	esac
382 380
 
383 381
 	case "$cur" in
... ...
@@ -403,16 +493,12 @@ _docker_login() {
403 403
 		-u|--username|-p|--password|-e|--email)
404 404
 			return
405 405
 			;;
406
-		*)
407
-			;;
408 406
 	esac
409 407
 
410 408
 	case "$cur" in
411 409
 		-*)
412 410
 			COMPREPLY=( $( compgen -W "-u --username -p --password -e --email" -- "$cur" ) )
413 411
 			;;
414
-		*)
415
-			;;
416 412
 	esac
417 413
 }
418 414
 
... ...
@@ -452,16 +538,12 @@ _docker_ps() {
452 452
 		-n)
453 453
 			return
454 454
 			;;
455
-		*)
456
-			;;
457 455
 	esac
458 456
 
459 457
 	case "$cur" in
460 458
 		-*)
461 459
 			COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since --before -n" -- "$cur" ) )
462 460
 			;;
463
-		*)
464
-			;;
465 461
 	esac
466 462
 }
467 463
 
... ...
@@ -470,8 +552,6 @@ _docker_pull() {
470 470
 		-t|--tag)
471 471
 			return
472 472
 			;;
473
-		*)
474
-			;;
475 473
 	esac
476 474
 
477 475
 	case "$cur" in
... ...
@@ -499,8 +579,6 @@ _docker_restart() {
499 499
 		-t|--time)
500 500
 			return
501 501
 			;;
502
-		*)
503
-			;;
504 502
 	esac
505 503
 
506 504
 	case "$cur" in
... ...
@@ -520,7 +598,6 @@ _docker_rm() {
520 520
 			return
521 521
 			;;
522 522
 		*)
523
-			local force=
524 523
 			for arg in "${COMP_WORDS[@]}"; do
525 524
 				case "$arg" in
526 525
 					-f|--force)
... ...
@@ -553,7 +630,7 @@ _docker_run() {
553 553
 			__docker_containers_all
554 554
 			return
555 555
 			;;
556
-		-v|--volume)
556
+		-v|--volume|--device)
557 557
 			case "$cur" in
558 558
 				*:*)
559 559
 					# TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
... ...
@@ -586,20 +663,72 @@ _docker_run() {
586 586
 			esac
587 587
 			return
588 588
 			;;
589
-		--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf)
589
+		--add-host)
590
+			case "$cur" in
591
+				*:)
592
+					__docker_resolve_hostname
593
+					return
594
+					;;
595
+			esac
596
+			;;
597
+		--cap-add|--cap-drop)
598
+			__docker_capabilities
590 599
 			return
591 600
 			;;
592
-		*)
601
+		--net)
602
+			case "$cur" in
603
+				container:*)
604
+					local cur=${cur#*:}
605
+					__docker_containers_all
606
+					;;
607
+				*)
608
+					COMPREPLY=( $( compgen -W "bridge none container: host" -- "$cur") )
609
+					if [ "${COMPREPLY[*]}" = "container:" ] ; then
610
+						compopt -o nospace
611
+					fi
612
+					;;
613
+			esac
614
+			return
615
+			;;
616
+		--restart)
617
+			case "$cur" in
618
+				on-failure:*)
619
+					;;
620
+				*)
621
+					COMPREPLY=( $( compgen -W "no on-failure on-failure: always" -- "$cur") )
622
+					;;
623
+			esac
624
+			return
625
+			;;
626
+		--security-opt)
627
+			case "$cur" in
628
+				label:*:*)
629
+					;;
630
+				label:*)
631
+					local cur=${cur##*:}
632
+					COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "$cur") )
633
+					if [ "${COMPREPLY[*]}" != "disable" ] ; then
634
+						compopt -o nospace
635
+					fi
636
+					;;
637
+				*)
638
+					COMPREPLY=( $( compgen -W "label apparmor" -S ":" -- "$cur") )
639
+					compopt -o nospace
640
+					;;
641
+			esac
642
+			return
643
+			;;
644
+		--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf|--dns-search)
645
+			return
593 646
 			;;
594 647
 	esac
595 648
 
596 649
 	case "$cur" in
597 650
 		-*)
598
-			COMPREPLY=( $( compgen -W "--rm -d --detach -n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt" -- "$cur" ) )
651
+			COMPREPLY=( $( compgen -W "--rm -d --detach --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt --add-host --cap-add --cap-drop --device --dns-search --net --restart" -- "$cur" ) )
599 652
 			;;
600 653
 		*)
601
-
602
-			local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt')
654
+			local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt|--add-host|--cap-add|--cap-drop|--device|--dns-search|--net|--restart')
603 655
 
604 656
 			if [ $cword -eq $counter ]; then
605 657
 				__docker_image_repos_and_tags_and_ids
... ...
@@ -620,16 +749,12 @@ _docker_search() {
620 620
 		-s|--stars)
621 621
 			return
622 622
 			;;
623
-		*)
624
-			;;
625 623
 	esac
626 624
 
627 625
 	case "$cur" in
628 626
 		-*)
629 627
 			COMPREPLY=( $( compgen -W "--no-trunc --automated -s --stars" -- "$cur" ) )
630 628
 			;;
631
-		*)
632
-			;;
633 629
 	esac
634 630
 }
635 631
 
... ...
@@ -649,8 +774,6 @@ _docker_stop() {
649 649
 		-t|--time)
650 650
 			return
651 651
 			;;
652
-		*)
653
-			;;
654 652
 	esac
655 653
 
656 654
 	case "$cur" in
... ...
@@ -752,7 +875,7 @@ _docker() {
752 752
 	local cur prev words cword
753 753
 	_get_comp_words_by_ref -n : cur prev words cword
754 754
 
755
-	local command='docker'
755
+	local command='docker' cpos=0
756 756
 	local counter=1
757 757
 	while [ $counter -lt $cword ]; do
758 758
 		case "${words[$counter]}" in
... ...
@@ -177,7 +177,9 @@ __docker_commands () {
177 177
     if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \
178 178
         && ! _retrieve_cache docker_subcommands;
179 179
     then
180
-        _docker_subcommands=(${${${${(f)"$(_call_program commands docker 2>&1)"}[5,-1]}## #}/ ##/:})
180
+        local -a lines
181
+        lines=(${(f)"$(_call_program commands docker 2>&1)"})
182
+        _docker_subcommands=(${${${lines[$((${lines[(i)Commands:]} + 1)),${lines[(I)    *]}]}## #}/ ##/:})
181 183
         _docker_subcommands=($_docker_subcommands 'help:Show help for a command')
182 184
         _store_cache docker_subcommands _docker_subcommands
183 185
     fi
... ...
@@ -15,9 +15,12 @@ done
15 15
 suite="$1"
16 16
 shift
17 17
 
18
+# allow for DEBOOTSTRAP=qemu-debootstrap ./mkimage.sh ...
19
+: ${DEBOOTSTRAP:=debootstrap}
20
+
18 21
 (
19 22
 	set -x
20
-	debootstrap "${before[@]}" "$suite" "$rootfsDir" "$@"
23
+	$DEBOOTSTRAP "${before[@]}" "$suite" "$rootfsDir" "$@"
21 24
 )
22 25
 
23 26
 # now for some Docker-specific tweaks
... ...
@@ -40,6 +40,8 @@ type Config struct {
40 40
 	DisableNetwork              bool
41 41
 	EnableSelinuxSupport        bool
42 42
 	Context                     map[string][]string
43
+	TrustKeyPath                string
44
+	Labels                      []string
43 45
 }
44 46
 
45 47
 // InstallFlags adds command-line options to the top-level flag parser for
... ...
@@ -68,6 +70,7 @@ func (config *Config) InstallFlags() {
68 68
 	opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers")
69 69
 	opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains")
70 70
 	opts.MirrorListVar(&config.Mirrors, []string{"-registry-mirror"}, "Specify a preferred Docker registry mirror")
71
+	opts.LabelListVar(&config.Labels, []string{"-label"}, "Set key=value labels to the daemon (displayed in `docker info`)")
71 72
 
72 73
 	// Localhost is by default considered as an insecure registry
73 74
 	// This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker).
... ...
@@ -83,8 +83,8 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
83 83
 	if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil {
84 84
 		return nil, nil, err
85 85
 	}
86
-	if hostConfig != nil && config.SecurityOpt == nil {
87
-		config.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode)
86
+	if hostConfig != nil && hostConfig.SecurityOpt == nil {
87
+		hostConfig.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode)
88 88
 		if err != nil {
89 89
 			return nil, nil, err
90 90
 		}
... ...
@@ -15,6 +15,7 @@ import (
15 15
 	"github.com/docker/libcontainer/label"
16 16
 
17 17
 	log "github.com/Sirupsen/logrus"
18
+	"github.com/docker/docker/api"
18 19
 	"github.com/docker/docker/daemon/execdriver"
19 20
 	"github.com/docker/docker/daemon/execdriver/execdrivers"
20 21
 	"github.com/docker/docker/daemon/execdriver/lxc"
... ...
@@ -83,6 +84,7 @@ func (c *contStore) List() []*Container {
83 83
 }
84 84
 
85 85
 type Daemon struct {
86
+	ID             string
86 87
 	repository     string
87 88
 	sysInitPath    string
88 89
 	containers     *contStore
... ...
@@ -529,10 +531,10 @@ func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint, configCmd []string)
529 529
 	return entrypoint, args
530 530
 }
531 531
 
532
-func parseSecurityOpt(container *Container, config *runconfig.Config) error {
532
+func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error {
533 533
 	var (
534
-		label_opts []string
535
-		err        error
534
+		labelOpts []string
535
+		err       error
536 536
 	)
537 537
 
538 538
 	for _, opt := range config.SecurityOpt {
... ...
@@ -542,7 +544,7 @@ func parseSecurityOpt(container *Container, config *runconfig.Config) error {
542 542
 		}
543 543
 		switch con[0] {
544 544
 		case "label":
545
-			label_opts = append(label_opts, con[1])
545
+			labelOpts = append(labelOpts, con[1])
546 546
 		case "apparmor":
547 547
 			container.AppArmorProfile = con[1]
548 548
 		default:
... ...
@@ -550,7 +552,7 @@ func parseSecurityOpt(container *Container, config *runconfig.Config) error {
550 550
 		}
551 551
 	}
552 552
 
553
-	container.ProcessLabel, container.MountLabel, err = label.InitLabels(label_opts)
553
+	container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts)
554 554
 	return err
555 555
 }
556 556
 
... ...
@@ -584,7 +586,6 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, img *i
584 584
 		execCommands:    newExecStore(),
585 585
 	}
586 586
 	container.root = daemon.containerRoot(container.ID)
587
-	err = parseSecurityOpt(container, config)
588 587
 	return container, err
589 588
 }
590 589
 
... ...
@@ -893,7 +894,13 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
893 893
 		return nil, err
894 894
 	}
895 895
 
896
+	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
897
+	if err != nil {
898
+		return nil, err
899
+	}
900
+
896 901
 	daemon := &Daemon{
902
+		ID:             trustKey.PublicKey().KeyID(),
897 903
 		repository:     daemonRepo,
898 904
 		containers:     &contStore{s: make(map[string]*Container)},
899 905
 		execCommands:   newExecStore(),
... ...
@@ -8,7 +8,7 @@ import (
8 8
 
9 9
 func TestParseSecurityOpt(t *testing.T) {
10 10
 	container := &Container{}
11
-	config := &runconfig.Config{}
11
+	config := &runconfig.HostConfig{}
12 12
 
13 13
 	// test apparmor
14 14
 	config.SecurityOpt = []string{"apparmor:test_profile"}
... ...
@@ -122,8 +122,6 @@ func (d *Daemon) ContainerExecCreate(job *engine.Job) engine.Status {
122 122
 	entrypoint, args := d.getEntrypointAndArgs(nil, config.Cmd)
123 123
 
124 124
 	processConfig := execdriver.ProcessConfig{
125
-		Privileged: config.Privileged,
126
-		User:       config.User,
127 125
 		Tty:        config.Tty,
128 126
 		Entrypoint: entrypoint,
129 127
 		Arguments:  args,
... ...
@@ -33,6 +33,7 @@ import (
33 33
 	log "github.com/Sirupsen/logrus"
34 34
 	"github.com/docker/docker/daemon/graphdriver"
35 35
 	"github.com/docker/docker/pkg/archive"
36
+	"github.com/docker/docker/pkg/chrootarchive"
36 37
 	mountpk "github.com/docker/docker/pkg/mount"
37 38
 	"github.com/docker/docker/utils"
38 39
 	"github.com/docker/libcontainer/label"
... ...
@@ -305,7 +306,7 @@ func (a *Driver) Diff(id, parent string) (archive.Archive, error) {
305 305
 }
306 306
 
307 307
 func (a *Driver) applyDiff(id string, diff archive.ArchiveReader) error {
308
-	return archive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil)
308
+	return chrootarchive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil)
309 309
 }
310 310
 
311 311
 // DiffSize calculates the changes between the specified id
... ...
@@ -4,18 +4,25 @@ import (
4 4
 	"crypto/sha256"
5 5
 	"encoding/hex"
6 6
 	"fmt"
7
-	"github.com/docker/docker/daemon/graphdriver"
8
-	"github.com/docker/docker/pkg/archive"
9 7
 	"io/ioutil"
10 8
 	"os"
11 9
 	"path"
12 10
 	"testing"
11
+
12
+	"github.com/docker/docker/daemon/graphdriver"
13
+	"github.com/docker/docker/pkg/archive"
14
+	"github.com/docker/docker/pkg/reexec"
13 15
 )
14 16
 
15 17
 var (
16
-	tmp = path.Join(os.TempDir(), "aufs-tests", "aufs")
18
+	tmpOuter = path.Join(os.TempDir(), "aufs-tests")
19
+	tmp      = path.Join(tmpOuter, "aufs")
17 20
 )
18 21
 
22
+func init() {
23
+	reexec.Init()
24
+}
25
+
19 26
 func testInit(dir string, t *testing.T) graphdriver.Driver {
20 27
 	d, err := Init(dir, nil)
21 28
 	if err != nil {
... ...
@@ -640,8 +647,8 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) {
640 640
 		t.Fatal(err)
641 641
 	}
642 642
 
643
-	d := testInit(mountPath, t).(*Driver)
644 643
 	defer os.RemoveAll(mountPath)
644
+	d := testInit(mountPath, t).(*Driver)
645 645
 	defer d.Cleanup()
646 646
 	var last string
647 647
 	var expected int
... ...
@@ -662,24 +669,24 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) {
662 662
 
663 663
 		if err := d.Create(current, parent); err != nil {
664 664
 			t.Logf("Current layer %d", i)
665
-			t.Fatal(err)
665
+			t.Error(err)
666 666
 		}
667 667
 		point, err := d.Get(current, "")
668 668
 		if err != nil {
669 669
 			t.Logf("Current layer %d", i)
670
-			t.Fatal(err)
670
+			t.Error(err)
671 671
 		}
672 672
 		f, err := os.Create(path.Join(point, current))
673 673
 		if err != nil {
674 674
 			t.Logf("Current layer %d", i)
675
-			t.Fatal(err)
675
+			t.Error(err)
676 676
 		}
677 677
 		f.Close()
678 678
 
679 679
 		if i%10 == 0 {
680 680
 			if err := os.Remove(path.Join(point, parent)); err != nil {
681 681
 				t.Logf("Current layer %d", i)
682
-				t.Fatal(err)
682
+				t.Error(err)
683 683
 			}
684 684
 			expected--
685 685
 		}
... ...
@@ -689,28 +696,30 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) {
689 689
 	// Perform the actual mount for the top most image
690 690
 	point, err := d.Get(last, "")
691 691
 	if err != nil {
692
-		t.Fatal(err)
692
+		t.Error(err)
693 693
 	}
694 694
 	files, err := ioutil.ReadDir(point)
695 695
 	if err != nil {
696
-		t.Fatal(err)
696
+		t.Error(err)
697 697
 	}
698 698
 	if len(files) != expected {
699
-		t.Fatalf("Expected %d got %d", expected, len(files))
699
+		t.Errorf("Expected %d got %d", expected, len(files))
700 700
 	}
701 701
 }
702 702
 
703 703
 func TestMountMoreThan42Layers(t *testing.T) {
704
+	os.RemoveAll(tmpOuter)
704 705
 	testMountMoreThan42Layers(t, tmp)
705 706
 }
706 707
 
707 708
 func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) {
708
-	tmp := "aufs-tests"
709
+	defer os.RemoveAll(tmpOuter)
710
+	zeroes := "0"
709 711
 	for {
710 712
 		// This finds a mount path so that when combined into aufs mount options
711 713
 		// 4096 byte boundary would be in between the paths or in permission
712
-		// section. For '/tmp' it will use '/tmp/aufs-tests00000000/aufs'
713
-		mountPath := path.Join(os.TempDir(), tmp, "aufs")
714
+		// section. For '/tmp' it will use '/tmp/aufs-tests/00000000/aufs'
715
+		mountPath := path.Join(tmpOuter, zeroes, "aufs")
714 716
 		pathLength := 77 + len(mountPath)
715 717
 
716 718
 		if mod := 4095 % pathLength; mod == 0 || mod > pathLength-2 {
... ...
@@ -718,6 +727,6 @@ func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) {
718 718
 			testMountMoreThan42Layers(t, mountPath)
719 719
 			return
720 720
 		}
721
-		tmp += "0"
721
+		zeroes += "0"
722 722
 	}
723 723
 }
... ...
@@ -13,6 +13,9 @@ func init() {
13 13
 	DefaultDataLoopbackSize = 300 * 1024 * 1024
14 14
 	DefaultMetaDataLoopbackSize = 200 * 1024 * 1024
15 15
 	DefaultBaseFsSize = 300 * 1024 * 1024
16
+	if err := graphtest.InitLoopbacks(); err != nil {
17
+		panic(err)
18
+	}
16 19
 }
17 20
 
18 21
 // This avoids creating a new driver for each test if all tests are run
... ...
@@ -8,6 +8,7 @@ import (
8 8
 
9 9
 	log "github.com/Sirupsen/logrus"
10 10
 	"github.com/docker/docker/pkg/archive"
11
+	"github.com/docker/docker/pkg/chrootarchive"
11 12
 	"github.com/docker/docker/pkg/ioutils"
12 13
 	"github.com/docker/docker/utils"
13 14
 )
... ...
@@ -122,7 +123,7 @@ func (gdw *naiveDiffDriver) ApplyDiff(id, parent string, diff archive.ArchiveRea
122 122
 
123 123
 	start := time.Now().UTC()
124 124
 	log.Debugf("Start untar layer")
125
-	if err = archive.ApplyLayer(layerFs, diff); err != nil {
125
+	if err = chrootarchive.ApplyLayer(layerFs, diff); err != nil {
126 126
 		return
127 127
 	}
128 128
 	log.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
... ...
@@ -1,6 +1,7 @@
1 1
 package graphtest
2 2
 
3 3
 import (
4
+	"fmt"
4 5
 	"io/ioutil"
5 6
 	"os"
6 7
 	"path"
... ...
@@ -20,6 +21,46 @@ type Driver struct {
20 20
 	refCount int
21 21
 }
22 22
 
23
+// InitLoopbacks ensures that the loopback devices are properly created within
24
+// the system running the device mapper tests.
25
+func InitLoopbacks() error {
26
+	stat_t, err := getBaseLoopStats()
27
+	if err != nil {
28
+		return err
29
+	}
30
+	// create at least 8 loopback files, which is plenty for these tests
31
+	for i := 0; i < 8; i++ {
32
+		loopPath := fmt.Sprintf("/dev/loop%d", i)
33
+		// only create new loopback files if they don't exist
34
+		if _, err := os.Stat(loopPath); err != nil {
35
+			if mkerr := syscall.Mknod(loopPath,
36
+				uint32(stat_t.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil {
37
+				return mkerr
38
+			}
39
+			os.Chown(loopPath, int(stat_t.Uid), int(stat_t.Gid))
40
+		}
41
+	}
42
+	return nil
43
+}
44
+
45
+// getBaseLoopStats inspects /dev/loop0 to collect the uid, gid, and mode of the
46
+// loop0 device on the system. If it does not exist, we assume 0,0,0660 for the
47
+// stat data
48
+func getBaseLoopStats() (*syscall.Stat_t, error) {
49
+	loop0, err := os.Stat("/dev/loop0")
50
+	if err != nil {
51
+		if os.IsNotExist(err) {
52
+			return &syscall.Stat_t{
53
+				Uid:  0,
54
+				Gid:  0,
55
+				Mode: 0660,
56
+			}, nil
57
+		}
58
+		return nil, err
59
+	}
60
+	return loop0.Sys().(*syscall.Stat_t), nil
61
+}
62
+
23 63
 func newDriver(t *testing.T, name string) *Driver {
24 64
 	root, err := ioutil.TempDir("/var/tmp", "docker-graphtest-")
25 65
 	if err != nil {
... ...
@@ -129,6 +129,7 @@ func supportsOverlayfs() error {
129 129
 			return nil
130 130
 		}
131 131
 	}
132
+	log.Error("'overlayfs' not found as a supported filesystem on this host. Please ensure the kernel is new enough and has overlayfs support loaded.")
132 133
 	return graphdriver.ErrNotSupported
133 134
 }
134 135
 
... ...
@@ -8,7 +8,7 @@ import (
8 8
 	"path"
9 9
 
10 10
 	"github.com/docker/docker/daemon/graphdriver"
11
-	"github.com/docker/docker/pkg/archive"
11
+	"github.com/docker/docker/pkg/chrootarchive"
12 12
 	"github.com/docker/libcontainer/label"
13 13
 )
14 14
 
... ...
@@ -66,7 +66,7 @@ func (d *Driver) Create(id, parent string) error {
66 66
 	if err != nil {
67 67
 		return fmt.Errorf("%s: %s", parent, err)
68 68
 	}
69
-	if err := archive.CopyWithTar(parentDir, dir); err != nil {
69
+	if err := chrootarchive.CopyWithTar(parentDir, dir); err != nil {
70 70
 		return err
71 71
 	}
72 72
 	return nil
... ...
@@ -1,10 +1,17 @@
1 1
 package vfs
2 2
 
3 3
 import (
4
-	"github.com/docker/docker/daemon/graphdriver/graphtest"
5 4
 	"testing"
5
+
6
+	"github.com/docker/docker/daemon/graphdriver/graphtest"
7
+
8
+	"github.com/docker/docker/pkg/reexec"
6 9
 )
7 10
 
11
+func init() {
12
+	reexec.Init()
13
+}
14
+
8 15
 // This avoids creating a new driver for each test if all tests are run
9 16
 // Make sure to put new tests between TestVfsSetup and TestVfsTeardown
10 17
 func TestVfsSetup(t *testing.T) {
... ...
@@ -56,6 +56,7 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
56 56
 		return job.Error(err)
57 57
 	}
58 58
 	v := &engine.Env{}
59
+	v.Set("ID", daemon.ID)
59 60
 	v.SetInt("Containers", len(daemon.List()))
60 61
 	v.SetInt("Images", imgcount)
61 62
 	v.Set("Driver", daemon.GraphDriver().String())
... ...
@@ -75,6 +76,10 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
75 75
 	v.Set("InitPath", initPath)
76 76
 	v.SetInt("NCPU", runtime.NumCPU())
77 77
 	v.SetInt64("MemTotal", meminfo.MemTotal)
78
+	if hostname, err := os.Hostname(); err == nil {
79
+		v.Set("Name", hostname)
80
+	}
81
+	v.SetList("Labels", daemon.Config().Labels)
78 82
 	if _, err := v.WriteTo(job.Stdout); err != nil {
79 83
 		return job.Error(err)
80 84
 	}
... ...
@@ -47,6 +47,7 @@ func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
47 47
 		out.Set("ProcessLabel", container.ProcessLabel)
48 48
 		out.SetJson("Volumes", container.Volumes)
49 49
 		out.SetJson("VolumesRW", container.VolumesRW)
50
+		out.SetJson("AppArmorProfile", container.AppArmorProfile)
50 51
 
51 52
 		if children, err := daemon.Children(container.Name); err == nil {
52 53
 			for linkAlias, child := range children {
... ...
@@ -195,7 +195,7 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error {
195 195
 			if output, err := iptables.Raw(append([]string{"-I"}, natArgs...)...); err != nil {
196 196
 				return fmt.Errorf("Unable to enable network bridge NAT: %s", err)
197 197
 			} else if len(output) != 0 {
198
-				return fmt.Errorf("Error iptables postrouting: %s", output)
198
+				return &iptables.ChainError{Chain: "POSTROUTING", Output: output}
199 199
 			}
200 200
 		}
201 201
 	}
... ...
@@ -236,7 +236,7 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error {
236 236
 		if output, err := iptables.Raw(append([]string{"-I"}, outgoingArgs...)...); err != nil {
237 237
 			return fmt.Errorf("Unable to allow outgoing packets: %s", err)
238 238
 		} else if len(output) != 0 {
239
-			return fmt.Errorf("Error iptables allow outgoing: %s", output)
239
+			return &iptables.ChainError{Chain: "FORWARD outgoing", Output: output}
240 240
 		}
241 241
 	}
242 242
 
... ...
@@ -247,15 +247,15 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error {
247 247
 		if output, err := iptables.Raw(append([]string{"-I"}, existingArgs...)...); err != nil {
248 248
 			return fmt.Errorf("Unable to allow incoming packets: %s", err)
249 249
 		} else if len(output) != 0 {
250
-			return fmt.Errorf("Error iptables allow incoming: %s", output)
250
+			return &iptables.ChainError{Chain: "FORWARD incoming", Output: output}
251 251
 		}
252 252
 	}
253 253
 	return nil
254 254
 }
255 255
 
256
-// configureBridge attempts to create and configure a network bridge interface named `ifaceName` on the host
256
+// configureBridge attempts to create and configure a network bridge interface named `bridgeIface` on the host
257 257
 // If bridgeIP is empty, it will try to find a non-conflicting IP from the Docker-specified private ranges
258
-// If the bridge `ifaceName` already exists, it will only perform the IP address association with the existing
258
+// If the bridge `bridgeIface` already exists, it will only perform the IP address association with the existing
259 259
 // bridge (fixes issue #8444)
260 260
 // If an address which doesn't conflict with existing interfaces can't be found, an error is returned.
261 261
 func configureBridge(bridgeIP string) error {
... ...
@@ -145,7 +145,7 @@ func (p *proxyCommand) Start() error {
145 145
 	select {
146 146
 	case err := <-errchan:
147 147
 		return err
148
-	case <-time.After(1 * time.Second):
148
+	case <-time.After(16 * time.Second):
149 149
 		return fmt.Errorf("Timed out proxy starting the userland proxy")
150 150
 	}
151 151
 }
... ...
@@ -44,6 +44,9 @@ func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
44 44
 }
45 45
 
46 46
 func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
47
+	if err := parseSecurityOpt(container, hostConfig); err != nil {
48
+		return err
49
+	}
47 50
 	// Validate the HostConfig binds. Make sure that:
48 51
 	// the source exists
49 52
 	for _, bind := range hostConfig.Binds {
... ...
@@ -12,7 +12,7 @@ import (
12 12
 
13 13
 	log "github.com/Sirupsen/logrus"
14 14
 	"github.com/docker/docker/daemon/execdriver"
15
-	"github.com/docker/docker/pkg/archive"
15
+	"github.com/docker/docker/pkg/chrootarchive"
16 16
 	"github.com/docker/docker/pkg/symlink"
17 17
 	"github.com/docker/docker/volumes"
18 18
 )
... ...
@@ -320,7 +320,7 @@ func copyExistingContents(source, destination string) error {
320 320
 
321 321
 		if len(srcList) == 0 {
322 322
 			// If the source volume is empty copy files from the root into the volume
323
-			if err := archive.CopyWithTar(source, destination); err != nil {
323
+			if err := chrootarchive.CopyWithTar(source, destination); err != nil {
324 324
 				return err
325 325
 			}
326 326
 		}
... ...
@@ -34,6 +34,8 @@ func mainDaemon() {
34 34
 	eng := engine.New()
35 35
 	signal.Trap(eng.Shutdown)
36 36
 
37
+	daemonCfg.TrustKeyPath = *flTrustKey
38
+
37 39
 	// Load builtins
38 40
 	if err := builtins.Register(eng); err != nil {
39 41
 		log.Fatal(err)
... ...
@@ -83,9 +83,14 @@ func main() {
83 83
 	)
84 84
 	tlsConfig.InsecureSkipVerify = true
85 85
 
86
+	// Regardless of whether the user sets it to true or false, if they
87
+	// specify --tlsverify at all then we need to turn on tls
88
+	if flag.IsSet("-tlsverify") {
89
+		*flTls = true
90
+	}
91
+
86 92
 	// If we should verify the server, we need to load a trusted ca
87 93
 	if *flTlsVerify {
88
-		*flTls = true
89 94
 		certPool := x509.NewCertPool()
90 95
 		file, err := ioutil.ReadFile(*flCa)
91 96
 		if err != nil {
... ...
@@ -35,7 +35,7 @@ var (
35 35
 	flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group")
36 36
 	flLogLevel    = flag.String([]string{"l", "-log-level"}, "info", "Set the logging level")
37 37
 	flEnableCors  = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API")
38
-	flTls         = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags")
38
+	flTls         = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by --tlsverify flag")
39 39
 	flTlsVerify   = flag.Bool([]string{"-tlsverify"}, dockerTlsVerify, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)")
40 40
 
41 41
 	// these are initialized in init() below since their default values depend on dockerCertPath which isn't fully initialized until init() runs
... ...
@@ -68,6 +68,9 @@ unix://[/path/to/socket] to use.
68 68
 **-l**, **--log-level**="*debug*|*info*|*error*|*fatal*""
69 69
   Set the logging level. Default is `info`.
70 70
 
71
+**--label**="[]"
72
+  Set key=value labels to the daemon (displayed in `docker info`)
73
+
71 74
 **--mtu**=VALUE
72 75
   Set the containers network mtu. Default is `1500`.
73 76
 
... ...
@@ -49,8 +49,9 @@ You can still call an old version of the API using
49 49
 `GET /info`
50 50
 
51 51
 **New!**
52
-`info` now returns the number of CPUs available on the machine (`NCPU`) and
53
-total memory available (`MemTotal`).
52
+`info` now returns the number of CPUs available on the machine (`NCPU`),
53
+total memory available (`MemTotal`), a user-friendly name describing the running Docker daemon (`Name`), a unique ID identifying the daemon (`ID`), and
54
+a list of daemon labels (`Labels`).
54 55
 
55 56
 `POST /containers/create`
56 57
 
... ...
@@ -524,6 +524,7 @@ Start the container `id`
524 524
         HTTP/1.1 204 No Content
525 525
 
526 526
 Json Parameters:
527
+
527 528
 -   **Binds** – A list of volume bindings for this container.  Each volume
528 529
         binding is a string of the form `container_path` (to create a new
529 530
         volume for the container), `host_path:container_path` (to bind-mount
... ...
@@ -1560,7 +1561,6 @@ Sets up an exec instance in a running container `id`
1560 1560
 	     "Cmd":[
1561 1561
                      "date"
1562 1562
              ],
1563
-	     "Container":"e90e34656806",
1564 1563
         }
1565 1564
 
1566 1565
 **Example response**:
... ...
@@ -1574,7 +1574,12 @@ Sets up an exec instance in a running container `id`
1574 1574
 
1575 1575
 Json Parameters:
1576 1576
 
1577
--   **execConfig** – exec configuration.
1577
+-   **AttachStdin** - Boolean value, attaches to stdin of the exec command.
1578
+-   **AttachStdout** - Boolean value, attaches to stdout of the exec command.
1579
+-   **AttachStderr** - Boolean value, attaches to stderr of the exec command.
1580
+-   **Tty** - Boolean value to allocate a pseudo-TTY
1581
+-   **Cmd** - Command to run specified as a string or an array of strings.
1582
+
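
With `Container` dropped from the request body (the container ID now comes only from the URL), an exec-create request carries just the parameters listed above. A hypothetical client sketch over the Unix socket, assuming the default socket path and reusing the placeholder container ID from the example above:

```
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
)

func main() {
	// Talk to the daemon over its Unix socket.
	client := &http.Client{
		Transport: &http.Transport{
			Dial: func(network, addr string) (net.Conn, error) {
				return net.Dial("unix", "/var/run/docker.sock")
			},
		},
	}
	body := bytes.NewBufferString(`{"AttachStdout": true, "AttachStderr": true, "Tty": false, "Cmd": ["date"]}`)
	resp, err := client.Post("http://unix/containers/e90e34656806/exec", "application/json", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}
```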
1578 1583
 
1579 1584
 Status Codes:
1580 1585
 
... ...
@@ -1585,8 +1590,9 @@ Status Codes:
1585 1585
 
1586 1586
 `POST /exec/(id)/start`
1587 1587
 
1588
-Starts a previously set up exec instance `id`. If `detach` is true, this API returns after
1589
-starting the `exec` command. Otherwise, this API sets up an interactive session with the `exec` command.
1588
+Starts a previously set up exec instance `id`. If `detach` is true, this API
1589
+returns after starting the `exec` command. Otherwise, this API sets up an
1590
+interactive session with the `exec` command.
1590 1591
 
1591 1592
 **Example request**:
1592 1593
 
... ...
@@ -1607,7 +1613,8 @@ starting the `exec` command. Otherwise, this API sets up an interactive session
1607 1607
 
1608 1608
 Json Parameters:
1609 1609
 
1610
--   **execConfig** – exec configuration.
1610
+-   **Detach** - Detach from the exec command
1611
+-   **Tty** - Boolean value to allocate a pseudo-TTY
1611 1612
 
1612 1613
 Status Codes:
1613 1614
 
... ...
@@ -1156,6 +1156,7 @@ Query Parameters:
1156 1156
         the resulting image in case of success
1157 1157
 -   **q** – suppress verbose build output
1158 1158
 -   **nocache** – do not use the cache when building the image
1159
+-   **pull** - attempt to pull the image even if an older image exists locally
1159 1160
 -   **rm** - remove intermediate containers after a successful build (default behavior)
1160 1161
 -   **forcerm** - always remove intermediate containers (includes rm)
1161 1162
 
... ...
@@ -1220,6 +1221,8 @@ Display system-wide information
1220 1220
              "KernelVersion":"3.12.0-1-amd64"
1221 1221
              "NCPU":1,
1222 1222
              "MemTotal":2099236864,
1223
+             "Name":"prod-server-42",
1224
+             "ID":"7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS",
1223 1225
              "Debug":false,
1224 1226
              "NFd": 11,
1225 1227
              "NGoroutines":21,
... ...
@@ -1228,7 +1231,8 @@ Display system-wide information
1228 1228
              "IndexServerAddress":["https://index.docker.io/v1/"],
1229 1229
              "MemoryLimit":true,
1230 1230
              "SwapLimit":false,
1231
-             "IPv4Forwarding":true
1231
+             "IPv4Forwarding":true,
1232
+             "Labels":["storage=ssd"]
1232 1233
         }
1233 1234
 
1234 1235
 Status Codes:
... ...
@@ -1511,7 +1515,6 @@ Sets up an exec instance in a running container `id`
1511 1511
 	     "Cmd":[
1512 1512
                      "date"
1513 1513
              ],
1514
-	     "Container":"e90e34656806",
1515 1514
         }
1516 1515
 
1517 1516
 **Example response**:
... ...
@@ -1525,7 +1528,12 @@ Sets up an exec instance in a running container `id`
1525 1525
 
1526 1526
 Json Parameters:
1527 1527
 
1528
--   **execConfig** – exec configuration.
1528
+-   **AttachStdin** - Boolean value, attaches to stdin of the exec command.
1529
+-   **AttachStdout** - Boolean value, attaches to stdout of the exec command.
1530
+-   **AttachStderr** - Boolean value, attaches to stderr of the exec command.
1531
+-   **Tty** - Boolean value to allocate a pseudo-TTY
1532
+-   **Cmd** - Command to run specified as a string or an array of strings.
1533
+
1529 1534
 
1530 1535
 Status Codes:
1531 1536
 
... ...
@@ -1536,8 +1544,9 @@ Status Codes:
1536 1536
 
1537 1537
 `POST /exec/(id)/start`
1538 1538
 
1539
-Starts a previously set up exec instance `id`. If `detach` is true, this API returns after
1540
-starting the `exec` command. Otherwise, this API sets up an interactive session with the `exec` command.
1539
+Starts a previously set up exec instance `id`. If `detach` is true, this API
1540
+returns after starting the `exec` command. Otherwise, this API sets up an
1541
+interactive session with the `exec` command.
1541 1542
 
1542 1543
 **Example request**:
1543 1544
 
... ...
@@ -1558,7 +1567,8 @@ starting the `exec` command. Otherwise, this API sets up an interactive session
1558 1558
 
1559 1559
 Json Parameters:
1560 1560
 
1561
--   **execConfig** – exec configuration.
1561
+-   **Detach** - Detach from the exec command
1562
+-   **Tty** - Boolean value to allocate a pseudo-TTY
1562 1563
 
1563 1564
 Status Codes:
1564 1565
 
... ...
@@ -337,11 +337,36 @@ expose ports to the host, at runtime,
337 337
 ## ENV
338 338
 
339 339
     ENV <key> <value>
340
+    ENV <key>=<value> ...
340 341
 
341 342
 The `ENV` instruction sets the environment variable `<key>` to the value
342 343
 `<value>`. This value will be passed to all future `RUN` instructions. This is
343 344
 functionally equivalent to prefixing the command with `<key>=<value>`
344 345
 
346
+The `ENV` instruction has two forms. The first form, `ENV <key> <value>`,
347
+will set a single variable to a value. The entire string after the first
348
+space will be treated as the `<value>` - including characters such as 
349
+spaces and quotes.
350
+
351
+The second form, `ENV <key>=<value> ...`, allows for multiple variables to 
352
+be set at one time. Notice that the second form uses the equals sign (=) 
353
+in the syntax, while the first form does not. Like command line parsing, 
354
+quotes and backslashes can be used to include spaces within values.
355
+
356
+For example:
357
+
358
+    ENV myName="John Doe" myDog=Rex\ The\ Dog \
359
+        myCat=fluffy
360
+
361
+and
362
+
363
+    ENV myName John Doe
364
+    ENV myDog Rex The Dog
365
+    ENV myCat fluffy
366
+
367
+will yield the same net results in the final container, but the first form 
368
+does it all in one layer.
369
+
345 370
 The environment variables set using `ENV` will persist when a container is run
346 371
 from the resulting image. You can view the values using `docker inspect`, and
347 372
 change them using `docker run --env <key>=<value>`.
... ...
@@ -566,6 +591,17 @@ To examine the result further, you can use `docker exec`:
566 566
 
567 567
 And you can gracefully request `top` to shut down using `docker stop test`.
568 568
 
569
+The following `Dockerfile` shows using the `ENTRYPOINT` to run Apache in the
570
+foreground (i.e., as `PID 1`):
571
+
572
+```
573
+FROM debian:stable
574
+RUN apt-get update && apt-get install -y --force-yes apache2
575
+EXPOSE 80 443
576
+VOLUME ["/var/www", "/var/log/apache2", "/etc/apache2"]
577
+ENTRYPOINT ["/usr/sbin/apache2ctl", "-D", "FOREGROUND"]
578
+```
579
+
569 580
 If you need to write a starter script for a single executable, you can ensure that
570 581
 the final executable receives the Unix signals by using `exec` and `gosu`
571 582
 (see [the Dockerfile best practices](/articles/dockerfile_best-practices/#entrypoint)
... ...
@@ -11,7 +11,7 @@ or execute `docker help`:
11 11
       Usage: docker [OPTIONS] COMMAND [arg...]
12 12
         -H, --host=[]: The socket(s) to bind to in daemon mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.
13 13
 
14
-      A self-sufficient runtime for linux containers.
14
+      A self-sufficient runtime for Linux containers.
15 15
 
16 16
       ...
17 17
 
... ...
@@ -76,7 +76,7 @@ expect an integer, and they can only be specified once.
76 76
       --ip-masq=true                             Enable IP masquerading for bridge's IP range
77 77
       --iptables=true                            Enable Docker's addition of iptables rules
78 78
        -l, --log-level="info"                    Set the logging level
79
-
79
+      --label=[]                                 Set key=value labels to the daemon (displayed in `docker info`)
80 80
       --mtu=0                                    Set the containers network MTU
81 81
                                                    if no value is provided: default to the default route MTU or 1500 if no default route is available
82 82
       -p, --pidfile="/var/run/docker.pid"        Path to use for daemon PID file
... ...
@@ -84,7 +84,7 @@ expect an integer, and they can only be specified once.
84 84
       -s, --storage-driver=""                    Force the Docker runtime to use a specific storage driver
85 85
       --selinux-enabled=false                    Enable selinux support. SELinux does not presently support the BTRFS storage driver
86 86
       --storage-opt=[]                           Set storage driver options
87
-      --tls=false                                Use TLS; implied by tls-verify flags
87
+      --tls=false                                Use TLS; implied by --tlsverify flag
88 88
       --tlscacert="/home/sven/.docker/ca.pem"    Trust only remotes providing a certificate signed by the CA given here
89 89
       --tlscert="/home/sven/.docker/cert.pem"    Path to TLS certificate file
90 90
       --tlskey="/home/sven/.docker/key.pem"      Path to TLS key file
... ...
@@ -111,7 +111,7 @@ requiring either `root` permission, or `docker` group membership.
111 111
 If you need to access the Docker daemon remotely, you need to enable the `tcp`
112 112
 Socket. Beware that the default setup provides un-encrypted and un-authenticated
113 113
 direct access to the Docker daemon - and should be secured either using the
114
-[built in https encrypted socket](/articles/https/), or by putting a secure web
114
+[built-in HTTPS encrypted socket](/articles/https/), or by putting a secure web
115 115
 proxy in front of it. You can listen on port `2375` on all network interfaces
116 116
 with `-H tcp://0.0.0.0:2375`, or on a particular network interface using its IP
117 117
 address: `-H tcp://192.168.59.103:2375`. It is conventional to use port `2375`
... ...
@@ -155,8 +155,8 @@ string is equivalent to setting the `--tlsverify` flag. The following are equiva
155 155
 
156 156
 ### Daemon storage-driver option
157 157
 
158
-The Docker daemon has support for three different image layer storage drivers: `aufs`,
159
-`devicemapper`, and `btrfs`.
158
+The Docker daemon has support for several different image layer storage drivers: `aufs`,
159
+`devicemapper`, `btrfs` and `overlayfs`.
160 160
 
161 161
 The `aufs` driver is the oldest, but is based on a Linux kernel patch-set that
162 162
 is unlikely to be merged into the main kernel. These are also known to cause some
... ...
@@ -175,6 +175,9 @@ To tell the Docker daemon to use `devicemapper`, use
175 175
 The `btrfs` driver is very fast for `docker build` - but like `devicemapper` does not
176 176
 share executable memory between devices. Use `docker -d -s btrfs -g /mnt/btrfs_partition`.
177 177
 
178
+The `overlayfs` driver is a very fast union filesystem. It was merged into the mainline
179
+Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137).
180
+Use `docker -d -s overlayfs` to select it.
178 181
 
179 182
 ### Docker exec-driver option
180 183
 
... ...
@@ -312,6 +315,7 @@ To kill the container, use `docker kill`.
312 312
 
313 313
       --force-rm=false     Always remove intermediate containers, even after unsuccessful builds
314 314
       --no-cache=false     Do not use cache when building the image
315
+      --pull=false         Always attempt to pull a newer version of the image
315 316
       -q, --quiet=false    Suppress the verbose output generated by the containers
316 317
       --rm=true            Remove intermediate containers after a successful build
317 318
       -t, --tag=""         Repository name (and optionally a tag) to be applied to the resulting image in case of success
... ...
@@ -458,7 +462,7 @@ Supported formats are: bzip2, gzip and xz.
458 458
 This will clone the GitHub repository and use the cloned repository as
459 459
 context. The Dockerfile at the root of the
460 460
 repository is used as Dockerfile. Note that you
461
-can specify an arbitrary Git repository by using the `git://`
461
+can specify an arbitrary Git repository by using the `git://` or `git@`
462 462
 schema.
463 463
 
464 464
 > **Note:** `docker build` will return a `no such file or directory` error
... ...
@@ -738,19 +742,24 @@ decrease disk usage, and speed up `docker build` by
738 738
 allowing each step to be cached. These intermediate layers are not shown
739 739
 by default.
740 740
 
741
+An image will be listed more than once if it has multiple repository names
742
+or tags. This single image (identifiable by its matching `IMAGE ID`)
743
+uses up the `VIRTUAL SIZE` listed only once.
744
+
741 745
 #### Listing the most recently created images
742 746
 
743 747
     $ sudo docker images | head
744
-    REPOSITORY                    TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
745
-    <none>                        <none>              77af4d6b9913        19 hours ago        1.089 GB
746
-    committest                    latest              b6fa739cedf5        19 hours ago        1.089 GB
747
-    <none>                        <none>              78a85c484f71        19 hours ago        1.089 GB
748
-    docker                        latest              30557a29d5ab        20 hours ago        1.089 GB
749
-    <none>                        <none>              0124422dd9f9        20 hours ago        1.089 GB
750
-    <none>                        <none>              18ad6fad3402        22 hours ago        1.082 GB
751
-    <none>                        <none>              f9f1e26352f0        23 hours ago        1.089 GB
752
-    tryout                        latest              2629d1fa0b81        23 hours ago        131.5 MB
753
-    <none>                        <none>              5ed6274db6ce        24 hours ago        1.089 GB
748
+    REPOSITORY                TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
749
+    <none>                    <none>              77af4d6b9913        19 hours ago        1.089 GB
750
+    committ                   latest              b6fa739cedf5        19 hours ago        1.089 GB
751
+    <none>                    <none>              78a85c484f71        19 hours ago        1.089 GB
752
+    docker                    latest              30557a29d5ab        20 hours ago        1.089 GB
753
+    <none>                    <none>              5ed6274db6ce        24 hours ago        1.089 GB
754
+    postgres                  9                   746b819f315e        4 days ago          213.4 MB
755
+    postgres                  9.3                 746b819f315e        4 days ago          213.4 MB
756
+    postgres                  9.3.5               746b819f315e        4 days ago          213.4 MB
757
+    postgres                  latest              746b819f315e        4 days ago          213.4 MB
758
+
754 759
 
755 760
 #### Listing the full length image IDs
756 761
 
... ...
@@ -851,11 +860,15 @@ For example:
851 851
     $ sudo docker -D info
852 852
     Containers: 14
853 853
     Images: 52
854
-    Storage Driver: btrfs
854
+    Storage Driver: aufs
855
+     Root Dir: /var/lib/docker/aufs
856
+     Dirs: 545
855 857
     Execution Driver: native-0.2
856 858
     Kernel Version: 3.13.0-24-generic
857 859
     Operating System: Ubuntu 14.04 LTS
858 860
     CPUs: 1
861
+    Name: prod-server-42
862
+    ID: 7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS
859 863
     Total Memory: 2 GiB
860 864
     Debug mode (server): false
861 865
     Debug mode (client): true
... ...
@@ -865,6 +878,8 @@ For example:
865 865
     Init Path: /usr/bin/docker
866 866
     Username: svendowideit
867 867
     Registry: [https://index.docker.io/v1/]
868
+    Labels:
869
+     storage=ssd
868 870
 
869 871
 The global `-D` option tells all `docker` commands to output debug information.
870 872
 
... ...
@@ -4,6 +4,35 @@ page_keywords: docker, documentation, about, technology, understanding, release
4 4
 
5 5
 #Release Notes
6 6
 
7
+##Version 1.3.2
8
+(2014-11-24)
9
+
10
+This release fixes some bugs and addresses some security issues. We have also
11
+made improvements to the daemon's registry handling.
12
+
13
+*Security fixes*
14
+
15
+Patches and changes were made to address CVE-2014-6407 and CVE-2014-6408.
16
+Specifically, changes were made in order to:
17
+
18
+* Prevent host privilege escalation from an image extraction vulnerability (CVE-2014-6407).
19
+
20
+* Prevent container escalation from malicious security options applied to images (CVE-2014-6408).
21
+
22
+*Daemon fixes*
23
+
24
+The `--insecure-registry` flag of the Docker daemon has undergone
25
+several refinements and additions. For details, please see the
26
+[command-line reference](http://docs.docker.com/reference/commandline/cli/#run).
27
+
28
+* You can now specify a subnet to define a range of registries that the Docker daemon will consider insecure.
29
+
30
+* By default, Docker now defines `localhost` as an insecure registry.
31
+
32
+* Registries can now be referenced using the Classless Inter-Domain Routing (CIDR) format, as sketched below.
33
+
34
+* When mirroring is enabled, the experimental registry v2 API is skipped.
35
+
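
A minimal sketch of how a CIDR given to `--insecure-registry` can be matched against a registry host, using only the standard library; the hostname and range below are placeholders, and this is not the daemon's actual lookup code:

```
package main

import (
	"fmt"
	"net"
)

// insecureByCIDR reports whether any resolved address of host falls inside cidr.
func insecureByCIDR(cidr, host string) (bool, error) {
	_, ipnet, err := net.ParseCIDR(cidr)
	if err != nil {
		return false, err
	}
	ips, err := net.LookupIP(host)
	if err != nil {
		return false, err
	}
	for _, ip := range ips {
		if ipnet.Contains(ip) {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := insecureByCIDR("10.1.0.0/16", "registry.internal.example")
	fmt.Println(ok, err)
}
```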
7 36
 ##Version 1.3.1
8 37
 (2014-10-28)
9 38
 
... ...
@@ -30,24 +30,21 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
30 30
 	defer os.RemoveAll(tempdir)
31 31
 
32 32
 	rootRepoMap := map[string]Repository{}
33
+	addKey := func(name string, tag string, id string) {
34
+		log.Debugf("add key [%s:%s]", name, tag)
35
+		if repo, ok := rootRepoMap[name]; !ok {
36
+			rootRepoMap[name] = Repository{tag: id}
37
+		} else {
38
+			repo[tag] = id
39
+		}
40
+	}
33 41
 	for _, name := range job.Args {
34 42
 		log.Debugf("Serializing %s", name)
35 43
 		rootRepo := s.Repositories[name]
36 44
 		if rootRepo != nil {
37 45
 			// this is a base repo name, like 'busybox'
38
-			for _, id := range rootRepo {
39
-				if _, ok := rootRepoMap[name]; !ok {
40
-					rootRepoMap[name] = rootRepo
41
-				} else {
42
-					log.Debugf("Duplicate key [%s]", name)
43
-					if rootRepoMap[name].Contains(rootRepo) {
44
-						log.Debugf("skipping, because it is present [%s:%q]", name, rootRepo)
45
-						continue
46
-					}
47
-					log.Debugf("updating [%s]: [%q] with [%q]", name, rootRepoMap[name], rootRepo)
48
-					rootRepoMap[name].Update(rootRepo)
49
-				}
50
-
46
+			for tag, id := range rootRepo {
47
+				addKey(name, tag, id)
51 48
 				if err := s.exportImage(job.Eng, id, tempdir); err != nil {
52 49
 					return job.Error(err)
53 50
 				}
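
The `addKey` closure above replaces the earlier duplicate-key bookkeeping by relying on the fact that a Go map value of map type is a reference: mutating the looked-up `repo` updates `rootRepoMap` in place. A standalone sketch of the same pattern, where `Repository` simply stands in for the tag-to-image-ID map and the names are illustrative:

```
package main

import "fmt"

type Repository map[string]string

func main() {
	rootRepoMap := map[string]Repository{}
	addKey := func(name, tag, id string) {
		if repo, ok := rootRepoMap[name]; !ok {
			rootRepoMap[name] = Repository{tag: id}
		} else {
			repo[tag] = id // mutates the map stored in rootRepoMap[name]
		}
	}

	addKey("busybox", "latest", "abc123")
	addKey("busybox", "1.0", "def456") // same repo, extra tag: no duplicate handling needed
	fmt.Println(rootRepoMap)
}
```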
... ...
@@ -65,18 +62,7 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
65 65
 				// check this length, because a lookup of a truncated hash will not have a tag
66 66
 				// and will not need to be added to this map
67 67
 				if len(repoTag) > 0 {
68
-					if _, ok := rootRepoMap[repoName]; !ok {
69
-						rootRepoMap[repoName] = Repository{repoTag: img.ID}
70
-					} else {
71
-						log.Debugf("Duplicate key [%s]", repoName)
72
-						newRepo := Repository{repoTag: img.ID}
73
-						if rootRepoMap[repoName].Contains(newRepo) {
74
-							log.Debugf("skipping, because it is present [%s:%q]", repoName, newRepo)
75
-							continue
76
-						}
77
-						log.Debugf("updating [%s]: [%q] with [%q]", repoName, rootRepoMap[repoName], newRepo)
78
-						rootRepoMap[repoName].Update(newRepo)
79
-					}
68
+					addKey(repoName, repoTag, img.ID)
80 69
 				}
81 70
 				if err := s.exportImage(job.Eng, img.ID, tempdir); err != nil {
82 71
 					return job.Error(err)
... ...
@@ -1,3 +1,5 @@
1
+// +build linux
2
+
1 3
 package graph
2 4
 
3 5
 import (
... ...
@@ -11,6 +13,7 @@ import (
11 11
 	"github.com/docker/docker/engine"
12 12
 	"github.com/docker/docker/image"
13 13
 	"github.com/docker/docker/pkg/archive"
14
+	"github.com/docker/docker/pkg/chrootarchive"
14 15
 )
15 16
 
16 17
 // Loads a set of images into the repository. This is the complementary of ImageExport.
... ...
@@ -53,7 +56,7 @@ func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
53 53
 		excludes[i] = k
54 54
 		i++
55 55
 	}
56
-	if err := archive.Untar(repoFile, repoDir, &archive.TarOptions{Excludes: excludes}); err != nil {
56
+	if err := chrootarchive.Untar(repoFile, repoDir, &archive.TarOptions{Excludes: excludes}); err != nil {
57 57
 		return job.Error(err)
58 58
 	}
59 59
 
60 60
new file mode 100644
... ...
@@ -0,0 +1,11 @@
0
+// +build !linux
1
+
2
+package graph
3
+
4
+import (
5
+	"github.com/docker/docker/engine"
6
+)
7
+
8
+func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
9
+	return job.Errorf("CmdLoad is not supported on this platform")
10
+}
... ...
@@ -1,6 +1,14 @@
1 1
 package graph
2 2
 
3
-import "testing"
3
+import (
4
+	"testing"
5
+
6
+	"github.com/docker/docker/pkg/reexec"
7
+)
8
+
9
+func init() {
10
+	reexec.Init()
11
+}
4 12
 
5 13
 func TestPools(t *testing.T) {
6 14
 	s := &TagStore{
... ...
@@ -179,6 +179,7 @@ func TestBuildEnvironmentReplacementAddCopy(t *testing.T) {
179 179
 	if err != nil {
180 180
 		t.Fatal(err)
181 181
 	}
182
+	defer ctx.Close()
182 183
 
183 184
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
184 185
 		t.Fatal(err)
... ...
@@ -632,6 +633,8 @@ func TestBuildSixtySteps(t *testing.T) {
632 632
 	if err != nil {
633 633
 		t.Fatal(err)
634 634
 	}
635
+	defer ctx.Close()
636
+
635 637
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
636 638
 		t.Fatal(err)
637 639
 	}
... ...
@@ -656,6 +659,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
656 656
 	if err != nil {
657 657
 		t.Fatal(err)
658 658
 	}
659
+	defer ctx.Close()
660
+
659 661
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
660 662
 		t.Fatal(err)
661 663
 	}
... ...
@@ -674,6 +679,8 @@ ADD test_file .`,
674 674
 	if err != nil {
675 675
 		t.Fatal(err)
676 676
 	}
677
+	defer ctx.Close()
678
+
677 679
 	done := make(chan struct{})
678 680
 	go func() {
679 681
 		if _, err := buildImageFromContext(name, ctx, true); err != nil {
... ...
@@ -708,6 +715,8 @@ RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio'
708 708
 	if err != nil {
709 709
 		t.Fatal(err)
710 710
 	}
711
+	defer ctx.Close()
712
+
711 713
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
712 714
 		t.Fatal(err)
713 715
 	}
... ...
@@ -947,6 +956,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
947 947
 	if err != nil {
948 948
 		t.Fatal(err)
949 949
 	}
950
+	defer ctx.Close()
951
+
950 952
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
951 953
 		t.Fatal(err)
952 954
 	}
... ...
@@ -971,6 +982,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
971 971
 	if err != nil {
972 972
 		t.Fatal(err)
973 973
 	}
974
+	defer ctx.Close()
975
+
974 976
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
975 977
 		t.Fatal(err)
976 978
 	}
... ...
@@ -996,6 +1009,8 @@ RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`,
996 996
 	if err != nil {
997 997
 		t.Fatal(err)
998 998
 	}
999
+	defer ctx.Close()
1000
+
999 1001
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
1000 1002
 		t.Fatal(err)
1001 1003
 	}
... ...
@@ -1022,6 +1037,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
1022 1022
 	if err != nil {
1023 1023
 		t.Fatal(err)
1024 1024
 	}
1025
+	defer ctx.Close()
1026
+
1025 1027
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
1026 1028
 		t.Fatal(err)
1027 1029
 	}
... ...
@@ -1040,6 +1057,8 @@ ADD . /`,
1040 1040
 	if err != nil {
1041 1041
 		t.Fatal(err)
1042 1042
 	}
1043
+	defer ctx.Close()
1044
+
1043 1045
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
1044 1046
 		t.Fatal(err)
1045 1047
 	}
... ...
@@ -1064,6 +1083,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
1064 1064
 	if err != nil {
1065 1065
 		t.Fatal(err)
1066 1066
 	}
1067
+	defer ctx.Close()
1068
+
1067 1069
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
1068 1070
 		t.Fatal(err)
1069 1071
 	}
... ...
@@ -1082,6 +1103,8 @@ COPY test_file .`,
1082 1082
 	if err != nil {
1083 1083
 		t.Fatal(err)
1084 1084
 	}
1085
+	defer ctx.Close()
1086
+
1085 1087
 	done := make(chan struct{})
1086 1088
 	go func() {
1087 1089
 		if _, err := buildImageFromContext(name, ctx, true); err != nil {
... ...
@@ -1116,6 +1139,8 @@ RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio'
1116 1116
 	if err != nil {
1117 1117
 		t.Fatal(err)
1118 1118
 	}
1119
+	defer ctx.Close()
1120
+
1119 1121
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
1120 1122
 		t.Fatal(err)
1121 1123
 	}
... ...
@@ -1140,6 +1165,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
1140 1140
 	if err != nil {
1141 1141
 		t.Fatal(err)
1142 1142
 	}
1143
+	defer ctx.Close()
1144
+
1143 1145
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
1144 1146
 		t.Fatal(err)
1145 1147
 	}
... ...
@@ -1163,6 +1190,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
1163 1163
 	if err != nil {
1164 1164
 		t.Fatal(err)
1165 1165
 	}
1166
+	defer ctx.Close()
1167
+
1166 1168
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
1167 1169
 		t.Fatal(err)
1168 1170
 	}
... ...
@@ -1188,6 +1217,8 @@ RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`,
1188 1188
 	if err != nil {
1189 1189
 		t.Fatal(err)
1190 1190
 	}
1191
+	defer ctx.Close()
1192
+
1191 1193
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
1192 1194
 		t.Fatal(err)
1193 1195
 	}
... ...
@@ -1214,6 +1245,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
1214 1214
 	if err != nil {
1215 1215
 		t.Fatal(err)
1216 1216
 	}
1217
+	defer ctx.Close()
1218
+
1217 1219
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
1218 1220
 		t.Fatal(err)
1219 1221
 	}
... ...
@@ -1231,6 +1264,8 @@ COPY . /`,
1231 1231
 	if err != nil {
1232 1232
 		t.Fatal(err)
1233 1233
 	}
1234
+	defer ctx.Close()
1235
+
1234 1236
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
1235 1237
 		t.Fatal(err)
1236 1238
 	}
... ...
@@ -1858,6 +1893,7 @@ func TestBuildOnBuildLimitedInheritence(t *testing.T) {
1858 1858
 		if err != nil {
1859 1859
 			t.Fatal(err)
1860 1860
 		}
1861
+		defer ctx.Close()
1861 1862
 
1862 1863
 		out1, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", name1, ".")
1863 1864
 		if err != nil {
... ...
@@ -1874,6 +1910,7 @@ func TestBuildOnBuildLimitedInheritence(t *testing.T) {
1874 1874
 		if err != nil {
1875 1875
 			t.Fatal(err)
1876 1876
 		}
1877
+		defer ctx.Close()
1877 1878
 
1878 1879
 		out2, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name2, ".")
1879 1880
 		if err != nil {
... ...
@@ -1890,6 +1927,7 @@ func TestBuildOnBuildLimitedInheritence(t *testing.T) {
1890 1890
 		if err != nil {
1891 1891
 			t.Fatal(err)
1892 1892
 		}
1893
+		defer ctx.Close()
1893 1894
 
1894 1895
 		out3, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name3, ".")
1895 1896
 		if err != nil {
... ...
@@ -2391,7 +2429,7 @@ func TestBuildNoContext(t *testing.T) {
2391 2391
 		t.Fatalf("build failed to complete: %v %v", out, err)
2392 2392
 	}
2393 2393
 
2394
-	if out, _, err := cmd(t, "run", "--rm", "nocontext"); out != "ok\n" || err != nil {
2394
+	if out, _, err := dockerCmd(t, "run", "--rm", "nocontext"); out != "ok\n" || err != nil {
2395 2395
 		t.Fatalf("run produced invalid output: %q, expected %q", out, "ok")
2396 2396
 	}
2397 2397
 
... ...
@@ -2984,6 +3022,8 @@ RUN    [ "$(cat $TO)" = "hello" ]
2984 2984
 	if err != nil {
2985 2985
 		t.Fatal(err)
2986 2986
 	}
2987
+	defer ctx.Close()
2988
+
2987 2989
 	_, err = buildImageFromContext(name, ctx, true)
2988 2990
 	if err != nil {
2989 2991
 		t.Fatal(err)
... ...
@@ -2991,6 +3031,46 @@ RUN    [ "$(cat $TO)" = "hello" ]
2991 2991
 	logDone("build - environment variables usage")
2992 2992
 }
2993 2993
 
2994
+func TestBuildEnvUsage2(t *testing.T) {
2995
+	name := "testbuildenvusage2"
2996
+	defer deleteImages(name)
2997
+	dockerfile := `FROM busybox
2998
+ENV    abc=def
2999
+RUN    [ "$abc" = "def" ]
3000
+ENV    def="hello world"
3001
+RUN    [ "$def" = "hello world" ]
3002
+ENV    def=hello\ world
3003
+RUN    [ "$def" = "hello world" ]
3004
+ENV    v1=abc v2="hi there"
3005
+RUN    [ "$v1" = "abc" ]
3006
+RUN    [ "$v2" = "hi there" ]
3007
+ENV    v3='boogie nights' v4="with'quotes too"
3008
+RUN    [ "$v3" = "boogie nights" ]
3009
+RUN    [ "$v4" = "with'quotes too" ]
3010
+ENV    abc=zzz FROM=hello/docker/world
3011
+ENV    abc=zzz TO=/docker/world/hello
3012
+ADD    $FROM $TO
3013
+RUN    [ "$(cat $TO)" = "hello" ]
3014
+ENV    abc "zzz"
3015
+RUN    [ $abc = \"zzz\" ]
3016
+ENV    abc 'yyy'
3017
+RUN    [ $abc = \'yyy\' ]
3018
+ENV    abc=
3019
+RUN    [ "$abc" = "" ]
3020
+`
3021
+	ctx, err := fakeContext(dockerfile, map[string]string{
3022
+		"hello/docker/world": "hello",
3023
+	})
3024
+	if err != nil {
3025
+		t.Fatal(err)
3026
+	}
3027
+	_, err = buildImageFromContext(name, ctx, true)
3028
+	if err != nil {
3029
+		t.Fatal(err)
3030
+	}
3031
+	logDone("build - environment variables usage2")
3032
+}
3033
+
2994 3034
 func TestBuildAddScript(t *testing.T) {
2995 3035
 	name := "testbuildaddscript"
2996 3036
 	defer deleteImages(name)
... ...
@@ -3006,6 +3086,8 @@ RUN [ "$(cat /testfile)" = 'test!' ]`
3006 3006
 	if err != nil {
3007 3007
 		t.Fatal(err)
3008 3008
 	}
3009
+	defer ctx.Close()
3010
+
3009 3011
 	_, err = buildImageFromContext(name, ctx, true)
3010 3012
 	if err != nil {
3011 3013
 		t.Fatal(err)
... ...
@@ -3060,6 +3142,7 @@ RUN cat /existing-directory-trailing-slash/test/foo | grep Hi`
3060 3060
 		}
3061 3061
 		return &FakeContext{Dir: tmpDir}
3062 3062
 	}()
3063
+	defer ctx.Close()
3063 3064
 
3064 3065
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
3065 3066
 		t.Fatalf("build failed to complete for TestBuildAddTar: %v", err)
... ...
@@ -23,7 +23,7 @@ const (
23 23
 // Test for #5656
24 24
 // Check that garbage paths don't escape the container's rootfs
25 25
 func TestCpGarbagePath(t *testing.T) {
26
-	out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath)
26
+	out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath)
27 27
 	if err != nil || exitCode != 0 {
28 28
 		t.Fatal("failed to create a container", out, err)
29 29
 	}
... ...
@@ -31,7 +31,7 @@ func TestCpGarbagePath(t *testing.T) {
31 31
 	cleanedContainerID := stripTrailingCharacters(out)
32 32
 	defer deleteContainer(cleanedContainerID)
33 33
 
34
-	out, _, err = cmd(t, "wait", cleanedContainerID)
34
+	out, _, err = dockerCmd(t, "wait", cleanedContainerID)
35 35
 	if err != nil || stripTrailingCharacters(out) != "0" {
36 36
 		t.Fatal("failed to set up container", out, err)
37 37
 	}
... ...
@@ -59,7 +59,7 @@ func TestCpGarbagePath(t *testing.T) {
59 59
 
60 60
 	path := filepath.Join("../../../../../../../../../../../../", cpFullPath)
61 61
 
62
-	_, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
62
+	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
63 63
 	if err != nil {
64 64
 		t.Fatalf("couldn't copy from garbage path: %s:%s %s", cleanedContainerID, path, err)
65 65
 	}
... ...
@@ -85,7 +85,7 @@ func TestCpGarbagePath(t *testing.T) {
85 85
 
86 86
 // Check that relative paths are relative to the container's rootfs
87 87
 func TestCpRelativePath(t *testing.T) {
88
-	out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath)
88
+	out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath)
89 89
 	if err != nil || exitCode != 0 {
90 90
 		t.Fatal("failed to create a container", out, err)
91 91
 	}
... ...
@@ -93,7 +93,7 @@ func TestCpRelativePath(t *testing.T) {
93 93
 	cleanedContainerID := stripTrailingCharacters(out)
94 94
 	defer deleteContainer(cleanedContainerID)
95 95
 
96
-	out, _, err = cmd(t, "wait", cleanedContainerID)
96
+	out, _, err = dockerCmd(t, "wait", cleanedContainerID)
97 97
 	if err != nil || stripTrailingCharacters(out) != "0" {
98 98
 		t.Fatal("failed to set up container", out, err)
99 99
 	}
... ...
@@ -122,7 +122,7 @@ func TestCpRelativePath(t *testing.T) {
122 122
 
123 123
 	path, _ := filepath.Rel("/", cpFullPath)
124 124
 
125
-	_, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
125
+	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
126 126
 	if err != nil {
127 127
 		t.Fatalf("couldn't copy from relative path: %s:%s %s", cleanedContainerID, path, err)
128 128
 	}
... ...
@@ -148,7 +148,7 @@ func TestCpRelativePath(t *testing.T) {
148 148
 
149 149
 // Check that absolute paths are relative to the container's rootfs
150 150
 func TestCpAbsolutePath(t *testing.T) {
151
-	out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath)
151
+	out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath)
152 152
 	if err != nil || exitCode != 0 {
153 153
 		t.Fatal("failed to create a container", out, err)
154 154
 	}
... ...
@@ -156,7 +156,7 @@ func TestCpAbsolutePath(t *testing.T) {
156 156
 	cleanedContainerID := stripTrailingCharacters(out)
157 157
 	defer deleteContainer(cleanedContainerID)
158 158
 
159
-	out, _, err = cmd(t, "wait", cleanedContainerID)
159
+	out, _, err = dockerCmd(t, "wait", cleanedContainerID)
160 160
 	if err != nil || stripTrailingCharacters(out) != "0" {
161 161
 		t.Fatal("failed to set up container", out, err)
162 162
 	}
... ...
@@ -185,7 +185,7 @@ func TestCpAbsolutePath(t *testing.T) {
185 185
 
186 186
 	path := cpFullPath
187 187
 
188
-	_, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
188
+	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
189 189
 	if err != nil {
190 190
 		t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err)
191 191
 	}
... ...
@@ -212,7 +212,7 @@ func TestCpAbsolutePath(t *testing.T) {
212 212
 // Test for #5619
213 213
 // Check that absolute symlinks are still relative to the container's rootfs
214 214
 func TestCpAbsoluteSymlink(t *testing.T) {
215
-	out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path")
215
+	out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path")
216 216
 	if err != nil || exitCode != 0 {
217 217
 		t.Fatal("failed to create a container", out, err)
218 218
 	}
... ...
@@ -220,7 +220,7 @@ func TestCpAbsoluteSymlink(t *testing.T) {
220 220
 	cleanedContainerID := stripTrailingCharacters(out)
221 221
 	defer deleteContainer(cleanedContainerID)
222 222
 
223
-	out, _, err = cmd(t, "wait", cleanedContainerID)
223
+	out, _, err = dockerCmd(t, "wait", cleanedContainerID)
224 224
 	if err != nil || stripTrailingCharacters(out) != "0" {
225 225
 		t.Fatal("failed to set up container", out, err)
226 226
 	}
... ...
@@ -249,7 +249,7 @@ func TestCpAbsoluteSymlink(t *testing.T) {
249 249
 
250 250
 	path := filepath.Join("/", "container_path")
251 251
 
252
-	_, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
252
+	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
253 253
 	if err != nil {
254 254
 		t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err)
255 255
 	}
... ...
@@ -276,7 +276,7 @@ func TestCpAbsoluteSymlink(t *testing.T) {
276 276
 // Test for #5619
277 277
 // Check that symlinks which are part of the resource path are still relative to the container's rootfs
278 278
 func TestCpSymlinkComponent(t *testing.T) {
279
-	out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path")
279
+	out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path")
280 280
 	if err != nil || exitCode != 0 {
281 281
 		t.Fatal("failed to create a container", out, err)
282 282
 	}
... ...
@@ -284,7 +284,7 @@ func TestCpSymlinkComponent(t *testing.T) {
284 284
 	cleanedContainerID := stripTrailingCharacters(out)
285 285
 	defer deleteContainer(cleanedContainerID)
286 286
 
287
-	out, _, err = cmd(t, "wait", cleanedContainerID)
287
+	out, _, err = dockerCmd(t, "wait", cleanedContainerID)
288 288
 	if err != nil || stripTrailingCharacters(out) != "0" {
289 289
 		t.Fatal("failed to set up container", out, err)
290 290
 	}
... ...
@@ -313,7 +313,7 @@ func TestCpSymlinkComponent(t *testing.T) {
313 313
 
314 314
 	path := filepath.Join("/", "container_path", cpTestName)
315 315
 
316
-	_, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
316
+	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
317 317
 	if err != nil {
318 318
 		t.Fatalf("couldn't copy from symlink path component: %s:%s %s", cleanedContainerID, path, err)
319 319
 	}
... ...
@@ -339,7 +339,7 @@ func TestCpSymlinkComponent(t *testing.T) {
339 339
 
340 340
 // Check that cp with unprivileged user doesn't return any error
341 341
 func TestCpUnprivilegedUser(t *testing.T) {
342
-	out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName)
342
+	out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName)
343 343
 	if err != nil || exitCode != 0 {
344 344
 		t.Fatal("failed to create a container", out, err)
345 345
 	}
... ...
@@ -347,7 +347,7 @@ func TestCpUnprivilegedUser(t *testing.T) {
347 347
 	cleanedContainerID := stripTrailingCharacters(out)
348 348
 	defer deleteContainer(cleanedContainerID)
349 349
 
350
-	out, _, err = cmd(t, "wait", cleanedContainerID)
350
+	out, _, err = dockerCmd(t, "wait", cleanedContainerID)
351 351
 	if err != nil || stripTrailingCharacters(out) != "0" {
352 352
 		t.Fatal("failed to set up container", out, err)
353 353
 	}
... ...
@@ -389,7 +389,7 @@ func TestCpVolumePath(t *testing.T) {
389 389
 		t.Fatal(err)
390 390
 	}
391 391
 
392
-	out, exitCode, err := cmd(t, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar")
392
+	out, exitCode, err := dockerCmd(t, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar")
393 393
 	if err != nil || exitCode != 0 {
394 394
 		t.Fatal("failed to create a container", out, err)
395 395
 	}
... ...
@@ -397,13 +397,13 @@ func TestCpVolumePath(t *testing.T) {
397 397
 	cleanedContainerID := stripTrailingCharacters(out)
398 398
 	defer deleteContainer(cleanedContainerID)
399 399
 
400
-	out, _, err = cmd(t, "wait", cleanedContainerID)
400
+	out, _, err = dockerCmd(t, "wait", cleanedContainerID)
401 401
 	if err != nil || stripTrailingCharacters(out) != "0" {
402 402
 		t.Fatal("failed to set up container", out, err)
403 403
 	}
404 404
 
405 405
 	// Copy actual volume path
406
-	_, _, err = cmd(t, "cp", cleanedContainerID+":/foo", outDir)
406
+	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":/foo", outDir)
407 407
 	if err != nil {
408 408
 		t.Fatalf("couldn't copy from volume path: %s:%s %v", cleanedContainerID, "/foo", err)
409 409
 	}
... ...
@@ -423,7 +423,7 @@ func TestCpVolumePath(t *testing.T) {
423 423
 	}
424 424
 
425 425
 	// Copy file nested in volume
426
-	_, _, err = cmd(t, "cp", cleanedContainerID+":/foo/bar", outDir)
426
+	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":/foo/bar", outDir)
427 427
 	if err != nil {
428 428
 		t.Fatalf("couldn't copy from volume path: %s:%s %v", cleanedContainerID, "/foo", err)
429 429
 	}
... ...
@@ -436,7 +436,7 @@ func TestCpVolumePath(t *testing.T) {
436 436
 	}
437 437
 
438 438
 	// Copy Bind-mounted dir
439
-	_, _, err = cmd(t, "cp", cleanedContainerID+":/baz", outDir)
439
+	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":/baz", outDir)
440 440
 	if err != nil {
441 441
 		t.Fatalf("couldn't copy from bind-mounted volume path: %s:%s %v", cleanedContainerID, "/baz", err)
442 442
 	}
... ...
@@ -449,7 +449,7 @@ func TestCpVolumePath(t *testing.T) {
449 449
 	}
450 450
 
451 451
 	// Copy file nested in bind-mounted dir
452
-	_, _, err = cmd(t, "cp", cleanedContainerID+":/baz/test", outDir)
452
+	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":/baz/test", outDir)
453 453
 	fb, err := ioutil.ReadFile(outDir + "/baz/test")
454 454
 	if err != nil {
455 455
 		t.Fatal(err)
... ...
@@ -463,7 +463,7 @@ func TestCpVolumePath(t *testing.T) {
463 463
 	}
464 464
 
465 465
 	// Copy bind-mounted file
466
-	_, _, err = cmd(t, "cp", cleanedContainerID+":/test", outDir)
466
+	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":/test", outDir)
467 467
 	fb, err = ioutil.ReadFile(outDir + "/test")
468 468
 	if err != nil {
469 469
 		t.Fatal(err)
... ...
@@ -16,12 +16,12 @@ import (
16 16
 )
17 17
 
18 18
 func TestEventsUntag(t *testing.T) {
19
-	out, _, _ := cmd(t, "images", "-q")
19
+	out, _, _ := dockerCmd(t, "images", "-q")
20 20
 	image := strings.Split(out, "\n")[0]
21
-	cmd(t, "tag", image, "utest:tag1")
22
-	cmd(t, "tag", image, "utest:tag2")
23
-	cmd(t, "rmi", "utest:tag1")
24
-	cmd(t, "rmi", "utest:tag2")
21
+	dockerCmd(t, "tag", image, "utest:tag1")
22
+	dockerCmd(t, "tag", image, "utest:tag2")
23
+	dockerCmd(t, "rmi", "utest:tag1")
24
+	dockerCmd(t, "rmi", "utest:tag2")
25 25
 	eventsCmd := exec.Command("timeout", "0.2", dockerBinary, "events", "--since=1")
26 26
 	out, _, _ = runCommandWithOutput(eventsCmd)
27 27
 	events := strings.Split(out, "\n")
... ...
@@ -39,11 +39,11 @@ func TestEventsUntag(t *testing.T) {
39 39
 
40 40
 func TestEventsPause(t *testing.T) {
41 41
 	name := "testeventpause"
42
-	out, _, _ := cmd(t, "images", "-q")
42
+	out, _, _ := dockerCmd(t, "images", "-q")
43 43
 	image := strings.Split(out, "\n")[0]
44
-	cmd(t, "run", "-d", "--name", name, image, "sleep", "2")
45
-	cmd(t, "pause", name)
46
-	cmd(t, "unpause", name)
44
+	dockerCmd(t, "run", "-d", "--name", name, image, "sleep", "2")
45
+	dockerCmd(t, "pause", name)
46
+	dockerCmd(t, "unpause", name)
47 47
 
48 48
 	defer deleteAllContainers()
49 49
 
... ...
@@ -75,7 +75,7 @@ func TestEventsPause(t *testing.T) {
75 75
 func TestEventsContainerFailStartDie(t *testing.T) {
76 76
 	defer deleteAllContainers()
77 77
 
78
-	out, _, _ := cmd(t, "images", "-q")
78
+	out, _, _ := dockerCmd(t, "images", "-q")
79 79
 	image := strings.Split(out, "\n")[0]
80 80
 	eventsCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testeventdie", image, "blerg")
81 81
 	_, _, err := runCommandWithOutput(eventsCmd)
... ...
@@ -106,7 +106,7 @@ func TestEventsContainerFailStartDie(t *testing.T) {
106 106
 func TestEventsLimit(t *testing.T) {
107 107
 	defer deleteAllContainers()
108 108
 	for i := 0; i < 30; i++ {
109
-		cmd(t, "run", "busybox", "echo", strconv.Itoa(i))
109
+		dockerCmd(t, "run", "busybox", "echo", strconv.Itoa(i))
110 110
 	}
111 111
 	eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix()))
112 112
 	out, _, _ := runCommandWithOutput(eventsCmd)
... ...
@@ -119,7 +119,7 @@ func TestEventsLimit(t *testing.T) {
119 119
 }
120 120
 
121 121
 func TestEventsContainerEvents(t *testing.T) {
122
-	cmd(t, "run", "--rm", "busybox", "true")
122
+	dockerCmd(t, "run", "--rm", "busybox", "true")
123 123
 	eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix()))
124 124
 	out, exitCode, err := runCommandWithOutput(eventsCmd)
125 125
 	if exitCode != 0 || err != nil {
... ...
@@ -190,7 +190,7 @@ func TestEventsRedirectStdout(t *testing.T) {
190 190
 
191 191
 	since := time.Now().Unix()
192 192
 
193
-	cmd(t, "run", "busybox", "true")
193
+	dockerCmd(t, "run", "busybox", "true")
194 194
 
195 195
 	defer deleteAllContainers()
196 196
 
... ...
@@ -186,3 +186,30 @@ func TestExecAfterDaemonRestart(t *testing.T) {
186 186
 
187 187
 	logDone("exec - exec running container after daemon restart")
188 188
 }
189
+
190
+// Regression test for #9155, #9044
191
+func TestExecEnv(t *testing.T) {
192
+	defer deleteAllContainers()
193
+
194
+	runCmd := exec.Command(dockerBinary, "run",
195
+		"-e", "LALA=value1",
196
+		"-e", "LALA=value2",
197
+		"-d", "--name", "testing", "busybox", "top")
198
+	if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil {
199
+		t.Fatal(out, err)
200
+	}
201
+
202
+	execCmd := exec.Command(dockerBinary, "exec", "testing", "env")
203
+	out, _, err := runCommandWithOutput(execCmd)
204
+	if err != nil {
205
+		t.Fatal(out, err)
206
+	}
207
+
208
+	if strings.Contains(out, "LALA=value1") ||
209
+		!strings.Contains(out, "LALA=value2") ||
210
+		!strings.Contains(out, "HOME=/root") {
211
+		t.Errorf("exec env(%q), expect %q, %q", out, "LALA=value2", "HOME=/root")
212
+	}
213
+
214
+	logDone("exec - exec inherits correct env")
215
+}
... ...
@@ -1,7 +1,10 @@
1 1
 package main
2 2
 
3 3
 import (
4
+	"fmt"
4 5
 	"os/exec"
6
+	"reflect"
7
+	"sort"
5 8
 	"strings"
6 9
 	"testing"
7 10
 	"time"
... ...
@@ -63,3 +66,59 @@ func TestImagesOrderedByCreationDate(t *testing.T) {
63 63
 
64 64
 	logDone("images - ordering by creation date")
65 65
 }
66
+
67
+func TestImagesErrorWithInvalidFilterNameTest(t *testing.T) {
68
+	imagesCmd := exec.Command(dockerBinary, "images", "-f", "FOO=123")
69
+	out, _, err := runCommandWithOutput(imagesCmd)
70
+	if !strings.Contains(out, "Invalid filter") {
71
+		t.Fatalf("error should occur when listing images with invalid filter name FOO, %s, %v", out, err)
72
+	}
73
+
74
+	logDone("images - invalid filter name check working")
75
+}
76
+
77
+func TestImagesFilterWhiteSpaceTrimmingAndLowerCasingWorking(t *testing.T) {
78
+	imageName := "images_filter_test"
79
+	defer deleteAllContainers()
80
+	defer deleteImages(imageName)
81
+	buildImage(imageName,
82
+		`FROM scratch
83
+		 RUN touch /test/foo
84
+		 RUN touch /test/bar
85
+		 RUN touch /test/baz`, true)
86
+
87
+	filters := []string{
88
+		"dangling=true",
89
+		"Dangling=true",
90
+		" dangling=true",
91
+		"dangling=true ",
92
+		"dangling = true",
93
+	}
94
+
95
+	imageListings := make([][]string, 5, 5)
96
+	for idx, filter := range filters {
97
+		cmd := exec.Command(dockerBinary, "images", "-f", filter)
98
+		out, _, err := runCommandWithOutput(cmd)
99
+		if err != nil {
100
+			t.Fatal(err)
101
+		}
102
+		listing := strings.Split(out, "\n")
103
+		sort.Strings(listing)
104
+		imageListings[idx] = listing
105
+	}
106
+
107
+	for idx, listing := range imageListings {
108
+		if idx < 4 && !reflect.DeepEqual(listing, imageListings[idx+1]) {
109
+			for idx, errListing := range imageListings {
110
+				fmt.Printf("out %d", idx)
111
+				for _, image := range errListing {
112
+					fmt.Print(image)
113
+				}
114
+				fmt.Print("")
115
+			}
116
+			t.Fatalf("All output must be the same")
117
+		}
118
+	}
119
+
120
+	logDone("images - white space trimming and lower casing")
121
+}
... ...
@@ -62,21 +62,21 @@ func TestLinksPingUnlinkedContainers(t *testing.T) {
62 62
 
63 63
 func TestLinksPingLinkedContainers(t *testing.T) {
64 64
 	var out string
65
-	out, _, _ = cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10")
65
+	out, _, _ = dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10")
66 66
 	idA := stripTrailingCharacters(out)
67
-	out, _, _ = cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10")
67
+	out, _, _ = dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10")
68 68
 	idB := stripTrailingCharacters(out)
69
-	cmd(t, "run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1")
70
-	cmd(t, "kill", idA)
71
-	cmd(t, "kill", idB)
69
+	dockerCmd(t, "run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1")
70
+	dockerCmd(t, "kill", idA)
71
+	dockerCmd(t, "kill", idB)
72 72
 	deleteAllContainers()
73 73
 
74 74
 	logDone("links - ping linked container")
75 75
 }
76 76
 
77 77
 func TestLinksIpTablesRulesWhenLinkAndUnlink(t *testing.T) {
78
-	cmd(t, "run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "sleep", "10")
79
-	cmd(t, "run", "-d", "--name", "parent", "--link", "child:http", "busybox", "sleep", "10")
78
+	dockerCmd(t, "run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "sleep", "10")
79
+	dockerCmd(t, "run", "-d", "--name", "parent", "--link", "child:http", "busybox", "sleep", "10")
80 80
 
81 81
 	childIP := findContainerIP(t, "child")
82 82
 	parentIP := findContainerIP(t, "parent")
... ...
@@ -87,13 +87,13 @@ func TestLinksIpTablesRulesWhenLinkAndUnlink(t *testing.T) {
87 87
 		t.Fatal("Iptables rules not found")
88 88
 	}
89 89
 
90
-	cmd(t, "rm", "--link", "parent/http")
90
+	dockerCmd(t, "rm", "--link", "parent/http")
91 91
 	if iptables.Exists(sourceRule...) || iptables.Exists(destinationRule...) {
92 92
 		t.Fatal("Iptables rules should be removed when unlink")
93 93
 	}
94 94
 
95
-	cmd(t, "kill", "child")
96
-	cmd(t, "kill", "parent")
95
+	dockerCmd(t, "kill", "child")
96
+	dockerCmd(t, "kill", "parent")
97 97
 	deleteAllContainers()
98 98
 
99 99
 	logDone("link - verify iptables when link and unlink")
... ...
@@ -105,9 +105,9 @@ func TestLinksInspectLinksStarted(t *testing.T) {
105 105
 		result   []string
106 106
 	)
107 107
 	defer deleteAllContainers()
108
-	cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10")
109
-	cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10")
110
-	cmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sleep", "10")
108
+	dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10")
109
+	dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10")
110
+	dockerCmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sleep", "10")
111 111
 	links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links")
112 112
 	if err != nil {
113 113
 		t.Fatal(err)
... ...
@@ -134,9 +134,9 @@ func TestLinksInspectLinksStopped(t *testing.T) {
134 134
 		result   []string
135 135
 	)
136 136
 	defer deleteAllContainers()
137
-	cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10")
138
-	cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10")
139
-	cmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true")
137
+	dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10")
138
+	dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10")
139
+	dockerCmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true")
140 140
 	links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links")
141 141
 	if err != nil {
142 142
 		t.Fatal(err)
... ...
@@ -29,7 +29,7 @@ func TestRmiWithContainerFails(t *testing.T) {
29 29
 	}
30 30
 
31 31
 	// make sure it didn't delete the busybox name
32
-	images, _, _ := cmd(t, "images")
32
+	images, _, _ := dockerCmd(t, "images")
33 33
 	if !strings.Contains(images, "busybox") {
34 34
 		t.Fatalf("The name 'busybox' should not have been removed from images: %q", images)
35 35
 	}
... ...
@@ -40,35 +40,35 @@ func TestRmiWithContainerFails(t *testing.T) {
40 40
 }
41 41
 
42 42
 func TestRmiTag(t *testing.T) {
43
-	imagesBefore, _, _ := cmd(t, "images", "-a")
44
-	cmd(t, "tag", "busybox", "utest:tag1")
45
-	cmd(t, "tag", "busybox", "utest/docker:tag2")
46
-	cmd(t, "tag", "busybox", "utest:5000/docker:tag3")
43
+	imagesBefore, _, _ := dockerCmd(t, "images", "-a")
44
+	dockerCmd(t, "tag", "busybox", "utest:tag1")
45
+	dockerCmd(t, "tag", "busybox", "utest/docker:tag2")
46
+	dockerCmd(t, "tag", "busybox", "utest:5000/docker:tag3")
47 47
 	{
48
-		imagesAfter, _, _ := cmd(t, "images", "-a")
48
+		imagesAfter, _, _ := dockerCmd(t, "images", "-a")
49 49
 		if nLines(imagesAfter) != nLines(imagesBefore)+3 {
50 50
 			t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
51 51
 		}
52 52
 	}
53
-	cmd(t, "rmi", "utest/docker:tag2")
53
+	dockerCmd(t, "rmi", "utest/docker:tag2")
54 54
 	{
55
-		imagesAfter, _, _ := cmd(t, "images", "-a")
55
+		imagesAfter, _, _ := dockerCmd(t, "images", "-a")
56 56
 		if nLines(imagesAfter) != nLines(imagesBefore)+2 {
57 57
 			t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
58 58
 		}
59 59
 
60 60
 	}
61
-	cmd(t, "rmi", "utest:5000/docker:tag3")
61
+	dockerCmd(t, "rmi", "utest:5000/docker:tag3")
62 62
 	{
63
-		imagesAfter, _, _ := cmd(t, "images", "-a")
63
+		imagesAfter, _, _ := dockerCmd(t, "images", "-a")
64 64
 		if nLines(imagesAfter) != nLines(imagesBefore)+1 {
65 65
 			t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
66 66
 		}
67 67
 
68 68
 	}
69
-	cmd(t, "rmi", "utest:tag1")
69
+	dockerCmd(t, "rmi", "utest:tag1")
70 70
 	{
71
-		imagesAfter, _, _ := cmd(t, "images", "-a")
71
+		imagesAfter, _, _ := dockerCmd(t, "images", "-a")
72 72
 		if nLines(imagesAfter) != nLines(imagesBefore)+0 {
73 73
 			t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
74 74
 		}
... ...
@@ -798,7 +798,7 @@ func TestRunLoopbackWhenNetworkDisabled(t *testing.T) {
798 798
 }
799 799
 
800 800
 func TestRunNetHostNotAllowedWithLinks(t *testing.T) {
801
-	_, _, err := cmd(t, "run", "--name", "linked", "busybox", "true")
801
+	_, _, err := dockerCmd(t, "run", "--name", "linked", "busybox", "true")
802 802
 
803 803
 	cmd := exec.Command(dockerBinary, "run", "--net=host", "--link", "linked:linked", "busybox", "true")
804 804
 	_, _, err = runCommandWithOutput(cmd)
... ...
@@ -1204,7 +1204,7 @@ func TestRunModeHostname(t *testing.T) {
1204 1204
 }
1205 1205
 
1206 1206
 func TestRunRootWorkdir(t *testing.T) {
1207
-	s, _, err := cmd(t, "run", "--workdir", "/", "busybox", "pwd")
1207
+	s, _, err := dockerCmd(t, "run", "--workdir", "/", "busybox", "pwd")
1208 1208
 	if err != nil {
1209 1209
 		t.Fatal(s, err)
1210 1210
 	}
... ...
@@ -1218,7 +1218,7 @@ func TestRunRootWorkdir(t *testing.T) {
1218 1218
 }
1219 1219
 
1220 1220
 func TestRunAllowBindMountingRoot(t *testing.T) {
1221
-	s, _, err := cmd(t, "run", "-v", "/:/host", "busybox", "ls", "/host")
1221
+	s, _, err := dockerCmd(t, "run", "-v", "/:/host", "busybox", "ls", "/host")
1222 1222
 	if err != nil {
1223 1223
 		t.Fatal(s, err)
1224 1224
 	}
... ...
@@ -1257,6 +1257,7 @@ func TestRunWithVolumesIsRecursive(t *testing.T) {
1257 1257
 	if err := mount.Mount("tmpfs", tmpfsDir, "tmpfs", ""); err != nil {
1258 1258
 		t.Fatalf("failed to create a tmpfs mount at %s - %s", tmpfsDir, err)
1259 1259
 	}
1260
+	defer mount.Unmount(tmpfsDir)
1260 1261
 
1261 1262
 	f, err := ioutil.TempFile(tmpfsDir, "touch-me")
1262 1263
 	if err != nil {
... ...
@@ -2687,3 +2688,28 @@ func TestContainerNetworkMode(t *testing.T) {
2687 2687
 
2688 2688
 	logDone("run - container shared network namespace")
2689 2689
 }
2690
+
2691
+func TestRunTLSverify(t *testing.T) {
2692
+	cmd := exec.Command(dockerBinary, "ps")
2693
+	out, ec, err := runCommandWithOutput(cmd)
2694
+	if err != nil || ec != 0 {
2695
+		t.Fatalf("Should have worked: %v:\n%v", err, out)
2696
+	}
2697
+
2698
+	// Regardless of whether we specify true or false we need to
2699
+	// test to make sure tls is turned on if --tlsverify is specified at all
2700
+
2701
+	cmd = exec.Command(dockerBinary, "--tlsverify=false", "ps")
2702
+	out, ec, err = runCommandWithOutput(cmd)
2703
+	if err == nil || ec == 0 || !strings.Contains(out, "trying to connect") {
2704
+		t.Fatalf("Should have failed: \nec:%v\nout:%v\nerr:%v", ec, out, err)
2705
+	}
2706
+
2707
+	cmd = exec.Command(dockerBinary, "--tlsverify=true", "ps")
2708
+	out, ec, err = runCommandWithOutput(cmd)
2709
+	if err == nil || ec == 0 || !strings.Contains(out, "cert") {
2710
+		t.Fatalf("Should have failed: \nec:%v\nout:%v\nerr:%v", ec, out, err)
2711
+	}
2712
+
2713
+	logDone("run - verify tls is set for --tlsverify")
2714
+}
... ...
@@ -8,6 +8,8 @@ import (
8 8
 	"os/exec"
9 9
 	"path/filepath"
10 10
 	"reflect"
11
+	"sort"
12
+	"strings"
11 13
 	"testing"
12 14
 
13 15
 	"github.com/docker/docker/vendor/src/github.com/kr/pty"
... ...
@@ -260,6 +262,66 @@ func TestSaveMultipleNames(t *testing.T) {
260 260
 	logDone("save - save by multiple names")
261 261
 }
262 262
 
263
+func TestSaveRepoWithMultipleImages(t *testing.T) {
264
+
265
+	makeImage := func(from string, tag string) string {
266
+		runCmd := exec.Command(dockerBinary, "run", "-d", from, "true")
267
+		var (
268
+			out string
269
+			err error
270
+		)
271
+		if out, _, err = runCommandWithOutput(runCmd); err != nil {
272
+			t.Fatalf("failed to create a container: %v %v", out, err)
273
+		}
274
+		cleanedContainerID := stripTrailingCharacters(out)
275
+
276
+		commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, tag)
277
+		if out, _, err = runCommandWithOutput(commitCmd); err != nil {
278
+			t.Fatalf("failed to commit container: %v %v", out, err)
279
+		}
280
+		imageID := stripTrailingCharacters(out)
281
+
282
+		deleteContainer(cleanedContainerID)
283
+		return imageID
284
+	}
285
+
286
+	repoName := "foobar-save-multi-images-test"
287
+	tagFoo := repoName + ":foo"
288
+	tagBar := repoName + ":bar"
289
+
290
+	idFoo := makeImage("busybox:latest", tagFoo)
291
+	idBar := makeImage("busybox:latest", tagBar)
292
+
293
+	deleteImages(repoName)
294
+
295
+	// create the archive
296
+	saveCmdFinal := fmt.Sprintf("%v save %v | tar t | grep 'VERSION' |cut -d / -f1", dockerBinary, repoName)
297
+	saveCmd := exec.Command("bash", "-c", saveCmdFinal)
298
+	out, _, err := runCommandWithOutput(saveCmd)
299
+	if err != nil {
300
+		t.Fatalf("failed to save multiple images: %s, %v", out, err)
301
+	}
302
+	actual := strings.Split(stripTrailingCharacters(out), "\n")
303
+
304
+	// make the list of expected layers
305
+	historyCmdFinal := fmt.Sprintf("%v history -q --no-trunc %v", dockerBinary, "busybox:latest")
306
+	historyCmd := exec.Command("bash", "-c", historyCmdFinal)
307
+	out, _, err = runCommandWithOutput(historyCmd)
308
+	if err != nil {
309
+		t.Fatalf("failed to get history: %s, %v", out, err)
310
+	}
311
+
312
+	expected := append(strings.Split(stripTrailingCharacters(out), "\n"), idFoo, idBar)
313
+
314
+	sort.Strings(actual)
315
+	sort.Strings(expected)
316
+	if !reflect.DeepEqual(expected, actual) {
317
+		t.Fatalf("achive does not contains the right layers: got %v, expected %v", actual, expected)
318
+	}
319
+
320
+	logDone("save - save repository with multiple images")
321
+}
322
+
263 323
 // Issue #6722 #5892 ensure directories are included in changes
264 324
 func TestSaveDirectoryPermissions(t *testing.T) {
265 325
 	layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"}
... ...
@@ -12,8 +12,8 @@ import (
12 12
 func TestStartAttachReturnsOnError(t *testing.T) {
13 13
 	defer deleteAllContainers()
14 14
 
15
-	cmd(t, "run", "-d", "--name", "test", "busybox")
16
-	cmd(t, "stop", "test")
15
+	dockerCmd(t, "run", "-d", "--name", "test", "busybox")
16
+	dockerCmd(t, "stop", "test")
17 17
 
18 18
 	// Expect this to fail because the above container is stopped, this is what we want
19 19
 	if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "test2", "--link", "test:test", "busybox")); err == nil {
... ...
@@ -73,7 +73,7 @@ func TestStartRecordError(t *testing.T) {
73 73
 	defer deleteAllContainers()
74 74
 
75 75
 	// when container runs successfully, we should not have state.Error
76
-	cmd(t, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top")
76
+	dockerCmd(t, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top")
77 77
 	stateErr, err := inspectField("test", "State.Error")
78 78
 	if err != nil {
79 79
 		t.Fatalf("Failed to inspect %q state's error, got error %q", "test", err)
... ...
@@ -97,8 +97,8 @@ func TestStartRecordError(t *testing.T) {
97 97
 	}
98 98
 
99 99
 	// Expect the conflict to be resolved when we stop the initial container
100
-	cmd(t, "stop", "test")
101
-	cmd(t, "start", "test2")
100
+	dockerCmd(t, "stop", "test")
101
+	dockerCmd(t, "start", "test2")
102 102
 	stateErr, err = inspectField("test2", "State.Error")
103 103
 	if err != nil {
104 104
 		t.Fatalf("Failed to inspect %q state's error, got error %q", "test", err)
... ...
@@ -115,7 +115,7 @@ func TestStartVolumesFromFailsCleanly(t *testing.T) {
115 115
 	defer deleteAllContainers()
116 116
 
117 117
 	// Create the first data volume
118
-	cmd(t, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox")
118
+	dockerCmd(t, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox")
119 119
 
120 120
 	// Expect this to fail because the data_after container doesn't exist yet
121 121
 	if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "consumer", "--volumes-from", "data_before", "--volumes-from", "data_after", "busybox")); err == nil {
... ...
@@ -123,13 +123,13 @@ func TestStartVolumesFromFailsCleanly(t *testing.T) {
123 123
 	}
124 124
 
125 125
 	// Create the second data volume
126
-	cmd(t, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox")
126
+	dockerCmd(t, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox")
127 127
 
128 128
 	// Now, all the volumes should be there
129
-	cmd(t, "start", "consumer")
129
+	dockerCmd(t, "start", "consumer")
130 130
 
131 131
 	// Check that we have the volumes we want
132
-	out, _, _ := cmd(t, "inspect", "--format='{{ len .Volumes }}'", "consumer")
132
+	out, _, _ := dockerCmd(t, "inspect", "--format='{{ len .Volumes }}'", "consumer")
133 133
 	n_volumes := strings.Trim(out, " \r\n'")
134 134
 	if n_volumes != "2" {
135 135
 		t.Fatalf("Missing volumes: expected 2, got %s", n_volumes)
... ...
@@ -356,11 +356,6 @@ func pullImageIfNotExist(image string) (err error) {
356 356
 	return
357 357
 }
358 358
 
359
-// deprecated, use dockerCmd instead
360
-func cmd(t *testing.T, args ...string) (string, int, error) {
361
-	return dockerCmd(t, args...)
362
-}
363
-
364 359
 func dockerCmd(t *testing.T, args ...string) (string, int, error) {
365 360
 	out, status, err := runCommandWithOutput(exec.Command(dockerBinary, args...))
366 361
 	if err != nil {
... ...
@@ -9,6 +9,7 @@ import (
9 9
 	"net/http/httptest"
10 10
 	"os"
11 11
 	"path"
12
+	"path/filepath"
12 13
 	"strings"
13 14
 	"testing"
14 15
 	"time"
... ...
@@ -187,6 +188,7 @@ func newTestEngine(t Fataler, autorestart bool, root string) *engine.Engine {
187 187
 		// Either InterContainerCommunication or EnableIptables must be set,
188 188
 		// otherwise NewDaemon will fail because of conflicting settings.
189 189
 		InterContainerCommunication: true,
190
+		TrustKeyPath:                filepath.Join(root, "key.json"),
190 191
 	}
191 192
 	d, err := daemon.NewDaemon(cfg, eng)
192 193
 	if err != nil {
... ...
@@ -43,6 +43,10 @@ func MirrorListVar(values *[]string, names []string, usage string) {
43 43
 	flag.Var(newListOptsRef(values, ValidateMirror), names, usage)
44 44
 }
45 45
 
46
+func LabelListVar(values *[]string, names []string, usage string) {
47
+	flag.Var(newListOptsRef(values, ValidateLabel), names, usage)
48
+}
49
+
46 50
 // ListOpts type
47 51
 type ListOpts struct {
48 52
 	values    *[]string
... ...
@@ -227,3 +231,10 @@ func ValidateMirror(val string) (string, error) {
227 227
 
228 228
 	return fmt.Sprintf("%s://%s/v1/", uri.Scheme, uri.Host), nil
229 229
 }
230
+
231
+func ValidateLabel(val string) (string, error) {
232
+	if strings.Count(val, "=") != 1 {
233
+		return "", fmt.Errorf("bad attribute format: %s", val)
234
+	}
235
+	return val, nil
236
+}
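
The new `ValidateLabel` only checks that the value contains exactly one `=` before it is stored by `LabelListVar` above. A minimal, hedged sketch of how it behaves; the import path `github.com/docker/docker/opts` is an assumption and the sample values are made up.

package main

import (
	"fmt"

	"github.com/docker/docker/opts" // assumed import path
)

func main() {
	// Exactly one '=' passes validation and the value is returned unchanged.
	if v, err := opts.ValidateLabel("env=production"); err == nil {
		fmt.Println("accepted:", v)
	}
	// No '=' at all, or more than one, yields the "bad attribute format" error.
	if _, err := opts.ValidateLabel("not-a-label"); err != nil {
		fmt.Println("rejected:", err)
	}
	if _, err := opts.ValidateLabel("a=b=c"); err != nil {
		fmt.Println("rejected:", err)
	}
}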
... ...
@@ -42,6 +42,11 @@ type (
42 42
 	Archiver struct {
43 43
 		Untar func(io.Reader, string, *TarOptions) error
44 44
 	}
45
+
46
+	// breakoutError is used to differentiate errors related to breaking out
47
+	// When testing archive breakout in the unit tests, this error is expected
48
+	// in order for the test to pass.
49
+	breakoutError error
45 50
 )
46 51
 
47 52
 var (
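
Since `breakoutError` is unexported, only code inside the archive package can tell a blocked breakout apart from an ordinary extraction failure; the type assertion below mirrors the one used by the `testBreakout` helper later in this diff. `expectBreakout` itself is a hypothetical helper, not part of the change.

package archive

import (
	"io"
	"testing"
)

// expectBreakout is a hypothetical in-package helper showing how a blocked
// breakout is distinguished from any other extraction error.
func expectBreakout(t *testing.T, r io.Reader, dest string) {
	err := Untar(r, dest, nil)
	if err == nil {
		t.Fatal("expected extraction to be refused")
	}
	if _, ok := err.(breakoutError); !ok {
		// Not a breakout: some unrelated I/O or format error.
		t.Fatalf("unexpected error: %v", err)
	}
	// The archive tried to escape dest and was stopped.
}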
... ...
@@ -287,11 +292,25 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
287 287
 		}
288 288
 
289 289
 	case tar.TypeLink:
290
-		if err := os.Link(filepath.Join(extractDir, hdr.Linkname), path); err != nil {
290
+		targetPath := filepath.Join(extractDir, hdr.Linkname)
291
+		// check for hardlink breakout
292
+		if !strings.HasPrefix(targetPath, extractDir) {
293
+			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
294
+		}
295
+		if err := os.Link(targetPath, path); err != nil {
291 296
 			return err
292 297
 		}
293 298
 
294 299
 	case tar.TypeSymlink:
300
+		// 	path 				-> hdr.Linkname = targetPath
301
+		// e.g. /extractDir/path/to/symlink 	-> ../2/file	= /extractDir/path/2/file
302
+		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
303
+
304
+		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
305
+		// that symlink would first have to be created, which would be caught earlier, at this very check:
306
+		if !strings.HasPrefix(targetPath, extractDir) {
307
+			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
308
+		}
295 309
 		if err := os.Symlink(hdr.Linkname, path); err != nil {
296 310
 			return err
297 311
 		}
... ...
@@ -451,6 +470,8 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
451 451
 //  identity (uncompressed), gzip, bzip2, xz.
452 452
 // FIXME: specify behavior when target path exists vs. doesn't exist.
453 453
 func Untar(archive io.Reader, dest string, options *TarOptions) error {
454
+	dest = filepath.Clean(dest)
455
+
454 456
 	if options == nil {
455 457
 		options = &TarOptions{}
456 458
 	}
... ...
@@ -488,6 +509,7 @@ loop:
488 488
 		}
489 489
 
490 490
 		// Normalize name, for safety and for a simple is-root check
491
+		// This keeps "../" as-is, but normalizes "/../" to "/"
491 492
 		hdr.Name = filepath.Clean(hdr.Name)
492 493
 
493 494
 		for _, exclude := range options.Excludes {
... ...
@@ -508,7 +530,11 @@ loop:
508 508
 			}
509 509
 		}
510 510
 
511
+		// Prevent symlink breakout
511 512
 		path := filepath.Join(dest, hdr.Name)
513
+		if !strings.HasPrefix(path, dest) {
514
+			return breakoutError(fmt.Errorf("%q is outside of %q", path, dest))
515
+		}
512 516
 
513 517
 		// If path exists we almost always just want to remove and replace it
514 518
 		// The only exception is when it is a directory *and* the file from
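
The guard above is a plain containment test: the destination is cleaned once, each entry name is cleaned, and the joined path must still start with the destination. The same idea protects the hardlink and symlink targets earlier in this file. A standalone sketch with made-up paths:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	dest := filepath.Clean("/tmp/extract")
	for _, name := range []string{"ok/file", "../victim/escape", "/../victim/escape"} {
		// Clean keeps a leading "../" as-is but collapses "/../" to "/",
		// so the third entry is pulled back inside dest instead of escaping.
		path := filepath.Join(dest, filepath.Clean(name))
		fmt.Printf("%-20s -> %-30s inside=%v\n", name, path, strings.HasPrefix(path, dest))
	}
}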
... ...
@@ -742,17 +768,20 @@ func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
742 742
 		return nil, err
743 743
 	}
744 744
 	size := st.Size()
745
-	return &TempArchive{f, size}, nil
745
+	return &TempArchive{f, size, 0}, nil
746 746
 }
747 747
 
748 748
 type TempArchive struct {
749 749
 	*os.File
750 750
 	Size int64 // Pre-computed from Stat().Size() as a convenience
751
+	read int64
751 752
 }
752 753
 
753 754
 func (archive *TempArchive) Read(data []byte) (int, error) {
754 755
 	n, err := archive.File.Read(data)
755
-	if err != nil {
756
+	archive.read += int64(n)
757
+	if err != nil || archive.read == archive.Size {
758
+		archive.File.Close()
756 759
 		os.Remove(archive.File.Name())
757 760
 	}
758 761
 	return n, err
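
With the read counter added above, the temporary file now cleans itself up as soon as the archive has been fully streamed, not only when a read fails. A rough in-package sketch; the helper name and the empty `dir` argument are assumptions.

package archive

import "io"

// streamTempArchive is a hypothetical helper: once io.Copy has consumed
// Size bytes, TempArchive.Read closes and removes the backing file itself,
// so the happy path needs no explicit cleanup.
func streamTempArchive(dst io.Writer, src Archive) error {
	tmp, err := NewTempArchive(src, "")
	if err != nil {
		return err
	}
	_, err = io.Copy(dst, tmp)
	return err
}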
... ...
@@ -8,6 +8,7 @@ import (
8 8
 	"os"
9 9
 	"os/exec"
10 10
 	"path"
11
+	"path/filepath"
11 12
 	"syscall"
12 13
 	"testing"
13 14
 	"time"
... ...
@@ -214,7 +215,12 @@ func TestTarWithOptions(t *testing.T) {
214 214
 // Failing prevents the archives from being uncompressed during ADD
215 215
 func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) {
216 216
 	hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader}
217
-	err := createTarFile("pax_global_header", "some_dir", &hdr, nil, true)
217
+	tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test")
218
+	if err != nil {
219
+		t.Fatal(err)
220
+	}
221
+	defer os.RemoveAll(tmpDir)
222
+	err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true)
218 223
 	if err != nil {
219 224
 		t.Fatal(err)
220 225
 	}
... ...
@@ -403,3 +409,201 @@ func BenchmarkTarUntarWithLinks(b *testing.B) {
403 403
 		os.RemoveAll(target)
404 404
 	}
405 405
 }
406
+
407
+func TestUntarInvalidFilenames(t *testing.T) {
408
+	for i, headers := range [][]*tar.Header{
409
+		{
410
+			{
411
+				Name:     "../victim/dotdot",
412
+				Typeflag: tar.TypeReg,
413
+				Mode:     0644,
414
+			},
415
+		},
416
+		{
417
+			{
418
+				// Note the leading slash
419
+				Name:     "/../victim/slash-dotdot",
420
+				Typeflag: tar.TypeReg,
421
+				Mode:     0644,
422
+			},
423
+		},
424
+	} {
425
+		if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil {
426
+			t.Fatalf("i=%d. %v", i, err)
427
+		}
428
+	}
429
+}
430
+
431
+func TestUntarInvalidHardlink(t *testing.T) {
432
+	for i, headers := range [][]*tar.Header{
433
+		{ // try reading victim/hello (../)
434
+			{
435
+				Name:     "dotdot",
436
+				Typeflag: tar.TypeLink,
437
+				Linkname: "../victim/hello",
438
+				Mode:     0644,
439
+			},
440
+		},
441
+		{ // try reading victim/hello (/../)
442
+			{
443
+				Name:     "slash-dotdot",
444
+				Typeflag: tar.TypeLink,
445
+				// Note the leading slash
446
+				Linkname: "/../victim/hello",
447
+				Mode:     0644,
448
+			},
449
+		},
450
+		{ // try writing victim/file
451
+			{
452
+				Name:     "loophole-victim",
453
+				Typeflag: tar.TypeLink,
454
+				Linkname: "../victim",
455
+				Mode:     0755,
456
+			},
457
+			{
458
+				Name:     "loophole-victim/file",
459
+				Typeflag: tar.TypeReg,
460
+				Mode:     0644,
461
+			},
462
+		},
463
+		{ // try reading victim/hello (hardlink, symlink)
464
+			{
465
+				Name:     "loophole-victim",
466
+				Typeflag: tar.TypeLink,
467
+				Linkname: "../victim",
468
+				Mode:     0755,
469
+			},
470
+			{
471
+				Name:     "symlink",
472
+				Typeflag: tar.TypeSymlink,
473
+				Linkname: "loophole-victim/hello",
474
+				Mode:     0644,
475
+			},
476
+		},
477
+		{ // Try reading victim/hello (hardlink, hardlink)
478
+			{
479
+				Name:     "loophole-victim",
480
+				Typeflag: tar.TypeLink,
481
+				Linkname: "../victim",
482
+				Mode:     0755,
483
+			},
484
+			{
485
+				Name:     "hardlink",
486
+				Typeflag: tar.TypeLink,
487
+				Linkname: "loophole-victim/hello",
488
+				Mode:     0644,
489
+			},
490
+		},
491
+		{ // Try removing victim directory (hardlink)
492
+			{
493
+				Name:     "loophole-victim",
494
+				Typeflag: tar.TypeLink,
495
+				Linkname: "../victim",
496
+				Mode:     0755,
497
+			},
498
+			{
499
+				Name:     "loophole-victim",
500
+				Typeflag: tar.TypeReg,
501
+				Mode:     0644,
502
+			},
503
+		},
504
+	} {
505
+		if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil {
506
+			t.Fatalf("i=%d. %v", i, err)
507
+		}
508
+	}
509
+}
510
+
511
+func TestUntarInvalidSymlink(t *testing.T) {
512
+	for i, headers := range [][]*tar.Header{
513
+		{ // try reading victim/hello (../)
514
+			{
515
+				Name:     "dotdot",
516
+				Typeflag: tar.TypeSymlink,
517
+				Linkname: "../victim/hello",
518
+				Mode:     0644,
519
+			},
520
+		},
521
+		{ // try reading victim/hello (/../)
522
+			{
523
+				Name:     "slash-dotdot",
524
+				Typeflag: tar.TypeSymlink,
525
+				// Note the leading slash
526
+				Linkname: "/../victim/hello",
527
+				Mode:     0644,
528
+			},
529
+		},
530
+		{ // try writing victim/file
531
+			{
532
+				Name:     "loophole-victim",
533
+				Typeflag: tar.TypeSymlink,
534
+				Linkname: "../victim",
535
+				Mode:     0755,
536
+			},
537
+			{
538
+				Name:     "loophole-victim/file",
539
+				Typeflag: tar.TypeReg,
540
+				Mode:     0644,
541
+			},
542
+		},
543
+		{ // try reading victim/hello (symlink, symlink)
544
+			{
545
+				Name:     "loophole-victim",
546
+				Typeflag: tar.TypeSymlink,
547
+				Linkname: "../victim",
548
+				Mode:     0755,
549
+			},
550
+			{
551
+				Name:     "symlink",
552
+				Typeflag: tar.TypeSymlink,
553
+				Linkname: "loophole-victim/hello",
554
+				Mode:     0644,
555
+			},
556
+		},
557
+		{ // try reading victim/hello (symlink, hardlink)
558
+			{
559
+				Name:     "loophole-victim",
560
+				Typeflag: tar.TypeSymlink,
561
+				Linkname: "../victim",
562
+				Mode:     0755,
563
+			},
564
+			{
565
+				Name:     "hardlink",
566
+				Typeflag: tar.TypeLink,
567
+				Linkname: "loophole-victim/hello",
568
+				Mode:     0644,
569
+			},
570
+		},
571
+		{ // try removing victim directory (symlink)
572
+			{
573
+				Name:     "loophole-victim",
574
+				Typeflag: tar.TypeSymlink,
575
+				Linkname: "../victim",
576
+				Mode:     0755,
577
+			},
578
+			{
579
+				Name:     "loophole-victim",
580
+				Typeflag: tar.TypeReg,
581
+				Mode:     0644,
582
+			},
583
+		},
584
+		{ // try writing to victim/newdir/newfile with a symlink in the path
585
+			{
586
+				// this header needs to be before the next one, or else there is an error
587
+				Name:     "dir/loophole",
588
+				Typeflag: tar.TypeSymlink,
589
+				Linkname: "../../victim",
590
+				Mode:     0755,
591
+			},
592
+			{
593
+				Name:     "dir/loophole/newdir/newfile",
594
+				Typeflag: tar.TypeReg,
595
+				Mode:     0644,
596
+			},
597
+		},
598
+	} {
599
+		if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil {
600
+			t.Fatalf("i=%d. %v", i, err)
601
+		}
602
+	}
603
+}
... ...
@@ -18,6 +18,8 @@ import (
18 18
 // ApplyLayer parses a diff in the standard layer format from `layer`, and
19 19
 // applies it to the directory `dest`.
20 20
 func ApplyLayer(dest string, layer ArchiveReader) error {
21
+	dest = filepath.Clean(dest)
22
+
21 23
 	// We need to be able to set any perms
22 24
 	oldmask, err := system.Umask(0)
23 25
 	if err != nil {
... ...
@@ -91,6 +93,12 @@ func ApplyLayer(dest string, layer ArchiveReader) error {
91 91
 
92 92
 		path := filepath.Join(dest, hdr.Name)
93 93
 		base := filepath.Base(path)
94
+
95
+		// Prevent symlink breakout
96
+		if !strings.HasPrefix(path, dest) {
97
+			return breakoutError(fmt.Errorf("%q is outside of %q", path, dest))
98
+		}
99
+
94 100
 		if strings.HasPrefix(base, ".wh.") {
95 101
 			originalBase := base[len(".wh."):]
96 102
 			originalPath := filepath.Join(filepath.Dir(path), originalBase)
97 103
new file mode 100644
... ...
@@ -0,0 +1,191 @@
0
+package archive
1
+
2
+import (
3
+	"testing"
4
+
5
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
6
+)
7
+
8
+func TestApplyLayerInvalidFilenames(t *testing.T) {
9
+	for i, headers := range [][]*tar.Header{
10
+		{
11
+			{
12
+				Name:     "../victim/dotdot",
13
+				Typeflag: tar.TypeReg,
14
+				Mode:     0644,
15
+			},
16
+		},
17
+		{
18
+			{
19
+				// Note the leading slash
20
+				Name:     "/../victim/slash-dotdot",
21
+				Typeflag: tar.TypeReg,
22
+				Mode:     0644,
23
+			},
24
+		},
25
+	} {
26
+		if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil {
27
+			t.Fatalf("i=%d. %v", i, err)
28
+		}
29
+	}
30
+}
31
+
32
+func TestApplyLayerInvalidHardlink(t *testing.T) {
33
+	for i, headers := range [][]*tar.Header{
34
+		{ // try reading victim/hello (../)
35
+			{
36
+				Name:     "dotdot",
37
+				Typeflag: tar.TypeLink,
38
+				Linkname: "../victim/hello",
39
+				Mode:     0644,
40
+			},
41
+		},
42
+		{ // try reading victim/hello (/../)
43
+			{
44
+				Name:     "slash-dotdot",
45
+				Typeflag: tar.TypeLink,
46
+				// Note the leading slash
47
+				Linkname: "/../victim/hello",
48
+				Mode:     0644,
49
+			},
50
+		},
51
+		{ // try writing victim/file
52
+			{
53
+				Name:     "loophole-victim",
54
+				Typeflag: tar.TypeLink,
55
+				Linkname: "../victim",
56
+				Mode:     0755,
57
+			},
58
+			{
59
+				Name:     "loophole-victim/file",
60
+				Typeflag: tar.TypeReg,
61
+				Mode:     0644,
62
+			},
63
+		},
64
+		{ // try reading victim/hello (hardlink, symlink)
65
+			{
66
+				Name:     "loophole-victim",
67
+				Typeflag: tar.TypeLink,
68
+				Linkname: "../victim",
69
+				Mode:     0755,
70
+			},
71
+			{
72
+				Name:     "symlink",
73
+				Typeflag: tar.TypeSymlink,
74
+				Linkname: "loophole-victim/hello",
75
+				Mode:     0644,
76
+			},
77
+		},
78
+		{ // Try reading victim/hello (hardlink, hardlink)
79
+			{
80
+				Name:     "loophole-victim",
81
+				Typeflag: tar.TypeLink,
82
+				Linkname: "../victim",
83
+				Mode:     0755,
84
+			},
85
+			{
86
+				Name:     "hardlink",
87
+				Typeflag: tar.TypeLink,
88
+				Linkname: "loophole-victim/hello",
89
+				Mode:     0644,
90
+			},
91
+		},
92
+		{ // Try removing victim directory (hardlink)
93
+			{
94
+				Name:     "loophole-victim",
95
+				Typeflag: tar.TypeLink,
96
+				Linkname: "../victim",
97
+				Mode:     0755,
98
+			},
99
+			{
100
+				Name:     "loophole-victim",
101
+				Typeflag: tar.TypeReg,
102
+				Mode:     0644,
103
+			},
104
+		},
105
+	} {
106
+		if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil {
107
+			t.Fatalf("i=%d. %v", i, err)
108
+		}
109
+	}
110
+}
111
+
112
+func TestApplyLayerInvalidSymlink(t *testing.T) {
113
+	for i, headers := range [][]*tar.Header{
114
+		{ // try reading victim/hello (../)
115
+			{
116
+				Name:     "dotdot",
117
+				Typeflag: tar.TypeSymlink,
118
+				Linkname: "../victim/hello",
119
+				Mode:     0644,
120
+			},
121
+		},
122
+		{ // try reading victim/hello (/../)
123
+			{
124
+				Name:     "slash-dotdot",
125
+				Typeflag: tar.TypeSymlink,
126
+				// Note the leading slash
127
+				Linkname: "/../victim/hello",
128
+				Mode:     0644,
129
+			},
130
+		},
131
+		{ // try writing victim/file
132
+			{
133
+				Name:     "loophole-victim",
134
+				Typeflag: tar.TypeSymlink,
135
+				Linkname: "../victim",
136
+				Mode:     0755,
137
+			},
138
+			{
139
+				Name:     "loophole-victim/file",
140
+				Typeflag: tar.TypeReg,
141
+				Mode:     0644,
142
+			},
143
+		},
144
+		{ // try reading victim/hello (symlink, symlink)
145
+			{
146
+				Name:     "loophole-victim",
147
+				Typeflag: tar.TypeSymlink,
148
+				Linkname: "../victim",
149
+				Mode:     0755,
150
+			},
151
+			{
152
+				Name:     "symlink",
153
+				Typeflag: tar.TypeSymlink,
154
+				Linkname: "loophole-victim/hello",
155
+				Mode:     0644,
156
+			},
157
+		},
158
+		{ // try reading victim/hello (symlink, hardlink)
159
+			{
160
+				Name:     "loophole-victim",
161
+				Typeflag: tar.TypeSymlink,
162
+				Linkname: "../victim",
163
+				Mode:     0755,
164
+			},
165
+			{
166
+				Name:     "hardlink",
167
+				Typeflag: tar.TypeLink,
168
+				Linkname: "loophole-victim/hello",
169
+				Mode:     0644,
170
+			},
171
+		},
172
+		{ // try removing victim directory (symlink)
173
+			{
174
+				Name:     "loophole-victim",
175
+				Typeflag: tar.TypeSymlink,
176
+				Linkname: "../victim",
177
+				Mode:     0755,
178
+			},
179
+			{
180
+				Name:     "loophole-victim",
181
+				Typeflag: tar.TypeReg,
182
+				Mode:     0644,
183
+			},
184
+		},
185
+	} {
186
+		if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil {
187
+			t.Fatalf("i=%d. %v", i, err)
188
+		}
189
+	}
190
+}
0 191
new file mode 100644
... ...
@@ -0,0 +1,166 @@
0
+package archive
1
+
2
+import (
3
+	"bytes"
4
+	"fmt"
5
+	"io"
6
+	"io/ioutil"
7
+	"os"
8
+	"path/filepath"
9
+	"time"
10
+
11
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
12
+)
13
+
14
+var testUntarFns = map[string]func(string, io.Reader) error{
15
+	"untar": func(dest string, r io.Reader) error {
16
+		return Untar(r, dest, nil)
17
+	},
18
+	"applylayer": func(dest string, r io.Reader) error {
19
+		return ApplyLayer(dest, ArchiveReader(r))
20
+	},
21
+}
22
+
23
+// testBreakout is a helper function that, within the provided `tmpdir` directory,
24
+// creates a `victim` folder with a generated `hello` file in it.
25
+// The selected untar function then extracts, into a directory named `dest`, the tar archive built from `headers`.
26
+//
27
+// Here are the tested scenarios:
28
+// - removed `victim` folder				(write)
29
+// - removed files from `victim` folder			(write)
30
+// - new files in `victim` folder			(write)
31
+// - modified files in `victim` folder			(write)
32
+// - file in `dest` with same content as `victim/hello` (read)
33
+//
34
+// When using testBreakout make sure you cover one of the scenarios listed above.
35
+func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error {
36
+	tmpdir, err := ioutil.TempDir("", tmpdir)
37
+	if err != nil {
38
+		return err
39
+	}
40
+	defer os.RemoveAll(tmpdir)
41
+
42
+	dest := filepath.Join(tmpdir, "dest")
43
+	if err := os.Mkdir(dest, 0755); err != nil {
44
+		return err
45
+	}
46
+
47
+	victim := filepath.Join(tmpdir, "victim")
48
+	if err := os.Mkdir(victim, 0755); err != nil {
49
+		return err
50
+	}
51
+	hello := filepath.Join(victim, "hello")
52
+	helloData, err := time.Now().MarshalText()
53
+	if err != nil {
54
+		return err
55
+	}
56
+	if err := ioutil.WriteFile(hello, helloData, 0644); err != nil {
57
+		return err
58
+	}
59
+	helloStat, err := os.Stat(hello)
60
+	if err != nil {
61
+		return err
62
+	}
63
+
64
+	reader, writer := io.Pipe()
65
+	go func() {
66
+		t := tar.NewWriter(writer)
67
+		for _, hdr := range headers {
68
+			t.WriteHeader(hdr)
69
+		}
70
+		t.Close()
71
+	}()
72
+
73
+	untar := testUntarFns[untarFn]
74
+	if untar == nil {
75
+		return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn)
76
+	}
77
+	if err := untar(dest, reader); err != nil {
78
+		if _, ok := err.(breakoutError); !ok {
79
+			// If untar returns an error unrelated to an archive breakout,
80
+			// then consider this an unexpected error and abort.
81
+			return err
82
+		}
83
+		// Here, untar detected the breakout.
84
+		// Let's move on verifying that indeed there was no breakout.
85
+		fmt.Printf("breakoutError: %v\n", err)
86
+	}
87
+
88
+	// Check victim folder
89
+	f, err := os.Open(victim)
90
+	if err != nil {
91
+		// codepath taken if victim folder was removed
92
+		return fmt.Errorf("archive breakout: error reading %q: %v", victim, err)
93
+	}
94
+	defer f.Close()
95
+
96
+	// Check contents of victim folder
97
+	//
98
+	// We are only interested in getting 2 files from the victim folder, because if all is well
99
+	// we expect only one result, the `hello` file. If there is a second result, it cannot
100
+	// hold the same name `hello` and we assume that a new file got created in the victim folder.
101
+	// That is enough to detect an archive breakout.
102
+	names, err := f.Readdirnames(2)
103
+	if err != nil {
104
+		// codepath taken if victim is not a folder
105
+		return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err)
106
+	}
107
+	for _, name := range names {
108
+		if name != "hello" {
109
+			// codepath taken if new file was created in victim folder
110
+			return fmt.Errorf("archive breakout: new file %q", name)
111
+		}
112
+	}
113
+
114
+	// Check victim/hello
115
+	f, err = os.Open(hello)
116
+	if err != nil {
117
+		// codepath taken if read permissions were removed
118
+		return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err)
119
+	}
120
+	defer f.Close()
121
+	b, err := ioutil.ReadAll(f)
122
+	if err != nil {
123
+		return err
124
+	}
125
+	fi, err := f.Stat()
126
+	if err != nil {
127
+		return err
128
+	}
129
+	if helloStat.IsDir() != fi.IsDir() ||
130
+		// TODO: cannot check for fi.ModTime() change
131
+		helloStat.Mode() != fi.Mode() ||
132
+		helloStat.Size() != fi.Size() ||
133
+		!bytes.Equal(helloData, b) {
134
+		// codepath taken if hello has been modified
135
+		return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v.", hello, helloData, b, helloStat, fi)
136
+	}
137
+
138
+	// Check that nothing in dest/ has the same content as victim/hello.
139
+	// Since victim/hello was generated with time.Now(), it is safe to assume
140
+	// that any file whose content matches exactly victim/hello, managed somehow
141
+	// to access victim/hello.
142
+	return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error {
143
+		if info.IsDir() {
144
+			if err != nil {
145
+				// skip directory if error
146
+				return filepath.SkipDir
147
+			}
148
+			// enter directory
149
+			return nil
150
+		}
151
+		if err != nil {
152
+			// skip file if error
153
+			return nil
154
+		}
155
+		b, err := ioutil.ReadFile(path)
156
+		if err != nil {
157
+			// Houston, we have a problem. Aborting (space)walk.
158
+			return err
159
+		}
160
+		if bytes.Equal(helloData, b) {
161
+			return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path)
162
+		}
163
+		return nil
164
+	})
165
+}
0 166
new file mode 100644
... ...
@@ -0,0 +1,90 @@
0
+package chrootarchive
1
+
2
+import (
3
+	"bytes"
4
+	"encoding/json"
5
+	"flag"
6
+	"fmt"
7
+	"io"
8
+	"os"
9
+	"runtime"
10
+	"strings"
11
+	"syscall"
12
+
13
+	"github.com/docker/docker/pkg/archive"
14
+	"github.com/docker/docker/pkg/reexec"
15
+)
16
+
17
+func untar() {
18
+	runtime.LockOSThread()
19
+	flag.Parse()
20
+
21
+	if err := syscall.Chroot(flag.Arg(0)); err != nil {
22
+		fatal(err)
23
+	}
24
+	if err := syscall.Chdir("/"); err != nil {
25
+		fatal(err)
26
+	}
27
+	options := new(archive.TarOptions)
28
+	dec := json.NewDecoder(strings.NewReader(flag.Arg(1)))
29
+	if err := dec.Decode(options); err != nil {
30
+		fatal(err)
31
+	}
32
+	if err := archive.Untar(os.Stdin, "/", options); err != nil {
33
+		fatal(err)
34
+	}
35
+	os.Exit(0)
36
+}
37
+
38
+var (
39
+	chrootArchiver = &archive.Archiver{Untar}
40
+)
41
+
42
+func Untar(archive io.Reader, dest string, options *archive.TarOptions) error {
43
+	var buf bytes.Buffer
44
+	enc := json.NewEncoder(&buf)
45
+	if err := enc.Encode(options); err != nil {
46
+		return fmt.Errorf("Untar json encode: %v", err)
47
+	}
48
+	if _, err := os.Stat(dest); os.IsNotExist(err) {
49
+		if err := os.MkdirAll(dest, 0777); err != nil {
50
+			return err
51
+		}
52
+	}
53
+
54
+	cmd := reexec.Command("docker-untar", dest, buf.String())
55
+	cmd.Stdin = archive
56
+	out, err := cmd.CombinedOutput()
57
+	if err != nil {
58
+		return fmt.Errorf("Untar %s %s", err, out)
59
+	}
60
+	return nil
61
+}
62
+
63
+func TarUntar(src, dst string) error {
64
+	return chrootArchiver.TarUntar(src, dst)
65
+}
66
+
67
+// CopyWithTar creates a tar archive of filesystem path `src`, and
68
+// unpacks it at filesystem path `dst`.
69
+// The archive is streamed directly with fixed buffering and no
70
+// intermediary disk IO.
71
+func CopyWithTar(src, dst string) error {
72
+	return chrootArchiver.CopyWithTar(src, dst)
73
+}
74
+
75
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
76
+// for a single file. It copies a regular file from path `src` to
77
+// path `dst`, and preserves all its metadata.
78
+//
79
+// If `dst` ends with a trailing slash '/', the final destination path
80
+// will be `dst/base(src)`.
81
+func CopyFileWithTar(src, dst string) (err error) {
82
+	return chrootArchiver.CopyFileWithTar(src, dst)
83
+}
84
+
85
+// UntarPath is a convenience function which looks for an archive
86
+// at filesystem path `src`, and unpacks it at `dst`.
87
+func UntarPath(src, dst string) error {
88
+	return chrootArchiver.UntarPath(src, dst)
89
+}
0 90
new file mode 100644
... ...
@@ -0,0 +1,44 @@
0
+package chrootarchive
1
+
2
+import (
3
+	"io/ioutil"
4
+	"os"
5
+	"path/filepath"
6
+	"testing"
7
+
8
+	"github.com/docker/docker/pkg/archive"
9
+	"github.com/docker/docker/pkg/reexec"
10
+)
11
+
12
+func init() {
13
+	reexec.Init()
14
+}
15
+
16
+func TestChrootTarUntar(t *testing.T) {
17
+	tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar")
18
+	if err != nil {
19
+		t.Fatal(err)
20
+	}
21
+	defer os.RemoveAll(tmpdir)
22
+	src := filepath.Join(tmpdir, "src")
23
+	if err := os.MkdirAll(src, 0700); err != nil {
24
+		t.Fatal(err)
25
+	}
26
+	if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil {
27
+		t.Fatal(err)
28
+	}
29
+	if err := ioutil.WriteFile(filepath.Join(src, "lolo"), []byte("hello lolo"), 0644); err != nil {
30
+		t.Fatal(err)
31
+	}
32
+	stream, err := archive.Tar(src, archive.Uncompressed)
33
+	if err != nil {
34
+		t.Fatal(err)
35
+	}
36
+	dest := filepath.Join(tmpdir, "src")
37
+	if err := os.MkdirAll(dest, 0700); err != nil {
38
+		t.Fatal(err)
39
+	}
40
+	if err := Untar(stream, dest, &archive.TarOptions{Excludes: []string{"lolo"}}); err != nil {
41
+		t.Fatal(err)
42
+	}
43
+}
0 44
new file mode 100644
... ...
@@ -0,0 +1,46 @@
0
+package chrootarchive
1
+
2
+import (
3
+	"flag"
4
+	"fmt"
5
+	"io/ioutil"
6
+	"os"
7
+	"runtime"
8
+	"syscall"
9
+
10
+	"github.com/docker/docker/pkg/archive"
11
+	"github.com/docker/docker/pkg/reexec"
12
+)
13
+
14
+func applyLayer() {
15
+	runtime.LockOSThread()
16
+	flag.Parse()
17
+
18
+	if err := syscall.Chroot(flag.Arg(0)); err != nil {
19
+		fatal(err)
20
+	}
21
+	if err := syscall.Chdir("/"); err != nil {
22
+		fatal(err)
23
+	}
24
+	tmpDir, err := ioutil.TempDir("/", "temp-docker-extract")
25
+	if err != nil {
26
+		fatal(err)
27
+	}
28
+	os.Setenv("TMPDIR", tmpDir)
29
+	if err := archive.ApplyLayer("/", os.Stdin); err != nil {
30
+		os.RemoveAll(tmpDir)
31
+		fatal(err)
32
+	}
33
+	os.RemoveAll(tmpDir)
34
+	os.Exit(0)
35
+}
36
+
37
+func ApplyLayer(dest string, layer archive.ArchiveReader) error {
38
+	cmd := reexec.Command("docker-applyLayer", dest)
39
+	cmd.Stdin = layer
40
+	out, err := cmd.CombinedOutput()
41
+	if err != nil {
42
+		return fmt.Errorf("ApplyLayer %s %s", err, out)
43
+	}
44
+	return nil
45
+}
0 46
new file mode 100644
... ...
@@ -0,0 +1,18 @@
0
+package chrootarchive
1
+
2
+import (
3
+	"fmt"
4
+	"os"
5
+
6
+	"github.com/docker/docker/pkg/reexec"
7
+)
8
+
9
+func init() {
10
+	reexec.Register("docker-untar", untar)
11
+	reexec.Register("docker-applyLayer", applyLayer)
12
+}
13
+
14
+func fatal(err error) {
15
+	fmt.Fprint(os.Stderr, err)
16
+	os.Exit(1)
17
+}
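
Taken together, the new chrootarchive package registers the `docker-untar` and `docker-applyLayer` handlers, and the exported wrappers re-exec the current binary so extraction runs chrooted into the destination. A hedged caller-side sketch; the file paths are made up and the import paths are assumptions.

package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"       // assumed import path
	"github.com/docker/docker/pkg/chrootarchive" // assumed import path
	"github.com/docker/docker/pkg/reexec"        // assumed import path
)

func main() {
	// If this process was re-executed as "docker-untar" or "docker-applyLayer",
	// Init runs the registered handler (which exits) and we stop here.
	if reexec.Init() {
		return
	}

	f, err := os.Open("/tmp/layer.tar") // made-up input
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// The child process chroots into the destination and extracts to "/",
	// so a crafted entry cannot reach anything outside the destination.
	if err := chrootarchive.Untar(f, "/tmp/unpacked", &archive.TarOptions{}); err != nil {
		log.Fatal(err)
	}
}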
... ...
@@ -63,7 +63,7 @@ var (
63 63
 	ErrGetLibraryVersion      = errors.New("dm_get_library_version failed")
64 64
 	ErrCreateRemoveTask       = errors.New("Can't create task of type DeviceRemove")
65 65
 	ErrRunRemoveDevice        = errors.New("running RemoveDevice failed")
66
-	ErrInvalidAddNode         = errors.New("Invalide AddNoce type")
66
+	ErrInvalidAddNode         = errors.New("Invalid AddNode type")
67 67
 	ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file")
68 68
 	ErrLoopbackSetCapacity    = errors.New("Unable set loopback capacity")
69 69
 	ErrBusy                   = errors.New("Device is Busy")
... ...
@@ -104,6 +104,20 @@ func (t *Task) destroy() {
104 104
 	}
105 105
 }
106 106
 
107
+// TaskCreateNamed is a convenience function for TaskCreate when a name
108
+// will be set on the task as well
109
+func TaskCreateNamed(t TaskType, name string) (*Task, error) {
110
+	task := TaskCreate(t)
111
+	if task == nil {
112
+		return nil, fmt.Errorf("Can't create task of type %d", int(t))
113
+	}
114
+	if err := task.SetName(name); err != nil {
115
+		return nil, fmt.Errorf("Can't set task name %s", name)
116
+	}
117
+	return task, nil
118
+}
119
+
120
+// TaskCreate initializes a devicemapper task of tasktype
107 121
 func TaskCreate(tasktype TaskType) *Task {
108 122
 	Ctask := DmTaskCreate(int(tasktype))
109 123
 	if Ctask == nil {
... ...
@@ -298,7 +312,7 @@ func GetLibraryVersion() (string, error) {
298 298
 func RemoveDevice(name string) error {
299 299
 	log.Debugf("[devmapper] RemoveDevice START")
300 300
 	defer log.Debugf("[devmapper] RemoveDevice END")
301
-	task, err := createTask(DeviceRemove, name)
301
+	task, err := TaskCreateNamed(DeviceRemove, name)
302 302
 	if task == nil {
303 303
 		return err
304 304
 	}
... ...
@@ -354,7 +368,7 @@ func BlockDeviceDiscard(path string) error {
354 354
 
355 355
 // This is the programmatic example of "dmsetup create"
356 356
 func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
357
-	task, err := createTask(DeviceCreate, poolName)
357
+	task, err := TaskCreateNamed(DeviceCreate, poolName)
358 358
 	if task == nil {
359 359
 		return err
360 360
 	}
... ...
@@ -373,18 +387,17 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize
373 373
 	if err := task.SetCookie(&cookie, 0); err != nil {
374 374
 		return fmt.Errorf("Can't set cookie %s", err)
375 375
 	}
376
+	defer UdevWait(cookie)
376 377
 
377 378
 	if err := task.Run(); err != nil {
378 379
 		return fmt.Errorf("Error running DeviceCreate (CreatePool) %s", err)
379 380
 	}
380 381
 
381
-	UdevWait(cookie)
382
-
383 382
 	return nil
384 383
 }
385 384
 
386 385
 func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
387
-	task, err := createTask(DeviceReload, poolName)
386
+	task, err := TaskCreateNamed(DeviceReload, poolName)
388 387
 	if task == nil {
389 388
 		return err
390 389
 	}
... ...
@@ -406,19 +419,8 @@ func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize
406 406
 	return nil
407 407
 }
408 408
 
409
-func createTask(t TaskType, name string) (*Task, error) {
410
-	task := TaskCreate(t)
411
-	if task == nil {
412
-		return nil, fmt.Errorf("Can't create task of type %d", int(t))
413
-	}
414
-	if err := task.SetName(name); err != nil {
415
-		return nil, fmt.Errorf("Can't set task name %s", name)
416
-	}
417
-	return task, nil
418
-}
419
-
420 409
 func GetDeps(name string) (*Deps, error) {
421
-	task, err := createTask(DeviceDeps, name)
410
+	task, err := TaskCreateNamed(DeviceDeps, name)
422 411
 	if task == nil {
423 412
 		return nil, err
424 413
 	}
... ...
@@ -429,7 +431,7 @@ func GetDeps(name string) (*Deps, error) {
429 429
 }
430 430
 
431 431
 func GetInfo(name string) (*Info, error) {
432
-	task, err := createTask(DeviceInfo, name)
432
+	task, err := TaskCreateNamed(DeviceInfo, name)
433 433
 	if task == nil {
434 434
 		return nil, err
435 435
 	}
... ...
@@ -451,9 +453,9 @@ func GetDriverVersion() (string, error) {
451 451
 }
452 452
 
453 453
 func GetStatus(name string) (uint64, uint64, string, string, error) {
454
-	task, err := createTask(DeviceStatus, name)
454
+	task, err := TaskCreateNamed(DeviceStatus, name)
455 455
 	if task == nil {
456
-		log.Debugf("GetStatus: Error createTask: %s", err)
456
+		log.Debugf("GetStatus: Error TaskCreateNamed: %s", err)
457 457
 		return 0, 0, "", "", err
458 458
 	}
459 459
 	if err := task.Run(); err != nil {
... ...
@@ -476,7 +478,7 @@ func GetStatus(name string) (uint64, uint64, string, string, error) {
476 476
 }
477 477
 
478 478
 func SetTransactionId(poolName string, oldId uint64, newId uint64) error {
479
-	task, err := createTask(DeviceTargetMsg, poolName)
479
+	task, err := TaskCreateNamed(DeviceTargetMsg, poolName)
480 480
 	if task == nil {
481 481
 		return err
482 482
 	}
... ...
@@ -496,7 +498,7 @@ func SetTransactionId(poolName string, oldId uint64, newId uint64) error {
496 496
 }
497 497
 
498 498
 func SuspendDevice(name string) error {
499
-	task, err := createTask(DeviceSuspend, name)
499
+	task, err := TaskCreateNamed(DeviceSuspend, name)
500 500
 	if task == nil {
501 501
 		return err
502 502
 	}
... ...
@@ -507,7 +509,7 @@ func SuspendDevice(name string) error {
507 507
 }
508 508
 
509 509
 func ResumeDevice(name string) error {
510
-	task, err := createTask(DeviceResume, name)
510
+	task, err := TaskCreateNamed(DeviceResume, name)
511 511
 	if task == nil {
512 512
 		return err
513 513
 	}
... ...
@@ -516,13 +518,12 @@ func ResumeDevice(name string) error {
516 516
 	if err := task.SetCookie(&cookie, 0); err != nil {
517 517
 		return fmt.Errorf("Can't set cookie %s", err)
518 518
 	}
519
+	defer UdevWait(cookie)
519 520
 
520 521
 	if err := task.Run(); err != nil {
521 522
 		return fmt.Errorf("Error running DeviceResume %s", err)
522 523
 	}
523 524
 
524
-	UdevWait(cookie)
525
-
526 525
 	return nil
527 526
 }
528 527
 
... ...
@@ -530,7 +531,7 @@ func CreateDevice(poolName string, deviceId *int) error {
530 530
 	log.Debugf("[devmapper] CreateDevice(poolName=%v, deviceId=%v)", poolName, *deviceId)
531 531
 
532 532
 	for {
533
-		task, err := createTask(DeviceTargetMsg, poolName)
533
+		task, err := TaskCreateNamed(DeviceTargetMsg, poolName)
534 534
 		if task == nil {
535 535
 			return err
536 536
 		}
... ...
@@ -558,7 +559,7 @@ func CreateDevice(poolName string, deviceId *int) error {
558 558
 }
559 559
 
560 560
 func DeleteDevice(poolName string, deviceId int) error {
561
-	task, err := createTask(DeviceTargetMsg, poolName)
561
+	task, err := TaskCreateNamed(DeviceTargetMsg, poolName)
562 562
 	if task == nil {
563 563
 		return err
564 564
 	}
... ...
@@ -578,7 +579,7 @@ func DeleteDevice(poolName string, deviceId int) error {
578 578
 }
579 579
 
580 580
 func ActivateDevice(poolName string, name string, deviceId int, size uint64) error {
581
-	task, err := createTask(DeviceCreate, name)
581
+	task, err := TaskCreateNamed(DeviceCreate, name)
582 582
 	if task == nil {
583 583
 		return err
584 584
 	}
... ...
@@ -596,12 +597,12 @@ func ActivateDevice(poolName string, name string, deviceId int, size uint64) err
596 596
 		return fmt.Errorf("Can't set cookie %s", err)
597 597
 	}
598 598
 
599
+	defer UdevWait(cookie)
600
+
599 601
 	if err := task.Run(); err != nil {
600 602
 		return fmt.Errorf("Error running DeviceCreate (ActivateDevice) %s", err)
601 603
 	}
602 604
 
603
-	UdevWait(cookie)
604
-
605 605
 	return nil
606 606
 }
607 607
 
... ...
@@ -616,7 +617,7 @@ func CreateSnapDevice(poolName string, deviceId *int, baseName string, baseDevic
616 616
 	}
617 617
 
618 618
 	for {
619
-		task, err := createTask(DeviceTargetMsg, poolName)
619
+		task, err := TaskCreateNamed(DeviceTargetMsg, poolName)
620 620
 		if task == nil {
621 621
 			if doSuspend {
622 622
 				ResumeDevice(baseName)
... ...
@@ -20,9 +20,9 @@ const (
20 20
 )
21 21
 
22 22
 var (
23
-	ErrIptablesNotFound = errors.New("Iptables not found")
24 23
 	nat                 = []string{"-t", "nat"}
25 24
 	supportsXlock       = false
25
+	ErrIptablesNotFound = errors.New("Iptables not found")
26 26
 )
27 27
 
28 28
 type Chain struct {
... ...
@@ -30,6 +30,15 @@ type Chain struct {
30 30
 	Bridge string
31 31
 }
32 32
 
33
+type ChainError struct {
34
+	Chain  string
35
+	Output []byte
36
+}
37
+
38
+func (e *ChainError) Error() string {
39
+	return fmt.Sprintf("Error iptables %s: %s", e.Chain, string(e.Output))
40
+}
41
+
33 42
 func init() {
34 43
 	supportsXlock = exec.Command("iptables", "--wait", "-L", "-n").Run() == nil
35 44
 }
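
Because the error paths in this file now return `*ChainError`, a caller can recover the chain name and the raw iptables output with a type assertion instead of parsing a formatted message. A hedged sketch; the import path is an assumption and the sample values are made up.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/iptables" // assumed import path
)

func report(err error) {
	if ce, ok := err.(*iptables.ChainError); ok {
		fmt.Printf("iptables failed in the %s chain: %s\n", ce.Chain, ce.Output)
		return
	}
	if err != nil {
		fmt.Println("iptables error:", err)
	}
}

func main() {
	report(&iptables.ChainError{Chain: "FORWARD", Output: []byte("sample output")})
}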
... ...
@@ -73,11 +82,12 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str
73 73
 		"-p", proto,
74 74
 		"-d", daddr,
75 75
 		"--dport", strconv.Itoa(port),
76
+		"!", "-i", c.Bridge,
76 77
 		"-j", "DNAT",
77 78
 		"--to-destination", net.JoinHostPort(dest_addr, strconv.Itoa(dest_port))); err != nil {
78 79
 		return err
79 80
 	} else if len(output) != 0 {
80
-		return fmt.Errorf("Error iptables forward: %s", output)
81
+		return &ChainError{Chain: "FORWARD", Output: output}
81 82
 	}
82 83
 
83 84
 	fAction := action
... ...
@@ -93,18 +103,7 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str
93 93
 		"-j", "ACCEPT"); err != nil {
94 94
 		return err
95 95
 	} else if len(output) != 0 {
96
-		return fmt.Errorf("Error iptables forward: %s", output)
97
-	}
98
-
99
-	if output, err := Raw("-t", "nat", string(fAction), "POSTROUTING",
100
-		"-p", proto,
101
-		"-s", dest_addr,
102
-		"-d", dest_addr,
103
-		"--dport", strconv.Itoa(dest_port),
104
-		"-j", "MASQUERADE"); err != nil {
105
-		return err
106
-	} else if len(output) != 0 {
107
-		return fmt.Errorf("Error iptables forward: %s", output)
96
+		return &ChainError{Chain: "FORWARD", Output: output}
108 97
 	}
109 98
 
110 99
 	return nil
... ...
@@ -118,7 +117,7 @@ func (c *Chain) Prerouting(action Action, args ...string) error {
118 118
 	if output, err := Raw(append(a, "-j", c.Name)...); err != nil {
119 119
 		return err
120 120
 	} else if len(output) != 0 {
121
-		return fmt.Errorf("Error iptables prerouting: %s", output)
121
+		return &ChainError{Chain: "PREROUTING", Output: output}
122 122
 	}
123 123
 	return nil
124 124
 }
... ...
@@ -131,7 +130,7 @@ func (c *Chain) Output(action Action, args ...string) error {
131 131
 	if output, err := Raw(append(a, "-j", c.Name)...); err != nil {
132 132
 		return err
133 133
 	} else if len(output) != 0 {
134
-		return fmt.Errorf("Error iptables output: %s", output)
134
+		return &ChainError{Chain: "OUTPUT", Output: output}
135 135
 	}
136 136
 	return nil
137 137
 }
... ...
@@ -394,12 +394,22 @@ func (f *FlagSet) Lookup(name string) *Flag {
394 394
 	return f.formal[name]
395 395
 }
396 396
 
397
+// IsSet indicates whether the named flag was set at all on the command line
398
+func (f *FlagSet) IsSet(name string) bool {
399
+	return f.actual[name] != nil
400
+}
401
+
397 402
 // Lookup returns the Flag structure of the named command-line flag,
398 403
 // returning nil if none exists.
399 404
 func Lookup(name string) *Flag {
400 405
 	return CommandLine.formal[name]
401 406
 }
402 407
 
408
+// IsSet indicates whether the named flag was set at all on the command line
409
+func IsSet(name string) bool {
410
+	return CommandLine.IsSet(name)
411
+}
412
+
403 413
 // Set sets the value of the named flag.
404 414
 func (f *FlagSet) Set(name, value string) error {
405 415
 	flag, ok := f.formal[name]
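
`IsSet` answers a question a plain value comparison cannot: was the flag given on the command line at all, even when it was set to its default value? A hedged sketch mirroring the single-name pattern used in the test below; the import path is an assumption.

package main

import (
	"fmt"

	flag "github.com/docker/docker/pkg/mflag" // assumed import path
)

func main() {
	debug := flag.Bool([]string{"debug"}, false, "enable debug output")
	flag.Parse()

	// "-debug=false" and leaving the flag off both yield *debug == false,
	// but only the former is reported as set.
	if flag.IsSet("debug") {
		fmt.Println("debug was set explicitly to", *debug)
	} else {
		fmt.Println("debug left at its default", *debug)
	}
}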
... ...
@@ -168,11 +168,14 @@ func testParse(f *FlagSet, t *testing.T) {
168 168
 	}
169 169
 	boolFlag := f.Bool([]string{"bool"}, false, "bool value")
170 170
 	bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value")
171
+	f.Bool([]string{"bool3"}, false, "bool3 value")
172
+	bool4Flag := f.Bool([]string{"bool4"}, false, "bool4 value")
171 173
 	intFlag := f.Int([]string{"-int"}, 0, "int value")
172 174
 	int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value")
173 175
 	uintFlag := f.Uint([]string{"uint"}, 0, "uint value")
174 176
 	uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value")
175 177
 	stringFlag := f.String([]string{"string"}, "0", "string value")
178
+	f.String([]string{"string2"}, "0", "string2 value")
176 179
 	singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value")
177 180
 	doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value")
178 181
 	mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value")
... ...
@@ -185,6 +188,7 @@ func testParse(f *FlagSet, t *testing.T) {
185 185
 	args := []string{
186 186
 		"-bool",
187 187
 		"-bool2=true",
188
+		"-bool4=false",
188 189
 		"--int", "22",
189 190
 		"--int64", "0x23",
190 191
 		"-uint", "24",
... ...
@@ -212,6 +216,18 @@ func testParse(f *FlagSet, t *testing.T) {
212 212
 	if *bool2Flag != true {
213 213
 		t.Error("bool2 flag should be true, is ", *bool2Flag)
214 214
 	}
215
+	if !f.IsSet("bool2") {
216
+		t.Error("bool2 should be marked as set")
217
+	}
218
+	if f.IsSet("bool3") {
219
+		t.Error("bool3 should not be marked as set")
220
+	}
221
+	if !f.IsSet("bool4") {
222
+		t.Error("bool4 should be marked as set")
223
+	}
224
+	if *bool4Flag != false {
225
+		t.Error("bool4 flag should be false, is ", *bool4Flag)
226
+	}
215 227
 	if *intFlag != 22 {
216 228
 		t.Error("int flag should be 22, is ", *intFlag)
217 229
 	}
... ...
@@ -227,6 +243,12 @@ func testParse(f *FlagSet, t *testing.T) {
227 227
 	if *stringFlag != "hello" {
228 228
 		t.Error("string flag should be `hello`, is ", *stringFlag)
229 229
 	}
230
+	if !f.IsSet("string") {
231
+		t.Error("string flag should be marked as set")
232
+	}
233
+	if f.IsSet("string2") {
234
+		t.Error("string2 flag should not be marked as set")
235
+	}
230 236
 	if *singleQuoteFlag != "single" {
231 237
 		t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag)
232 238
 	}
... ...
@@ -29,7 +29,9 @@ func ParseFlag(arg string, prev Args) (Args, error) {
29 29
 	}
30 30
 
31 31
 	f := strings.SplitN(arg, "=", 2)
32
-	filters[f[0]] = append(filters[f[0]], f[1])
32
+	name := strings.ToLower(strings.TrimSpace(f[0]))
33
+	value := strings.TrimSpace(f[1])
34
+	filters[name] = append(filters[name], value)
33 35
 
34 36
 	return filters, nil
35 37
 }
36 38
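
The filter change trims whitespace on both sides of the `=` and lower-cases the key, so differently cased or padded filter names collapse onto one map entry while values keep their case. A standalone sketch of just that normalization step with made-up inputs; it re-implements the transform rather than calling the real ParseFlag.

package main

import (
	"fmt"
	"strings"
)

func main() {
	filters := map[string][]string{}
	for _, arg := range []string{"Status=exited", " status =exited", "LABEL=env=prod"} {
		// SplitN with a limit of 2 keeps any '=' inside the value intact.
		f := strings.SplitN(arg, "=", 2)
		name := strings.ToLower(strings.TrimSpace(f[0]))
		value := strings.TrimSpace(f[1])
		filters[name] = append(filters[name], value)
	}
	fmt.Println("status:", filters["status"]) // status: [exited exited]
	fmt.Println("label:", filters["label"])   // label: [env=prod]
}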
new file mode 100644
... ...
@@ -0,0 +1,18 @@
0
+// +build linux
1
+
2
+package reexec
3
+
4
+import (
5
+	"os/exec"
6
+	"syscall"
7
+)
8
+
9
+func Command(args ...string) *exec.Cmd {
10
+	return &exec.Cmd{
11
+		Path: Self(),
12
+		Args: args,
13
+		SysProcAttr: &syscall.SysProcAttr{
14
+			Pdeathsig: syscall.SIGTERM,
15
+		},
16
+	}
17
+}
0 18
new file mode 100644
... ...
@@ -0,0 +1,11 @@
0
+// +build !linux
1
+
2
+package reexec
3
+
4
+import (
5
+	"os/exec"
6
+)
7
+
8
+func Command(args ...string) *exec.Cmd {
9
+	return nil
10
+}
... ...
@@ -27,19 +27,16 @@ func Init() bool {
27 27
 
28 28
 		return true
29 29
 	}
30
-
31 30
 	return false
32 31
 }
33 32
 
34 33
 // Self returns the path to the current processes binary
35 34
 func Self() string {
36 35
 	name := os.Args[0]
37
-
38 36
 	if filepath.Base(name) == name {
39 37
 		if lp, err := exec.LookPath(name); err == nil {
40 38
 			name = lp
41 39
 		}
42 40
 	}
43
-
44 41
 	return name
45 42
 }
... ...
@@ -12,6 +12,12 @@ const maxLoopCounter = 100
12 12
 
13 13
 // FollowSymlink will follow an existing link and scope it to the root
14 14
 // path provided.
15
+// The role of this function is to return an absolute path in the root
16
+// or normalize to the root if the symlink leads to a path which is
17
+// outside of the root.
18
+// Errors encountered while attempting to follow the symlink in path
19
+// will be reported.
20
+// Normalizations to the root don't constitute errors.
15 21
 func FollowSymlinkInScope(link, root string) (string, error) {
16 22
 	root, err := filepath.Abs(root)
17 23
 	if err != nil {
... ...
@@ -60,25 +66,36 @@ func FollowSymlinkInScope(link, root string) (string, error) {
60 60
 				}
61 61
 				return "", err
62 62
 			}
63
-			if stat.Mode()&os.ModeSymlink == os.ModeSymlink {
64
-				dest, err := os.Readlink(prev)
65
-				if err != nil {
66
-					return "", err
67
-				}
68 63
 
69
-				if path.IsAbs(dest) {
70
-					prev = filepath.Join(root, dest)
71
-				} else {
72
-					prev, _ = filepath.Abs(prev)
64
+			// let's break if we're not dealing with a symlink
65
+			if stat.Mode()&os.ModeSymlink != os.ModeSymlink {
66
+				break
67
+			}
73 68
 
74
-					if prev = filepath.Join(filepath.Dir(prev), dest); len(prev) < len(root) {
75
-						prev = filepath.Join(root, filepath.Base(dest))
76
-					}
77
-				}
69
+			// process the symlink
70
+			dest, err := os.Readlink(prev)
71
+			if err != nil {
72
+				return "", err
73
+			}
74
+
75
+			if path.IsAbs(dest) {
76
+				prev = filepath.Join(root, dest)
78 77
 			} else {
79
-				break
78
+				prev, _ = filepath.Abs(prev)
79
+
80
+				dir := filepath.Dir(prev)
81
+				prev = filepath.Join(dir, dest)
82
+				if dir == root && !strings.HasPrefix(prev, root) {
83
+					prev = root
84
+				}
85
+				if len(prev) < len(root) || (len(prev) == len(root) && prev != root) {
86
+					prev = filepath.Join(root, filepath.Base(dest))
87
+				}
80 88
 			}
81 89
 		}
82 90
 	}
91
+	if prev == "/" {
92
+		prev = root
93
+	}
83 94
 	return prev, nil
84 95
 }
... ...
@@ -46,6 +46,7 @@ func TestFollowSymLinkUnderLinkedDir(t *testing.T) {
46 46
 	if err != nil {
47 47
 		t.Fatal(err)
48 48
 	}
49
+	defer os.RemoveAll(dir)
49 50
 
50 51
 	os.Mkdir(filepath.Join(dir, "realdir"), 0700)
51 52
 	os.Symlink("realdir", filepath.Join(dir, "linkdir"))
... ...
@@ -97,25 +98,151 @@ func TestFollowSymLinkRelativeLink(t *testing.T) {
97 97
 }
98 98
 
99 99
 func TestFollowSymLinkRelativeLinkScope(t *testing.T) {
100
-	link := "testdata/fs/a/f"
101
-
102
-	rewrite, err := FollowSymlinkInScope(link, "testdata")
103
-	if err != nil {
104
-		t.Fatal(err)
105
-	}
106
-
107
-	if expected := abs(t, "testdata/test"); expected != rewrite {
108
-		t.Fatalf("Expected %s got %s", expected, rewrite)
109
-	}
110
-
111
-	link = "testdata/fs/b/h"
112
-
113
-	rewrite, err = FollowSymlinkInScope(link, "testdata")
114
-	if err != nil {
115
-		t.Fatal(err)
116
-	}
117
-
118
-	if expected := abs(t, "testdata/root"); expected != rewrite {
119
-		t.Fatalf("Expected %s got %s", expected, rewrite)
100
+	// avoid letting symlink f lead us out of the "testdata" scope
101
+	// we don't normalize because symlink f is in scope and there is no
102
+	// information leak
103
+	{
104
+		link := "testdata/fs/a/f"
105
+
106
+		rewrite, err := FollowSymlinkInScope(link, "testdata")
107
+		if err != nil {
108
+			t.Fatal(err)
109
+		}
110
+
111
+		if expected := abs(t, "testdata/test"); expected != rewrite {
112
+			t.Fatalf("Expected %s got %s", expected, rewrite)
113
+		}
114
+	}
115
+
116
+	// avoid letting symlink f lead us out of the "testdata/fs" scope
117
+	// we don't normalize because symlink f is in scope and there is no
118
+	// information leak
119
+	{
120
+		link := "testdata/fs/a/f"
121
+
122
+		rewrite, err := FollowSymlinkInScope(link, "testdata/fs")
123
+		if err != nil {
124
+			t.Fatal(err)
125
+		}
126
+
127
+		if expected := abs(t, "testdata/fs/test"); expected != rewrite {
128
+			t.Fatalf("Expected %s got %s", expected, rewrite)
129
+		}
130
+	}
131
+
132
+	// avoid letting symlink g (pointed at by symlink h) take us out of scope
133
+	// TODO: we should probably normalize to scope here because ../[....]/root
134
+	// is out of scope and we leak information
135
+	{
136
+		link := "testdata/fs/b/h"
137
+
138
+		rewrite, err := FollowSymlinkInScope(link, "testdata")
139
+		if err != nil {
140
+			t.Fatal(err)
141
+		}
142
+
143
+		if expected := abs(t, "testdata/root"); expected != rewrite {
144
+			t.Fatalf("Expected %s got %s", expected, rewrite)
145
+		}
146
+	}
147
+
148
+	// avoid letting symlink e lead us to ../b
149
+	// normalize to "testdata/fs/a"
150
+	{
151
+		link := "testdata/fs/a/e"
152
+
153
+		rewrite, err := FollowSymlinkInScope(link, "testdata/fs/a")
154
+		if err != nil {
155
+			t.Fatal(err)
156
+		}
157
+
158
+		if expected := abs(t, "testdata/fs/a"); expected != rewrite {
159
+			t.Fatalf("Expected %s got %s", expected, rewrite)
160
+		}
161
+	}
162
+
163
+	// avoid letting symlink -> ../directory/file escape from scope
164
+	// normalize to "testdata/fs/j"
165
+	{
166
+		link := "testdata/fs/j/k"
167
+
168
+		rewrite, err := FollowSymlinkInScope(link, "testdata/fs/j")
169
+		if err != nil {
170
+			t.Fatal(err)
171
+		}
172
+
173
+		if expected := abs(t, "testdata/fs/j"); expected != rewrite {
174
+			t.Fatalf("Expected %s got %s", expected, rewrite)
175
+		}
176
+	}
177
+
178
+	// make sure we don't allow escaping to /
179
+	// normalize to dir
180
+	{
181
+		dir, err := ioutil.TempDir("", "docker-fs-test")
182
+		if err != nil {
183
+			t.Fatal(err)
184
+		}
185
+		defer os.RemoveAll(dir)
186
+
187
+		linkFile := filepath.Join(dir, "foo")
188
+		os.Mkdir(filepath.Join(dir, ""), 0700)
189
+		os.Symlink("/", linkFile)
190
+
191
+		rewrite, err := FollowSymlinkInScope(linkFile, dir)
192
+		if err != nil {
193
+			t.Fatal(err)
194
+		}
195
+
196
+		if rewrite != dir {
197
+			t.Fatalf("Expected %s got %s", dir, rewrite)
198
+		}
199
+	}
200
+
201
+	// make sure we don't allow escaping to /
202
+	// normalize to dir
203
+	{
204
+		dir, err := ioutil.TempDir("", "docker-fs-test")
205
+		if err != nil {
206
+			t.Fatal(err)
207
+		}
208
+		defer os.RemoveAll(dir)
209
+
210
+		linkFile := filepath.Join(dir, "foo")
211
+		os.Mkdir(filepath.Join(dir, ""), 0700)
212
+		os.Symlink("/../../", linkFile)
213
+
214
+		rewrite, err := FollowSymlinkInScope(linkFile, dir)
215
+		if err != nil {
216
+			t.Fatal(err)
217
+		}
218
+
219
+		if rewrite != dir {
220
+			t.Fatalf("Expected %s got %s", dir, rewrite)
221
+		}
222
+	}
223
+
224
+	// make sure we stay in scope without leaking information
225
+	// this also checks for escaping to /
226
+	// normalize to dir
227
+	{
228
+		dir, err := ioutil.TempDir("", "docker-fs-test")
229
+		if err != nil {
230
+			t.Fatal(err)
231
+		}
232
+		defer os.RemoveAll(dir)
233
+
234
+		linkFile := filepath.Join(dir, "foo")
235
+		os.Mkdir(filepath.Join(dir, ""), 0700)
236
+		os.Symlink("../../", linkFile)
237
+
238
+		rewrite, err := FollowSymlinkInScope(linkFile, dir)
239
+		if err != nil {
240
+			t.Fatal(err)
241
+		}
242
+
243
+		if rewrite != dir {
244
+			t.Fatalf("Expected %s got %s", dir, rewrite)
245
+		}
120 246
 	}
121 247
 }
122 248
new file mode 120000
... ...
@@ -0,0 +1 @@
0
+../i/a
0 1
\ No newline at end of file
... ...
@@ -1,11 +1,13 @@
1 1
 package system
2 2
 
3 3
 import (
4
+	"os"
4 5
 	"testing"
5 6
 )
6 7
 
7 8
 func TestLstat(t *testing.T) {
8
-	file, invalid, _ := prepareFiles(t)
9
+	file, invalid, _, dir := prepareFiles(t)
10
+	defer os.RemoveAll(dir)
9 11
 
10 12
 	statFile, err := Lstat(file)
11 13
 	if err != nil {
... ...
@@ -1,12 +1,14 @@
1 1
 package system
2 2
 
3 3
 import (
4
+	"os"
4 5
 	"syscall"
5 6
 	"testing"
6 7
 )
7 8
 
8 9
 func TestFromStatT(t *testing.T) {
9
-	file, _, _ := prepareFiles(t)
10
+	file, _, _, dir := prepareFiles(t)
11
+	defer os.RemoveAll(dir)
10 12
 
11 13
 	stat := &syscall.Stat_t{}
12 14
 	err := syscall.Lstat(file, stat)
... ...
@@ -8,7 +8,7 @@ import (
8 8
 	"testing"
9 9
 )
10 10
 
11
-func prepareFiles(t *testing.T) (string, string, string) {
11
+func prepareFiles(t *testing.T) (string, string, string, string) {
12 12
 	dir, err := ioutil.TempDir("", "docker-system-test")
13 13
 	if err != nil {
14 14
 		t.Fatal(err)
... ...
@@ -26,11 +26,12 @@ func prepareFiles(t *testing.T) (string, string, string) {
26 26
 		t.Fatal(err)
27 27
 	}
28 28
 
29
-	return file, invalid, symlink
29
+	return file, invalid, symlink, dir
30 30
 }
31 31
 
32 32
 func TestLUtimesNano(t *testing.T) {
33
-	file, invalid, symlink := prepareFiles(t)
33
+	file, invalid, symlink, dir := prepareFiles(t)
34
+	defer os.RemoveAll(dir)
34 35
 
35 36
 	before, err := os.Stat(file)
36 37
 	if err != nil {
... ...
@@ -27,11 +27,7 @@ const (
27 27
 // including the byte payload of the image's json metadata as well, and for
28 28
 // calculating the checksums for buildcache.
29 29
 func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) {
30
-	headerSelector, err := getTarHeaderSelector(v)
31
-	if err != nil {
32
-		return nil, err
33
-	}
34
-	return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector}, nil
30
+	return NewTarSumHash(r, dc, v, DefaultTHash)
35 31
 }
36 32
 
37 33
 // Create a new TarSum, providing a THash to use rather than the DefaultTHash
... ...
@@ -40,7 +36,9 @@ func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error)
40 40
 	if err != nil {
41 41
 		return nil, err
42 42
 	}
43
-	return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash}, nil
43
+	ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash}
44
+	err = ts.initTarSum()
45
+	return ts, err
44 46
 }
45 47
 
46 48
 // TarSum is the generic interface for calculating fixed time
... ...
@@ -134,12 +132,6 @@ func (ts *tarSum) initTarSum() error {
134 134
 }
135 135
 
136 136
 func (ts *tarSum) Read(buf []byte) (int, error) {
137
-	if ts.writer == nil {
138
-		if err := ts.initTarSum(); err != nil {
139
-			return 0, err
140
-		}
141
-	}
142
-
143 137
 	if ts.finished {
144 138
 		return ts.bufWriter.Read(buf)
145 139
 	}
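Since NewTarSumHash now calls initTarSum itself, a TarSum is ready the moment it is constructed and Sum(nil) is meaningful even if nothing is ever read. A rough consumption sketch, assuming the package is docker's pkg/tarsum (the import path and the file name below are illustrative):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"

	// assumed import path for the package being changed above
	"github.com/docker/docker/pkg/tarsum"
)

func main() {
	// layer.tar is a hypothetical archive; any tar stream works.
	fh, err := os.Open("layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer fh.Close()

	ts, err := tarsum.NewTarSum(fh, true, tarsum.Version0)
	if err != nil {
		log.Fatal(err)
	}
	// Drain the reader so the checksum covers the whole archive, then read
	// the final sum. Because the hash is initialized eagerly now, Sum(nil)
	// is also well defined if nothing was ever read (see TestEmptyTar below).
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ts.Sum(nil))
}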
... ...
@@ -230,6 +230,17 @@ func TestEmptyTar(t *testing.T) {
230 230
 	if resultSum != expectedSum {
231 231
 		t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum)
232 232
 	}
233
+
234
+	// Test without ever actually writing anything.
235
+	if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil {
236
+		t.Fatal(err)
237
+	}
238
+
239
+	resultSum = ts.Sum(nil)
240
+
241
+	if resultSum != expectedSum {
242
+		t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum)
243
+	}
233 244
 }
234 245
 
235 246
 var (
... ...
@@ -318,6 +329,153 @@ func TestTarSums(t *testing.T) {
318 318
 	}
319 319
 }
320 320
 
321
+func TestIteration(t *testing.T) {
322
+	headerTests := []struct {
323
+		expectedSum string // TODO(vbatts) it would be nice to get individual sums of each entry
324
+		version     Version
325
+		hdr         *tar.Header
326
+		data        []byte
327
+	}{
328
+		{
329
+			"tarsum+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
330
+			Version0,
331
+			&tar.Header{
332
+				Name:     "file.txt",
333
+				Size:     0,
334
+				Typeflag: tar.TypeReg,
335
+				Devminor: 0,
336
+				Devmajor: 0,
337
+			},
338
+			[]byte(""),
339
+		},
340
+		{
341
+			"tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
342
+			VersionDev,
343
+			&tar.Header{
344
+				Name:     "file.txt",
345
+				Size:     0,
346
+				Typeflag: tar.TypeReg,
347
+				Devminor: 0,
348
+				Devmajor: 0,
349
+			},
350
+			[]byte(""),
351
+		},
352
+		{
353
+			"tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
354
+			VersionDev,
355
+			&tar.Header{
356
+				Name:     "another.txt",
357
+				Uid:      1000,
358
+				Gid:      1000,
359
+				Uname:    "slartibartfast",
360
+				Gname:    "users",
361
+				Size:     4,
362
+				Typeflag: tar.TypeReg,
363
+				Devminor: 0,
364
+				Devmajor: 0,
365
+			},
366
+			[]byte("test"),
367
+		},
368
+		{
369
+			"tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd",
370
+			VersionDev,
371
+			&tar.Header{
372
+				Name:     "xattrs.txt",
373
+				Uid:      1000,
374
+				Gid:      1000,
375
+				Uname:    "slartibartfast",
376
+				Gname:    "users",
377
+				Size:     4,
378
+				Typeflag: tar.TypeReg,
379
+				Xattrs: map[string]string{
380
+					"user.key1": "value1",
381
+					"user.key2": "value2",
382
+				},
383
+			},
384
+			[]byte("test"),
385
+		},
386
+		{
387
+			"tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760",
388
+			VersionDev,
389
+			&tar.Header{
390
+				Name:     "xattrs.txt",
391
+				Uid:      1000,
392
+				Gid:      1000,
393
+				Uname:    "slartibartfast",
394
+				Gname:    "users",
395
+				Size:     4,
396
+				Typeflag: tar.TypeReg,
397
+				Xattrs: map[string]string{
398
+					"user.KEY1": "value1", // adding different case to ensure different sum
399
+					"user.key2": "value2",
400
+				},
401
+			},
402
+			[]byte("test"),
403
+		},
404
+		{
405
+			"tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa",
406
+			Version0,
407
+			&tar.Header{
408
+				Name:     "xattrs.txt",
409
+				Uid:      1000,
410
+				Gid:      1000,
411
+				Uname:    "slartibartfast",
412
+				Gname:    "users",
413
+				Size:     4,
414
+				Typeflag: tar.TypeReg,
415
+				Xattrs: map[string]string{
416
+					"user.NOT": "CALCULATED",
417
+				},
418
+			},
419
+			[]byte("test"),
420
+		},
421
+	}
422
+	for _, htest := range headerTests {
423
+		s, err := renderSumForHeader(htest.version, htest.hdr, htest.data)
424
+		if err != nil {
425
+			t.Fatal(err)
426
+		}
427
+
428
+		if s != htest.expectedSum {
429
+			t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s)
430
+		}
431
+	}
432
+
433
+}
434
+
435
+func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) {
436
+	buf := bytes.NewBuffer(nil)
437
+	// first build our test tar
438
+	tw := tar.NewWriter(buf)
439
+	if err := tw.WriteHeader(h); err != nil {
440
+		return "", err
441
+	}
442
+	if _, err := tw.Write(data); err != nil {
443
+		return "", err
444
+	}
445
+	tw.Close()
446
+
447
+	ts, err := NewTarSum(buf, true, v)
448
+	if err != nil {
449
+		return "", err
450
+	}
451
+	tr := tar.NewReader(ts)
452
+	for {
453
+		hdr, err := tr.Next()
454
+		if hdr == nil || err == io.EOF {
455
+			break
456
+		}
457
+		if err != nil {
458
+			return "", err
459
+		}
460
+		if _, err = io.Copy(ioutil.Discard, tr); err != nil {
461
+			return "", err
462
+		}
463
+		break // we're just reading one header ...
464
+	}
465
+	return ts.Sum(nil), nil
466
+}
467
+
321 468
 func Benchmark9kTar(b *testing.B) {
322 469
 	buf := bytes.NewBuffer([]byte{})
323 470
 	fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar")
324 471
new file mode 100644
... ...
@@ -0,0 +1,30 @@
0
+package urlutil
1
+
2
+import "strings"
3
+
4
+var (
5
+	validPrefixes = []string{
6
+		"git://",
7
+		"github.com/",
8
+		"git@",
9
+	}
10
+)
11
+
12
+// IsGitURL returns true if the provided str is a git repository URL.
13
+func IsGitURL(str string) bool {
14
+	if IsURL(str) && strings.HasSuffix(str, ".git") {
15
+		return true
16
+	}
17
+	for _, prefix := range validPrefixes {
18
+		if strings.HasPrefix(str, prefix) {
19
+			return true
20
+		}
21
+	}
22
+	return false
23
+}
24
+
25
+// IsGitTransport returns true if the provided str is a git transport by inspecting
26
+// the prefix of the string for known protocols used in git.
27
+func IsGitTransport(str string) bool {
28
+	return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@")
29
+}
0 30
new file mode 100644
... ...
@@ -0,0 +1,43 @@
0
+package urlutil
1
+
2
+import "testing"
3
+
4
+var (
5
+	gitUrls = []string{
6
+		"git://github.com/docker/docker",
7
+		"git@github.com:docker/docker.git",
8
+		"git@bitbucket.org:atlassianlabs/atlassian-docker.git",
9
+		"https://github.com/docker/docker.git",
10
+		"http://github.com/docker/docker.git",
11
+	}
12
+	incompleteGitUrls = []string{
13
+		"github.com/docker/docker",
14
+	}
15
+)
16
+
17
+func TestValidGitTransport(t *testing.T) {
18
+	for _, url := range gitUrls {
19
+		if IsGitTransport(url) == false {
20
+			t.Fatalf("%q should be detected as valid Git prefix", url)
21
+		}
22
+	}
23
+
24
+	for _, url := range incompleteGitUrls {
25
+		if IsGitTransport(url) == true {
26
+			t.Fatalf("%q should not be detected as valid Git prefix", url)
27
+		}
28
+	}
29
+}
30
+
31
+func TestIsGIT(t *testing.T) {
32
+	for _, url := range gitUrls {
33
+		if IsGitURL(url) == false {
34
+			t.Fatalf("%q should be detected as valid Git url", url)
35
+		}
36
+	}
37
+	for _, url := range incompleteGitUrls {
38
+		if IsGitURL(url) == false {
39
+			t.Fatalf("%q should be detected as valid Git url", url)
40
+		}
41
+	}
42
+}
0 43
new file mode 100644
... ...
@@ -0,0 +1,19 @@
0
+package urlutil
1
+
2
+import "strings"
3
+
4
+var validUrlPrefixes = []string{
5
+	"http://",
6
+	"https://",
7
+}
8
+
9
+// IsURL returns true if the provided str is a valid URL by doing
10
+// a simple check of the URL's transport prefix.
11
+func IsURL(str string) bool {
12
+	for _, prefix := range validUrlPrefixes {
13
+		if strings.HasPrefix(str, prefix) {
14
+			return true
15
+		}
16
+	}
17
+	return false
18
+}
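These helpers replace the ad-hoc IsURL/IsGIT/ValidGitTransport checks removed from utils further down. A small usage sketch, assuming the new package lands at docker's pkg/urlutil (the import path is an assumption based on the package name):

package main

import (
	"fmt"

	// assumed import path for the new package added above
	"github.com/docker/docker/pkg/urlutil"
)

func main() {
	// IsGitURL accepts anything that looks like a git repository reference,
	// including the short "github.com/owner/repo" form.
	fmt.Println(urlutil.IsGitURL("github.com/docker/docker"))             // true
	fmt.Println(urlutil.IsGitURL("https://github.com/docker/docker.git")) // true

	// IsGitTransport is stricter: it requires an explicit transport
	// (http://, https://, git:// or git@).
	fmt.Println(urlutil.IsGitTransport("github.com/docker/docker")) // false

	// IsURL only checks for an http:// or https:// prefix.
	fmt.Println(urlutil.IsURL("https://example.com/archive.tar.gz")) // true
}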
... ...
@@ -105,6 +105,10 @@ if [ -z "$DEBUG" ]; then
105 105
 fi
106 106
 
107 107
 LDFLAGS_STATIC='-linkmode external'
108
+# Cgo -H windows is incompatible with -linkmode external.
109
+if [ "$(go env GOOS)" == 'windows' ]; then
110
+	LDFLAGS_STATIC=''
111
+fi
108 112
 EXTLDFLAGS_STATIC='-static'
109 113
 # ORIG_BUILDFLAGS is necessary for the cross target which cannot always build
110 114
 # with options like -race.
... ...
@@ -219,7 +223,7 @@ bundle() {
219 219
 	bundle=$(basename $bundlescript)
220 220
 	echo "---> Making bundle: $bundle (in bundles/$VERSION/$bundle)"
221 221
 	mkdir -p bundles/$VERSION/$bundle
222
-	source $bundlescript $(pwd)/bundles/$VERSION/$bundle
222
+	source "$bundlescript" "$(pwd)/bundles/$VERSION/$bundle"
223 223
 }
224 224
 
225 225
 main() {
... ...
@@ -3,19 +3,26 @@ set -e
3 3
 
4 4
 DEST=$1
5 5
 BINARY_NAME="docker-$VERSION"
6
+BINARY_EXTENSION=
6 7
 if [ "$(go env GOOS)" = 'windows' ]; then
7
-	BINARY_NAME+='.exe'
8
+	BINARY_EXTENSION='.exe'
9
+fi
10
+BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"
11
+
12
+# Cygdrive paths don't play well with go build -o.
13
+if [[ "$(uname -s)" == CYGWIN* ]]; then
14
+	DEST=$(cygpath -mw $DEST)
8 15
 fi
9 16
 
10 17
 go build \
11
-	-o "$DEST/$BINARY_NAME" \
18
+	-o "$DEST/$BINARY_FULLNAME" \
12 19
 	"${BUILDFLAGS[@]}" \
13 20
 	-ldflags "
14 21
 		$LDFLAGS
15 22
 		$LDFLAGS_STATIC_DOCKER
16 23
 	" \
17 24
 	./docker
18
-echo "Created binary: $DEST/$BINARY_NAME"
19
-ln -sf "$BINARY_NAME" "$DEST/docker"
25
+echo "Created binary: $DEST/$BINARY_FULLNAME"
26
+ln -sf "$BINARY_FULLNAME" "$DEST/docker$BINARY_EXTENSION"
20 27
 
21
-hash_files "$DEST/$BINARY_NAME"
28
+hash_files "$DEST/$BINARY_FULLNAME"
... ...
@@ -51,7 +51,7 @@ clone hg code.google.com/p/go.net 84a4013f96e0
51 51
 
52 52
 clone hg code.google.com/p/gosqlite 74691fb6f837
53 53
 
54
-clone git github.com/docker/libtrust d273ef2565ca
54
+clone git github.com/docker/libtrust 230dfd18c232
55 55
 
56 56
 clone git github.com/Sirupsen/logrus v0.6.0
57 57
 
... ...
@@ -66,7 +66,7 @@ if [ "$1" = '--go' ]; then
66 66
 	mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar
67 67
 fi
68 68
 
69
-clone git github.com/docker/libcontainer 28cb5f9dfd6f3352c610a4f1502b5df4f69389ea
69
+clone git github.com/docker/libcontainer 84c1636580a356db88b079d118b94abe6a1a0acd
70 70
 # see src/github.com/docker/libcontainer/update-vendor.sh which is the "source of truth" for libcontainer deps (just like this file)
71 71
 rm -rf src/github.com/docker/libcontainer/vendor
72 72
 eval "$(grep '^clone ' src/github.com/docker/libcontainer/update-vendor.sh | grep -v 'github.com/codegangsta/cli')"
... ...
@@ -126,8 +126,8 @@ func LoadConfig(rootPath string) (*ConfigFile, error) {
126 126
 				return &configFile, err
127 127
 			}
128 128
 			authConfig.Auth = ""
129
-			configFile.Configs[k] = authConfig
130 129
 			authConfig.ServerAddress = k
130
+			configFile.Configs[k] = authConfig
131 131
 		}
132 132
 	}
133 133
 	return &configFile, nil
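The reordering above matters because Go maps store struct values by copy: the old code stored the copy first and only then set ServerAddress on the local variable, so the map entry never carried the address. A standalone sketch of the gotcha (the authConfig type below is a stand-in, not the real registry type):

package main

import "fmt"

// authConfig is a stand-in for the real AuthConfig struct.
type authConfig struct {
	Auth          string
	ServerAddress string
}

func main() {
	configs := map[string]authConfig{}

	// Buggy order: the copy is stored before the field is set, so the
	// value inside the map never sees ServerAddress.
	ac := authConfig{Auth: "secret"}
	configs["registry.example.com"] = ac
	ac.ServerAddress = "registry.example.com"
	fmt.Printf("%q\n", configs["registry.example.com"].ServerAddress) // ""

	// Fixed order, as in the change above: set the field, then store.
	ac = authConfig{Auth: "secret"}
	ac.ServerAddress = "registry.example.com"
	configs["registry.example.com"] = ac
	fmt.Printf("%q\n", configs["registry.example.com"].ServerAddress) // "registry.example.com"
}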
... ...
@@ -33,7 +33,6 @@ type Config struct {
33 33
 	NetworkDisabled bool
34 34
 	MacAddress      string
35 35
 	OnBuild         []string
36
-	SecurityOpt     []string
37 36
 }
38 37
 
39 38
 func ContainerConfigFromJob(job *engine.Job) *Config {
... ...
@@ -58,7 +57,6 @@ func ContainerConfigFromJob(job *engine.Job) *Config {
58 58
 	}
59 59
 	job.GetenvJson("ExposedPorts", &config.ExposedPorts)
60 60
 	job.GetenvJson("Volumes", &config.Volumes)
61
-	config.SecurityOpt = job.GetenvList("SecurityOpt")
62 61
 	if PortSpecs := job.GetenvList("PortSpecs"); PortSpecs != nil {
63 62
 		config.PortSpecs = PortSpecs
64 63
 	}
... ...
@@ -19,10 +19,11 @@ type ExecConfig struct {
19 19
 
20 20
 func ExecConfigFromJob(job *engine.Job) *ExecConfig {
21 21
 	execConfig := &ExecConfig{
22
-		User:         job.Getenv("User"),
23
-		Privileged:   job.GetenvBool("Privileged"),
22
+		// TODO(vishh): Expose 'User' once it is supported.
23
+		//User:         job.Getenv("User"),
24
+		// TODO(vishh): Expose 'Privileged' once it is supported.
25
+		//Privileged:   job.GetenvBool("Privileged"),
24 26
 		Tty:          job.GetenvBool("Tty"),
25
-		Container:    job.Getenv("Container"),
26 27
 		AttachStdin:  job.GetenvBool("AttachStdin"),
27 28
 		AttachStderr: job.GetenvBool("AttachStderr"),
28 29
 		AttachStdout: job.GetenvBool("AttachStdout"),
... ...
@@ -95,6 +95,7 @@ type HostConfig struct {
95 95
 	CapAdd          []string
96 96
 	CapDrop         []string
97 97
 	RestartPolicy   RestartPolicy
98
+	SecurityOpt     []string
98 99
 }
99 100
 
100 101
 // This is used by the create command when you want to set both the
... ...
@@ -130,6 +131,7 @@ func ContainerHostConfigFromJob(job *engine.Job) *HostConfig {
130 130
 	job.GetenvJson("PortBindings", &hostConfig.PortBindings)
131 131
 	job.GetenvJson("Devices", &hostConfig.Devices)
132 132
 	job.GetenvJson("RestartPolicy", &hostConfig.RestartPolicy)
133
+	hostConfig.SecurityOpt = job.GetenvList("SecurityOpt")
133 134
 	if Binds := job.GetenvList("Binds"); Binds != nil {
134 135
 		hostConfig.Binds = Binds
135 136
 	}
... ...
@@ -273,7 +273,6 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
273 273
 		MacAddress:      *flMacAddress,
274 274
 		Entrypoint:      entrypoint,
275 275
 		WorkingDir:      *flWorkingDir,
276
-		SecurityOpt:     flSecurityOpt.GetAll(),
277 276
 	}
278 277
 
279 278
 	hostConfig := &HostConfig{
... ...
@@ -294,6 +293,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
294 294
 		CapAdd:          flCapAdd.GetAll(),
295 295
 		CapDrop:         flCapDrop.GetAll(),
296 296
 		RestartPolicy:   restartPolicy,
297
+		SecurityOpt:     flSecurityOpt.GetAll(),
297 298
 	}
298 299
 
299 300
 	// When allocating stdin in attached mode, close stdin at client disconnect
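Taken together, the runconfig hunks move --security-opt off the container's Config and onto HostConfig, so the options travel with the run request rather than with the committable container configuration. A hedged sketch of the caller-visible result, assuming the usual github.com/docker/docker/runconfig import path:

package main

import (
	"fmt"

	// assumed import path for the runconfig package changed above
	"github.com/docker/docker/runconfig"
)

func main() {
	// Security options are now per-run host settings on HostConfig instead
	// of fields on Config.
	hostConfig := &runconfig.HostConfig{
		SecurityOpt: []string{"label:disable"}, // illustrative value
	}
	fmt.Println(hostConfig.SecurityOpt)
}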
... ...
@@ -288,21 +288,7 @@ func NewHTTPRequestError(msg string, res *http.Response) error {
288 288
 	}
289 289
 }
290 290
 
291
-func IsURL(str string) bool {
292
-	return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://")
293
-}
294
-
295
-func IsGIT(str string) bool {
296
-	return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/") || strings.HasPrefix(str, "git@github.com:") || (strings.HasSuffix(str, ".git") && IsURL(str))
297
-}
298
-
299
-func ValidGitTransport(str string) bool {
300
-	return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") || IsURL(str)
301
-}
302
-
303
-var (
304
-	localHostRx = regexp.MustCompile(`(?m)^nameserver 127[^\n]+\n*`)
305
-)
291
+var localHostRx = regexp.MustCompile(`(?m)^nameserver 127[^\n]+\n*`)
306 292
 
307 293
 // RemoveLocalDns looks into the /etc/resolv.conf,
308 294
 // and removes any local nameserver entries.
... ...
@@ -97,24 +97,3 @@ func TestReadSymlinkedDirectoryToFile(t *testing.T) {
97 97
 		t.Errorf("failed to remove symlink: %s", err)
98 98
 	}
99 99
 }
100
-
101
-func TestValidGitTransport(t *testing.T) {
102
-	for _, url := range []string{
103
-		"git://github.com/docker/docker",
104
-		"git@github.com:docker/docker.git",
105
-		"https://github.com/docker/docker.git",
106
-		"http://github.com/docker/docker.git",
107
-	} {
108
-		if ValidGitTransport(url) == false {
109
-			t.Fatalf("%q should be detected as valid Git prefix", url)
110
-		}
111
-	}
112
-
113
-	for _, url := range []string{
114
-		"github.com/docker/docker",
115
-	} {
116
-		if ValidGitTransport(url) == true {
117
-			t.Fatalf("%q should not be detected as valid Git prefix", url)
118
-		}
119
-	}
120
-}
... ...
@@ -14,17 +14,11 @@ type CpusetGroup struct {
14 14
 }
15 15
 
16 16
 func (s *CpusetGroup) Set(d *data) error {
17
-	// we don't want to join this cgroup unless it is specified
18
-	if d.c.CpusetCpus != "" {
19
-		dir, err := d.path("cpuset")
20
-		if err != nil {
21
-			return err
22
-		}
23
-
24
-		return s.SetDir(dir, d.c.CpusetCpus, d.pid)
17
+	dir, err := d.path("cpuset")
18
+	if err != nil {
19
+		return err
25 20
 	}
26
-
27
-	return nil
21
+	return s.SetDir(dir, d.c.CpusetCpus, d.pid)
28 22
 }
29 23
 
30 24
 func (s *CpusetGroup) Remove(d *data) error {
... ...
@@ -46,8 +40,12 @@ func (s *CpusetGroup) SetDir(dir, value string, pid int) error {
46 46
 		return err
47 47
 	}
48 48
 
49
-	if err := writeFile(dir, "cpuset.cpus", value); err != nil {
50
-		return err
49
+	// If we don't use --cpuset, the default cpuset.cpus is set by
50
+	// s.ensureParent; otherwise, write the value we were given.
51
+	if value != "" {
52
+		if err := writeFile(dir, "cpuset.cpus", value); err != nil {
53
+			return err
54
+		}
51 55
 	}
52 56
 
53 57
 	return nil
... ...
@@ -137,16 +137,14 @@ func Apply(c *cgroups.Cgroup, pid int) (map[string]string, error) {
137 137
 
138 138
 	}
139 139
 
140
-	// we need to manually join the freezer cgroup in systemd because it does not currently support it
141
-	// via the dbus api
140
+	// we need to manually join the freezer and cpuset cgroups in systemd
141
+	// because it does not currently support them via the dbus api.
142 142
 	if err := joinFreezer(c, pid); err != nil {
143 143
 		return nil, err
144 144
 	}
145 145
 
146
-	if c.CpusetCpus != "" {
147
-		if err := joinCpuset(c, pid); err != nil {
148
-			return nil, err
149
-		}
146
+	if err := joinCpuset(c, pid); err != nil {
147
+		return nil, err
150 148
 	}
151 149
 
152 150
 	paths := make(map[string]string)
... ...
@@ -39,9 +39,6 @@ func (v *Veth) Create(n *Network, nspid int, networkState *NetworkState) error {
39 39
 	if err := SetMtu(name1, n.Mtu); err != nil {
40 40
 		return err
41 41
 	}
42
-	if err := SetHairpinMode(name1, true); err != nil {
43
-		return err
44
-	}
45 42
 	if err := InterfaceUp(name1); err != nil {
46 43
 		return err
47 44
 	}
... ...
@@ -55,16 +55,7 @@ func (k *ecPublicKey) CurveName() string {
55 55
 
56 56
 // KeyID returns a distinct identifier which is unique to this Public Key.
57 57
 func (k *ecPublicKey) KeyID() string {
58
-	// Generate and return a libtrust fingerprint of the EC public key.
59
-	// For an EC key this should be:
60
-	//   SHA256("EC"+curveName+bytes(X)+bytes(Y))
61
-	// Then truncated to 240 bits and encoded into 12 base32 groups like so:
62
-	//   ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
63
-	hasher := crypto.SHA256.New()
64
-	hasher.Write([]byte(k.KeyType() + k.CurveName()))
65
-	hasher.Write(k.X.Bytes())
66
-	hasher.Write(k.Y.Bytes())
67
-	return keyIDEncode(hasher.Sum(nil)[:30])
58
+	return keyIDFromCryptoKey(k)
68 59
 }
69 60
 
70 61
 func (k *ecPublicKey) String() string {
... ...
@@ -11,9 +11,21 @@ func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKe
11 11
 	filtered := make([]PublicKey, 0, len(keys))
12 12
 
13 13
 	for _, pubKey := range keys {
14
-		hosts, ok := pubKey.GetExtendedField("hosts").([]interface{})
14
+		var hosts []string
15
+		switch v := pubKey.GetExtendedField("hosts").(type) {
16
+		case []string:
17
+			hosts = v
18
+		case []interface{}:
19
+			for _, value := range v {
20
+				h, ok := value.(string)
21
+				if !ok {
22
+					continue
23
+				}
24
+				hosts = append(hosts, h)
25
+			}
26
+		}
15 27
 
16
-		if !ok || (ok && len(hosts) == 0) {
28
+		if len(hosts) == 0 {
17 29
 			if includeEmpty {
18 30
 				filtered = append(filtered, pubKey)
19 31
 			}
... ...
@@ -21,12 +33,7 @@ func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKe
21 21
 		}
22 22
 
23 23
 		// Check if any hosts match pattern
24
-		for _, hostVal := range hosts {
25
-			hostPattern, ok := hostVal.(string)
26
-			if !ok {
27
-				continue
28
-			}
29
-
24
+		for _, hostPattern := range hosts {
30 25
 			match, err := filepath.Match(hostPattern, host)
31 26
 			if err != nil {
32 27
 				return nil, err
... ...
@@ -37,7 +44,6 @@ func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKe
37 37
 				continue
38 38
 			}
39 39
 		}
40
-
41 40
 	}
42 41
 
43 42
 	return filtered, nil
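FilterByHosts now accepts the "hosts" extended field either as []interface{} (as produced by JSON/JWK decoding) or as []string (as produced by PEM headers). A rough usage sketch, assuming libtrust's exported key constructors behave as in the vendored revision pinned earlier in this change:

package main

import (
	"fmt"
	"log"

	"github.com/docker/libtrust" // vendored dependency pinned earlier in this change
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	pub := key.PublicKey()

	// Either representation of "hosts" is now accepted by FilterByHosts.
	pub.AddExtendedField("hosts", []string{"*.example.com"})

	matched, err := libtrust.FilterByHosts([]libtrust.PublicKey{pub}, "registry.example.com", false)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(matched)) // 1
}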
... ...
@@ -27,6 +27,8 @@ func TestFilter(t *testing.T) {
27 27
 			t.Fatal(err)
28 28
 		}
29 29
 
30
+		// we use both []interface{} and []string here because jwt uses
31
+		// []interface{} format, while PEM uses []string
30 32
 		switch {
31 33
 		case i == 0:
32 34
 			// Don't add entries for this key, key 0.
... ...
@@ -36,10 +38,10 @@ func TestFilter(t *testing.T) {
36 36
 			key.AddExtendedField("hosts", []interface{}{"*.even.example.com"})
37 37
 		case i == 7:
38 38
 			// Should catch only the last key, and make it match any hostname.
39
-			key.AddExtendedField("hosts", []interface{}{"*"})
39
+			key.AddExtendedField("hosts", []string{"*"})
40 40
 		default:
41 41
 			// should catch keys 1, 3, 5.
42
-			key.AddExtendedField("hosts", []interface{}{"*.example.com"})
42
+			key.AddExtendedField("hosts", []string{"*.example.com"})
43 43
 		}
44 44
 
45 45
 		keys = append(keys, key)
... ...
@@ -138,7 +138,7 @@ func testTrustedHostKeysFile(t *testing.T, trustedHostKeysFilename string) {
138 138
 	}
139 139
 
140 140
 	for addr, hostKey := range trustedHostKeysMapping {
141
-		t.Logf("Host Address: %s\n", addr)
141
+		t.Logf("Host Address: %d\n", addr)
142 142
 		t.Logf("Host Key: %s\n\n", hostKey)
143 143
 	}
144 144
 
... ...
@@ -160,7 +160,7 @@ func testTrustedHostKeysFile(t *testing.T, trustedHostKeysFilename string) {
160 160
 	}
161 161
 
162 162
 	for addr, hostKey := range trustedHostKeysMapping {
163
-		t.Logf("Host Address: %s\n", addr)
163
+		t.Logf("Host Address: %d\n", addr)
164 164
 		t.Logf("Host Key: %s\n\n", hostKey)
165 165
 	}
166 166
 
... ...
@@ -34,16 +34,7 @@ func (k *rsaPublicKey) KeyType() string {
34 34
 
35 35
 // KeyID returns a distinct identifier which is unique to this Public Key.
36 36
 func (k *rsaPublicKey) KeyID() string {
37
-	// Generate and return a 'libtrust' fingerprint of the RSA public key.
38
-	// For an RSA key this should be:
39
-	//   SHA256("RSA"+bytes(N)+bytes(E))
40
-	// Then truncated to 240 bits and encoded into 12 base32 groups like so:
41
-	//   ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
42
-	hasher := crypto.SHA256.New()
43
-	hasher.Write([]byte(k.KeyType()))
44
-	hasher.Write(k.N.Bytes())
45
-	hasher.Write(serializeRSAPublicExponentParam(k.E))
46
-	return keyIDEncode(hasher.Sum(nil)[:30])
37
+	return keyIDFromCryptoKey(k)
47 38
 }
48 39
 
49 40
 func (k *rsaPublicKey) String() string {
... ...
@@ -201,7 +201,7 @@ func TestCollapseGrants(t *testing.T) {
201 201
 
202 202
 	collapsedGrants, expiration, err := CollapseStatements(statements, false)
203 203
 	if len(collapsedGrants) != 12 {
204
-		t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %s", 12, len(collapsedGrants))
204
+		t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants))
205 205
 	}
206 206
 	if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) {
207 207
 		t.Fatalf("Unexpected expiration time: %s", expiration.String())
... ...
@@ -261,7 +261,7 @@ func TestCollapseGrants(t *testing.T) {
261 261
 
262 262
 	collapsedGrants, expiration, err = CollapseStatements(statements, false)
263 263
 	if len(collapsedGrants) != 12 {
264
-		t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %s", 12, len(collapsedGrants))
264
+		t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants))
265 265
 	}
266 266
 	if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) {
267 267
 		t.Fatalf("Unexpected expiration time: %s", expiration.String())
... ...
@@ -2,6 +2,7 @@ package libtrust
2 2
 
3 3
 import (
4 4
 	"bytes"
5
+	"crypto"
5 6
 	"crypto/elliptic"
6 7
 	"crypto/x509"
7 8
 	"encoding/base32"
... ...
@@ -52,6 +53,21 @@ func keyIDEncode(b []byte) string {
52 52
 	return buf.String()
53 53
 }
54 54
 
55
+func keyIDFromCryptoKey(pubKey PublicKey) string {
56
+	// Generate and return a 'libtrust' fingerprint of the public key.
57
+	// For an RSA key this should be:
58
+	//   SHA256(DER encoded ASN1)
59
+	// Then truncated to 240 bits and encoded into 12 base32 groups like so:
60
+	//   ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
61
+	derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey())
62
+	if err != nil {
63
+		return ""
64
+	}
65
+	hasher := crypto.SHA256.New()
66
+	hasher.Write(derBytes)
67
+	return keyIDEncode(hasher.Sum(nil)[:30])
68
+}
69
+
55 70
 func stringFromMap(m map[string]interface{}, key string) (string, error) {
56 71
 	val, ok := m[key]
57 72
 	if !ok {
58 73
new file mode 100644
... ...
@@ -0,0 +1,23 @@
0
+package libtrust
1
+
2
+import (
3
+	"encoding/pem"
4
+	"reflect"
5
+	"testing"
6
+)
7
+
8
+func TestAddPEMHeadersToKey(t *testing.T) {
9
+	pk := &rsaPublicKey{nil, map[string]interface{}{}}
10
+	blk := &pem.Block{Headers: map[string]string{"hosts": "localhost,127.0.0.1"}}
11
+	addPEMHeadersToKey(blk, pk)
12
+
13
+	val := pk.GetExtendedField("hosts")
14
+	hosts, ok := val.([]string)
15
+	if !ok {
16
+		t.Fatalf("hosts type(%v), expected []string", reflect.TypeOf(val))
17
+	}
18
+	expected := []string{"localhost", "127.0.0.1"}
19
+	if !reflect.DeepEqual(hosts, expected) {
20
+		t.Errorf("hosts(%v), expected %v", hosts, expected)
21
+	}
22
+}