Browse code

rebase master

Victor Vieux authored on 2013/06/20 22:56:36
Showing 45 changed files
... ...
@@ -41,6 +41,7 @@ Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
41 41
 Ken Cochrane <kencochrane@gmail.com>
42 42
 Kevin J. Lynagh <kevin@keminglabs.com>
43 43
 Louis Opter <kalessin@kalessin.fr>
44
+Marcus Farkas <toothlessgear@finitebox.com>
44 45
 Maxim Treskin <zerthurd@gmail.com>
45 46
 Michael Crosby <crosby.michael@gmail.com>
46 47
 Mikhail Sobolev <mss@mawhrin.net>
... ...
@@ -1,5 +1,25 @@
1 1
 # Changelog
2 2
 
3
+## 0.4.2 (2013-06-17)
4
+ - Packaging: Bumped version to work around an Ubuntu bug
5
+
6
+## 0.4.1 (2013-06-17)
7
+ + Remote Api: Add flag to enable cross domain requests
8
+ + Remote Api/Client: Add images and containers sizes in docker ps and docker images
9
+ + Runtime: Configure dns configuration host-wide with 'docker -d -dns'
10
+ + Runtime: Detect faulty DNS configuration and replace it with a public default
11
+ + Runtime: allow docker run <name>:<id>
12
+ + Runtime: you can now specify public port (ex: -p 80:4500)
13
+ * Client: allow multiple params in inspect
14
+ * Client: Print the container id before the hijack in `docker run`
15
+ * Registry: add regexp check on repo's name
16
+ * Registry: Move auth to the client
17
+ * Runtime: improved image removal to garbage-collect unreferenced parents
18
+ * Vagrantfile: Add the rest api port to vagrantfile's port_forward
19
+ * Upgrade to Go 1.1
20
+ - Builder: don't ignore last line in Dockerfile when it doesn't end with \n
21
+ - Registry: Remove login check on pull
22
+
3 23
 ## 0.4.0 (2013-06-03)
4 24
  + Introducing Builder: 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
5 25
  + Introducing Remote API: control Docker programmatically using a simple HTTP/json API
... ...
@@ -16,7 +16,6 @@ to put them - so we put them here :)
16 16
 * Unify build commands and regular commands
17 17
 * Move source code into src/ subdir for clarity
18 18
 * Clean up the Makefile, it's a mess
19
-* docker buidl: show short IDs
20 19
 * docker build: on non-existent local path for ADD, don't show full absolute path on the host
21 20
 * mount into /dockerinit rather than /sbin/init
22 21
 * docker tag foo REPO:TAG
... ...
@@ -17,7 +17,7 @@ endif
17 17
 GIT_COMMIT = $(shell git rev-parse --short HEAD)
18 18
 GIT_STATUS = $(shell test -n "`git status --porcelain`" && echo "+CHANGES")
19 19
 
20
-BUILD_OPTIONS = -ldflags "-X main.GIT_COMMIT $(GIT_COMMIT)$(GIT_STATUS)"
20
+BUILD_OPTIONS = -ldflags "-X main.GITCOMMIT $(GIT_COMMIT)$(GIT_STATUS)"
21 21
 
22 22
 SRC_DIR := $(GOPATH)/src
23 23
 
... ...
@@ -108,7 +108,7 @@ Note that some methods are community contributions and not yet officially suppor
108 108
 
109 109
 * [Ubuntu 12.04 and 12.10 (officially supported)](http://docs.docker.io/en/latest/installation/ubuntulinux/)
110 110
 * [Arch Linux](http://docs.docker.io/en/latest/installation/archlinux/)
111
-* [MacOS X (with Vagrant)](http://docs.docker.io/en/latest/installation/macos/)
111
+* [Mac OS X (with Vagrant)](http://docs.docker.io/en/latest/installation/vagrant/)
112 112
 * [Windows (with Vagrant)](http://docs.docker.io/en/latest/installation/windows/)
113 113
 * [Amazon EC2 (with Vagrant)](http://docs.docker.io/en/latest/installation/amazon/)
114 114
 
... ...
@@ -181,7 +181,7 @@ Running an irc bouncer
181 181
 ----------------------
182 182
 
183 183
 ```bash
184
-BOUNCER_ID=$(docker run -d -p 6667 -u irc shykes/znc $USER $PASSWORD)
184
+BOUNCER_ID=$(docker run -d -p 6667 -u irc shykes/znc zncrun $USER $PASSWORD)
185 185
 echo "Configure your irc client to connect to port $(docker port $BOUNCER_ID 6667) of this machine"
186 186
 ```
187 187
 
... ...
@@ -216,7 +216,8 @@ PORT=$(docker port $JOB 4444)
216 216
 
217 217
 # Connect to the public port via the host's public address
218 218
 # Please note that because of how routing works connecting to localhost or 127.0.0.1 $PORT will not work.
219
-IP=$(ifconfig eth0 | perl -n -e 'if (m/inet addr:([\d\.]+)/g) { print $1 }')
219
+# Replace *eth0* according to your local interface name.
220
+IP=$(ip -o -4 addr list eth0 | perl -n -e 'if (m{inet\s([\d\.]+)\/\d+\s}xms) { print $1 }')
220 221
 echo hello world | nc $IP $PORT
221 222
 
222 223
 # Verify that the network connection worked
... ...
@@ -262,14 +263,14 @@ Setting up a dev environment
262 262
 Instructions that have been verified to work on Ubuntu 12.10,
263 263
 
264 264
 ```bash
265
-sudo apt-get -y install lxc wget bsdtar curl golang git
265
+sudo apt-get -y install lxc curl xz-utils golang git
266 266
 
267 267
 export GOPATH=~/go/
268 268
 export PATH=$GOPATH/bin:$PATH
269 269
 
270 270
 mkdir -p $GOPATH/src/github.com/dotcloud
271 271
 cd $GOPATH/src/github.com/dotcloud
272
-git clone git@github.com:dotcloud/docker.git
272
+git clone https://github.com/dotcloud/docker.git
273 273
 cd docker
274 274
 
275 275
 go get -v github.com/dotcloud/docker/...
... ...
@@ -3,6 +3,7 @@
3 3
 
4 4
 BOX_NAME = ENV['BOX_NAME'] || "ubuntu"
5 5
 BOX_URI = ENV['BOX_URI'] || "http://files.vagrantup.com/precise64.box"
6
+VF_BOX_URI = ENV['VF_BOX_URI'] || "http://files.vagrantup.com/precise64_vmware_fusion.box"
6 7
 AWS_REGION = ENV['AWS_REGION'] || "us-east-1"
7 8
 AWS_AMI    = ENV['AWS_AMI']    || "ami-d0f89fb9"
8 9
 FORWARD_DOCKER_PORTS = ENV['FORWARD_DOCKER_PORTS']
... ...
@@ -67,6 +68,13 @@ Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
67 67
     rs.image    = /Ubuntu/
68 68
   end
69 69
 
70
+  config.vm.provider :vmware_fusion do |f, override|
71
+    override.vm.box = BOX_NAME
72
+    override.vm.box_url = VF_BOX_URI
73
+    override.vm.synced_folder ".", "/vagrant", disabled: true
74
+    f.vmx["displayName"] = "docker"
75
+  end
76
+
70 77
   config.vm.provider :virtualbox do |vb|
71 78
     config.vm.box = BOX_NAME
72 79
     config.vm.box_url = BOX_URI
... ...
@@ -1,7 +1,8 @@
1 1
 package docker
2 2
 
3 3
 type APIHistory struct {
4
-	ID        string `json:"Id"`
4
+	ID        string   `json:"Id"`
5
+	Tags      []string `json:",omitempty"`
5 6
 	Created   int64
6 7
 	CreatedBy string `json:",omitempty"`
7 8
 }
... ...
@@ -1,12 +1,15 @@
1 1
 package docker
2 2
 
3 3
 import (
4
+	"bufio"
4 5
 	"errors"
5 6
 	"fmt"
7
+	"github.com/dotcloud/docker/utils"
6 8
 	"io"
7 9
 	"io/ioutil"
8 10
 	"os"
9 11
 	"os/exec"
12
+	"path"
10 13
 )
11 14
 
12 15
 type Archive io.Reader
... ...
@@ -20,6 +23,37 @@ const (
20 20
 	Xz
21 21
 )
22 22
 
23
+func DetectCompression(source []byte) Compression {
24
+	for _, c := range source[:10] {
25
+		utils.Debugf("%x", c)
26
+	}
27
+
28
+	sourceLen := len(source)
29
+	for compression, m := range map[Compression][]byte{
30
+		Bzip2: {0x42, 0x5A, 0x68},
31
+		Gzip:  {0x1F, 0x8B, 0x08},
32
+		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
33
+	} {
34
+		fail := false
35
+		if len(m) > sourceLen {
36
+			utils.Debugf("Len too short")
37
+			continue
38
+		}
39
+		i := 0
40
+		for _, b := range m {
41
+			if b != source[i] {
42
+				fail = true
43
+				break
44
+			}
45
+			i++
46
+		}
47
+		if !fail {
48
+			return compression
49
+		}
50
+	}
51
+	return Uncompressed
52
+}
53
+
23 54
 func (compression *Compression) Flag() string {
24 55
 	switch *compression {
25 56
 	case Bzip2:
... ...
@@ -46,14 +80,43 @@ func (compression *Compression) Extension() string {
46 46
 	return ""
47 47
 }
48 48
 
49
+// Tar creates an archive from the directory at `path`, and returns it as a
50
+// stream of bytes.
49 51
 func Tar(path string, compression Compression) (io.Reader, error) {
50
-	cmd := exec.Command("bsdtar", "-f", "-", "-C", path, "-c"+compression.Flag(), ".")
51
-	return CmdStream(cmd)
52
+	return TarFilter(path, compression, nil)
53
+}
54
+
55
+// TarFilter creates an archive from the directory at `path`, only including files whose relative
56
+// paths are included in `filter`. If `filter` is nil, then all files are included.
57
+func TarFilter(path string, compression Compression, filter []string) (io.Reader, error) {
58
+	args := []string{"tar", "-f", "-", "-C", path}
59
+	if filter == nil {
60
+		filter = []string{"."}
61
+	}
62
+	for _, f := range filter {
63
+		args = append(args, "-c"+compression.Flag(), f)
64
+	}
65
+	return CmdStream(exec.Command(args[0], args[1:]...))
52 66
 }
53 67
 
68
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
69
+// and unpacks it into the directory at `path`.
70
+// The archive may be compressed with one of the following algorithms:
71
+//  identity (uncompressed), gzip, bzip2, xz.
72
+// FIXME: specify behavior when target path exists vs. doesn't exist.
54 73
 func Untar(archive io.Reader, path string) error {
55
-	cmd := exec.Command("bsdtar", "-f", "-", "-C", path, "-x")
56
-	cmd.Stdin = archive
74
+
75
+	bufferedArchive := bufio.NewReaderSize(archive, 10)
76
+	buf, err := bufferedArchive.Peek(10)
77
+	if err != nil {
78
+		return err
79
+	}
80
+	compression := DetectCompression(buf)
81
+
82
+	utils.Debugf("Archive compression detected: %s", compression.Extension())
83
+
84
+	cmd := exec.Command("tar", "-f", "-", "-C", path, "-x"+compression.Flag())
85
+	cmd.Stdin = bufferedArchive
57 86
 	// Hardcode locale environment for predictable outcome regardless of host configuration.
58 87
 	//   (see https://github.com/dotcloud/docker/issues/355)
59 88
 	cmd.Env = []string{"LANG=en_US.utf-8", "LC_ALL=en_US.utf-8"}
... ...
@@ -64,6 +127,86 @@ func Untar(archive io.Reader, path string) error {
64 64
 	return nil
65 65
 }
66 66
 
67
+// TarUntar is a convenience function which calls Tar and Untar, with
68
+// the output of one piped into the other. If either Tar or Untar fails,
69
+// TarUntar aborts and returns the error.
70
+func TarUntar(src string, filter []string, dst string) error {
71
+	utils.Debugf("TarUntar(%s %s %s)", src, filter, dst)
72
+	archive, err := TarFilter(src, Uncompressed, filter)
73
+	if err != nil {
74
+		return err
75
+	}
76
+	return Untar(archive, dst)
77
+}
78
+
79
+// UntarPath is a convenience function which looks for an archive
80
+// at filesystem path `src`, and unpacks it at `dst`.
81
+func UntarPath(src, dst string) error {
82
+	if archive, err := os.Open(src); err != nil {
83
+		return err
84
+	} else if err := Untar(archive, dst); err != nil {
85
+		return err
86
+	}
87
+	return nil
88
+}
89
+
90
+// CopyWithTar creates a tar archive of filesystem path `src`, and
91
+// unpacks it at filesystem path `dst`.
92
+// The archive is streamed directly with fixed buffering and no
93
+// intermediary disk IO.
94
+//
95
+func CopyWithTar(src, dst string) error {
96
+	srcSt, err := os.Stat(src)
97
+	if err != nil {
98
+		return err
99
+	}
100
+	var dstExists bool
101
+	dstSt, err := os.Stat(dst)
102
+	if err != nil {
103
+		if !os.IsNotExist(err) {
104
+			return err
105
+		}
106
+	} else {
107
+		dstExists = true
108
+	}
109
+	// Things that can go wrong if the source is a directory
110
+	if srcSt.IsDir() {
111
+		// The destination exists and is a regular file
112
+		if dstExists && !dstSt.IsDir() {
113
+			return fmt.Errorf("Can't copy a directory over a regular file")
114
+		}
115
+		// Things that can go wrong if the source is a regular file
116
+	} else {
117
+		utils.Debugf("The destination exists, it's a directory, and doesn't end in /")
118
+		// The destination exists, it's a directory, and doesn't end in /
119
+		if dstExists && dstSt.IsDir() && dst[len(dst)-1] != '/' {
120
+			return fmt.Errorf("Can't copy a regular file over a directory %s |%s|", dst, dst[len(dst)-1])
121
+		}
122
+	}
123
+	// Create the destination
124
+	var dstDir string
125
+	if srcSt.IsDir() || dst[len(dst)-1] == '/' {
126
+		// The destination ends in /, or the source is a directory
127
+		//   --> dst is the holding directory and needs to be created for -C
128
+		dstDir = dst
129
+	} else {
130
+		// The destination doesn't end in /
131
+		//   --> dst is the file
132
+		dstDir = path.Dir(dst)
133
+	}
134
+	if !dstExists {
135
+		// Create the holding directory if necessary
136
+		utils.Debugf("Creating the holding directory %s", dstDir)
137
+		if err := os.MkdirAll(dstDir, 0700); err != nil && !os.IsExist(err) {
138
+			return err
139
+		}
140
+	}
141
+	if !srcSt.IsDir() {
142
+		return TarUntar(path.Dir(src), []string{path.Base(src)}, dstDir)
143
+	}
144
+	return TarUntar(src, nil, dstDir)
145
+}
146
+
67 147
 // CmdStream executes a command, and returns its stdout as a stream.
68 148
 // If the command fails to run or doesn't complete successfully, an error
69 149
 // will be returned, including anything written on stderr.
... ...
@@ -1,10 +1,13 @@
1 1
 package docker
2 2
 
3 3
 import (
4
+	"bytes"
5
+	"fmt"
4 6
 	"io"
5 7
 	"io/ioutil"
6 8
 	"os"
7 9
 	"os/exec"
10
+	"path"
8 11
 	"testing"
9 12
 	"time"
10 13
 )
... ...
@@ -58,20 +61,58 @@ func TestCmdStreamGood(t *testing.T) {
58 58
 	}
59 59
 }
60 60
 
61
-func TestTarUntar(t *testing.T) {
62
-	archive, err := Tar(".", Uncompressed)
61
+func tarUntar(t *testing.T, origin string, compression Compression) error {
62
+	archive, err := Tar(origin, compression)
63 63
 	if err != nil {
64 64
 		t.Fatal(err)
65 65
 	}
66
+
67
+	buf := make([]byte, 10)
68
+	if _, err := archive.Read(buf); err != nil {
69
+		return err
70
+	}
71
+	archive = io.MultiReader(bytes.NewReader(buf), archive)
72
+
73
+	detectedCompression := DetectCompression(buf)
74
+	if detectedCompression.Extension() != compression.Extension() {
75
+		return fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension())
76
+	}
77
+
66 78
 	tmp, err := ioutil.TempDir("", "docker-test-untar")
67 79
 	if err != nil {
68
-		t.Fatal(err)
80
+		return err
69 81
 	}
70 82
 	defer os.RemoveAll(tmp)
71 83
 	if err := Untar(archive, tmp); err != nil {
72
-		t.Fatal(err)
84
+		return err
73 85
 	}
74 86
 	if _, err := os.Stat(tmp); err != nil {
75
-		t.Fatalf("Error stating %s: %s", tmp, err.Error())
87
+		return err
88
+	}
89
+	return nil
90
+}
91
+
92
+func TestTarUntar(t *testing.T) {
93
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
94
+	if err != nil {
95
+		t.Fatal(err)
96
+	}
97
+	defer os.RemoveAll(origin)
98
+	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
99
+		t.Fatal(err)
100
+	}
101
+	if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
102
+		t.Fatal(err)
103
+	}
104
+
105
+	for _, c := range []Compression{
106
+		Uncompressed,
107
+		Gzip,
108
+		Bzip2,
109
+		Xz,
110
+	} {
111
+		if err := tarUntar(t, origin, c); err != nil {
112
+			t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
113
+		}
76 114
 	}
77 115
 }
... ...
@@ -101,6 +101,7 @@ func (b *buildFile) CmdRun(args string) error {
101 101
 	if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
102 102
 		return err
103 103
 	} else if cache != nil {
104
+		fmt.Fprintf(b.out, " ---> Using cache\n")
104 105
 		utils.Debugf("[BUILDER] Use cached version")
105 106
 		b.image = cache.ID
106 107
 		return nil
... ...
@@ -185,6 +186,7 @@ func (b *buildFile) CmdAdd(args string) error {
185 185
 		return err
186 186
 	}
187 187
 	b.tmpContainers[container.ID] = struct{}{}
188
+	fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(container.ID))
188 189
 
189 190
 	if err := container.EnsureMounted(); err != nil {
190 191
 		return err
... ...
@@ -193,30 +195,26 @@ func (b *buildFile) CmdAdd(args string) error {
193 193
 
194 194
 	origPath := path.Join(b.context, orig)
195 195
 	destPath := path.Join(container.RootfsPath(), dest)
196
-
196
+	// Preserve the trailing '/'
197
+	if dest[len(dest)-1] == '/' {
198
+		destPath = destPath + "/"
199
+	}
197 200
 	fi, err := os.Stat(origPath)
198 201
 	if err != nil {
199 202
 		return err
200 203
 	}
201 204
 	if fi.IsDir() {
202
-		if err := os.MkdirAll(destPath, 0700); err != nil {
203
-			return err
204
-		}
205
-
206
-		files, err := ioutil.ReadDir(path.Join(b.context, orig))
207
-		if err != nil {
205
+		if err := CopyWithTar(origPath, destPath); err != nil {
208 206
 			return err
209 207
 		}
210
-		for _, fi := range files {
211
-			if err := utils.CopyDirectory(path.Join(origPath, fi.Name()), path.Join(destPath, fi.Name())); err != nil {
212
-				return err
213
-			}
214
-		}
215
-	} else {
208
+		// First try to unpack the source as an archive
209
+	} else if err := UntarPath(origPath, destPath); err != nil {
210
+		utils.Debugf("Couldn't untar %s to %s: %s", origPath, destPath, err)
211
+		// If that fails, just copy it as a regular file
216 212
 		if err := os.MkdirAll(path.Dir(destPath), 0700); err != nil {
217 213
 			return err
218 214
 		}
219
-		if err := utils.CopyDirectory(origPath, destPath); err != nil {
215
+		if err := CopyWithTar(origPath, destPath); err != nil {
220 216
 			return err
221 217
 		}
222 218
 	}
... ...
@@ -239,6 +237,7 @@ func (b *buildFile) run() (string, error) {
239 239
 		return "", err
240 240
 	}
241 241
 	b.tmpContainers[c.ID] = struct{}{}
242
+	fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(c.ID))
242 243
 
243 244
 	//start the container
244 245
 	if err := c.Start(); err != nil {
... ...
@@ -265,6 +264,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
265 265
 		if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
266 266
 			return err
267 267
 		} else if cache != nil {
268
+			fmt.Fprintf(b.out, " ---> Using cache\n")
268 269
 			utils.Debugf("[BUILDER] Use cached version")
269 270
 			b.image = cache.ID
270 271
 			return nil
... ...
@@ -278,6 +278,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
278 278
 			return err
279 279
 		}
280 280
 		b.tmpContainers[container.ID] = struct{}{}
281
+		fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(container.ID))
281 282
 
282 283
 		if err := container.EnsureMounted(); err != nil {
283 284
 			return err
... ...
@@ -318,6 +319,7 @@ func (b *buildFile) Build(dockerfile, context io.Reader) (string, error) {
318 318
 		b.context = name
319 319
 	}
320 320
 	file := bufio.NewReader(dockerfile)
321
+	stepN := 0
321 322
 	for {
322 323
 		line, err := file.ReadString('\n')
323 324
 		if err != nil {
... ...
@@ -338,12 +340,13 @@ func (b *buildFile) Build(dockerfile, context io.Reader) (string, error) {
338 338
 		}
339 339
 		instruction := strings.ToLower(strings.Trim(tmp[0], " "))
340 340
 		arguments := strings.Trim(tmp[1], " ")
341
-
342
-		fmt.Fprintf(b.out, "%s %s (%s)\n", strings.ToUpper(instruction), arguments, b.image)
341
+		stepN += 1
342
+		// FIXME: only count known instructions as build steps
343
+		fmt.Fprintf(b.out, "Step %d : %s %s\n", stepN, strings.ToUpper(instruction), arguments)
343 344
 
344 345
 		method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:]))
345 346
 		if !exists {
346
-			fmt.Fprintf(b.out, "Skipping unknown instruction %s\n", strings.ToUpper(instruction))
347
+			fmt.Fprintf(b.out, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction))
347 348
 			continue
348 349
 		}
349 350
 		ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
... ...
@@ -351,10 +354,10 @@ func (b *buildFile) Build(dockerfile, context io.Reader) (string, error) {
351 351
 			return "", ret.(error)
352 352
 		}
353 353
 
354
-		fmt.Fprintf(b.out, "===> %v\n", b.image)
354
+		fmt.Fprintf(b.out, " ---> %v\n", utils.TruncateID(b.image))
355 355
 	}
356 356
 	if b.image != "" {
357
-		fmt.Fprintf(b.out, "Build successful.\n===> %s\n", b.image)
357
+		fmt.Fprintf(b.out, "Successfully built %s\n", utils.TruncateID(b.image))
358 358
 		return b.image, nil
359 359
 	}
360 360
 	return "", fmt.Errorf("An error occured during the build\n")
... ...
@@ -23,6 +23,12 @@ from   ` + unitTestImageName + `
23 23
 run    sh -c 'echo root:testpass > /tmp/passwd'
24 24
 run    mkdir -p /var/run/sshd`
25 25
 
26
+// FIXME: test building with a context
27
+
28
+// FIXME: test building with a local ADD as first command
29
+
30
+// FIXME: test building with 2 successive overlapping ADD commands
31
+
26 32
 func TestBuild(t *testing.T) {
27 33
 	dockerfiles := []string{Dockerfile, DockerfileNoNewLine}
28 34
 	for _, Dockerfile := range dockerfiles {
... ...
@@ -29,7 +29,7 @@ import (
29 29
 	"unicode"
30 30
 )
31 31
 
32
-const VERSION = "0.4.0"
32
+const VERSION = "0.4.2"
33 33
 
34 34
 var (
35 35
 	GITCOMMIT string
... ...
@@ -627,7 +627,10 @@ func (cli *DockerCli) CmdHistory(args ...string) error {
627 627
 	fmt.Fprintln(w, "ID\tCREATED\tCREATED BY")
628 628
 
629 629
 	for _, out := range outs {
630
-		fmt.Fprintf(w, "%s\t%s ago\t%s\n", out.ID, utils.HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))), out.CreatedBy)
630
+		if out.Tags != nil {
631
+			out.ID = out.Tags[0]
632
+		}
633
+		fmt.Fprintf(w, "%s \t%s ago\t%s\n", out.ID, utils.HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))), out.CreatedBy)
631 634
 	}
632 635
 	w.Flush()
633 636
 	return nil
... ...
@@ -1058,6 +1061,10 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
1058 1058
 		return err
1059 1059
 	}
1060 1060
 
1061
+	if !container.State.Running {
1062
+		return fmt.Errorf("Impossible to attach to a stopped container, start it first")
1063
+	}
1064
+
1061 1065
 	splitStderr := container.Config.Tty
1062 1066
 
1063 1067
 	connections := 1
... ...
@@ -1257,16 +1264,6 @@ func (cli *DockerCli) CmdRun(args ...string) error {
1257 1257
 		fmt.Fprintln(os.Stderr, "WARNING: ", warning)
1258 1258
 	}
1259 1259
 
1260
-	splitStderr := !config.Tty
1261
-
1262
-	connections := 0
1263
-	if config.AttachStdin || config.AttachStdout || (!splitStderr && config.AttachStderr) {
1264
-		connections += 1
1265
-	}
1266
-	if splitStderr && config.AttachStderr {
1267
-		connections += 1
1268
-	}
1269
-
1270 1260
 	//start the container
1271 1261
 	_, _, err = cli.call("POST", "/containers/"+out.ID+"/start", nil)
1272 1262
 	if err != nil {
... ...
@@ -1275,19 +1272,11 @@ func (cli *DockerCli) CmdRun(args ...string) error {
1275 1275
 
1276 1276
 	if !config.AttachStdout && !config.AttachStderr {
1277 1277
 		fmt.Println(out.ID)
1278
-	}
1279
-	if connections > 0 {
1280
-		chErrors := make(chan error, connections)
1278
+	} else {
1281 1279
 		if config.Tty {
1282 1280
 			cli.monitorTtySize(out.ID)
1283 1281
 		}
1284 1282
 
1285
-		if splitStderr && config.AttachStderr {
1286
-			go func() {
1287
-				chErrors <- cli.hijack("POST", "/containers/"+out.ID+"/attach?logs=1&stream=1&stderr=1", config.Tty, nil, os.Stderr)
1288
-			}()
1289
-		}
1290
-
1291 1283
 		v := url.Values{}
1292 1284
 		v.Set("logs", "1")
1293 1285
 		v.Set("stream", "1")
... ...
@@ -1298,19 +1287,12 @@ func (cli *DockerCli) CmdRun(args ...string) error {
1298 1298
 		if config.AttachStdout {
1299 1299
 			v.Set("stdout", "1")
1300 1300
 		}
1301
-		if !splitStderr && config.AttachStderr {
1301
+		if config.AttachStderr {
1302 1302
 			v.Set("stderr", "1")
1303 1303
 		}
1304
-		go func() {
1305
-			chErrors <- cli.hijack("POST", "/containers/"+out.ID+"/attach?"+v.Encode(), config.Tty, os.Stdin, os.Stdout)
1306
-		}()
1307
-		for connections > 0 {
1308
-			err := <-chErrors
1309
-			if err != nil {
1310
-				utils.Debugf("Error hijack: %s", err)
1311
-				return err
1312
-			}
1313
-			connections -= 1
1304
+		if err := cli.hijack("POST", "/containers/"+out.ID+"/attach?"+v.Encode(), config.Tty, os.Stdin, os.Stdout); err != nil {
1305
+			utils.Debugf("Error hijack: %s", err)
1306
+			return err
1314 1307
 		}
1315 1308
 	}
1316 1309
 	return nil
... ...
@@ -8,7 +8,7 @@
8 8
 
9 9
 echo "Ensuring basic dependencies are installed..."
10 10
 apt-get -qq update
11
-apt-get -qq install lxc wget bsdtar
11
+apt-get -qq install lxc wget
12 12
 
13 13
 echo "Looking in /proc/filesystems to see if we have AUFS support..."
14 14
 if grep -q aufs /proc/filesystems
... ...
@@ -691,6 +691,7 @@ Get the history of an image
691 691
 	   [
692 692
 		{
693 693
 			"Id":"b750fe79269d",
694
+			"Tags":["base:latest"],
694 695
 			"Created":1364102658,
695 696
 			"CreatedBy":"/bin/bash"
696 697
 		},
697 698
deleted file mode 100644
... ...
@@ -1,8 +0,0 @@
1
-:title: Introduction
2
-:description: An introduction to docker and standard containers?
3
-:keywords: containers, lxc, concepts, explanation, docker, documentation
4
-
5
-
6
-:note: This version of the introduction is temporary, just to make sure we don't break the links from the website when the documentation is updated
7
-
8
-This document has been moved to  :ref:`introduction`, please update your bookmarks.
9 1
\ No newline at end of file
10 2
deleted file mode 100644
... ...
@@ -1,125 +0,0 @@
1
-:title: Introduction
2
-:description: An introduction to docker and standard containers?
3
-:keywords: containers, lxc, concepts, explanation
4
-
5
-Introduction
6
-============
7
-
8
-Docker -- The Linux container runtime
9
-
10
-Docker complements LXC with a high-level API which operates at the process level. It runs unix processes with strong guarantees of isolation and repeatability across servers.
11
-
12
-Docker is a great building block for automating distributed systems: large-scale web deployments, database clusters, continuous deployment systems, private PaaS, service-oriented architectures, etc.
13
-
14
-
15
-- **Heterogeneous payloads** Any combination of binaries, libraries, configuration files, scripts, virtualenvs, jars, gems, tarballs, you name it. No more juggling between domain-specific tools. Docker can deploy and run them all.
16
-- **Any server** Docker can run on any x64 machine with a modern linux kernel - whether it's a laptop, a bare metal server or a VM. This makes it perfect for multi-cloud deployments.
17
-- **Isolation** docker isolates processes from each other and from the underlying host, using lightweight containers.
18
-- **Repeatability** Because containers are isolated in their own filesystem, they behave the same regardless of where, when, and alongside what they run.
19
-
20
-.. image:: images/lego_docker.jpg
21
-
22
-
23
-What is a Standard Container?
24
-
25
-Docker defines a unit of software delivery called a Standard Container. The goal of a Standard Container is to encapsulate a software component and all its dependencies in
26
-a format that is self-describing and portable, so that any compliant runtime can run it without extra dependency, regardless of the underlying machine and the contents of the container.
27
-
28
-The spec for Standard Containers is currently work in progress, but it is very straightforward. It mostly defines 1) an image format, 2) a set of standard operations, and 3) an execution environment.
29
-
30
-A great analogy for this is the shipping container. Just like Standard Containers are a fundamental unit of software delivery, shipping containers (http://bricks.argz.com/ins/7823-1/12) are a fundamental unit of physical delivery.
31
-
32
-Standard operations
33
-~~~~~~~~~~~~~~~~~~~
34
-
35
-Just like shipping containers, Standard Containers define a set of STANDARD OPERATIONS. Shipping containers can be lifted, stacked, locked, loaded, unloaded and labelled. Similarly, standard containers can be started, stopped, copied, snapshotted, downloaded, uploaded and tagged.
36
-
37
-
38
-Content-agnostic
39
-~~~~~~~~~~~~~~~~~~~
40
-
41
-Just like shipping containers, Standard Containers are CONTENT-AGNOSTIC: all standard operations have the same effect regardless of the contents. A shipping container will be stacked in exactly the same way whether it contains Vietnamese powder coffee or spare Maserati parts. Similarly, Standard Containers are started or uploaded in the same way whether they contain a postgres database, a php application with its dependencies and application server, or Java build artifacts.
42
-
43
-
44
-Infrastructure-agnostic
45
-~~~~~~~~~~~~~~~~~~~~~~~~~~
46
-
47
-Both types of containers are INFRASTRUCTURE-AGNOSTIC: they can be transported to thousands of facilities around the world, and manipulated by a wide variety of equipment. A shipping container can be packed in a factory in Ukraine, transported by truck to the nearest routing center, stacked onto a train, loaded into a German boat by an Australian-built crane, stored in a warehouse at a US facility, etc. Similarly, a standard container can be bundled on my laptop, uploaded to S3, downloaded, run and snapshotted by a build server at Equinix in Virginia, uploaded to 10 staging servers in a home-made Openstack cluster, then sent to 30 production instances across 3 EC2 regions.
48
-
49
-
50
-Designed for automation
51
-~~~~~~~~~~~~~~~~~~~~~~~~~~
52
-
53
-Because they offer the same standard operations regardless of content and infrastructure, Standard Containers, just like their physical counterpart, are extremely well-suited for automation. In fact, you could say automation is their secret weapon.
54
-
55
-Many things that once required time-consuming and error-prone human effort can now be programmed. Before shipping containers, a bag of powder coffee was hauled, dragged, dropped, rolled and stacked by 10 different people in 10 different locations by the time it reached its destination. 1 out of 50 disappeared. 1 out of 20 was damaged. The process was slow, inefficient and cost a fortune - and was entirely different depending on the facility and the type of goods.
56
-
57
-Similarly, before Standard Containers, by the time a software component ran in production, it had been individually built, configured, bundled, documented, patched, vendored, templated, tweaked and instrumented by 10 different people on 10 different computers. Builds failed, libraries conflicted, mirrors crashed, post-it notes were lost, logs were misplaced, cluster updates were half-broken. The process was slow, inefficient and cost a fortune - and was entirely different depending on the language and infrastructure provider.
58
-
59
-
60
-Industrial-grade delivery
61
-~~~~~~~~~~~~~~~~~~~~~~~~~~
62
-
63
-There are 17 million shipping containers in existence, packed with every physical good imaginable. Every single one of them can be loaded on the same boats, by the same cranes, in the same facilities, and sent anywhere in the World with incredible efficiency. It is embarrassing to think that a 30 ton shipment of coffee can safely travel half-way across the World in *less time* than it takes a software team to deliver its code from one datacenter to another sitting 10 miles away.
64
-
65
-With Standard Containers we can put an end to that embarrassment, by making INDUSTRIAL-GRADE DELIVERY of software a reality.
66
-
67
-
68
-Standard Container Specification
69
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
70
-
71
-(TODO)
72
-
73
-Image format
74
-~~~~~~~~~~~~
75
-
76
-Standard operations
77
-~~~~~~~~~~~~~~~~~~~
78
-
79
--  Copy
80
--  Run
81
--  Stop
82
--  Wait
83
--  Commit
84
--  Attach standard streams
85
--  List filesystem changes
86
--  ...
87
-
88
-Execution environment
89
-~~~~~~~~~~~~~~~~~~~~~
90
-
91
-Root filesystem
92
-^^^^^^^^^^^^^^^
93
-
94
-Environment variables
95
-^^^^^^^^^^^^^^^^^^^^^
96
-
97
-Process arguments
98
-^^^^^^^^^^^^^^^^^
99
-
100
-Networking
101
-^^^^^^^^^^
102
-
103
-Process namespacing
104
-^^^^^^^^^^^^^^^^^^^
105
-
106
-Resource limits
107
-^^^^^^^^^^^^^^^
108
-
109
-Process monitoring
110
-^^^^^^^^^^^^^^^^^^
111
-
112
-Logging
113
-^^^^^^^
114
-
115
-Signals
116
-^^^^^^^
117
-
118
-Pseudo-terminal allocation
119
-^^^^^^^^^^^^^^^^^^^^^^^^^^
120
-
121
-Security
122
-^^^^^^^^
123
-
... ...
@@ -20,6 +20,20 @@ import sys, os
20 20
 
21 21
 # -- General configuration -----------------------------------------------------
22 22
 
23
+
24
+
25
+# Additional templates that should be rendered to pages, maps page names to
26
+# template names.
27
+# the 'redirect_home.html' page redirects using a http meta refresh which, according
28
+# to official sources is more or less equivalent to a 301.
29
+
30
+html_additional_pages = {
31
+    'concepts/containers': 'redirect_home.html',
32
+    'concepts/introduction': 'redirect_home.html',
33
+    }
34
+
35
+
36
+
23 37
 # If your documentation needs a minimal Sphinx version, state it here.
24 38
 #needs_sphinx = '1.0'
25 39
 
... ...
@@ -120,7 +134,11 @@ html_theme_path = ['../theme']
120 120
 # The name of an image file (within the static path) to use as favicon of the
121 121
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
122 122
 # pixels large.
123
-#html_favicon = None
123
+
124
+# We use a png favicon. This is not compatible with internet explorer, but looks
125
+# much better on all other browsers. However, sphynx doesn't like it (it likes
126
+# .ico better) so we have just put it in the template rather than used this setting
127
+# html_favicon = 'favicon.png'
124 128
 
125 129
 # Add any paths that contain custom static files (such as style sheets) here,
126 130
 # relative to this directory. They are copied after the builtin static files,
... ...
@@ -138,10 +156,6 @@ html_static_path = ['static_files']
138 138
 # Custom sidebar templates, maps document names to template names.
139 139
 #html_sidebars = {}
140 140
 
141
-# Additional templates that should be rendered to pages, maps page names to
142
-# template names.
143
-#html_additional_pages = {}
144
-
145 141
 # If false, no module index is generated.
146 142
 #html_domain_indices = True
147 143
 
... ...
@@ -33,7 +33,7 @@ Installation
33 33
     sudo apt-get install python-software-properties
34 34
     sudo add-apt-repository ppa:gophers/go
35 35
     sudo apt-get update
36
-    sudo apt-get -y install lxc wget bsdtar curl golang-stable git aufs-tools
36
+    sudo apt-get -y install lxc xz-utils curl golang-stable git aufs-tools
37 37
 
38 38
     export GOPATH=~/go/
39 39
     export PATH=$GOPATH/bin:$PATH
... ...
@@ -72,7 +72,7 @@ Connect to the host os with the redis-cli.
72 72
 
73 73
     docker ps  # grab the new container id
74 74
     docker port <container_id> 6379  # grab the external port
75
-    ifconfig   # grab the host ip address
75
+    ip addr show   # grab the host ip address
76 76
     redis-cli -h <host ipaddress> -p <external port>
77 77
     redis 192.168.0.1:49153> set docker awesome
78 78
     OK
... ...
@@ -59,6 +59,7 @@ The password is 'screencast'
59 59
 	 # it has now given us a port to connect to
60 60
 	 # we have to connect using a public ip of our host
61 61
 	 $ hostname
62
+	 # *ifconfig* is deprecated, better use *ip addr show* now
62 63
 	 $ ifconfig
63 64
 	 $ ssh root@192.168.33.10 -p 49153
64 65
 	 # Ah! forgot to set root passwd
... ...
@@ -70,6 +71,7 @@ The password is 'screencast'
70 70
 	 $ docker commit 9e863f0ca0af31c8b951048ba87641d67c382d08d655c2e4879c51410e0fedc1 dhrp/sshd
71 71
 	 $ docker run -d -p 22 dhrp/sshd /usr/sbin/sshd -D
72 72
 	 $ docker port a0aaa9558c90cf5c7782648df904a82365ebacce523e4acc085ac1213bfe2206 22
73
+	 # *ifconfig* is deprecated, better use *ip addr show* now
73 74
 	 $ ifconfig
74 75
 	 $ ssh root@192.168.33.10 -p 49154
75 76
 	 # Thanks for watching, Thatcher thatcher@dotcloud.com
76 77
deleted file mode 100644
... ...
@@ -1,27 +0,0 @@
1
-:title: Index Environment Variable
2
-:description: Setting this environment variable on the docker server will change the URL docker index.
3
-:keywords: docker, index environment variable, documentation 
4
-
5
-=================================
6
-Docker Index Environment Variable
7
-=================================
8
-
9
-Variable
10
-
11
-.. code-block:: sh
12
-
13
-    DOCKER_INDEX_URL
14
-
15
-Setting this environment variable on the docker server will change the URL docker index.
16
-This address is used in commands such as ``docker login``, ``docker push`` and ``docker pull``.
17
-The docker daemon doesn't need to be restarted for this parameter to take effect.
18
-
19
-Example
20
-
21
-.. code-block:: sh
22
-
23
-    docker -d &
24
-    export DOCKER_INDEX_URL="https://index.docker.io"
25
-
... ...
@@ -30,8 +30,7 @@ Dependencies:
30 30
 * 3.8 Kernel (read more about :ref:`kernel`)
31 31
 * AUFS filesystem support
32 32
 * lxc
33
-* bsdtar
34
-
33
+* xz-utils
35 34
 
36 35
 Get the docker binary:
37 36
 ----------------------
... ...
@@ -82,7 +82,8 @@ Expose a service on a TCP port
82 82
 
83 83
   # Connect to the public port via the host's public address
84 84
   # Please note that because of how routing works connecting to localhost or 127.0.0.1 $PORT will not work.
85
-  IP=$(ifconfig eth0 | perl -n -e 'if (m/inet addr:([\d\.]+)/g) { print $1 }')
85
+  # Replace *eth0* according to your local interface name.
86
+  IP=$(ip -o -4 addr list eth0 | perl -n -e 'if (m{inet\s([\d\.]+)\/\d+\s}xms) { print $1 }')
86 87
   echo hello world | nc $IP $PORT
87 88
 
88 89
   # Verify that the network connection worked
... ...
@@ -15,10 +15,18 @@ steps and commit them along the way, giving you a final image.
15 15
 1. Usage
16 16
 ========
17 17
 
18
-To use Docker Builder, assemble the steps into a text file (commonly referred to
19
-as a Dockerfile) and supply this to `docker build` on STDIN, like so:
18
+To build an image from a source repository, create a description file called `Dockerfile`
19
+at the root of your repository. This file will describe the steps to assemble
20
+the image.
20 21
 
21
-    ``docker build - < Dockerfile``
22
+Then call `docker build` with the path of your source repository as argument:
23
+
24
+    ``docker build .``
25
+
26
+You can specify a repository and tag at which to save the new image if the
27
+build succeeds:
28
+
29
+    ``docker build -t shykes/myapp .``
22 30
 
23 31
 Docker will run your steps one-by-one, committing the result if necessary, 
24 32
 before finally outputting the ID of your new image.
... ...
@@ -130,9 +138,32 @@ curl was installed within the image.
130 130
 
131 131
     ``ADD <src> <dest>``
132 132
 
133
-The `ADD` instruction will insert the files from the `<src>` path of the context into `<dest>` path 
134
-of the container.
135
-The context must be set in order to use this instruction. (see examples)
133
+The `ADD` instruction will copy new files from <src> and add them to the container's filesystem at path `<dest>`.
134
+
135
+`<src>` must be the path to a file or directory relative to the source directory being built (also called the
136
+context of the build).
137
+
138
+`<dest>` is the path at which the source will be copied in the destination container.
139
+
140
+The copy obeys the following rules:
141
+
142
+If `<src>` is a directory, the entire directory is copied, including filesystem metadata.
143
+
144
+If `<src>` is a tar archive in a recognized compression format (identity, gzip, bzip2 or xz), it
145
+is unpacked as a directory.
146
+
147
+When a directory is copied or unpacked, it has the same behavior as 'tar -x': the result is the union of
148
+a) whatever existed at the destination path and b) the contents of the source tree, with conflicts resolved
149
+in favor of b on a file-by-file basis.
150
+
151
+If `<src>` is any other kind of file, it is copied individually along with its metadata. In this case,
152
+if `<dst>` ends with a trailing slash '/', it will be considered a directory and the contents of `<src>`
153
+will be written at `<dst>/base(<src>)`.
154
+If `<dst>` does not end with a trailing slash, it will be considered a regular file and the contents
155
+of `<src>` will be written at `<dst>`.
156
+
157
+If `<dest>` doesn't exist, it is created along with all missing directories in its path. All new
158
+files and directories are created with mode 0700, uid and gid 0.
136 159
 
137 160
 3. Dockerfile Examples
138 161
 ======================
... ...
@@ -14,6 +14,7 @@ Contents:
14 14
 
15 15
    basics
16 16
    workingwithrepository
17
+   port_redirection
17 18
    builder
18 19
    puppet
19 20
 
20 21
new file mode 100644
... ...
@@ -0,0 +1,25 @@
0
+:title: Port redirection
1
+:description: usage of port redirection
2
+:keywords: Usage, basic port, docker, documentation, examples
3
+
4
+
5
+Port redirection
6
+================
7
+
8
+Docker can redirect public tcp ports to your container, so it can be reached over the network.
9
+Port redirection is done on ``docker run`` using the -p flag.
10
+
11
+A port redirect is specified as PUBLIC:PRIVATE, where tcp port PUBLIC will be redirected to
12
+tcp port PRIVATE. As a special case, the public port can be omitted, in which case a random
13
+public port will be allocated.
14
+
15
+.. code-block:: bash
16
+
17
+    # A random PUBLIC port is redirected to PRIVATE port 80 on the container
18
+    docker run -p 80 <image> <cmd>
19
+
20
+    # PUBLIC port 80 is redirected to PRIVATE port 80
21
+    docker run -p 80:80 <image> <cmd>
22
+
23
+
24
+Default port redirects can be built into a container with the EXPOSE build command.
... ...
@@ -77,3 +77,28 @@ Now you can commit this image to the repository
77 77
     # for example docker push dhrp/kickassapp
78 78
     docker push <image-name>
79 79
 
80
+
81
+Changing the server to connect to
82
+----------------------------------
83
+
84
+When you are running your own index and/or registry, you can change the server the docker client will connect to.
85
+
86
+Variable
87
+^^^^^^^^
88
+
89
+.. code-block:: sh
90
+
91
+    DOCKER_INDEX_URL
92
+
93
+Setting this environment variable on the docker server will change the URL of the docker index.
94
+This address is used in commands such as ``docker login``, ``docker push`` and ``docker pull``.
95
+The docker daemon doesn't need to be restarted for this parameter to take effect.
96
+
97
+Example
98
+^^^^^^^
99
+
100
+.. code-block:: sh
101
+
102
+    docker -d &
103
+    export DOCKER_INDEX_URL="https://index.docker.io"
104
+
... ...
@@ -40,6 +40,8 @@
40 40
 
41 41
     {%- set script_files = script_files + ['_static/js/docs.js'] %}
42 42
 
43
+    <link rel="canonical" href="http://docs.docker.io/en/latest/{{ pagename }}/">
44
+
43 45
     {%- for cssfile in css_files %}
44 46
     <link rel="stylesheet" href="{{ pathto(cssfile, 1) }}" type="text/css" />
45 47
     {%- endfor %}
... ...
@@ -48,9 +50,8 @@
48 48
     <script type="text/javascript" src="{{ pathto(scriptfile, 1) }}"></script>
49 49
     {%- endfor %}
50 50
 
51
-    {%- if favicon %}
52
-    <link rel="shortcut icon" href="{{ pathto('_static/' + favicon, 1) }}"/>
53
-    {%- endif %}
51
+    <link rel="shortcut icon" href="{{ pathto('_static/favicon.png', 1) }}"/>
52
+
54 53
 
55 54
     {%- block extrahead %}{% endblock %}
56 55
 
... ...
@@ -104,11 +105,8 @@
104 104
     <!-- Docs nav
105 105
      ================================================== -->
106 106
     <div class="row" style="position: relative">
107
-        <div class="span3" style="height:100%;" >
108
-
109
-        </div>
110 107
 
111
-        <div class="span3 sidebar bs-docs-sidebar" style="position: absolute">
108
+        <div class="span3 sidebar bs-docs-sidebar">
112 109
             {{ toctree(collapse=False, maxdepth=3) }}
113 110
         </div>
114 111
 
115 112
new file mode 100644
... ...
@@ -0,0 +1,12 @@
0
+<!DOCTYPE html>
1
+<html>
2
+<head>
3
+    <title>Page Moved</title>
4
+    <meta http-equiv="refresh" content="0; url=http://docs.docker.io/en/latest/">
5
+</head>
6
+<body>
7
+
8
+This page has moved. Perhaps you should visit the <a href="http://docs.docker.io/" title="documentation homepage">Documentation Homepage</a>.
9
+
10
+</body>
11
+</html>
... ...
@@ -168,10 +168,13 @@ section.header {
168 168
 .sidebar {
169 169
   font-weight: normal;
170 170
   float: left;
171
-  min-height: 475px;
171
+  /*  min-height: 475px;*/
172
+
172 173
   background: #ececec;
173
-  border-left: 1px solid #bbbbbb;
174
-  border-right: 1px solid #cccccc;
174
+  /*  border-left: 1px solid #bbbbbb;*/
175
+
176
+  /*  border-right: 1px solid #cccccc;*/
177
+
175 178
   position: relative;
176 179
 }
177 180
 .sidebar ul {
... ...
@@ -357,7 +360,6 @@ section.header {
357 357
   #global {
358 358
     /* TODO: Fix this to be relative to the navigation size */
359 359
   
360
-    padding-top: 600px;
361 360
   }
362 361
   #fork-us {
363 362
     display: none;
... ...
@@ -226,20 +226,21 @@ section.header {
226 226
 
227 227
 }
228 228
 
229
+  .sidebar {
230
+  //  font-family: "Maven Pro";
231
+    font-weight: normal;
232
+  //  margin-top: 38px;
233
+    float: left;
234
+  //  width: 220px;
235
+  /*  min-height: 475px;*/
236
+  //  margin-bottom: 28px;
237
+  //  padding-bottom: 120px;
238
+    background: #ececec;
239
+  /*  border-left: 1px solid #bbbbbb;*/
240
+  /*  border-right: 1px solid #cccccc;*/
241
+    position: relative;
242
+
229 243
 
230
-.sidebar {
231
-//  font-family: "Maven Pro";
232
-  font-weight: normal;
233
-//  margin-top: 38px;
234
-  float: left;
235
-//  width: 220px;
236
-  min-height: 475px;
237
-//  margin-bottom: 28px;
238
-//  padding-bottom: 120px;
239
-  background: #ececec;
240
-  border-left: 1px solid #bbbbbb;
241
-  border-right: 1px solid #cccccc;
242
-  position: relative;
243 244
 
244 245
   ul {
245 246
     padding: 0px;
... ...
@@ -471,7 +472,7 @@ section.header {
471 471
   }
472 472
   #global {
473 473
   /* TODO: Fix this to be relative to the navigation size */
474
-    padding-top: 600px;
474
+//    padding-top: 600px;
475 475
   }
476 476
   #fork-us {
477 477
     display: none;
478 478
deleted file mode 100755
479 479
Binary files a/docs/theme/docker/static/favicon.ico and /dev/null differ
480 480
new file mode 100644
481 481
Binary files /dev/null and b/docs/theme/docker/static/favicon.png differ
... ...
@@ -192,11 +192,19 @@ func TestDelete(t *testing.T) {
192 192
 	}
193 193
 	assertNImages(graph, t, 0)
194 194
 
195
+	archive, err = fakeTar()
196
+	if err != nil {
197
+		t.Fatal(err)
198
+	}
195 199
 	// Test 2 create (same name) / 1 delete
196 200
 	img1, err := graph.Create(archive, nil, "Testing", "", nil)
197 201
 	if err != nil {
198 202
 		t.Fatal(err)
199 203
 	}
204
+	archive, err = fakeTar()
205
+	if err != nil {
206
+		t.Fatal(err)
207
+	}
200 208
 	if _, err = graph.Create(archive, nil, "Testing", "", nil); err != nil {
201 209
 		t.Fatal(err)
202 210
 	}
... ...
@@ -212,6 +220,10 @@ func TestDelete(t *testing.T) {
212 212
 	}
213 213
 	assertNImages(graph, t, 1)
214 214
 
215
+	archive, err = fakeTar()
216
+	if err != nil {
217
+		t.Fatal(err)
218
+	}
215 219
 	// Test delete twice (pull -> rm -> pull -> rm)
216 220
 	if err := graph.Register(archive, false, img1); err != nil {
217 221
 		t.Fatal(err)
... ...
@@ -22,7 +22,7 @@ Vagrant::Config.run do |config|
22 22
   pkg_cmd = "touch #{DOCKER_PATH}; "
23 23
   # Install docker dependencies
24 24
   pkg_cmd << "export DEBIAN_FRONTEND=noninteractive; apt-get -qq update; " \
25
-    "apt-get install -q -y lxc bsdtar git aufs-tools golang make linux-image-extra-3.8.0-19-generic; " \
25
+    "apt-get install -q -y lxc git aufs-tools golang make linux-image-extra-3.8.0-19-generic; " \
26 26
     "chown -R #{USER}.#{USER} #{GOPATH}; " \
27 27
     "install -m 0664 #{CFG_PATH}/bash_profile /home/#{USER}/.bash_profile"
28 28
   config.vm.provision :shell, :inline => pkg_cmd
... ...
@@ -1,5 +1,13 @@
1
-# This will build a container capable of producing an official binary build of docker and
2
-# uploading it to S3
1
+# DESCRIPTION     Build a container capable of producing official binary and
2
+#                 PPA packages and uploading them to S3 and Launchpad
3
+# VERSION         1.2
4
+# DOCKER_VERSION  0.4
5
+# AUTHOR          Solomon Hykes <solomon@dotcloud.com>
6
+#                 Daniel Mizyrycki <daniel@dotcloud.net>
7
+# BUILD_CMD       docker build -t dockerbuilder .
8
+# RUN_CMD         docker run -e AWS_ID="$AWS_ID" -e AWS_KEY="$AWS_KEY" -e GPG_KEY="$GPG_KEY" dockerbuilder
9
+#
10
+#
3 11
 from	ubuntu:12.04
4 12
 maintainer	Solomon Hykes <solomon@dotcloud.com>
5 13
 # Workaround the upstart issue
... ...
@@ -8,7 +16,7 @@ run ln -s /bin/true /sbin/initctl
8 8
 # Enable universe and gophers PPA
9 9
 run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q python-software-properties
10 10
 run	add-apt-repository "deb http://archive.ubuntu.com/ubuntu $(lsb_release -sc) universe"
11
-run	add-apt-repository -y ppa:gophers/go/ubuntu
11
+run	add-apt-repository -y ppa:dotcloud/docker-golang/ubuntu
12 12
 run	apt-get update
13 13
 # Packages required to checkout, build and upload docker
14 14
 run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q s3cmd
... ...
@@ -67,7 +67,11 @@ lxc.cgroup.devices.allow = c 10:200 rwm
67 67
 
68 68
 
69 69
 # standard mount point
70
+#  WARNING: procfs is a known attack vector and should probably be disabled
71
+#           if your userspace allows it. eg. see http://blog.zx2c4.com/749
70 72
 lxc.mount.entry = proc {{$ROOTFS}}/proc proc nosuid,nodev,noexec 0 0
73
+#  WARNING: sysfs is a known attack vector and should probably be disabled
74
+#           if your userspace allows it. eg. see http://bit.ly/T9CkqJ
71 75
 lxc.mount.entry = sysfs {{$ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0
72 76
 lxc.mount.entry = devpts {{$ROOTFS}}/dev/pts devpts newinstance,ptmxmode=0666,nosuid,noexec 0 0
73 77
 #lxc.mount.entry = varrun {{$ROOTFS}}/var/run tmpfs mode=755,size=4096k,nosuid,nodev,noexec 0 0
... ...
@@ -86,6 +90,9 @@ lxc.mount.entry = {{$realPath}} {{$ROOTFS}}/{{$virtualPath}} none bind,rw 0 0
86 86
 {{end}}
87 87
 
88 88
 # drop linux capabilities (apply mainly to the user root in the container)
89
+#  (Note: 'lxc.cap.keep' is coming soon and should replace this under the
90
+#         security principle 'deny all unless explicitly permitted', see
91
+#         http://sourceforge.net/mailarchive/message.php?msg_id=31054627 )
89 92
 lxc.cap.drop = audit_control audit_write mac_admin mac_override mknod setfcap setpcap sys_admin sys_boot sys_module sys_nice sys_pacct sys_rawio sys_resource sys_time sys_tty_config
90 93
 
91 94
 # limits
... ...
@@ -1,6 +1,6 @@
1 1
 # Ubuntu package Makefile
2 2
 #
3
-# Dependencies:  debhelper autotools-dev devscripts golang
3
+# Dependencies:  debhelper autotools-dev devscripts golang-stable
4 4
 # Notes:
5 5
 # Use 'make ubuntu' to create the ubuntu package
6 6
 # GPG_KEY environment variable needs to contain a GPG private key for package to be signed
... ...
@@ -9,12 +9,9 @@
9 9
 # status code 2
10 10
 
11 11
 PKG_NAME=lxc-docker
12
-VERSION=$(shell head -1 changelog | sed 's/^.\+(\(.\+\)..).\+$$/\1/')
13 12
 GITHUB_PATH=github.com/dotcloud/docker
14
-DOCKER_VERSION=${PKG_NAME}_${VERSION}
15
-DOCKER_FVERSION=${PKG_NAME}_$(shell head -1 changelog | sed 's/^.\+(\(.\+\)).\+$$/\1/')
16 13
 BUILD_SRC=${CURDIR}/../../build_src
17
-VERSION_TAG=v$(shell head -1 changelog | sed 's/^.\+(\(.\+\)-[0-9]\+).\+$$/\1/')
14
+VERSION=$(shell sed -En '0,/^\#\# /{s/^\#\# ([^ ]+).+/\1/p}' ../../CHANGELOG.md)
18 15
 
19 16
 all:
20 17
 	# Compile docker. Used by dpkg-buildpackage.
... ...
@@ -35,18 +32,19 @@ ubuntu:
35 35
 	# Retrieve docker project and its go structure from internet
36 36
 	rm -rf ${BUILD_SRC}
37 37
 	git clone $(shell git rev-parse --show-toplevel) ${BUILD_SRC}/${GITHUB_PATH}
38
-	cd ${BUILD_SRC}/${GITHUB_PATH}; git checkout ${VERSION_TAG} && GOPATH=${BUILD_SRC} go get -d
38
+	cd ${BUILD_SRC}/${GITHUB_PATH}; git checkout v${VERSION} && GOPATH=${BUILD_SRC} go get -d
39 39
 	# Add debianization
40 40
 	mkdir ${BUILD_SRC}/debian
41 41
 	cp Makefile ${BUILD_SRC}
42 42
 	cp -r * ${BUILD_SRC}/debian
43 43
 	cp ../../README.md ${BUILD_SRC}
44
+	./parse_changelog.py < ../../CHANGELOG.md  > ${BUILD_SRC}/debian/changelog
44 45
 	# Cleanup
45 46
 	for d in `find ${BUILD_SRC} -name '.git*'`; do rm -rf $$d; done
46
-	rm -rf ${BUILD_SRC}/../${DOCKER_VERSION}.orig.tar.gz
47
+	rm -rf ${BUILD_SRC}/../${PKG_NAME}_${VERSION}.orig.tar.gz
47 48
 	rm -rf ${BUILD_SRC}/pkg
48 49
 	# Create docker debian files
49
-	cd ${BUILD_SRC}; tar czf ../${DOCKER_VERSION}.orig.tar.gz .
50
+	cd ${BUILD_SRC}; tar czf ../${PKG_NAME}_${VERSION}.orig.tar.gz .
50 51
 	cd ${BUILD_SRC}; dpkg-buildpackage -us -uc
51 52
 	rm -rf ${BUILD_SRC}
52 53
 	# Sign package and upload it to PPA if GPG_KEY environment variable
... ...
@@ -56,7 +54,7 @@ ubuntu:
56 56
 	# Import gpg signing key
57 57
 	echo "$${GPG_KEY}" | gpg --allow-secret-key-import --import
58 58
 	# Sign the package
59
-	cd ${BUILD_SRC}; dpkg-source -x ${BUILD_SRC}/../${DOCKER_FVERSION}.dsc
59
+	cd ${BUILD_SRC}; dpkg-source -x ${BUILD_SRC}/../${PKG_NAME}_${VERSION}-1.dsc
60 60
 	cd ${BUILD_SRC}/${PKG_NAME}-${VERSION}; debuild -S -sa
61
-	cd ${BUILD_SRC};dput ppa:dotcloud/lxc-docker ${DOCKER_FVERSION}_source.changes
61
+	cd ${BUILD_SRC};dput ppa:dotcloud/lxc-docker ${PKG_NAME}_${VERSION}-1_source.changes
62 62
 	rm -rf ${BUILD_SRC}
63 63
deleted file mode 100644
... ...
@@ -1,222 +0,0 @@
1
-lxc-docker (0.4.0-1) precise; urgency=low
2
-  - Introducing Builder: 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
3
-  - Introducing Remote API: control Docker programmatically using a simple HTTP/json API
4
-  - Runtime: various reliability and usability improvements
5
-
6
- -- dotCloud <ops@dotcloud.com>  Mon, 03 Jun 2013 00:00:00 -0700
7
-
8
-lxc-docker (0.3.4-1) precise; urgency=low
9
-  - Builder: 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
10
-  - Builder: 'docker build -t FOO' applies the tag FOO to the newly built container.
11
-  - Runtime: interactive TTYs correctly handle window resize
12
-  - Runtime: fix how configuration is merged between layers
13
-  - Remote API: split stdout and stderr on 'docker run'
14
-  - Remote API: optionally listen on a different IP and port (use at your own risk)
15
-  - Documentation: improved install instructions.
16
-
17
- -- dotCloud <ops@dotcloud.com>  Thu, 30 May 2013 00:00:00 -0700
18
-
19
-
20
-lxc-docker (0.3.3-1) precise; urgency=low
21
-  - Registry: Fix push regression
22
-  - Various bugfixes
23
-
24
- -- dotCloud <ops@dotcloud.com>  Thu, 23 May 2013 00:00:00 -0700
25
-
26
-
27
-lxc-docker (0.3.2-1) precise; urgency=low
28
-  - Runtime: Store the actual archive on commit
29
-  - Registry: Improve the checksum process
30
-  - Registry: Use the size to have a good progress bar while pushing
31
-  - Registry: Use the actual archive if it exists in order to speed up the push
32
-  - Registry: Fix error 400 on push
33
-
34
- -- dotCloud <ops@dotcloud.com>  Fri, 9 May 2013 00:00:00 -0700
35
-
36
-
37
-lxc-docker (0.3.1-1) precise; urgency=low
38
-  - Builder: Implement the autorun capability within docker builder
39
-  - Builder: Add caching to docker builder
40
-  - Builder: Add support for docker builder with native API as top level command
41
-  - Runtime: Add go version to debug infos
42
-  - Builder: Implement ENV within docker builder
43
-  - Registry: Add docker search top level command in order to search a repository
44
-  - Images: output graph of images to dot (graphviz)
45
-  - Documentation: new introduction and high-level overview
46
-  - Documentation: Add the documentation for docker builder
47
-  - Website: new high-level overview
48
-  - Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc
49
-  - Images: fix ByParent function
50
-  - Builder: Check the command existance prior create and add Unit tests for the case
51
-  - Registry: Fix pull for official images with specific tag
52
-  - Registry: Fix issue when login in with a different user and trying to push
53
-  - Documentation: CSS fix for docker documentation to make REST API docs look better.
54
-  - Documentation: Fixed CouchDB example page header mistake
55
-  - Documentation: fixed README formatting
56
-  - Registry: Improve checksum - async calculation
57
-  - Runtime: kernel version - don't show the dash if flavor is empty
58
-  - Documentation: updated www.docker.io website.
59
-  - Builder: use any whitespaces instead of tabs
60
-  - Packaging: packaging ubuntu; issue #510: Use goland-stable PPA package to build docker
61
-
62
- -- dotCloud <ops@dotcloud.com>  Fri, 8 May 2013 00:00:00 -0700
63
-
64
-
65
-lxc-docker (0.3.0-1) precise; urgency=low
66
-  - Registry: Implement the new registry
67
-  - Documentation: new example: sharing data between 2 couchdb databases
68
-  - Runtime: Fix the command existance check
69
-  - Runtime: strings.Split may return an empty string on no match
70
-  - Runtime: Fix an index out of range crash if cgroup memory is not
71
-  - Documentation: Various improvments
72
-  - Vagrant: Use only one deb line in /etc/apt
73
-
74
- -- dotCloud <ops@dotcloud.com>  Fri, 5 May 2013 00:00:00 -0700
75
-
76
-
77
-lxc-docker (0.2.2-1) precise; urgency=low
78
-  - Support for data volumes ('docker run -v=PATH')
79
-  - Share data volumes between containers ('docker run -volumes-from')
80
-  - Improved documentation
81
-  - Upgrade to Go 1.0.3
82
-  - Various upgrades to the dev environment for contributors
83
-
84
- -- dotCloud <ops@dotcloud.com>  Fri, 3 May 2013 00:00:00 -0700
85
-
86
-
87
-lxc-docker (0.2.1-1) precise; urgency=low
88
-
89
-  - 'docker commit -run' bundles a layer with default runtime options: command, ports etc.
90
-  - Improve install process on Vagrant
91
-  - New Dockerfile operation: "maintainer"
92
-  - New Dockerfile operation: "expose"
93
-  - New Dockerfile operation: "cmd"
94
-  - Contrib script to build a Debian base layer
95
-  - 'docker -d -r': restart crashed containers at daemon startup
96
-  - Runtime: improve test coverage
97
-
98
- -- dotCloud <ops@dotcloud.com>  Wed, 1 May 2013 00:00:00 -0700
99
-
100
-
101
-lxc-docker (0.2.0-1) precise; urgency=low
102
-
103
-  - Runtime: ghost containers can be killed and waited for
104
-  - Documentation: update install intructions
105
-  - Packaging: fix Vagrantfile
106
-  - Development: automate releasing binaries and ubuntu packages
107
-  - Add a changelog
108
-  - Various bugfixes
109
-
110
- -- dotCloud <ops@dotcloud.com>  Mon, 23 Apr 2013 00:00:00 -0700
111
-
112
-
113
-lxc-docker (0.1.8-1) precise; urgency=low
114
-
115
-  - Dynamically detect cgroup capabilities
116
-  - Issue stability warning on kernels <3.8
117
-  - 'docker push' buffers on disk instead of memory
118
-  - Fix 'docker diff' for removed files
119
-  - Fix 'docker stop' for ghost containers
120
-  - Fix handling of pidfile
121
-  - Various bugfixes and stability improvements
122
-
123
- -- dotCloud <ops@dotcloud.com>  Mon, 22 Apr 2013 00:00:00 -0700
124
-
125
-
126
-lxc-docker (0.1.7-1) precise; urgency=low
127
-
128
-  - Container ports are available on localhost
129
-  - 'docker ps' shows allocated TCP ports
130
-  - Contributors can run 'make hack' to start a continuous integration VM
131
-  - Streamline ubuntu packaging & uploading
132
-  - Various bugfixes and stability improvements
133
-
134
- -- dotCloud <ops@dotcloud.com>  Thu, 18 Apr 2013 00:00:00 -0700
135
-
136
-
137
-lxc-docker (0.1.6-1) precise; urgency=low
138
-
139
-  - Record the author an image with 'docker commit -author'
140
-
141
- -- dotCloud <ops@dotcloud.com>  Wed, 17 Apr 2013 00:00:00 -0700
142
-
143
-
144
-lxc-docker (0.1.5-1) precise; urgency=low
145
-
146
-  - Disable standalone mode
147
-  - Use a custom DNS resolver with 'docker -d -dns'
148
-  - Detect ghost containers
149
-  - Improve diagnosis of missing system capabilities
150
-  - Allow disabling memory limits at compile time
151
-  - Add debian packaging
152
-  - Documentation: installing on Arch Linux
153
-  - Documentation: running Redis on docker
154
-  - Fixed lxc 0.9 compatibility
155
-  - Automatically load aufs module
156
-  - Various bugfixes and stability improvements
157
-
158
- -- dotCloud <ops@dotcloud.com>  Wed, 17 Apr 2013 00:00:00 -0700
159
-
160
-
161
-lxc-docker (0.1.4-1) precise; urgency=low
162
-
163
-  - Full support for TTY emulation
164
-  - Detach from a TTY session with the escape sequence `C-p C-q`
165
-  - Various bugfixes and stability improvements
166
-  - Minor UI improvements
167
-  - Automatically create our own bridge interface 'docker0'
168
-
169
- -- dotCloud <ops@dotcloud.com>  Tue,  9 Apr 2013 00:00:00 -0700
170
-
171
-
172
-lxc-docker (0.1.3-1) precise; urgency=low
173
-
174
-  - Choose TCP frontend port with '-p :PORT'
175
-  - Layer format is versioned
176
-  - Major reliability improvements to the process manager
177
-  - Various bugfixes and stability improvements
178
-
179
- -- dotCloud <ops@dotcloud.com>  Thu,  4 Apr 2013 00:00:00 -0700
180
-
181
-
182
-lxc-docker (0.1.2-1) precise; urgency=low
183
-
184
-  - Set container hostname with 'docker run -h'
185
-  - Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]'
186
-  - Various bugfixes and stability improvements
187
-  - UI polish
188
-  - Progress bar on push/pull
189
-  - Use XZ compression by default
190
-  - Make IP allocator lazy
191
-
192
- -- dotCloud <ops@dotcloud.com>  Wed,  3 Apr 2013 00:00:00 -0700
193
-
194
-
195
-lxc-docker (0.1.1-1) precise; urgency=low
196
-
197
-  - Display shorthand IDs for convenience
198
-  - Stabilize process management
199
-  - Layers can include a commit message
200
-  - Simplified 'docker attach'
201
-  - Fixed support for re-attaching
202
-  - Various bugfixes and stability improvements
203
-  - Auto-download at run
204
-  - Auto-login on push
205
-  - Beefed up documentation
206
-
207
- -- dotCloud <ops@dotcloud.com>  Sun, 31 Mar 2013 00:00:00 -0700
208
-
209
-
210
-lxc-docker (0.1.0-1) precise; urgency=low
211
-
212
-  - First release
213
-  - Implement registry in order to push/pull images
214
-  - TCP port allocation
215
-  - Fix termcaps on Linux
216
-  - Add documentation
217
-  - Add Vagrant support with Vagrantfile
218
-  - Add unit tests
219
-  - Add repository/tags to ease image management
220
-  - Improve the layer implementation
221
-
222
- -- dotCloud <ops@dotcloud.com>  Sat, 23 Mar 2013 00:00:00 -0700
223 1
new file mode 100755
... ...
@@ -0,0 +1,23 @@
0
+#!/usr/bin/env python
1
+
2
+'Parse main CHANGELOG.md from stdin, outputting the ubuntu changelog on stdout'
3
+
4
+import sys,re, datetime
5
+
6
+on_block=False
7
+for line in sys.stdin.readlines():
8
+    line = line.strip()
9
+    if line.startswith('# ') or len(line) == 0:
10
+        continue
11
+    if line.startswith('## '):
12
+        if on_block:
13
+            print '\n -- dotCloud <ops@dotcloud.com>  {0}\n'.format(date)
14
+        version, date = line[3:].split()
15
+        date = datetime.datetime.strptime(date, '(%Y-%m-%d)').strftime(
16
+            '%a, %d %b %Y 00:00:00 -0700')
17
+        on_block = True
18
+        print 'lxc-docker ({0}-1) precise; urgency=low'.format(version)
19
+        continue
20
+    if on_block:
21
+        print '  ' + line
22
+print '\n -- dotCloud <ops@dotcloud.com>  {0}'.format(date)
... ...
@@ -12,6 +12,7 @@ import (
12 12
 	"net/http"
13 13
 	"net/http/cookiejar"
14 14
 	"net/url"
15
+	"strconv"
15 16
 	"strings"
16 17
 )
17 18
 
... ...
@@ -106,40 +107,45 @@ func (r *Registry) getImagesInRepository(repository string, authConfig *auth.Aut
106 106
 }
107 107
 
108 108
 // Retrieve an image from the Registry.
109
-// Returns the Image object as well as the layer as an Archive (io.Reader)
110
-func (r *Registry) GetRemoteImageJSON(imgId, registry string, token []string) ([]byte, error) {
109
+func (r *Registry) GetRemoteImageJSON(imgId, registry string, token []string) ([]byte, int, error) {
111 110
 	// Get the JSON
112 111
 	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/json", nil)
113 112
 	if err != nil {
114
-		return nil, fmt.Errorf("Failed to download json: %s", err)
113
+		return nil, -1, fmt.Errorf("Failed to download json: %s", err)
115 114
 	}
116 115
 	req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
117 116
 	res, err := r.client.Do(req)
118 117
 	if err != nil {
119
-		return nil, fmt.Errorf("Failed to download json: %s", err)
118
+		return nil, -1, fmt.Errorf("Failed to download json: %s", err)
120 119
 	}
121 120
 	defer res.Body.Close()
122 121
 	if res.StatusCode != 200 {
123
-		return nil, fmt.Errorf("HTTP code %d", res.StatusCode)
122
+		return nil, -1, fmt.Errorf("HTTP code %d", res.StatusCode)
124 123
 	}
124
+
125
+	imageSize, err := strconv.Atoi(res.Header.Get("X-Docker-Size"))
126
+	if err != nil {
127
+		return nil, -1, err
128
+	}
129
+
125 130
 	jsonString, err := ioutil.ReadAll(res.Body)
126 131
 	if err != nil {
127
-		return nil, fmt.Errorf("Failed to parse downloaded json: %s (%s)", err, jsonString)
132
+		return nil, -1, fmt.Errorf("Failed to parse downloaded json: %s (%s)", err, jsonString)
128 133
 	}
129
-	return jsonString, nil
134
+	return jsonString, imageSize, nil
130 135
 }
131 136
 
132
-func (r *Registry) GetRemoteImageLayer(imgId, registry string, token []string) (io.ReadCloser, int, error) {
137
+func (r *Registry) GetRemoteImageLayer(imgId, registry string, token []string) (io.ReadCloser, error) {
133 138
 	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/layer", nil)
134 139
 	if err != nil {
135
-		return nil, -1, fmt.Errorf("Error while getting from the server: %s\n", err)
140
+		return nil, fmt.Errorf("Error while getting from the server: %s\n", err)
136 141
 	}
137 142
 	req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
138 143
 	res, err := r.client.Do(req)
139 144
 	if err != nil {
140
-		return nil, -1, err
145
+		return nil, err
141 146
 	}
142
-	return res.Body, int(res.ContentLength), nil
147
+	return res.Body, nil
143 148
 }
144 149
 
145 150
 func (r *Registry) GetRemoteTags(registries []string, repository string, token []string) (map[string]string, error) {
... ...
@@ -150,16 +156,16 @@ func (r *Registry) GetRemoteTags(registries []string, repository string, token [
150 150
 	}
151 151
 	for _, host := range registries {
152 152
 		endpoint := fmt.Sprintf("https://%s/v1/repositories/%s/tags", host, repository)
153
-		req, err := http.NewRequest("GET", endpoint, nil)
153
+		req, err := r.opaqueRequest("GET", endpoint, nil)
154 154
 		if err != nil {
155 155
 			return nil, err
156 156
 		}
157 157
 		req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
158 158
 		res, err := r.client.Do(req)
159
-		utils.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
160 159
 		if err != nil {
161 160
 			return nil, err
162 161
 		}
162
+		utils.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
163 163
 		defer res.Body.Close()
164 164
 
165 165
 		if res.StatusCode != 200 && res.StatusCode != 404 {
... ...
@@ -184,7 +190,7 @@ func (r *Registry) GetRemoteTags(registries []string, repository string, token [
184 184
 func (r *Registry) GetRepositoryData(remote string) (*RepositoryData, error) {
185 185
 	repositoryTarget := auth.IndexServerAddress() + "/repositories/" + remote + "/images"
186 186
 
187
-	req, err := http.NewRequest("GET", repositoryTarget, nil)
187
+	req, err := r.opaqueRequest("GET", repositoryTarget, nil)
188 188
 	if err != nil {
189 189
 		return nil, err
190 190
 	}
... ...
@@ -303,6 +309,15 @@ func (r *Registry) PushImageLayerRegistry(imgId string, layer io.Reader, registr
303 303
 	return nil
304 304
 }
305 305
 
306
+func (r *Registry) opaqueRequest(method, urlStr string, body io.Reader) (*http.Request, error) {
307
+	req, err := http.NewRequest(method, urlStr, body)
308
+	if err != nil {
309
+		return nil, err
310
+	}
311
+	req.URL.Opaque = strings.Replace(urlStr, req.URL.Scheme + ":", "", 1)
312
+	return req, err
313
+}
314
+
306 315
 // push a tag on the registry.
307 316
 // Remote has the format '<user>/<repo>
308 317
 func (r *Registry) PushRegistryTag(remote, revision, tag, registry string, token []string) error {
... ...
@@ -310,7 +325,7 @@ func (r *Registry) PushRegistryTag(remote, revision, tag, registry string, token
310 310
 	revision = "\"" + revision + "\""
311 311
 	registry = "https://" + registry + "/v1"
312 312
 
313
-	req, err := http.NewRequest("PUT", registry+"/repositories/"+remote+"/tags/"+tag, strings.NewReader(revision))
313
+	req, err := r.opaqueRequest("PUT", registry+"/repositories/"+remote+"/tags/"+tag, strings.NewReader(revision))
314 314
 	if err != nil {
315 315
 		return err
316 316
 	}
... ...
@@ -340,7 +355,7 @@ func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validat
340 340
 
341 341
 	utils.Debugf("Image list pushed to index:\n%s\n", imgListJSON)
342 342
 
343
-	req, err := http.NewRequest("PUT", auth.IndexServerAddress()+"/repositories/"+remote+"/"+suffix, bytes.NewReader(imgListJSON))
343
+	req, err := r.opaqueRequest("PUT", auth.IndexServerAddress()+"/repositories/"+remote+"/"+suffix, bytes.NewReader(imgListJSON))
344 344
 	if err != nil {
345 345
 		return nil, err
346 346
 	}
... ...
@@ -360,7 +375,7 @@ func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validat
360 360
 	// Redirect if necessary
361 361
 	for res.StatusCode >= 300 && res.StatusCode < 400 {
362 362
 		utils.Debugf("Redirected to %s\n", res.Header.Get("Location"))
363
-		req, err = http.NewRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJSON))
363
+		req, err = r.opaqueRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJSON))
364 364
 		if err != nil {
365 365
 			return nil, err
366 366
 		}
... ...
@@ -476,7 +491,7 @@ type Registry struct {
476 476
 func NewRegistry(root string, authConfig *auth.AuthConfig) (r *Registry, err error) {
477 477
 	httpTransport := &http.Transport{
478 478
 		DisableKeepAlives: true,
479
-		Proxy: http.ProxyFromEnvironment,
479
+		Proxy:             http.ProxyFromEnvironment,
480 480
 	}
481 481
 
482 482
 	r = &Registry{
... ...
@@ -65,7 +65,11 @@ func init() {
65 65
 
66 66
 	// Create the "Server"
67 67
 	srv := &Server{
68
-		runtime: runtime,
68
+		runtime:     runtime,
69
+		enableCors:  false,
70
+		lock:        &sync.Mutex{},
71
+		pullingPool: make(map[string]struct{}),
72
+		pushingPool: make(map[string]struct{}),
69 73
 	}
70 74
 	// Retrieve the Image
71 75
 	if err := srv.ImagePull(unitTestImageName, "", "", os.Stdout, utils.NewStreamFormatter(false), nil); err != nil {
... ...
@@ -15,6 +15,7 @@ import (
15 15
 	"path"
16 16
 	"runtime"
17 17
 	"strings"
18
+	"sync"
18 19
 )
19 20
 
20 21
 func (srv *Server) DockerVersion() APIVersion {
... ...
@@ -221,12 +222,24 @@ func (srv *Server) ImageHistory(name string) ([]APIHistory, error) {
221 221
 		return nil, err
222 222
 	}
223 223
 
224
+	lookupMap := make(map[string][]string)
225
+	for name, repository := range srv.runtime.repositories.Repositories {
226
+		for tag, id := range repository {
227
+			// If the ID already has a reverse lookup, do not update it unless for "latest"
228
+			if _, exists := lookupMap[id]; !exists {
229
+				lookupMap[id] = []string{}
230
+			}
231
+			lookupMap[id] = append(lookupMap[id], name+":"+tag)
232
+		}
233
+	}
234
+
224 235
 	outs := []APIHistory{} //produce [] when empty instead of 'null'
225 236
 	err = image.WalkHistory(func(img *Image) error {
226 237
 		var out APIHistory
227 238
 		out.ID = srv.runtime.repositories.ImageName(img.ShortID())
228 239
 		out.Created = img.Created.Unix()
229 240
 		out.CreatedBy = strings.Join(img.ContainerConfig.Cmd, " ")
241
+		out.Tags = lookupMap[img.ID]
230 242
 		outs = append(outs, out)
231 243
 		return nil
232 244
 	})
... ...
@@ -312,7 +325,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgId, endpoin
312 312
 	for _, id := range history {
313 313
 		if !srv.runtime.graph.Exists(id) {
314 314
 			out.Write(sf.FormatStatus("Pulling %s metadata", id))
315
-			imgJSON, err := r.GetRemoteImageJSON(id, endpoint, token)
315
+			imgJSON, imgSize, err := r.GetRemoteImageJSON(id, endpoint, token)
316 316
 			if err != nil {
317 317
 				// FIXME: Keep goging in case of error?
318 318
 				return err
... ...
@@ -324,12 +337,12 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgId, endpoin
324 324
 
325 325
 			// Get the layer
326 326
 			out.Write(sf.FormatStatus("Pulling %s fs layer", id))
327
-			layer, contentLength, err := r.GetRemoteImageLayer(img.ID, endpoint, token)
327
+			layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token)
328 328
 			if err != nil {
329 329
 				return err
330 330
 			}
331 331
 			defer layer.Close()
332
-			if err := srv.runtime.graph.Register(utils.ProgressReader(layer, contentLength, out, sf.FormatProgress("Downloading", "%v/%v (%v)"), sf), false, img); err != nil {
332
+			if err := srv.runtime.graph.Register(utils.ProgressReader(layer, imgSize, out, sf.FormatProgress("Downloading", "%v/%v (%v)"), sf), false, img); err != nil {
333 333
 				return err
334 334
 			}
335 335
 		}
... ...
@@ -404,11 +417,51 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, local, re
404 404
 	return nil
405 405
 }
406 406
 
407
+func (srv *Server) poolAdd(kind, key string) error {
408
+	srv.lock.Lock()
409
+	defer srv.lock.Unlock()
410
+
411
+	if _, exists := srv.pullingPool[key]; exists {
412
+		return fmt.Errorf("%s %s is already in progress", key, kind)
413
+	}
414
+
415
+	switch kind {
416
+	case "pull":
417
+		srv.pullingPool[key] = struct{}{}
418
+		break
419
+	case "push":
420
+		srv.pushingPool[key] = struct{}{}
421
+		break
422
+	default:
423
+		return fmt.Errorf("Unkown pool type")
424
+	}
425
+	return nil
426
+}
427
+
428
+func (srv *Server) poolRemove(kind, key string) error {
429
+	switch kind {
430
+	case "pull":
431
+		delete(srv.pullingPool, key)
432
+		break
433
+	case "push":
434
+		delete(srv.pushingPool, key)
435
+		break
436
+	default:
437
+		return fmt.Errorf("Unkown pool type")
438
+	}
439
+	return nil
440
+}
441
+
407 442
 func (srv *Server) ImagePull(name, tag, endpoint string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig) error {
408 443
 	r, err := registry.NewRegistry(srv.runtime.root, authConfig)
409 444
 	if err != nil {
410 445
 		return err
411 446
 	}
447
+	if err := srv.poolAdd("pull", name+":"+tag); err != nil {
448
+		return err
449
+	}
450
+	defer srv.poolRemove("pull", name+":"+tag)
451
+
412 452
 	out = utils.NewWriteFlusher(out)
413 453
 	if endpoint != "" {
414 454
 		if err := srv.pullImage(r, out, name, endpoint, nil, sf); err != nil {
... ...
@@ -424,7 +477,6 @@ func (srv *Server) ImagePull(name, tag, endpoint string, out io.Writer, sf *util
424 424
 	if err := srv.pullRepository(r, out, name, remote, tag, sf); err != nil {
425 425
 		return err
426 426
 	}
427
-
428 427
 	return nil
429 428
 }
430 429
 
... ...
@@ -526,7 +578,7 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, name stri
526 526
 				// FIXME: Continue on error?
527 527
 				return err
528 528
 			}
529
-			out.Write(sf.FormatStatus("Pushing tags for rev [%s] on {%s}", elem.ID, ep+"/users/"+srvName+"/"+elem.Tag))
529
+			out.Write(sf.FormatStatus("Pushing tags for rev [%s] on {%s}", elem.ID, ep+"/repositories/"+srvName+"/tags/"+elem.Tag))
530 530
 			if err := r.PushRegistryTag(srvName, elem.ID, elem.Tag, ep, repoData.Tokens); err != nil {
531 531
 				return err
532 532
 			}
... ...
@@ -599,7 +651,13 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgId,
599 599
 	return nil
600 600
 }
601 601
 
602
+// FIXME: Allow to interupt current push when new push of same image is done.
602 603
 func (srv *Server) ImagePush(name, endpoint string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig) error {
604
+	if err := srv.poolAdd("push", name); err != nil {
605
+		return err
606
+	}
607
+	defer srv.poolRemove("push", name)
608
+
603 609
 	out = utils.NewWriteFlusher(out)
604 610
 	img, err := srv.runtime.graph.Get(name)
605 611
 	r, err2 := registry.NewRegistry(srv.runtime.root, authConfig)
... ...
@@ -938,9 +996,6 @@ func (srv *Server) ContainerAttach(name string, logs, stream, stdin, stdout, std
938 938
 		if container.State.Ghost {
939 939
 			return fmt.Errorf("Impossible to attach to a ghost container")
940 940
 		}
941
-		if !container.State.Running {
942
-			return fmt.Errorf("Impossible to attach to a stopped container, start it first")
943
-		}
944 941
 
945 942
 		var (
946 943
 			cStdin           io.ReadCloser
... ...
@@ -999,14 +1054,20 @@ func NewServer(autoRestart, enableCors bool, dns ListOpts) (*Server, error) {
999 999
 		return nil, err
1000 1000
 	}
1001 1001
 	srv := &Server{
1002
-		runtime:    runtime,
1003
-		enableCors: enableCors,
1002
+		runtime:     runtime,
1003
+		enableCors:  enableCors,
1004
+		lock:        &sync.Mutex{},
1005
+		pullingPool: make(map[string]struct{}),
1006
+		pushingPool: make(map[string]struct{}),
1004 1007
 	}
1005 1008
 	runtime.srv = srv
1006 1009
 	return srv, nil
1007 1010
 }
1008 1011
 
1009 1012
 type Server struct {
1010
-	runtime    *Runtime
1011
-	enableCors bool
1013
+	runtime     *Runtime
1014
+	enableCors  bool
1015
+	lock        *sync.Mutex
1016
+	pullingPool map[string]struct{}
1017
+	pushingPool map[string]struct{}
1012 1018
 }
... ...
@@ -30,7 +30,7 @@ Vagrant::Config.run do |config|
30 30
     # Install docker dependencies
31 31
     pkg_cmd << "apt-get install -q -y python-software-properties; " \
32 32
       "add-apt-repository -y ppa:gophers/go/ubuntu; apt-get update -qq; " \
33
-      "DEBIAN_FRONTEND=noninteractive apt-get install -q -y lxc bsdtar git golang-stable aufs-tools make; "
33
+      "DEBIAN_FRONTEND=noninteractive apt-get install -q -y lxc git golang-stable aufs-tools make; "
34 34
     # Activate new kernel
35 35
     pkg_cmd << "shutdown -r +1; "
36 36
     config.vm.provision :shell, :inline => pkg_cmd
... ...
@@ -86,7 +86,7 @@ func (r *progressReader) Read(p []byte) (n int, err error) {
86 86
 	}
87 87
 	if r.readProgress-r.lastUpdate > updateEvery || err != nil {
88 88
 		if r.readTotal > 0 {
89
-			fmt.Fprintf(r.output, r.template, r.readProgress, r.readTotal, fmt.Sprintf("%.0f%%", float64(r.readProgress)/float64(r.readTotal)*100))
89
+			fmt.Fprintf(r.output, r.template, HumanSize(int64(r.readProgress)), HumanSize(int64(r.readTotal)), fmt.Sprintf("%.0f%%", float64(r.readProgress)/float64(r.readTotal)*100))
90 90
 		} else {
91 91
 			fmt.Fprintf(r.output, r.template, r.readProgress, "?", "n/a")
92 92
 		}
... ...
@@ -548,6 +548,7 @@ func GetKernelVersion() (*KernelVersionInfo, error) {
548 548
 	}, nil
549 549
 }
550 550
 
551
+// FIXME: this is deprecated by CopyWithTar in archive.go
551 552
 func CopyDirectory(source, dest string) error {
552 553
 	if output, err := exec.Command("cp", "-ra", source, dest).CombinedOutput(); err != nil {
553 554
 		return fmt.Errorf("Error copy: %s (%s)", err, output)