Browse code

up to date with master

Victor Vieux authored on 2013/05/03 01:36:23
Showing 98 changed files
1 1
new file mode 100644
... ...
@@ -0,0 +1,96 @@
0
+# Changelog
1
+
2
+## 0.2.1 (2012-05-01)
3
+ + 'docker commit -run' bundles a layer with default runtime options: command, ports etc. 
4
+ * Improve install process on Vagrant
5
+ + New Dockerfile operation: "maintainer"
6
+ + New Dockerfile operation: "expose"
7
+ + New Dockerfile operation: "cmd"
8
+ + Contrib script to build a Debian base layer
9
+ + 'docker -d -r': restart crashed containers at daemon startup
10
+ * Runtime: improve test coverage
11
+
12
+## 0.2.0 (2012-04-23)
13
+ - Runtime: ghost containers can be killed and waited for
14
+ * Documentation: update install intructions
15
+ - Packaging: fix Vagrantfile
16
+ - Development: automate releasing binaries and ubuntu packages
17
+ + Add a changelog
18
+ - Various bugfixes
19
+
20
+
21
+## 0.1.8 (2013-04-22)
22
+ - Dynamically detect cgroup capabilities
23
+ - Issue stability warning on kernels <3.8
24
+ - 'docker push' buffers on disk instead of memory
25
+ - Fix 'docker diff' for removed files
26
+ - Fix 'docker stop' for ghost containers 
27
+ - Fix handling of pidfile
28
+ - Various bugfixes and stability improvements
29
+
30
+## 0.1.7 (2013-04-18)
31
+ - Container ports are available on localhost
32
+ - 'docker ps' shows allocated TCP ports
33
+ - Contributors can run 'make hack' to start a continuous integration VM
34
+ - Streamline ubuntu packaging & uploading
35
+ - Various bugfixes and stability improvements
36
+
37
+## 0.1.6 (2013-04-17)
38
+ - Record the author an image with 'docker commit -author'
39
+
40
+## 0.1.5 (2013-04-17)
41
+ - Disable standalone mode
42
+ - Use a custom DNS resolver with 'docker -d -dns'
43
+ - Detect ghost containers
44
+ - Improve diagnosis of missing system capabilities
45
+ - Allow disabling memory limits at compile time
46
+ - Add debian packaging
47
+ - Documentation: installing on Arch Linux 
48
+ - Documentation: running Redis on docker
49
+ - Fixed lxc 0.9 compatibility
50
+ - Automatically load aufs module
51
+ - Various bugfixes and stability improvements
52
+
53
+## 0.1.4 (2013-04-09)
54
+ - Full support for TTY emulation
55
+ - Detach from a TTY session with the escape sequence `C-p C-q`
56
+ - Various bugfixes and stability improvements
57
+ - Minor UI improvements
58
+ - Automatically create our own bridge interface 'docker0'
59
+
60
+## 0.1.3 (2013-04-04)
61
+ - Choose TCP frontend port with '-p :PORT'
62
+ - Layer format is versioned
63
+ - Major reliability improvements to the process manager
64
+ - Various bugfixes and stability improvements
65
+
66
+## 0.1.2 (2013-04-03)
67
+ - Set container hostname with 'docker run -h'
68
+ - Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]'
69
+ - Various bugfixes and stability improvements
70
+ - UI polish
71
+ - Progress bar on push/pull
72
+ - Use XZ compression by default
73
+ - Make IP allocator lazy
74
+
75
+## 0.1.1 (2013-03-31)
76
+ - Display shorthand IDs for convenience
77
+ - Stabilize process management
78
+ - Layers can include a commit message
79
+ - Simplified 'docker attach'
80
+ - Fixed support for re-attaching
81
+ - Various bugfixes and stability improvements
82
+ - Auto-download at run
83
+ - Auto-login on push
84
+ - Beefed up documentation
85
+
86
+## 0.1.0 (2013-03-23)
87
+ - First release
88
+ - Implement registry in order to push/pull images
89
+ - TCP port allocation
90
+ - Fix termcaps on Linux
91
+ - Add documentation
92
+ - Add Vagrant support with Vagrantfile
93
+ - Add unit tests
94
+ - Add repository/tags to ease image management
95
+ - Improve the layer implementation
... ...
@@ -1,5 +1,9 @@
1 1
 DOCKER_PACKAGE := github.com/dotcloud/docker
2
+RELEASE_VERSION := $(shell git tag | grep -E "v[0-9\.]+$$" | sort -nr | head -n 1)
3
+SRCRELEASE := docker-$(RELEASE_VERSION)
4
+BINRELEASE := docker-$(RELEASE_VERSION).tgz
2 5
 
6
+GIT_ROOT := $(shell git rev-parse --show-toplevel)
3 7
 BUILD_DIR := $(CURDIR)/.gopath
4 8
 
5 9
 GOPATH ?= $(BUILD_DIR)
... ...
@@ -13,10 +17,7 @@ endif
13 13
 GIT_COMMIT = $(shell git rev-parse --short HEAD)
14 14
 GIT_STATUS = $(shell test -n "`git status --porcelain`" && echo "+CHANGES")
15 15
 
16
-NO_MEMORY_LIMIT ?= 0
17
-export NO_MEMORY_LIMIT
18
-
19
-BUILD_OPTIONS = -ldflags "-X main.GIT_COMMIT $(GIT_COMMIT)$(GIT_STATUS) -X main.NO_MEMORY_LIMIT $(NO_MEMORY_LIMIT)"
16
+BUILD_OPTIONS = -ldflags "-X main.GIT_COMMIT $(GIT_COMMIT)$(GIT_STATUS)"
20 17
 
21 18
 SRC_DIR := $(GOPATH)/src
22 19
 
... ...
@@ -26,7 +27,7 @@ DOCKER_MAIN := $(DOCKER_DIR)/docker
26 26
 DOCKER_BIN_RELATIVE := bin/docker
27 27
 DOCKER_BIN := $(CURDIR)/$(DOCKER_BIN_RELATIVE)
28 28
 
29
-.PHONY: all clean test
29
+.PHONY: all clean test hack release srcrelease $(BINRELEASE) $(SRCRELEASE) $(DOCKER_BIN) $(DOCKER_DIR)
30 30
 
31 31
 all: $(DOCKER_BIN)
32 32
 
... ...
@@ -39,6 +40,24 @@ $(DOCKER_DIR):
39 39
 	@mkdir -p $(dir $@)
40 40
 	@ln -sf $(CURDIR)/ $@
41 41
 
42
+whichrelease:
43
+	echo $(RELEASE_VERSION)
44
+
45
+release: $(BINRELEASE)
46
+srcrelease: $(SRCRELEASE)
47
+deps: $(DOCKER_DIR)
48
+
49
+# A clean checkout of $RELEASE_VERSION, with vendored dependencies
50
+$(SRCRELEASE):
51
+	rm -fr $(SRCRELEASE)
52
+	git clone $(GIT_ROOT) $(SRCRELEASE)
53
+	cd $(SRCRELEASE); git checkout -q $(RELEASE_VERSION)
54
+
55
+# A binary release ready to be uploaded to a mirror
56
+$(BINRELEASE): $(SRCRELEASE)
57
+	rm -f $(BINRELEASE)
58
+	cd $(SRCRELEASE); make; cp -R bin docker-$(RELEASE_VERSION); tar -f ../$(BINRELEASE) -zv -c docker-$(RELEASE_VERSION)
59
+
42 60
 clean:
43 61
 	@rm -rf $(dir $(DOCKER_BIN))
44 62
 ifeq ($(GOPATH), $(BUILD_DIR))
... ...
@@ -52,3 +71,9 @@ test: all
52 52
 
53 53
 fmt:
54 54
 	@gofmt -s -l -w .
55
+
56
+hack:
57
+	cd $(CURDIR)/hack && vagrant up
58
+
59
+ssh-dev:
60
+	cd $(CURDIR)/hack && vagrant ssh
... ...
@@ -33,123 +33,85 @@ Notable features
33 33
 
34 34
 * Interactive shell: docker can allocate a pseudo-tty and attach to the standard input of any container, for example to run a throwaway interactive shell.
35 35
 
36
-
37
-
38
-Under the hood
39
-
40
-Under the hood, Docker is built on the following components:
41
-
42
-
43
-* The [cgroup](http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c) and [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part) capabilities of the Linux kernel;
44
-
45
-* [AUFS](http://aufs.sourceforge.net/aufs.html), a powerful union filesystem with copy-on-write capabilities;
46
-
47
-* The [Go](http://golang.org) programming language;
48
-
49
-* [lxc](http://lxc.sourceforge.net/), a set of convenience scripts to simplify the creation of linux containers.
50
-
51
-
52 36
 Install instructions
53 37
 ==================
54 38
 
55
-Building from source
56
-
57
-1. Make sure you have a [Go language](http://golang.org) compiler.
58
-
59
-    On a Debian/wheezy or Ubuntu 12.10 install the package:
60
-
61
-    ```bash
62
-
63
-    $ sudo apt-get install golang-go
64
-    ```
65
-
66
-2. Execute ``make``
67
-
68
-   This command will install all necessary dependencies and build the
69
-   executable that you can find in ``bin/docker``
70
-
71
-3. Should you like to see what's happening, run ``make`` with ``VERBOSE=1`` parameter:
72
-
73
-    ```bash
39
+Quick install on Ubuntu 12.04 and 12.10
40
+---------------------------------------
74 41
 
75
-    $ make VERBOSE=1
76
-    ```
42
+```bash
43
+curl get.docker.io | sh -x
44
+```
77 45
 
78
-Installing on Ubuntu 12.04 and 12.10
46
+Binary installs
47
+----------------
79 48
 
80
-1. Install dependencies:
49
+Docker supports the following binary installation methods.
50
+Note that some methods are community contributions and not yet officially supported.
81 51
 
82
-    ```bash
83
-    sudo apt-get install lxc wget bsdtar curl
84
-    sudo apt-get install linux-image-extra-`uname -r`
85
-    ```
52
+* [Ubuntu 12.04 and 12.10 (officially supported)](http://docs.docker.io/en/latest/installation/ubuntulinux/)
53
+* [Arch Linux](http://docs.docker.io/en/latest/installation/archlinux/)
54
+* [MacOS X (with Vagrant)](http://docs.docker.io/en/latest/installation/macos/)
55
+* [Windows (with Vagrant)](http://docs.docker.io/en/latest/installation/windows/)
56
+* [Amazon EC2 (with Vagrant)](http://docs.docker.io/en/latest/installation/amazon/)
86 57
 
87
-    The `linux-image-extra` package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
58
+Installing from source
59
+----------------------
88 60
 
89
-2. Install the latest docker binary:
61
+1. Make sure you have a [Go language](http://golang.org/doc/install) compiler and [git](http://git-scm.com) installed.
90 62
 
91
-    ```bash
92
-    wget http://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-master.tgz
93
-    tar -xf docker-master.tgz
94
-    ```
63
+2. Checkout the source code
95 64
 
96
-3. Run your first container!
65
+   ```bash
66
+   git clone http://github.com/dotcloud/docker
67
+   ```
97 68
 
98
-    ```bash
99
-    cd docker-master
100
-    sudo ./docker pull base
101
-    sudo ./docker run -i -t base /bin/bash
102
-    ```
69
+3. Build the docker binary
103 70
 
104
-    Consider adding docker to your `PATH` for simplicity.
71
+   ```bash
72
+   cd docker
73
+   make VERBOSE=1
74
+   sudo cp ./bin/docker /usr/local/bin/docker
75
+   ```
105 76
 
106
-Installing on other Linux distributions
77
+Usage examples
78
+==============
107 79
 
108
-Right now, the officially supported distributions are:
80
+First run the docker daemon
81
+---------------------------
109 82
 
110
-* Ubuntu 12.04 (precise LTS)
111
-* Ubuntu 12.10 (quantal)
83
+All the examples assume your machine is running the docker daemon. To run the docker daemon in the background, simply type:
112 84
 
113
-Docker probably works on other distributions featuring a recent kernel, the AUFS patch, and up-to-date lxc. However this has not been tested.
85
+```bash
86
+# On a production system you want this running in an init script
87
+sudo docker -d &
88
+```
114 89
 
115
-Some streamlined (but possibly outdated) installation paths' are available from the website: http://docker.io/documentation/ 
90
+Now you can run docker in client mode: all commands will be forwarded to the docker daemon, so the client can run from any account.
116 91
 
92
+```bash
93
+# Now you can run docker commands from any account.
94
+docker help
95
+```
117 96
 
118
-Usage examples
119
-==============
120 97
 
121
-Running an interactive shell
98
+Throwaway shell in a base ubuntu image
99
+--------------------------------------
122 100
 
123 101
 ```bash
124
-# Download a base image
125
-docker pull base
126
-
127
-# Run an interactive shell in the base image,
128
-# allocate a tty, attach stdin and stdout
129
-docker run -i -t base /bin/bash
130
-```
102
+docker pull ubuntu:12.10
131 103
 
132
-Detaching from the interactive shell
133
-```
134
-# In order to detach without killing the shell, you can use the escape sequence Ctrl-p + Ctrl-q
135
-# Note: this works only in tty mode (run with -t option).
104
+# Run an interactive shell, allocate a tty, attach stdin and stdout
105
+# To detach the tty without exiting the shell, use the escape sequence Ctrl-p + Ctrl-q
106
+docker run -i -t ubuntu:12.10 /bin/bash
136 107
 ```
137 108
 
138 109
 Starting a long-running worker process
139 110
 --------------------------------------
140 111
 
141 112
 ```bash
142
-# Run docker in daemon mode
143
-(docker -d || echo "Docker daemon already running") &
144
-
145 113
 # Start a very useful long-running process
146
-JOB=$(docker run -d base /bin/sh -c "while true; do echo Hello world; sleep 1; done")
114
+JOB=$(docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done")
147 115
 
148 116
 # Collect the output of the job so far
149 117
 docker logs $JOB
... ...
@@ -158,25 +120,32 @@ docker logs $JOB
158 158
 docker kill $JOB
159 159
 ```
160 160
 
161
-
162
-Listing all running containers
161
+Running an irc bouncer
162
+----------------------
163 163
 
164 164
 ```bash
165
-docker ps
165
+BOUNCER_ID=$(docker run -d -p 6667 -u irc shykes/znc $USER $PASSWORD)
166
+echo "Configure your irc client to connect to port $(docker port $BOUNCER_ID 6667) of this machine"
166 167
 ```
167 168
 
169
+Running Redis
170
+-------------
171
+
172
+```bash
173
+REDIS_ID=$(docker run -d -p 6379 shykes/redis redis-server)
174
+echo "Configure your redis client to connect to port $(docker port $REDIS_ID 6379) of this machine"
175
+```
168 176
 
169 177
 Share your own image!
170 178
 ---------------------
171 179
 
172 180
 ```bash
173
-docker pull base
174
-CONTAINER=$(docker run -d base apt-get install -y curl)
181
+CONTAINER=$(docker run -d ubuntu:12.10 apt-get install -y curl)
175 182
 docker commit -m "Installed curl" $CONTAINER $USER/betterbase
176 183
 docker push $USER/betterbase
177 184
 ```
178 185
 
186
+A list of publicly available images is [available here](https://github.com/dotcloud/docker/wiki/Public-docker-images).
179 187
 
180 188
 Expose a service on a TCP port
181 189
 ------------------------------
... ...
@@ -197,6 +166,22 @@ echo hello world | nc $IP $PORT
197 197
 echo "Daemon received: $(docker logs $JOB)"
198 198
 ```
199 199
 
200
+Under the hood
201
+--------------
202
+
203
+Under the hood, Docker is built on the following components:
204
+
205
+
206
+* The [cgroup](http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c) and [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part) capabilities of the Linux kernel;
207
+
208
+* [AUFS](http://aufs.sourceforge.net/aufs.html), a powerful union filesystem with copy-on-write capabilities;
209
+
210
+* The [Go](http://golang.org) programming language;
211
+
212
+* [lxc](http://lxc.sourceforge.net/), a set of convenience scripts to simplify the creation of linux containers.
213
+
214
+
215
+
200 216
 Contributing to Docker
201 217
 ======================
202 218
 
... ...
@@ -1,41 +1,37 @@
1 1
 # -*- mode: ruby -*-
2 2
 # vi: set ft=ruby :
3 3
 
4
-def v10(config)
5
-  config.vm.box = "quantal64_3.5.0-25"
6
-  config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box"
7
-
8
-  config.vm.share_folder "v-data", "/opt/go/src/github.com/dotcloud/docker", File.dirname(__FILE__)
9
-
10
-  # Ensure puppet is installed on the instance
11
-  config.vm.provision :shell, :inline => "apt-get -qq update; apt-get install -y puppet"
12
-
13
-  config.vm.provision :puppet do |puppet|
14
-    puppet.manifests_path = "puppet/manifests"
15
-    puppet.manifest_file  = "quantal64.pp"
16
-    puppet.module_path = "puppet/modules"
4
+BOX_NAME = "ubuntu"
5
+BOX_URI = "http://files.vagrantup.com/precise64.box"
6
+PPA_KEY = "E61D797F63561DC6"
7
+
8
+Vagrant::Config.run do |config|
9
+  # Setup virtual machine box. This VM configuration code is always executed.
10
+  config.vm.box = BOX_NAME
11
+  config.vm.box_url = BOX_URI
12
+  # Add docker PPA key to the local repository and install docker
13
+  pkg_cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys #{PPA_KEY}; "
14
+  pkg_cmd << "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >>/etc/apt/sources.list; "
15
+  pkg_cmd << "apt-get update -qq; apt-get install -q -y lxc-docker"
16
+  if ARGV.include?("--provider=aws".downcase)
17
+    # Add AUFS dependency to amazon's VM
18
+    pkg_cmd << "; apt-get install linux-image-extra-3.2.0-40-virtual"
17 19
   end
20
+  config.vm.provision :shell, :inline => pkg_cmd
18 21
 end
19 22
 
20
-Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config|
21
-  v10(config)
22
-end
23
-
24
-Vagrant::VERSION >= "1.1.0" and Vagrant.configure("1") do |config|
25
-  v10(config)
26
-end
27
-
23
+# Providers were added on Vagrant >= 1.1.0
28 24
 Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
29
-  config.vm.provider :aws do |aws|
25
+  config.vm.provider :aws do |aws, override|
30 26
     config.vm.box = "dummy"
31 27
     config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
32 28
     aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
33 29
     aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
34 30
     aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
35
-    aws.ssh_private_key_path = ENV["AWS_SSH_PRIVKEY"]
31
+    override.ssh.private_key_path = ENV["AWS_SSH_PRIVKEY"]
32
+    override.ssh.username = "ubuntu"
36 33
     aws.region = "us-east-1"
37
-    aws.ami = "ami-ae9806c7"
38
-    aws.ssh_username = "ubuntu"
34
+    aws.ami = "ami-d0f89fb9"
39 35
     aws.instance_type = "t1.micro"
40 36
   end
41 37
 
... ...
@@ -51,7 +47,7 @@ Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
51 51
   end
52 52
 
53 53
   config.vm.provider :virtualbox do |vb|
54
-    config.vm.box = "quantal64_3.5.0-25"
55
-    config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box"
54
+    config.vm.box = BOX_NAME
55
+    config.vm.box_url = BOX_URI
56 56
   end
57 57
 end
... ...
@@ -22,7 +22,7 @@ func ListenAndServe(addr string, rtime *Runtime) error {
22 22
 
23 23
 	r.Path("/version").Methods("GET").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
24 24
 		log.Println(r.Method, r.RequestURI)
25
-		m := ApiVersion{VERSION, GIT_COMMIT, NO_MEMORY_LIMIT}
25
+		m := ApiVersion{VERSION, GIT_COMMIT, rtime.capabilities.MemoryLimit, rtime.capabilities.SwapLimit}
26 26
 		b, err := json.Marshal(m)
27 27
 		if err != nil {
28 28
 			http.Error(w, err.Error(), http.StatusInternalServerError)
... ...
@@ -291,6 +291,7 @@ func ListenAndServe(addr string, rtime *Runtime) error {
291 291
 				out.Command = command
292 292
 				out.Created = container.Created.Unix()
293 293
 				out.Status = container.State.String()
294
+				out.Ports = container.NetworkSettings.PortMappingHuman()
294 295
 			}
295 296
 			outs = append(outs, out)
296 297
 		}
... ...
@@ -305,6 +306,12 @@ func ListenAndServe(addr string, rtime *Runtime) error {
305 305
 
306 306
 	r.Path("/containers/{name:.*}/commit").Methods("POST").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
307 307
 		log.Println(r.Method, r.RequestURI)
308
+		var config Config
309
+		if err := json.NewDecoder(r.Body).Decode(&config); err != nil {
310
+			http.Error(w, err.Error(), http.StatusInternalServerError)
311
+			return
312
+		}
313
+
308 314
 		if err := r.ParseForm(); err != nil {
309 315
 			http.Error(w, err.Error(), http.StatusInternalServerError)
310 316
 			return
... ...
@@ -313,9 +320,10 @@ func ListenAndServe(addr string, rtime *Runtime) error {
313 313
 		name := vars["name"]
314 314
 		repo := r.Form.Get("repo")
315 315
 		tag := r.Form.Get("tag")
316
+		author := r.Form.Get("author")
316 317
 		comment := r.Form.Get("comment")
317 318
 
318
-		img, err := rtime.Commit(name, repo, tag, comment)
319
+		img, err := rtime.Commit(name, repo, tag, comment, author, &config)
319 320
 		if err != nil {
320 321
 			http.Error(w, err.Error(), http.StatusInternalServerError)
321 322
 			return
... ...
@@ -433,9 +441,9 @@ func ListenAndServe(addr string, rtime *Runtime) error {
433 433
 				http.Error(w, err.Error(), http.StatusInternalServerError)
434 434
 				return
435 435
 			}
436
-			archive = ProgressReader(resp.Body, int(resp.ContentLength), file)
436
+			archive = ProgressReader(resp.Body, int(resp.ContentLength), file, "Importing %v/%v (%v)")
437 437
 		}
438
-		img, err := rtime.graph.Create(archive, nil, "Imported from "+src)
438
+		img, err := rtime.graph.Create(archive, nil, "Imported from "+src, "", nil)
439 439
 		if err != nil {
440 440
 			http.Error(w, err.Error(), http.StatusInternalServerError)
441 441
 			return
... ...
@@ -458,7 +466,19 @@ func ListenAndServe(addr string, rtime *Runtime) error {
458 458
 			http.Error(w, err.Error(), http.StatusInternalServerError)
459 459
 			return
460 460
 		}
461
+		var memoryW, swapW bool
462
+
463
+		if config.Memory > 0 && !rtime.capabilities.MemoryLimit {
464
+			memoryW = true
465
+			log.Println("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.")
466
+			config.Memory = 0
467
+		}
461 468
 
469
+		if config.Memory > 0 && !rtime.capabilities.SwapLimit {
470
+			swapW = true
471
+			log.Println("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.")
472
+			config.MemorySwap = -1
473
+		}
462 474
 		container, err := rtime.Create(&config)
463 475
 		if err != nil {
464 476
 			if rtime.graph.IsNotExist(err) {
... ...
@@ -468,8 +488,16 @@ func ListenAndServe(addr string, rtime *Runtime) error {
468 468
 			}
469 469
 			return
470 470
 		}
471
+		var out ApiRun
472
+		out.Id = container.ShortId()
473
+		if memoryW {
474
+			out.Warnings = append(out.Warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
475
+		}
476
+		if swapW {
477
+			out.Warnings = append(out.Warnings, "Your kernel does not support memory swap capabilities. Limitation discarded.")
478
+		}
471 479
 
472
-		b, err := json.Marshal(ApiId{container.ShortId()})
480
+		b, err := json.Marshal(out)
473 481
 		if err != nil {
474 482
 			http.Error(w, err.Error(), http.StatusInternalServerError)
475 483
 		} else {
... ...
@@ -479,10 +507,17 @@ func ListenAndServe(addr string, rtime *Runtime) error {
479 479
 
480 480
 	r.Path("/containers/{name:.*}/restart").Methods("POST").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
481 481
 		log.Println(r.Method, r.RequestURI)
482
+		if err := r.ParseForm(); err != nil {
483
+			http.Error(w, err.Error(), http.StatusInternalServerError)
484
+		}
485
+		t, err := strconv.Atoi(r.Form.Get("t"))
486
+		if err != nil || t < 0 {
487
+			t = 10
488
+		}
482 489
 		vars := mux.Vars(r)
483 490
 		name := vars["name"]
484 491
 		if container := rtime.Get(name); container != nil {
485
-			if err := container.Restart(); err != nil {
492
+			if err := container.Restart(t); err != nil {
486 493
 				http.Error(w, "Error restarting container "+name+": "+err.Error(), http.StatusInternalServerError)
487 494
 				return
488 495
 			}
... ...
@@ -545,10 +580,17 @@ func ListenAndServe(addr string, rtime *Runtime) error {
545 545
 
546 546
 	r.Path("/containers/{name:.*}/stop").Methods("POST").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
547 547
 		log.Println(r.Method, r.RequestURI)
548
+		if err := r.ParseForm(); err != nil {
549
+			http.Error(w, err.Error(), http.StatusInternalServerError)
550
+		}
551
+		t, err := strconv.Atoi(r.Form.Get("t"))
552
+		if err != nil || t < 0 {
553
+			t = 10
554
+		}
548 555
 		vars := mux.Vars(r)
549 556
 		name := vars["name"]
550 557
 		if container := rtime.Get(name); container != nil {
551
-			if err := container.Stop(); err != nil {
558
+			if err := container.Stop(t); err != nil {
552 559
 				http.Error(w, "Error stopping container "+name+": "+err.Error(), http.StatusInternalServerError)
553 560
 				return
554 561
 			}
... ...
@@ -603,17 +645,7 @@ func ListenAndServe(addr string, rtime *Runtime) error {
603 603
 				return
604 604
 			}
605 605
 			defer file.Close()
606
-			if container.Config.Tty {
607
-				oldState, err := SetRawTerminal()
608
-				if err != nil {
609
-					if os.Getenv("DEBUG") != "" {
610
-						log.Printf("Can't set the terminal in raw mode: %s", err)
611
-					}
612
-				} else {
613
-					defer RestoreTerminal(oldState)
614
-				}
615 606
 
616
-			}
617 607
 			// Flush the options to make sure the client sets the raw mode
618 608
 			conn.Write([]byte{})
619 609
 
... ...
@@ -640,6 +672,23 @@ func ListenAndServe(addr string, rtime *Runtime) error {
640 640
 
641 641
 			//stream
642 642
 			if stream == "1" {
643
+
644
+				if container.State.Ghost {
645
+					fmt.Fprintf(file, "error: Impossible to attach to a ghost container")
646
+					return
647
+				}
648
+
649
+				if container.Config.Tty {
650
+					oldState, err := SetRawTerminal()
651
+					if err != nil {
652
+						if os.Getenv("DEBUG") != "" {
653
+							log.Printf("Can't set the terminal in raw mode: %s", err)
654
+						}
655
+					} else {
656
+						defer RestoreTerminal(oldState)
657
+					}
658
+
659
+				}
643 660
 				var (
644 661
 					cStdin           io.ReadCloser
645 662
 					cStdout, cStderr io.Writer
... ...
@@ -28,20 +28,27 @@ type ApiContainers struct {
28 28
 	Command string `json:",omitempty"`
29 29
 	Created int64  `json:",omitempty"`
30 30
 	Status  string `json:",omitempty"`
31
+	Ports   string `json:",omitempty"`
31 32
 }
32 33
 
33 34
 type ApiId struct {
34 35
 	Id string
35 36
 }
36 37
 
38
+type ApiRun struct {
39
+	Id       string
40
+	Warnings []string
41
+}
42
+
37 43
 type ApiPort struct {
38 44
 	Port string
39 45
 }
40 46
 
41 47
 type ApiVersion struct {
42
-	Version             string
43
-	GitCommit           string
44
-	MemoryLimitDisabled bool
48
+	Version     string
49
+	GitCommit   string
50
+	MemoryLimit bool
51
+	SwapLimit   bool
45 52
 }
46 53
 
47 54
 type ApiWait struct {
... ...
@@ -4,6 +4,7 @@ import (
4 4
 	"errors"
5 5
 	"io"
6 6
 	"io/ioutil"
7
+	"os"
7 8
 	"os/exec"
8 9
 )
9 10
 
... ...
@@ -86,3 +87,38 @@ func CmdStream(cmd *exec.Cmd) (io.Reader, error) {
86 86
 	}
87 87
 	return pipeR, nil
88 88
 }
89
+
90
+// NewTempArchive reads the content of src into a temporary file, and returns the contents
91
+// of that file as an archive. The archive can only be read once - as soon as reading completes,
92
+// the file will be deleted.
93
+func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
94
+	f, err := ioutil.TempFile(dir, "")
95
+	if err != nil {
96
+		return nil, err
97
+	}
98
+	if _, err := io.Copy(f, src); err != nil {
99
+		return nil, err
100
+	}
101
+	if _, err := f.Seek(0, 0); err != nil {
102
+		return nil, err
103
+	}
104
+	st, err := f.Stat()
105
+	if err != nil {
106
+		return nil, err
107
+	}
108
+	size := st.Size()
109
+	return &TempArchive{f, size}, nil
110
+}
111
+
112
+type TempArchive struct {
113
+	*os.File
114
+	Size int64 // Pre-computed from Stat().Size() as a convenience
115
+}
116
+
117
+func (archive *TempArchive) Read(data []byte) (int, error) {
118
+	n, err := archive.File.Read(data)
119
+	if err != nil {
120
+		os.Remove(archive.File.Name())
121
+	}
122
+	return n, err
123
+}
89 124
new file mode 100644
... ...
@@ -0,0 +1,20 @@
0
+Buildbot
1
+========
2
+
3
+Buildbot is a continuous integration system designed to automate the
4
+build/test cycle. By automatically rebuilding and testing the tree each time
5
+something has changed, build problems are pinpointed quickly, before other
6
+developers are inconvenienced by the failure.
7
+
8
+When running 'make hack' at the docker root directory, it spawns a virtual
9
+machine in the background running a buildbot instance and adds a git
10
+post-commit hook that automatically run docker tests for you.
11
+
12
+You can check your buildbot instance at http://192.168.33.21:8010/waterfall
13
+
14
+
15
+Buildbot dependencies
16
+---------------------
17
+
18
+vagrant, virtualbox packages and python package requests
19
+
0 20
new file mode 100644
... ...
@@ -0,0 +1,28 @@
0
+# -*- mode: ruby -*-
1
+# vi: set ft=ruby :
2
+
3
+$BUILDBOT_IP = '192.168.33.21'
4
+
5
+def v10(config)
6
+  config.vm.box = "quantal64_3.5.0-25"
7
+  config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box"
8
+  config.vm.share_folder 'v-data', '/data/docker', File.dirname(__FILE__) + '/..'
9
+  config.vm.network :hostonly, $BUILDBOT_IP
10
+
11
+  # Ensure puppet is installed on the instance
12
+  config.vm.provision :shell, :inline => 'apt-get -qq update; apt-get install -y puppet'
13
+
14
+  config.vm.provision :puppet do |puppet|
15
+    puppet.manifests_path = '.'
16
+    puppet.manifest_file  = 'buildbot.pp'
17
+    puppet.options = ['--templatedir','.']
18
+  end
19
+end
20
+
21
+Vagrant::VERSION < '1.1.0' and Vagrant::Config.run do |config|
22
+  v10(config)
23
+end
24
+
25
+Vagrant::VERSION >= '1.1.0' and Vagrant.configure('1') do |config|
26
+  v10(config)
27
+end
0 28
new file mode 100755
... ...
@@ -0,0 +1,43 @@
0
+#!/bin/bash
1
+
2
+# Auto setup of buildbot configuration. Package installation is being done
3
+# on buildbot.pp
4
+# Dependencies: buildbot, buildbot-slave, supervisor
5
+
6
+SLAVE_NAME='buildworker'
7
+SLAVE_SOCKET='localhost:9989'
8
+BUILDBOT_PWD='pass-docker'
9
+USER='vagrant'
10
+ROOT_PATH='/data/buildbot'
11
+DOCKER_PATH='/data/docker'
12
+BUILDBOT_CFG="$DOCKER_PATH/buildbot/buildbot-cfg"
13
+IP=$(grep BUILDBOT_IP /data/docker/buildbot/Vagrantfile | awk -F "'" '{ print $2; }')
14
+
15
+function run { su $USER -c "$1"; }
16
+
17
+export PATH=/bin:sbin:/usr/bin:/usr/sbin:/usr/local/bin
18
+
19
+# Exit if buildbot has already been installed
20
+[ -d "$ROOT_PATH" ] && exit 0
21
+
22
+# Setup buildbot
23
+run "mkdir -p ${ROOT_PATH}"
24
+cd ${ROOT_PATH}
25
+run "buildbot create-master master"
26
+run "cp $BUILDBOT_CFG/master.cfg master"
27
+run "sed -i 's/localhost/$IP/' master/master.cfg"
28
+run "buildslave create-slave slave $SLAVE_SOCKET $SLAVE_NAME $BUILDBOT_PWD"
29
+
30
+# Allow buildbot subprocesses (docker tests) to properly run in containers,
31
+# in particular with docker -u
32
+run "sed -i 's/^umask = None/umask = 000/' ${ROOT_PATH}/slave/buildbot.tac"
33
+
34
+# Setup supervisor
35
+cp $BUILDBOT_CFG/buildbot.conf /etc/supervisor/conf.d/buildbot.conf
36
+sed -i "s/^chmod=0700.*0700./chmod=0770\nchown=root:$USER/" /etc/supervisor/supervisord.conf
37
+kill -HUP `pgrep -f "/usr/bin/python /usr/bin/supervisord"`
38
+
39
+# Add git hook
40
+cp $BUILDBOT_CFG/post-commit $DOCKER_PATH/.git/hooks
41
+sed -i "s/localhost/$IP/" $DOCKER_PATH/.git/hooks/post-commit
42
+
0 43
new file mode 100644
... ...
@@ -0,0 +1,32 @@
0
+node default {
1
+    $USER = 'vagrant'
2
+    $ROOT_PATH = '/data/buildbot'
3
+    $DOCKER_PATH = '/data/docker'
4
+
5
+    exec {'apt_update': command => '/usr/bin/apt-get update' }
6
+    Package { require => Exec['apt_update'] }
7
+    group {'puppet': ensure => 'present'}
8
+
9
+    # Install dependencies
10
+    Package { ensure => 'installed' }
11
+    package { ['python-dev','python-pip','supervisor','lxc','bsdtar','git','golang']: }
12
+
13
+    file{[ '/data' ]:
14
+        owner => $USER, group => $USER, ensure => 'directory' }
15
+
16
+    file {'/var/tmp/requirements.txt':
17
+        content => template('requirements.txt') }
18
+
19
+    exec {'requirements':
20
+        require => [ Package['python-dev'], Package['python-pip'],
21
+            File['/var/tmp/requirements.txt'] ],
22
+        cwd     => '/var/tmp',
23
+        command => "/bin/sh -c '(/usr/bin/pip install -r requirements.txt;
24
+            rm /var/tmp/requirements.txt)'" }
25
+
26
+    exec {'buildbot-cfg-sh':
27
+        require => [ Package['supervisor'], Exec['requirements']],
28
+        path    => '/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin',
29
+        cwd     => '/data',
30
+        command => "$DOCKER_PATH/buildbot/buildbot-cfg/buildbot-cfg.sh" }
31
+}
0 32
new file mode 100644
... ...
@@ -0,0 +1,6 @@
0
+sqlalchemy<=0.7.9
1
+sqlalchemy-migrate>=0.7.2
2
+buildbot==0.8.7p1
3
+buildbot_slave==0.8.7p1
4
+nose==1.2.1
5
+requests==1.1.0
... ...
@@ -18,11 +18,10 @@ import (
18 18
 	"time"
19 19
 )
20 20
 
21
-const VERSION = "0.1.4"
21
+const VERSION = "0.2.1"
22 22
 
23 23
 var (
24
-	GIT_COMMIT      string
25
-	NO_MEMORY_LIMIT bool
24
+	GIT_COMMIT string
26 25
 )
27 26
 
28 27
 func ParseCommands(args []string) error {
... ...
@@ -248,8 +247,11 @@ func CmdVersion(args []string) error {
248 248
 	}
249 249
 	fmt.Println("Version:", out.Version)
250 250
 	fmt.Println("Git Commit:", out.GitCommit)
251
-	if out.MemoryLimitDisabled {
252
-		fmt.Println("Memory limit disabled")
251
+	if !out.MemoryLimit {
252
+		fmt.Println("WARNING: No memory limit support")
253
+	}
254
+	if !out.SwapLimit {
255
+		fmt.Println("WARNING: No swap limit support")
253 256
 	}
254 257
 
255 258
 	return nil
... ...
@@ -285,7 +287,8 @@ func CmdInfo(args []string) error {
285 285
 }
286 286
 
287 287
 func CmdStop(args []string) error {
288
-	cmd := Subcmd("stop", "CONTAINER [CONTAINER...]", "Stop a running container")
288
+	cmd := Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container")
289
+	nSeconds := cmd.Int("t", 10, "wait t seconds before killing the container")
289 290
 	if err := cmd.Parse(args); err != nil {
290 291
 		return nil
291 292
 	}
... ...
@@ -294,8 +297,11 @@ func CmdStop(args []string) error {
294 294
 		return nil
295 295
 	}
296 296
 
297
-	for _, name := range args {
298
-		_, _, err := call("POST", "/containers/"+name+"/stop", nil)
297
+	v := url.Values{}
298
+	v.Set("t", strconv.Itoa(*nSeconds))
299
+
300
+	for _, name := range cmd.Args() {
301
+		_, _, err := call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil)
299 302
 		if err != nil {
300 303
 			fmt.Printf("%s", err)
301 304
 		} else {
... ...
@@ -306,7 +312,8 @@ func CmdStop(args []string) error {
306 306
 }
307 307
 
308 308
 func CmdRestart(args []string) error {
309
-	cmd := Subcmd("restart", "CONTAINER [CONTAINER...]", "Restart a running container")
309
+	cmd := Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running container")
310
+	nSeconds := cmd.Int("t", 10, "wait t seconds before killing the container")
310 311
 	if err := cmd.Parse(args); err != nil {
311 312
 		return nil
312 313
 	}
... ...
@@ -315,8 +322,11 @@ func CmdRestart(args []string) error {
315 315
 		return nil
316 316
 	}
317 317
 
318
-	for _, name := range args {
319
-		_, _, err := call("POST", "/containers/"+name+"/restart", nil)
318
+	v := url.Values{}
319
+	v.Set("t", strconv.Itoa(*nSeconds))
320
+
321
+	for _, name := range cmd.Args() {
322
+		_, _, err := call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil)
320 323
 		if err != nil {
321 324
 			fmt.Printf("%s", err)
322 325
 		} else {
... ...
@@ -682,12 +692,12 @@ func CmdPs(args []string) error {
682 682
 	}
683 683
 	w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
684 684
 	if !*quiet {
685
-		fmt.Fprintln(w, "ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS")
685
+		fmt.Fprintln(w, "ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS")
686 686
 	}
687 687
 
688 688
 	for _, out := range outs {
689 689
 		if !*quiet {
690
-			fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\n", out.Id, out.Image, out.Command, out.Status, HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))))
690
+			fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\n", out.Id, out.Image, out.Command, out.Status, HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))), out.Ports)
691 691
 		} else {
692 692
 			fmt.Fprintln(w, out.Id)
693 693
 		}
... ...
@@ -702,6 +712,8 @@ func CmdPs(args []string) error {
702 702
 func CmdCommit(args []string) error {
703 703
 	cmd := Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY [TAG]]", "Create a new image from a container's changes")
704 704
 	flComment := cmd.String("m", "", "Commit message")
705
+	flAuthor := cmd.String("author", "", "Author (eg. \"John Hannibal Smith <hannibal@a-team.com>\"")
706
+	flConfig := cmd.String("run", "", "Config automatically applied when the image is run. "+`(ex: {"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`)
705 707
 	if err := cmd.Parse(args); err != nil {
706 708
 		return nil
707 709
 	}
... ...
@@ -710,12 +722,21 @@ func CmdCommit(args []string) error {
710 710
 		cmd.Usage()
711 711
 		return nil
712 712
 	}
713
+
713 714
 	v := url.Values{}
714 715
 	v.Set("repo", repository)
715 716
 	v.Set("tag", tag)
716 717
 	v.Set("comment", *flComment)
718
+	v.Set("author", *flAuthor)
719
+	var config *Config
720
+	if *flConfig != "" {
721
+		config = &Config{}
722
+		if err := json.Unmarshal([]byte(*flConfig), config); err != nil {
723
+			return err
724
+		}
725
+	}
717 726
 
718
-	body, _, err := call("POST", "/containers/"+name+"/commit?"+v.Encode(), nil)
727
+	body, _, err := call("POST", "/containers/"+name+"/commit?"+v.Encode(), config)
719 728
 	if err != nil {
720 729
 		return err
721 730
 	}
... ...
@@ -941,12 +962,16 @@ func CmdRun(args []string) error {
941 941
 		return err
942 942
 	}
943 943
 
944
-	var out ApiId
944
+	var out ApiRun
945 945
 	err = json.Unmarshal(body, &out)
946 946
 	if err != nil {
947 947
 		return err
948 948
 	}
949 949
 
950
+	for _, warning := range out.Warnings {
951
+		fmt.Fprintln(os.Stderr, "WARNING: ", warning)
952
+	}
953
+
950 954
 	v := url.Values{}
951 955
 	v.Set("logs", "1")
952 956
 	v.Set("stream", "1")
... ...
@@ -972,17 +997,13 @@ func CmdRun(args []string) error {
972 972
 	if err != nil {
973 973
 		return err
974 974
 	}
975
-
976
-	if err := hijack("POST", "/containers/"+out.Id+"/attach?"+v.Encode(), config.Tty); err != nil {
977
-		return err
978
-	}
979
-
980
-	/*
981
-		if err := <-attach; err != nil {
975
+	if config.AttachStdin || config.AttachStdout || config.AttachStderr {
976
+		if err := hijack("POST", "/containers/"+out.Id+"/attach?"+v.Encode(), config.Tty); err != nil {
982 977
 			return err
983 978
 		}
984
-	*/
985
-
979
+	} else {
980
+		fmt.Println(out.Id)
981
+	}
986 982
 	return nil
987 983
 }
988 984
 
... ...
@@ -228,6 +228,21 @@ func TestRunDisconnectTty(t *testing.T) {
228 228
 		close(c1)
229 229
 	}()
230 230
 
231
+	setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
232
+		for {
233
+			// Client disconnect after run -i should keep stdin out in TTY mode
234
+			l := runtime.List()
235
+			if len(l) == 1 && l[0].State.Running {
236
+				break
237
+			}
238
+
239
+			time.Sleep(10 * time.Millisecond)
240
+		}
241
+	})
242
+
243
+	// Client disconnect after run -i should keep stdin out in TTY mode
244
+	container := runtime.List()[0]
245
+
231 246
 	setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
232 247
 		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
233 248
 			t.Fatal(err)
... ...
@@ -239,14 +254,9 @@ func TestRunDisconnectTty(t *testing.T) {
239 239
 		t.Fatal(err)
240 240
 	}
241 241
 
242
-	// as the pipes are close, we expect the process to die,
243
-	// therefore CmdRun to unblock. Wait for CmdRun
244
-	setTimeout(t, "Waiting for CmdRun timed out", 2*time.Second, func() {
245
-		<-c1
246
-	})
242
+	// In tty mode, we expect the process to stay alive even after client's stdin closes.
243
+	// Do not wait for run to finish
247 244
 
248
-	// Client disconnect after run -i should keep stdin out in TTY mode
249
-	container := runtime.List()[0]
250 245
 	// Give some time to monitor to do his thing
251 246
 	container.WaitTimeout(500 * time.Millisecond)
252 247
 	if !container.State.Running {
... ...
@@ -384,4 +394,5 @@ func TestAttachDisconnect(t *testing.T) {
384 384
 	// Try to avoid the timeoout in destroy. Best effort, don't check error
385 385
 	cStdin, _ := container.StdinPipe()
386 386
 	cStdin.Close()
387
+	container.Wait()
387 388
 }
... ...
@@ -11,7 +11,9 @@ import (
11 11
 	"os"
12 12
 	"os/exec"
13 13
 	"path"
14
+	"sort"
14 15
 	"strconv"
16
+	"strings"
15 17
 	"syscall"
16 18
 	"time"
17 19
 )
... ...
@@ -81,11 +83,6 @@ func ParseRun(args []string) (*Config, *flag.FlagSet, error) {
81 81
 	flTty := cmd.Bool("t", false, "Allocate a pseudo-tty")
82 82
 	flMemory := cmd.Int64("m", 0, "Memory limit (in bytes)")
83 83
 
84
-	if *flMemory > 0 && NO_MEMORY_LIMIT {
85
-		fmt.Println("WARNING: This version of docker has been compiled without memory limit support. Discarding -m.")
86
-		*flMemory = 0
87
-	}
88
-
89 84
 	var flPorts ListOpts
90 85
 	cmd.Var(&flPorts, "p", "Expose a container's port to the host (use 'docker port' to see the actual mapping)")
91 86
 
... ...
@@ -135,6 +132,7 @@ func ParseRun(args []string) (*Config, *flag.FlagSet, error) {
135 135
 		Dns:          flDns,
136 136
 		Image:        image,
137 137
 	}
138
+
138 139
 	// When allocating stdin in attached mode, close stdin at client disconnect
139 140
 	if config.OpenStdin && config.AttachStdin {
140 141
 		config.StdinOnce = true
... ...
@@ -150,6 +148,16 @@ type NetworkSettings struct {
150 150
 	PortMapping map[string]string
151 151
 }
152 152
 
153
+// String returns a human-readable description of the port mapping defined in the settings
154
+func (settings *NetworkSettings) PortMappingHuman() string {
155
+	var mapping []string
156
+	for private, public := range settings.PortMapping {
157
+		mapping = append(mapping, fmt.Sprintf("%s->%s", public, private))
158
+	}
159
+	sort.Strings(mapping)
160
+	return strings.Join(mapping, ", ")
161
+}
162
+
153 163
 func (container *Container) Cmd() *exec.Cmd {
154 164
 	return container.cmd
155 165
 }
... ...
@@ -367,10 +375,15 @@ func (container *Container) Start() error {
367 367
 		return err
368 368
 	}
369 369
 
370
-	if container.Config.Memory > 0 && NO_MEMORY_LIMIT {
371
-		log.Printf("WARNING: This version of docker has been compiled without memory limit support. Discarding the limit.")
370
+	// Make sure the config is compatible with the current kernel
371
+	if container.Config.Memory > 0 && !container.runtime.capabilities.MemoryLimit {
372
+		log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
372 373
 		container.Config.Memory = 0
373 374
 	}
375
+	if container.Config.Memory > 0 && !container.runtime.capabilities.SwapLimit {
376
+		log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
377
+		container.Config.MemorySwap = -1
378
+	}
374 379
 
375 380
 	if err := container.generateLXCConfig(); err != nil {
376 381
 		return err
... ...
@@ -390,21 +403,26 @@ func (container *Container) Start() error {
390 390
 		params = append(params, "-u", container.Config.User)
391 391
 	}
392 392
 
393
+	if container.Config.Tty {
394
+		params = append(params, "-e", "TERM=xterm")
395
+	}
396
+
397
+	// Setup environment
398
+	params = append(params,
399
+		"-e", "HOME=/",
400
+		"-e", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
401
+	)
402
+
403
+	for _, elem := range container.Config.Env {
404
+		params = append(params, "-e", elem)
405
+	}
406
+
393 407
 	// Program
394 408
 	params = append(params, "--", container.Path)
395 409
 	params = append(params, container.Args...)
396 410
 
397 411
 	container.cmd = exec.Command("lxc-start", params...)
398 412
 
399
-	// Setup environment
400
-	container.cmd.Env = append(
401
-		[]string{
402
-			"HOME=/",
403
-			"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
404
-		},
405
-		container.Config.Env...,
406
-	)
407
-
408 413
 	// Setup logging of stdout and stderr to disk
409 414
 	if err := container.runtime.LogToDisk(container.stdout, container.logPath("stdout")); err != nil {
410 415
 		return err
... ...
@@ -415,10 +433,6 @@ func (container *Container) Start() error {
415 415
 
416 416
 	var err error
417 417
 	if container.Config.Tty {
418
-		container.cmd.Env = append(
419
-			[]string{"TERM=xterm"},
420
-			container.cmd.Env...,
421
-		)
422 418
 		err = container.startPty()
423 419
 	} else {
424 420
 		err = container.start()
... ...
@@ -506,16 +520,42 @@ func (container *Container) releaseNetwork() {
506 506
 	container.NetworkSettings = &NetworkSettings{}
507 507
 }
508 508
 
509
+// FIXME: replace this with a control socket within docker-init
510
+func (container *Container) waitLxc() error {
511
+	for {
512
+		if output, err := exec.Command("lxc-info", "-n", container.Id).CombinedOutput(); err != nil {
513
+			return err
514
+		} else {
515
+			if !strings.Contains(string(output), "RUNNING") {
516
+				return nil
517
+			}
518
+		}
519
+		time.Sleep(500 * time.Millisecond)
520
+	}
521
+	return nil
522
+}
523
+
509 524
 func (container *Container) monitor() {
510 525
 	// Wait for the program to exit
511 526
 	Debugf("Waiting for process")
512
-	if err := container.cmd.Wait(); err != nil {
513
-		// Discard the error as any signals or non 0 returns will generate an error
514
-		Debugf("%s: Process: %s", container.Id, err)
527
+
528
+	// If the command does not exists, try to wait via lxc
529
+	if container.cmd == nil {
530
+		if err := container.waitLxc(); err != nil {
531
+			Debugf("%s: Process: %s", container.Id, err)
532
+		}
533
+	} else {
534
+		if err := container.cmd.Wait(); err != nil {
535
+			// Discard the error as any signals or non 0 returns will generate an error
536
+			Debugf("%s: Process: %s", container.Id, err)
537
+		}
515 538
 	}
516 539
 	Debugf("Process finished")
517 540
 
518
-	exitCode := container.cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
541
+	var exitCode int = -1
542
+	if container.cmd != nil {
543
+		exitCode = container.cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
544
+	}
519 545
 
520 546
 	// Cleanup
521 547
 	container.releaseNetwork()
... ...
@@ -564,7 +604,7 @@ func (container *Container) monitor() {
564 564
 }
565 565
 
566 566
 func (container *Container) kill() error {
567
-	if !container.State.Running || container.cmd == nil {
567
+	if !container.State.Running {
568 568
 		return nil
569 569
 	}
570 570
 
... ...
@@ -576,6 +616,9 @@ func (container *Container) kill() error {
576 576
 
577 577
 	// 2. Wait for the process to die, in last resort, try to kill the process directly
578 578
 	if err := container.WaitTimeout(10 * time.Second); err != nil {
579
+		if container.cmd == nil {
580
+			return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", container.Id)
581
+		}
579 582
 		log.Printf("Container %s failed to exit within 10 seconds of lxc SIGKILL - trying direct SIGKILL", container.Id)
580 583
 		if err := container.cmd.Process.Kill(); err != nil {
581 584
 			return err
... ...
@@ -593,21 +636,15 @@ func (container *Container) Kill() error {
593 593
 	if !container.State.Running {
594 594
 		return nil
595 595
 	}
596
-	if container.State.Ghost {
597
-		return fmt.Errorf("Can't kill ghost container")
598
-	}
599 596
 	return container.kill()
600 597
 }
601 598
 
602
-func (container *Container) Stop() error {
599
+func (container *Container) Stop(seconds int) error {
603 600
 	container.State.lock()
604 601
 	defer container.State.unlock()
605 602
 	if !container.State.Running {
606 603
 		return nil
607 604
 	}
608
-	if container.State.Ghost {
609
-		return fmt.Errorf("Can't stop ghot container")
610
-	}
611 605
 
612 606
 	// 1. Send a SIGTERM
613 607
 	if output, err := exec.Command("lxc-kill", "-n", container.Id, "15").CombinedOutput(); err != nil {
... ...
@@ -619,8 +656,8 @@ func (container *Container) Stop() error {
619 619
 	}
620 620
 
621 621
 	// 2. Wait for the process to exit on its own
622
-	if err := container.WaitTimeout(10 * time.Second); err != nil {
623
-		log.Printf("Container %v failed to exit within 10 seconds of SIGTERM - using the force", container.Id)
622
+	if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil {
623
+		log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.Id, seconds)
624 624
 		if err := container.kill(); err != nil {
625 625
 			return err
626 626
 		}
... ...
@@ -628,8 +665,8 @@ func (container *Container) Stop() error {
628 628
 	return nil
629 629
 }
630 630
 
631
-func (container *Container) Restart() error {
632
-	if err := container.Stop(); err != nil {
631
+func (container *Container) Restart(seconds int) error {
632
+	if err := container.Stop(seconds); err != nil {
633 633
 		return err
634 634
 	}
635 635
 	if err := container.Start(); err != nil {
... ...
@@ -22,9 +22,8 @@ func TestIdFormat(t *testing.T) {
22 22
 	defer nuke(runtime)
23 23
 	container1, err := runtime.Create(
24 24
 		&Config{
25
-			Image:  GetTestImage(runtime).Id,
26
-			Cmd:    []string{"/bin/sh", "-c", "echo hello world"},
27
-			Memory: 33554432,
25
+			Image: GetTestImage(runtime).Id,
26
+			Cmd:   []string{"/bin/sh", "-c", "echo hello world"},
28 27
 		},
29 28
 	)
30 29
 	if err != nil {
... ...
@@ -50,7 +49,6 @@ func TestMultipleAttachRestart(t *testing.T) {
50 50
 			Image: GetTestImage(runtime).Id,
51 51
 			Cmd: []string{"/bin/sh", "-c",
52 52
 				"i=1; while [ $i -le 5 ]; do i=`expr $i + 1`;  echo hello; done"},
53
-			Memory: 33554432,
54 53
 		},
55 54
 	)
56 55
 	if err != nil {
... ...
@@ -97,7 +95,7 @@ func TestMultipleAttachRestart(t *testing.T) {
97 97
 		t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l3)
98 98
 	}
99 99
 
100
-	if err := container.Stop(); err != nil {
100
+	if err := container.Stop(10); err != nil {
101 101
 		t.Fatal(err)
102 102
 	}
103 103
 
... ...
@@ -116,8 +114,8 @@ func TestMultipleAttachRestart(t *testing.T) {
116 116
 	if err := container.Start(); err != nil {
117 117
 		t.Fatal(err)
118 118
 	}
119
-	timeout := make(chan bool)
120
-	go func() {
119
+
120
+	setTimeout(t, "Timeout reading from the process", 3*time.Second, func() {
121 121
 		l1, err = bufio.NewReader(stdout1).ReadString('\n')
122 122
 		if err != nil {
123 123
 			t.Fatal(err)
... ...
@@ -139,14 +137,161 @@ func TestMultipleAttachRestart(t *testing.T) {
139 139
 		if strings.Trim(l3, " \r\n") != "hello" {
140 140
 			t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l3)
141 141
 		}
142
-		timeout <- false
143
-	}()
144
-	go func() {
145
-		time.Sleep(3 * time.Second)
146
-		timeout <- true
147
-	}()
148
-	if <-timeout {
149
-		t.Fatalf("Timeout reading from the process")
142
+	})
143
+	container.Wait()
144
+}
145
+
146
+func TestDiff(t *testing.T) {
147
+	runtime, err := newTestRuntime()
148
+	if err != nil {
149
+		t.Fatal(err)
150
+	}
151
+	defer nuke(runtime)
152
+
153
+	// Create a container and remove a file
154
+	container1, err := runtime.Create(
155
+		&Config{
156
+			Image: GetTestImage(runtime).Id,
157
+			Cmd:   []string{"/bin/rm", "/etc/passwd"},
158
+		},
159
+	)
160
+	if err != nil {
161
+		t.Fatal(err)
162
+	}
163
+	defer runtime.Destroy(container1)
164
+
165
+	if err := container1.Run(); err != nil {
166
+		t.Fatal(err)
167
+	}
168
+
169
+	// Check the changelog
170
+	c, err := container1.Changes()
171
+	if err != nil {
172
+		t.Fatal(err)
173
+	}
174
+	success := false
175
+	for _, elem := range c {
176
+		if elem.Path == "/etc/passwd" && elem.Kind == 2 {
177
+			success = true
178
+		}
179
+	}
180
+	if !success {
181
+		t.Fatalf("/etc/passwd as been removed but is not present in the diff")
182
+	}
183
+
184
+	// Commit the container
185
+	rwTar, err := container1.ExportRw()
186
+	if err != nil {
187
+		t.Error(err)
188
+	}
189
+	img, err := runtime.graph.Create(rwTar, container1, "unit test commited image - diff", "", nil)
190
+	if err != nil {
191
+		t.Error(err)
192
+	}
193
+
194
+	// Create a new container from the commited image
195
+	container2, err := runtime.Create(
196
+		&Config{
197
+			Image: img.Id,
198
+			Cmd:   []string{"cat", "/etc/passwd"},
199
+		},
200
+	)
201
+	if err != nil {
202
+		t.Fatal(err)
203
+	}
204
+	defer runtime.Destroy(container2)
205
+
206
+	if err := container2.Run(); err != nil {
207
+		t.Fatal(err)
208
+	}
209
+
210
+	// Check the changelog
211
+	c, err = container2.Changes()
212
+	if err != nil {
213
+		t.Fatal(err)
214
+	}
215
+	for _, elem := range c {
216
+		if elem.Path == "/etc/passwd" {
217
+			t.Fatalf("/etc/passwd should not be present in the diff after commit.")
218
+		}
219
+	}
220
+}
221
+
222
+func TestCommitAutoRun(t *testing.T) {
223
+	runtime, err := newTestRuntime()
224
+	if err != nil {
225
+		t.Fatal(err)
226
+	}
227
+	defer nuke(runtime)
228
+	container1, err := runtime.Create(
229
+		&Config{
230
+			Image: GetTestImage(runtime).Id,
231
+			Cmd:   []string{"/bin/sh", "-c", "echo hello > /world"},
232
+		},
233
+	)
234
+	if err != nil {
235
+		t.Fatal(err)
236
+	}
237
+	defer runtime.Destroy(container1)
238
+
239
+	if container1.State.Running {
240
+		t.Errorf("Container shouldn't be running")
241
+	}
242
+	if err := container1.Run(); err != nil {
243
+		t.Fatal(err)
244
+	}
245
+	if container1.State.Running {
246
+		t.Errorf("Container shouldn't be running")
247
+	}
248
+
249
+	rwTar, err := container1.ExportRw()
250
+	if err != nil {
251
+		t.Error(err)
252
+	}
253
+	img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "", &Config{Cmd: []string{"cat", "/world"}})
254
+	if err != nil {
255
+		t.Error(err)
256
+	}
257
+
258
+	// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
259
+
260
+	container2, err := runtime.Create(
261
+		&Config{
262
+			Image: img.Id,
263
+		},
264
+	)
265
+	if err != nil {
266
+		t.Fatal(err)
267
+	}
268
+	defer runtime.Destroy(container2)
269
+	stdout, err := container2.StdoutPipe()
270
+	if err != nil {
271
+		t.Fatal(err)
272
+	}
273
+	stderr, err := container2.StderrPipe()
274
+	if err != nil {
275
+		t.Fatal(err)
276
+	}
277
+	if err := container2.Start(); err != nil {
278
+		t.Fatal(err)
279
+	}
280
+	container2.Wait()
281
+	output, err := ioutil.ReadAll(stdout)
282
+	if err != nil {
283
+		t.Fatal(err)
284
+	}
285
+	output2, err := ioutil.ReadAll(stderr)
286
+	if err != nil {
287
+		t.Fatal(err)
288
+	}
289
+	if err := stdout.Close(); err != nil {
290
+		t.Fatal(err)
291
+	}
292
+	if err := stderr.Close(); err != nil {
293
+		t.Fatal(err)
294
+	}
295
+	if string(output) != "hello\n" {
296
+		t.Fatalf("Unexpected output. Expected %s, received: %s (err: %s)", "hello\n", output, output2)
150 297
 	}
151 298
 }
152 299
 
... ...
@@ -158,9 +303,8 @@ func TestCommitRun(t *testing.T) {
158 158
 	defer nuke(runtime)
159 159
 	container1, err := runtime.Create(
160 160
 		&Config{
161
-			Image:  GetTestImage(runtime).Id,
162
-			Cmd:    []string{"/bin/sh", "-c", "echo hello > /world"},
163
-			Memory: 33554432,
161
+			Image: GetTestImage(runtime).Id,
162
+			Cmd:   []string{"/bin/sh", "-c", "echo hello > /world"},
164 163
 		},
165 164
 	)
166 165
 	if err != nil {
... ...
@@ -182,7 +326,7 @@ func TestCommitRun(t *testing.T) {
182 182
 	if err != nil {
183 183
 		t.Error(err)
184 184
 	}
185
-	img, err := runtime.graph.Create(rwTar, container1, "unit test commited image")
185
+	img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "", nil)
186 186
 	if err != nil {
187 187
 		t.Error(err)
188 188
 	}
... ...
@@ -191,9 +335,8 @@ func TestCommitRun(t *testing.T) {
191 191
 
192 192
 	container2, err := runtime.Create(
193 193
 		&Config{
194
-			Image:  img.Id,
195
-			Memory: 33554432,
196
-			Cmd:    []string{"cat", "/world"},
194
+			Image: img.Id,
195
+			Cmd:   []string{"cat", "/world"},
197 196
 		},
198 197
 	)
199 198
 	if err != nil {
... ...
@@ -278,9 +421,8 @@ func TestRun(t *testing.T) {
278 278
 	defer nuke(runtime)
279 279
 	container, err := runtime.Create(
280 280
 		&Config{
281
-			Image:  GetTestImage(runtime).Id,
282
-			Memory: 33554432,
283
-			Cmd:    []string{"ls", "-al"},
281
+			Image: GetTestImage(runtime).Id,
282
+			Cmd:   []string{"ls", "-al"},
284 283
 		},
285 284
 	)
286 285
 	if err != nil {
... ...
@@ -1,16 +1,22 @@
1 1
 package main
2 2
 
3 3
 import (
4
+	"fmt"
4 5
 	"io"
5 6
 	"log"
7
+	"net"
6 8
 	"os"
7 9
 	"os/exec"
10
+	"path"
8 11
 	"time"
9 12
 )
10 13
 
11
-const DOCKER_PATH = "/home/creack/dotcloud/docker/docker/docker"
14
+var DOCKER_PATH string = path.Join(os.Getenv("DOCKERPATH"), "docker")
12 15
 
16
+// WARNING: this crashTest will 1) crash your host, 2) remove all containers
13 17
 func runDaemon() (*exec.Cmd, error) {
18
+	os.Remove("/var/run/docker.pid")
19
+	exec.Command("rm", "-rf", "/var/lib/docker/containers").Run()
14 20
 	cmd := exec.Command(DOCKER_PATH, "-d")
15 21
 	outPipe, err := cmd.StdoutPipe()
16 22
 	if err != nil {
... ...
@@ -37,17 +43,43 @@ func crashTest() error {
37 37
 		return err
38 38
 	}
39 39
 
40
+	var endpoint string
41
+	if ep := os.Getenv("TEST_ENDPOINT"); ep == "" {
42
+		endpoint = "192.168.56.1:7979"
43
+	} else {
44
+		endpoint = ep
45
+	}
46
+
47
+	c := make(chan bool)
48
+	var conn io.Writer
49
+
50
+	go func() {
51
+		conn, _ = net.Dial("tcp", endpoint)
52
+		c <- false
53
+	}()
54
+	go func() {
55
+		time.Sleep(2 * time.Second)
56
+		c <- true
57
+	}()
58
+	<-c
59
+
60
+	restartCount := 0
61
+	totalTestCount := 1
40 62
 	for {
41 63
 		daemon, err := runDaemon()
42 64
 		if err != nil {
43 65
 			return err
44 66
 		}
45
-		time.Sleep(5000 * time.Millisecond)
67
+		restartCount++
68
+		//		time.Sleep(5000 * time.Millisecond)
69
+		var stop bool
46 70
 		go func() error {
47
-			for i := 0; i < 100; i++ {
48
-				go func() error {
49
-					cmd := exec.Command(DOCKER_PATH, "run", "base", "echo", "hello", "world")
50
-					log.Printf("%d", i)
71
+			stop = false
72
+			for i := 0; i < 100 && !stop; {
73
+				func() error {
74
+					cmd := exec.Command(DOCKER_PATH, "run", "base", "echo", fmt.Sprintf("%d", totalTestCount))
75
+					i++
76
+					totalTestCount++
51 77
 					outPipe, err := cmd.StdoutPipe()
52 78
 					if err != nil {
53 79
 						return err
... ...
@@ -59,9 +91,10 @@ func crashTest() error {
59 59
 					if err := cmd.Start(); err != nil {
60 60
 						return err
61 61
 					}
62
-					go func() {
63
-						io.Copy(os.Stdout, outPipe)
64
-					}()
62
+					if conn != nil {
63
+						go io.Copy(conn, outPipe)
64
+					}
65
+
65 66
 					// Expecting error, do not check
66 67
 					inPipe.Write([]byte("hello world!!!!!\n"))
67 68
 					go inPipe.Write([]byte("hello world!!!!!\n"))
... ...
@@ -74,12 +107,11 @@ func crashTest() error {
74 74
 					outPipe.Close()
75 75
 					return nil
76 76
 				}()
77
-				time.Sleep(250 * time.Millisecond)
78 77
 			}
79 78
 			return nil
80 79
 		}()
81
-
82 80
 		time.Sleep(20 * time.Second)
81
+		stop = true
83 82
 		if err := daemon.Process.Kill(); err != nil {
84 83
 			return err
85 84
 		}
... ...
@@ -49,26 +49,39 @@ def docker(args, stdin=None):
49 49
 def image_exists(img):
50 50
 	return docker(["inspect", img]).read().strip() != ""
51 51
 
52
-def run_and_commit(img_in, cmd, stdin=None):
52
+def image_config(img):
53
+	return json.loads(docker(["inspect", img]).read()).get("config", {})
54
+
55
+def run_and_commit(img_in, cmd, stdin=None, author=None, run=None):
53 56
 	run_id = docker(["run"] + (["-i", "-a", "stdin"] if stdin else ["-d"]) + [img_in, "/bin/sh", "-c", cmd], stdin=stdin).read().rstrip()
54 57
 	print "---> Waiting for " + run_id
55 58
 	result=int(docker(["wait", run_id]).read().rstrip())
56 59
 	if result != 0:
57 60
 		print "!!! '{}' return non-zero exit code '{}'. Aborting.".format(cmd, result)
58 61
 		sys.exit(1)
59
-	return docker(["commit", run_id]).read().rstrip()
62
+	return docker(["commit"] + (["-author", author] if author else []) + (["-run", json.dumps(run)] if run is not None else []) + [run_id]).read().rstrip()
60 63
 
61
-def insert(base, src, dst):
64
+def insert(base, src, dst, author=None):
62 65
 	print "COPY {} to {} in {}".format(src, dst, base)
63 66
 	if dst == "":
64 67
 		raise Exception("Missing destination path")
65 68
 	stdin = file(src)
66 69
 	stdin.seek(0)
67
-	return run_and_commit(base, "cat > {0}; chmod +x {0}".format(dst), stdin=stdin)
68
-	
70
+	return run_and_commit(base, "cat > {0}; chmod +x {0}".format(dst), stdin=stdin, author=author)
71
+
72
+def add(base, src, dst, author=None):
73
+	print "PUSH to {} in {}".format(dst, base)
74
+	if src == ".":
75
+		tar = subprocess.Popen(["tar", "-c", "."], stdout=subprocess.PIPE).stdout
76
+	else:
77
+		tar = subprocess.Popen(["curl", src], stdout=subprocess.PIPE).stdout
78
+	if dst == "":
79
+		raise Exception("Missing argument to push")
80
+	return run_and_commit(base, "mkdir -p '{0}' && tar -C '{0}' -x".format(dst), stdin=tar, author=author)
69 81
 
70 82
 def main():
71 83
 	base=""
84
+	maintainer=""
72 85
 	steps = []
73 86
 	try:
74 87
 		for line in sys.stdin.readlines():
... ...
@@ -77,26 +90,51 @@ def main():
77 77
 			if line == "" or line[0] == "#":
78 78
 				continue
79 79
 			op, param = line.split("	", 1)
80
+			print op.upper() + " " + param
80 81
 			if op == "from":
81
-				print "FROM " + param
82 82
 				base = param
83 83
 				steps.append(base)
84
+			elif op == "maintainer":
85
+				maintainer = param
84 86
 			elif op == "run":
85
-				print "RUN " + param
86
-				result = run_and_commit(base, param)
87
+				result = run_and_commit(base, param, author=maintainer)
87 88
 				steps.append(result)
88 89
 				base = result
89 90
 				print "===> " + base
90 91
 			elif op == "copy":
91 92
 				src, dst = param.split("	", 1)
92
-				result = insert(base, src, dst)
93
+				result = insert(base, src, dst, author=maintainer)
93 94
 				steps.append(result)
94 95
 				base = result
95 96
 				print "===> " + base
97
+			elif op == "add":
98
+				src, dst = param.split("	", 1)
99
+				result = add(base, src, dst, author=maintainer)
100
+				steps.append(result)
101
+				base=result
102
+				print "===> " + base
103
+			elif op == "expose":
104
+				config = image_config(base)
105
+				if config.get("PortSpecs") is None:
106
+					config["PortSpecs"] = []
107
+				portspec = param.strip()
108
+				config["PortSpecs"].append(portspec)
109
+				result = run_and_commit(base, "# (nop) expose port {}".format(portspec), author=maintainer, run=config)
110
+				steps.append(result)
111
+				base=result
112
+				print "===> " + base
113
+			elif op == "cmd":
114
+				config  = image_config(base)
115
+				cmd = list(json.loads(param))
116
+				config["Cmd"] = cmd
117
+				result = run_and_commit(base, "# (nop) set default command to '{}'".format(" ".join(cmd)), author=maintainer, run=config)
118
+				steps.append(result)
119
+				base=result
120
+				print "===> " + base
96 121
 			else:
97 122
 				print "Skipping uknown op " + op
98 123
 	except:
99
-		docker(["rmi"] + steps)
124
+		docker(["rmi"] + steps[1:])
100 125
 		raise
101 126
 	print base
102 127
 
... ...
@@ -1,11 +1,13 @@
1 1
 # Start build from a know base image
2
+maintainer	Solomon Hykes <solomon@dotcloud.com>
2 3
 from	base:ubuntu-12.10
3 4
 # Update ubuntu sources
4 5
 run	echo 'deb http://archive.ubuntu.com/ubuntu quantal main universe multiverse' > /etc/apt/sources.list
5 6
 run	apt-get update
6 7
 # Install system packages
7 8
 run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q git
8
-run DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl
9
-run DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang
9
+run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl
10
+run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang
10 11
 # Insert files from the host (./myscript must be present in the current directory)
11
-copy	myscript /usr/local/bin/myscript
12
+copy	myscript	/usr/local/bin/myscript
13
+push	/src
12 14
new file mode 100644
... ...
@@ -0,0 +1,3 @@
0
+#!/bin/sh
1
+
2
+echo hello, world!
... ...
@@ -45,7 +45,7 @@ then
45 45
   echo "Upstart script already exists."
46 46
 else
47 47
   echo "Creating /etc/init/dockerd.conf..."
48
-  echo "exec /usr/local/bin/docker -d" > /etc/init/dockerd.conf
48
+  echo "exec env LANG=\"en_US.UTF-8\" /usr/local/bin/docker -d" > /etc/init/dockerd.conf
49 49
 fi
50 50
 
51 51
 echo "Starting dockerd..."
52 52
new file mode 100755
... ...
@@ -0,0 +1,61 @@
0
+#!/bin/bash
1
+set -e
2
+
3
+# these should match the names found at http://www.debian.org/releases/
4
+stableSuite='squeeze'
5
+testingSuite='wheezy'
6
+unstableSuite='sid'
7
+
8
+# if suite is equal to this, it gets the "latest" tag
9
+latestSuite="$testingSuite"
10
+
11
+variant='minbase'
12
+include='iproute,iputils-ping'
13
+
14
+repo="$1"
15
+suite="${2:-$latestSuite}"
16
+mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided
17
+
18
+if [ ! "$repo" ]; then
19
+	echo >&2 "usage: $0 repo [suite [mirror]]"
20
+	echo >&2 "   ie: $0 tianon/debian squeeze"
21
+	exit 1
22
+fi
23
+
24
+target="/tmp/docker-rootfs-debian-$suite-$$-$RANDOM"
25
+
26
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
27
+returnTo="$(pwd -P)"
28
+
29
+set -x
30
+
31
+# bootstrap
32
+mkdir -p "$target"
33
+sudo debootstrap --verbose --variant="$variant" --include="$include" "$suite" "$target" "$mirror"
34
+
35
+cd "$target"
36
+
37
+# create the image
38
+img=$(sudo tar -c . | docker import -)
39
+
40
+# tag suite
41
+docker tag $img $repo $suite
42
+
43
+if [ "$suite" = "$latestSuite" ]; then
44
+	# tag latest
45
+	docker tag $img $repo latest
46
+fi
47
+
48
+# test the image
49
+docker run -i -t $repo:$suite echo success
50
+
51
+# unstable's version numbers match testing (since it's mostly just a sandbox for testing), so it doesn't get a version number tag
52
+if [ "$suite" != "$unstableSuite" -a "$suite" != 'unstable' ]; then
53
+	# tag the specific version
54
+	ver=$(docker run $repo:$suite cat /etc/debian_version)
55
+	docker tag $img $repo $ver
56
+fi
57
+
58
+# cleanup
59
+cd "$returnTo"
60
+sudo rm -rf "$target"
0 61
new file mode 100644
... ...
@@ -0,0 +1,3 @@
0
+# Vagrant-docker
1
+
2
+This is a placeholder for the official vagrant-docker, a plugin for Vagrant (http://vagrantup.com) which exposes Docker as a provider.
... ...
@@ -7,16 +7,17 @@ import (
7 7
 	"github.com/dotcloud/docker/rcli"
8 8
 	"github.com/dotcloud/docker/term"
9 9
 	"io"
10
+	"io/ioutil"
10 11
 	"log"
11 12
 	"os"
12 13
 	"os/signal"
13 14
 	"runtime"
15
+	"strconv"
14 16
 	"syscall"
15 17
 )
16 18
 
17 19
 var (
18
-	GIT_COMMIT      string
19
-	NO_MEMORY_LIMIT string
20
+	GIT_COMMIT string
20 21
 )
21 22
 
22 23
 func main() {
... ...
@@ -28,6 +29,7 @@ func main() {
28 28
 	// FIXME: Switch d and D ? (to be more sshd like)
29 29
 	flDaemon := flag.Bool("d", false, "Daemon mode")
30 30
 	flDebug := flag.Bool("D", false, "Debug mode")
31
+	flAutoRestart := flag.Bool("r", false, "Restart previously running containers")
31 32
 	bridgeName := flag.String("b", "", "Attach containers to a pre-existing network bridge")
32 33
 	pidfile := flag.String("p", "/var/run/docker.pid", "File containing process PID")
33 34
 	flag.Parse()
... ...
@@ -40,16 +42,12 @@ func main() {
40 40
 		os.Setenv("DEBUG", "1")
41 41
 	}
42 42
 	docker.GIT_COMMIT = GIT_COMMIT
43
-	docker.NO_MEMORY_LIMIT = NO_MEMORY_LIMIT == "1"
44 43
 	if *flDaemon {
45 44
 		if flag.NArg() != 0 {
46 45
 			flag.Usage()
47 46
 			return
48 47
 		}
49
-		if NO_MEMORY_LIMIT == "1" {
50
-			log.Printf("WARNING: This version of docker has been compiled without memory limit support.")
51
-		}
52
-		if err := daemon(*pidfile); err != nil {
48
+		if err := daemon(*pidfile, *flAutoRestart); err != nil {
53 49
 			log.Fatal(err)
54 50
 		}
55 51
 	} else {
... ...
@@ -60,8 +58,13 @@ func main() {
60 60
 }
61 61
 
62 62
 func createPidFile(pidfile string) error {
63
-	if _, err := os.Stat(pidfile); err == nil {
64
-		return fmt.Errorf("pid file found, ensure docker is not running or delete %s", pidfile)
63
+	if pidString, err := ioutil.ReadFile(pidfile); err == nil {
64
+		pid, err := strconv.Atoi(string(pidString))
65
+		if err == nil {
66
+			if _, err := os.Stat(fmt.Sprintf("/proc/%d/", pid)); err == nil {
67
+				return fmt.Errorf("pid file found, ensure docker is not running or delete %s", pidfile)
68
+			}
69
+		}
65 70
 	}
66 71
 
67 72
 	file, err := os.Create(pidfile)
... ...
@@ -81,7 +84,7 @@ func removePidFile(pidfile string) {
81 81
 	}
82 82
 }
83 83
 
84
-func daemon(pidfile string) error {
84
+func daemon(pidfile string, autoRestart bool) error {
85 85
 	if err := createPidFile(pidfile); err != nil {
86 86
 		log.Fatal(err)
87 87
 	}
... ...
@@ -99,7 +102,7 @@ func daemon(pidfile string) error {
99 99
 	if runtime.GOARCH != "amd64" {
100 100
 		log.Fatalf("The docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH)
101 101
 	}
102
-	runtime, err := docker.NewRuntime()
102
+	runtime, err := docker.NewRuntime(autoRestart)
103 103
 	if err != nil {
104 104
 		return err
105 105
 	}
... ...
@@ -51,6 +51,7 @@ docs:
51 51
 	cp sources/dotcloud.yml $(BUILDDIR)/html/
52 52
 	cp sources/CNAME $(BUILDDIR)/html/
53 53
 	cp sources/.nojekyll $(BUILDDIR)/html/
54
+	cp sources/nginx.conf $(BUILDDIR)/html/
54 55
 	@echo
55 56
 	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
56 57
 
... ...
@@ -9,3 +9,19 @@
9 9
     Create a new image from a container's changes
10 10
 
11 11
       -m="": Commit message
12
 +      -author="": Author (eg. "John Hannibal Smith <hannibal@a-team.com>")
13
 +      -run="": Config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs": ["22"]})
14
+
15
+Full -run example::
16
+
17
+    {"Hostname": "",
18
+     "User": "",
19
+     "Memory": 0,
20
+     "MemorySwap": 0,
21
+     "PortSpecs": ["22", "80", "443"],
22
+     "Tty": true,
23
+     "OpenStdin": true,
24
+     "StdinOnce": true,
25
+     "Env": ["FOO=BAR", "FOO2=BAR2"],
26
+     "Cmd": ["cat", "-e", "/etc/resolv.conf"],
27
+     "Dns": ["8.8.8.8", "8.8.4.4"]}
... ...
@@ -10,7 +10,7 @@ Building blocks
10 10
 
11 11
 Images
12 12
 ------
13
-An original container image. These are stored on disk and are comparable with what you normally expect from a stoppped virtual machine image. Images are stored (and retrieved from) repository
13
+An original container image. These are stored on disk and are comparable with what you normally expect from a stopped virtual machine image. Images are stored (and retrieved from) repository
14 14
 
15 15
 Images are stored on your local file system under /var/lib/docker/images
16 16
 
... ...
@@ -49,7 +49,7 @@ Save the changed we just made in the container to a new image called "_/builds/g
49 49
     WEB_WORKER=$(docker run -d -p 5000 $BUILD_IMG /usr/local/bin/runapp)
50 50
 
51 51
 - **"docker run -d "** run a command in a new container. We pass "-d" so it runs as a daemon.
52
-  **"-p 5000"* the web app is going to listen on this port, so it must be mapped from the container to the host system.
52
+- **"-p 5000"** the web app is going to listen on this port, so it must be mapped from the container to the host system.
53 53
 - **"$BUILD_IMG"** is the image we want to run the command inside of.
54 54
 - **/usr/local/bin/runapp** is the command which starts the web app.
55 55
 
... ...
@@ -7,27 +7,16 @@
7 7
 Running The Examples
8 8
 --------------------
9 9
 
10
-There are two ways to run docker, daemon mode and standalone mode.
11
-
12
-When you run the docker command it will first check if there is a docker daemon running in the background it can connect to.
13
-
14
-* If it exists it will use that daemon to run all of the commands.
15
-* If it does not exist docker will run in standalone mode (docker will exit after each command).
16
-
17
-Docker needs to be run from a privileged account (root).
18
-
19
-1. The most common (and recommended) way is to run a docker daemon as root in the background, and then connect to it from the docker client from any account.
10
+All the examples assume your machine is running the docker daemon. To run the docker daemon in the background, simply type:
20 11
 
21 12
    .. code-block:: bash
22 13
 
23
-      # starting docker daemon in the background
24 14
       sudo docker -d &
25 15
 
26
-      # now you can run docker commands from any account.
27
-      docker <command>
28
-
29
-2. Standalone: You need to run every command as root, or using sudo
16
+Now you can run docker in client mode: all commands will be forwarded to the docker daemon, so the client
17
+can run from any account.
30 18
 
31 19
    .. code-block:: bash
32 20
 
33
-       sudo docker <command>
21
+      # now you can run docker commands from any account.
22
+      docker help
... ...
@@ -71,34 +71,40 @@
71 71
                 <h2>
72 72
                     <a name="installing-on-ubuntu-1204-and-1210" class="anchor" href="#installing-on-ubuntu-1204-and-1210"><span class="mini-icon mini-icon-link"></span>
73 73
                     </a>Installing on Ubuntu</h2>
74
+
75
+                    <p><strong>Requirements</strong></p>
76
+                    <ul>
77
+                        <li>Ubuntu 12.04 (LTS) (64-bit)</li>
78
+                        <li> or Ubuntu 12.10 (quantal) (64-bit)</li>
79
+                    </ul>
74 80
                 <ol>
75 81
                     <li>
76
-                        <p>Install dependencies:</p>
82
+                    <p><strong>Install dependencies</strong></p>
83
+                    The linux-image-extra package is only needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
84
+                    <pre>sudo apt-get install linux-image-extra-`uname -r`</pre>
77 85
 
78
-                        <div class="highlight">
79
-                            <pre>sudo apt-get install lxc wget bsdtar curl</pre>
80
-                            <pre>sudo apt-get install linux-image-extra-<span class="sb">`</span>uname -r<span class="sb">`</span></pre></div>
81 86
 
82
-                        <p>The <code>linux-image-extra</code> package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.</p>
83 87
                     </li>
84 88
                     <li>
85
-                        <p>Install the latest docker binary:</p>
86
-
89
+                        <p><strong>Install Docker</strong></p>
90
+                        <p>Add the Ubuntu PPA (Personal Package Archive) sources to your apt sources list, update and install.</p>
91
+                        <p>You may see some warnings that the GPG keys cannot be verified.</p>
87 92
                         <div class="highlight">
88
-                            <pre>wget http://get.docker.io/builds/<span class="k">$(</span>uname -s<span class="k">)</span>/<span class="k">$(</span>uname -m<span class="k">)</span>/docker-master.tgz</pre>
89
-                            <pre>tar -xf docker-master.tgz</pre>
93
+                            <pre>sudo sh -c "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >> /etc/apt/sources.list"</pre>
94
+                            <pre>sudo apt-get update</pre>
95
+                            <pre>sudo apt-get install lxc-docker</pre>
90 96
                         </div>
97
+
98
+
91 99
                     </li>
100
+
92 101
                     <li>
93
-                        <p>Run your first container!</p>
102
+                        <p><strong>Run!</strong></p>
94 103
 
95
-                        <div class="highlight"><pre><span class="nb">cd </span>docker-master</pre>
96
-                            <pre>sudo ./docker run -i -t base /bin/bash</pre>
104
+                        <div class="highlight">
105
+                            <pre>docker run -i -t ubuntu /bin/bash</pre>
97 106
                         </div>
98
-                        <p>Done!</p>
99
-                        <p>Consider adding docker to your <code>PATH</code> for simplicity.</p>
100 107
                     </li>
101
-
102 108
                     Continue with the <a href="http://docs.docker.io/en/latest/examples/hello_world/">Hello world</a> example.
103 109
                 </ol>
104 110
             </section>
... ...
@@ -117,7 +123,7 @@
117 117
                     vagrant and an Ubuntu virtual machine.</strong></p>
118 118
 
119 119
                 <ul>
120
-                    <li><a href="http://docs.docker.io/en/latest/installation/macos/">Mac OS X and other linuxes</a></li>
120
+                    <li><a href="http://docs.docker.io/en/latest/installation/vagrant/">Mac OS X and other linuxes</a></li>
121 121
                     <li><a href="http://docs.docker.io/en/latest/installation/windows/">Windows</a></li>
122 122
                 </ul>
123 123
 
... ...
@@ -15,6 +15,7 @@ This documentation has the following resources:
15 15
    examples/index
16 16
    contributing/index
17 17
    commandline/index
18
+   registry/index
18 19
    faq
19 20
 
20 21
 
... ...
@@ -1,8 +1,9 @@
1 1
 Amazon EC2
2 2
 ==========
3 3
 
4
-    Please note this is a community contributed installation path. The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version
5
-    may be out of date because it depends on some binaries to be updated and published
4
+  Please note this is a community contributed installation path. The only 'official' installation is using the
5
+  :ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
6
+
6 7
 
7 8
 Installation
8 9
 ------------
... ...
@@ -17,7 +18,7 @@ Docker can now be installed on Amazon EC2 with a single vagrant command. Vagrant
17 17
        vagrant plugin install vagrant-aws
18 18
 
19 19
 
20
-3. Get the docker sources, this will give you the latest Vagrantfile and puppet manifests.
20
+3. Get the docker sources, this will give you the latest Vagrantfile.
21 21
 
22 22
    ::
23 23
 
24 24
new file mode 100644
... ...
@@ -0,0 +1,65 @@
0
+.. _arch_linux:
1
+
2
+Arch Linux
3
+==========
4
+
5
+  Please note this is a community contributed installation path. The only 'official' installation is using the
6
+  :ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
7
+
8
+
9
+Installing on Arch Linux is not officially supported but can be handled via 
10
+either of the following AUR packages:
11
+
12
+* `lxc-docker <https://aur.archlinux.org/packages/lxc-docker/>`_
13
+* `lxc-docker-git <https://aur.archlinux.org/packages/lxc-docker-git/>`_
14
+
15
+The lxc-docker package will install the latest tagged version of docker. 
16
+The lxc-docker-git package will build from the current master branch.
17
+
18
+Dependencies
19
+------------
20
+
21
+Docker depends on several packages which are specified as dependencies in
22
+either AUR package.
23
+
24
+* aufs3
25
+* bridge-utils
26
+* go
27
+* iproute2
28
+* linux-aufs_friendly
29
+* lxc
30
+
31
+Installation
32
+------------
33
+
34
+The instructions here assume **yaourt** is installed.  See 
35
+`Arch User Repository <https://wiki.archlinux.org/index.php/Arch_User_Repository#Installing_packages>`_
36
+for information on building and installing packages from the AUR if you have not
37
+done so before.
38
+
39
+Keep in mind that if **linux-aufs_friendly** is not already installed that a
40
+new kernel will be compiled and this can take quite a while.
41
+
42
+::
43
+
44
+    yaourt -S lxc-docker-git
45
+
46
+
47
+Starting Docker
48
+---------------
49
+
50
+Prior to starting docker modify your bootloader to use the 
51
+**linux-aufs_friendly** kernel and reboot your system.
52
+
53
+There is a systemd service unit created for docker.  To start the docker service:
54
+
55
+::
56
+
57
+    sudo systemctl start docker
58
+
59
+
60
+To start on system boot:
61
+
62
+::
63
+
64
+    sudo systemctl enable docker
0 65
new file mode 100644
... ...
@@ -0,0 +1,53 @@
0
+.. _binaries:
1
+
2
+Binaries
3
+========
4
+
5
+  **Please note this project is currently under heavy development. It should not be used in production.**
6
+
7
+
8
+Right now, the officially supported distributions are:
9
+
10
+- Ubuntu 12.04 (precise LTS) (64-bit)
11
+- Ubuntu 12.10 (quantal) (64-bit)
12
+
13
+
14
+Install dependencies:
15
+---------------------
16
+
17
+::
18
+
19
+    sudo apt-get install lxc bsdtar
20
+    sudo apt-get install linux-image-extra-`uname -r`
21
+
22
+The linux-image-extra package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
23
+
24
+Install the docker binary:
25
+
26
+::
27
+
28
+    wget http://get.docker.io/builds/Linux/x86_64/docker-master.tgz
29
+    tar -xf docker-master.tgz
30
+    sudo cp ./docker-master /usr/local/bin
31
+
32
+Note: docker currently only supports 64-bit Linux hosts.
33
+
34
+
35
+Run the docker daemon
36
+---------------------
37
+
38
+::
39
+
40
+    sudo docker -d &
41
+
42
+
43
+Run your first container!
44
+-------------------------
45
+
46
+::
47
+
48
+    docker run -i -t ubuntu /bin/bash
49
+
50
+
51
+
52
+Continue with the :ref:`hello_world` example.
0 53
\ No newline at end of file
... ...
@@ -13,7 +13,9 @@ Contents:
13 13
    :maxdepth: 1
14 14
 
15 15
    ubuntulinux
16
-   macos
16
+   binaries
17
+   archlinux
18
+   vagrant
17 19
    windows
18 20
    amazon
19 21
    upgrading
20 22
deleted file mode 100644
... ...
@@ -1,66 +0,0 @@
1
-
2
-Mac OS X and other linux
3
-========================
4
-
5
-  Please note this is a community contributed installation path. The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version
6
-  may be out of date because it depends on some binaries to be updated and published
7
-
8
-
9
-Requirements
10
-
11
-We currently rely on some Ubuntu-linux specific packages, this will change in the future, but for now we provide a
12
-streamlined path to install Virtualbox with a Ubuntu 12.10 image using Vagrant.
13
-
14
-1. Install virtualbox from https://www.virtualbox.org/ (or use your package manager)
15
-2. Install vagrant from http://www.vagrantup.com/ (or use your package manager)
16
-3. Install git if you had not installed it before, check if it is installed by running
17
-   ``git`` in a terminal window
18
-
19
-We recommend having at least about 2Gb of free disk space and 2Gb RAM (or more).
20
-
21
-Installation
22
-
23
-1. Fetch the docker sources
24
-
25
-.. code-block:: bash
26
-
27
-   git clone https://github.com/dotcloud/docker.git
28
-
29
-2. Run vagrant from the sources directory
30
-
31
-.. code-block:: bash
32
-
33
-    vagrant up
34
-
35
-Vagrant will:
36
-
37
-* Download the Quantal64 base ubuntu virtual machine image from get.docker.io/
38
-* Boot this image in virtualbox
39
-
40
-Then it will use Puppet to perform an initial setup in this machine:
41
-
42
-* Download & untar the most recent docker binary tarball to vagrant homedir.
43
-* Debootstrap to /var/lib/docker/images/ubuntu.
44
-* Install & run dockerd as service.
45
-* Put docker in /usr/local/bin.
46
-* Put latest Go toolchain in /usr/local/go.
47
-
48
-You now have a Ubuntu Virtual Machine running with docker pre-installed.
49
-
50
-To access the VM and use Docker, Run ``vagrant ssh`` from the same directory as where you ran
51
-``vagrant up``. Vagrant will make sure to connect you to the correct VM.
52
-
53
-.. code-block:: bash
54
-
55
-    vagrant ssh
56
-
57
-Now you are in the VM, run docker
58
-
59
-.. code-block:: bash
60
-
61
-    docker
62
-
63
-
64
-Continue with the :ref:`hello_world` example.
... ...
@@ -6,51 +6,56 @@ Ubuntu Linux
6 6
   **Please note this project is currently under heavy development. It should not be used in production.**
7 7
 
8 8
 
9
+Right now, the officially supported distributions are:
9 10
 
10
-Installing on Ubuntu 12.04 and 12.10
11
+- Ubuntu 12.04 (precise LTS) (64-bit)
12
+- Ubuntu 12.10 (quantal) (64-bit)
11 13
 
12
-Right now, the officially supported distributions are:
14
+Dependencies
15
+------------
16
+
17
+The linux-image-extra package is only needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
18
+
19
+.. code-block:: bash
20
+
21
+   sudo apt-get install linux-image-extra-`uname -r`
22
+
23
+
24
+Installation
25
+------------
13 26
 
14
-Ubuntu 12.04 (precise LTS)
15
-Ubuntu 12.10 (quantal)
16
-Docker probably works on other distributions featuring a recent kernel, the AUFS patch, and up-to-date lxc. However this has not been tested.
27
+Docker is available as a Ubuntu PPA (Personal Package Archive),
28
+`hosted on launchpad  <https://launchpad.net/~dotcloud/+archive/lxc-docker>`_
29
+which makes installing Docker on Ubuntu very easy.
17 30
 
18
-Install dependencies:
19 31
 
20
-::
21 32
 
22
-    sudo apt-get install lxc wget bsdtar curl
23
-    sudo apt-get install linux-image-extra-`uname -r`
33
+Add the custom package sources to your apt sources list. Copy and paste the following lines at once.
24 34
 
25
-The linux-image-extra package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
35
+.. code-block:: bash
26 36
 
27
-Install the latest docker binary:
37
+   sudo sh -c "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >> /etc/apt/sources.list"
28 38
 
29
-::
30 39
 
31
-    wget http://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-master.tgz
32
-    tar -xf docker-master.tgz
40
+Update your sources. You will see a warning that GPG signatures cannot be verified.
33 41
 
34
-Run your first container!
42
+.. code-block:: bash
35 43
 
36
-::
44
+   sudo apt-get update
37 45
 
38
-    cd docker-master
39 46
 
40
-::
47
+Now install it, you will see another warning that the package cannot be authenticated. Confirm install.
41 48
 
42
-    sudo ./docker run -i -t base /bin/bash
49
+.. code-block:: bash
43 50
 
51
+   sudo apt-get install lxc-docker
44 52
 
45
-To run docker as a daemon, in the background, and allow non-root users to run ``docker`` start
46
-docker -d
47 53
 
48
-::
54
+Verify it worked
49 55
 
50
-    sudo ./docker -d &
56
+.. code-block:: bash
51 57
 
58
+   docker
52 59
 
53
-Consider adding docker to your PATH for simplicity.
54 60
 
55
-Continue with the :ref:`hello_world` example.
56 61
\ No newline at end of file
62
+**Done!**, now continue with the :ref:`hello_world` example.
... ...
@@ -3,7 +3,8 @@
3 3
 Upgrading
4 4
 ============
5 5
 
6
-   We assume you are upgrading from within the operating system which runs your docker daemon.
6
+These instructions are for upgrading your Docker binary for when you had a custom (non package manager) installation.
7
 +If you installed docker using apt-get, use that to upgrade.
7 8
 
8 9
 
9 10
 Get the latest docker binary:
10 11
new file mode 100644
... ...
@@ -0,0 +1,70 @@
0
+
1
+.. _install_using_vagrant:
2
+
3
+Using Vagrant
4
+=============
5
+
6
+  Please note this is a community contributed installation path. The only 'official' installation is using the
7
+  :ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
8
+
9
+**Requirements:**
10
+This guide will setup a new virtual machine with docker installed on your computer. This works on most operating
11
+systems, including MacOX, Windows, Linux, FreeBSD and others. If you can install these and have at least 400Mb RAM
12
+to spare you should be good.
13
+
14
+
15
+Install Vagrant and Virtualbox
16
+------------------------------
17
+
18
+1. Install virtualbox from https://www.virtualbox.org/ (or use your package manager)
19
+2. Install vagrant from http://www.vagrantup.com/ (or use your package manager)
20
+3. Install git if you had not installed it before, check if it is installed by running
21
+   ``git`` in a terminal window
22
+
23
+
24
+Spin it up
25
+----------
26
+
27
+1. Fetch the docker sources (this includes the Vagrantfile for machine setup).
28
+
29
+   .. code-block:: bash
30
+
31
+      git clone https://github.com/dotcloud/docker.git
32
+
33
+2. Run vagrant from the sources directory
34
+
35
+   .. code-block:: bash
36
+
37
+      vagrant up
38
+
39
+   Vagrant will:
40
+
41
+   * Download the 'official' Precise64 base ubuntu virtual machine image from vagrantup.com
42
+   * Boot this image in virtualbox
43
+   * Add the `Docker PPA sources <https://launchpad.net/~dotcloud/+archive/lxc-docker>`_ to /etc/apt/sources.lst
44
+   * Update your sources
45
+   * Install lxc-docker
46
+
47
+   You now have a Ubuntu Virtual Machine running with docker pre-installed.
48
+
49
+Connect
50
+-------
51
+
52
+To access the VM and use Docker, Run ``vagrant ssh`` from the same directory as where you ran
53
+``vagrant up``. Vagrant will connect you to the correct VM.
54
+
55
+.. code-block:: bash
56
+
57
+   vagrant ssh
58
+
59
+Run
60
+-----
61
+
62
+Now you are in the VM, run docker
63
+
64
+.. code-block:: bash
65
+
66
+   docker
67
+
68
+
69
+Continue with the :ref:`hello_world` example.
... ...
@@ -3,8 +3,8 @@
3 3
 :keywords: Docker, Docker documentation, Windows, requirements, virtualbox, vagrant, git, ssh, putty, cygwin
4 4
 
5 5
 
6
-Windows
7
-=========
6
+Windows (with Vagrant)
7
+======================
8 8
 
9 9
   Please note this is a community contributed installation path. The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version
10 10
   may be out of date because it depends on some binaries to be updated and published
11 11
new file mode 100644
... ...
@@ -0,0 +1,6 @@
0
+
1
+# rule to redirect original links created when hosted on github pages
2
+rewrite ^/documentation/(.*).html http://docs.docker.io/en/latest/$1/ permanent;
3
+
4
+# rewrite the stuff which was on the current page
5
+rewrite ^/gettingstarted.html$ /gettingstarted/ permanent;
0 6
new file mode 100644
... ...
@@ -0,0 +1,464 @@
0
+===================
1
+Docker Registry API
2
+===================
3
+
4
+.. contents:: Table of Contents
5
+
6
+1. The 3 roles
7
+===============
8
+
9
+1.1 Index
10
+---------
11
+
12
+The Index is responsible for centralizing information about:
13
+- User accounts
14
+- Checksums of the images
15
+- Public namespaces
16
+
17
+The Index has different components:
18
+- Web UI
19
+- Meta-data store (comments, stars, list public repositories)
20
+- Authentication service
21
+- Tokenization
22
+
23
+The Index is authoritative for this information.
24
+
25
+We expect that there will be only one instance of the index, run and managed by dotCloud.
26
+
27
+1.2 Registry
28
+------------
29
+- It stores the images and the graph for a set of repositories
30
+- It does not have user accounts data
31
+- It has no notion of user accounts or authorization
32
+- It delegates authentication and authorization to the Index Auth service using tokens
33
+- It supports different storage backends (S3, cloud files, local FS)
34
+- It doesn’t have a local database
35
+- It will be open-sourced at some point
36
+
37
+We expect that there will be multiple registries out there. To help to grasp the context, here are some examples of registries:
38
+
39
+- **sponsor registry**: such a registry is provided by a third-party hosting infrastructure as a convenience for their customers and the docker community as a whole. Its costs are supported by the third party, but the management and operation of the registry are supported by dotCloud. It features read/write access, and delegates authentication and authorization to the Index.
40
+- **mirror registry**: such a registry is provided by a third-party hosting infrastructure but is targeted at their customers only. Some mechanism (unspecified to date) ensures that public images are pulled from a sponsor registry to the mirror registry, to make sure that the customers of the third-party provider can “docker pull” those images locally.
41
+- **vendor registry**: such a registry is provided by a software vendor, who wants to distribute docker images. It would be operated and managed by the vendor. Only users authorized by the vendor would be able to get write access. Some images would be public (accessible for anyone), others private (accessible only for authorized users). Authentication and authorization would be delegated to the Index. The goal of vendor registries is to let someone do “docker pull basho/riak1.3” and automatically push from the vendor registry (instead of a sponsor registry); i.e. get all the convenience of a sponsor registry, while retaining control on the asset distribution.
42
+- **private registry**: such a registry is located behind a firewall, or protected by an additional security layer (HTTP authorization, SSL client-side certificates, IP address authorization...). The registry is operated by a private entity, outside of dotCloud’s control. It can optionally delegate additional authorization to the Index, but it is not mandatory.
43
+
44
+.. note::
45
+
46
+    Mirror registries and private registries which do not use the Index don’t even need to run the registry code. They can be implemented by any kind of transport implementing HTTP GET and PUT. Read-only registries can be powered by a simple static HTTP server. 
47
+
48
+.. note::
49
+
50
+    The latter implies that while HTTP is the protocol of choice for a registry, multiple schemes are possible (and in some cases, trivial):
51
+        - HTTP with GET (and PUT for read-write registries);
52
+        - local mount point;
53
+        - remote docker addressed through SSH.
54
+
55
+The latter would only require two new commands in docker, e.g. “registryget” and “registryput”, wrapping access to the local filesystem (and optionally doing consistency checks). Authentication and authorization are then delegated to SSH (e.g. with public keys).
56
+
57
+1.3 Docker
58
+----------
59
+
60
+On top of being a runtime for LXC, Docker is the Registry client. It supports:
61
+- Push / Pull on the registry
62
+- Client authentication on the Index
63
+
64
+2. Workflow
65
+===========
66
+
67
+2.1 Pull
68
+--------
69
+
70
+.. image:: /static_files/docker_pull_chart.png
71
+
72
+1. Contact the Index to know where I should download “samalba/busybox”
73
+2. Index replies:
74
+   a. “samalba/busybox” is on Registry A
75
+   b. here are the checksums for “samalba/busybox” (for all layers)
76
+   c. token
77
+3. Contact Registry A to receive the layers for “samalba/busybox” (all of them to the base image). Registry A is authoritative for “samalba/busybox” but keeps a copy of all inherited layers and serves them all from the same location.
78
+4. registry contacts index to verify if token/user is allowed to download images
79
+5. Index returns true/false, letting the registry know if it should proceed or error out
80
+6. Get the payload for all layers
81
+
82
+It’s possible to run docker pull https://<registry>/repositories/samalba/busybox. In this case, docker bypasses the Index. However the security is not guaranteed (in case Registry A is corrupted) because there won’t be any checksum checks.
83
+
84
+Currently registry redirects to s3 urls for downloads, going forward all downloads need to be streamed through the registry. The Registry will then abstract the calls to S3 by a top-level class which implements sub-classes for S3 and local storage.
85
+
86
+Token is only returned when it is a private repo, public repos do not require tokens to be returned. The Registry will still contact the Index to make sure the pull is authorized (“is it ok to download this repos without a Token?”).
87
+
88
+API (pulling repository foo/bar):
89
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
90
+
91
+1. (Docker -> Index) GET /v1/repositories/foo/bar/images
92
+    **Headers**:
93
+        Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
94
+        X-Docker-Token: true
95
+    **Action**:
96
+        (looking up the foo/bar in db and gets images and checksums for that repo (all if no tag is specified, if tag, only checksums for those tags) see part 4.4.1)
97
+
98
+2. (Index -> Docker) HTTP 200 OK
99
+
100
+    **Headers**:
101
+        - Authorization: Token signature=123abc,repository=”foo/bar”,access=write
102
+        - X-Docker-Endpoints: registry.docker.io [, registry2.docker.io]
103
+    **Body**:
104
+        Jsonified checksums (see part 4.4.1)
105
+
106
+3. (Docker -> Registry) GET /v1/repositories/foo/bar/tags/latest
107
+    **Headers**: 
108
+        Authorization: Token signature=123abc,repository=”foo/bar”,access=write
109
+
110
+4. (Registry -> Index) GET /v1/repositories/foo/bar/images
111
+
112
+    **Headers**:
113
+        Authorization: Token signature=123abc,repository=”foo/bar”,access=read
114
+
115
+    **Body**:
116
+        <ids and checksums in payload>
117
+
118
+    **Action**:
119
+        ( Lookup token see if they have access to pull.)
120
+
121
+        If good: 
122
+            HTTP 200 OK
123
+            Index will invalidate the token
124
+        If bad: 
125
+            HTTP 401 Unauthorized
126
+
127
+5. (Docker -> Registry) GET /v1/images/928374982374/ancestry
128
+    **Action**:
129
+        (for each image id returned in the registry, fetch /json + /layer)
130
+
131
+.. note::
132
+
133
+    If someone makes a second request, then we will always give a new token, never reuse tokens.
134
+
135
+2.2 Push
136
+--------
137
+
138
+.. image:: /static_files/docker_push_chart.png
139
+
140
+1. Contact the index to allocate the repository name “samalba/busybox” (authentication required with user credentials)
141
+2. If authentication works and namespace available, “samalba/busybox” is allocated and a temporary token is returned (namespace is marked as initialized in index)
142
+3. Push the image on the registry (along with the token)
143
+4. Registry A contacts the Index to verify the token (token must correspond to the repository name)
144
+5. Index validates the token. Registry A starts reading the stream pushed by docker and stores the repository (with its images)
145
+6. docker contacts the index to give checksums for uploaded images
146
+
147
+.. note::
148
+
149
+    **It’s possible not to use the Index at all!** In this case, a standalone Registry is deployed to store and serve images. Those images are not authenticated and the security is not guaranteed.
150
+
151
+.. note::
152
+
153
+    **Index can be replaced!** For a private Registry deployed, a custom Index can be used to serve and validate token according to different policies.
154
+
155
+Docker computes the checksums and submit them to the Index at the end of the push. When a repository name does not have checksums on the Index, it means that the push is in progress (since checksums are submitted at the end).
156
+
157
+API (pushing repos foo/bar):
158
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
159
+
160
+1. (Docker -> Index) PUT /v1/repositories/foo/bar/
161
+    **Headers**:
162
+        Authorization: Basic sdkjfskdjfhsdkjfh==
163
+        X-Docker-Token: true
164
+
165
+    **Action**::
166
+        - in index, we allocated a new repository, and set to initialized
167
+
168
+    **Body**::
169
+        (The body contains the list of images that are going to be pushed, with empty checksums. The checksums will be set at the end of the push)::
170
+
171
+        [{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”}]
172
+
173
+2. (Index -> Docker) 200 Created
174
+    **Headers**:
175
+        - WWW-Authenticate: Token signature=123abc,repository=”foo/bar”,access=write
176
+        - X-Docker-Endpoints: registry.docker.io [, registry2.docker.io]
177
+
178
+3. (Docker -> Registry) PUT /v1/images/98765432_parent/json
179
+    **Headers**:
180
+        Authorization: Token signature=123abc,repository=”foo/bar”,access=write
181
+
182
+4. (Registry->Index) GET /v1/repositories/foo/bar/images
183
+    **Headers**:
184
+        Authorization: Token signature=123abc,repository=”foo/bar”,access=write
185
+    **Action**::
186
+        - Index: 
187
+            will invalidate the token.
188
+        - Registry: 
189
+            grants a session (if token is approved) and fetches the images id
190
+
191
+5. (Docker -> Registry) PUT /v1/images/98765432_parent/json
192
+    **Headers**::
193
+        - Authorization: Token signature=123abc,repository=”foo/bar”,access=write
194
+        - Cookie: (Cookie provided by the Registry)
195
+
196
+6. (Docker -> Registry) PUT /v1/images/98765432/json
197
+    **Headers**:
198
+        Cookie: (Cookie provided by the Registry)
199
+
200
+7. (Docker -> Registry) PUT /v1/images/98765432_parent/layer
201
+    **Headers**:
202
+        Cookie: (Cookie provided by the Registry)
203
+
204
+8. (Docker -> Registry) PUT /v1/images/98765432/layer
205
+    **Headers**:
206
+        X-Docker-Checksum: sha256:436745873465fdjkhdfjkgh
207
+
208
+9. (Docker -> Registry) PUT /v1/repositories/foo/bar/tags/latest
209
+    **Headers**:
210
+        Cookie: (Cookie provided by the Registry)
211
+    **Body**:
212
+        “98765432”
213
+
214
+10. (Docker -> Index) PUT /v1/repositories/foo/bar/images
215
+
216
+    **Headers**:
217
+        Authorization: Basic 123oislifjsldfj==
218
+        X-Docker-Endpoints: registry1.docker.io (no validation on this right now)
219
+
220
+    **Body**:
221
+        (The image, id’s, tags and checksums)
222
+
223
+        [{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”, 
224
+        “checksum”: “b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”}]
225
+
226
+    **Return** HTTP 204
227
+
228
+.. note::
229
+
230
+     If push fails and they need to start again, what happens in the index, there will already be a record for the namespace/name, but it will be initialized. Should we allow it, or mark as name already used? One edge case could be if someone pushes the same thing at the same time with two different shells.
231
+
232
+     If it's a retry on the Registry, Docker has a cookie (provided by the registry after token validation). So the Index won’t have to provide a new token.
233
+
234
+3. How to use the Registry in standalone mode
235
+=============================================
236
+
237
+The Index has two main purposes (along with its fancy social features):
238
+
239
+- Resolve short names (to avoid passing absolute URLs all the time)
240
+   - username/projectname -> https://registry.docker.io/users/<username>/repositories/<projectname>/
241
+   - team/projectname -> https://registry.docker.io/team/<team>/repositories/<projectname>/
242
+- Authenticate a user as a repos owner (for a central referenced repository)
243
+
244
+3.1 Without an Index
245
+--------------------
246
+Using the Registry without the Index can be useful to store the images on a private network without having to rely on an external entity controlled by dotCloud.
247
+
248
+In this case, the registry will be launched in a special mode (--standalone? --no-index?). In this mode, the only thing which changes is that Registry will never contact the Index to verify a token. It will be the Registry owner's responsibility to authenticate the user who pushes (or even pulls) an image using any mechanism (HTTP auth, IP based, etc...).
249
+
250
+In this scenario, the Registry is responsible for the security in case of data corruption since the checksums are not delivered by a trusted entity.
251
+
252
+As hinted previously, a standalone registry can also be implemented by any HTTP server handling GET/PUT requests (or even only GET requests if no write access is necessary).
253
+
254
+3.2 With an Index
255
+-----------------
256
+
257
+The Index data needed by the Registry are simple:
258
+- Serve the checksums
259
+- Provide and authorize a Token
260
+
261
+In the scenario of a Registry running on a private network with the need of centralizing and authorizing, it’s easy to use a custom Index.
262
+
263
+The only challenge will be to tell Docker to contact (and trust) this custom Index. Docker will be configurable at some point to use a specific Index, it’ll be the private entity responsibility (basically the organization who uses Docker in a private environment) to maintain the Index and the Docker’s configuration among its consumers.
264
+
265
+4. The API
266
+==========
267
+
268
+The first version of the api is available here: https://github.com/jpetazzo/docker/blob/acd51ecea8f5d3c02b00a08176171c59442df8b3/docs/images-repositories-push-pull.md
269
+
270
+4.1 Images
271
+----------
272
+
273
+The format returned in the images is not defined here (for layer and json), basically because Registry stores exactly the same kind of information as Docker uses to manage them.
274
+
275
+The format of ancestry is a line-separated list of image ids, in age order. I.e. the image’s parent is on the last line, the parent of the parent on the next-to-last line, etc.; if the image has no parent, the file is empty.
276
+
277
+GET /v1/images/<image_id>/layer
278
+PUT /v1/images/<image_id>/layer
279
+GET /v1/images/<image_id>/json
280
+PUT /v1/images/<image_id>/json
281
+GET /v1/images/<image_id>/ancestry
282
+PUT /v1/images/<image_id>/ancestry
283
+
284
+4.2 Users
285
+---------
286
+
287
+4.2.1 Create a user (Index)
288
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
289
+
290
+POST /v1/users
291
+
292
+**Body**:
293
+    {"email": "sam@dotcloud.com", "password": "toto42", "username": "foobar"}
294
+
295
+**Validation**:
296
+    - **username**: min 4 characters, max 30 characters, all lowercase, no special characters.
297
+    - **password**: min 5 characters
298
+
299
+**Valid**: return HTTP 200
300
+
301
+Errors: HTTP 400 (we should create error codes for possible errors)
302
+- invalid json
303
+- missing field
304
+- wrong format (username, password, email, etc)
305
+- forbidden name
306
+- name already exists
307
+
308
+.. note::
309
+
310
+    A user account will be valid only if the email has been validated (a validation link is sent to the email address).
311
+
312
+4.2.2 Update a user (Index)
313
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
314
+
315
+PUT /v1/users/<username>
316
+
317
+**Body**:
318
+    {"password": "toto"}
319
+
320
+.. note::
321
+
322
+    We can also update email address, if they do, they will need to reverify their new email address.
323
+
324
+4.2.3 Login (Index)
325
+^^^^^^^^^^^^^^^^^^^
326
+Does nothing but ask for a user authentication. Can be used to validate credentials. HTTP Basic Auth for now; may change in the future.
327
+
328
+GET /v1/users
329
+
330
+**Return**:
331
+    - Valid: HTTP 200
332
+    - Invalid login: HTTP 401
333
+    - Account inactive: HTTP 403 Account is not Active
334
+
335
+4.3 Tags (Registry)
336
+-------------------
337
+
338
+The Registry does not know anything about users. Even though repositories are under usernames, it’s just a namespace for the registry. Allowing us to implement organizations or different namespaces per user later, without modifying the Registry’s API.
339
+
340
+4.3.1 Get all tags
341
+^^^^^^^^^^^^^^^^^^
342
+
343
+GET /v1/repositories/<namespace>/<repository_name>/tags
344
+
345
+**Return**: HTTP 200
346
+    {
347
+    "latest": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
348
+    “0.1.1”:  “b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”
349
+    }
350
+
351
+4.3.2 Read the content of a tag (resolve the image id)
352
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
353
+
354
+GET /v1/repositories/<namespace>/<repo_name>/tags/<tag>
355
+
356
+**Return**:
357
+    "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"
358
+
359
+4.3.3 Delete a tag (registry)
360
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
361
+
362
+DELETE /v1/repositories/<namespace>/<repo_name>/tags/<tag>
363
+
364
+4.4 Images (Index)
365
+------------------
366
+
367
+For the Index to “resolve” the repository name to a Registry location, it uses the X-Docker-Endpoints header. In other terms, these requests always add an “X-Docker-Endpoints” header to indicate the location of the registry which hosts this repository.
368
+
369
+4.4.1 Get the images
370
+^^^^^^^^^^^^^^^^^^^^^
371
+
372
+GET /v1/repositories/<namespace>/<repo_name>/images
373
+
374
+**Return**: HTTP 200
375
+    [{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”, “checksum”: “md5:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”}]
376
+
377
+
378
+4.4.2 Add/update the images
379
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
380
+
381
+You always add images, you never remove them.
382
+
383
+PUT /v1/repositories/<namespace>/<repo_name>/images
384
+
385
+**Body**:
386
+    [ {“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”, “checksum”: “sha256:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”} ]
387
+    
388
+**Return** 204
389
+
390
+5. Chaining Registries
391
+======================
392
+
393
+It’s possible to chain Registry servers for several reasons:
394
+- Load balancing
395
+- Delegate the next request to another server
396
+
397
+When a Registry is a reference for a repository, it should host the entire images chain in order to avoid breaking the chain during the download.
398
+
399
+The Index and Registry use this mechanism to redirect on one or the other.
400
+
401
+Example with an image download:
402
+On every request, a special header can be returned:
403
+
404
+X-Docker-Endpoints: server1,server2
405
+
406
+On the next request, the client will always pick a server from this list.
407
+
408
+6. Authentication & Authorization
409
+=================================
410
+
411
+6.1 On the Index
412
+-----------------
413
+
414
+The Index supports both “Basic” and “Token” challenges. Usually when there is a “401 Unauthorized”, the Index replies this::
415
+
416
+    401 Unauthorized
417
+    WWW-Authenticate: Basic realm="auth required",Token
418
+
419
+You have 3 options:
420
+
421
+1. Provide user credentials and ask for a token
422
+
423
+    **Header**:
424
+        - Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
425
+        - X-Docker-Token: true
426
+
427
+    In this case, along with the 200 response, you’ll get a new token (if user auth is ok):
428
+
429
+    **Response**:
430
+        - 200 OK
431
+        - X-Docker-Token: Token signature=123abc,repository=”foo/bar”,access=read
432
+
433
+2. Provide user credentials only
434
+
435
+    **Header**:
436
+        Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
437
+
438
+3. Provide Token
439
+
440
+    **Header**:
441
+        Authorization: Token signature=123abc,repository=”foo/bar”,access=read
442
+
443
+6.2 On the Registry
444
+-------------------
445
+
446
+The Registry only supports the Token challenge::
447
+
448
+    401 Unauthorized
449
+    WWW-Authenticate: Token
450
+
451
+The only way is to provide a token on “401 Unauthorized” responses::
452
+
453
+    Authorization: Token signature=123abc,repository=”foo/bar”,access=read
454
+
455
+Usually, the Registry provides a Cookie when a Token verification succeeded. Every time the Registry passes a Cookie, you have to pass it back the same cookie.::
456
+
457
+    200 OK
458
+    Set-Cookie: session="wD/J7LqL5ctqw8haL10vgfhrb2Q=?foo=UydiYXInCnAxCi4=&timestamp=RjEzNjYzMTQ5NDcuNDc0NjQzCi4="; Path=/; HttpOnly
459
+
460
+Next request::
461
+
462
+    GET /(...)
463
+    Cookie: session="wD/J7LqL5ctqw8haL10vgfhrb2Q=?foo=UydiYXInCnAxCi4=&timestamp=RjEzNjYzMTQ5NDcuNDc0NjQzCi4="
0 464
new file mode 100644
... ...
@@ -0,0 +1,15 @@
0
+:title: docker Registry documentation
1
+:description: Documentation for docker Registry and Registry API
2
+:keywords: docker, registry, api, index
3
+
4
+
5
+
6
+Registry
7
+========
8
+
9
+Contents:
10
+
11
+.. toctree::
12
+   :maxdepth: 2
13
+
14
+   api
0 15
new file mode 100644
1 16
Binary files /dev/null and b/docs/sources/static_files/docker_pull_chart.png differ
2 17
new file mode 100644
3 18
Binary files /dev/null and b/docs/sources/static_files/docker_push_chart.png differ
4 19
new file mode 100644
... ...
@@ -0,0 +1,9 @@
0
+package docker
1
+
2
+import (
3
+	"fmt"
4
+)
5
+
6
+func getKernelVersion() (*KernelVersionInfo, error) {
7
+	return nil, fmt.Errorf("Kernel version detection is not available on darwin")
8
+}
0 9
new file mode 100644
... ...
@@ -0,0 +1,69 @@
0
+package docker
1
+
2
+import (
3
+	"bytes"
4
+	"strconv"
5
+	"strings"
6
+	"syscall"
7
+)
8
+
9
+func getKernelVersion() (*KernelVersionInfo, error) {
10
+	var (
11
+		uts                  syscall.Utsname
12
+		flavor               string
13
+		kernel, major, minor int
14
+		err                  error
15
+	)
16
+
17
+	if err := syscall.Uname(&uts); err != nil {
18
+		return nil, err
19
+	}
20
+
21
+	release := make([]byte, len(uts.Release))
22
+
23
+	i := 0
24
+	for _, c := range uts.Release {
25
+		release[i] = byte(c)
26
+		i++
27
+	}
28
+
29
+	// Remove the \x00 from the release for Atoi to parse correctly
30
+	release = release[:bytes.IndexByte(release, 0)]
31
+
32
+	tmp := strings.SplitN(string(release), "-", 2)
33
+	tmp2 := strings.SplitN(tmp[0], ".", 3)
34
+
35
+	if len(tmp2) > 0 {
36
+		kernel, err = strconv.Atoi(tmp2[0])
37
+		if err != nil {
38
+			return nil, err
39
+		}
40
+	}
41
+
42
+	if len(tmp2) > 1 {
43
+		major, err = strconv.Atoi(tmp2[1])
44
+		if err != nil {
45
+			return nil, err
46
+		}
47
+	}
48
+
49
+	if len(tmp2) > 2 {
50
+		minor, err = strconv.Atoi(tmp2[2])
51
+		if err != nil {
52
+			return nil, err
53
+		}
54
+	}
55
+
56
+	if len(tmp) == 2 {
57
+		flavor = tmp[1]
58
+	} else {
59
+		flavor = ""
60
+	}
61
+
62
+	return &KernelVersionInfo{
63
+		Kernel: kernel,
64
+		Major:  major,
65
+		Minor:  minor,
66
+		Flavor: flavor,
67
+	}, nil
68
+}
... ...
@@ -2,6 +2,7 @@ package docker
2 2
 
3 3
 import (
4 4
 	"fmt"
5
+	"io"
5 6
 	"io/ioutil"
6 7
 	"os"
7 8
 	"path"
... ...
@@ -83,17 +84,24 @@ func (graph *Graph) Get(name string) (*Image, error) {
83 83
 }
84 84
 
85 85
 // Create creates a new image and registers it in the graph.
86
-func (graph *Graph) Create(layerData Archive, container *Container, comment string) (*Image, error) {
86
+func (graph *Graph) Create(layerData Archive, container *Container, comment, author string, config *Config) (*Image, error) {
87 87
 	img := &Image{
88 88
 		Id:            GenerateId(),
89 89
 		Comment:       comment,
90 90
 		Created:       time.Now(),
91 91
 		DockerVersion: VERSION,
92
+		Author:        author,
93
+		Config:        config,
92 94
 	}
93 95
 	if container != nil {
94 96
 		img.Parent = container.Image
95 97
 		img.Container = container.Id
96 98
 		img.ContainerConfig = *container.Config
99
+		if config == nil {
100
+			if parentImage, err := graph.Get(container.Image); err == nil && parentImage != nil {
101
+				img.Config = parentImage.Config
102
+			}
103
+		}
97 104
 	}
98 105
 	if err := graph.Register(layerData, img); err != nil {
99 106
 		return nil, err
... ...
@@ -111,7 +119,7 @@ func (graph *Graph) Register(layerData Archive, img *Image) error {
111 111
 	if graph.Exists(img.Id) {
112 112
 		return fmt.Errorf("Image %s already exists", img.Id)
113 113
 	}
114
-	tmp, err := graph.Mktemp(img.Id)
114
+	tmp, err := graph.Mktemp("")
115 115
 	defer os.RemoveAll(tmp)
116 116
 	if err != nil {
117 117
 		return fmt.Errorf("Mktemp failed: %s", err)
... ...
@@ -128,12 +136,32 @@ func (graph *Graph) Register(layerData Archive, img *Image) error {
128 128
 	return nil
129 129
 }
130 130
 
131
+// TempLayerArchive creates a temporary archive of the given image's filesystem layer.
132
+//   The archive is stored on disk and will be automatically deleted as soon as has been read.
133
+//   If output is not nil, a human-readable progress bar will be written to it.
134
+//   FIXME: does this belong in Graph? How about MktempFile, let the caller use it for archives?
135
+func (graph *Graph) TempLayerArchive(id string, compression Compression, output io.Writer) (*TempArchive, error) {
136
+	image, err := graph.Get(id)
137
+	if err != nil {
138
+		return nil, err
139
+	}
140
+	tmp, err := graph.tmp()
141
+	if err != nil {
142
+		return nil, err
143
+	}
144
+	archive, err := image.TarLayer(compression)
145
+	if err != nil {
146
+		return nil, err
147
+	}
148
+	return NewTempArchive(ProgressReader(ioutil.NopCloser(archive), 0, output, "Buffering to disk %v/%v (%v)"), tmp.Root)
149
+}
150
+
131 151
 // Mktemp creates a temporary sub-directory inside the graph's filesystem.
132 152
 func (graph *Graph) Mktemp(id string) (string, error) {
133 153
 	if id == "" {
134 154
 		id = GenerateId()
135 155
 	}
136
-	tmp, err := NewGraph(path.Join(graph.Root, ":tmp:"))
156
+	tmp, err := graph.tmp()
137 157
 	if err != nil {
138 158
 		return "", fmt.Errorf("Couldn't create temp: %s", err)
139 159
 	}
... ...
@@ -143,6 +171,10 @@ func (graph *Graph) Mktemp(id string) (string, error) {
143 143
 	return tmp.imageRoot(id), nil
144 144
 }
145 145
 
146
+func (graph *Graph) tmp() (*Graph, error) {
147
+	return NewGraph(path.Join(graph.Root, ":tmp:"))
148
+}
149
+
146 150
 // Check if given error is "not empty".
147 151
 // Note: this is the way golang does it internally with os.IsNotExists.
148 152
 func isNotEmpty(err error) bool {
... ...
@@ -3,6 +3,7 @@ package docker
3 3
 import (
4 4
 	"archive/tar"
5 5
 	"bytes"
6
+	"errors"
6 7
 	"io"
7 8
 	"io/ioutil"
8 9
 	"os"
... ...
@@ -26,6 +27,32 @@ func TestInit(t *testing.T) {
26 26
 	}
27 27
 }
28 28
 
29
+// Test that Register can be interrupted cleanly without side effects
30
+func TestInterruptedRegister(t *testing.T) {
31
+	graph := tempGraph(t)
32
+	defer os.RemoveAll(graph.Root)
33
+	badArchive, w := io.Pipe() // Use a pipe reader as a fake archive which never yields data
34
+	image := &Image{
35
+		Id:      GenerateId(),
36
+		Comment: "testing",
37
+		Created: time.Now(),
38
+	}
39
+	go graph.Register(badArchive, image)
40
+	time.Sleep(200 * time.Millisecond)
41
+	w.CloseWithError(errors.New("But I'm not a tarball!")) // (Nobody's perfect, darling)
42
+	if _, err := graph.Get(image.Id); err == nil {
43
+		t.Fatal("Image should not exist after Register is interrupted")
44
+	}
45
+	// Registering the same image again should succeed if the first register was interrupted
46
+	goodArchive, err := fakeTar()
47
+	if err != nil {
48
+		t.Fatal(err)
49
+	}
50
+	if err := graph.Register(goodArchive, image); err != nil {
51
+		t.Fatal(err)
52
+	}
53
+}
54
+
29 55
 // FIXME: Do more extensive tests (ex: create multiple, delete, recreate;
30 56
 //       create multiple, check the amount of images and paths, etc..)
31 57
 func TestGraphCreate(t *testing.T) {
... ...
@@ -35,7 +62,7 @@ func TestGraphCreate(t *testing.T) {
35 35
 	if err != nil {
36 36
 		t.Fatal(err)
37 37
 	}
38
-	image, err := graph.Create(archive, nil, "Testing")
38
+	image, err := graph.Create(archive, nil, "Testing", "", nil)
39 39
 	if err != nil {
40 40
 		t.Fatal(err)
41 41
 	}
... ...
@@ -95,7 +122,7 @@ func TestMount(t *testing.T) {
95 95
 	if err != nil {
96 96
 		t.Fatal(err)
97 97
 	}
98
-	image, err := graph.Create(archive, nil, "Testing")
98
+	image, err := graph.Create(archive, nil, "Testing", "", nil)
99 99
 	if err != nil {
100 100
 		t.Fatal(err)
101 101
 	}
... ...
@@ -139,7 +166,7 @@ func createTestImage(graph *Graph, t *testing.T) *Image {
139 139
 	if err != nil {
140 140
 		t.Fatal(err)
141 141
 	}
142
-	img, err := graph.Create(archive, nil, "Test image")
142
+	img, err := graph.Create(archive, nil, "Test image", "", nil)
143 143
 	if err != nil {
144 144
 		t.Fatal(err)
145 145
 	}
... ...
@@ -154,7 +181,7 @@ func TestDelete(t *testing.T) {
154 154
 		t.Fatal(err)
155 155
 	}
156 156
 	assertNImages(graph, t, 0)
157
-	img, err := graph.Create(archive, nil, "Bla bla")
157
+	img, err := graph.Create(archive, nil, "Bla bla", "", nil)
158 158
 	if err != nil {
159 159
 		t.Fatal(err)
160 160
 	}
... ...
@@ -165,11 +192,11 @@ func TestDelete(t *testing.T) {
165 165
 	assertNImages(graph, t, 0)
166 166
 
167 167
 	// Test 2 create (same name) / 1 delete
168
-	img1, err := graph.Create(archive, nil, "Testing")
168
+	img1, err := graph.Create(archive, nil, "Testing", "", nil)
169 169
 	if err != nil {
170 170
 		t.Fatal(err)
171 171
 	}
172
-	if _, err = graph.Create(archive, nil, "Testing"); err != nil {
172
+	if _, err = graph.Create(archive, nil, "Testing", "", nil); err != nil {
173 173
 		t.Fatal(err)
174 174
 	}
175 175
 	assertNImages(graph, t, 2)
176 176
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-This directory contains material helpful for hacking on docker.
2 1
new file mode 100644
... ...
@@ -0,0 +1,27 @@
0
+This directory contains material helpful for hacking on docker.
1
+
2
+make hack
3
+=========
4
+
5
+Set up an Ubuntu 13.04 virtual machine for developers including kernel 3.8
6
+and buildbot. The environment is set up in a way that can be used through
7
+the usual go workflow and/or the root Makefile. You can either edit on
8
+your host, or inside the VM (using make ssh-dev) and run and test docker
9
+inside the VM.
10
+
11
+dependencies: vagrant, virtualbox packages and python package requests
12
+
13
+
14
+Buildbot
15
+~~~~~~~~
16
+
17
+Buildbot is a continuous integration system designed to automate the
18
+build/test cycle. By automatically rebuilding and testing the tree each time
19
+something has changed, build problems are pinpointed quickly, before other
20
+developers are inconvenienced by the failure.
21
+
22
+When running 'make hack' at the docker root directory, it spawns a virtual
23
+machine in the background running a buildbot instance and adds a git
24
+post-commit hook that automatically runs docker tests for you.
25
+
26
+You can check your buildbot instance at http://192.168.33.21:8010/waterfall
0 27
new file mode 100644
... ...
@@ -0,0 +1,35 @@
0
+# -*- mode: ruby -*-
1
+# vi: set ft=ruby :
2
+
3
+BOX_NAME = "ubuntu-dev"
4
+BOX_URI = "http://cloud-images.ubuntu.com/raring/current/raring-server-cloudimg-vagrant-amd64-disk1.box"
5
+VM_IP = "192.168.33.21"
6
+USER = "vagrant"
7
+GOPATH = "/data/docker"
8
+DOCKER_PATH = "#{GOPATH}/src/github.com/dotcloud/docker"
9
+CFG_PATH = "#{DOCKER_PATH}/hack/environment"
10
+BUILDBOT_PATH = "/data/buildbot"
11
+
12
+Vagrant::Config.run do |config|
13
+  # Setup virtual machine box
14
+  config.vm.box = BOX_NAME
15
+  config.vm.box_url = BOX_URI
16
+  config.vm.share_folder "v-data", DOCKER_PATH, "#{File.dirname(__FILE__)}/.."
17
+  config.vm.network :hostonly, VM_IP
18
+  # Stop if deployment has been done
19
+  config.vm.provision :shell, :inline => "[ ! -f /usr/bin/git ]"
20
+  # Touch for makefile
21
+  pkg_cmd = "touch #{DOCKER_PATH}; "
22
+  # Install docker dependencies
23
+  pkg_cmd << "export DEBIAN_FRONTEND=noninteractive; apt-get -qq update; " \
24
+    "apt-get install -q -y lxc bsdtar git golang make linux-image-extra-3.8.0-19-generic; " \
25
+    "chown -R #{USER}.#{USER} #{GOPATH}; " \
26
+    "install -m 0664 #{CFG_PATH}/bash_profile /home/#{USER}/.bash_profile"
27
+  config.vm.provision :shell, :inline => pkg_cmd
28
+  # Deploy buildbot CI
29
+  pkg_cmd = "apt-get install -q -y python-dev python-pip supervisor; " \
30
+    "pip install -r #{CFG_PATH}/requirements.txt; " \
31
+    "chown #{USER}.#{USER} /data; cd /data; " \
32
+    "#{CFG_PATH}/setup.sh #{USER} #{GOPATH} #{DOCKER_PATH} #{CFG_PATH} #{BUILDBOT_PATH}"
33
+  config.vm.provision :shell, :inline => pkg_cmd
34
+end
0 35
new file mode 100644
... ...
@@ -0,0 +1,19 @@
0
+# This will build a container capable of producing an official binary build of docker and
1
+# uploading it to S3
2
+maintainer	Solomon Hykes <solomon@dotcloud.com>
3
+from	ubuntu:12.10
4
+run	apt-get update
5
+run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q s3cmd
6
+# Packages required to checkout and build docker
7
+run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang
8
+run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q git
9
+run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q build-essential
10
+# Packages required to build an ubuntu package
11
+run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q debhelper
12
+run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q autotools-dev
13
+add	.	/src
14
+run	cp /src/dockerbuilder /usr/local/bin/ && chmod +x /usr/local/bin/dockerbuilder
15
+run	cp /src/fake_initctl /usr/local/bin/initctl && chmod +x /usr/local/bin/initctl
16
+run	cp /src/s3cfg /.s3cfg
17
+run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q devscripts
18
+cmd	["dockerbuilder"]
0 19
new file mode 100644
... ...
@@ -0,0 +1,53 @@
0
+#!/bin/sh
1
+set -x
2
+set -e
3
+
4
+PACKAGE=github.com/dotcloud/docker
5
+
6
+if [ $# -gt 1 ]; then
7
+	echo "Usage: $0 [REVISION]"
8
+	exit 1
9
+fi
10
+
11
+export REVISION=$1
12
+
13
+if [ -z "$AWS_ID" ]; then
14
+	echo "Warning: environment variable AWS_ID is not set. Won't upload to S3."
15
+	NO_S3=1
16
+fi
17
+
18
+if [ -z "$AWS_KEY" ]; then
19
+	echo "Warning: environment variable AWS_KEY is not set. Won't upload to S3."
20
+	NO_S3=1
21
+fi
22
+
23
+if [ -z "$GPG_KEY" ]; then
24
+	echo "Warning: environment variable GPG_KEY is not set. Ubuntu package upload will not succeed."
25
+	NO_UBUNTU=1
26
+fi
27
+
28
+if [ -z "$REVISION" ]; then
29
+	rm -fr docker-master
30
+	git clone https://github.com/dotcloud/docker docker-master
31
+	cd docker-master
32
+else 
33
+	rm -fr docker-$REVISION
34
+	git init docker-$REVISION
35
+	cd docker-$REVISION
36
+	git fetch -t https://github.com/dotcloud/docker $REVISION:$REVISION
37
+	git reset --hard FETCH_HEAD
38
+fi
39
+
40
+if [ -z "$REVISION" ]; then
41
+	make release
42
+else
43
+	make release RELEASE_VERSION=$REVISION
44
+fi
45
+
46
+if [ -z "$NO_S3" ]; then
47
+	s3cmd -P put docker-$REVISION.tgz s3://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-$REVISION.tgz
48
+fi
49
+
50
+if [ -z "$NO_UBUNTU" ]; then
51
+	(cd packaging/ubuntu && make ubuntu)
52
+fi
0 53
new file mode 100755
... ...
@@ -0,0 +1,3 @@
0
+#!/bin/sh
1
+
2
+echo Whatever you say, man
0 3
new file mode 100644
... ...
@@ -0,0 +1,3 @@
0
+[default]
1
+access_key = $AWS_ID
2
+secret_key = $AWS_KEY
0 3
new file mode 100644
... ...
@@ -0,0 +1 @@
0
+Files used to setup the developer virtual machine
0 1
new file mode 100644
... ...
@@ -0,0 +1,19 @@
0
+# ~/.bash_profile : executed by the command interpreter for login shells.
1
+
2
+# if running bash
3
+if [ -n "$BASH_VERSION" ]; then
4
+    # include .bashrc if it exists
5
+    if [ -f "$HOME/.bashrc" ]; then
6
+        . "$HOME/.bashrc"
7
+    fi
8
+fi
9
+
10
+# set PATH so it includes user's private bin if it exists
11
+[ -d "$HOME/bin" ] && PATH="$HOME/bin:$PATH"
12
+
13
+docker=/data/docker/src/github.com/dotcloud/docker
14
+[ -d $docker ] && cd $docker
15
+
16
+export GOPATH=/data/docker
17
+export PATH=$PATH:$GOPATH/bin
18
+
0 19
new file mode 100644
... ...
@@ -0,0 +1,18 @@
0
+[program:buildmaster]
1
+command=su vagrant -c "buildbot start master"
2
+directory=/data/buildbot
3
+chown= root:root
4
+redirect_stderr=true
5
+stdout_logfile=/var/log/supervisor/buildbot-master.log
6
+stderr_logfile=/var/log/supervisor/buildbot-master.log
7
+
8
+[program:buildworker]
9
+command=buildslave start slave
10
+directory=/data/buildbot
11
+chown= root:root
12
+redirect_stderr=true
13
+stdout_logfile=/var/log/supervisor/buildbot-slave.log
14
+stderr_logfile=/var/log/supervisor/buildbot-slave.log
15
+
16
+[group:buildbot]
17
+programs=buildmaster,buildworker
0 18
new file mode 100644
... ...
@@ -0,0 +1,43 @@
0
+import os
1
+from buildbot.buildslave import BuildSlave
2
+from buildbot.schedulers.forcesched import ForceScheduler
3
+from buildbot.config import BuilderConfig
4
+from buildbot.process.factory import BuildFactory
5
+from buildbot.steps.shell import ShellCommand
6
+from buildbot.status import html
7
+from buildbot.status.web import authz, auth
8
+
9
+PORT_WEB = 8010         # Buildbot webserver port
10
+PORT_MASTER = 9989      # Port where buildbot master listen buildworkers
11
+TEST_USER = 'buildbot'  # Credential to authenticate build triggers
12
+TEST_PWD = 'docker'     # Credential to authenticate build triggers
13
+BUILDER_NAME = 'docker'
14
+BUILDPASSWORD = 'pass-docker'  # Credential to authenticate buildworkers
15
+GOPATH = '/data/docker'
16
+DOCKER_PATH = '{0}/src/github.com/dotcloud/docker'.format(GOPATH)
17
+
18
+c = BuildmasterConfig = {}
19
+
20
+c['title'] = "Docker"
21
+c['titleURL'] = "waterfall"
22
+c['buildbotURL'] = "http://localhost:{0}/".format(PORT_WEB)
23
+c['db'] = {'db_url':"sqlite:///state.sqlite"}
24
+c['slaves'] = [BuildSlave('buildworker', BUILDPASSWORD)]
25
+c['slavePortnum'] = PORT_MASTER
26
+
27
+c['schedulers'] = [ForceScheduler(name='trigger',builderNames=[BUILDER_NAME])]
28
+
29
+# Docker test command
30
+test_cmd = "GOPATH={0} make -C {1} test".format(GOPATH,DOCKER_PATH)
31
+
32
+# Builder
33
+factory = BuildFactory()
34
+factory.addStep(ShellCommand(description='Docker',logEnviron=False,
35
+    usePTY=True,command=test_cmd))
36
+c['builders'] = [BuilderConfig(name=BUILDER_NAME,slavenames=['buildworker'],
37
+    factory=factory)]
38
+
39
+# Status
40
+authz_cfg=authz.Authz(auth=auth.BasicAuth([(TEST_USER,TEST_PWD)]),
41
+    forceBuild='auth')
42
+c['status'] = [html.WebStatus(http_port=PORT_WEB, authz=authz_cfg)]
0 43
new file mode 100755
... ...
@@ -0,0 +1,21 @@
0
+#!/usr/bin/env python
1
+
2
+'''Trigger buildbot docker test build
3
+
4
+   post-commit git hook designed to automatically trigger buildbot on
5
+   the provided vagrant docker VM.'''
6
+
7
+import requests
8
+
9
+USERNAME = 'buildbot'
10
+PASSWORD = 'docker'
11
+BASE_URL = 'http://localhost:8010'
12
+path = lambda s: BASE_URL + '/' + s
13
+
14
+try:
15
+    session = requests.session()
16
+    session.post(path('login'),data={'username':USERNAME,'passwd':PASSWORD})
17
+    session.post(path('builders/docker/force'),
18
+        data={'forcescheduler':'trigger','reason':'Test commit'})
19
+except:
20
+    pass
0 21
new file mode 100644
... ...
@@ -0,0 +1,6 @@
0
+sqlalchemy<=0.7.9
1
+sqlalchemy-migrate>=0.7.2
2
+buildbot==0.8.7p1
3
+buildbot_slave==0.8.7p1
4
+nose==1.2.1
5
+requests==1.1.0
0 6
new file mode 100755
... ...
@@ -0,0 +1,45 @@
0
+#!/bin/bash
1
+
2
+# Setup of buildbot configuration. Package installation is being done by
3
+# Vagrantfile
4
+# Dependencies: buildbot, buildbot-slave, supervisor
5
+
6
+USER=$1
7
+GOPATH=$2
8
+DOCKER_PATH=$3
9
+CFG_PATH=$4
10
+BUILDBOT_PATH=$5
11
+SLAVE_NAME="buildworker"
12
+SLAVE_SOCKET="localhost:9989"
13
+BUILDBOT_PWD="pass-docker"
14
+IP=$(sed -nE 's/VM_IP = "(.+)"/\1/p' ${DOCKER_PATH}/hack/Vagrantfile)
15
+export PATH="/bin:sbin:/usr/bin:/usr/sbin:/usr/local/bin"
16
+
17
+function run { su $USER -c "$1"; }
18
+
19
+# Exit if buildbot has already been installed
20
+[ -d "$BUILDBOT_PATH" ] && exit 0
21
+
22
+# Setup buildbot
23
+run "mkdir -p $BUILDBOT_PATH"
24
+cd $BUILDBOT_PATH
25
+run "buildbot create-master master"
26
+run "cp $CFG_PATH/master.cfg master"
27
+run "sed -i 's/localhost/$IP/' master/master.cfg"
28
+run "sed -i -E 's#(GOPATH = ).+#\1\"$GOPATH\"#' master/master.cfg"
29
+run "sed -i -E 's#(DOCKER_PATH = ).+#\1\"$DOCKER_PATH\"#' master/master.cfg"
30
+run "buildslave create-slave slave $SLAVE_SOCKET $SLAVE_NAME $BUILDBOT_PWD"
31
+
32
+# Allow buildbot subprocesses (docker tests) to properly run in containers,
33
+# in particular with docker -u
34
+run "sed -i 's/^umask = None/umask = 000/' slave/buildbot.tac"
35
+
36
+# Setup supervisor
37
+cp $CFG_PATH/buildbot.conf /etc/supervisor/conf.d/buildbot.conf
38
+sed -i -E "s/^chmod=0700.+/chmod=0770\nchown=root:$USER/" /etc/supervisor/supervisord.conf
39
+kill -HUP $(pgrep -f "/usr/bin/python /usr/bin/supervisord")
40
+
41
+# Add git hook
42
+cp $CFG_PATH/post-commit $DOCKER_PATH/.git/hooks
43
+sed -i "s/localhost/$IP/" $DOCKER_PATH/.git/hooks/post-commit
44
+
... ...
@@ -7,7 +7,9 @@ import (
7 7
 	"fmt"
8 8
 	"io"
9 9
 	"io/ioutil"
10
+	"log"
10 11
 	"os"
12
+	"os/exec"
11 13
 	"path"
12 14
 	"strings"
13 15
 	"time"
... ...
@@ -21,6 +23,8 @@ type Image struct {
21 21
 	Container       string    `json:"container,omitempty"`
22 22
 	ContainerConfig Config    `json:"container_config,omitempty"`
23 23
 	DockerVersion   string    `json:"docker_version,omitempty"`
24
+	Author          string    `json:"author,omitempty"`
25
+	Config          *Config   `json:"config,omitempty"`
24 26
 	graph           *Graph
25 27
 }
26 28
 
... ...
@@ -89,10 +93,31 @@ func MountAUFS(ro []string, rw string, target string) error {
89 89
 	rwBranch := fmt.Sprintf("%v=rw", rw)
90 90
 	roBranches := ""
91 91
 	for _, layer := range ro {
92
-		roBranches += fmt.Sprintf("%v=ro:", layer)
92
+		roBranches += fmt.Sprintf("%v=ro+wh:", layer)
93 93
 	}
94 94
 	branches := fmt.Sprintf("br:%v:%v", rwBranch, roBranches)
95
-	return mount("none", target, "aufs", 0, branches)
95
+
96
+	//if error, try to load aufs kernel module
97
+	if err := mount("none", target, "aufs", 0, branches); err != nil {
98
+		log.Printf("Kernel does not support AUFS, trying to load the AUFS module with modprobe...")
99
+		if err := exec.Command("modprobe", "aufs").Run(); err != nil {
100
+			return fmt.Errorf("Unable to load the AUFS module")
101
+		}
102
+		log.Printf("...module loaded.")
103
+		if err := mount("none", target, "aufs", 0, branches); err != nil {
104
+			return fmt.Errorf("Unable to mount using aufs")
105
+		}
106
+	}
107
+	return nil
108
+}
109
+
110
+// TarLayer returns a tar archive of the image's filesystem layer.
111
+func (image *Image) TarLayer(compression Compression) (Archive, error) {
112
+	layerPath, err := image.layer()
113
+	if err != nil {
114
+		return nil, err
115
+	}
116
+	return Tar(layerPath, compression)
96 117
 }
97 118
 
98 119
 func (image *Image) Mount(root, rw string) error {
... ...
@@ -112,34 +137,9 @@ func (image *Image) Mount(root, rw string) error {
112 112
 	if err := os.Mkdir(rw, 0755); err != nil && !os.IsExist(err) {
113 113
 		return err
114 114
 	}
115
-	// FIXME: @creack shouldn't we do this after going over changes?
116 115
 	if err := MountAUFS(layers, rw, root); err != nil {
117 116
 		return err
118 117
 	}
119
-	// FIXME: Create tests for deletion
120
-	// FIXME: move this part to change.go
121
-	// Retrieve the changeset from the parent and apply it to the container
122
-	//  - Retrieve the changes
123
-	changes, err := Changes(layers, layers[0])
124
-	if err != nil {
125
-		return err
126
-	}
127
-	// Iterate on changes
128
-	for _, c := range changes {
129
-		// If there is a delete
130
-		if c.Kind == ChangeDelete {
131
-			// Make sure the directory exists
132
-			file_path, file_name := path.Dir(c.Path), path.Base(c.Path)
133
-			if err := os.MkdirAll(path.Join(rw, file_path), 0755); err != nil {
134
-				return err
135
-			}
136
-			// And create the whiteout (we just need to create empty file, discard the return)
137
-			if _, err := os.Create(path.Join(path.Join(rw, file_path),
138
-				".wh."+path.Base(file_name))); err != nil {
139
-				return err
140
-			}
141
-		}
142
-	}
143 118
 	return nil
144 119
 }
145 120
 
... ...
@@ -4,6 +4,7 @@ import (
4 4
 	"encoding/binary"
5 5
 	"errors"
6 6
 	"fmt"
7
+	"io"
7 8
 	"log"
8 9
 	"net"
9 10
 	"os/exec"
... ...
@@ -183,18 +184,21 @@ func getIfaceAddr(name string) (net.Addr, error) {
183 183
 // It keeps track of all mappings and is able to unmap at will
184 184
 type PortMapper struct {
185 185
 	mapping map[int]net.TCPAddr
186
+	proxies map[int]net.Listener
186 187
 }
187 188
 
188 189
 func (mapper *PortMapper) cleanup() error {
189 190
 	// Ignore errors - This could mean the chains were never set up
190 191
 	iptables("-t", "nat", "-D", "PREROUTING", "-m", "addrtype", "--dst-type", "LOCAL", "-j", "DOCKER")
191
-	iptables("-t", "nat", "-D", "OUTPUT", "-m", "addrtype", "--dst-type", "LOCAL", "-j", "DOCKER")
192
+	iptables("-t", "nat", "-D", "OUTPUT", "-m", "addrtype", "--dst-type", "LOCAL", "!", "--dst", "127.0.0.0/8", "-j", "DOCKER")
193
+	iptables("-t", "nat", "-D", "OUTPUT", "-m", "addrtype", "--dst-type", "LOCAL", "-j", "DOCKER") // Created in versions <= 0.1.6
192 194
 	// Also cleanup rules created by older versions, or -X might fail.
193 195
 	iptables("-t", "nat", "-D", "PREROUTING", "-j", "DOCKER")
194 196
 	iptables("-t", "nat", "-D", "OUTPUT", "-j", "DOCKER")
195 197
 	iptables("-t", "nat", "-F", "DOCKER")
196 198
 	iptables("-t", "nat", "-X", "DOCKER")
197 199
 	mapper.mapping = make(map[int]net.TCPAddr)
200
+	mapper.proxies = make(map[int]net.Listener)
198 201
 	return nil
199 202
 }
200 203
 
... ...
@@ -205,7 +209,7 @@ func (mapper *PortMapper) setup() error {
205 205
 	if err := iptables("-t", "nat", "-A", "PREROUTING", "-m", "addrtype", "--dst-type", "LOCAL", "-j", "DOCKER"); err != nil {
206 206
 		return fmt.Errorf("Failed to inject docker in PREROUTING chain: %s", err)
207 207
 	}
208
-	if err := iptables("-t", "nat", "-A", "OUTPUT", "-m", "addrtype", "--dst-type", "LOCAL", "-j", "DOCKER"); err != nil {
208
+	if err := iptables("-t", "nat", "-A", "OUTPUT", "-m", "addrtype", "--dst-type", "LOCAL", "!", "--dst", "127.0.0.0/8", "-j", "DOCKER"); err != nil {
209 209
 		return fmt.Errorf("Failed to inject docker in OUTPUT chain: %s", err)
210 210
 	}
211 211
 	return nil
... ...
@@ -220,15 +224,64 @@ func (mapper *PortMapper) Map(port int, dest net.TCPAddr) error {
220 220
 	if err := mapper.iptablesForward("-A", port, dest); err != nil {
221 221
 		return err
222 222
 	}
223
+
223 224
 	mapper.mapping[port] = dest
225
+	listener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", port))
226
+	if err != nil {
227
+		mapper.Unmap(port)
228
+		return err
229
+	}
230
+	mapper.proxies[port] = listener
231
+	go proxy(listener, "tcp", dest.String())
232
+	return nil
233
+}
234
+
235
+// proxy listens for socket connections on `listener`, and forwards them unmodified
236
+// to `proto:address`
237
+func proxy(listener net.Listener, proto, address string) error {
238
+	Debugf("proxying to %s:%s", proto, address)
239
+	defer Debugf("Done proxying to %s:%s", proto, address)
240
+	for {
241
+		Debugf("Listening on %s", listener)
242
+		src, err := listener.Accept()
243
+		if err != nil {
244
+			return err
245
+		}
246
+		Debugf("Connecting to %s:%s", proto, address)
247
+		dst, err := net.Dial(proto, address)
248
+		if err != nil {
249
+			log.Printf("Error connecting to %s:%s: %s", proto, address, err)
250
+			src.Close()
251
+			continue
252
+		}
253
+		Debugf("Connected to backend, splicing")
254
+		splice(src, dst)
255
+	}
224 256
 	return nil
225 257
 }
226 258
 
259
+func halfSplice(dst, src net.Conn) error {
260
+	_, err := io.Copy(dst, src)
261
+	// FIXME: on EOF from a tcp connection, pass WriteClose()
262
+	dst.Close()
263
+	src.Close()
264
+	return err
265
+}
266
+
267
+func splice(a, b net.Conn) {
268
+	go halfSplice(a, b)
269
+	go halfSplice(b, a)
270
+}
271
+
227 272
 func (mapper *PortMapper) Unmap(port int) error {
228 273
 	dest, ok := mapper.mapping[port]
229 274
 	if !ok {
230 275
 		return errors.New("Port is not mapped")
231 276
 	}
277
+	if proxy, exists := mapper.proxies[port]; exists {
278
+		proxy.Close()
279
+		delete(mapper.proxies, port)
280
+	}
232 281
 	if err := mapper.iptablesForward("-D", port, dest); err != nil {
233 282
 		return err
234 283
 	}
235 284
new file mode 100644
... ...
@@ -0,0 +1,25 @@
0
+Docker on Arch
1
+==============
2
+
3
+The AUR lxc-docker and lxc-docker-git packages handle building docker on Arch
4
+linux.  The PKGBUILD specifies all dependencies, build, and packaging steps.
5
+
6
+Dependencies
7
+============
8
+
9
+The only buildtime dependencies are git and go which are available via pacman.
10
+The -s flag can be used on makepkg commands below to automatically install
11
+these dependencies.
12
+
13
+Building Package
14
+================
15
+
16
+Download the tarball for either AUR packaged to a local directory.  In that
17
+directory makepkg can be run to build the package.
18
+
19
+# Build the binary package
20
+makepkg
21
+
22
+# Build an updated source tarball
23
+makepkg --source
24
+
... ...
@@ -1,73 +1,62 @@
1
-PKG_NAME=lxc-docker
2
-PKG_ARCH=amd64
3
-PKG_VERSION=1
4
-ROOT_PATH:=$(PWD)
5
-BUILD_PATH=build	# Do not change, decided by dpkg-buildpackage
6
-BUILD_SRC=build_src
7
-GITHUB_PATH=src/github.com/dotcloud/docker
8
-INSDIR=usr/bin
9
-SOURCE_PACKAGE=$(PKG_NAME)_$(PKG_VERSION).orig.tar.gz
10
-DEB_PACKAGE=$(PKG_NAME)_$(PKG_VERSION)_$(PKG_ARCH).deb
11
-EXTRA_GO_PKG=./auth
12
-
13
-TMPDIR=$(shell mktemp -d -t XXXXXX)
1
+# Ubuntu package Makefile
2
+#
3
+# Dependencies:  debhelper autotools-dev devscripts golang
4
+# Notes:
5
+# Use 'make ubuntu' to create the ubuntu package
6
+# GPG_KEY environment variable needs to contain a GPG private key for package to be signed
7
+# and uploaded to docker PPA.
8
+# If GPG_KEY is not defined, make ubuntu will create docker package and exit with
9
+# status code 2
14 10
 
11
+PKG_NAME=lxc-docker
12
+VERSION=$(shell head -1 changelog | sed 's/^.\+(\(.\+\)..).\+$$/\1/')
13
+GITHUB_PATH=github.com/dotcloud/docker
14
+DOCKER_VERSION=${PKG_NAME}_${VERSION}
15
+DOCKER_FVERSION=${PKG_NAME}_$(shell head -1 changelog | sed 's/^.\+(\(.\+\)).\+$$/\1/')
16
+BUILD_SRC=${CURDIR}/../../build_src
17
+VERSION_TAG=v$(shell head -1 changelog | sed 's/^.\+(\(.\+\)-[0-9]\+).\+$$/\1/')
15 18
 
16
-# Build a debian source package
17
-all: clean build_in_deb
18
-
19
-build_in_deb:
20
-	echo "GOPATH = " $(ROOT_PATH)
21
-	mkdir bin
22
-	cd $(GITHUB_PATH)/docker; GOPATH=$(ROOT_PATH) go build -o $(ROOT_PATH)/bin/docker
19
+all:
20
+	# Compile docker. Used by dpkg-buildpackage.
21
+	cd src/${GITHUB_PATH}/docker; GOPATH=${CURDIR} go build
23 22
 
24
-# DESTDIR provided by Debian packaging
25 23
 install:
26
-	# Call this from a go environment (as packaged for deb source package)
27
-	mkdir -p $(DESTDIR)/$(INSDIR)
28
-	mkdir -p $(DESTDIR)/etc/init
29
-	install -m 0755 bin/docker $(DESTDIR)/$(INSDIR)
30
-	install -o root -m 0755 etc/docker.upstart $(DESTDIR)/etc/init/docker.conf
31
-
32
-$(BUILD_SRC): clean
33
-	# Copy ourselves into $BUILD_SRC to comply with unusual golang constraints
34
-	tar --exclude=*.tar.gz --exclude=checkout.tgz -f checkout.tgz -cz *
35
-	mkdir -p $(BUILD_SRC)/$(GITHUB_PATH)
36
-	tar -f checkout.tgz -C $(BUILD_SRC)/$(GITHUB_PATH) -xz
37
-	cd $(BUILD_SRC)/$(GITHUB_PATH)/docker; GOPATH=$(ROOT_PATH)/$(BUILD_SRC) go get -d
38
-	for d in `find $(BUILD_SRC) -name '.git*'`; do rm -rf $$d; done
39
-	# Populate source build with debian stuff
40
-	cp -R -L ./deb/* $(BUILD_SRC)
41
-
42
-$(SOURCE_PACKAGE): $(BUILD_SRC)
43
-	rm -f $(SOURCE_PACKAGE)
44
-	# Create the debian source package
45
-	tar -f $(SOURCE_PACKAGE) -C ${ROOT_PATH}/${BUILD_SRC} -cz .
46
-
47
-# Build deb package fetching go dependencies and cleaning up git repositories
48
-deb: $(DEB_PACKAGE)
49
-
50
-$(DEB_PACKAGE): $(SOURCE_PACKAGE)
51
-	# dpkg-buildpackage looks for source package tarball in ../
52
-	cd $(BUILD_SRC); dpkg-buildpackage
53
-	rm -rf $(BUILD_PATH) debian/$(PKG_NAME)* debian/files
54
-
55
-debsrc: $(SOURCE_PACKAGE)
56
-
57
-# Build local sources
58
-#$(PKG_NAME): build_local
59
-
60
-build_local:
61
-	-@mkdir -p bin
62
-	cd docker && go build -o ../bin/docker
63
-
64
-gotest:
65
-	@echo "\033[36m[Testing]\033[00m docker..."
66
-	@sudo -E GOPATH=$(ROOT_PATH)/$(BUILD_SRC) go test -v . $(EXTRA_GO_PKG) && \
67
-		echo -n "\033[32m[OK]\033[00m" || \
68
-		echo -n "\033[31m[FAIL]\033[00m"; \
69
-		echo " docker"
70
-	@sudo rm -rf /tmp/docker-*
71
-
72
-clean:
73
-	rm -rf $(BUILD_PATH) debian/$(PKG_NAME)* debian/files $(BUILD_SRC) checkout.tgz bin
24
+	# Used by dpkg-buildpackage
25
+	mkdir -p ${DESTDIR}/usr/bin
26
+	mkdir -p ${DESTDIR}/etc/init
27
+	mkdir -p ${DESTDIR}/DEBIAN
28
+	install -m 0755 src/${GITHUB_PATH}/docker/docker ${DESTDIR}/usr/bin
29
+	install -o root -m 0755 debian/docker.upstart ${DESTDIR}/etc/init/docker.conf
30
+	install debian/lxc-docker.prerm ${DESTDIR}/DEBIAN/prerm
31
+	install debian/lxc-docker.postinst ${DESTDIR}/DEBIAN/postinst
32
+
33
+ubuntu:
34
+	# This Makefile will compile the github master branch of dotcloud/docker
35
+	# Retrieve docker project and its go structure from internet
36
+	rm -rf ${BUILD_SRC}
37
+	git clone $(shell git rev-parse --show-toplevel) ${BUILD_SRC}/${GITHUB_PATH}
38
+	cd ${BUILD_SRC}/${GITHUB_PATH}; git checkout ${VERSION_TAG} && GOPATH=${BUILD_SRC} go get -d
39
+	# Add debianization
40
+	mkdir ${BUILD_SRC}/debian
41
+	cp Makefile ${BUILD_SRC}
42
+	cp -r * ${BUILD_SRC}/debian
43
+	cp ../../README.md ${BUILD_SRC}
44
+	# Cleanup
45
+	for d in `find ${BUILD_SRC} -name '.git*'`; do rm -rf $$d; done
46
+	rm -rf ${BUILD_SRC}/../${DOCKER_VERSION}.orig.tar.gz
47
+	rm -rf ${BUILD_SRC}/pkg
48
+	# Create docker debian files
49
+	cd ${BUILD_SRC}; tar czf ../${DOCKER_VERSION}.orig.tar.gz .
50
+	cd ${BUILD_SRC}; dpkg-buildpackage -us -uc
51
+	rm -rf ${BUILD_SRC}
52
+	# Sign package and upload it to PPA if GPG_KEY environment variable
53
+	# holds a private GPG KEY
54
+	if /usr/bin/test "$${GPG_KEY}" == ""; then exit 2; fi
55
+	mkdir ${BUILD_SRC}
56
+	# Import gpg signing key
57
+	echo "$${GPG_KEY}" | gpg --allow-secret-key-import --import
58
+	# Sign the package
59
+	cd ${BUILD_SRC}; dpkg-source -x ${BUILD_SRC}/../${DOCKER_FVERSION}.dsc
60
+	cd ${BUILD_SRC}/${PKG_NAME}-${VERSION}; debuild -S -sa
61
+	cd ${BUILD_SRC};dput ppa:dotcloud/lxc-docker ${DOCKER_FVERSION}_source.changes
62
+	rm -rf ${BUILD_SRC}
74 63
new file mode 100644
... ...
@@ -0,0 +1,37 @@
0
+Docker on Ubuntu
1
+================
2
+
3
+The easiest way to get docker up and running natively on Ubuntu is installing
4
+it from its official PPA::
5
+
6
+  sudo sh -c "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >>/etc/apt/sources.list"
7
+  sudo apt-get update
8
+  sudo apt-get install lxc-docker
9
+
10
+
11
+Building docker package
12
+~~~~~~~~~~~~~~~~~~~~~~~
13
+
14
+The building process is shared by both, developers and maintainers. If you are
15
+a developer, the Makefile will stop with exit status 2 right before signing
16
+the built packages.
17
+
18
+Assuming you are working on an Ubuntu 12.04 TLS system ::
19
+
20
+  # Download a fresh copy of the docker project
21
+  git clone https://github.com/dotcloud/docker.git
22
+  cd docker
23
+
24
+  # Get building dependencies
25
+  sudo apt-get update; sudo apt-get install -y debhelper autotools-dev devscripts golang
26
+
27
+  # Make the ubuntu package
28
+  (cd packaging/ubuntu; make ubuntu)
29
+
30
+
31
+Install docker built package
32
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
33
+
34
+::
35
+
36
+  sudo dpkg -i lxc-docker_*_amd64.deb; sudo apt-get install -f -y
0 37
new file mode 100644
... ...
@@ -0,0 +1,12 @@
0
+BUILDBOT_IP = '192.168.33.32'
1
+
2
+Vagrant::Config.run do |config|
3
+  config.vm.box = 'precise64'
4
+  config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
5
+  config.vm.share_folder 'v-data', '/data/docker', "#{File.dirname(__FILE__)}/../.."
6
+  config.vm.network :hostonly,BUILDBOT_IP
7
+
8
+  # Install ubuntu packaging dependencies and create ubuntu packages
9
+  config.vm.provision :shell, :inline => 'export DEBIAN_FRONTEND=noninteractive; apt-get -qq update; apt-get install -qq -y git debhelper autotools-dev devscripts golang'
10
+  config.vm.provision :shell, :inline => "export GPG_KEY='#{ENV['GPG_KEY']}'; cd /data/docker/packaging/ubuntu; make ubuntu"
11
+end
0 12
new file mode 100644
... ...
@@ -0,0 +1,137 @@
0
+
1
+lxc-docker (0.2.1-1) precise; urgency=low
2
+
3
+  - 'docker commit -run' bundles a layer with default runtime options: command, ports etc. 
4
+  - Improve install process on Vagrant
5
+  - New Dockerfile operation: "maintainer"
6
+  - New Dockerfile operation: "expose"
7
+  - New Dockerfile operation: "cmd"
8
+  - Contrib script to build a Debian base layer
9
+  - 'docker -d -r': restart crashed containers at daemon startup
10
+  - Runtime: improve test coverage
11
+
12
+ -- dotCloud <ops@dotcloud.com>  Wed, 1 May 2013 00:00:00 -0700
13
+
14
+
15
+lxc-docker (0.2.0-1) precise; urgency=low
16
+
17
+  - Runtime: ghost containers can be killed and waited for
18
+  - Documentation: update install intructions
19
+  - Packaging: fix Vagrantfile
20
+  - Development: automate releasing binaries and ubuntu packages
21
+  - Add a changelog
22
+  - Various bugfixes
23
+
24
+ -- dotCloud <ops@dotcloud.com>  Mon, 23 Apr 2013 00:00:00 -0700
25
+
26
+
27
+lxc-docker (0.1.8-1) precise; urgency=low
28
+
29
+  - Dynamically detect cgroup capabilities
30
+  - Issue stability warning on kernels <3.8
31
+  - 'docker push' buffers on disk instead of memory
32
+  - Fix 'docker diff' for removed files
33
+  - Fix 'docker stop' for ghost containers
34
+  - Fix handling of pidfile
35
+  - Various bugfixes and stability improvements
36
+
37
+ -- dotCloud <ops@dotcloud.com>  Mon, 22 Apr 2013 00:00:00 -0700
38
+
39
+
40
+lxc-docker (0.1.7-1) precise; urgency=low
41
+
42
+  - Container ports are available on localhost
43
+  - 'docker ps' shows allocated TCP ports
44
+  - Contributors can run 'make hack' to start a continuous integration VM
45
+  - Streamline ubuntu packaging & uploading
46
+  - Various bugfixes and stability improvements
47
+
48
+ -- dotCloud <ops@dotcloud.com>  Thu, 18 Apr 2013 00:00:00 -0700
49
+
50
+
51
+lxc-docker (0.1.6-1) precise; urgency=low
52
+
53
+  - Record the author an image with 'docker commit -author'
54
+
55
+ -- dotCloud <ops@dotcloud.com>  Wed, 17 Apr 2013 00:00:00 -0700
56
+
57
+
58
+lxc-docker (0.1.5-1) precise; urgency=low
59
+
60
+  - Disable standalone mode
61
+  - Use a custom DNS resolver with 'docker -d -dns'
62
+  - Detect ghost containers
63
+  - Improve diagnosis of missing system capabilities
64
+  - Allow disabling memory limits at compile time
65
+  - Add debian packaging
66
+  - Documentation: installing on Arch Linux
67
+  - Documentation: running Redis on docker
68
+  - Fixed lxc 0.9 compatibility
69
+  - Automatically load aufs module
70
+  - Various bugfixes and stability improvements
71
+
72
+ -- dotCloud <ops@dotcloud.com>  Wed, 17 Apr 2013 00:00:00 -0700
73
+
74
+
75
+lxc-docker (0.1.4-1) precise; urgency=low
76
+
77
+  - Full support for TTY emulation
78
+  - Detach from a TTY session with the escape sequence `C-p C-q`
79
+  - Various bugfixes and stability improvements
80
+  - Minor UI improvements
81
+  - Automatically create our own bridge interface 'docker0'
82
+
83
+ -- dotCloud <ops@dotcloud.com>  Tue,  9 Apr 2013 00:00:00 -0700
84
+
85
+
86
+lxc-docker (0.1.3-1) precise; urgency=low
87
+
88
+  - Choose TCP frontend port with '-p :PORT'
89
+  - Layer format is versioned
90
+  - Major reliability improvements to the process manager
91
+  - Various bugfixes and stability improvements
92
+
93
+ -- dotCloud <ops@dotcloud.com>  Thu,  4 Apr 2013 00:00:00 -0700
94
+
95
+
96
+lxc-docker (0.1.2-1) precise; urgency=low
97
+
98
+  - Set container hostname with 'docker run -h'
99
+  - Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]'
100
+  - Various bugfixes and stability improvements
101
+  - UI polish
102
+  - Progress bar on push/pull
103
+  - Use XZ compression by default
104
+  - Make IP allocator lazy
105
+
106
+ -- dotCloud <ops@dotcloud.com>  Wed,  3 Apr 2013 00:00:00 -0700
107
+
108
+
109
+lxc-docker (0.1.1-1) precise; urgency=low
110
+
111
+  - Display shorthand IDs for convenience
112
+  - Stabilize process management
113
+  - Layers can include a commit message
114
+  - Simplified 'docker attach'
115
+  - Fixed support for re-attaching
116
+  - Various bugfixes and stability improvements
117
+  - Auto-download at run
118
+  - Auto-login on push
119
+  - Beefed up documentation
120
+
121
+ -- dotCloud <ops@dotcloud.com>  Sun, 31 Mar 2013 00:00:00 -0700
122
+
123
+
124
+lxc-docker (0.1.0-1) precise; urgency=low
125
+
126
+  - First release
127
+  - Implement registry in order to push/pull images
128
+  - TCP port allocation
129
+  - Fix termcaps on Linux
130
+  - Add documentation
131
+  - Add Vagrant support with Vagrantfile
132
+  - Add unit tests
133
+  - Add repository/tags to ease image management
134
+  - Improve the layer implementation
135
+
136
+ -- dotCloud <ops@dotcloud.com>  Sat, 23 Mar 2013 00:00:00 -0700
0 137
new file mode 100644
... ...
@@ -0,0 +1 @@
0
+8
0 1
new file mode 100644
... ...
@@ -0,0 +1,19 @@
0
+Source: lxc-docker
1
+Section: misc
2
+Priority: extra
3
+Maintainer: Daniel Mizyrycki <daniel@dotcloud.com>
4
+Build-Depends: debhelper,autotools-dev,devscripts,golang
5
+Standards-Version: 3.9.3
6
+Homepage: http://github.com/dotcloud/docker
7
+
8
+Package: lxc-docker
9
+Architecture: linux-any
10
+Depends: ${misc:Depends},${shlibs:Depends},lxc,bsdtar
11
+Conflicts: docker
12
+Description: lxc-docker is a Linux container runtime
13
+ Docker complements LXC with a high-level API which operates at the process
14
+ level. It runs unix processes with strong guarantees of isolation and
15
+ repeatability across servers.
16
+ Docker is a great building block for automating distributed systems:
17
+ large-scale web deployments, database clusters, continuous deployment systems,
18
+ private PaaS, service-oriented architectures, etc.
0 19
new file mode 100644
... ...
@@ -0,0 +1,237 @@
0
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
1
+Upstream-Name: docker
2
+Upstream-Contact: DotCloud Inc <opensource@dotcloud.com>
3
+Source: http://github.com/dotcloud/docker
4
+
5
+Files: *
6
+Copyright: 2012, DotCloud Inc <opensource@dotcloud.com>
7
+License: Apache-2.0
8
+ Apache License
9
+ Version 2.0, January 2004
10
+ http://www.apache.org/licenses/
11
+ 
12
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
13
+ 
14
+ 1. Definitions.
15
+ 
16
+ "License" shall mean the terms and conditions for use, reproduction,
17
+ and distribution as defined by Sections 1 through 9 of this document.
18
+ 
19
+ "Licensor" shall mean the copyright owner or entity authorized by
20
+ the copyright owner that is granting the License.
21
+ 
22
+ "Legal Entity" shall mean the union of the acting entity and all
23
+ other entities that control, are controlled by, or are under common
24
+ control with that entity. For the purposes of this definition,
25
+ "control" means (i) the power, direct or indirect, to cause the
26
+ direction or management of such entity, whether by contract or
27
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
28
+ outstanding shares, or (iii) beneficial ownership of such entity.
29
+ 
30
+ "You" (or "Your") shall mean an individual or Legal Entity
31
+ exercising permissions granted by this License.
32
+ 
33
+ "Source" form shall mean the preferred form for making modifications,
34
+ including but not limited to software source code, documentation
35
+ source, and configuration files.
36
+ 
37
+ "Object" form shall mean any form resulting from mechanical
38
+ transformation or translation of a Source form, including but
39
+ not limited to compiled object code, generated documentation,
40
+ and conversions to other media types.
41
+ 
42
+ "Work" shall mean the work of authorship, whether in Source or
43
+ Object form, made available under the License, as indicated by a
44
+ copyright notice that is included in or attached to the work
45
+ (an example is provided in the Appendix below).
46
+ 
47
+ "Derivative Works" shall mean any work, whether in Source or Object
48
+ form, that is based on (or derived from) the Work and for which the
49
+ editorial revisions, annotations, elaborations, or other modifications
50
+ represent, as a whole, an original work of authorship. For the purposes
51
+ of this License, Derivative Works shall not include works that remain
52
+ separable from, or merely link (or bind by name) to the interfaces of,
53
+ the Work and Derivative Works thereof.
54
+ 
55
+ "Contribution" shall mean any work of authorship, including
56
+ the original version of the Work and any modifications or additions
57
+ to that Work or Derivative Works thereof, that is intentionally
58
+ submitted to Licensor for inclusion in the Work by the copyright owner
59
+ or by an individual or Legal Entity authorized to submit on behalf of
60
+ the copyright owner. For the purposes of this definition, "submitted"
61
+ means any form of electronic, verbal, or written communication sent
62
+ to the Licensor or its representatives, including but not limited to
63
+ communication on electronic mailing lists, source code control systems,
64
+ and issue tracking systems that are managed by, or on behalf of, the
65
+ Licensor for the purpose of discussing and improving the Work, but
66
+ excluding communication that is conspicuously marked or otherwise
67
+ designated in writing by the copyright owner as "Not a Contribution."
68
+ 
69
+ "Contributor" shall mean Licensor and any individual or Legal Entity
70
+ on behalf of whom a Contribution has been received by Licensor and
71
+ subsequently incorporated within the Work.
72
+ 
73
+ 2. Grant of Copyright License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ copyright license to reproduce, prepare Derivative Works of,
77
+ publicly display, publicly perform, sublicense, and distribute the
78
+ Work and such Derivative Works in Source or Object form.
79
+ 
80
+ 3. Grant of Patent License. Subject to the terms and conditions of
81
+ this License, each Contributor hereby grants to You a perpetual,
82
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
83
+ (except as stated in this section) patent license to make, have made,
84
+ use, offer to sell, sell, import, and otherwise transfer the Work,
85
+ where such license applies only to those patent claims licensable
86
+ by such Contributor that are necessarily infringed by their
87
+ Contribution(s) alone or by combination of their Contribution(s)
88
+ with the Work to which such Contribution(s) was submitted. If You
89
+ institute patent litigation against any entity (including a
90
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
91
+ or a Contribution incorporated within the Work constitutes direct
92
+ or contributory patent infringement, then any patent licenses
93
+ granted to You under this License for that Work shall terminate
94
+ as of the date such litigation is filed.
95
+ 
96
+ 4. Redistribution. You may reproduce and distribute copies of the
97
+ Work or Derivative Works thereof in any medium, with or without
98
+ modifications, and in Source or Object form, provided that You
99
+ meet the following conditions:
100
+ 
101
+ (a) You must give any other recipients of the Work or
102
+ Derivative Works a copy of this License; and
103
+ 
104
+ (b) You must cause any modified files to carry prominent notices
105
+ stating that You changed the files; and
106
+ 
107
+ (c) You must retain, in the Source form of any Derivative Works
108
+ that You distribute, all copyright, patent, trademark, and
109
+ attribution notices from the Source form of the Work,
110
+ excluding those notices that do not pertain to any part of
111
+ the Derivative Works; and
112
+ 
113
+ (d) If the Work includes a "NOTICE" text file as part of its
114
+ distribution, then any Derivative Works that You distribute must
115
+ include a readable copy of the attribution notices contained
116
+ within such NOTICE file, excluding those notices that do not
117
+ pertain to any part of the Derivative Works, in at least one
118
+ of the following places: within a NOTICE text file distributed
119
+ as part of the Derivative Works; within the Source form or
120
+ documentation, if provided along with the Derivative Works; or,
121
+ within a display generated by the Derivative Works, if and
122
+ wherever such third-party notices normally appear. The contents
123
+ of the NOTICE file are for informational purposes only and
124
+ do not modify the License. You may add Your own attribution
125
+ notices within Derivative Works that You distribute, alongside
126
+ or as an addendum to the NOTICE text from the Work, provided
127
+ that such additional attribution notices cannot be construed
128
+ as modifying the License.
129
+ 
130
+ You may add Your own copyright statement to Your modifications and
131
+ may provide additional or different license terms and conditions
132
+ for use, reproduction, or distribution of Your modifications, or
133
+ for any such Derivative Works as a whole, provided Your use,
134
+ reproduction, and distribution of the Work otherwise complies with
135
+ the conditions stated in this License.
136
+ 
137
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
138
+ any Contribution intentionally submitted for inclusion in the Work
139
+ by You to the Licensor shall be under the terms and conditions of
140
+ this License, without any additional terms or conditions.
141
+ Notwithstanding the above, nothing herein shall supersede or modify
142
+ the terms of any separate license agreement you may have executed
143
+ with Licensor regarding such Contributions.
144
+ 
145
+ 6. Trademarks. This License does not grant permission to use the trade
146
+ names, trademarks, service marks, or product names of the Licensor,
147
+ except as required for reasonable and customary use in describing the
148
+ origin of the Work and reproducing the content of the NOTICE file.
149
+ 
150
+ 7. Disclaimer of Warranty. Unless required by applicable law or
151
+ agreed to in writing, Licensor provides the Work (and each
152
+ Contributor provides its Contributions) on an "AS IS" BASIS,
153
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
154
+ implied, including, without limitation, any warranties or conditions
155
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
156
+ PARTICULAR PURPOSE. You are solely responsible for determining the
157
+ appropriateness of using or redistributing the Work and assume any
158
+ risks associated with Your exercise of permissions under this License.
159
+ 
160
+ 8. Limitation of Liability. In no event and under no legal theory,
161
+ whether in tort (including negligence), contract, or otherwise,
162
+ unless required by applicable law (such as deliberate and grossly
163
+ negligent acts) or agreed to in writing, shall any Contributor be
164
+ liable to You for damages, including any direct, indirect, special,
165
+ incidental, or consequential damages of any character arising as a
166
+ result of this License or out of the use or inability to use the
167
+ Work (including but not limited to damages for loss of goodwill,
168
+ work stoppage, computer failure or malfunction, or any and all
169
+ other commercial damages or losses), even if such Contributor
170
+ has been advised of the possibility of such damages.
171
+ 
172
+ 9. Accepting Warranty or Additional Liability. While redistributing
173
+ the Work or Derivative Works thereof, You may choose to offer,
174
+ and charge a fee for, acceptance of support, warranty, indemnity,
175
+ or other liability obligations and/or rights consistent with this
176
+ License. However, in accepting such obligations, You may act only
177
+ on Your own behalf and on Your sole responsibility, not on behalf
178
+ of any other Contributor, and only if You agree to indemnify,
179
+ defend, and hold each Contributor harmless for any liability
180
+ incurred by, or claims asserted against, such Contributor by reason
181
+ of your accepting any such warranty or additional liability.
182
+ 
183
+ END OF TERMS AND CONDITIONS
184
+ 
185
+ APPENDIX: How to apply the Apache License to your work.
186
+ 
187
+ To apply the Apache License to your work, attach the following
188
+ boilerplate notice, with the fields enclosed by brackets "[]"
189
+ replaced with your own identifying information. (Don't include
190
+ the brackets!) The text should be enclosed in the appropriate
191
+ comment syntax for the file format. We also recommend that a
192
+ file or class name and description of purpose be included on the
193
+ same "printed page" as the copyright notice for easier
194
+ identification within third-party archives.
195
+ 
196
+ Copyright 2012 DotCloud Inc
197
+ 
198
+ Licensed under the Apache License, Version 2.0 (the "License");
199
+ you may not use this file except in compliance with the License.
200
+ You may obtain a copy of the License at
201
+ 
202
+ http://www.apache.org/licenses/LICENSE-2.0
203
+ 
204
+ Unless required by applicable law or agreed to in writing, software
205
+ distributed under the License is distributed on an "AS IS" BASIS,
206
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
207
+ See the License for the specific language governing permissions and
208
+ limitations under the License.
209
+ 
210
+ 
211
+Files: src/github.com/kr/pty/*
212
+Copyright: Copyright (c) 2011 Keith Rarick
213
+License: Expat
214
+ Copyright (c) 2011 Keith Rarick
215
+ 
216
+ Permission is hereby granted, free of charge, to any person
217
+ obtaining a copy of this software and associated
218
+ documentation files (the "Software"), to deal in the
219
+ Software without restriction, including without limitation
220
+ the rights to use, copy, modify, merge, publish, distribute,
221
+ sublicense, and/or sell copies of the Software, and to
222
+ permit persons to whom the Software is furnished to do so,
223
+ subject to the following conditions:
224
+ 
225
+ The above copyright notice and this permission notice shall
226
+ be included in all copies or substantial portions of the
227
+ Software.
228
+ 
229
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
230
+ KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
231
+ WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
232
+ PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
233
+ OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
234
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
235
+ OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
236
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0 237
deleted file mode 100644
... ...
@@ -1,5 +0,0 @@
1
-lxc-docker (1) precise; urgency=low
2
-
3
-  * Initial release
4
-
5
- -- dotCloud <ops@dotcloud.com>  Mon, 25 Mar 2013 05:51:12 -0700
6 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-8
2 1
deleted file mode 100644
... ...
@@ -1,19 +0,0 @@
1
-Source: lxc-docker
2
-Section: misc
3
-Priority: extra
4
-Homepage: http://docker.io
5
-Maintainer: Daniel Mizyrycki <daniel@dotcloud.com>
6
-Build-Depends: debhelper (>= 8.0.0), pkg-config, git, golang, libsqlite3-dev
7
-Vcs-Git: http://github.com/dotcloud/docker.git
8
-Standards-Version: 3.9.3
9
-
10
-Package: lxc-docker
11
-Architecture: amd64
12
-Depends: ${shlibs:Depends}, ${misc:Depends}, lxc, wget, bsdtar, curl, sqlite3
13
-Conflicts: docker
14
-Description: A process manager with superpowers
15
-    It encapsulates heterogeneous payloads in Standard Containers, and runs
16
-    them on any server with strong guarantees of isolation and repeatability.
17
-    Is is a great building block for automating distributed systems:
18
-    large-scale web deployments, database clusters, continuous deployment
19
-    systems, private PaaS, service-oriented architectures, etc.
20 1
deleted file mode 100644
... ...
@@ -1,209 +0,0 @@
1
-Format: http://dep.debian.net/deps/dep5
2
-Upstream-Name: docker
3
-Source: https://github.com/dotcloud/docker
4
-
5
-Files: *
6
-Copyright: 2012 DotCloud Inc (opensource@dotcloud.com)
7
-License: Apache License Version 2.0
8
-
9
-                                Apache License
10
-                           Version 2.0, January 2004
11
-                        http://www.apache.org/licenses/
12
-
13
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
14
-
15
-   1. Definitions.
16
-
17
-      "License" shall mean the terms and conditions for use, reproduction,
18
-      and distribution as defined by Sections 1 through 9 of this document.
19
-
20
-      "Licensor" shall mean the copyright owner or entity authorized by
21
-      the copyright owner that is granting the License.
22
-
23
-      "Legal Entity" shall mean the union of the acting entity and all
24
-      other entities that control, are controlled by, or are under common
25
-      control with that entity. For the purposes of this definition,
26
-      "control" means (i) the power, direct or indirect, to cause the
27
-      direction or management of such entity, whether by contract or
28
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
29
-      outstanding shares, or (iii) beneficial ownership of such entity.
30
-
31
-      "You" (or "Your") shall mean an individual or Legal Entity
32
-      exercising permissions granted by this License.
33
-
34
-      "Source" form shall mean the preferred form for making modifications,
35
-      including but not limited to software source code, documentation
36
-      source, and configuration files.
37
-
38
-      "Object" form shall mean any form resulting from mechanical
39
-      transformation or translation of a Source form, including but
40
-      not limited to compiled object code, generated documentation,
41
-      and conversions to other media types.
42
-
43
-      "Work" shall mean the work of authorship, whether in Source or
44
-      Object form, made available under the License, as indicated by a
45
-      copyright notice that is included in or attached to the work
46
-      (an example is provided in the Appendix below).
47
-
48
-      "Derivative Works" shall mean any work, whether in Source or Object
49
-      form, that is based on (or derived from) the Work and for which the
50
-      editorial revisions, annotations, elaborations, or other modifications
51
-      represent, as a whole, an original work of authorship. For the purposes
52
-      of this License, Derivative Works shall not include works that remain
53
-      separable from, or merely link (or bind by name) to the interfaces of,
54
-      the Work and Derivative Works thereof.
55
-
56
-      "Contribution" shall mean any work of authorship, including
57
-      the original version of the Work and any modifications or additions
58
-      to that Work or Derivative Works thereof, that is intentionally
59
-      submitted to Licensor for inclusion in the Work by the copyright owner
60
-      or by an individual or Legal Entity authorized to submit on behalf of
61
-      the copyright owner. For the purposes of this definition, "submitted"
62
-      means any form of electronic, verbal, or written communication sent
63
-      to the Licensor or its representatives, including but not limited to
64
-      communication on electronic mailing lists, source code control systems,
65
-      and issue tracking systems that are managed by, or on behalf of, the
66
-      Licensor for the purpose of discussing and improving the Work, but
67
-      excluding communication that is conspicuously marked or otherwise
68
-      designated in writing by the copyright owner as "Not a Contribution."
69
-
70
-      "Contributor" shall mean Licensor and any individual or Legal Entity
71
-      on behalf of whom a Contribution has been received by Licensor and
72
-      subsequently incorporated within the Work.
73
-
74
-   2. Grant of Copyright License. Subject to the terms and conditions of
75
-      this License, each Contributor hereby grants to You a perpetual,
76
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77
-      copyright license to reproduce, prepare Derivative Works of,
78
-      publicly display, publicly perform, sublicense, and distribute the
79
-      Work and such Derivative Works in Source or Object form.
80
-
81
-   3. Grant of Patent License. Subject to the terms and conditions of
82
-      this License, each Contributor hereby grants to You a perpetual,
83
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
84
-      (except as stated in this section) patent license to make, have made,
85
-      use, offer to sell, sell, import, and otherwise transfer the Work,
86
-      where such license applies only to those patent claims licensable
87
-      by such Contributor that are necessarily infringed by their
88
-      Contribution(s) alone or by combination of their Contribution(s)
89
-      with the Work to which such Contribution(s) was submitted. If You
90
-      institute patent litigation against any entity (including a
91
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
92
-      or a Contribution incorporated within the Work constitutes direct
93
-      or contributory patent infringement, then any patent licenses
94
-      granted to You under this License for that Work shall terminate
95
-      as of the date such litigation is filed.
96
-
97
-   4. Redistribution. You may reproduce and distribute copies of the
98
-      Work or Derivative Works thereof in any medium, with or without
99
-      modifications, and in Source or Object form, provided that You
100
-      meet the following conditions:
101
-
102
-      (a) You must give any other recipients of the Work or
103
-          Derivative Works a copy of this License; and
104
-
105
-      (b) You must cause any modified files to carry prominent notices
106
-          stating that You changed the files; and
107
-
108
-      (c) You must retain, in the Source form of any Derivative Works
109
-          that You distribute, all copyright, patent, trademark, and
110
-          attribution notices from the Source form of the Work,
111
-          excluding those notices that do not pertain to any part of
112
-          the Derivative Works; and
113
-
114
-      (d) If the Work includes a "NOTICE" text file as part of its
115
-          distribution, then any Derivative Works that You distribute must
116
-          include a readable copy of the attribution notices contained
117
-          within such NOTICE file, excluding those notices that do not
118
-          pertain to any part of the Derivative Works, in at least one
119
-          of the following places: within a NOTICE text file distributed
120
-          as part of the Derivative Works; within the Source form or
121
-          documentation, if provided along with the Derivative Works; or,
122
-          within a display generated by the Derivative Works, if and
123
-          wherever such third-party notices normally appear. The contents
124
-          of the NOTICE file are for informational purposes only and
125
-          do not modify the License. You may add Your own attribution
126
-          notices within Derivative Works that You distribute, alongside
127
-          or as an addendum to the NOTICE text from the Work, provided
128
-          that such additional attribution notices cannot be construed
129
-          as modifying the License.
130
-
131
-      You may add Your own copyright statement to Your modifications and
132
-      may provide additional or different license terms and conditions
133
-      for use, reproduction, or distribution of Your modifications, or
134
-      for any such Derivative Works as a whole, provided Your use,
135
-      reproduction, and distribution of the Work otherwise complies with
136
-      the conditions stated in this License.
137
-
138
-   5. Submission of Contributions. Unless You explicitly state otherwise,
139
-      any Contribution intentionally submitted for inclusion in the Work
140
-      by You to the Licensor shall be under the terms and conditions of
141
-      this License, without any additional terms or conditions.
142
-      Notwithstanding the above, nothing herein shall supersede or modify
143
-      the terms of any separate license agreement you may have executed
144
-      with Licensor regarding such Contributions.
145
-
146
-   6. Trademarks. This License does not grant permission to use the trade
147
-      names, trademarks, service marks, or product names of the Licensor,
148
-      except as required for reasonable and customary use in describing the
149
-      origin of the Work and reproducing the content of the NOTICE file.
150
-
151
-   7. Disclaimer of Warranty. Unless required by applicable law or
152
-      agreed to in writing, Licensor provides the Work (and each
153
-      Contributor provides its Contributions) on an "AS IS" BASIS,
154
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
155
-      implied, including, without limitation, any warranties or conditions
156
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
157
-      PARTICULAR PURPOSE. You are solely responsible for determining the
158
-      appropriateness of using or redistributing the Work and assume any
159
-      risks associated with Your exercise of permissions under this License.
160
-
161
-   8. Limitation of Liability. In no event and under no legal theory,
162
-      whether in tort (including negligence), contract, or otherwise,
163
-      unless required by applicable law (such as deliberate and grossly
164
-      negligent acts) or agreed to in writing, shall any Contributor be
165
-      liable to You for damages, including any direct, indirect, special,
166
-      incidental, or consequential damages of any character arising as a
167
-      result of this License or out of the use or inability to use the
168
-      Work (including but not limited to damages for loss of goodwill,
169
-      work stoppage, computer failure or malfunction, or any and all
170
-      other commercial damages or losses), even if such Contributor
171
-      has been advised of the possibility of such damages.
172
-
173
-   9. Accepting Warranty or Additional Liability. While redistributing
174
-      the Work or Derivative Works thereof, You may choose to offer,
175
-      and charge a fee for, acceptance of support, warranty, indemnity,
176
-      or other liability obligations and/or rights consistent with this
177
-      License. However, in accepting such obligations, You may act only
178
-      on Your own behalf and on Your sole responsibility, not on behalf
179
-      of any other Contributor, and only if You agree to indemnify,
180
-      defend, and hold each Contributor harmless for any liability
181
-      incurred by, or claims asserted against, such Contributor by reason
182
-      of your accepting any such warranty or additional liability.
183
-
184
-   END OF TERMS AND CONDITIONS
185
-
186
-   APPENDIX: How to apply the Apache License to your work.
187
-
188
-      To apply the Apache License to your work, attach the following
189
-      boilerplate notice, with the fields enclosed by brackets "[]"
190
-      replaced with your own identifying information. (Don't include
191
-      the brackets!) The text should be enclosed in the appropriate
192
-      comment syntax for the file format. We also recommend that a
193
-      file or class name and description of purpose be included on the
194
-      same "printed page" as the copyright notice for easier
195
-      identification within third-party archives.
196
-
197
-   Copyright 2012 DotCloud Inc (opensource@dotcloud.com)
198
-
199
-   Licensed under the Apache License, Version 2.0 (the "License");
200
-   you may not use this file except in compliance with the License.
201
-   You may obtain a copy of the License at
202
-
203
-       http://www.apache.org/licenses/LICENSE-2.0
204
-
205
-   Unless required by applicable law or agreed to in writing, software
206
-   distributed under the License is distributed on an "AS IS" BASIS,
207
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
208
-   See the License for the specific language governing permissions and
209
-   limitations under the License.
210 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-README.md
2 1
deleted file mode 100755
... ...
@@ -1,13 +0,0 @@
1
-#!/usr/bin/make -f
2
-# -*- makefile -*-
3
-# Sample debian/rules that uses debhelper.
4
-# This file was originally written by Joey Hess and Craig Small.
5
-# As a special exception, when this file is copied by dh-make into a
6
-# dh-make output file, you may use that output file without restriction.
7
-# This special exception was added by Craig Small in version 0.37 of dh-make.
8
-
9
-# Uncomment this to turn on verbose mode.
10
-#export DH_VERBOSE=1
11
-
12
-%:
13
-	dh $@
14 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-3.0 (quilt)
2 1
new file mode 100644
... ...
@@ -0,0 +1,10 @@
0
+description     "Run docker"
1
+
2
+start on runlevel [2345]
3
+stop on starting rc RUNLEVEL=[016]
4
+respawn
5
+
6
+script
7
+    # FIXME: docker should not depend on the system having en_US.UTF-8
8
+    LC_ALL='en_US.UTF-8' /usr/bin/docker -d
9
+end script
0 10
new file mode 100644
... ...
@@ -0,0 +1 @@
0
+README.md
0 1
deleted file mode 100644
... ...
@@ -1,10 +0,0 @@
1
-description     "Run docker"
2
-
3
-start on runlevel [2345]
4
-stop on starting rc RUNLEVEL=[016]
5
-respawn
6
-
7
-script
8
-    test -f /etc/default/locale && . /etc/default/locale || true
9
-    LANG=$LANG LC_ALL=$LANG /usr/bin/docker -d
10
-end script
11 1
new file mode 100644
... ...
@@ -0,0 +1,4 @@
0
+#!/bin/sh
1
+
2
+# Start docker
3
+/sbin/start docker
0 4
new file mode 100644
... ...
@@ -0,0 +1,4 @@
0
+#!/bin/sh
1
+
2
+# Stop docker
3
+/sbin/stop docker
0 4
new file mode 100644
... ...
@@ -0,0 +1,39 @@
0
+Maintainer duty
1
+===============
2
+
3
+Ubuntu allows developers to use their PPA (Personal Package Archive)
4
+repository. This is very convenient for the users as they just need to add
5
+the PPA address, update their package database and use the apt-get tool.
6
+
7
+For now, the official lxc-docker package is located on launchpad and can be
8
+accessed adding the following line to /etc/apt/sources.list ::
9
+
10
+
11
+  deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main
12
+
13
+
14
+Releasing a new package
15
+~~~~~~~~~~~~~~~~~~~~~~~
16
+
17
+The most relevant information to update is the packaging/ubuntu/changelog file:
18
+Each new release should create a new first paragraph with new release version,
19
+changes, and the maintainer information. The core of this paragraph is
20
+located on CHANGELOG.md. Make sure to transcribe it and translate the formats
21
+(eg: packaging/ubuntu/changelog uses 2 spaces for body change descriptions
22
+instead of 1 space from CHANGELOG.md)
23
+
24
+Assuming your PPA GPG signing key is on /media/usbdrive/docker.key, load it
25
+into the GPG_KEY environment variable with::
26
+
27
+  export GPG_KEY=`cat /media/usbdrive/docker.key`
28
+
29
+
30
+After this is done and you are ready to upload the package to the PPA, you have
31
+a couple of choices:
32
+
33
+* Follow packaging/ubuntu/README.ubuntu to generate the actual source packages
34
+  and upload them to the PPA
35
+
36
+* Let vagrant do all the work for you::
37
+
38
+  ( cd docker/packaging/ubuntu; vagrant up )
0 39
new file mode 100755
... ...
@@ -0,0 +1,13 @@
0
+#!/usr/bin/make -f
1
+# -*- makefile -*-
2
+# Sample debian/rules that uses debhelper.
3
+# This file was originally written by Joey Hess and Craig Small.
4
+# As a special exception, when this file is copied by dh-make into a
5
+# dh-make output file, you may use that output file without restriction.
6
+# This special exception was added by Craig Small in version 0.37 of dh-make.
7
+
8
+# Uncomment this to turn on verbose mode.
9
+#export DH_VERBOSE=1
10
+
11
+%:
12
+	dh $@
0 13
new file mode 100644
... ...
@@ -0,0 +1 @@
0
+3.0 (quilt)
0 1
deleted file mode 100644
... ...
@@ -1,17 +0,0 @@
1
-node default {
2
-    exec {
3
-        "apt_update" : 
4
-            command => "/usr/bin/apt-get update"
5
-    }
6
-
7
-    Package {
8
-        require => Exec['apt_update']
9
-    }
10
-
11
-    group { "puppet":
12
-        ensure => "present"
13
-    }
14
-
15
-    include "docker"
16
-
17
-}
18 1
deleted file mode 100644
... ...
@@ -1,99 +0,0 @@
1
-class virtualbox {
2
-    Package { ensure => "installed" }
3
-
4
-    # remove some files from the base vagrant image because they're old
5
-    file { "/home/vagrant/docker-master":
6
-        ensure => absent,
7
-        recurse => true,
8
-        force => true,
9
-        purge => true,
10
-    }
11
-    file { "/usr/local/bin/dockerd":
12
-        ensure => absent,
13
-    }
14
-    file { "/usr/local/bin/docker":
15
-        ensure => absent,
16
-    }
17
-
18
-    # Set up VirtualBox guest utils
19
-    package { "virtualbox-guest-utils": }
20
-    exec { "vbox-add" :
21
-        command => "/etc/init.d/vboxadd setup",
22
-        require => [
23
-            Package["virtualbox-guest-utils"],
24
-            Package["linux-headers-3.5.0-25-generic"], ],
25
-    }
26
-}
27
-
28
-class docker {
29
-    # update this with latest go binary dist
30
-    $go_url = "http://go.googlecode.com/files/go1.0.3.linux-amd64.tar.gz"
31
-
32
-    Package { ensure => "installed" }
33
-
34
-    package { ["lxc", "debootstrap", "wget", "bsdtar", "git",
35
-               "linux-image-3.5.0-25-generic",
36
-               "linux-image-extra-3.5.0-25-generic",
37
-               "linux-headers-3.5.0-25-generic"]: }
38
-
39
-    $ec2_version = file("/etc/ec2_version", "/dev/null")
40
-    $rax_version = inline_template("<%= %x{/usr/bin/xenstore-read vm-data/provider_data/provider} %>")
41
-
42
-    if ($ec2_version) {
43
-        $vagrant_user = "ubuntu"
44
-        $vagrant_home = "/home/ubuntu"
45
-    } elsif ($rax_version) {
46
-        $vagrant_user = "root"
47
-        $vagrant_home = "/root"
48
-    } else {
49
-        # virtualbox is the vagrant default, so it should be safe to assume
50
-        $vagrant_user = "vagrant"
51
-        $vagrant_home = "/home/vagrant"
52
-        include virtualbox
53
-    }
54
-
55
-    exec { "fetch-go":
56
-        require => Package["wget"],
57
-        command => "/usr/bin/wget -O - $go_url | /bin/tar xz -C /usr/local",
58
-        creates => "/usr/local/go/bin/go",
59
-    }
60
-
61
-    file { "/etc/init/dockerd.conf":
62
-        mode => 600,
63
-        owner => "root",
64
-        group => "root",
65
-        content => template("docker/dockerd.conf"),
66
-    }
67
-
68
-    file { "/opt/go":
69
-        owner => $vagrant_user,
70
-        group => $vagrant_user,
71
-        recurse => true,
72
-    }
73
-
74
-    file { "${vagrant_home}/.profile":
75
-        mode => 644,
76
-        owner => $vagrant_user,
77
-        group => $vagrant_user,
78
-        content => template("docker/profile"),
79
-    }
80
-
81
-     exec { "build-docker" :
82
-        cwd  => "/opt/go/src/github.com/dotcloud/docker",
83
-        user => $vagrant_user,
84
-        environment => "GOPATH=/opt/go",
85
-        command => "/usr/local/go/bin/go get -v ./... && /usr/local/go/bin/go install ./docker",
86
-        creates => "/opt/go/bin/docker",
87
-        logoutput => "on_failure",
88
-        require => [ Exec["fetch-go"], File["/opt/go"] ],
89
-    }
90
-
91
-    service { "dockerd" :
92
-        ensure => "running",
93
-        start => "/sbin/initctl start dockerd",
94
-        stop => "/sbin/initctl stop dockerd",
95
-        require => [ Exec["build-docker"], File["/etc/init/dockerd.conf"] ],
96
-        name => "dockerd",
97
-        provider => "base"
98
-    }
99
-}
100 1
deleted file mode 100644
... ...
@@ -1,12 +0,0 @@
1
-description     "Run dockerd"
2
-
3
-stop on runlevel [!2345]
4
-start on runlevel [3]
5
-
6
-# if you want it to automatically restart if it crashes, leave the next line in
7
-respawn
8
-
9
-script
10
-    test -f /etc/default/locale && . /etc/default/locale || true
11
-    LANG=$LANG LC_ALL=$LANG /opt/go/bin/docker -d >> /var/log/dockerd 2>&1
12
-end script
13 1
deleted file mode 100644
... ...
@@ -1,30 +0,0 @@
1
-# ~/.profile: executed by the command interpreter for login shells.
2
-# This file is not read by bash(1), if ~/.bash_profile or ~/.bash_login
3
-# exists.
4
-# see /usr/share/doc/bash/examples/startup-files for examples.
5
-# the files are located in the bash-doc package.
6
-
7
-# the default umask is set in /etc/profile; for setting the umask
8
-# for ssh logins, install and configure the libpam-umask package.
9
-#umask 022
10
-
11
-# if running bash
12
-if [ -n "$BASH_VERSION" ]; then
13
-    # include .bashrc if it exists
14
-    if [ -f "$HOME/.bashrc" ]; then
15
-        . "$HOME/.bashrc"
16
-    fi
17
-fi
18
-
19
-# set PATH so it includes user's private bin if it exists
20
-if [ -d "$HOME/bin" ] ; then
21
-    PATH="$HOME/bin:$PATH"
22
-fi
23
-
24
-export GOPATH=/opt/go
25
-export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
26
-
27
-docker=/opt/go/src/github.com/dotcloud/docker
28
-if [ -d $docker ]; then
29
-  cd $docker
30
-fi
... ...
@@ -7,6 +7,7 @@ import (
7 7
 	"io"
8 8
 	"io/ioutil"
9 9
 	"net/http"
10
+	"os"
10 11
 	"path"
11 12
 	"strings"
12 13
 )
... ...
@@ -135,7 +136,7 @@ func (graph *Graph) getRemoteImage(stdout io.Writer, imgId string, authConfig *a
135 135
 	if err != nil {
136 136
 		return nil, nil, err
137 137
 	}
138
-	return img, ProgressReader(res.Body, int(res.ContentLength), stdout), nil
138
+	return img, ProgressReader(res.Body, int(res.ContentLength), stdout, "Downloading %v/%v (%v)"), nil
139 139
 }
140 140
 
141 141
 func (graph *Graph) PullImage(stdout io.Writer, imgId string, authConfig *auth.AuthConfig) error {
... ...
@@ -269,24 +270,20 @@ func (graph *Graph) PushImage(stdout io.Writer, imgOrig *Image, authConfig *auth
269 269
 			return fmt.Errorf("Failed to retrieve layer upload location: %s", err)
270 270
 		}
271 271
 
272
-		// FIXME: Don't do this :D. Check the S3 requierement and implement chunks of 5MB
273
-		// FIXME2: I won't stress it enough, DON'T DO THIS! very high priority
274
-		layerData2, err := Tar(path.Join(graph.Root, img.Id, "layer"), Xz)
275
-		tmp, err := ioutil.ReadAll(layerData2)
272
+		// FIXME: stream the archive directly to the registry instead of buffering it on disk. This requires either:
273
+		//	a) Implementing S3's proprietary streaming logic, or
274
+		//	b) Stream directly to the registry instead of S3.
275
+		// I prefer option b. because it doesn't lock us into a proprietary cloud service.
276
+		tmpLayer, err := graph.TempLayerArchive(img.Id, Xz, stdout)
276 277
 		if err != nil {
277 278
 			return err
278 279
 		}
279
-		layerLength := len(tmp)
280
-
281
-		layerData, err := Tar(path.Join(graph.Root, img.Id, "layer"), Xz)
282
-		if err != nil {
283
-			return fmt.Errorf("Failed to generate layer archive: %s", err)
284
-		}
285
-		req3, err := http.NewRequest("PUT", url.String(), ProgressReader(layerData.(io.ReadCloser), layerLength, stdout))
280
+		defer os.Remove(tmpLayer.Name())
281
+		req3, err := http.NewRequest("PUT", url.String(), ProgressReader(tmpLayer, int(tmpLayer.Size), stdout, "Uploading %v/%v (%v)"))
286 282
 		if err != nil {
287 283
 			return err
288 284
 		}
289
-		req3.ContentLength = int64(layerLength)
285
+		req3.ContentLength = int64(tmpLayer.Size)
290 286
 
291 287
 		req3.TransferEncoding = []string{"none"}
292 288
 		res3, err := client.Do(req3)
... ...
@@ -6,6 +6,7 @@ import (
6 6
 	"github.com/dotcloud/docker/auth"
7 7
 	"io"
8 8
 	"io/ioutil"
9
+	"log"
9 10
 	"os"
10 11
 	"os/exec"
11 12
 	"path"
... ...
@@ -14,6 +15,11 @@ import (
14 14
 	"time"
15 15
 )
16 16
 
17
+type Capabilities struct {
18
+	MemoryLimit bool
19
+	SwapLimit   bool
20
+}
21
+
17 22
 type Runtime struct {
18 23
 	root           string
19 24
 	repository     string
... ...
@@ -23,6 +29,9 @@ type Runtime struct {
23 23
 	repositories   *TagStore
24 24
 	authConfig     *auth.AuthConfig
25 25
 	idIndex        *TruncIndex
26
+	capabilities   *Capabilities
27
+	kernelVersion  *KernelVersionInfo
28
+	autoRestart    bool
26 29
 }
27 30
 
28 31
 var sysInitPath string
... ...
@@ -69,12 +78,58 @@ func (runtime *Runtime) containerRoot(id string) string {
69 69
 	return path.Join(runtime.repository, id)
70 70
 }
71 71
 
72
+func (runtime *Runtime) mergeConfig(userConf, imageConf *Config) {
73
+	if userConf.Hostname != "" {
74
+		userConf.Hostname = imageConf.Hostname
75
+	}
76
+	if userConf.User != "" {
77
+		userConf.User = imageConf.User
78
+	}
79
+	if userConf.Memory == 0 {
80
+		userConf.Memory = imageConf.Memory
81
+	}
82
+	if userConf.MemorySwap == 0 {
83
+		userConf.MemorySwap = imageConf.MemorySwap
84
+	}
85
+	if userConf.PortSpecs == nil || len(userConf.PortSpecs) == 0 {
86
+		userConf.PortSpecs = imageConf.PortSpecs
87
+	}
88
+	if !userConf.Tty {
89
+		userConf.Tty = userConf.Tty
90
+	}
91
+	if !userConf.OpenStdin {
92
+		userConf.OpenStdin = imageConf.OpenStdin
93
+	}
94
+	if !userConf.StdinOnce {
95
+		userConf.StdinOnce = imageConf.StdinOnce
96
+	}
97
+	if userConf.Env == nil || len(userConf.Env) == 0 {
98
+		userConf.Env = imageConf.Env
99
+	}
100
+	if userConf.Cmd == nil || len(userConf.Cmd) == 0 {
101
+		userConf.Cmd = imageConf.Cmd
102
+	}
103
+	if userConf.Dns == nil || len(userConf.Dns) == 0 {
104
+		userConf.Dns = imageConf.Dns
105
+	}
106
+}
107
+
72 108
 func (runtime *Runtime) Create(config *Config) (*Container, error) {
109
+
73 110
 	// Lookup image
74 111
 	img, err := runtime.repositories.LookupImage(config.Image)
75 112
 	if err != nil {
76 113
 		return nil, err
77 114
 	}
115
+
116
+	if img.Config != nil {
117
+		runtime.mergeConfig(config, img.Config)
118
+	}
119
+
120
+	if config.Cmd == nil {
121
+		return nil, fmt.Errorf("No command specified")
122
+	}
123
+
78 124
 	// Generate id
79 125
 	id := GenerateId()
80 126
 	// Generate default hostname
... ...
@@ -95,6 +150,7 @@ func (runtime *Runtime) Create(config *Config) (*Container, error) {
95 95
 		// FIXME: do we need to store this in the container?
96 96
 		SysInitPath: sysInitPath,
97 97
 	}
98
+
98 99
 	container.root = runtime.containerRoot(container.Id)
99 100
 	// Step 1: create the container directory.
100 101
 	// This doubles as a barrier to avoid race conditions.
... ...
@@ -159,6 +215,28 @@ func (runtime *Runtime) Register(container *Container) error {
159 159
 	// init the wait lock
160 160
 	container.waitLock = make(chan struct{})
161 161
 
162
+	// Even if not running, we init the lock (prevents races in start/stop/kill)
163
+	container.State.initLock()
164
+
165
+	container.runtime = runtime
166
+
167
+	// Attach to stdout and stderr
168
+	container.stderr = newWriteBroadcaster()
169
+	container.stdout = newWriteBroadcaster()
170
+	// Attach to stdin
171
+	if container.Config.OpenStdin {
172
+		container.stdin, container.stdinPipe = io.Pipe()
173
+	} else {
174
+		container.stdinPipe = NopWriteCloser(ioutil.Discard) // Silently drop stdin
175
+	}
176
+	// done
177
+	runtime.containers.PushBack(container)
178
+	runtime.idIndex.Add(container.Id)
179
+
180
+	// When we actually restart, Start() does the monitoring.
181
+	// However, when we simply 'reattach', we have to restart a monitor
182
+	nomonitor := false
183
+
162 184
 	// FIXME: if the container is supposed to be running but is not, auto restart it?
163 185
 	//        if so, then we need to restart monitor and init a new lock
164 186
 	// If the container is supposed to be running, make sure of it
... ...
@@ -168,9 +246,20 @@ func (runtime *Runtime) Register(container *Container) error {
168 168
 		} else {
169 169
 			if !strings.Contains(string(output), "RUNNING") {
170 170
 				Debugf("Container %s was supposed to be running be is not.", container.Id)
171
-				container.State.setStopped(-127)
172
-				if err := container.ToDisk(); err != nil {
173
-					return err
171
+				if runtime.autoRestart {
172
+					Debugf("Restarting")
173
+					container.State.Ghost = false
174
+					container.State.setStopped(0)
175
+					if err := container.Start(); err != nil {
176
+						return err
177
+					}
178
+					nomonitor = true
179
+				} else {
180
+					Debugf("Marking as stopped")
181
+					container.State.setStopped(-127)
182
+					if err := container.ToDisk(); err != nil {
183
+						return err
184
+					}
174 185
 				}
175 186
 			}
176 187
 		}
... ...
@@ -180,25 +269,10 @@ func (runtime *Runtime) Register(container *Container) error {
180 180
 	// then close the wait lock chan (will be reset upon start)
181 181
 	if !container.State.Running {
182 182
 		close(container.waitLock)
183
+	} else if !nomonitor {
184
+		container.allocateNetwork()
185
+		go container.monitor()
183 186
 	}
184
-
185
-	// Even if not running, we init the lock (prevents races in start/stop/kill)
186
-	container.State.initLock()
187
-
188
-	container.runtime = runtime
189
-
190
-	// Attach to stdout and stderr
191
-	container.stderr = newWriteBroadcaster()
192
-	container.stdout = newWriteBroadcaster()
193
-	// Attach to stdin
194
-	if container.Config.OpenStdin {
195
-		container.stdin, container.stdinPipe = io.Pipe()
196
-	} else {
197
-		container.stdinPipe = NopWriteCloser(ioutil.Discard) // Silently drop stdin
198
-	}
199
-	// done
200
-	runtime.containers.PushBack(container)
201
-	runtime.idIndex.Add(container.Id)
202 187
 	return nil
203 188
 }
204 189
 
... ...
@@ -217,7 +291,7 @@ func (runtime *Runtime) Destroy(container *Container) error {
217 217
 		return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.Id)
218 218
 	}
219 219
 
220
-	if err := container.Stop(); err != nil {
220
+	if err := container.Stop(10); err != nil {
221 221
 		return err
222 222
 	}
223 223
 	if mounted, err := container.Mounted(); err != nil {
... ...
@@ -238,7 +312,7 @@ func (runtime *Runtime) Destroy(container *Container) error {
238 238
 
239 239
 // Commit creates a new filesystem image from the current state of a container.
240 240
 // The image can optionally be tagged into a repository
241
-func (runtime *Runtime) Commit(id, repository, tag, comment string) (*Image, error) {
241
+func (runtime *Runtime) Commit(id, repository, tag, comment, author string, config *Config) (*Image, error) {
242 242
 	container := runtime.Get(id)
243 243
 	if container == nil {
244 244
 		return nil, fmt.Errorf("No such container: %s", id)
... ...
@@ -250,7 +324,7 @@ func (runtime *Runtime) Commit(id, repository, tag, comment string) (*Image, err
250 250
 		return nil, err
251 251
 	}
252 252
 	// Create a new image from the container's base layers + a new layer from container changes
253
-	img, err := runtime.graph.Create(rwTar, container, comment)
253
+	img, err := runtime.graph.Create(rwTar, container, comment, author, config)
254 254
 	if err != nil {
255 255
 		return nil, err
256 256
 	}
... ...
@@ -280,12 +354,47 @@ func (runtime *Runtime) restore() error {
280 280
 	return nil
281 281
 }
282 282
 
283
+func (runtime *Runtime) UpdateCapabilities(quiet bool) {
284
+	if cgroupMemoryMountpoint, err := FindCgroupMountpoint("memory"); err != nil {
285
+		if !quiet {
286
+			log.Printf("WARNING: %s\n", err)
287
+		}
288
+	} else {
289
+		_, err1 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.limit_in_bytes"))
290
+		_, err2 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.soft_limit_in_bytes"))
291
+		runtime.capabilities.MemoryLimit = err1 == nil && err2 == nil
292
+		if !runtime.capabilities.MemoryLimit && !quiet {
293
+			log.Printf("WARNING: Your kernel does not support cgroup memory limit.")
294
+		}
295
+
296
+		_, err = ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes"))
297
+		runtime.capabilities.SwapLimit = err == nil
298
+		if !runtime.capabilities.SwapLimit && !quiet {
299
+			log.Printf("WARNING: Your kernel does not support cgroup swap limit.")
300
+		}
301
+	}
302
+}
303
+
283 304
 // FIXME: harmonize with NewGraph()
284
-func NewRuntime() (*Runtime, error) {
285
-	return NewRuntimeFromDirectory("/var/lib/docker")
305
+func NewRuntime(autoRestart bool) (*Runtime, error) {
306
+	runtime, err := NewRuntimeFromDirectory("/var/lib/docker", autoRestart)
307
+	if err != nil {
308
+		return nil, err
309
+	}
310
+
311
+	if k, err := GetKernelVersion(); err != nil {
312
+		log.Printf("WARNING: %s\n", err)
313
+	} else {
314
+		runtime.kernelVersion = k
315
+		if CompareKernelVersion(k, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
316
+			log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
317
+		}
318
+	}
319
+	runtime.UpdateCapabilities(false)
320
+	return runtime, nil
286 321
 }
287 322
 
288
-func NewRuntimeFromDirectory(root string) (*Runtime, error) {
323
+func NewRuntimeFromDirectory(root string, autoRestart bool) (*Runtime, error) {
289 324
 	runtimeRepo := path.Join(root, "containers")
290 325
 
291 326
 	if err := os.MkdirAll(runtimeRepo, 0700); err != nil && !os.IsExist(err) {
... ...
@@ -321,6 +430,8 @@ func NewRuntimeFromDirectory(root string) (*Runtime, error) {
321 321
 		repositories:   repositories,
322 322
 		authConfig:     authConfig,
323 323
 		idIndex:        NewTruncIndex(),
324
+		capabilities:   &Capabilities{},
325
+		autoRestart:    autoRestart,
324 326
 	}
325 327
 
326 328
 	if err := runtime.restore(); err != nil {
... ...
@@ -1,9 +1,11 @@
1 1
 package docker
2 2
 
3 3
 import (
4
+	"fmt"
4 5
 	"github.com/dotcloud/docker/rcli"
5 6
 	"io"
6 7
 	"io/ioutil"
8
+	"net"
7 9
 	"os"
8 10
 	"os/exec"
9 11
 	"os/user"
... ...
@@ -12,11 +14,9 @@ import (
12 12
 	"time"
13 13
 )
14 14
 
15
-// FIXME: this is no longer needed
16
-const testLayerPath string = "/var/lib/docker/docker-ut.tar"
17 15
 const unitTestImageName string = "docker-ut"
18 16
 
19
-var unitTestStoreBase string
17
+const unitTestStoreBase string = "/var/lib/docker/unit-tests"
20 18
 
21 19
 func nuke(runtime *Runtime) error {
22 20
 	var wg sync.WaitGroup
... ...
@@ -48,8 +48,6 @@ func layerArchive(tarfile string) (io.Reader, error) {
48 48
 }
49 49
 
50 50
 func init() {
51
-	NO_MEMORY_LIMIT = os.Getenv("NO_MEMORY_LIMIT") == "1"
52
-
53 51
 	// Hack to run sys init during unit testing
54 52
 	if SelfPath() == "/sbin/init" {
55 53
 		SysInit()
... ...
@@ -62,15 +60,10 @@ func init() {
62 62
 		panic("docker tests needs to be run as root")
63 63
 	}
64 64
 
65
-	// Create a temp directory
66
-	root, err := ioutil.TempDir("", "docker-test")
67
-	if err != nil {
68
-		panic(err)
69
-	}
70
-	unitTestStoreBase = root
65
+	NetworkBridgeIface = "testdockbr0"
71 66
 
72 67
 	// Make it our Store root
73
-	runtime, err := NewRuntimeFromDirectory(root)
68
+	runtime, err := NewRuntimeFromDirectory(unitTestStoreBase, false)
74 69
 	if err != nil {
75 70
 		panic(err)
76 71
 	}
... ...
@@ -96,11 +89,11 @@ func newTestRuntime() (*Runtime, error) {
96 96
 		return nil, err
97 97
 	}
98 98
 
99
-	runtime, err := NewRuntimeFromDirectory(root)
99
+	runtime, err := NewRuntimeFromDirectory(root, false)
100 100
 	if err != nil {
101 101
 		return nil, err
102 102
 	}
103
-
103
+	runtime.UpdateCapabilities(true)
104 104
 	return runtime, nil
105 105
 }
106 106
 
... ...
@@ -263,6 +256,57 @@ func TestGet(t *testing.T) {
263 263
 
264 264
 }
265 265
 
266
+// Run a container with a TCP port allocated, and test that it can receive connections on localhost
267
+func TestAllocatePortLocalhost(t *testing.T) {
268
+	runtime, err := newTestRuntime()
269
+	if err != nil {
270
+		t.Fatal(err)
271
+	}
272
+	container, err := runtime.Create(&Config{
273
+		Image:     GetTestImage(runtime).Id,
274
+		Cmd:       []string{"sh", "-c", "echo well hello there | nc -l -p 5555"},
275
+		PortSpecs: []string{"5555"},
276
+	},
277
+	)
278
+	if err != nil {
279
+		t.Fatal(err)
280
+	}
281
+	if err := container.Start(); err != nil {
282
+		t.Fatal(err)
283
+	}
284
+	defer container.Kill()
285
+
286
+	setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
287
+		for {
288
+			if container.State.Running {
289
+				break
290
+			}
291
+			time.Sleep(10 * time.Millisecond)
292
+		}
293
+	})
294
+
295
+	conn, err := net.Dial("tcp",
296
+		fmt.Sprintf(
297
+			"localhost:%s", container.NetworkSettings.PortMapping["5555"],
298
+		),
299
+	)
300
+	if err != nil {
301
+		t.Fatal(err)
302
+	}
303
+	defer conn.Close()
304
+	output, err := ioutil.ReadAll(conn)
305
+	if err != nil {
306
+		t.Fatal(err)
307
+	}
308
+	if string(output) != "well hello there\n" {
309
+		t.Fatalf("Received wrong output from network connection: should be '%s', not '%s'",
310
+			"well hello there\n",
311
+			string(output),
312
+		)
313
+	}
314
+	container.Wait()
315
+}
316
+
266 317
 func TestRestore(t *testing.T) {
267 318
 
268 319
 	root, err := ioutil.TempDir("", "docker-test")
... ...
@@ -276,7 +320,7 @@ func TestRestore(t *testing.T) {
276 276
 		t.Fatal(err)
277 277
 	}
278 278
 
279
-	runtime1, err := NewRuntimeFromDirectory(root)
279
+	runtime1, err := NewRuntimeFromDirectory(root, false)
280 280
 	if err != nil {
281 281
 		t.Fatal(err)
282 282
 	}
... ...
@@ -335,7 +379,7 @@ func TestRestore(t *testing.T) {
335 335
 
336 336
 	// Here we are simulating a docker restart - that is, reloading all containers
337 337
 	// from scratch
338
-	runtime2, err := NewRuntimeFromDirectory(root)
338
+	runtime2, err := NewRuntimeFromDirectory(root, false)
339 339
 	if err != nil {
340 340
 		t.Fatal(err)
341 341
 	}
... ...
@@ -53,8 +53,7 @@ func changeUser(u string) {
53 53
 }
54 54
 
55 55
 // Clear environment pollution introduced by lxc-start
56
-func cleanupEnv() {
57
-	env := os.Environ()
56
+func cleanupEnv(env ListOpts) {
58 57
 	os.Clearenv()
59 58
 	for _, kv := range env {
60 59
 		parts := strings.SplitN(kv, "=", 2)
... ...
@@ -91,10 +90,13 @@ func SysInit() {
91 91
 	var u = flag.String("u", "", "username or uid")
92 92
 	var gw = flag.String("g", "", "gateway address")
93 93
 
94
+	var flEnv ListOpts
95
+	flag.Var(&flEnv, "e", "Set environment variables")
96
+
94 97
 	flag.Parse()
95 98
 
99
+	cleanupEnv(flEnv)
96 100
 	setupNetworking(*gw)
97
-	cleanupEnv()
98 101
 	changeUser(*u)
99 102
 	executeProgram(flag.Arg(0), flag.Args())
100 103
 }
... ...
@@ -71,23 +71,30 @@ type progressReader struct {
71 71
 	readTotal    int           // Expected stream length (bytes)
72 72
 	readProgress int           // How much has been read so far (bytes)
73 73
 	lastUpdate   int           // How many bytes read at least update
74
+	template     string        // Template to print. Default "%v/%v (%v)"
74 75
 }
75 76
 
76 77
 func (r *progressReader) Read(p []byte) (n int, err error) {
77 78
 	read, err := io.ReadCloser(r.reader).Read(p)
78 79
 	r.readProgress += read
79 80
 
80
-	// Only update progress for every 1% read
81
-	updateEvery := int(0.01 * float64(r.readTotal))
82
-	if r.readProgress-r.lastUpdate > updateEvery || r.readProgress == r.readTotal {
83
-		fmt.Fprintf(r.output, "%d/%d (%.0f%%)\r",
84
-			r.readProgress,
85
-			r.readTotal,
86
-			float64(r.readProgress)/float64(r.readTotal)*100)
81
+	updateEvery := 4096
82
+	if r.readTotal > 0 {
83
+		// Only update progress for every 1% read
84
+		if increment := int(0.01 * float64(r.readTotal)); increment > updateEvery {
85
+			updateEvery = increment
86
+		}
87
+	}
88
+	if r.readProgress-r.lastUpdate > updateEvery || err != nil {
89
+		if r.readTotal > 0 {
90
+			fmt.Fprintf(r.output, r.template+"\r", r.readProgress, r.readTotal, fmt.Sprintf("%.0f%%", float64(r.readProgress)/float64(r.readTotal)*100))
91
+		} else {
92
+			fmt.Fprintf(r.output, r.template+"\r", r.readProgress, "?", "n/a")
93
+		}
87 94
 		r.lastUpdate = r.readProgress
88 95
 	}
89 96
 	// Send newline when complete
90
-	if err == io.EOF {
97
+	if err != nil {
91 98
 		fmt.Fprintf(r.output, "\n")
92 99
 	}
93 100
 
... ...
@@ -96,8 +103,11 @@ func (r *progressReader) Read(p []byte) (n int, err error) {
96 96
 func (r *progressReader) Close() error {
97 97
 	return io.ReadCloser(r.reader).Close()
98 98
 }
99
-func ProgressReader(r io.ReadCloser, size int, output io.Writer) *progressReader {
100
-	return &progressReader{r, output, size, 0, 0}
99
+func ProgressReader(r io.ReadCloser, size int, output io.Writer, template string) *progressReader {
100
+	if template == "" {
101
+		template = "%v/%v (%v)"
102
+	}
103
+	return &progressReader{r, output, size, 0, 0, template}
101 104
 }
102 105
 
103 106
 // HumanDuration returns a human-readable approximation of a duration
... ...
@@ -405,3 +415,65 @@ func SetRawTerminal() (*term.State, error) {
405 405
 func RestoreTerminal(state *term.State) {
406 406
 	term.Restore(int(os.Stdin.Fd()), state)
407 407
 }
408
+
409
+type KernelVersionInfo struct {
410
+	Kernel int
411
+	Major  int
412
+	Minor  int
413
+	Flavor string
414
+}
415
+
416
+// FIXME: this doesn't build on Darwin
417
+func GetKernelVersion() (*KernelVersionInfo, error) {
418
+	return getKernelVersion()
419
+}
420
+
421
+func (k *KernelVersionInfo) String() string {
422
+	return fmt.Sprintf("%d.%d.%d-%s", k.Kernel, k.Major, k.Minor, k.Flavor)
423
+}
424
+
425
+// Compare two KernelVersionInfo struct.
426
+// Returns -1 if a < b, 0 if a == b, 1 if a > b
427
+func CompareKernelVersion(a, b *KernelVersionInfo) int {
428
+	if a.Kernel < b.Kernel {
429
+		return -1
430
+	} else if a.Kernel > b.Kernel {
431
+		return 1
432
+	}
433
+
434
+	if a.Major < b.Major {
435
+		return -1
436
+	} else if a.Major > b.Major {
437
+		return 1
438
+	}
439
+
440
+	if a.Minor < b.Minor {
441
+		return -1
442
+	} else if a.Minor > b.Minor {
443
+		return 1
444
+	}
445
+
446
+	return 0
447
+}
448
+
449
+func FindCgroupMountpoint(cgroupType string) (string, error) {
450
+	output, err := ioutil.ReadFile("/proc/mounts")
451
+	if err != nil {
452
+		return "", err
453
+	}
454
+
455
+	// /proc/mounts has 6 fields per line, one mount per line, e.g.
456
+	// cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0
457
+	for _, line := range strings.Split(string(output), "\n") {
458
+		parts := strings.Split(line, " ")
459
+		if parts[2] == "cgroup" {
460
+			for _, opt := range strings.Split(parts[3], ",") {
461
+				if opt == cgroupType {
462
+					return parts[1], nil
463
+				}
464
+			}
465
+		}
466
+	}
467
+
468
+	return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType)
469
+}
... ...
@@ -228,3 +228,36 @@ func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult strin
228 228
 		t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult)
229 229
 	}
230 230
 }
231
+
232
+func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) {
233
+	if r := CompareKernelVersion(a, b); r != result {
234
+		t.Fatalf("Unepected kernel version comparaison result. Found %d, expected %d", r, result)
235
+	}
236
+}
237
+
238
+func TestCompareKernelVersion(t *testing.T) {
239
+	assertKernelVersion(t,
240
+		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
241
+		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
242
+		0)
243
+	assertKernelVersion(t,
244
+		&KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0},
245
+		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
246
+		-1)
247
+	assertKernelVersion(t,
248
+		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
249
+		&KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0},
250
+		1)
251
+	assertKernelVersion(t,
252
+		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "0"},
253
+		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "16"},
254
+		0)
255
+	assertKernelVersion(t,
256
+		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5},
257
+		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
258
+		1)
259
+	assertKernelVersion(t,
260
+		&KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20, Flavor: "25"},
261
+		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "0"},
262
+		-1)
263
+}