Merge pull request #21367 from mlaventure/containerd-docs-cleanup

Remove unneeded references to execDriver

Tibor Vass authored on 2016/03/23 08:40:27
Showing 15 changed files
... ...
@@ -51,7 +51,7 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable
 complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force Docker to use specific DNS servers'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-opt -d 'Force Docker to use specific DNS options'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-search -d 'Force Docker to use specific DNS search domains'
-complete -c docker -f -n '__fish_docker_no_subcommand' -l exec-opt -d 'Set exec driver options'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l exec-opt -d 'Set runtime execution options'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr -d 'IPv4 subnet for fixed IPs (e.g. 10.20.0.0/16)'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr-v6 -d 'IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)'
 complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d 'Group to assign the unix socket specified by -H when running in daemon mode'
... ...
@@ -650,8 +650,8 @@ __docker_subcommand() {
                 "($help)*--dns-opt=[DNS options to use]:DNS option: " \
                 "($help)*--default-ulimit=[Default ulimit settings for containers]:ulimit: " \
                 "($help)--disable-legacy-registry[Do not contact legacy registries]" \
-                "($help)*--exec-opt=[Exec driver options]:exec driver options: " \
-                "($help)--exec-root=[Root of the Docker execdriver]:path:_directories" \
+                "($help)*--exec-opt=[Runtime execution options]:runtime execution options: " \
+                "($help)--exec-root=[Root directory for execution state files]:path:_directories" \
                 "($help)--fixed-cidr=[IPv4 subnet for fixed IPs]:IPv4 subnet: " \
                 "($help)--fixed-cidr-v6=[IPv6 subnet for fixed IPs]:IPv6 subnet: " \
                 "($help -G --group)"{-G=,--group=}"[Group for the unix socket]:group:_groups" \
... ...
@@ -112,10 +112,10 @@ func (config *Config) InstallCommonFlags(cmd *flag.FlagSet, usageFn func(string)
 
 	cmd.Var(opts.NewNamedListOptsRef("storage-opts", &config.GraphOptions, nil), []string{"-storage-opt"}, usageFn("Set storage driver options"))
 	cmd.Var(opts.NewNamedListOptsRef("authorization-plugins", &config.AuthorizationPlugins, nil), []string{"-authorization-plugin"}, usageFn("List authorization plugins in order from first evaluator to last"))
-	cmd.Var(opts.NewNamedListOptsRef("exec-opts", &config.ExecOptions, nil), []string{"-exec-opt"}, usageFn("Set exec driver options"))
+	cmd.Var(opts.NewNamedListOptsRef("exec-opts", &config.ExecOptions, nil), []string{"-exec-opt"}, usageFn("Set runtime execution options"))
 	cmd.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, defaultPidFile, usageFn("Path to use for daemon PID file"))
 	cmd.StringVar(&config.Root, []string{"g", "-graph"}, defaultGraph, usageFn("Root of the Docker runtime"))
-	cmd.StringVar(&config.ExecRoot, []string{"-exec-root"}, defaultExecRoot, usageFn("Root of the Docker execdriver"))
+	cmd.StringVar(&config.ExecRoot, []string{"-exec-root"}, defaultExecRoot, usageFn("Root directory for execution state files"))
 	cmd.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, usageFn("--restart on the daemon has been deprecated in favor of --restart policies on docker run"))
 	cmd.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", usageFn("Storage driver to use"))
 	cmd.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, usageFn("Set the containers network MTU"))
... ...
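
The `cmd.Var` wiring above makes `--exec-opt` a repeatable, list-valued flag: every occurrence appends one string to `config.ExecOptions`. A minimal standard-library sketch of that pattern (the `listOpts` type is a hypothetical stand-in for Docker's `opts.NewNamedListOptsRef` helper):

```go
package main

import (
	"flag"
	"fmt"
	"strings"
)

// listOpts stands in for Docker's named list options: each
// occurrence of the flag appends one value to the slice.
type listOpts []string

func (l *listOpts) String() string     { return strings.Join(*l, ",") }
func (l *listOpts) Set(v string) error { *l = append(*l, v); return nil }

func main() {
	var execOptions listOpts
	fs := flag.NewFlagSet("daemon", flag.ExitOnError)
	fs.Var(&execOptions, "exec-opt", "Set runtime execution options")

	fs.Parse([]string{"--exec-opt", "native.cgroupdriver=cgroupfs"})
	fmt.Println(execOptions) // [native.cgroupdriver=cgroupfs]
}
```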
@@ -51,6 +51,10 @@ const (
 	// constants for remapped root settings
 	defaultIDSpecifier string = "default"
 	defaultRemappedID  string = "dockremap"
+
+	// constant for cgroup drivers
+	cgroupFsDriver      = "cgroupfs"
+	cgroupSystemdDriver = "systemd"
 )
 
 func getMemoryResources(config containertypes.Resources) *specs.Memory {
... ...
@@ -460,29 +464,30 @@ func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysi
 }
 
 func (daemon *Daemon) getCgroupDriver() string {
-	cgroupDriver := "cgroupfs"
-	if daemon.usingSystemd() {
-		cgroupDriver = "systemd"
-	}
-	return cgroupDriver
-}
+	cgroupDriver := cgroupFsDriver
 
-func usingSystemd(config *Config) bool {
-	for _, option := range config.ExecOptions {
+	// No other cgroup drivers are supported at the moment. Warn the
+	// user if they tried to set one other than cgroupfs
+	for _, option := range daemon.configStore.ExecOptions {
 		key, val, err := parsers.ParseKeyValueOpt(option)
 		if err != nil || !strings.EqualFold(key, "native.cgroupdriver") {
 			continue
 		}
-		if val == "systemd" {
-			return true
+		if val != cgroupFsDriver {
+			logrus.Warnf("cgroupdriver '%s' is not supported", val)
 		}
 	}
 
+	return cgroupDriver
+}
+
+func usingSystemd(config *Config) bool {
+	// No support for systemd cgroup atm
 	return false
 }
 
 func (daemon *Daemon) usingSystemd() bool {
-	return usingSystemd(daemon.configStore)
+	return daemon.getCgroupDriver() == cgroupSystemdDriver
 }
 
 // verifyPlatformContainerSettings performs platform-specific validation of the
... ...
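
The rewritten `getCgroupDriver` above boils down to scanning `ExecOptions` for a `native.cgroupdriver` key and warning on anything other than `cgroupfs`. A self-contained sketch of that loop, with the key/value split written out inline since `parsers.ParseKeyValueOpt` lives in a Docker helper package (its exact error text here is assumed):

```go
package main

import (
	"fmt"
	"strings"
)

// parseKeyValueOpt mirrors the behavior assumed of
// parsers.ParseKeyValueOpt: split "key=value" into its two halves.
func parseKeyValueOpt(opt string) (string, string, error) {
	parts := strings.SplitN(opt, "=", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("unable to parse key/value option: %s", opt)
	}
	return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
}

func main() {
	execOptions := []string{"native.cgroupdriver=systemd"}

	// Same shape as the loop in getCgroupDriver: ignore unrelated
	// keys, warn on any driver other than cgroupfs.
	for _, option := range execOptions {
		key, val, err := parseKeyValueOpt(option)
		if err != nil || !strings.EqualFold(key, "native.cgroupdriver") {
			continue
		}
		if val != "cgroupfs" {
			fmt.Printf("cgroupdriver '%s' is not supported\n", val)
		}
	}
}
```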
@@ -9,21 +9,21 @@ daemon via the `--storage-opt dm.thinpooldev` option.
 
 As a fallback if no thin pool is provided, loopback files will be
 created.  Loopback is very slow, but can be used without any
-pre-configuration of storage.  It is strongly recommended that you do 
+pre-configuration of storage.  It is strongly recommended that you do
 not use loopback in production.  Ensure your Docker daemon has a
 `--storage-opt dm.thinpooldev` argument provided.
 
 In loopback, a thin pool is created at `/var/lib/docker/devicemapper`
-(devicemapper graph location) based on two block devices, one for 
-data and one for metadata. By default these block devices are created 
-automatically by using loopback mounts of automatically created sparse 
+(devicemapper graph location) based on two block devices, one for
+data and one for metadata. By default these block devices are created
+automatically by using loopback mounts of automatically created sparse
 files.
 
-The default loopback files used are 
-`/var/lib/docker/devicemapper/devicemapper/data` and 
-`/var/lib/docker/devicemapper/devicemapper/metadata`. Additional metadata 
-required to map from docker entities to the corresponding devicemapper 
-volumes is stored in the `/var/lib/docker/devicemapper/devicemapper/json` 
+The default loopback files used are
+`/var/lib/docker/devicemapper/devicemapper/data` and
+`/var/lib/docker/devicemapper/devicemapper/metadata`. Additional metadata
+required to map from docker entities to the corresponding devicemapper
+volumes is stored in the `/var/lib/docker/devicemapper/devicemapper/json`
 file (encoded as Json).
 
 In order to support multiple devicemapper graphs on a system, the thin
... ...
@@ -92,6 +92,5 @@ This uses the `dm` prefix and would be used something like `docker daemon --stor
 
 These options are currently documented both in [the man
 page](../../../man/docker.1.md) and in [the online
-documentation](https://docs.docker.com/reference/commandline/daemon/#docker-
-execdriver-option).  If you add an options, update both the `man` page and the
-documentation.
+documentation](https://docs.docker.com/reference/commandline/daemon/#storage-driver-options).
+If you add an option, update both the `man` page and the documentation.
... ...
@@ -103,7 +103,7 @@ func (daemon *Daemon) Kill(container *container.Container) error {
 		// because if we can't stop the container by this point then
 		// its probably because its already stopped. Meaning, between
 		// the time of the IsRunning() call above and now it stopped.
-		// Also, since the err return will be exec driver specific we can't
+		// Also, since the err return will be environment specific we can't
 		// look for any particular (common) error that would indicate
 		// that the process is already dead vs something else going wrong.
 		// So, instead we'll give it up to 2 more seconds to complete and if
... ...
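
The two-second grace period the comment describes is the usual select-on-a-wait-channel pattern in Go. A standalone sketch, using `sleep` as a stand-in for the container process (not the daemon's actual implementation):

```go
package main

import (
	"fmt"
	"os/exec"
	"time"
)

func main() {
	cmd := exec.Command("sleep", "1")
	if err := cmd.Start(); err != nil {
		panic(err)
	}

	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	// Give the process up to 2 more seconds to exit before
	// concluding something else went wrong.
	select {
	case err := <-done:
		fmt.Println("process exited:", err)
	case <-time.After(2 * time.Second):
		fmt.Println("process still running after 2s")
	}
}
```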
@@ -11,8 +11,8 @@ import (
 )
 
 // setupMounts configures the mount points for a container by appending each
-// of the configured mounts on the container to the oci mount structure
-// which will ultimately be passed into the exec driver during container creation.
+// of the configured mounts on the container to the OCI mount structure
+// which will ultimately be passed into the OCI runtime during container creation.
 // It also ensures each of the mounts are lexographically sorted.
 
 // BUGBUG TODO Windows containerd. This would be much better if it returned
... ...
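
The lexicographic ordering mentioned in the comment matters in part because parent mount points sort before their children. A sketch of that sort over a simplified mount shape (the `Mount` struct is a hypothetical stand-in for the OCI spec type):

```go
package main

import (
	"fmt"
	"sort"
)

// Mount is a simplified stand-in for the OCI runtime spec's
// mount entry.
type Mount struct {
	Destination string
	Source      string
}

func main() {
	mounts := []Mount{
		{Destination: "/proc", Source: "proc"},
		{Destination: "/dev/pts", Source: "devpts"},
		{Destination: "/dev", Source: "tmpfs"},
	}

	// Sort lexicographically by destination so "/dev" comes
	// before "/dev/pts".
	sort.Slice(mounts, func(i, j int) bool {
		return mounts[i].Destination < mounts[j].Destination
	})
	fmt.Println(mounts) // [{/dev tmpfs} {/dev/pts devpts} {/proc proc}]
}
```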
@@ -162,7 +162,7 @@ can be located at `/var/log/upstart/docker.log`
 
     $ tail -f /var/log/upstart/docker.log
     INFO[0000] Loading containers: done.
-    INFO[0000] docker daemon: 1.6.0 4749651; execdriver: native-0.2; graphdriver: aufs
+    INFO[0000] Docker daemon commit=1b09a95-unsupported graphdriver=aufs version=1.11.0-dev
     INFO[0000] +job acceptconnections()
     INFO[0000] -job acceptconnections() = OK (0)
     INFO[0000] Daemon has completed initialization
... ...
@@ -273,7 +273,7 @@ be viewed using `journalctl -u docker`
     May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="-job init_networkdriver() = OK (0)"
     May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Loading containers: start."
     May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Loading containers: done."
-    May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="docker daemon: 1.5.0-dev fc0329b/1.5.0; execdriver: native-0.2; graphdriver: devicemapper"
+    May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Docker daemon commit=1b09a95-unsupported graphdriver=aufs version=1.11.0-dev"
     May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="+job acceptconnections()"
     May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="-job acceptconnections() = OK (0)"
 
... ...
@@ -32,8 +32,8 @@ weight = -1
       --dns-opt=[]                           DNS options to use
       --dns-search=[]                        DNS search domains to use
       --default-ulimit=[]                    Set default ulimit settings for containers
-      --exec-opt=[]                          Set exec driver options
-      --exec-root="/var/run/docker"          Root of the Docker execdriver
+      --exec-opt=[]                          Set runtime execution options
+      --exec-root="/var/run/docker"          Root directory for execution state files
       --fixed-cidr=""                        IPv4 subnet for fixed IPs
       --fixed-cidr-v6=""                     IPv6 subnet for fixed IPs
       -G, --group="docker"                   Group for the unix socket
... ...
@@ -476,24 +476,26 @@ Currently supported options of `zfs`:
 
         $ docker daemon -s zfs --storage-opt zfs.fsname=zroot/docker
 
-## Docker execdriver option
+## Docker runtime execution options
 
-The Docker daemon uses a specifically built `libcontainer` execution driver as
-its interface to the Linux kernel `namespaces`, `cgroups`, and `SELinux`.
+The Docker daemon relies on an
+[OCI](https://github.com/opencontainers/specs) compliant runtime
+(invoked via the `containerd` daemon) as its interface to the Linux
+kernel `namespaces`, `cgroups`, and `SELinux`.
 
-## Options for the native execdriver
+## Options for the runtime
 
-You can configure the `native` (libcontainer) execdriver using options specified
+You can configure the runtime using options specified
 with the `--exec-opt` flag. All the flag's options have the `native` prefix. A
 single `native.cgroupdriver` option is available.
 
 The `native.cgroupdriver` option specifies the management of the container's
-cgroups. You can specify `cgroupfs` or `systemd`. If you specify `systemd` and
-it is not available, the system uses `cgroupfs`. If you omit the
+cgroups. You can only specify `cgroupfs` at the moment.  If you omit the
 `native.cgroupdriver` option, `cgroupfs` is used.
-This example sets the `cgroupdriver` to `systemd`:
 
-    $ sudo docker daemon --exec-opt native.cgroupdriver=systemd
+This example explicitly sets the `cgroupdriver` to `cgroupfs`:
+
+    $ sudo docker daemon --exec-opt native.cgroupdriver=cgroupfs
 
 Setting this option applies to all containers the daemon launches.
 
... ...
@@ -198,7 +198,7 @@ to the host.
 This won't affect regular web apps; but malicious users will find that
 the arsenal at their disposal has shrunk considerably! By default Docker
 drops all capabilities except [those
-needed](https://github.com/docker/docker/blob/87de5fdd5972343a11847922e0f41d9898b5cff7/daemon/execdriver/native/template/default_template_linux.go#L16-L29),
+needed](https://github.com/docker/docker/blob/master/oci/defaults_linux.go#L64-L79),
 a whitelist instead of a blacklist approach. You can see a full list of
 available capabilities in [Linux
 manpages](http://man7.org/linux/man-pages/man7/capabilities.7.html).
... ...
@@ -243,11 +243,11 @@ with e.g., special network topologies or shared filesystems, you can
 expect to see tools to harden existing Docker containers without
 affecting Docker's core.
 
-As of Docker 1.10 User Namespaces are supported directly by the docker 
-daemon. This feature allows for the root user in a container to be mapped 
+As of Docker 1.10 User Namespaces are supported directly by the docker
+daemon. This feature allows for the root user in a container to be mapped
 to a non uid-0 user outside the container, which can help to mitigate the
 risks of container breakout. This facility is available but not enabled
-by default. 
+by default.
 
 Refer to the [daemon command](../reference/commandline/daemon.md#daemon-user-namespace-options)
 in the command line reference for more information on this feature.
... ...
@@ -51,46 +51,46 @@ Device Mapper technology works at the block level rather than the file level.
 This means that `devicemapper` storage driver's thin provisioning and
 copy-on-write operations work with blocks rather than entire files.
 
->**Note**: Snapshots are also referred to as *thin devices* or *virtual 
->devices*. They all mean the same thing in the context of the `devicemapper` 
+>**Note**: Snapshots are also referred to as *thin devices* or *virtual
+>devices*. They all mean the same thing in the context of the `devicemapper`
 >storage driver.
 
 With `devicemapper` the high level process for creating images is as follows:
 
 1. The `devicemapper` storage driver creates a thin pool.
 
-    The pool is created from block devices or loop mounted sparse files (more 
+    The pool is created from block devices or loop mounted sparse files (more
 on this later).
 
 2. Next it creates a *base device*.
 
-    A base device is a thin device with a filesystem. You can see which 
-filesystem is in use by running the `docker info` command and checking the 
+    A base device is a thin device with a filesystem. You can see which
+filesystem is in use by running the `docker info` command and checking the
 `Backing filesystem` value.
 
 3. Each new image (and image layer) is a snapshot of this base device.
 
-    These are thin provisioned copy-on-write snapshots. This means that they 
-are initially empty and only consume space from the pool when data is written 
+    These are thin provisioned copy-on-write snapshots. This means that they
+are initially empty and only consume space from the pool when data is written
 to them.
 
-With `devicemapper`, container layers are snapshots of the image they are 
-created from. Just as with images, container snapshots are thin provisioned 
-copy-on-write snapshots. The container snapshot stores all updates to the 
-container. The `devicemapper` allocates space to them on-demand from the pool 
+With `devicemapper`, container layers are snapshots of the image they are
+created from. Just as with images, container snapshots are thin provisioned
+copy-on-write snapshots. The container snapshot stores all updates to the
+container. The `devicemapper` allocates space to them on-demand from the pool
 as and when data is written to the container.
 
-The high level diagram below shows a thin pool with a base device and two 
+The high level diagram below shows a thin pool with a base device and two
 images.
 
 ![](images/base_device.jpg)
 
-If you look closely at the diagram you'll see that it's snapshots all the way 
+If you look closely at the diagram you'll see that it's snapshots all the way
 down. Each image layer is a snapshot of the layer below it. The lowest layer of
- each image is a snapshot of the base device that exists in the pool. This 
+ each image is a snapshot of the base device that exists in the pool. This
 base device is a `Device Mapper` artifact and not a Docker image layer.
 
-A container is a snapshot of the image it is created from. The diagram below 
+A container is a snapshot of the image it is created from. The diagram below
 shows two containers - one based on the Ubuntu image and the other based on the
  Busybox image.
 
... ...
@@ -99,22 +99,22 @@ shows two containers - one based on the Ubuntu image and the other based on the
 
 ## Reads with the devicemapper
 
-Let's look at how reads and writes occur using the `devicemapper` storage 
-driver. The diagram below shows the high level process for reading a single 
+Let's look at how reads and writes occur using the `devicemapper` storage
+driver. The diagram below shows the high level process for reading a single
 block (`0x44f`) in an example container.
 
 ![](images/dm_container.jpg)
 
 1. An application makes a read request for block `0x44f` in the container.
 
-    Because the container is a thin snapshot of an image it does not have the 
-data. Instead, it has a pointer (PTR) to where the data is stored in the image 
+    Because the container is a thin snapshot of an image it does not have the
+data. Instead, it has a pointer (PTR) to where the data is stored in the image
 snapshot lower down in the image stack.
 
-2. The storage driver follows the pointer to block `0xf33` in the snapshot 
+2. The storage driver follows the pointer to block `0xf33` in the snapshot
 relating to image layer `a005...`.
 
-3. The `devicemapper` copies the contents of block `0xf33` from the image 
+3. The `devicemapper` copies the contents of block `0xf33` from the image
 snapshot to memory in the container.
 
 4. The storage driver returns the data to the requesting application.
... ...
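
The pointer-following read in steps 1-3 can be modeled with two maps, reusing the block numbers from the example above (a toy model of a thin snapshot, not driver code):

```go
package main

import "fmt"

type block uint64

// imageSnapshot holds the data that actually lives in the image layer.
var imageSnapshot = map[block][]byte{
	0xf33: []byte("contents of image block 0xf33"),
}

// containerPTR maps container blocks to the image blocks they point
// at; a thin snapshot stores pointers rather than data.
var containerPTR = map[block]block{
	0x44f: 0xf33,
}

func read(b block) []byte {
	if target, ok := containerPTR[b]; ok {
		// follow the pointer down the snapshot stack
		return imageSnapshot[target]
	}
	return nil
}

func main() {
	fmt.Printf("%s\n", read(0x44f))
}
```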
@@ -122,11 +122,11 @@ snapshot to memory in the container.
 ### Write examples
 
 With the `devicemapper` driver, writing new data to a container is accomplished
- by an *allocate-on-demand* operation. Updating existing data uses a 
-copy-on-write operation. Because Device Mapper is a block-based technology 
+ by an *allocate-on-demand* operation. Updating existing data uses a
+copy-on-write operation. Because Device Mapper is a block-based technology
 these operations occur at the block level.
 
-For example, when making a small change to a large file in a container, the 
+For example, when making a small change to a large file in a container, the
 `devicemapper` storage driver does not copy the entire file. It only copies the
  blocks to be modified. Each block is 64KB.
 
... ...
@@ -136,10 +136,10 @@ To write 56KB of new data to a container:
 
 1. An application makes a request to write 56KB of new data to the container.
 
-2. The allocate-on-demand operation allocates a single new 64KB block to the 
+2. The allocate-on-demand operation allocates a single new 64KB block to the
 container's snapshot.
 
-    If the write operation is larger than 64KB, multiple new blocks are 
+    If the write operation is larger than 64KB, multiple new blocks are
 allocated to the container's snapshot.
 
 3. The data is written to the newly allocated block.
... ...
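
The arithmetic in step 2 is a ceiling division on the 64KB block size: a 56KB write fits in one newly allocated block, while a 65KB write would need two. A quick illustration:

```go
package main

import "fmt"

// blocksNeeded returns how many 64KB blocks an allocate-on-demand
// write of the given size consumes.
func blocksNeeded(sizeBytes int64) int64 {
	const blockSize = 64 * 1024
	return (sizeBytes + blockSize - 1) / blockSize
}

func main() {
	fmt.Println(blocksNeeded(56 * 1024)) // 1
	fmt.Println(blocksNeeded(65 * 1024)) // 2
}
```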
@@ -152,7 +152,7 @@ To modify existing data for the first time:
 
 2. A copy-on-write operation locates the blocks that need updating.
 
-3. The operation allocates new empty blocks to the container snapshot and 
+3. The operation allocates new empty blocks to the container snapshot and
 copies the data into those blocks.
 
 4. The modified data is written into the newly allocated blocks.
... ...
@@ -164,18 +164,18 @@ to the application's read and write operations.
 ## Configuring Docker with Device Mapper
 
 The `devicemapper` is the default Docker storage driver on some Linux
-distributions. This includes RHEL and most of its forks. Currently, the 
+distributions. This includes RHEL and most of its forks. Currently, the
 following distributions support the driver:
 
 * RHEL/CentOS/Fedora
-* Ubuntu 12.04          
-* Ubuntu 14.04          
-* Debian  
+* Ubuntu 12.04
+* Ubuntu 14.04
+* Debian
 
 Docker hosts running the `devicemapper` storage driver default to a
 configuration mode known as `loop-lvm`. This mode uses sparse files to build
-the thin pool used by image and container snapshots. The mode is designed to 
-work out-of-the-box with no additional configuration. However, production 
+the thin pool used by image and container snapshots. The mode is designed to
+work out-of-the-box with no additional configuration. However, production
 deployments should not run under `loop-lvm` mode.
 
 You can detect the mode by viewing the `docker info` command:
... ...
@@ -193,83 +193,83 @@ You can detect the mode by viewing the `docker info` command:
      Library Version: 1.02.93-RHEL7 (2015-01-28)
      ...
 
-The output above shows a Docker host running with the `devicemapper` storage 
-driver operating in `loop-lvm` mode. This is indicated by the fact that the 
-`Data loop file` and a `Metadata loop file` are on files under 
-`/var/lib/docker/devicemapper/devicemapper`. These are loopback mounted sparse 
+The output above shows a Docker host running with the `devicemapper` storage
+driver operating in `loop-lvm` mode. This is indicated by the fact that the
+`Data loop file` and a `Metadata loop file` are on files under
+`/var/lib/docker/devicemapper/devicemapper`. These are loopback mounted sparse
 files.
 
 ### Configure direct-lvm mode for production
 
 The preferred configuration for production deployments is `direct lvm`. This
 mode uses block devices to create the thin pool. The following procedure shows
-you how to configure a Docker host to use the `devicemapper` storage driver in 
+you how to configure a Docker host to use the `devicemapper` storage driver in
 a `direct-lvm` configuration.
 
-> **Caution:** If you have already run the Docker daemon on your Docker host 
-> and have images you want to keep, `push` them Docker Hub or your private 
+> **Caution:** If you have already run the Docker daemon on your Docker host
+> and have images you want to keep, `push` them to Docker Hub or your private
 > Docker Trusted Registry before attempting this procedure.
 
-The procedure below will create a 90GB data volume and 4GB metadata volume to 
-use as backing for the storage pool. It assumes that you have a spare block 
-device at `/dev/xvdf` with enough free space to complete the task. The device 
-identifier and volume sizes may be be different in your environment and you 
-should substitute your own values throughout the procedure. The procedure also 
+The procedure below will create a 90GB data volume and 4GB metadata volume to
+use as backing for the storage pool. It assumes that you have a spare block
+device at `/dev/xvdf` with enough free space to complete the task. The device
+identifier and volume sizes may be different in your environment and you
+should substitute your own values throughout the procedure. The procedure also
 assumes that the Docker daemon is in the `stopped` state.
 
 1. Log in to the Docker host you want to configure and stop the Docker daemon.
 
-2. If it exists, delete your existing image store by removing the 
+2. If it exists, delete your existing image store by removing the
 `/var/lib/docker` directory.
 
         $ sudo rm -rf /var/lib/docker
 
-3. Create an LVM physical volume (PV) on your spare block device using the 
+3. Create an LVM physical volume (PV) on your spare block device using the
 `pvcreate` command.
 
         $ sudo pvcreate /dev/xvdf
         Physical volume `/dev/xvdf` successfully created
 
-    The device identifier may be different on your system. Remember to 
+    The device identifier may be different on your system. Remember to
 substitute your value in the command above.
 
-4. Create a new volume group (VG) called `vg-docker` using the PV created in 
+4. Create a new volume group (VG) called `vg-docker` using the PV created in
 the previous step.
 
         $ sudo vgcreate vg-docker /dev/xvdf
         Volume group `vg-docker` successfully created
 
-5. Create a new 90GB logical volume (LV) called `data` from space in the 
+5. Create a new 90GB logical volume (LV) called `data` from space in the
 `vg-docker` volume group.
 
         $ sudo lvcreate -L 90G -n data vg-docker
         Logical volume `data` created.
 
-    The command creates an LVM logical volume called `data` and an associated 
-block device file at `/dev/vg-docker/data`. In a later step, you instruct the 
-`devicemapper` storage driver to use this block device to store image and 
+    The command creates an LVM logical volume called `data` and an associated
+block device file at `/dev/vg-docker/data`. In a later step, you instruct the
+`devicemapper` storage driver to use this block device to store image and
 container data.
 
-    If you receive a signature detection warning, make sure you are working on 
-the correct devices before continuing. Signature warnings indicate that the 
-device you're working on is currently in use by LVM or has been used by LVM in 
+    If you receive a signature detection warning, make sure you are working on
+the correct devices before continuing. Signature warnings indicate that the
+device you're working on is currently in use by LVM or has been used by LVM in
 the past.
 
-6. Create a new logical volume (LV) called `metadata` from space in the 
+6. Create a new logical volume (LV) called `metadata` from space in the
 `vg-docker` volume group.
 
         $ sudo lvcreate -L 4G -n metadata vg-docker
         Logical volume `metadata` created.
 
-    This creates an LVM logical volume called `metadata` and an associated 
-block device file at `/dev/vg-docker/metadata`. In the next step you instruct 
-the `devicemapper` storage driver to use this block device to store image and 
+    This creates an LVM logical volume called `metadata` and an associated
+block device file at `/dev/vg-docker/metadata`. In the next step you instruct
+the `devicemapper` storage driver to use this block device to store image and
 container metadata.
 
-7. Start the Docker daemon with the `devicemapper` storage driver and the 
+7. Start the Docker daemon with the `devicemapper` storage driver and the
 `--storage-opt` flags.
 
-    The `data` and `metadata` devices that you pass to the `--storage-opt` 
+    The `data` and `metadata` devices that you pass to the `--storage-opt`
 options were created in the previous steps.
 
           $ sudo docker daemon --storage-driver=devicemapper --storage-opt dm.datadev=/dev/vg-docker/data --storage-opt dm.metadatadev=/dev/vg-docker/metadata &
... ...
@@ -279,13 +279,13 @@ options were created in the previous steps.
           INFO[0027] Option DefaultNetwork: bridge
           <output truncated>
           INFO[0027] Daemon has completed initialization
-          INFO[0027] Docker daemon commit=0a8c2e3 execdriver=native-0.2 graphdriver=devicemapper version=1.8.2
+          INFO[0027] Docker daemon commit=1b09a95-unsupported graphdriver=aufs version=1.11.0-dev
 
     It is also possible to set the `--storage-driver` and `--storage-opt` flags
 in the Docker config file and start the daemon normally using the `service` or
 `systemd` commands.
 
-8. Use the `docker info` command to verify that the daemon is using `data` and 
+8. Use the `docker info` command to verify that the daemon is using `data` and
 `metadata` devices you created.
 
         $ sudo docker info
... ...
@@ -301,12 +301,12 @@ options were created in the previous steps.
         [...]
 
     The output of the command above shows the storage driver as `devicemapper`.
- The last two lines also confirm that the correct devices are being used for 
+ The last two lines also confirm that the correct devices are being used for
 the `Data file` and the `Metadata file`.
 
 ### Examine devicemapper structures on the host
 
-You can use the `lsblk` command to see the device files created above and the 
+You can use the `lsblk` command to see the device files created above and the
 `pool` that the `devicemapper` storage driver creates on top of them.
 
     $ sudo lsblk
... ...
@@ -319,7 +319,7 @@ You can use the `lsblk` command to see the device files created above and the
     └─vg--docker-metadata      253:1    0    4G  0 lvm
       └─docker-202:1-1032-pool 253:2    0   10G  0 dm
 
-The diagram below shows the image from prior examples updated with the detail 
+The diagram below shows the image from prior examples updated with the detail
 from the `lsblk` command above.
 
 ![](http://farm1.staticflickr.com/703/22116692899_0471e5e160_b.jpg)
... ...
@@ -335,73 +335,73 @@ Docker-MAJ:MIN-INO-pool
 `MAJ`, `MIN` and `INO` refer to the major and minor device numbers and inode.
 
 Because Device Mapper operates at the block level it is more difficult to see
-diffs between image layers and containers. Docker 1.10 and later no longer 
-matches image layer IDs with directory names in `/var/lib/docker`.  However, 
+diffs between image layers and containers. Docker 1.10 and later no longer
+matches image layer IDs with directory names in `/var/lib/docker`.  However,
 there are two key directories. The `/var/lib/docker/devicemapper/mnt` directory
- contains the mount points for image and container layers. The 
-`/var/lib/docker/devicemapper/metadata`directory contains one file for every 
-image layer and container snapshot. The files contain metadata about each 
+ contains the mount points for image and container layers. The
+`/var/lib/docker/devicemapper/metadata` directory contains one file for every
+image layer and container snapshot. The files contain metadata about each
 snapshot in JSON format.
 
 ## Device Mapper and Docker performance
 
-It is important to understand the impact that allocate-on-demand and 
+It is important to understand the impact that allocate-on-demand and
 copy-on-write operations can have on overall container performance.
 
 ### Allocate-on-demand performance impact
 
-The `devicemapper` storage driver allocates new blocks to a container via an 
-allocate-on-demand operation. This means that each time an app writes to 
-somewhere new inside a container, one or more empty blocks has to be located 
+The `devicemapper` storage driver allocates new blocks to a container via an
+allocate-on-demand operation. This means that each time an app writes to
+somewhere new inside a container, one or more empty blocks has to be located
 from the pool and mapped into the container.
 
 All blocks are 64KB. A write that uses less than 64KB still results in a single
- 64KB block being allocated. Writing more than 64KB of data uses multiple 64KB 
-blocks. This can impact container performance, especially in containers that 
+ 64KB block being allocated. Writing more than 64KB of data uses multiple 64KB
+blocks. This can impact container performance, especially in containers that
 perform lots of small writes. However, once a block is allocated to a container
  subsequent reads and writes can operate directly on that block.
 
 ### Copy-on-write performance impact
 
-Each time a container updates existing data for the first time, the 
-`devicemapper` storage driver has to perform a copy-on-write operation. This 
-copies the data from the image snapshot to the container's snapshot. This 
+Each time a container updates existing data for the first time, the
+`devicemapper` storage driver has to perform a copy-on-write operation. This
+copies the data from the image snapshot to the container's snapshot. This
 process can have a noticeable impact on container performance.
 
-All copy-on-write operations have a 64KB granularity. As a results, updating 
-32KB of a 1GB file causes the driver to copy a single 64KB block into the 
-container's snapshot. This has obvious performance advantages over file-level 
-copy-on-write operations which would require copying the entire 1GB file into 
+All copy-on-write operations have a 64KB granularity. As a result, updating
+32KB of a 1GB file causes the driver to copy a single 64KB block into the
+container's snapshot. This has obvious performance advantages over file-level
+copy-on-write operations which would require copying the entire 1GB file into
 the container layer.
 
-In practice, however, containers that perform lots of small block writes 
+In practice, however, containers that perform lots of small block writes
 (<64KB) can perform worse with `devicemapper` than with AUFS.
 
 ### Other device mapper performance considerations
 
-There are several other things that impact the performance of the 
+There are several other things that impact the performance of the
 `devicemapper` storage driver.
 
-- **The mode.** The default mode for Docker running the `devicemapper` storage 
-driver is `loop-lvm`. This mode uses sparse files and suffers from poor 
+- **The mode.** The default mode for Docker running the `devicemapper` storage
+driver is `loop-lvm`. This mode uses sparse files and suffers from poor
 performance. It is **not recommended for production**. The recommended mode for
- production environments is `direct-lvm` where the storage driver writes 
+ production environments is `direct-lvm` where the storage driver writes
 directly to raw block devices.
 
 - **High speed storage.** For best performance you should place the `Data file`
- and `Metadata file` on high speed storage such as SSD. This can be direct 
+ and `Metadata file` on high speed storage such as SSD. This can be direct
 attached storage or from a SAN or NAS array.
 
-- **Memory usage.** `devicemapper` is not the most memory efficient Docker 
-storage driver. Launching *n* copies of the same container loads *n* copies of 
-its files into memory. This can have a memory impact on your Docker host. As a 
-result, the `devicemapper` storage driver may not be the best choice for PaaS 
+- **Memory usage.** `devicemapper` is not the most memory efficient Docker
+storage driver. Launching *n* copies of the same container loads *n* copies of
+its files into memory. This can have a memory impact on your Docker host. As a
+result, the `devicemapper` storage driver may not be the best choice for PaaS
 and other high density use cases.
 
-One final point, data volumes provide the best and most predictable 
-performance. This is because they bypass the storage driver and do not incur 
-any of the potential overheads introduced by thin provisioning and 
-copy-on-write. For this reason, you should to place heavy write workloads on 
+One final point, data volumes provide the best and most predictable
+performance. This is because they bypass the storage driver and do not incur
+any of the potential overheads introduced by thin provisioning and
+copy-on-write. For this reason, you should place heavy write workloads on
 data volumes.
 
 ## Related Information
... ...
@@ -21,8 +21,7 @@ var (
 
 	// TODO Windows CI. These are incorrect and need fixing into
 	// platform specific pieces.
-	runtimePath    = "/var/run/docker"
-	execDriverPath = runtimePath + "/execdriver/native"
+	runtimePath = "/var/run/docker"
 
 	workingDirectory string
 
... ...
@@ -126,10 +126,10 @@ format.
   DNS search domains to use.
 
 **--exec-opt**=[]
-  Set exec driver options. See EXEC DRIVER OPTIONS.
+  Set runtime execution options. See RUNTIME EXECUTION OPTIONS.
 
 **--exec-root**=""
-  Path to use as the root of the Docker exec driver. Default is `/var/run/docker`.
+  Path to use as the root of the Docker execution state files. Default is `/var/run/docker`.
 
 **--fixed-cidr**=""
   IPv4 subnet for fixed IPs (e.g., 10.20.0.0/16); this subnet must be nested in the bridge subnet (which is defined by \-b or \-\-bip)
... ...
@@ -289,13 +289,13 @@ will use more space for base images the larger the device
 is.
 
 The base device size can be increased at daemon restart which will allow
-all future images and containers (based on those new images) to be of the 
+all future images and containers (based on those new images) to be of the
 new base device size.
 
-Example use: `docker daemon --storage-opt dm.basesize=50G` 
+Example use: `docker daemon --storage-opt dm.basesize=50G`
 
-This will increase the base device size to 50G. The Docker daemon will throw an 
-error if existing base device size is larger than 50G. A user can use 
+This will increase the base device size to 50G. The Docker daemon will throw an
+error if existing base device size is larger than 50G. A user can use
 this option to expand the base device size however shrinking is not permitted.
 
 This value affects the system-wide "base" empty filesystem that may already
... ...
@@ -16,7 +16,7 @@ CONTAINER|IMAGE [CONTAINER|IMAGE...]
 
 This displays all the information available in Docker for a given
 container or image. By default, this will render all results in a JSON
-array. If the container and image have the same name, this will return 
+array. If the container and image have the same name, this will return
 container JSON for unspecified type. If a format is specified, the given
 template will be executed for each result.
 
... ...
@@ -110,7 +110,6 @@ To get information on a container use its ID or instance name:
     "Name": "/adoring_wozniak",
     "RestartCount": 0,
     "Driver": "devicemapper",
-    "ExecDriver": "native-0.2",
    "MountLabel": "",
    "ProcessLabel": "",
    "Mounts": [
... ...
@@ -224,15 +224,14 @@ inside it)
   See **docker-wait(1)** for full documentation on the **wait** command.
 
 
-# EXEC DRIVER OPTIONS
+# RUNTIME EXECUTION OPTIONS
 
 Use the **--exec-opt** flags to specify options to the execution driver.
 The following options are available:
 
 #### native.cgroupdriver
-Specifies the management of the container's `cgroups`. You can specify 
-`cgroupfs` or `systemd`. If you specify `systemd` and it is not available, the 
-system uses `cgroupfs`.
+Specifies the management of the container's `cgroups`. Only `cgroupfs` can be
+specified at the moment.
 
 #### Client
 For specific client examples please see the man page for the specific Docker