Merge pull request #13161 from calavera/plugin_discovery

Proposal: Volume refactor and external volume plugins

Arnaud Porterie authored on 2015/05/24 10:44:18
Showing 43 changed files
... ...
@@ -6,19 +6,18 @@ import (
6 6
 	"errors"
7 7
 	"fmt"
8 8
 	"io"
9
-	"net"
10 9
 	"net/http"
11 10
 	"os"
12 11
 	"path/filepath"
13 12
 	"reflect"
14 13
 	"strings"
15 14
 	"text/template"
16
-	"time"
17 15
 
18 16
 	"github.com/docker/docker/cliconfig"
19 17
 	"github.com/docker/docker/pkg/homedir"
20 18
 	flag "github.com/docker/docker/pkg/mflag"
21 19
 	"github.com/docker/docker/pkg/term"
20
+	"github.com/docker/docker/utils"
22 21
 )
23 22
 
24 23
 // DockerCli represents the docker command line client.
... ...
@@ -178,19 +177,7 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, keyFile string, proto, a
178 178
 	tr := &http.Transport{
179 179
 		TLSClientConfig: tlsConfig,
180 180
 	}
181
-
182
-	// Why 32? See https://github.com/docker/docker/pull/8035.
183
-	timeout := 32 * time.Second
184
-	if proto == "unix" {
185
-		// No need for compression in local communications.
186
-		tr.DisableCompression = true
187
-		tr.Dial = func(_, _ string) (net.Conn, error) {
188
-			return net.DialTimeout(proto, addr, timeout)
189
-		}
190
-	} else {
191
-		tr.Proxy = http.ProxyFromEnvironment
192
-		tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
193
-	}
181
+	utils.ConfigureTCPTransport(tr, proto, addr)
194 182
 
195 183
 	configFile, e := cliconfig.Load(filepath.Join(homedir.Get(), ".docker"))
196 184
 	if e != nil {
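The dial and timeout logic removed above is consolidated into `utils.ConfigureTCPTransport`. Reconstructed from the removed lines, the extracted helper presumably looks roughly like this (a sketch, not necessarily the exact `utils` implementation):

```
package utils

import (
	"net"
	"net/http"
	"time"
)

// ConfigureTCPTransport applies the client transport defaults that used to be
// set inline in NewDockerCli (sketch reconstructed from the removed code).
func ConfigureTCPTransport(tr *http.Transport, proto, addr string) {
	// Why 32? See https://github.com/docker/docker/pull/8035.
	timeout := 32 * time.Second
	if proto == "unix" {
		// No need for compression in local communications.
		tr.DisableCompression = true
		tr.Dial = func(_, _ string) (net.Conn, error) {
			return net.DialTimeout(proto, addr, timeout)
		}
	} else {
		tr.Proxy = http.ProxyFromEnvironment
		tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
	}
}
```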
... ...
@@ -773,7 +773,7 @@ func (b *Builder) clearTmp() {
773 773
 			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
774 774
 			return
775 775
 		}
776
-		b.Daemon.DeleteVolumes(tmp.VolumePaths())
776
+		b.Daemon.DeleteVolumes(tmp)
777 777
 		delete(b.TmpContainers, c)
778 778
 		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c))
779 779
 	}
... ...
@@ -26,9 +26,11 @@ import (
26 26
 	"github.com/docker/docker/pkg/broadcastwriter"
27 27
 	"github.com/docker/docker/pkg/ioutils"
28 28
 	"github.com/docker/docker/pkg/jsonlog"
29
+	"github.com/docker/docker/pkg/mount"
29 30
 	"github.com/docker/docker/pkg/promise"
30 31
 	"github.com/docker/docker/pkg/symlink"
31 32
 	"github.com/docker/docker/runconfig"
33
+	"github.com/docker/docker/volume"
32 34
 )
33 35
 
34 36
 var (
... ...
@@ -48,46 +50,37 @@ type StreamConfig struct {
48 48
 // CommonContainer holds the settings for a container which are applicable
49 49
 // across all platforms supported by the daemon.
50 50
 type CommonContainer struct {
51
+	StreamConfig
52
+
51 53
 	*State `json:"State"` // Needed for remote api version <= 1.11
52 54
 	root   string         // Path to the "home" of the container, including metadata.
53 55
 	basefs string         // Path to the graphdriver mountpoint
54 56
 
55
-	ID string
56
-
57
-	Created time.Time
58
-
59
-	Path string
60
-	Args []string
61
-
62
-	Config  *runconfig.Config
63
-	ImageID string `json:"Image"`
64
-
65
-	NetworkSettings *network.Settings
66
-
67
-	ResolvConfPath string
68
-	HostnamePath   string
69
-	HostsPath      string
70
-	LogPath        string
71
-	Name           string
72
-	Driver         string
73
-	ExecDriver     string
74
-
75
-	command *execdriver.Command
76
-	StreamConfig
77
-
78
-	daemon                   *Daemon
57
+	ID                       string
58
+	Created                  time.Time
59
+	Path                     string
60
+	Args                     []string
61
+	Config                   *runconfig.Config
62
+	ImageID                  string `json:"Image"`
63
+	NetworkSettings          *network.Settings
64
+	ResolvConfPath           string
65
+	HostnamePath             string
66
+	HostsPath                string
67
+	LogPath                  string
68
+	Name                     string
69
+	Driver                   string
70
+	ExecDriver               string
79 71
 	MountLabel, ProcessLabel string
80 72
 	RestartCount             int
81 73
 	UpdateDns                bool
74
+	MountPoints              map[string]*mountPoint
82 75
 
83
-	// Maps container paths to volume paths.  The key in this is the path to which
84
-	// the volume is being mounted inside the container.  Value is the path of the
85
-	// volume on disk
86
-	Volumes    map[string]string
87 76
 	hostConfig *runconfig.HostConfig
77
+	command    *execdriver.Command
88 78
 
89 79
 	monitor      *containerMonitor
90 80
 	execCommands *execStore
81
+	daemon       *Daemon
91 82
 	// logDriver for closing
92 83
 	logDriver logger.Logger
93 84
 	logCopier *logger.Copier
... ...
@@ -259,9 +252,6 @@ func (container *Container) Start() (err error) {
259 259
 		return err
260 260
 	}
261 261
 	container.verifyDaemonSettings()
262
-	if err := container.prepareVolumes(); err != nil {
263
-		return err
264
-	}
265 262
 	linkedEnv, err := container.setupLinkedContainers()
266 263
 	if err != nil {
267 264
 		return err
... ...
@@ -273,10 +263,13 @@ func (container *Container) Start() (err error) {
273 273
 	if err := populateCommand(container, env); err != nil {
274 274
 		return err
275 275
 	}
276
-	if err := container.setupMounts(); err != nil {
276
+
277
+	mounts, err := container.setupMounts()
278
+	if err != nil {
277 279
 		return err
278 280
 	}
279 281
 
282
+	container.command.Mounts = mounts
280 283
 	return container.waitForStart()
281 284
 }
282 285
 
... ...
@@ -353,6 +346,8 @@ func (container *Container) cleanup() {
353 353
 	for _, eConfig := range container.execCommands.s {
354 354
 		container.daemon.unregisterExecCommand(eConfig)
355 355
 	}
356
+
357
+	container.UnmountVolumes(true)
356 358
 }
357 359
 
358 360
 func (container *Container) KillSig(sig int) error {
... ...
@@ -476,6 +471,7 @@ func (container *Container) Stop(seconds int) error {
476 476
 			return err
477 477
 		}
478 478
 	}
479
+
479 480
 	return nil
480 481
 }
481 482
 
... ...
@@ -573,25 +569,29 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
573 573
 	}
574 574
 	defer func() {
575 575
 		if err != nil {
576
+			// unmount any volumes
577
+			container.UnmountVolumes(true)
578
+			// unmount the container's rootfs
576 579
 			container.Unmount()
577 580
 		}
578 581
 	}()
579
-
580
-	if err = container.mountVolumes(); err != nil {
581
-		container.unmountVolumes()
582
+	mounts, err := container.setupMounts()
583
+	if err != nil {
582 584
 		return nil, err
583 585
 	}
584
-	defer func() {
586
+	for _, m := range mounts {
587
+		dest, err := container.GetResourcePath(m.Destination)
585 588
 		if err != nil {
586
-			container.unmountVolumes()
589
+			return nil, err
587 590
 		}
588
-	}()
589
-
591
+		if err := mount.Mount(m.Source, dest, "bind", "rbind,ro"); err != nil {
592
+			return nil, err
593
+		}
594
+	}
590 595
 	basePath, err := container.GetResourcePath(resource)
591 596
 	if err != nil {
592 597
 		return nil, err
593 598
 	}
594
-
595 599
 	stat, err := os.Stat(basePath)
596 600
 	if err != nil {
597 601
 		return nil, err
... ...
@@ -605,7 +605,6 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
605 605
 		filter = []string{filepath.Base(basePath)}
606 606
 		basePath = filepath.Dir(basePath)
607 607
 	}
608
-
609 608
 	archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
610 609
 		Compression:  archive.Uncompressed,
611 610
 		IncludeFiles: filter,
... ...
@@ -613,10 +612,9 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
613 613
 	if err != nil {
614 614
 		return nil, err
615 615
 	}
616
-
617 616
 	return ioutils.NewReadCloserWrapper(archive, func() error {
618 617
 			err := archive.Close()
619
-			container.unmountVolumes()
618
+			container.UnmountVolumes(true)
620 619
 			container.Unmount()
621 620
 			return err
622 621
 		}),
... ...
@@ -1007,3 +1005,129 @@ func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error)
1007 1007
 	}
1008 1008
 	return written, err
1009 1009
 }
1010
+
1011
+func (container *Container) networkMounts() []execdriver.Mount {
1012
+	var mounts []execdriver.Mount
1013
+	if container.ResolvConfPath != "" {
1014
+		mounts = append(mounts, execdriver.Mount{
1015
+			Source:      container.ResolvConfPath,
1016
+			Destination: "/etc/resolv.conf",
1017
+			Writable:    !container.hostConfig.ReadonlyRootfs,
1018
+			Private:     true,
1019
+		})
1020
+	}
1021
+	if container.HostnamePath != "" {
1022
+		mounts = append(mounts, execdriver.Mount{
1023
+			Source:      container.HostnamePath,
1024
+			Destination: "/etc/hostname",
1025
+			Writable:    !container.hostConfig.ReadonlyRootfs,
1026
+			Private:     true,
1027
+		})
1028
+	}
1029
+	if container.HostsPath != "" {
1030
+		mounts = append(mounts, execdriver.Mount{
1031
+			Source:      container.HostsPath,
1032
+			Destination: "/etc/hosts",
1033
+			Writable:    !container.hostConfig.ReadonlyRootfs,
1034
+			Private:     true,
1035
+		})
1036
+	}
1037
+	return mounts
1038
+}
1039
+
1040
+func (container *Container) addLocalMountPoint(name, destination string, rw bool) {
1041
+	container.MountPoints[destination] = &mountPoint{
1042
+		Name:        name,
1043
+		Driver:      volume.DefaultDriverName,
1044
+		Destination: destination,
1045
+		RW:          rw,
1046
+	}
1047
+}
1048
+
1049
+func (container *Container) addMountPointWithVolume(destination string, vol volume.Volume, rw bool) {
1050
+	container.MountPoints[destination] = &mountPoint{
1051
+		Name:        vol.Name(),
1052
+		Driver:      vol.DriverName(),
1053
+		Destination: destination,
1054
+		RW:          rw,
1055
+		Volume:      vol,
1056
+	}
1057
+}
1058
+
1059
+func (container *Container) isDestinationMounted(destination string) bool {
1060
+	return container.MountPoints[destination] != nil
1061
+}
1062
+
1063
+func (container *Container) prepareMountPoints() error {
1064
+	for _, config := range container.MountPoints {
1065
+		if len(config.Driver) > 0 {
1066
+			v, err := createVolume(config.Name, config.Driver)
1067
+			if err != nil {
1068
+				return err
1069
+			}
1070
+			config.Volume = v
1071
+		}
1072
+	}
1073
+	return nil
1074
+}
1075
+
1076
+func (container *Container) removeMountPoints() error {
1077
+	for _, m := range container.MountPoints {
1078
+		if m.Volume != nil {
1079
+			if err := removeVolume(m.Volume); err != nil {
1080
+				return err
1081
+			}
1082
+		}
1083
+	}
1084
+	return nil
1085
+}
1086
+
1087
+func (container *Container) shouldRestart() bool {
1088
+	return container.hostConfig.RestartPolicy.Name == "always" ||
1089
+		(container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0)
1090
+}
1091
+
1092
+func (container *Container) UnmountVolumes(forceSyscall bool) error {
1093
+	for _, m := range container.MountPoints {
1094
+		dest, err := container.GetResourcePath(m.Destination)
1095
+		if err != nil {
1096
+			return err
1097
+		}
1098
+
1099
+		if forceSyscall {
1100
+			syscall.Unmount(dest, 0)
1101
+		}
1102
+
1103
+		if m.Volume != nil {
1104
+			if err := m.Volume.Unmount(); err != nil {
1105
+				return err
1106
+			}
1107
+		}
1108
+	}
1109
+	return nil
1110
+}
1111
+
1112
+func (container *Container) copyImagePathContent(v volume.Volume, destination string) error {
1113
+	rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, destination), container.basefs)
1114
+	if err != nil {
1115
+		return err
1116
+	}
1117
+
1118
+	if _, err = ioutil.ReadDir(rootfs); err != nil {
1119
+		if os.IsNotExist(err) {
1120
+			return nil
1121
+		}
1122
+		return err
1123
+	}
1124
+
1125
+	path, err := v.Mount()
1126
+	if err != nil {
1127
+		return err
1128
+	}
1129
+
1130
+	if err := copyExistingContents(rootfs, path); err != nil {
1131
+		return err
1132
+	}
1133
+
1134
+	return v.Unmount()
1135
+}
... ...
@@ -42,14 +42,7 @@ type Container struct {
42 42
 	// Fields below here are platform specific.
43 43
 
44 44
 	AppArmorProfile string
45
-
46
-	// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
47
-	// Easier than migrating older container configs :)
48
-	VolumesRW map[string]bool
49
-
50
-	AppliedVolumesFrom map[string]struct{}
51
-
52
-	activeLinks map[string]*links.Link
45
+	activeLinks     map[string]*links.Link
53 46
 }
54 47
 
55 48
 func killProcessDirectly(container *Container) error {
... ...
@@ -27,12 +27,6 @@ type Container struct {
27 27
 	// removed in subsequent PRs.
28 28
 
29 29
 	AppArmorProfile string
30
-
31
-	// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
32
-	// Easier than migrating older container configs :)
33
-	VolumesRW map[string]bool
34
-
35
-	AppliedVolumesFrom map[string]struct{}
36 30
 	// ---- END OF TEMPORARY DECLARATION ----
37 31
 
38 32
 }
... ...
@@ -2,11 +2,14 @@ package daemon
2 2
 
3 3
 import (
4 4
 	"fmt"
5
+	"os"
5 6
 	"path/filepath"
7
+	"strings"
6 8
 
7 9
 	"github.com/docker/docker/graph"
8 10
 	"github.com/docker/docker/image"
9 11
 	"github.com/docker/docker/pkg/parsers"
12
+	"github.com/docker/docker/pkg/stringid"
10 13
 	"github.com/docker/docker/runconfig"
11 14
 	"github.com/docker/libcontainer/label"
12 15
 )
... ...
@@ -87,17 +90,51 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
87 87
 	if err := daemon.createRootfs(container); err != nil {
88 88
 		return nil, nil, err
89 89
 	}
90
-	if hostConfig != nil {
91
-		if err := daemon.setHostConfig(container, hostConfig); err != nil {
92
-			return nil, nil, err
93
-		}
90
+	if err := daemon.setHostConfig(container, hostConfig); err != nil {
91
+		return nil, nil, err
94 92
 	}
95 93
 	if err := container.Mount(); err != nil {
96 94
 		return nil, nil, err
97 95
 	}
98 96
 	defer container.Unmount()
99
-	if err := container.prepareVolumes(); err != nil {
100
-		return nil, nil, err
97
+
98
+	for spec := range config.Volumes {
99
+		var (
100
+			name, destination string
101
+			parts             = strings.Split(spec, ":")
102
+		)
103
+		switch len(parts) {
104
+		case 2:
105
+			name, destination = parts[0], filepath.Clean(parts[1])
106
+		default:
107
+			name = stringid.GenerateRandomID()
108
+			destination = filepath.Clean(parts[0])
109
+		}
110
+		// Skip volumes for which we already have something mounted on that
111
+		// destination because of a --volume-from.
112
+		if container.isDestinationMounted(destination) {
113
+			continue
114
+		}
115
+		path, err := container.GetResourcePath(destination)
116
+		if err != nil {
117
+			return nil, nil, err
118
+		}
119
+
120
+		stat, err := os.Stat(path)
121
+		if err == nil && !stat.IsDir() {
122
+			return nil, nil, fmt.Errorf("cannot mount volume over existing file, file exists %s", path)
123
+		}
124
+
125
+		v, err := createVolume(name, config.VolumeDriver)
126
+		if err != nil {
127
+			return nil, nil, err
128
+		}
129
+
130
+		if err := container.copyImagePathContent(v, destination); err != nil {
131
+			return nil, nil, err
132
+		}
133
+
134
+		container.addMountPointWithVolume(destination, v, true)
101 135
 	}
102 136
 	if err := container.ToDisk(); err != nil {
103 137
 		return nil, nil, err
... ...
@@ -46,9 +46,12 @@ import (
46 46
 	"github.com/docker/docker/runconfig"
47 47
 	"github.com/docker/docker/trust"
48 48
 	"github.com/docker/docker/utils"
49
-	"github.com/docker/docker/volumes"
49
+	volumedrivers "github.com/docker/docker/volume/drivers"
50
+	"github.com/docker/docker/volume/local"
50 51
 )
51 52
 
53
+const defaultVolumesPathName = "volumes"
54
+
52 55
 var (
53 56
 	validContainerNameChars   = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
54 57
 	validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
... ...
@@ -99,7 +102,6 @@ type Daemon struct {
99 99
 	repositories     *graph.TagStore
100 100
 	idIndex          *truncindex.TruncIndex
101 101
 	sysInfo          *sysinfo.SysInfo
102
-	volumes          *volumes.Repository
103 102
 	config           *Config
104 103
 	containerGraph   *graphdb.Database
105 104
 	driver           graphdriver.Driver
... ...
@@ -109,6 +111,7 @@ type Daemon struct {
109 109
 	RegistryService  *registry.Service
110 110
 	EventsService    *events.Events
111 111
 	netController    libnetwork.NetworkController
112
+	root             string
112 113
 }
113 114
 
114 115
 // Get looks for a container using the provided information, which could be
... ...
@@ -209,7 +212,13 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
209 209
 	// we'll waste time if we update it for every container
210 210
 	daemon.idIndex.Add(container.ID)
211 211
 
212
-	container.registerVolumes()
212
+	if err := daemon.verifyOldVolumesInfo(container); err != nil {
213
+		return err
214
+	}
215
+
216
+	if err := container.prepareMountPoints(); err != nil {
217
+		return err
218
+	}
213 219
 
214 220
 	if container.IsRunning() {
215 221
 		logrus.Debugf("killing old running container %s", container.ID)
... ...
@@ -249,10 +258,15 @@ func (daemon *Daemon) ensureName(container *Container) error {
249 249
 }
250 250
 
251 251
 func (daemon *Daemon) restore() error {
252
+	type cr struct {
253
+		container  *Container
254
+		registered bool
255
+	}
256
+
252 257
 	var (
253 258
 		debug         = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "")
254
-		containers    = make(map[string]*Container)
255 259
 		currentDriver = daemon.driver.String()
260
+		containers    = make(map[string]*cr)
256 261
 	)
257 262
 
258 263
 	if !debug {
... ...
@@ -278,14 +292,12 @@ func (daemon *Daemon) restore() error {
278 278
 		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
279 279
 			logrus.Debugf("Loaded container %v", container.ID)
280 280
 
281
-			containers[container.ID] = container
281
+			containers[container.ID] = &cr{container: container}
282 282
 		} else {
283 283
 			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
284 284
 		}
285 285
 	}
286 286
 
287
-	registeredContainers := []*Container{}
288
-
289 287
 	if entities := daemon.containerGraph.List("/", -1); entities != nil {
290 288
 		for _, p := range entities.Paths() {
291 289
 			if !debug && logrus.GetLevel() == logrus.InfoLevel {
... ...
@@ -294,50 +306,43 @@ func (daemon *Daemon) restore() error {
294 294
 
295 295
 			e := entities[p]
296 296
 
297
-			if container, ok := containers[e.ID()]; ok {
298
-				if err := daemon.register(container, false); err != nil {
299
-					logrus.Debugf("Failed to register container %s: %s", container.ID, err)
300
-				}
301
-
302
-				registeredContainers = append(registeredContainers, container)
303
-
304
-				// delete from the map so that a new name is not automatically generated
305
-				delete(containers, e.ID())
297
+			if c, ok := containers[e.ID()]; ok {
298
+				c.registered = true
306 299
 			}
307 300
 		}
308 301
 	}
309 302
 
310
-	// Any containers that are left over do not exist in the graph
311
-	for _, container := range containers {
312
-		// Try to set the default name for a container if it exists prior to links
313
-		container.Name, err = daemon.generateNewName(container.ID)
314
-		if err != nil {
315
-			logrus.Debugf("Setting default id - %s", err)
316
-		}
303
+	group := sync.WaitGroup{}
304
+	for _, c := range containers {
305
+		group.Add(1)
317 306
 
318
-		if err := daemon.register(container, false); err != nil {
319
-			logrus.Debugf("Failed to register container %s: %s", container.ID, err)
320
-		}
307
+		go func(container *Container, registered bool) {
308
+			defer group.Done()
321 309
 
322
-		registeredContainers = append(registeredContainers, container)
323
-	}
310
+			if !registered {
311
+				// Try to set the default name for a container if it exists prior to links
312
+				container.Name, err = daemon.generateNewName(container.ID)
313
+				if err != nil {
314
+					logrus.Debugf("Setting default id - %s", err)
315
+				}
316
+			}
324 317
 
325
-	// check the restart policy on the containers and restart any container with
326
-	// the restart policy of "always"
327
-	if daemon.config.AutoRestart {
328
-		logrus.Debug("Restarting containers...")
318
+			if err := daemon.register(container, false); err != nil {
319
+				logrus.Debugf("Failed to register container %s: %s", container.ID, err)
320
+			}
329 321
 
330
-		for _, container := range registeredContainers {
331
-			if container.hostConfig.RestartPolicy.IsAlways() ||
332
-				(container.hostConfig.RestartPolicy.IsOnFailure() && container.ExitCode != 0) {
322
+			// check the restart policy on the containers and restart any container with
323
+			// the restart policy of "always"
324
+			if daemon.config.AutoRestart && container.shouldRestart() {
333 325
 				logrus.Debugf("Starting container %s", container.ID)
334 326
 
335 327
 				if err := container.Start(); err != nil {
336 328
 					logrus.Debugf("Failed to start container %s: %s", container.ID, err)
337 329
 				}
338 330
 			}
339
-		}
331
+		}(c.container, c.registered)
340 332
 	}
333
+	group.Wait()
341 334
 
342 335
 	if !debug {
343 336
 		if logrus.GetLevel() == logrus.InfoLevel {
... ...
@@ -535,6 +540,7 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID
535 535
 			ExecDriver:      daemon.execDriver.Name(),
536 536
 			State:           NewState(),
537 537
 			execCommands:    newExecStore(),
538
+			MountPoints:     map[string]*mountPoint{},
538 539
 		},
539 540
 	}
540 541
 	container.root = daemon.containerRoot(container.ID)
... ...
@@ -785,15 +791,11 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
785 785
 		return nil, err
786 786
 	}
787 787
 
788
-	volumesDriver, err := graphdriver.GetDriver("vfs", config.Root, config.GraphOptions)
789
-	if err != nil {
790
-		return nil, err
791
-	}
792
-
793
-	volumes, err := volumes.NewRepository(filepath.Join(config.Root, "volumes"), volumesDriver)
788
+	volumesDriver, err := local.New(filepath.Join(config.Root, defaultVolumesPathName))
794 789
 	if err != nil {
795 790
 		return nil, err
796 791
 	}
792
+	volumedrivers.Register(volumesDriver, volumesDriver.Name())
797 793
 
798 794
 	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
799 795
 	if err != nil {
... ...
@@ -872,7 +874,6 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
872 872
 	d.repositories = repositories
873 873
 	d.idIndex = truncindex.NewTruncIndex([]string{})
874 874
 	d.sysInfo = sysInfo
875
-	d.volumes = volumes
876 875
 	d.config = config
877 876
 	d.sysInitPath = sysInitPath
878 877
 	d.execDriver = ed
... ...
@@ -880,6 +881,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
880 880
 	d.defaultLogConfig = config.LogConfig
881 881
 	d.RegistryService = registryService
882 882
 	d.EventsService = eventsService
883
+	d.root = config.Root
883 884
 
884 885
 	if err := d.restore(); err != nil {
885 886
 		return nil, err
... ...
@@ -1218,6 +1220,10 @@ func (daemon *Daemon) verifyHostConfig(hostConfig *runconfig.HostConfig) ([]stri
1218 1218
 }
1219 1219
 
1220 1220
 func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
1221
+	if err := daemon.registerMountPoints(container, hostConfig); err != nil {
1222
+		return err
1223
+	}
1224
+
1221 1225
 	container.Lock()
1222 1226
 	defer container.Unlock()
1223 1227
 	if err := parseSecurityOpt(container, hostConfig); err != nil {
... ...
@@ -1231,6 +1237,5 @@ func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.
1231 1231
 
1232 1232
 	container.hostConfig = hostConfig
1233 1233
 	container.toDisk()
1234
-
1235 1234
 	return nil
1236 1235
 }
... ...
@@ -70,22 +70,14 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error
70 70
 			}
71 71
 		}
72 72
 		container.LogEvent("destroy")
73
+
73 74
 		if config.RemoveVolume {
74
-			daemon.DeleteVolumes(container.VolumePaths())
75
+			container.removeMountPoints()
75 76
 		}
76 77
 	}
77 78
 	return nil
78 79
 }
79 80
 
80
-func (daemon *Daemon) DeleteVolumes(volumeIDs map[string]struct{}) {
81
-	for id := range volumeIDs {
82
-		if err := daemon.volumes.Delete(id); err != nil {
83
-			logrus.Infof("%s", err)
84
-			continue
85
-		}
86
-	}
87
-}
88
-
89 81
 func (daemon *Daemon) Rm(container *Container) (err error) {
90 82
 	return daemon.commonRm(container, false)
91 83
 }
... ...
@@ -134,7 +126,6 @@ func (daemon *Daemon) commonRm(container *Container, forceRemove bool) (err erro
134 134
 		}
135 135
 	}()
136 136
 
137
-	container.derefVolumes()
138 137
 	if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
139 138
 		logrus.Debugf("Unable to remove container from link graph: %s", err)
140 139
 	}
... ...
@@ -162,3 +153,7 @@ func (daemon *Daemon) commonRm(container *Container, forceRemove bool) (err erro
162 162
 
163 163
 	return nil
164 164
 }
165
+
166
+func (daemon *Daemon) DeleteVolumes(c *Container) error {
167
+	return c.removeMountPoints()
168
+}
... ...
@@ -10,6 +10,10 @@ import (
10 10
 type ContainerJSONRaw struct {
11 11
 	*Container
12 12
 	HostConfig *runconfig.HostConfig
13
+
14
+	// Unused fields for backward compatibility with API versions < 1.12.
15
+	Volumes   map[string]string
16
+	VolumesRW map[string]bool
13 17
 }
14 18
 
15 19
 func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error) {
... ...
@@ -48,6 +52,14 @@ func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error
48 48
 		FinishedAt: container.State.FinishedAt,
49 49
 	}
50 50
 
51
+	volumes := make(map[string]string)
52
+	volumesRW := make(map[string]bool)
53
+
54
+	for _, m := range container.MountPoints {
55
+		volumes[m.Destination] = m.Path()
56
+		volumesRW[m.Destination] = m.RW
57
+	}
58
+
51 59
 	contJSON := &types.ContainerJSON{
52 60
 		Id:              container.ID,
53 61
 		Created:         container.Created,
... ...
@@ -67,8 +79,8 @@ func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error
67 67
 		ExecDriver:      container.ExecDriver,
68 68
 		MountLabel:      container.MountLabel,
69 69
 		ProcessLabel:    container.ProcessLabel,
70
-		Volumes:         container.Volumes,
71
-		VolumesRW:       container.VolumesRW,
70
+		Volumes:         volumes,
71
+		VolumesRW:       volumesRW,
72 72
 		AppArmorProfile: container.AppArmorProfile,
73 73
 		ExecIDs:         container.GetExecIDs(),
74 74
 		HostConfig:      &hostConfig,
... ...
@@ -1,213 +1,103 @@
1 1
 package daemon
2 2
 
3 3
 import (
4
+	"encoding/json"
4 5
 	"fmt"
5 6
 	"io/ioutil"
6 7
 	"os"
7 8
 	"path/filepath"
8
-	"sort"
9 9
 	"strings"
10 10
 
11
-	"github.com/Sirupsen/logrus"
12 11
 	"github.com/docker/docker/daemon/execdriver"
13 12
 	"github.com/docker/docker/pkg/chrootarchive"
14
-	"github.com/docker/docker/pkg/mount"
15
-	"github.com/docker/docker/pkg/symlink"
13
+	"github.com/docker/docker/runconfig"
14
+	"github.com/docker/docker/volume"
16 15
 )
17 16
 
18
-type volumeMount struct {
19
-	containerPath string
20
-	hostPath      string
21
-	writable      bool
22
-	copyData      bool
23
-	from          string
17
+type mountPoint struct {
18
+	Name        string
19
+	Destination string
20
+	Driver      string
21
+	RW          bool
22
+	Volume      volume.Volume `json:"-"`
23
+	Source      string
24 24
 }
25 25
 
26
-func (container *Container) createVolumes() error {
27
-	mounts := make(map[string]*volumeMount)
28
-
29
-	// get the normal volumes
30
-	for path := range container.Config.Volumes {
31
-		path = filepath.Clean(path)
32
-		// skip if there is already a volume for this container path
33
-		if _, exists := container.Volumes[path]; exists {
34
-			continue
35
-		}
36
-
37
-		realPath, err := container.GetResourcePath(path)
38
-		if err != nil {
39
-			return err
40
-		}
41
-		if stat, err := os.Stat(realPath); err == nil {
42
-			if !stat.IsDir() {
43
-				return fmt.Errorf("can't mount to container path, file exists - %s", path)
44
-			}
45
-		}
46
-
47
-		mnt := &volumeMount{
48
-			containerPath: path,
49
-			writable:      true,
50
-			copyData:      true,
51
-		}
52
-		mounts[mnt.containerPath] = mnt
53
-	}
54
-
55
-	// Get all the bind mounts
56
-	// track bind paths separately due to #10618
57
-	bindPaths := make(map[string]struct{})
58
-	for _, spec := range container.hostConfig.Binds {
59
-		mnt, err := parseBindMountSpec(spec)
60
-		if err != nil {
61
-			return err
62
-		}
63
-
64
-		// #10618
65
-		if _, exists := bindPaths[mnt.containerPath]; exists {
66
-			return fmt.Errorf("Duplicate volume mount %s", mnt.containerPath)
67
-		}
68
-
69
-		bindPaths[mnt.containerPath] = struct{}{}
70
-		mounts[mnt.containerPath] = mnt
71
-	}
72
-
73
-	// Get volumes from
74
-	for _, from := range container.hostConfig.VolumesFrom {
75
-		cID, mode, err := parseVolumesFromSpec(from)
76
-		if err != nil {
77
-			return err
78
-		}
79
-		if _, exists := container.AppliedVolumesFrom[cID]; exists {
80
-			// skip since it's already been applied
81
-			continue
82
-		}
83
-
84
-		c, err := container.daemon.Get(cID)
85
-		if err != nil {
86
-			return fmt.Errorf("container %s not found, impossible to mount its volumes", cID)
87
-		}
88
-
89
-		for _, mnt := range c.volumeMounts() {
90
-			mnt.writable = mnt.writable && (mode == "rw")
91
-			mnt.from = cID
92
-			mounts[mnt.containerPath] = mnt
93
-		}
26
+func (m *mountPoint) Setup() (string, error) {
27
+	if m.Volume != nil {
28
+		return m.Volume.Mount()
94 29
 	}
95 30
 
96
-	for _, mnt := range mounts {
97
-		containerMntPath, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, mnt.containerPath), container.basefs)
98
-		if err != nil {
99
-			return err
100
-		}
101
-
102
-		// Create the actual volume
103
-		v, err := container.daemon.volumes.FindOrCreateVolume(mnt.hostPath, mnt.writable)
104
-		if err != nil {
105
-			return err
106
-		}
107
-
108
-		container.VolumesRW[mnt.containerPath] = mnt.writable
109
-		container.Volumes[mnt.containerPath] = v.Path
110
-		v.AddContainer(container.ID)
111
-		if mnt.from != "" {
112
-			container.AppliedVolumesFrom[mnt.from] = struct{}{}
113
-		}
114
-
115
-		if mnt.writable && mnt.copyData {
116
-			// Copy whatever is in the container at the containerPath to the volume
117
-			copyExistingContents(containerMntPath, v.Path)
31
+	if len(m.Source) > 0 {
32
+		if _, err := os.Stat(m.Source); err != nil {
33
+			if !os.IsNotExist(err) {
34
+				return "", err
35
+			}
36
+			if err := os.MkdirAll(m.Source, 0755); err != nil {
37
+				return "", err
38
+			}
118 39
 		}
40
+		return m.Source, nil
119 41
 	}
120 42
 
121
-	return nil
122
-}
123
-
124
-// sortedVolumeMounts returns the list of container volume mount points sorted in lexicographic order
125
-func (container *Container) sortedVolumeMounts() []string {
126
-	var mountPaths []string
127
-	for path := range container.Volumes {
128
-		mountPaths = append(mountPaths, path)
129
-	}
130
-
131
-	sort.Strings(mountPaths)
132
-	return mountPaths
43
+	return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined")
133 44
 }
134 45
 
135
-func (container *Container) VolumePaths() map[string]struct{} {
136
-	var paths = make(map[string]struct{})
137
-	for _, path := range container.Volumes {
138
-		paths[path] = struct{}{}
46
+func (m *mountPoint) Path() string {
47
+	if m.Volume != nil {
48
+		return m.Volume.Path()
139 49
 	}
140
-	return paths
141
-}
142
-
143
-func (container *Container) registerVolumes() {
144
-	for path := range container.VolumePaths() {
145
-		if v := container.daemon.volumes.Get(path); v != nil {
146
-			v.AddContainer(container.ID)
147
-			continue
148
-		}
149 50
 
150
-		// if container was created with an old daemon, this volume may not be registered so we need to make sure it gets registered
151
-		writable := true
152
-		if rw, exists := container.VolumesRW[path]; exists {
153
-			writable = rw
154
-		}
155
-		v, err := container.daemon.volumes.FindOrCreateVolume(path, writable)
156
-		if err != nil {
157
-			logrus.Debugf("error registering volume %s: %v", path, err)
158
-			continue
159
-		}
160
-		v.AddContainer(container.ID)
161
-	}
51
+	return m.Source
162 52
 }
163 53
 
164
-func (container *Container) derefVolumes() {
165
-	for path := range container.VolumePaths() {
166
-		vol := container.daemon.volumes.Get(path)
167
-		if vol == nil {
168
-			logrus.Debugf("Volume %s was not found and could not be dereferenced", path)
169
-			continue
170
-		}
171
-		vol.RemoveContainer(container.ID)
54
+func parseBindMount(spec string, config *runconfig.Config) (*mountPoint, error) {
55
+	bind := &mountPoint{
56
+		RW: true,
172 57
 	}
173
-}
174
-
175
-func parseBindMountSpec(spec string) (*volumeMount, error) {
176 58
 	arr := strings.Split(spec, ":")
177 59
 
178
-	mnt := &volumeMount{}
179 60
 	switch len(arr) {
180 61
 	case 2:
181
-		mnt.hostPath = arr[0]
182
-		mnt.containerPath = arr[1]
183
-		mnt.writable = true
62
+		bind.Destination = arr[1]
184 63
 	case 3:
185
-		mnt.hostPath = arr[0]
186
-		mnt.containerPath = arr[1]
187
-		mnt.writable = validMountMode(arr[2]) && arr[2] == "rw"
64
+		bind.Destination = arr[1]
65
+		if !validMountMode(arr[2]) {
66
+			return nil, fmt.Errorf("invalid mode for volumes-from: %s", arr[2])
67
+		}
68
+		bind.RW = arr[2] == "rw"
188 69
 	default:
189 70
 		return nil, fmt.Errorf("Invalid volume specification: %s", spec)
190 71
 	}
191 72
 
192
-	if !filepath.IsAbs(mnt.hostPath) {
193
-		return nil, fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", mnt.hostPath)
73
+	name, source, err := parseVolumeSource(arr[0], config)
74
+	if err != nil {
75
+		return nil, err
194 76
 	}
195 77
 
196
-	mnt.hostPath = filepath.Clean(mnt.hostPath)
197
-	mnt.containerPath = filepath.Clean(mnt.containerPath)
198
-	return mnt, nil
78
+	if len(source) == 0 {
79
+		bind.Driver = config.VolumeDriver
80
+		if len(bind.Driver) == 0 {
81
+			bind.Driver = volume.DefaultDriverName
82
+		}
83
+	} else {
84
+		bind.Source = filepath.Clean(source)
85
+	}
86
+
87
+	bind.Name = name
88
+	bind.Destination = filepath.Clean(bind.Destination)
89
+	return bind, nil
199 90
 }
200 91
 
201
-func parseVolumesFromSpec(spec string) (string, string, error) {
202
-	specParts := strings.SplitN(spec, ":", 2)
203
-	if len(specParts) == 0 {
92
+func parseVolumesFrom(spec string) (string, string, error) {
93
+	if len(spec) == 0 {
204 94
 		return "", "", fmt.Errorf("malformed volumes-from specification: %s", spec)
205 95
 	}
206 96
 
207
-	var (
208
-		id   = specParts[0]
209
-		mode = "rw"
210
-	)
97
+	specParts := strings.SplitN(spec, ":", 2)
98
+	id := specParts[0]
99
+	mode := "rw"
100
+
211 101
 	if len(specParts) == 2 {
212 102
 		mode = specParts[1]
213 103
 		if !validMountMode(mode) {
... ...
@@ -222,7 +112,6 @@ func validMountMode(mode string) bool {
222 222
 		"rw": true,
223 223
 		"ro": true,
224 224
 	}
225
-
226 225
 	return validModes[mode]
227 226
 }
228 227
 
... ...
@@ -240,34 +129,16 @@ func (container *Container) specialMounts() []execdriver.Mount {
240 240
 	return mounts
241 241
 }
242 242
 
243
-func (container *Container) volumeMounts() map[string]*volumeMount {
244
-	mounts := make(map[string]*volumeMount)
245
-
246
-	for containerPath, path := range container.Volumes {
247
-		v := container.daemon.volumes.Get(path)
248
-		if v == nil {
249
-			// This should never happen
250
-			logrus.Debugf("reference by container %s to non-existent volume path %s", container.ID, path)
251
-			continue
252
-		}
253
-		mounts[containerPath] = &volumeMount{hostPath: path, containerPath: containerPath, writable: container.VolumesRW[containerPath]}
254
-	}
255
-
256
-	return mounts
257
-}
258
-
259 243
 func copyExistingContents(source, destination string) error {
260 244
 	volList, err := ioutil.ReadDir(source)
261 245
 	if err != nil {
262 246
 		return err
263 247
 	}
264
-
265 248
 	if len(volList) > 0 {
266 249
 		srcList, err := ioutil.ReadDir(destination)
267 250
 		if err != nil {
268 251
 			return err
269 252
 		}
270
-
271 253
 		if len(srcList) == 0 {
272 254
 			// If the source volume is empty copy files from the root into the volume
273 255
 			if err := chrootarchive.CopyWithTar(source, destination); err != nil {
... ...
@@ -275,60 +146,136 @@ func copyExistingContents(source, destination string) error {
275 275
 			}
276 276
 		}
277 277
 	}
278
-
279 278
 	return copyOwnership(source, destination)
280 279
 }
281 280
 
282
-func (container *Container) mountVolumes() error {
283
-	for dest, source := range container.Volumes {
284
-		v := container.daemon.volumes.Get(source)
285
-		if v == nil {
286
-			return fmt.Errorf("could not find volume for %s:%s, impossible to mount", source, dest)
281
+// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
282
+// It follows this sequence to decide what to mount at each final destination:
283
+//
284
+// 1. Select the container's previously configured mount points, if any.
285
+// 2. Select the volumes mounted from other containers. These override previously configured mount point destinations.
286
+// 3. Select the bind mounts set by the client. These override previously configured mount point destinations.
287
+func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error {
288
+	binds := map[string]bool{}
289
+	mountPoints := map[string]*mountPoint{}
290
+
291
+	// 1. Read already configured mount points.
292
+	for name, point := range container.MountPoints {
293
+		mountPoints[name] = point
294
+	}
295
+
296
+	// 2. Read volumes from other containers.
297
+	for _, v := range hostConfig.VolumesFrom {
298
+		containerID, mode, err := parseVolumesFrom(v)
299
+		if err != nil {
300
+			return err
287 301
 		}
288 302
 
289
-		destPath, err := container.GetResourcePath(dest)
303
+		c, err := daemon.Get(containerID)
290 304
 		if err != nil {
291 305
 			return err
292 306
 		}
293 307
 
294
-		if err := mount.Mount(source, destPath, "bind", "rbind,rw"); err != nil {
295
-			return fmt.Errorf("error while mounting volume %s: %v", source, err)
308
+		for _, m := range c.MountPoints {
309
+			cp := m
310
+			cp.RW = m.RW && mode != "ro"
311
+
312
+			if len(m.Source) == 0 {
313
+				v, err := createVolume(m.Name, m.Driver)
314
+				if err != nil {
315
+					return err
316
+				}
317
+				cp.Volume = v
318
+			}
319
+
320
+			mountPoints[cp.Destination] = cp
296 321
 		}
297 322
 	}
298 323
 
299
-	for _, mnt := range container.specialMounts() {
300
-		destPath, err := container.GetResourcePath(mnt.Destination)
324
+	// 3. Read bind mounts
325
+	for _, b := range hostConfig.Binds {
326
+		// #10618
327
+		bind, err := parseBindMount(b, container.Config)
301 328
 		if err != nil {
302 329
 			return err
303 330
 		}
304
-		if err := mount.Mount(mnt.Source, destPath, "bind", "bind,rw"); err != nil {
305
-			return fmt.Errorf("error while mounting volume %s: %v", mnt.Source, err)
331
+
332
+		if binds[bind.Destination] {
333
+			return fmt.Errorf("Duplicate bind mount %s", bind.Destination)
306 334
 		}
335
+
336
+		if len(bind.Name) > 0 && len(bind.Driver) > 0 {
337
+			v, err := createVolume(bind.Name, bind.Driver)
338
+			if err != nil {
339
+				return err
340
+			}
341
+			bind.Volume = v
342
+		}
343
+
344
+		binds[bind.Destination] = true
345
+		mountPoints[bind.Destination] = bind
307 346
 	}
347
+
348
+	container.MountPoints = mountPoints
349
+
308 350
 	return nil
309 351
 }
310 352
 
311
-func (container *Container) unmountVolumes() {
312
-	for dest := range container.Volumes {
313
-		destPath, err := container.GetResourcePath(dest)
314
-		if err != nil {
315
-			logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
316
-			continue
317
-		}
318
-		if err := mount.ForceUnmount(destPath); err != nil {
319
-			logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
320
-			continue
353
+// verifyOldVolumesInfo ports volumes configured for containers created before Docker 1.7.
354
+// It reads the container configuration and creates valid mount points for the old volumes.
355
+func (daemon *Daemon) verifyOldVolumesInfo(container *Container) error {
356
+	jsonPath, err := container.jsonPath()
357
+	if err != nil {
358
+		return err
359
+	}
360
+	f, err := os.Open(jsonPath)
361
+	if err != nil {
362
+		if os.IsNotExist(err) {
363
+			return nil
321 364
 		}
365
+		return err
322 366
 	}
323 367
 
324
-	for _, mnt := range container.specialMounts() {
325
-		destPath, err := container.GetResourcePath(mnt.Destination)
326
-		if err != nil {
327
-			logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
328
-			continue
329
-		}
330
-		if err := mount.ForceUnmount(destPath); err != nil {
331
-			logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
368
+	type oldContVolCfg struct {
369
+		Volumes   map[string]string
370
+		VolumesRW map[string]bool
371
+	}
372
+
373
+	vols := oldContVolCfg{
374
+		Volumes:   make(map[string]string),
375
+		VolumesRW: make(map[string]bool),
376
+	}
377
+	if err := json.NewDecoder(f).Decode(&vols); err != nil {
378
+		return err
379
+	}
380
+
381
+	for destination, hostPath := range vols.Volumes {
382
+		vfsPath := filepath.Join(daemon.root, "vfs", "dir")
383
+
384
+		if strings.HasPrefix(hostPath, vfsPath) {
385
+			id := filepath.Base(hostPath)
386
+
387
+			rw := vols.VolumesRW != nil && vols.VolumesRW[destination]
388
+			container.addLocalMountPoint(id, destination, rw)
332 389
 		}
333 390
 	}
391
+
392
+	return container.ToDisk()
393
+}
394
+
395
+func createVolume(name, driverName string) (volume.Volume, error) {
396
+	vd, err := getVolumeDriver(driverName)
397
+
398
+	if err != nil {
399
+		return nil, err
400
+	}
401
+	return vd.Create(name)
402
+}
403
+
404
+func removeVolume(v volume.Volume) error {
405
+	vd, err := getVolumeDriver(v.DriverName())
406
+	if err != nil {
407
+		return nil
408
+	}
409
+	return vd.Remove(v)
334 410
 }
335 411
new file mode 100644
... ...
@@ -0,0 +1,26 @@
0
+// +build experimental
1
+
2
+package daemon
3
+
4
+import (
5
+	"path/filepath"
6
+
7
+	"github.com/docker/docker/runconfig"
8
+	"github.com/docker/docker/volume"
9
+	"github.com/docker/docker/volume/drivers"
10
+)
11
+
12
+func getVolumeDriver(name string) (volume.Driver, error) {
13
+	if name == "" {
14
+		name = volume.DefaultDriverName
15
+	}
16
+	return volumedrivers.Lookup(name)
17
+}
18
+
19
+func parseVolumeSource(spec string, config *runconfig.Config) (string, string, error) {
20
+	if !filepath.IsAbs(spec) {
21
+		return spec, "", nil
22
+	}
23
+
24
+	return "", spec, nil
25
+}
0 26
new file mode 100644
... ...
@@ -0,0 +1,86 @@
0
+// +build experimental
1
+
2
+package daemon
3
+
4
+import (
5
+	"testing"
6
+
7
+	"github.com/docker/docker/runconfig"
8
+	"github.com/docker/docker/volume"
9
+	"github.com/docker/docker/volume/drivers"
10
+)
11
+
12
+type fakeDriver struct{}
13
+
14
+func (fakeDriver) Name() string                              { return "fake" }
15
+func (fakeDriver) Create(name string) (volume.Volume, error) { return nil, nil }
16
+func (fakeDriver) Remove(v volume.Volume) error              { return nil }
17
+
18
+func TestGetVolumeDriver(t *testing.T) {
19
+	_, err := getVolumeDriver("missing")
20
+	if err == nil {
21
+		t.Fatal("Expected error, was nil")
22
+	}
23
+
24
+	volumedrivers.Register(fakeDriver{}, "fake")
25
+	d, err := getVolumeDriver("fake")
26
+	if err != nil {
27
+		t.Fatal(err)
28
+	}
29
+	if d.Name() != "fake" {
30
+		t.Fatalf("Expected fake driver, got %s\n", d.Name())
31
+	}
32
+}
33
+
34
+func TestParseBindMount(t *testing.T) {
35
+	cases := []struct {
36
+		bind      string
37
+		driver    string
38
+		expDest   string
39
+		expSource string
40
+		expName   string
41
+		expDriver string
42
+		expRW     bool
43
+		fail      bool
44
+	}{
45
+		{"/tmp:/tmp", "", "/tmp", "/tmp", "", "", true, false},
46
+		{"/tmp:/tmp:ro", "", "/tmp", "/tmp", "", "", false, false},
47
+		{"/tmp:/tmp:rw", "", "/tmp", "/tmp", "", "", true, false},
48
+		{"/tmp:/tmp:foo", "", "/tmp", "/tmp", "", "", false, true},
49
+		{"name:/tmp", "", "/tmp", "", "name", "local", true, false},
50
+		{"name:/tmp", "external", "/tmp", "", "name", "external", true, false},
51
+		{"name:/tmp:ro", "local", "/tmp", "", "name", "local", false, false},
52
+		{"local/name:/tmp:rw", "", "/tmp", "", "local/name", "local", true, false},
53
+	}
54
+
55
+	for _, c := range cases {
56
+		conf := &runconfig.Config{VolumeDriver: c.driver}
57
+		m, err := parseBindMount(c.bind, conf)
58
+		if c.fail {
59
+			if err == nil {
60
+				t.Fatalf("Expected error, was nil, for spec %s\n", c.bind)
61
+			}
62
+			continue
63
+		}
64
+
65
+		if m.Destination != c.expDest {
66
+			t.Fatalf("Expected destination %s, was %s, for spec %s\n", c.expDest, m.Destination, c.bind)
67
+		}
68
+
69
+		if m.Source != c.expSource {
70
+			t.Fatalf("Expected source %s, was %s, for spec %s\n", c.expSource, m.Source, c.bind)
71
+		}
72
+
73
+		if m.Name != c.expName {
74
+			t.Fatalf("Expected name %s, was %s for spec %s\n", c.expName, m.Name, c.bind)
75
+		}
76
+
77
+		if m.Driver != c.expDriver {
78
+			t.Fatalf("Expected driver %s, was %s, for spec %s\n", c.expDriver, m.Driver, c.bind)
79
+		}
80
+
81
+		if m.RW != c.expRW {
82
+			t.Fatalf("Expected RW %v, was %v for spec %s\n", c.expRW, m.RW, c.bind)
83
+		}
84
+	}
85
+}
... ...
@@ -4,6 +4,9 @@ package daemon
4 4
 
5 5
 import (
6 6
 	"os"
7
+	"path/filepath"
8
+	"sort"
9
+	"strings"
7 10
 
8 11
 	"github.com/docker/docker/daemon/execdriver"
9 12
 	"github.com/docker/docker/pkg/system"
... ...
@@ -24,36 +27,44 @@ func copyOwnership(source, destination string) error {
24 24
 	return os.Chmod(destination, os.FileMode(stat.Mode()))
25 25
 }
26 26
 
27
-func (container *Container) prepareVolumes() error {
28
-	if container.Volumes == nil || len(container.Volumes) == 0 {
29
-		container.Volumes = make(map[string]string)
30
-		container.VolumesRW = make(map[string]bool)
31
-	}
27
+func (container *Container) setupMounts() ([]execdriver.Mount, error) {
28
+	var mounts []execdriver.Mount
29
+	for _, m := range container.MountPoints {
30
+		path, err := m.Setup()
31
+		if err != nil {
32
+			return nil, err
33
+		}
32 34
 
33
-	if len(container.hostConfig.VolumesFrom) > 0 && container.AppliedVolumesFrom == nil {
34
-		container.AppliedVolumesFrom = make(map[string]struct{})
35
+		mounts = append(mounts, execdriver.Mount{
36
+			Source:      path,
37
+			Destination: m.Destination,
38
+			Writable:    m.RW,
39
+		})
35 40
 	}
36
-	return container.createVolumes()
41
+
42
+	mounts = sortMounts(mounts)
43
+	return append(mounts, container.networkMounts()...), nil
37 44
 }
38 45
 
39
-func (container *Container) setupMounts() error {
40
-	mounts := []execdriver.Mount{}
46
+func sortMounts(m []execdriver.Mount) []execdriver.Mount {
47
+	sort.Sort(mounts(m))
48
+	return m
49
+}
41 50
 
42
-	// Mount user specified volumes
43
-	// Note, these are not private because you may want propagation of (un)mounts from host
44
-	// volumes. For instance if you use -v /usr:/usr and the host later mounts /usr/share you
45
-	// want this new mount in the container
46
-	// These mounts must be ordered based on the length of the path that it is being mounted to (lexicographic)
47
-	for _, path := range container.sortedVolumeMounts() {
48
-		mounts = append(mounts, execdriver.Mount{
49
-			Source:      container.Volumes[path],
50
-			Destination: path,
51
-			Writable:    container.VolumesRW[path],
52
-		})
53
-	}
51
+type mounts []execdriver.Mount
52
+
53
+func (m mounts) Len() int {
54
+	return len(m)
55
+}
54 56
 
55
-	mounts = append(mounts, container.specialMounts()...)
57
+func (m mounts) Less(i, j int) bool {
58
+	return m.parts(i) < m.parts(j)
59
+}
60
+
61
+func (m mounts) Swap(i, j int) {
62
+	m[i], m[j] = m[j], m[i]
63
+}
56 64
 
57
-	container.command.Mounts = mounts
58
-	return nil
65
+func (m mounts) parts(i int) int {
66
+	return len(strings.Split(filepath.Clean(m[i].Destination), string(os.PathSeparator)))
59 67
 }
60 68
new file mode 100644
... ...
@@ -0,0 +1,24 @@
0
+// +build !experimental
1
+
2
+package daemon
3
+
4
+import (
5
+	"fmt"
6
+	"path/filepath"
7
+
8
+	"github.com/docker/docker/runconfig"
9
+	"github.com/docker/docker/volume"
10
+	"github.com/docker/docker/volume/drivers"
11
+)
12
+
13
+func getVolumeDriver(_ string) (volume.Driver, error) {
14
+	return volumedrivers.Lookup(volume.DefaultDriverName)
15
+}
16
+
17
+func parseVolumeSource(spec string, _ *runconfig.Config) (string, string, error) {
18
+	if !filepath.IsAbs(spec) {
19
+		return "", "", fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", spec)
20
+	}
21
+
22
+	return "", spec, nil
23
+}
0 24
new file mode 100644
... ...
@@ -0,0 +1,81 @@
0
+// +build !experimental
1
+
2
+package daemon
3
+
4
+import (
5
+	"io/ioutil"
6
+	"os"
7
+	"testing"
8
+
9
+	"github.com/docker/docker/runconfig"
10
+	"github.com/docker/docker/volume"
11
+	"github.com/docker/docker/volume/drivers"
12
+	"github.com/docker/docker/volume/local"
13
+)
14
+
15
+func TestGetVolumeDefaultDriver(t *testing.T) {
16
+	tmp, err := ioutil.TempDir("", "volume-test-")
17
+	if err != nil {
18
+		t.Fatal(err)
19
+	}
20
+	defer os.RemoveAll(tmp)
21
+
22
+	l, err := local.New(tmp)
23
+	if err != nil {
24
+		t.Fatal(err)
25
+	}
26
+	volumedrivers.Register(l, volume.DefaultDriverName)
27
+	d, err := getVolumeDriver("missing")
28
+	if err != nil {
29
+		t.Fatal(err)
30
+	}
31
+
32
+	if d.Name() != volume.DefaultDriverName {
33
+		t.Fatalf("Expected local driver, was %s\n", d.Name())
34
+	}
35
+}
36
+
37
+func TestParseBindMount(t *testing.T) {
38
+	cases := []struct {
39
+		bind      string
40
+		expDest   string
41
+		expSource string
42
+		expName   string
43
+		expRW     bool
44
+		fail      bool
45
+	}{
46
+		{"/tmp:/tmp", "/tmp", "/tmp", "", true, false},
47
+		{"/tmp:/tmp:ro", "/tmp", "/tmp", "", false, false},
48
+		{"/tmp:/tmp:rw", "/tmp", "/tmp", "", true, false},
49
+		{"/tmp:/tmp:foo", "/tmp", "/tmp", "", false, true},
50
+		{"name:/tmp", "", "", "", false, true},
51
+		{"local/name:/tmp:rw", "", "", "", true, true},
52
+	}
53
+
54
+	for _, c := range cases {
55
+		conf := &runconfig.Config{}
56
+		m, err := parseBindMount(c.bind, conf)
57
+		if c.fail {
58
+			if err == nil {
59
+				t.Fatalf("Expected error, was nil, for spec %s\n", c.bind)
60
+			}
61
+			continue
62
+		}
63
+
64
+		if m.Destination != c.expDest {
65
+			t.Fatalf("Expected destination %s, was %s, for spec %s\n", c.expDest, m.Destination, c.bind)
66
+		}
67
+
68
+		if m.Source != c.expSource {
69
+			t.Fatalf("Expected source %s, was %s, for spec %s\n", c.expSource, m.Source, c.bind)
70
+		}
71
+
72
+		if m.Name != c.expName {
73
+			t.Fatalf("Expected name %s, was %s for spec %s\n", c.expName, m.Name, c.bind)
74
+		}
75
+
76
+		if m.RW != c.expRW {
77
+			t.Fatalf("Expected RW %v, was %v for spec %s\n", c.expRW, m.RW, c.bind)
78
+		}
79
+	}
80
+}
0 81
new file mode 100644
... ...
@@ -0,0 +1,35 @@
0
+package daemon
1
+
2
+import "testing"
3
+
4
+func TestParseVolumeFrom(t *testing.T) {
5
+	cases := []struct {
6
+		spec    string
7
+		expId   string
8
+		expMode string
9
+		fail    bool
10
+	}{
11
+		{"", "", "", true},
12
+		{"foobar", "foobar", "rw", false},
13
+		{"foobar:rw", "foobar", "rw", false},
14
+		{"foobar:ro", "foobar", "ro", false},
15
+		{"foobar:baz", "", "", true},
16
+	}
17
+
18
+	for _, c := range cases {
19
+		id, mode, err := parseVolumesFrom(c.spec)
20
+		if c.fail {
21
+			if err == nil {
22
+				t.Fatalf("Expected error, was nil, for spec %s\n", c.spec)
23
+			}
24
+			continue
25
+		}
26
+
27
+		if id != c.expId {
28
+			t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expId, id, c.spec)
29
+		}
30
+		if mode != c.expMode {
31
+			t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec)
32
+		}
33
+	}
34
+}
... ...
@@ -2,15 +2,13 @@
2 2
 
3 3
 package daemon
4 4
 
5
+import "github.com/docker/docker/daemon/execdriver"
6
+
5 7
 // Not supported on Windows
6 8
 func copyOwnership(source, destination string) error {
7
-	return nil
8
-}
9
-
10
-func (container *Container) prepareVolumes() error {
11
-	return nil
9
+	return nil
12 10
 }
13 11
 
14
-func (container *Container) setupMounts() error {
12
+func (container *Container) setupMounts() ([]execdriver.Mount, error) {
15
-	return nil
13
+	return nil, nil
16 14
 }
17 15
new file mode 100644
... ...
@@ -0,0 +1,223 @@
0
+page_title: Plugin API documentation
1
+page_description: Documentation for writing a Docker plugin.
2
+page_keywords: docker, plugins, api, extensions
3
+
4
+# Docker Plugin API
5
+
6
+Docker plugins are out-of-process extensions which add capabilities to the
7
+Docker Engine.
8
+
9
+This page is intended for people who want to develop their own Docker plugin.
10
+If you just want to learn about or use Docker plugins, look
11
+[here](/userguide/plugins).
12
+
13
+## What plugins are
14
+
15
+A plugin is a process running on the same docker host as the docker daemon,
16
+which registers itself by placing a file in `/usr/share/docker/plugins` (the
17
+"plugin directory").
18
+
19
+Plugins have human-readable names, which are short, lowercase strings. For
20
+example, `flocker` or `weave`.
21
+
22
+Plugins can run inside or outside containers. Currently running them outside
23
+containers is recommended.
24
+
25
+## Plugin discovery
26
+
27
+Docker discovers plugins by looking for them in the plugin directory whenever a
28
+user or container tries to use one by name.
29
+
30
+There are two types of files which can be put in the plugin directory.
31
+
32
+* `.sock` files are UNIX domain sockets.
33
+* `.spec` files are text files containing a URL, such as `unix:///other.sock`.
34
+
35
+The name of the file (excluding the extension) determines the plugin name.
36
+
37
+For example, the `flocker` plugin might create a UNIX socket at
38
+`/usr/share/docker/plugins/flocker.sock`.
39
+
40
+Plugins must be run locally on the same machine as the Docker daemon.  UNIX
41
+domain sockets are strongly encouraged for security reasons.
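For instance, a hypothetical plugin named `myplugin` that listens on a socket outside the plugin directory could be registered with a one-line `.spec` file (the name and socket path are illustrative):

```
$ cat /usr/share/docker/plugins/myplugin.spec
unix:///run/myplugin/myplugin.sock
```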
42
+
43
+## Plugin lifecycle
44
+
45
+Plugins should be started before Docker, and stopped after Docker.  For
46
+example, when packaging a plugin for a platform which supports `systemd`, you
47
+might use [`systemd` dependencies](
48
+http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to
49
+manage startup and shutdown order.
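As a sketch, a unit file for a hypothetical `myplugin` binary could express that ordering like this (unit and binary names are illustrative):

```
[Unit]
Description=My Docker volume plugin
Before=docker.service

[Service]
ExecStart=/usr/bin/myplugin

[Install]
WantedBy=multi-user.target
```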
50
+
51
+When upgrading a plugin, you should first stop the Docker daemon, upgrade the
52
+plugin, then start Docker again.
53
+
54
+If a plugin is packaged as a container, this may cause issues. Plugins as
55
+containers are currently considered experimental due to these shutdown/startup
56
+ordering issues. These issues are mitigated by plugin retries (see below).
57
+
58
+## Plugin activation
59
+
60
+When a plugin is first referred to -- either by a user referring to it by name
61
+(e.g.  `docker run --volume-driver=foo`) or a container already configured to
62
+use a plugin being started -- Docker looks for the named plugin in the plugin
63
+directory and activates it with a handshake. See Handshake API below.
64
+
65
+Plugins are *not* activated automatically at Docker daemon startup. Rather,
66
+they are activated only lazily, or on-demand, when they are needed.
67
+
68
+## API design
69
+
70
+The Plugin API is RPC-style JSON over HTTP, much like webhooks.
71
+
72
+Requests flow *from* the Docker daemon *to* the plugin.  So the plugin needs to
73
+implement an HTTP server and bind this to the UNIX socket mentioned in the
74
+"plugin discovery" section.
75
+
76
+All requests are HTTP `POST` requests.
77
+
78
+The API is versioned via an Accept header, which currently is always set to
79
+`application/vnd.docker.plugins.v1+json`.
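For illustration, the activation call the daemon makes is equivalent to something like the following request against a hypothetical plugin socket (shown with a curl build that supports `--unix-socket`):

```
$ curl -XPOST \
    -H "Accept: application/vnd.docker.plugins.v1+json" \
    --unix-socket /usr/share/docker/plugins/myplugin.sock \
    http://localhost/Plugin.Activate
{"Implements": ["VolumeDriver"]}
```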
80
+
81
+## Handshake API
82
+
83
+Plugins are activated via the following "handshake" API call.
84
+
85
+### /Plugin.Activate
86
+
87
+**Request:** empty body
88
+
89
+**Response:**
90
+```
91
+{
92
+    "Implements": ["VolumeDriver"]
93
+}
94
+```
95
+
96
+Responds with a list of Docker subsystems which this plugin implements.
97
+After activation, the plugin will be sent events from those subsystems.
98
+
99
+## Volume API
100
+
101
+If a plugin registers itself as a `VolumeDriver` (see above) then it is
102
+expected to provide writeable paths on the host filesystem for the Docker
103
+daemon to provide to containers to consume.
104
+
105
+The Docker daemon handles bind-mounting the provided paths into user
106
+containers.
107
+
108
+### /VolumeDriver.Create
109
+
110
+**Request**:
111
+```
112
+{
113
+    "Name": "volume_name"
114
+}
115
+```
116
+
117
+Instruct the plugin that the user wants to create a volume, given a user
118
+specified volume name.  The plugin does not need to actually manifest the
119
+volume on the filesystem yet (until Mount is called).
120
+
121
+**Response**:
122
+```
123
+{
124
+    "Err": null
125
+}
126
+```
127
+
128
+Respond with a string error if an error occurred.
129
+
130
+### /VolumeDriver.Remove
131
+
132
+**Request**:
133
+```
134
+{
135
+    "Name": "volume_name"
136
+}
137
+```
138
+
139
+Remove (delete) a volume, given a user specified volume name.
140
+
141
+**Response**:
142
+```
143
+{
144
+    "Err": null
145
+}
146
+```
147
+
148
+Respond with a string error if an error occurred.
149
+
150
+### /VolumeDriver.Mount
151
+
152
+**Request**:
153
+```
154
+{
155
+    "Name": "volume_name"
156
+}
157
+```
158
+
159
+Docker requires the plugin to provide a volume, given a user specified volume
160
+name. This is called once per container start.
161
+
162
+**Response**:
163
+```
164
+{
165
+    "Mountpoint": "/path/to/directory/on/host",
166
+    "Err": null
167
+}
168
+```
169
+
170
+Respond with the path on the host filesystem where the volume has been made
171
+available, and/or a string error if an error occurred.
172
+
173
+### /VolumeDriver.Path
174
+
175
+**Request**:
176
+```
177
+{
178
+    "Name": "volume_name"
179
+}
180
+```
181
+
182
+Docker needs reminding of the path to the volume on the host.
183
+
184
+**Response**:
185
+```
186
+{
187
+    "Mountpoint": "/path/to/directory/on/host",
188
+    "Err": null
189
+}
190
+```
191
+
192
+Respond with the path on the host filesystem where the volume has been made
193
+available, and/or a string error if an error occurred.
194
+
195
+### /VolumeDriver.Unmount
196
+
197
+**Request**:
198
+```
199
+{
200
+    "Name": "volume_name"
201
+}
202
+```
203
+
204
+Indication that Docker is no longer using the named volume. This is called once
205
+per container stop. The plugin may deduce that it is safe to deprovision the
206
+volume at this point.
207
+
208
+**Response**:
209
+```
210
+{
211
+    "Err": null
212
+}
213
+```
214
+
215
+Respond with a string error if an error occurred.
216
+
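+Continuing the earlier sketch, a minimal volume driver might serve
+`/VolumeDriver.Mount` and `/VolumeDriver.Unmount` by managing plain
+directories under a hypothetical base path; the `/Plugin.Activate` handler
+from the handshake example, and the Create, Remove and Path endpoints, are
+omitted here for brevity:
+
+```
+package main
+
+import (
+	"encoding/json"
+	"net"
+	"net/http"
+	"os"
+	"path/filepath"
+)
+
+// base is a hypothetical directory where this driver keeps its volumes.
+const base = "/var/lib/myplugin/volumes"
+
+// reply writes v as JSON with the versioned plugin content type.
+func reply(w http.ResponseWriter, v interface{}) {
+	w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+	json.NewEncoder(w).Encode(v)
+}
+
+// volumeName decodes the Name field shared by all VolumeDriver requests.
+func volumeName(r *http.Request) (string, error) {
+	var req struct{ Name string }
+	err := json.NewDecoder(r.Body).Decode(&req)
+	return req.Name, err
+}
+
+func main() {
+	mux := http.NewServeMux()
+	// The handshake and remaining VolumeDriver endpoints are omitted to keep
+	// the sketch short.
+
+	mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) {
+		name, err := volumeName(r)
+		if err == nil {
+			err = os.MkdirAll(filepath.Join(base, name), 0755)
+		}
+		if err != nil {
+			reply(w, map[string]string{"Err": err.Error()})
+			return
+		}
+		reply(w, map[string]string{"Mountpoint": filepath.Join(base, name)})
+	})
+
+	mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) {
+		// Nothing to tear down for a plain host directory.
+		reply(w, map[string]string{})
+	})
+
+	l, err := net.Listen("unix", "/usr/share/docker/plugins/myplugin.sock")
+	if err != nil {
+		panic(err)
+	}
+	http.Serve(l, mux)
+}
+```
+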
217
+## Plugin retries
218
+
219
+Attempts to call a method on a plugin are retried with an exponential backoff
220
+for up to 30 seconds. This may help when packaging plugins as containers, since
221
+it gives plugin containers a chance to start up before failing any user
222
+containers which depend on them.
0 223
new file mode 100644
... ...
@@ -0,0 +1,46 @@
0
+page_title: Experimental feature - Plugins
1
+page_keywords: experimental, Docker, plugins
2
+
3
+# Overview
4
+
5
+You can extend the capabilities of the Docker Engine by loading third-party
6
+plugins.
7
+
8
+## Types of plugins
9
+
10
+Plugins extend Docker's functionality.  They come in specific types.  For
11
+example, a [volume plugin](/experimental/plugins_volume) might enable Docker
12
+volumes to persist across multiple Docker hosts.
13
+
14
+Currently Docker supports volume plugins. In the future it will support
15
+additional plugin types.
16
+
17
+## Installing a plugin
18
+
19
+Follow the instructions in the plugin's documentation.
20
+
21
+## Finding a plugin
22
+
23
+The following plugins exist:
24
+
25
+* The [Flocker plugin](https://clusterhq.com/docker-plugin/) is a volume plugin
26
+  which provides multi-host portable volumes for Docker, enabling you to run
27
+  databases and other stateful containers and move them across a cluster
28
+  of machines.
29
+
30
+## Troubleshooting a plugin
31
+
32
+If you are having problems with Docker after loading a plugin, ask the authors
33
+of the plugin for help. The Docker team may not be able to assist you.
34
+
35
+## Writing a plugin
36
+
37
+If you are interested in writing a plugin for Docker, or in seeing how plugins work
38
+under the hood, see the [docker plugins reference](/experimental/plugin_api).
39
+
40
+# Related GitHub PRs and issues
41
+
42
+- [#13222](https://github.com/docker/docker/pull/13222) Plugins plumbing
43
+
44
+Send us feedback and comments on [#13419](https://github.com/docker/docker/issues/13419),
45
+or on the usual Google Groups (docker-user, docker-dev) and IRC channels.
0 46
new file mode 100644
... ...
@@ -0,0 +1,43 @@
0
+page_title: Experimental feature - Volume plugins
1
+page_keywords: experimental, Docker, plugins, volume
2
+
3
+# Overview
4
+
5
+Docker volume plugins enable Docker deployments to be integrated with external
6
+storage systems, such as Amazon EBS, and enable data volumes to persist beyond
7
+the lifetime of a single Docker host. See the [plugin documentation](/experimental/plugins)
8
+for more information.
9
+
10
+# Command-line changes
11
+
12
+This experimental feature introduces two changes to the `docker run` command:
13
+
14
+- The `--volume-driver` flag is introduced.
15
+- The `-v` syntax is changed to accept a volume name as its first component.
16
+
17
+Example:
18
+
19
+    $ docker run -ti -v volumename:/data --volume-driver=flocker busybox sh
20
+
21
+By specifying a volume name in conjunction with a volume driver, volume plugins
22
+such as [Flocker](https://clusterhq.com/docker-plugin/), once installed, can be
23
+used to manage volumes external to a single host, such as those on EBS. In this
24
+example, "volumename" is passed through to the volume plugin as a user-given
25
+name for the volume, which allows the plugin to associate it with an external
26
+volume beyond the lifetime of a single container or container host. This can be
27
+used, for example, to move a stateful container from one server to another.
28
+
29
+The `volumename` must not begin with a `/`.
30
+
31
+# API changes
32
+
33
+The container creation endpoint (`/containers/create`) accepts a `VolumeDriver`
34
+field of type `string` that specifies the name of the driver. Its default
35
+value is `"local"` (the default driver for local volumes).
36
+
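+For example, a create request that uses the `flocker` driver for an anonymous
+volume might look like this (unrelated fields omitted):
+
+```
+{
+    "Image": "busybox",
+    "Volumes": {"/data": {}},
+    "VolumeDriver": "flocker"
+}
+```
+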
37
+# Related GitHub PRs and issues
38
+
39
+- [#13161](https://github.com/docker/docker/pull/13161) Volume refactor and external volume plugins
40
+
41
+Send us feedback and comments on [#13420](https://github.com/docker/docker/issues/13420),
42
+or on the usual Google Groups (docker-user, docker-dev) and IRC channels.
... ...
@@ -166,7 +166,7 @@ func (s *DockerSuite) TestContainerApiStartDupVolumeBinds(c *check.C) {
166 166
 	c.Assert(status, check.Equals, http.StatusInternalServerError)
167 167
 	c.Assert(err, check.IsNil)
168 168
 
169
-	if !strings.Contains(string(body), "Duplicate volume") {
169
+	if !strings.Contains(string(body), "Duplicate bind") {
170 170
 		c.Fatalf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(body), err)
171 171
 	}
172 172
 }
... ...
@@ -210,49 +210,6 @@ func (s *DockerSuite) TestContainerApiStartVolumesFrom(c *check.C) {
210 210
 	}
211 211
 }
212 212
 
213
-// Ensure that volumes-from has priority over binds/anything else
214
-// This is pretty much the same as TestRunApplyVolumesFromBeforeVolumes, except with passing the VolumesFrom and the bind on start
215
-func (s *DockerSuite) TestVolumesFromHasPriority(c *check.C) {
216
-	volName := "voltst2"
217
-	volPath := "/tmp"
218
-
219
-	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", volName, "-v", volPath, "busybox")); err != nil {
220
-		c.Fatal(out, err)
221
-	}
222
-
223
-	name := "testing"
224
-	config := map[string]interface{}{
225
-		"Image":   "busybox",
226
-		"Volumes": map[string]struct{}{volPath: {}},
227
-	}
228
-
229
-	status, _, err := sockRequest("POST", "/containers/create?name="+name, config)
230
-	c.Assert(status, check.Equals, http.StatusCreated)
231
-	c.Assert(err, check.IsNil)
232
-
233
-	bindPath := randomUnixTmpDirPath("test")
234
-	config = map[string]interface{}{
235
-		"VolumesFrom": []string{volName},
236
-		"Binds":       []string{bindPath + ":/tmp"},
237
-	}
238
-	status, _, err = sockRequest("POST", "/containers/"+name+"/start", config)
239
-	c.Assert(status, check.Equals, http.StatusNoContent)
240
-	c.Assert(err, check.IsNil)
241
-
242
-	pth, err := inspectFieldMap(name, "Volumes", volPath)
243
-	if err != nil {
244
-		c.Fatal(err)
245
-	}
246
-	pth2, err := inspectFieldMap(volName, "Volumes", volPath)
247
-	if err != nil {
248
-		c.Fatal(err)
249
-	}
250
-
251
-	if pth != pth2 {
252
-		c.Fatalf("expected volume host path to be %s, got %s", pth, pth2)
253
-	}
254
-}
255
-
256 213
 func (s *DockerSuite) TestGetContainerStats(c *check.C) {
257 214
 	var (
258 215
 		name   = "statscontainer"
... ...
@@ -284,35 +284,6 @@ func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) {
284 284
 	}
285 285
 }
286 286
 
287
-// #9629
288
-func (s *DockerDaemonSuite) TestDaemonVolumesBindsRefs(c *check.C) {
289
-	if err := s.d.StartWithBusybox(); err != nil {
290
-		c.Fatal(err)
291
-	}
292
-
293
-	tmp, err := ioutil.TempDir(os.TempDir(), "")
294
-	if err != nil {
295
-		c.Fatal(err)
296
-	}
297
-	defer os.RemoveAll(tmp)
298
-
299
-	if err := ioutil.WriteFile(tmp+"/test", []byte("testing"), 0655); err != nil {
300
-		c.Fatal(err)
301
-	}
302
-
303
-	if out, err := s.d.Cmd("create", "-v", tmp+":/foo", "--name=voltest", "busybox"); err != nil {
304
-		c.Fatal(err, out)
305
-	}
306
-
307
-	if err := s.d.Restart(); err != nil {
308
-		c.Fatal(err)
309
-	}
310
-
311
-	if out, err := s.d.Cmd("run", "--volumes-from=voltest", "--name=consumer", "busybox", "/bin/sh", "-c", "[ -f /foo/test ]"); err != nil {
312
-		c.Fatal(err, out)
313
-	}
314
-}
315
-
316 287
 func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) {
317 288
 	// TODO: skip or update for Windows daemon
318 289
 	os.Remove("/etc/docker/key.json")
... ...
@@ -360,76 +331,6 @@ func (s *DockerDaemonSuite) TestDaemonKeyMigration(c *check.C) {
360 360
 	}
361 361
 }
362 362
 
363
-// Simulate an older daemon (pre 1.3) coming up with volumes specified in containers
364
-//	without corresponding volume json
365
-func (s *DockerDaemonSuite) TestDaemonUpgradeWithVolumes(c *check.C) {
366
-	graphDir := filepath.Join(os.TempDir(), "docker-test")
367
-	defer os.RemoveAll(graphDir)
368
-	if err := s.d.StartWithBusybox("-g", graphDir); err != nil {
369
-		c.Fatal(err)
370
-	}
371
-
372
-	tmpDir := filepath.Join(os.TempDir(), "test")
373
-	defer os.RemoveAll(tmpDir)
374
-
375
-	if out, err := s.d.Cmd("create", "-v", tmpDir+":/foo", "--name=test", "busybox"); err != nil {
376
-		c.Fatal(err, out)
377
-	}
378
-
379
-	if err := s.d.Stop(); err != nil {
380
-		c.Fatal(err)
381
-	}
382
-
383
-	// Remove this since we're expecting the daemon to re-create it too
384
-	if err := os.RemoveAll(tmpDir); err != nil {
385
-		c.Fatal(err)
386
-	}
387
-
388
-	configDir := filepath.Join(graphDir, "volumes")
389
-
390
-	if err := os.RemoveAll(configDir); err != nil {
391
-		c.Fatal(err)
392
-	}
393
-
394
-	if err := s.d.Start("-g", graphDir); err != nil {
395
-		c.Fatal(err)
396
-	}
397
-
398
-	if _, err := os.Stat(tmpDir); os.IsNotExist(err) {
399
-		c.Fatalf("expected volume path %s to exist but it does not", tmpDir)
400
-	}
401
-
402
-	dir, err := ioutil.ReadDir(configDir)
403
-	if err != nil {
404
-		c.Fatal(err)
405
-	}
406
-	if len(dir) == 0 {
407
-		c.Fatalf("expected volumes config dir to contain data for new volume")
408
-	}
409
-
410
-	// Now with just removing the volume config and not the volume data
411
-	if err := s.d.Stop(); err != nil {
412
-		c.Fatal(err)
413
-	}
414
-
415
-	if err := os.RemoveAll(configDir); err != nil {
416
-		c.Fatal(err)
417
-	}
418
-
419
-	if err := s.d.Start("-g", graphDir); err != nil {
420
-		c.Fatal(err)
421
-	}
422
-
423
-	dir, err = ioutil.ReadDir(configDir)
424
-	if err != nil {
425
-		c.Fatal(err)
426
-	}
427
-
428
-	if len(dir) == 0 {
429
-		c.Fatalf("expected volumes config dir to contain data for new volume")
430
-	}
431
-}
432
-
433 363
 // GH#11320 - verify that the daemon exits on failure properly
434 364
 // Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means
435 365
 // to get a daemon init failure; no other tests for -b/--bip conflict are therefore required
... ...
@@ -395,21 +395,6 @@ func (s *DockerSuite) TestRunModeNetContainerHostname(c *check.C) {
395 395
 	}
396 396
 }
397 397
 
398
-// Regression test for #4741
399
-func (s *DockerSuite) TestRunWithVolumesAsFiles(c *check.C) {
400
-	runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/etc/hosts:/target-file", "busybox", "true")
401
-	out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd)
402
-	if err != nil && exitCode != 0 {
403
-		c.Fatal("1", out, stderr, err)
404
-	}
405
-
406
-	runCmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-data", "busybox", "cat", "/target-file")
407
-	out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd)
408
-	if err != nil && exitCode != 0 {
409
-		c.Fatal("2", out, stderr, err)
410
-	}
411
-}
412
-
413 398
 // Regression test for #4979
414 399
 func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) {
415 400
 	runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file")
... ...
@@ -455,14 +440,6 @@ func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) {
455 455
 	}
456 456
 }
457 457
 
458
-// Regression test for #4830
459
-func (s *DockerSuite) TestRunWithRelativePath(c *check.C) {
460
-	runCmd := exec.Command(dockerBinary, "run", "-v", "tmp:/other-tmp", "busybox", "true")
461
-	if _, _, _, err := runCommandWithStdoutStderr(runCmd); err == nil {
462
-		c.Fatalf("relative path should result in an error")
463
-	}
464
-}
465
-
466 458
 func (s *DockerSuite) TestRunVolumesMountedAsReadonly(c *check.C) {
467 459
 	cmd := exec.Command(dockerBinary, "run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile")
468 460
 	if code, err := runCommand(cmd); err == nil || code == 0 {
... ...
@@ -536,7 +513,7 @@ func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) {
536 536
 	if out, _, err := runCommandWithOutput(cmd); err == nil {
537 537
 		c.Fatal("Expected error about duplicate volume definitions")
538 538
 	} else {
539
-		if !strings.Contains(out, "Duplicate volume") {
539
+		if !strings.Contains(out, "Duplicate bind mount") {
540 540
 			c.Fatalf("Expected 'duplicate volume' error, got %v", err)
541 541
 		}
542 542
 	}
... ...
@@ -2333,7 +2310,13 @@ func (s *DockerSuite) TestRunMountOrdering(c *check.C) {
2333 2333
 		c.Fatal(err)
2334 2334
 	}
2335 2335
 
2336
-	cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp", tmpDir), "-v", fmt.Sprintf("%s:/tmp/foo", fooDir), "-v", fmt.Sprintf("%s:/tmp/tmp2", tmpDir2), "-v", fmt.Sprintf("%s:/tmp/tmp2/foo", fooDir), "busybox:latest", "sh", "-c", "ls /tmp/touch-me && ls /tmp/foo/touch-me && ls /tmp/tmp2/touch-me && ls /tmp/tmp2/foo/touch-me")
2336
+	cmd := exec.Command(dockerBinary, "run",
2337
+		"-v", fmt.Sprintf("%s:/tmp", tmpDir),
2338
+		"-v", fmt.Sprintf("%s:/tmp/foo", fooDir),
2339
+		"-v", fmt.Sprintf("%s:/tmp/tmp2", tmpDir2),
2340
+		"-v", fmt.Sprintf("%s:/tmp/tmp2/foo", fooDir),
2341
+		"busybox:latest", "sh", "-c",
2342
+		"ls /tmp/touch-me && ls /tmp/foo/touch-me && ls /tmp/tmp2/touch-me && ls /tmp/tmp2/foo/touch-me")
2337 2343
 	out, _, err := runCommandWithOutput(cmd)
2338 2344
 	if err != nil {
2339 2345
 		c.Fatal(out, err)
... ...
@@ -2427,41 +2410,6 @@ func (s *DockerSuite) TestVolumesNoCopyData(c *check.C) {
2427 2427
 	}
2428 2428
 }
2429 2429
 
2430
-func (s *DockerSuite) TestRunVolumesNotRecreatedOnStart(c *check.C) {
2431
-	testRequires(c, SameHostDaemon)
2432
-
2433
-	// Clear out any remnants from other tests
2434
-	info, err := ioutil.ReadDir(volumesConfigPath)
2435
-	if err != nil {
2436
-		c.Fatal(err)
2437
-	}
2438
-	if len(info) > 0 {
2439
-		for _, f := range info {
2440
-			if err := os.RemoveAll(volumesConfigPath + "/" + f.Name()); err != nil {
2441
-				c.Fatal(err)
2442
-			}
2443
-		}
2444
-	}
2445
-
2446
-	cmd := exec.Command(dockerBinary, "run", "-v", "/foo", "--name", "lone_starr", "busybox")
2447
-	if _, err := runCommand(cmd); err != nil {
2448
-		c.Fatal(err)
2449
-	}
2450
-
2451
-	cmd = exec.Command(dockerBinary, "start", "lone_starr")
2452
-	if _, err := runCommand(cmd); err != nil {
2453
-		c.Fatal(err)
2454
-	}
2455
-
2456
-	info, err = ioutil.ReadDir(volumesConfigPath)
2457
-	if err != nil {
2458
-		c.Fatal(err)
2459
-	}
2460
-	if len(info) != 1 {
2461
-		c.Fatalf("Expected only 1 volume have %v", len(info))
2462
-	}
2463
-}
2464
-
2465 2430
 func (s *DockerSuite) TestRunNoOutputFromPullInStdout(c *check.C) {
2466 2431
 	// just run with unknown image
2467 2432
 	cmd := exec.Command(dockerBinary, "run", "asdfsg")
... ...
@@ -2496,7 +2444,7 @@ func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) {
2496 2496
 
2497 2497
 	out, err = inspectFieldMap("dark_helmet", "Volumes", "/foo")
2498 2498
 	c.Assert(err, check.IsNil)
2499
-	if !strings.Contains(out, volumesStoragePath) {
2499
+	if !strings.Contains(out, volumesConfigPath) {
2500 2500
 		c.Fatalf("Volume was not defined for /foo\n%q", out)
2501 2501
 	}
2502 2502
 
... ...
@@ -2507,7 +2455,7 @@ func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) {
2507 2507
 	}
2508 2508
 	out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar")
2509 2509
 	c.Assert(err, check.IsNil)
2510
-	if !strings.Contains(out, volumesStoragePath) {
2510
+	if !strings.Contains(out, volumesConfigPath) {
2511 2511
 		c.Fatalf("Volume was not defined for /bar\n%q", out)
2512 2512
 	}
2513 2513
 }
... ...
@@ -126,32 +126,6 @@ func (s *DockerSuite) TestStartRecordError(c *check.C) {
126 126
 
127 127
 }
128 128
 
129
-// gh#8726: a failed Start() breaks --volumes-from on subsequent Start()'s
130
-func (s *DockerSuite) TestStartVolumesFromFailsCleanly(c *check.C) {
131
-
132
-	// Create the first data volume
133
-	dockerCmd(c, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox")
134
-
135
-	// Expect this to fail because the data test after contaienr doesn't exist yet
136
-	if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "consumer", "--volumes-from", "data_before", "--volumes-from", "data_after", "busybox")); err == nil {
137
-		c.Fatal("Expected error but got none")
138
-	}
139
-
140
-	// Create the second data volume
141
-	dockerCmd(c, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox")
142
-
143
-	// Now, all the volumes should be there
144
-	dockerCmd(c, "start", "consumer")
145
-
146
-	// Check that we have the volumes we want
147
-	out, _ := dockerCmd(c, "inspect", "--format='{{ len .Volumes }}'", "consumer")
148
-	nVolumes := strings.Trim(out, " \r\n'")
149
-	if nVolumes != "2" {
150
-		c.Fatalf("Missing volumes: expected 2, got %s", nVolumes)
151
-	}
152
-
153
-}
154
-
155 129
 func (s *DockerSuite) TestStartPausedContainer(c *check.C) {
156 130
 	defer unpauseAllContainers()
157 131
 
158 132
new file mode 100644
... ...
@@ -0,0 +1,249 @@
0
+// +build experimental
1
+// +build !windows
2
+
3
+package main
4
+
5
+import (
6
+	"encoding/json"
7
+	"fmt"
8
+	"io/ioutil"
9
+	"net/http"
10
+	"net/http/httptest"
11
+	"os"
12
+	"path/filepath"
13
+	"strings"
14
+
15
+	"github.com/go-check/check"
16
+)
17
+
18
+func init() {
19
+	check.Suite(&DockerExternalVolumeSuite{
20
+		ds: &DockerSuite{},
21
+	})
22
+}
23
+
24
+type eventCounter struct {
25
+	activations int
26
+	creations   int
27
+	removals    int
28
+	mounts      int
29
+	unmounts    int
30
+	paths       int
31
+}
32
+
33
+type DockerExternalVolumeSuite struct {
34
+	server *httptest.Server
35
+	ds     *DockerSuite
36
+	d      *Daemon
37
+	ec     *eventCounter
38
+}
39
+
40
+func (s *DockerExternalVolumeSuite) SetUpTest(c *check.C) {
41
+	s.d = NewDaemon(c)
42
+	s.ds.SetUpTest(c)
43
+	s.ec = &eventCounter{}
44
+
45
+}
46
+
47
+func (s *DockerExternalVolumeSuite) TearDownTest(c *check.C) {
48
+	s.d.Stop()
49
+	s.ds.TearDownTest(c)
50
+}
51
+
52
+func (s *DockerExternalVolumeSuite) SetUpSuite(c *check.C) {
53
+	mux := http.NewServeMux()
54
+	s.server = httptest.NewServer(mux)
55
+
56
+	type pluginRequest struct {
57
+		Name string
58
+	}
59
+
60
+	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
61
+		s.ec.activations++
62
+
63
+		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
64
+		fmt.Fprintln(w, `{"Implements": ["VolumeDriver"]}`)
65
+	})
66
+
67
+	mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) {
68
+		s.ec.creations++
69
+
70
+		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
71
+		fmt.Fprintln(w, `{}`)
72
+	})
73
+
74
+	mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) {
75
+		s.ec.removals++
76
+
77
+		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
78
+		fmt.Fprintln(w, `{}`)
79
+	})
80
+
81
+	mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) {
82
+		s.ec.paths++
83
+
84
+		var pr pluginRequest
85
+		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
86
+			http.Error(w, err.Error(), 500)
87
+		}
88
+
89
+		p := hostVolumePath(pr.Name)
90
+
91
+		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
92
+		fmt.Fprintln(w, fmt.Sprintf("{\"Mountpoint\": \"%s\"}", p))
93
+	})
94
+
95
+	mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) {
96
+		s.ec.mounts++
97
+
98
+		var pr pluginRequest
99
+		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
100
+			http.Error(w, err.Error(), 500)
101
+		}
102
+
103
+		p := hostVolumePath(pr.Name)
104
+		if err := os.MkdirAll(p, 0755); err != nil {
105
+			http.Error(w, err.Error(), 500)
106
+		}
107
+
108
+		if err := ioutil.WriteFile(filepath.Join(p, "test"), []byte(s.server.URL), 0644); err != nil {
109
+			http.Error(w, err.Error(), 500)
110
+		}
111
+
112
+		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
113
+		fmt.Fprintln(w, fmt.Sprintf("{\"Mountpoint\": \"%s\"}", p))
114
+	})
115
+
116
+	mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) {
117
+		s.ec.unmounts++
118
+
119
+		var pr pluginRequest
120
+		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
121
+			http.Error(w, err.Error(), 500)
122
+		}
123
+
124
+		p := hostVolumePath(pr.Name)
125
+		if err := os.RemoveAll(p); err != nil {
126
+			http.Error(w, err.Error(), 500)
127
+		}
128
+
129
+		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
130
+		fmt.Fprintln(w, `{}`)
131
+	})
132
+
133
+	if err := os.MkdirAll("/usr/share/docker/plugins", 0755); err != nil {
134
+		c.Fatal(err)
135
+	}
136
+
137
+	if err := ioutil.WriteFile("/usr/share/docker/plugins/test-external-volume-driver.spec", []byte(s.server.URL), 0644); err != nil {
138
+		c.Fatal(err)
139
+	}
140
+}
141
+
142
+func (s *DockerExternalVolumeSuite) TearDownSuite(c *check.C) {
143
+	s.server.Close()
144
+
145
+	if err := os.RemoveAll("/usr/share/docker/plugins"); err != nil {
146
+		c.Fatal(err)
147
+	}
148
+}
149
+
150
+func (s *DockerExternalVolumeSuite) TestStartExternalNamedVolumeDriver(c *check.C) {
151
+	if err := s.d.StartWithBusybox(); err != nil {
152
+		c.Fatal(err)
153
+	}
154
+
155
+	out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver", "busybox:latest", "cat", "/tmp/external-volume-test/test")
156
+	if err != nil {
157
+		c.Fatal(err)
158
+	}
159
+
160
+	if !strings.Contains(out, s.server.URL) {
161
+		c.Fatalf("External volume mount failed. Output: %s\n", out)
162
+	}
163
+
164
+	p := hostVolumePath("external-volume-test")
165
+	_, err = os.Lstat(p)
166
+	if err == nil {
167
+		c.Fatalf("Expected error checking volume path in host: %s\n", p)
168
+	}
169
+
170
+	if !os.IsNotExist(err) {
171
+		c.Fatalf("Expected volume path in host to not exist: %s, %v\n", p, err)
172
+	}
173
+
174
+	c.Assert(s.ec.activations, check.Equals, 1)
175
+	c.Assert(s.ec.creations, check.Equals, 1)
176
+	c.Assert(s.ec.removals, check.Equals, 1)
177
+	c.Assert(s.ec.mounts, check.Equals, 1)
178
+	c.Assert(s.ec.unmounts, check.Equals, 1)
179
+}
180
+
181
+func (s *DockerExternalVolumeSuite) TestStartExternalVolumeUnnamedDriver(c *check.C) {
182
+	if err := s.d.StartWithBusybox(); err != nil {
183
+		c.Fatal(err)
184
+	}
185
+
186
+	out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver", "busybox:latest", "cat", "/tmp/external-volume-test/test")
187
+	if err != nil {
188
+		c.Fatal(err)
189
+	}
190
+
191
+	if !strings.Contains(out, s.server.URL) {
192
+		c.Fatalf("External volume mount failed. Output: %s\n", out)
193
+	}
194
+
195
+	c.Assert(s.ec.activations, check.Equals, 1)
196
+	c.Assert(s.ec.creations, check.Equals, 1)
197
+	c.Assert(s.ec.removals, check.Equals, 1)
198
+	c.Assert(s.ec.mounts, check.Equals, 1)
199
+	c.Assert(s.ec.unmounts, check.Equals, 1)
200
+}
201
+
202
+func (s DockerExternalVolumeSuite) TestStartExternalVolumeDriverVolumesFrom(c *check.C) {
203
+	if err := s.d.StartWithBusybox(); err != nil {
204
+		c.Fatal(err)
205
+	}
206
+
207
+	if _, err := s.d.Cmd("run", "-d", "--name", "vol-test1", "-v", "/foo", "--volume-driver", "test-external-volume-driver", "busybox:latest"); err != nil {
208
+		c.Fatal(err)
209
+	}
210
+
211
+	if _, err := s.d.Cmd("run", "--rm", "--volumes-from", "vol-test1", "--name", "vol-test2", "busybox", "ls", "/tmp"); err != nil {
212
+		c.Fatal(err)
213
+	}
214
+
215
+	if _, err := s.d.Cmd("rm", "-f", "vol-test1"); err != nil {
216
+		c.Fatal(err)
217
+	}
218
+
219
+	c.Assert(s.ec.activations, check.Equals, 1)
220
+	c.Assert(s.ec.creations, check.Equals, 2)
221
+	c.Assert(s.ec.removals, check.Equals, 1)
222
+	c.Assert(s.ec.mounts, check.Equals, 2)
223
+	c.Assert(s.ec.unmounts, check.Equals, 2)
224
+}
225
+
226
+func (s DockerExternalVolumeSuite) TestStartExternalVolumeDriverDeleteContainer(c *check.C) {
227
+	if err := s.d.StartWithBusybox(); err != nil {
228
+		c.Fatal(err)
229
+	}
230
+
231
+	if _, err := s.d.Cmd("run", "-d", "--name", "vol-test1", "-v", "/foo", "--volume-driver", "test-external-volume-driver", "busybox:latest"); err != nil {
232
+		c.Fatal(err)
233
+	}
234
+
235
+	if _, err := s.d.Cmd("rm", "-fv", "vol-test1"); err != nil {
236
+		c.Fatal(err)
237
+	}
238
+
239
+	c.Assert(s.ec.activations, check.Equals, 1)
240
+	c.Assert(s.ec.creations, check.Equals, 1)
241
+	c.Assert(s.ec.removals, check.Equals, 1)
242
+	c.Assert(s.ec.mounts, check.Equals, 1)
243
+	c.Assert(s.ec.unmounts, check.Equals, 1)
244
+}
245
+
246
+func hostVolumePath(name string) string {
247
+	return fmt.Sprintf("/var/lib/docker/volumes/%s", name)
248
+}
... ...
@@ -18,7 +18,6 @@ var (
18 18
 
19 19
 	dockerBasePath       = "/var/lib/docker"
20 20
 	volumesConfigPath    = dockerBasePath + "/volumes"
21
-	volumesStoragePath   = dockerBasePath + "/vfs/dir"
22 21
 	containerStoragePath = dockerBasePath + "/containers"
23 22
 
24 23
 	runtimePath    = "/var/run/docker"
... ...
@@ -31,6 +31,10 @@ type Client struct {
31 31
 }
32 32
 
33 33
 func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error {
34
+	return c.callWithRetry(serviceMethod, args, ret, true)
35
+}
36
+
37
+func (c *Client) callWithRetry(serviceMethod string, args interface{}, ret interface{}, retry bool) error {
34 38
 	var buf bytes.Buffer
35 39
 	if err := json.NewEncoder(&buf).Encode(args); err != nil {
36 40
 		return err
... ...
@@ -50,12 +54,16 @@ func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) e
50 50
 	for {
51 51
 		resp, err := c.http.Do(req)
52 52
 		if err != nil {
53
+			if !retry {
54
+				return err
55
+			}
56
+
53 57
 			timeOff := backoff(retries)
54
-			if timeOff+time.Since(start) > defaultTimeOut {
58
+			if abort(start, timeOff) {
55 59
 				return err
56 60
 			}
57 61
 			retries++
58
-			logrus.Warn("Unable to connect to plugin: %s, retrying in %ds\n", c.addr, timeOff)
62
+			logrus.Warnf("Unable to connect to plugin: %s, retrying in %v", c.addr, timeOff)
59 63
 			time.Sleep(timeOff)
60 64
 			continue
61 65
 		}
... ...
@@ -73,7 +81,7 @@ func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) e
73 73
 }
74 74
 
75 75
 func backoff(retries int) time.Duration {
76
-	b, max := float64(1), float64(defaultTimeOut)
76
+	b, max := 1, defaultTimeOut
77 77
 	for b < max && retries > 0 {
78 78
 		b *= 2
79 79
 		retries--
... ...
@@ -81,7 +89,11 @@ func backoff(retries int) time.Duration {
81 81
 	if b > max {
82 82
 		b = max
83 83
 	}
84
-	return time.Duration(b)
84
+	return time.Duration(b) * time.Second
85
+}
86
+
87
+func abort(start time.Time, timeOff time.Duration) bool {
88
+	return timeOff+time.Since(start) > time.Duration(defaultTimeOut)*time.Second
85 89
 }
86 90
 
87 91
 func configureTCPTransport(tr *http.Transport, proto, addr string) {
... ...
@@ -6,6 +6,7 @@ import (
6 6
 	"net/http/httptest"
7 7
 	"reflect"
8 8
 	"testing"
9
+	"time"
9 10
 )
10 11
 
11 12
 var (
... ...
@@ -27,7 +28,7 @@ func teardownRemotePluginServer() {
27 27
 
28 28
 func TestFailedConnection(t *testing.T) {
29 29
 	c := NewClient("tcp://127.0.0.1:1")
30
-	err := c.Call("Service.Method", nil, nil)
30
+	err := c.callWithRetry("Service.Method", nil, nil, false)
31 31
 	if err == nil {
32 32
 		t.Fatal("Unexpected successful connection")
33 33
 	}
... ...
@@ -61,3 +62,44 @@ func TestEchoInputOutput(t *testing.T) {
61 61
 		t.Fatalf("Expected %v, was %v\n", m, output)
62 62
 	}
63 63
 }
64
+
65
+func TestBackoff(t *testing.T) {
66
+	cases := []struct {
67
+		retries    int
68
+		expTimeOff time.Duration
69
+	}{
70
+		{0, time.Duration(1)},
71
+		{1, time.Duration(2)},
72
+		{2, time.Duration(4)},
73
+		{4, time.Duration(16)},
74
+		{6, time.Duration(30)},
75
+		{10, time.Duration(30)},
76
+	}
77
+
78
+	for _, c := range cases {
79
+		s := c.expTimeOff * time.Second
80
+		if d := backoff(c.retries); d != s {
81
+			t.Fatalf("Retry %v, expected %v, was %v\n", c.retries, s, d)
82
+		}
83
+	}
84
+}
85
+
86
+func TestAbortRetry(t *testing.T) {
87
+	cases := []struct {
88
+		timeOff  time.Duration
89
+		expAbort bool
90
+	}{
91
+		{time.Duration(1), false},
92
+		{time.Duration(2), false},
93
+		{time.Duration(10), false},
94
+		{time.Duration(30), true},
95
+		{time.Duration(40), true},
96
+	}
97
+
98
+	for _, c := range cases {
99
+		s := c.timeOff * time.Second
100
+		if a := abort(time.Now(), s); a != c.expAbort {
101
+			t.Fatalf("Duration %v, expected %v, was %v\n", c.timeOff, s, a)
102
+		}
103
+	}
104
+}
... ...
@@ -122,6 +122,7 @@ type Config struct {
122 122
 	Cmd             *Command
123 123
 	Image           string // Name of the image as it was passed by the operator (eg. could be symbolic)
124 124
 	Volumes         map[string]struct{}
125
+	VolumeDriver    string
125 126
 	WorkingDir      string
126 127
 	Entrypoint      *Entrypoint
127 128
 	NetworkDisabled bool
... ...
@@ -100,6 +100,8 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
100 100
 	cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options")
101 101
 	cmd.Var(&flLoggingOpts, []string{"-log-opt"}, "Log driver options")
102 102
 
103
+	expFlags := attachExperimentalFlags(cmd)
104
+
103 105
 	cmd.Require(flag.Min, 1)
104 106
 
105 107
 	if err := cmd.ParseFlags(args, true); err != nil {
... ...
@@ -355,6 +357,8 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
355 355
 		CgroupParent:    *flCgroupParent,
356 356
 	}
357 357
 
358
+	applyExperimentalFlags(expFlags, config, hostConfig)
359
+
358 360
 	// When allocating stdin in attached mode, close stdin at client disconnect
359 361
 	if config.OpenStdin && config.AttachStdin {
360 362
 		config.StdinOnce = true
361 363
new file mode 100644
... ...
@@ -0,0 +1,19 @@
0
+// +build experimental
1
+
2
+package runconfig
3
+
4
+import flag "github.com/docker/docker/pkg/mflag"
5
+
6
+type experimentalFlags struct {
7
+	flags map[string]interface{}
8
+}
9
+
10
+func attachExperimentalFlags(cmd *flag.FlagSet) *experimentalFlags {
11
+	flags := make(map[string]interface{})
12
+	flags["volume-driver"] = cmd.String([]string{"-volume-driver"}, "", "Optional volume driver for the container")
13
+	return &experimentalFlags{flags: flags}
14
+}
15
+
16
+func applyExperimentalFlags(exp *experimentalFlags, config *Config, hostConfig *HostConfig) {
17
+	config.VolumeDriver = *(exp.flags["volume-driver"]).(*string)
18
+}
0 19
new file mode 100644
... ...
@@ -0,0 +1,14 @@
0
+// +build !experimental
1
+
2
+package runconfig
3
+
4
+import flag "github.com/docker/docker/pkg/mflag"
5
+
6
+type experimentalFlags struct{}
7
+
8
+func attachExperimentalFlags(cmd *flag.FlagSet) *experimentalFlags {
9
+	return nil
10
+}
11
+
12
+func applyExperimentalFlags(flags *experimentalFlags, config *Config, hostConfig *HostConfig) {
13
+}
0 14
new file mode 100644
... ...
@@ -0,0 +1,22 @@
0
+package utils
1
+
2
+import (
3
+	"net"
4
+	"net/http"
5
+	"time"
6
+)
7
+
8
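+// ConfigureTCPTransport sets up tr to reach addr over proto: UNIX sockets are
+// dialed directly with compression disabled, while other protocols use the
+// environment proxy settings and a dial timeout.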
+func ConfigureTCPTransport(tr *http.Transport, proto, addr string) {
9
+	// Why 32? See https://github.com/docker/docker/pull/8035.
10
+	timeout := 32 * time.Second
11
+	if proto == "unix" {
12
+		// No need for compression in local communications.
13
+		tr.DisableCompression = true
14
+		tr.Dial = func(_, _ string) (net.Conn, error) {
15
+			return net.DialTimeout(proto, addr, timeout)
16
+		}
17
+	} else {
18
+		tr.Proxy = http.ProxyFromEnvironment
19
+		tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
20
+	}
21
+}
0 22
new file mode 100644
... ...
@@ -0,0 +1,60 @@
0
+package volumedrivers
1
+
2
+import "github.com/docker/docker/volume"
3
+
4
+type volumeDriverAdapter struct {
5
+	name  string
6
+	proxy *volumeDriverProxy
7
+}
8
+
9
+func (a *volumeDriverAdapter) Name() string {
10
+	return a.name
11
+}
12
+
13
+func (a *volumeDriverAdapter) Create(name string) (volume.Volume, error) {
14
+	err := a.proxy.Create(name)
15
+	if err != nil {
16
+		return nil, err
17
+	}
18
+	return &volumeAdapter{
19
+		proxy:      a.proxy,
20
+		name:       name,
21
+		driverName: a.name}, nil
22
+}
23
+
24
+func (a *volumeDriverAdapter) Remove(v volume.Volume) error {
25
+	return a.proxy.Remove(v.Name())
26
+}
27
+
28
+type volumeAdapter struct {
29
+	proxy      *volumeDriverProxy
30
+	name       string
31
+	driverName string
32
+	eMount     string // ephemeral host volume path
33
+}
34
+
35
+func (a *volumeAdapter) Name() string {
36
+	return a.name
37
+}
38
+
39
+func (a *volumeAdapter) DriverName() string {
40
+	return a.driverName
41
+}
42
+
43
+func (a *volumeAdapter) Path() string {
44
+	if len(a.eMount) > 0 {
45
+		return a.eMount
46
+	}
47
+	m, _ := a.proxy.Path(a.name)
48
+	return m
49
+}
50
+
51
+func (a *volumeAdapter) Mount() (string, error) {
52
+	var err error
53
+	a.eMount, err = a.proxy.Mount(a.name)
54
+	return a.eMount, err
55
+}
56
+
57
+func (a *volumeAdapter) Unmount() error {
58
+	return a.proxy.Unmount(a.name)
59
+}
0 60
new file mode 100644
... ...
@@ -0,0 +1,20 @@
0
+package volumedrivers
1
+
2
+import "github.com/docker/docker/volume"
3
+
4
+type client interface {
5
+	Call(string, interface{}, interface{}) error
6
+}
7
+
8
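+// NewVolumeDriver returns a volume.Driver that forwards create and remove
+// requests to the external plugin named name through the given client.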
+func NewVolumeDriver(name string, c client) volume.Driver {
9
+	proxy := &volumeDriverProxy{c}
10
+	return &volumeDriverAdapter{name, proxy}
11
+}
12
+
13
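+// VolumeDriver is the interface an external volume plugin is expected to
+// implement, mirroring the /VolumeDriver.* HTTP endpoints.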
+type VolumeDriver interface {
14
+	Create(name string) (err error)
15
+	Remove(name string) (err error)
16
+	Path(name string) (mountpoint string, err error)
17
+	Mount(name string) (mountpoint string, err error)
18
+	Unmount(name string) (err error)
19
+}
0 20
new file mode 100644
... ...
@@ -0,0 +1,61 @@
0
+package volumedrivers
1
+
2
+import (
3
+	"fmt"
4
+	"sync"
5
+
6
+	"github.com/docker/docker/pkg/plugins"
7
+	"github.com/docker/docker/volume"
8
+)
9
+
10
+// currently created by hand. generation tool would generate this like:
11
+// $ extpoint-gen Driver > volume/extpoint.go
12
+
13
+var drivers = &driverExtpoint{extensions: make(map[string]volume.Driver)}
14
+
15
+type driverExtpoint struct {
16
+	extensions map[string]volume.Driver
17
+	sync.Mutex
18
+}
19
+
20
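+// Register associates a volume driver with the given name. It returns false
+// if the name is empty or already taken.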
+func Register(extension volume.Driver, name string) bool {
21
+	drivers.Lock()
22
+	defer drivers.Unlock()
23
+	if name == "" {
24
+		return false
25
+	}
26
+	_, exists := drivers.extensions[name]
27
+	if exists {
28
+		return false
29
+	}
30
+	drivers.extensions[name] = extension
31
+	return true
32
+}
33
+
34
+func Unregister(name string) bool {
35
+	drivers.Lock()
36
+	defer drivers.Unlock()
37
+	_, exists := drivers.extensions[name]
38
+	if !exists {
39
+		return false
40
+	}
41
+	delete(drivers.extensions, name)
42
+	return true
43
+}
44
+
45
+func Lookup(name string) (volume.Driver, error) {
46
+	drivers.Lock()
47
+	defer drivers.Unlock()
48
+	ext, ok := drivers.extensions[name]
49
+	if ok {
50
+		return ext, nil
51
+	}
52
+	pl, err := plugins.Get(name, "VolumeDriver")
53
+	if err != nil {
54
+		return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err)
55
+	}
56
+
57
+	d := NewVolumeDriver(name, pl.Client)
58
+	drivers.extensions[name] = d
59
+	return d, nil
60
+}
0 61
new file mode 100644
... ...
@@ -0,0 +1,74 @@
0
+package volumedrivers
1
+
2
+import "fmt"
3
+
4
+// currently created by hand. generation tool would generate this like:
5
+// $ rpc-gen volume/drivers/api.go VolumeDriver > volume/drivers/proxy.go
6
+
7
+type volumeDriverRequest struct {
8
+	Name string
9
+}
10
+
11
+type volumeDriverResponse struct {
12
+	Mountpoint string `json:",omitempty"`
13
+	Err        error  `json:",omitempty"`
14
+}
15
+
16
+type volumeDriverProxy struct {
17
+	c client
18
+}
19
+
20
+func (pp *volumeDriverProxy) Create(name string) error {
21
+	args := volumeDriverRequest{name}
22
+	var ret volumeDriverResponse
23
+	err := pp.c.Call("VolumeDriver.Create", args, &ret)
24
+	if err != nil {
25
+		return pp.fmtError(name, err)
26
+	}
27
+	return pp.fmtError(name, ret.Err)
28
+}
29
+
30
+func (pp *volumeDriverProxy) Remove(name string) error {
31
+	args := volumeDriverRequest{name}
32
+	var ret volumeDriverResponse
33
+	err := pp.c.Call("VolumeDriver.Remove", args, &ret)
34
+	if err != nil {
35
+		return pp.fmtError(name, err)
36
+	}
37
+	return pp.fmtError(name, ret.Err)
38
+}
39
+
40
+func (pp *volumeDriverProxy) Path(name string) (string, error) {
41
+	args := volumeDriverRequest{name}
42
+	var ret volumeDriverResponse
43
+	if err := pp.c.Call("VolumeDriver.Path", args, &ret); err != nil {
44
+		return "", pp.fmtError(name, err)
45
+	}
46
+	return ret.Mountpoint, pp.fmtError(name, ret.Err)
47
+}
48
+
49
+func (pp *volumeDriverProxy) Mount(name string) (string, error) {
50
+	args := volumeDriverRequest{name}
51
+	var ret volumeDriverResponse
52
+	if err := pp.c.Call("VolumeDriver.Mount", args, &ret); err != nil {
53
+		return "", pp.fmtError(name, err)
54
+	}
55
+	return ret.Mountpoint, pp.fmtError(name, ret.Err)
56
+}
57
+
58
+func (pp *volumeDriverProxy) Unmount(name string) error {
59
+	args := volumeDriverRequest{name}
60
+	var ret volumeDriverResponse
61
+	err := pp.c.Call("VolumeDriver.Unmount", args, &ret)
62
+	if err != nil {
63
+		return pp.fmtError(name, err)
64
+	}
65
+	return pp.fmtError(name, ret.Err)
66
+}
67
+
68
+func (pp *volumeDriverProxy) fmtError(name string, err error) error {
69
+	if err == nil {
70
+		return nil
71
+	}
72
+	return fmt.Errorf("External volume driver request failed for %s: %v", name, err)
73
+}
0 74
new file mode 100644
... ...
@@ -0,0 +1,126 @@
0
+package local
1
+
2
+import (
3
+	"errors"
4
+	"fmt"
5
+	"io/ioutil"
6
+	"os"
7
+	"path/filepath"
8
+	"sync"
9
+
10
+	"github.com/docker/docker/volume"
11
+)
12
+
13
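+// New creates the "local" volume driver rooted at rootDirectory and loads any
+// volumes that already exist under it.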
+func New(rootDirectory string) (*Root, error) {
14
+	if err := os.MkdirAll(rootDirectory, 0700); err != nil {
15
+		return nil, err
16
+	}
17
+	r := &Root{
18
+		path:    rootDirectory,
19
+		volumes: make(map[string]*Volume),
20
+	}
21
+	dirs, err := ioutil.ReadDir(rootDirectory)
22
+	if err != nil {
23
+		return nil, err
24
+	}
25
+	for _, d := range dirs {
26
+		name := filepath.Base(d.Name())
27
+		r.volumes[name] = &Volume{
28
+			driverName: r.Name(),
29
+			name:       name,
30
+			path:       filepath.Join(rootDirectory, name),
31
+		}
32
+	}
33
+	return r, nil
34
+}
35
+
36
+type Root struct {
37
+	m       sync.Mutex
38
+	path    string
39
+	volumes map[string]*Volume
40
+}
41
+
42
+func (r *Root) Name() string {
43
+	return "local"
44
+}
45
+
46
+func (r *Root) Create(name string) (volume.Volume, error) {
47
+	r.m.Lock()
48
+	defer r.m.Unlock()
49
+	v, exists := r.volumes[name]
50
+	if !exists {
51
+		path := filepath.Join(r.path, name)
52
+		if err := os.Mkdir(path, 0755); err != nil {
53
+			if os.IsExist(err) {
54
+				return nil, fmt.Errorf("volume already exists under %s", path)
55
+			}
56
+			return nil, err
57
+		}
58
+		v = &Volume{
59
+			driverName: r.Name(),
60
+			name:       name,
61
+			path:       path,
62
+		}
63
+		r.volumes[name] = v
64
+	}
65
+	v.use()
66
+	return v, nil
67
+}
68
+
69
+func (r *Root) Remove(v volume.Volume) error {
70
+	r.m.Lock()
71
+	defer r.m.Unlock()
72
+	lv, ok := v.(*Volume)
73
+	if !ok {
74
+		return errors.New("unknown volume type")
75
+	}
76
+	lv.release()
77
+	if lv.usedCount == 0 {
78
+		delete(r.volumes, lv.name)
79
+		return os.RemoveAll(lv.path)
80
+	}
81
+	return nil
82
+}
83
+
84
+type Volume struct {
85
+	m         sync.Mutex
86
+	usedCount int
87
+	// unique name of the volume
88
+	name string
89
+	// path is the path on the host where the data lives
90
+	path string
91
+	// driverName is the name of the driver that created the volume.
92
+	driverName string
93
+}
94
+
95
+func (v *Volume) Name() string {
96
+	return v.name
97
+}
98
+
99
+func (v *Volume) DriverName() string {
100
+	return v.driverName
101
+}
102
+
103
+func (v *Volume) Path() string {
104
+	return v.path
105
+}
106
+
107
+func (v *Volume) Mount() (string, error) {
108
+	return v.path, nil
109
+}
110
+
111
+func (v *Volume) Unmount() error {
112
+	return nil
113
+}
114
+
115
+func (v *Volume) use() {
116
+	v.m.Lock()
117
+	v.usedCount++
118
+	v.m.Unlock()
119
+}
120
+
121
+func (v *Volume) release() {
122
+	v.m.Lock()
123
+	v.usedCount--
124
+	v.m.Unlock()
125
+}
0 126
new file mode 100644
... ...
@@ -0,0 +1,26 @@
0
+package volume
1
+
2
+const DefaultDriverName = "local"
3
+
4
+type Driver interface {
5
+	// Name returns the name of the volume driver.
6
+	Name() string
7
+	// Create makes a new volume with the given id.
8
+	Create(string) (Volume, error)
9
+	// Remove deletes the volume.
10
+	Remove(Volume) error
11
+}
12
+
13
+type Volume interface {
14
+	// Name returns the name of the volume
15
+	Name() string
16
+	// DriverName returns the name of the driver which owns this volume.
17
+	DriverName() string
18
+	// Path returns the absolute path to the volume.
19
+	Path() string
20
+	// Mount mounts the volume and returns the absolute path to
21
+	// where it can be consumed.
22
+	Mount() (string, error)
23
+	// Unmount unmounts the volume when it is no longer in use.
24
+	Unmount() error
25
+}
0 26
deleted file mode 100644
... ...
@@ -1,193 +0,0 @@
1
-package volumes
2
-
3
-import (
4
-	"fmt"
5
-	"io/ioutil"
6
-	"os"
7
-	"path/filepath"
8
-	"sync"
9
-
10
-	"github.com/Sirupsen/logrus"
11
-	"github.com/docker/docker/daemon/graphdriver"
12
-	"github.com/docker/docker/pkg/stringid"
13
-)
14
-
15
-type Repository struct {
16
-	configPath string
17
-	driver     graphdriver.Driver
18
-	volumes    map[string]*Volume
19
-	lock       sync.Mutex
20
-}
21
-
22
-func NewRepository(configPath string, driver graphdriver.Driver) (*Repository, error) {
23
-	abspath, err := filepath.Abs(configPath)
24
-	if err != nil {
25
-		return nil, err
26
-	}
27
-
28
-	// Create the config path
29
-	if err := os.MkdirAll(abspath, 0700); err != nil && !os.IsExist(err) {
30
-		return nil, err
31
-	}
32
-
33
-	repo := &Repository{
34
-		driver:     driver,
35
-		configPath: abspath,
36
-		volumes:    make(map[string]*Volume),
37
-	}
38
-
39
-	return repo, repo.restore()
40
-}
41
-
42
-func (r *Repository) newVolume(path string, writable bool) (*Volume, error) {
43
-	var (
44
-		isBindMount bool
45
-		err         error
46
-		id          = stringid.GenerateRandomID()
47
-	)
48
-	if path != "" {
49
-		isBindMount = true
50
-	}
51
-
52
-	if path == "" {
53
-		path, err = r.createNewVolumePath(id)
54
-		if err != nil {
55
-			return nil, err
56
-		}
57
-	}
58
-	path = filepath.Clean(path)
59
-
60
-	// Ignore the error here since the path may not exist
61
-	// Really just want to make sure the path we are using is real(or nonexistent)
62
-	if cleanPath, err := filepath.EvalSymlinks(path); err == nil {
63
-		path = cleanPath
64
-	}
65
-
66
-	v := &Volume{
67
-		ID:          id,
68
-		Path:        path,
69
-		repository:  r,
70
-		Writable:    writable,
71
-		containers:  make(map[string]struct{}),
72
-		configPath:  r.configPath + "/" + id,
73
-		IsBindMount: isBindMount,
74
-	}
75
-
76
-	if err := v.initialize(); err != nil {
77
-		return nil, err
78
-	}
79
-
80
-	r.add(v)
81
-	return v, nil
82
-}
83
-
84
-func (r *Repository) restore() error {
85
-	dir, err := ioutil.ReadDir(r.configPath)
86
-	if err != nil {
87
-		return err
88
-	}
89
-
90
-	for _, v := range dir {
91
-		id := v.Name()
92
-		vol := &Volume{
93
-			ID:         id,
94
-			configPath: r.configPath + "/" + id,
95
-			containers: make(map[string]struct{}),
96
-		}
97
-		if err := vol.FromDisk(); err != nil {
98
-			if !os.IsNotExist(err) {
99
-				logrus.Debugf("Error restoring volume: %v", err)
100
-				continue
101
-			}
102
-			if err := vol.initialize(); err != nil {
103
-				logrus.Debugf("%s", err)
104
-				continue
105
-			}
106
-		}
107
-		r.add(vol)
108
-	}
109
-	return nil
110
-}
111
-
112
-func (r *Repository) Get(path string) *Volume {
113
-	r.lock.Lock()
114
-	vol := r.get(path)
115
-	r.lock.Unlock()
116
-	return vol
117
-}
118
-
119
-func (r *Repository) get(path string) *Volume {
120
-	path, err := filepath.EvalSymlinks(path)
121
-	if err != nil {
122
-		return nil
123
-	}
124
-	return r.volumes[filepath.Clean(path)]
125
-}
126
-
127
-func (r *Repository) add(volume *Volume) {
128
-	if vol := r.get(volume.Path); vol != nil {
129
-		return
130
-	}
131
-	r.volumes[volume.Path] = volume
132
-}
133
-
134
-func (r *Repository) Delete(path string) error {
135
-	r.lock.Lock()
136
-	defer r.lock.Unlock()
137
-	path, err := filepath.EvalSymlinks(path)
138
-	if err != nil {
139
-		return err
140
-	}
141
-	volume := r.get(filepath.Clean(path))
142
-	if volume == nil {
143
-		return fmt.Errorf("Volume %s does not exist", path)
144
-	}
145
-
146
-	containers := volume.Containers()
147
-	if len(containers) > 0 {
148
-		return fmt.Errorf("Volume %s is being used and cannot be removed: used by containers %s", volume.Path, containers)
149
-	}
150
-
151
-	if err := os.RemoveAll(volume.configPath); err != nil {
152
-		return err
153
-	}
154
-
155
-	if !volume.IsBindMount {
156
-		if err := r.driver.Remove(volume.ID); err != nil {
157
-			if !os.IsNotExist(err) {
158
-				return err
159
-			}
160
-		}
161
-	}
162
-
163
-	delete(r.volumes, volume.Path)
164
-	return nil
165
-}
166
-
167
-func (r *Repository) createNewVolumePath(id string) (string, error) {
168
-	if err := r.driver.Create(id, ""); err != nil {
169
-		return "", err
170
-	}
171
-
172
-	path, err := r.driver.Get(id, "")
173
-	if err != nil {
174
-		return "", fmt.Errorf("Driver %s failed to get volume rootfs %s: %v", r.driver, id, err)
175
-	}
176
-
177
-	return path, nil
178
-}
179
-
180
-func (r *Repository) FindOrCreateVolume(path string, writable bool) (*Volume, error) {
181
-	r.lock.Lock()
182
-	defer r.lock.Unlock()
183
-
184
-	if path == "" {
185
-		return r.newVolume(path, writable)
186
-	}
187
-
188
-	if v := r.get(path); v != nil {
189
-		return v, nil
190
-	}
191
-
192
-	return r.newVolume(path, writable)
193
-}
194 1
deleted file mode 100644
... ...
@@ -1,164 +0,0 @@
1
-package volumes
2
-
3
-import (
4
-	"io/ioutil"
5
-	"os"
6
-	"path/filepath"
7
-	"testing"
8
-
9
-	"github.com/docker/docker/daemon/graphdriver"
10
-	_ "github.com/docker/docker/daemon/graphdriver/vfs"
11
-)
12
-
13
-func TestRepositoryFindOrCreate(t *testing.T) {
14
-	root, err := ioutil.TempDir(os.TempDir(), "volumes")
15
-	if err != nil {
16
-		t.Fatal(err)
17
-	}
18
-	defer os.RemoveAll(root)
19
-	repo, err := newRepo(root)
20
-	if err != nil {
21
-		t.Fatal(err)
22
-	}
23
-
24
-	// no path
25
-	v, err := repo.FindOrCreateVolume("", true)
26
-	if err != nil {
27
-		t.Fatal(err)
28
-	}
29
-
30
-	// FIXME: volumes are heavily dependent on the vfs driver, but this should not be so!
31
-	expected := filepath.Join(root, "repo-graph", "vfs", "dir", v.ID)
32
-	if v.Path != expected {
33
-		t.Fatalf("expected new path to be created in %s, got %s", expected, v.Path)
34
-	}
35
-
36
-	// with a non-existant path
37
-	dir := filepath.Join(root, "doesntexist")
38
-	v, err = repo.FindOrCreateVolume(dir, true)
39
-	if err != nil {
40
-		t.Fatal(err)
41
-	}
42
-
43
-	if v.Path != dir {
44
-		t.Fatalf("expected new path to be created in %s, got %s", dir, v.Path)
45
-	}
46
-
47
-	if _, err := os.Stat(v.Path); err != nil {
48
-		t.Fatal(err)
49
-	}
50
-
51
-	// with a pre-existing path
52
-	// can just use the same path from above since it now exists
53
-	v, err = repo.FindOrCreateVolume(dir, true)
54
-	if err != nil {
55
-		t.Fatal(err)
56
-	}
57
-	if v.Path != dir {
58
-		t.Fatalf("expected new path to be created in %s, got %s", dir, v.Path)
59
-	}
60
-
61
-}
62
-
63
-func TestRepositoryGet(t *testing.T) {
64
-	root, err := ioutil.TempDir(os.TempDir(), "volumes")
65
-	if err != nil {
66
-		t.Fatal(err)
67
-	}
68
-	defer os.RemoveAll(root)
69
-	repo, err := newRepo(root)
70
-	if err != nil {
71
-		t.Fatal(err)
72
-	}
73
-
74
-	v, err := repo.FindOrCreateVolume("", true)
75
-	if err != nil {
76
-		t.Fatal(err)
77
-	}
78
-
79
-	v2 := repo.Get(v.Path)
80
-	if v2 == nil {
81
-		t.Fatalf("expected to find volume but didn't")
82
-	}
83
-	if v2 != v {
84
-		t.Fatalf("expected get to return same volume")
85
-	}
86
-}
87
-
88
-func TestRepositoryDelete(t *testing.T) {
89
-	root, err := ioutil.TempDir(os.TempDir(), "volumes")
90
-	if err != nil {
91
-		t.Fatal(err)
92
-	}
93
-	defer os.RemoveAll(root)
94
-	repo, err := newRepo(root)
95
-	if err != nil {
96
-		t.Fatal(err)
97
-	}
98
-
99
-	// with a normal volume
100
-	v, err := repo.FindOrCreateVolume("", true)
101
-	if err != nil {
102
-		t.Fatal(err)
103
-	}
104
-
105
-	if err := repo.Delete(v.Path); err != nil {
106
-		t.Fatal(err)
107
-	}
108
-
109
-	if v := repo.Get(v.Path); v != nil {
110
-		t.Fatalf("expected volume to not exist")
111
-	}
112
-
113
-	if _, err := os.Stat(v.Path); err == nil {
114
-		t.Fatalf("expected volume files to be removed")
115
-	}
116
-
117
-	// with a bind mount
118
-	dir := filepath.Join(root, "test")
119
-	v, err = repo.FindOrCreateVolume(dir, true)
120
-	if err != nil {
121
-		t.Fatal(err)
122
-	}
123
-
124
-	if err := repo.Delete(v.Path); err != nil {
125
-		t.Fatal(err)
126
-	}
127
-
128
-	if v := repo.Get(v.Path); v != nil {
129
-		t.Fatalf("expected volume to not exist")
130
-	}
131
-
132
-	if _, err := os.Stat(v.Path); err != nil && os.IsNotExist(err) {
133
-		t.Fatalf("expected bind volume data to persist after destroying volume")
134
-	}
135
-
136
-	// with container refs
137
-	dir = filepath.Join(root, "test")
138
-	v, err = repo.FindOrCreateVolume(dir, true)
139
-	if err != nil {
140
-		t.Fatal(err)
141
-	}
142
-	v.AddContainer("1234")
143
-
144
-	if err := repo.Delete(v.Path); err == nil {
145
-		t.Fatalf("expected volume delete to fail due to container refs")
146
-	}
147
-
148
-	v.RemoveContainer("1234")
149
-	if err := repo.Delete(v.Path); err != nil {
150
-		t.Fatal(err)
151
-	}
152
-
153
-}
154
-
155
-func newRepo(root string) (*Repository, error) {
156
-	configPath := filepath.Join(root, "repo-config")
157
-	graphDir := filepath.Join(root, "repo-graph")
158
-
159
-	driver, err := graphdriver.GetDriver("vfs", graphDir, []string{})
160
-	if err != nil {
161
-		return nil, err
162
-	}
163
-	return NewRepository(configPath, driver)
164
-}
165 1
deleted file mode 100644
... ...
@@ -1,152 +0,0 @@
1
-package volumes
2
-
3
-import (
4
-	"encoding/json"
5
-	"os"
6
-	"path/filepath"
7
-	"sync"
8
-
9
-	"github.com/docker/docker/pkg/symlink"
10
-)
11
-
12
-type Volume struct {
13
-	ID          string
14
-	Path        string
15
-	IsBindMount bool
16
-	Writable    bool
17
-	containers  map[string]struct{}
18
-	configPath  string
19
-	repository  *Repository
20
-	lock        sync.Mutex
21
-}
22
-
23
-func (v *Volume) IsDir() (bool, error) {
24
-	stat, err := os.Stat(v.Path)
25
-	if err != nil {
26
-		return false, err
27
-	}
28
-
29
-	return stat.IsDir(), nil
30
-}
31
-
32
-func (v *Volume) Containers() []string {
33
-	v.lock.Lock()
34
-
35
-	var containers []string
36
-	for c := range v.containers {
37
-		containers = append(containers, c)
38
-	}
39
-
40
-	v.lock.Unlock()
41
-	return containers
42
-}
43
-
44
-func (v *Volume) RemoveContainer(containerId string) {
45
-	v.lock.Lock()
46
-	delete(v.containers, containerId)
47
-	v.lock.Unlock()
48
-}
49
-
50
-func (v *Volume) AddContainer(containerId string) {
51
-	v.lock.Lock()
52
-	v.containers[containerId] = struct{}{}
53
-	v.lock.Unlock()
54
-}
55
-
56
-func (v *Volume) initialize() error {
57
-	v.lock.Lock()
58
-	defer v.lock.Unlock()
59
-
60
-	if _, err := os.Stat(v.Path); err != nil {
61
-		if !os.IsNotExist(err) {
62
-			return err
63
-		}
64
-		if err := os.MkdirAll(v.Path, 0755); err != nil {
65
-			return err
66
-		}
67
-	}
68
-
69
-	if err := os.MkdirAll(v.configPath, 0755); err != nil {
70
-		return err
71
-	}
72
-
73
-	return v.toDisk()
74
-}
75
-
76
-func (v *Volume) ToDisk() error {
77
-	v.lock.Lock()
78
-	defer v.lock.Unlock()
79
-	return v.toDisk()
80
-}
81
-
82
-func (v *Volume) toDisk() error {
83
-	jsonPath, err := v.jsonPath()
84
-	if err != nil {
85
-		return err
86
-	}
87
-	f, err := os.OpenFile(jsonPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
88
-	if err != nil {
89
-		return err
90
-	}
91
-	if err := json.NewEncoder(f).Encode(v); err != nil {
92
-		f.Close()
93
-		return err
94
-	}
95
-	return f.Close()
96
-}
97
-
98
-func (v *Volume) FromDisk() error {
99
-	v.lock.Lock()
100
-	defer v.lock.Unlock()
101
-	pth, err := v.jsonPath()
102
-	if err != nil {
103
-		return err
104
-	}
105
-
106
-	jsonSource, err := os.Open(pth)
107
-	if err != nil {
108
-		return err
109
-	}
110
-	defer jsonSource.Close()
111
-
112
-	dec := json.NewDecoder(jsonSource)
113
-
114
-	return dec.Decode(v)
115
-}
116
-
117
-func (v *Volume) jsonPath() (string, error) {
118
-	return v.GetRootResourcePath("config.json")
119
-}
120
-
121
-// Evalutes `path` in the scope of the volume's root path, with proper path
122
-// sanitisation. Symlinks are all scoped to the root of the volume, as
123
-// though the volume's root was `/`.
124
-//
125
-// The volume's root path is the host-facing path of the root of the volume's
126
-// mountpoint inside a container.
127
-//
128
-// NOTE: The returned path is *only* safely scoped inside the volume's root
129
-//       if no component of the returned path changes (such as a component
130
-//       symlinking to a different path) between using this method and using the
131
-//       path. See symlink.FollowSymlinkInScope for more details.
132
-func (v *Volume) GetResourcePath(path string) (string, error) {
133
-	cleanPath := filepath.Join("/", path)
134
-	return symlink.FollowSymlinkInScope(filepath.Join(v.Path, cleanPath), v.Path)
135
-}
136
-
137
-// Evalutes `path` in the scope of the volume's config path, with proper path
138
-// sanitisation. Symlinks are all scoped to the root of the config path, as
139
-// though the config path was `/`.
140
-//
141
-// The config path of a volume is not exposed to the container and is just used
142
-// to store volume configuration options and other internal information. If in
143
-// doubt, you probably want to just use v.GetResourcePath.
144
-//
145
-// NOTE: The returned path is *only* safely scoped inside the volume's config
146
-//       path if no component of the returned path changes (such as a component
147
-//       symlinking to a different path) between using this method and using the
148
-//       path. See symlink.FollowSymlinkInScope for more details.
149
-func (v *Volume) GetRootResourcePath(path string) (string, error) {
150
-	cleanPath := filepath.Join("/", path)
151
-	return symlink.FollowSymlinkInScope(filepath.Join(v.configPath, cleanPath), v.configPath)
152
-}
153 1
deleted file mode 100644
... ...
@@ -1,55 +0,0 @@
1
-package volumes
2
-
3
-import (
4
-	"os"
5
-	"testing"
6
-
7
-	"github.com/docker/docker/pkg/stringutils"
8
-)
9
-
10
-func TestContainers(t *testing.T) {
11
-	v := &Volume{containers: make(map[string]struct{})}
12
-	id := "1234"
13
-
14
-	v.AddContainer(id)
15
-
16
-	if v.Containers()[0] != id {
17
-		t.Fatalf("adding a container ref failed")
18
-	}
19
-
20
-	v.RemoveContainer(id)
21
-	if len(v.Containers()) != 0 {
22
-		t.Fatalf("removing container failed")
23
-	}
24
-}
25
-
26
-// os.Stat(v.Path) is returning ErrNotExist, initialize catch it and try to
27
-// mkdir v.Path but it dies and correctly returns the error
28
-func TestInitializeCannotMkdirOnNonExistentPath(t *testing.T) {
29
-	v := &Volume{Path: "nonexistentpath"}
30
-
31
-	err := v.initialize()
32
-	if err == nil {
33
-		t.Fatal("Expected not to initialize volume with a non existent path")
34
-	}
35
-
36
-	if !os.IsNotExist(err) {
37
-		t.Fatalf("Expected to get ErrNotExist error, got %s", err)
38
-	}
39
-}
40
-
41
-// os.Stat(v.Path) is NOT returning ErrNotExist so skip and return error from
42
-// initialize
43
-func TestInitializeCannotStatPathFileNameTooLong(t *testing.T) {
44
-	// ENAMETOOLONG
45
-	v := &Volume{Path: stringutils.GenerateRandomAlphaOnlyString(300)}
46
-
47
-	err := v.initialize()
48
-	if err == nil {
49
-		t.Fatal("Expected not to initialize volume with a non existent path")
50
-	}
51
-
52
-	if os.IsNotExist(err) {
53
-		t.Fatal("Expected to not get ErrNotExist")
54
-	}
55
-}