Signed by all authors:
Signed-off-by: Michael Crosby <crosbymichael@gmail.com>
Signed-off-by: Arnaud Porterie <arnaud.porterie@docker.com>
Signed-off-by: David Calavera <david.calavera@gmail.com>
Signed-off-by: Jeff Lindsay <progrium@gmail.com>
Signed-off-by: Alexander Morozov <lk4d4@docker.com>
Signed-off-by: Luke Marsden <luke@clusterhq.com>
@@ -6,19 +6,18 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"net"
 	"net/http"
 	"os"
 	"path/filepath"
 	"reflect"
 	"strings"
 	"text/template"
-	"time"
 
 	"github.com/docker/docker/cliconfig"
 	"github.com/docker/docker/pkg/homedir"
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/pkg/term"
+	"github.com/docker/docker/utils"
 )
 
 // DockerCli represents the docker command line client.
@@ -178,19 +177,7 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, keyFile string, proto, a
 	tr := &http.Transport{
 		TLSClientConfig: tlsConfig,
 	}
-
-	// Why 32? See https://github.com/docker/docker/pull/8035.
-	timeout := 32 * time.Second
-	if proto == "unix" {
-		// No need for compression in local communications.
-		tr.DisableCompression = true
-		tr.Dial = func(_, _ string) (net.Conn, error) {
-			return net.DialTimeout(proto, addr, timeout)
-		}
-	} else {
-		tr.Proxy = http.ProxyFromEnvironment
-		tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
-	}
+	utils.ConfigureTCPTransport(tr, proto, addr)
 
 	configFile, e := cliconfig.Load(filepath.Join(homedir.Get(), ".docker"))
 	if e != nil {
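(Illustration, not part of the diff: a minimal sketch of the extracted `utils.ConfigureTCPTransport` helper, assuming it simply relocates the dial/timeout logic removed above.)

```go
package utils

import (
	"net"
	"net/http"
	"time"
)

// ConfigureTCPTransport configures the dialer of tr for the given proto and addr.
// Sketch only: the body mirrors the code removed from NewDockerCli above.
func ConfigureTCPTransport(tr *http.Transport, proto, addr string) {
	// Why 32? See https://github.com/docker/docker/pull/8035.
	timeout := 32 * time.Second
	if proto == "unix" {
		// No need for compression in local communications.
		tr.DisableCompression = true
		tr.Dial = func(_, _ string) (net.Conn, error) {
			return net.DialTimeout(proto, addr, timeout)
		}
	} else {
		tr.Proxy = http.ProxyFromEnvironment
		tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
	}
}
```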
@@ -773,7 +773,7 @@ func (b *Builder) clearTmp() {
 			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
 			return
 		}
-		b.Daemon.DeleteVolumes(tmp.VolumePaths())
+		b.Daemon.DeleteVolumes(tmp)
 		delete(b.TmpContainers, c)
 		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c))
 	}
@@ -26,9 +26,11 @@ import (
 	"github.com/docker/docker/pkg/broadcastwriter"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/docker/pkg/mount"
 	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/volume"
 )
 
 var (
@@ -48,46 +50,37 @@ type StreamConfig struct {
 // CommonContainer holds the settings for a container which are applicable
 // across all platforms supported by the daemon.
 type CommonContainer struct {
+	StreamConfig
+
 	*State `json:"State"` // Needed for remote api version <= 1.11
 	root   string         // Path to the "home" of the container, including metadata.
 	basefs string         // Path to the graphdriver mountpoint
 
-	ID string
-
-	Created time.Time
-
-	Path string
-	Args []string
-
-	Config  *runconfig.Config
-	ImageID string `json:"Image"`
-
-	NetworkSettings *network.Settings
-
-	ResolvConfPath string
-	HostnamePath   string
-	HostsPath      string
-	LogPath        string
-	Name           string
-	Driver         string
-	ExecDriver     string
-
-	command *execdriver.Command
-	StreamConfig
-
-	daemon *Daemon
+	ID                       string
+	Created                  time.Time
+	Path                     string
+	Args                     []string
+	Config                   *runconfig.Config
+	ImageID                  string `json:"Image"`
+	NetworkSettings          *network.Settings
+	ResolvConfPath           string
+	HostnamePath             string
+	HostsPath                string
+	LogPath                  string
+	Name                     string
+	Driver                   string
+	ExecDriver               string
 	MountLabel, ProcessLabel string
 	RestartCount             int
 	UpdateDns                bool
+	MountPoints              map[string]*mountPoint
 
-	// Maps container paths to volume paths. The key in this is the path to which
-	// the volume is being mounted inside the container. Value is the path of the
-	// volume on disk
-	Volumes map[string]string
 	hostConfig *runconfig.HostConfig
+	command    *execdriver.Command
 
 	monitor      *containerMonitor
 	execCommands *execStore
+	daemon       *Daemon
 	// logDriver for closing
 	logDriver logger.Logger
 	logCopier *logger.Copier
@@ -259,9 +252,6 @@ func (container *Container) Start() (err error) {
 		return err
 	}
 	container.verifyDaemonSettings()
-	if err := container.prepareVolumes(); err != nil {
-		return err
-	}
 	linkedEnv, err := container.setupLinkedContainers()
 	if err != nil {
 		return err
@@ -273,10 +263,13 @@ func (container *Container) Start() (err error) {
 	if err := populateCommand(container, env); err != nil {
 		return err
 	}
-	if err := container.setupMounts(); err != nil {
+
+	mounts, err := container.setupMounts()
+	if err != nil {
 		return err
 	}
 
+	container.command.Mounts = mounts
 	return container.waitForStart()
 }
 
@@ -571,27 +564,38 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
 	if err := container.Mount(); err != nil {
 		return nil, err
 	}
+	var paths []string
+	unmount := func() {
+		for _, p := range paths {
+			syscall.Unmount(p, 0)
+		}
+	}
 	defer func() {
 		if err != nil {
+			// unmount any volumes
+			unmount()
+			// unmount the container's rootfs
 			container.Unmount()
 		}
 	}()
-
-	if err = container.mountVolumes(); err != nil {
-		container.unmountVolumes()
+	mounts, err := container.setupMounts()
+	if err != nil {
 		return nil, err
 	}
-	defer func() {
+	for _, m := range mounts {
+		dest, err := container.GetResourcePath(m.Destination)
 		if err != nil {
-			container.unmountVolumes()
+			return nil, err
 		}
-	}()
-
+		paths = append(paths, dest)
+		if err := mount.Mount(m.Source, dest, "bind", "rbind,ro"); err != nil {
+			return nil, err
+		}
+	}
 	basePath, err := container.GetResourcePath(resource)
 	if err != nil {
 		return nil, err
 	}
-
 	stat, err := os.Stat(basePath)
 	if err != nil {
 		return nil, err
@@ -605,7 +609,6 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
 		filter = []string{filepath.Base(basePath)}
 		basePath = filepath.Dir(basePath)
 	}
-
 	archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
 		Compression:  archive.Uncompressed,
 		IncludeFiles: filter,
@@ -613,10 +616,9 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
 	if err != nil {
 		return nil, err
 	}
-
 	return ioutils.NewReadCloserWrapper(archive, func() error {
 			err := archive.Close()
-			container.unmountVolumes()
+			unmount()
 			container.Unmount()
 			return err
 		}),
@@ -1007,3 +1009,84 @@ func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error)
 	}
 	return written, err
 }
+
+func (container *Container) networkMounts() []execdriver.Mount {
+	var mounts []execdriver.Mount
+	if container.ResolvConfPath != "" {
+		mounts = append(mounts, execdriver.Mount{
+			Source:      container.ResolvConfPath,
+			Destination: "/etc/resolv.conf",
+			Writable:    !container.hostConfig.ReadonlyRootfs,
+			Private:     true,
+		})
+	}
+	if container.HostnamePath != "" {
+		mounts = append(mounts, execdriver.Mount{
+			Source:      container.HostnamePath,
+			Destination: "/etc/hostname",
+			Writable:    !container.hostConfig.ReadonlyRootfs,
+			Private:     true,
+		})
+	}
+	if container.HostsPath != "" {
+		mounts = append(mounts, execdriver.Mount{
+			Source:      container.HostsPath,
+			Destination: "/etc/hosts",
+			Writable:    !container.hostConfig.ReadonlyRootfs,
+			Private:     true,
+		})
+	}
+	return mounts
+}
+
+func (container *Container) AddLocalMountPoint(name, destination string, rw bool) {
+	container.MountPoints[destination] = &mountPoint{
+		Name:        name,
+		Driver:      volume.DefaultDriverName,
+		Destination: destination,
+		RW:          rw,
+	}
+}
+
+func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) {
+	container.MountPoints[destination] = &mountPoint{
+		Name:        vol.Name(),
+		Driver:      vol.DriverName(),
+		Destination: destination,
+		RW:          rw,
+		Volume:      vol,
+	}
+}
+
+func (container *Container) IsDestinationMounted(destination string) bool {
+	return container.MountPoints[destination] != nil
+}
+
+func (container *Container) PrepareMountPoints() error {
+	for _, config := range container.MountPoints {
+		if len(config.Driver) > 0 {
+			v, err := createVolume(config.Name, config.Driver)
+			if err != nil {
+				return err
+			}
+			config.Volume = v
+		}
+	}
+	return nil
+}
+
+func (container *Container) RemoveMountPoints() error {
+	for _, m := range container.MountPoints {
+		if m.Volume != nil {
+			if err := removeVolume(m.Volume); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (container *Container) ShouldRestart() bool {
+	return container.hostConfig.RestartPolicy.Name == "always" ||
+		(container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0)
+}
@@ -42,14 +42,7 @@ type Container struct {
 	// Fields below here are platform specific.
 
 	AppArmorProfile string
-
-	// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
-	// Easier than migrating older container configs :)
-	VolumesRW map[string]bool
-
-	AppliedVolumesFrom map[string]struct{}
-
-	activeLinks map[string]*links.Link
+	activeLinks     map[string]*links.Link
 }
 
 func killProcessDirectly(container *Container) error {
@@ -27,12 +27,6 @@ type Container struct {
 	// removed in subsequent PRs.
 
 	AppArmorProfile string
-
-	// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
-	// Easier than migrating older container configs :)
-	VolumesRW map[string]bool
-
-	AppliedVolumesFrom map[string]struct{}
 	// ---- END OF TEMPORARY DECLARATION ----
 
 }
@@ -2,11 +2,15 @@ package daemon
 
 import (
 	"fmt"
+	"os"
 	"path/filepath"
+	"strings"
 
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/libcontainer/label"
 )
@@ -87,17 +91,52 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
 	if err := daemon.createRootfs(container); err != nil {
 		return nil, nil, err
 	}
-	if hostConfig != nil {
-		if err := daemon.setHostConfig(container, hostConfig); err != nil {
-			return nil, nil, err
-		}
+	if err := daemon.setHostConfig(container, hostConfig); err != nil {
+		return nil, nil, err
 	}
 	if err := container.Mount(); err != nil {
 		return nil, nil, err
 	}
 	defer container.Unmount()
-	if err := container.prepareVolumes(); err != nil {
-		return nil, nil, err
+
+	for spec := range config.Volumes {
+		var (
+			name, destination string
+			parts             = strings.Split(spec, ":")
+		)
+		switch len(parts) {
+		case 2:
+			name, destination = parts[0], filepath.Clean(parts[1])
+		default:
+			name = stringid.GenerateRandomID()
+			destination = filepath.Clean(parts[0])
+		}
+		// Skip volumes for which we already have something mounted on that
+		// destination because of a --volume-from.
+		if container.IsDestinationMounted(destination) {
+			continue
+		}
+		path, err := container.GetResourcePath(destination)
+		if err != nil {
+			return nil, nil, err
+		}
+		if stat, err := os.Stat(path); err == nil && !stat.IsDir() {
+			return nil, nil, fmt.Errorf("cannot mount volume over existing file, file exists %s", path)
+		}
+		v, err := createVolume(name, config.VolumeDriver)
+		if err != nil {
+			return nil, nil, err
+		}
+		rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, destination), container.basefs)
+		if err != nil {
+			return nil, nil, err
+		}
+		if path, err = v.Mount(); err != nil {
+			return nil, nil, err
+		}
+		copyExistingContents(rootfs, path)
+
+		container.AddMountPointWithVolume(destination, v, true)
 	}
 	if err := container.ToDisk(); err != nil {
 		return nil, nil, err
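(Illustration, not part of the diff: the two `Volumes` spec shapes handled by the switch above. A `name:/dest` spec keeps its name, while a bare `/dest` gets a generated random ID in the daemon; the constant used below is a stand-in for that ID.)

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// splitVolumeSpec mirrors the switch in Create above: "name:/dest" keeps the
// given name, while a bare "/dest" would receive a random ID in the real daemon.
func splitVolumeSpec(spec string) (name, destination string) {
	parts := strings.Split(spec, ":")
	switch len(parts) {
	case 2:
		return parts[0], filepath.Clean(parts[1])
	default:
		return "generated-random-id", filepath.Clean(parts[0])
	}
}

func main() {
	fmt.Println(splitVolumeSpec("foo:/data")) // foo /data
	fmt.Println(splitVolumeSpec("/data"))     // generated-random-id /data
}
```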
@@ -46,9 +46,12 @@ import (
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/trust"
 	"github.com/docker/docker/utils"
-	"github.com/docker/docker/volumes"
+	volumedrivers "github.com/docker/docker/volume/drivers"
+	"github.com/docker/docker/volume/local"
 )
 
+const defaultVolumesPathName = "volumes"
+
 var (
 	validContainerNameChars   = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
 	validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
@@ -99,7 +102,6 @@ type Daemon struct {
 	repositories  *graph.TagStore
 	idIndex       *truncindex.TruncIndex
 	sysInfo       *sysinfo.SysInfo
-	volumes       *volumes.Repository
 	config        *Config
 	containerGraph *graphdb.Database
 	driver         graphdriver.Driver
@@ -109,6 +111,7 @@ type Daemon struct {
 	RegistryService *registry.Service
 	EventsService   *events.Events
 	netController   libnetwork.NetworkController
+	root            string
 }
 
 // Get looks for a container using the provided information, which could be
@@ -209,7 +212,13 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
 	// we'll waste time if we update it for every container
 	daemon.idIndex.Add(container.ID)
 
-	container.registerVolumes()
+	if err := daemon.verifyOldVolumesInfo(container); err != nil {
+		return err
+	}
+
+	if err := container.PrepareMountPoints(); err != nil {
+		return err
+	}
 
 	if container.IsRunning() {
 		logrus.Debugf("killing old running container %s", container.ID)
@@ -249,10 +258,15 @@ func (daemon *Daemon) ensureName(container *Container) error {
 }
 
 func (daemon *Daemon) restore() error {
+	type cr struct {
+		container  *Container
+		registered bool
+	}
+
 	var (
 		debug         = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "")
-		containers    = make(map[string]*Container)
 		currentDriver = daemon.driver.String()
+		containers    = make(map[string]*cr)
 	)
 
 	if !debug {
@@ -278,14 +292,12 @@ func (daemon *Daemon) restore() error {
 		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
 			logrus.Debugf("Loaded container %v", container.ID)
 
-			containers[container.ID] = container
+			containers[container.ID] = &cr{container: container}
 		} else {
 			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
 		}
 	}
 
-	registeredContainers := []*Container{}
-
 	if entities := daemon.containerGraph.List("/", -1); entities != nil {
 		for _, p := range entities.Paths() {
 			if !debug && logrus.GetLevel() == logrus.InfoLevel {
@@ -294,50 +306,43 @@ func (daemon *Daemon) restore() error {
 
 			e := entities[p]
 
-			if container, ok := containers[e.ID()]; ok {
-				if err := daemon.register(container, false); err != nil {
-					logrus.Debugf("Failed to register container %s: %s", container.ID, err)
-				}
-
-				registeredContainers = append(registeredContainers, container)
-
-				// delete from the map so that a new name is not automatically generated
-				delete(containers, e.ID())
+			if c, ok := containers[e.ID()]; ok {
+				c.registered = true
 			}
 		}
 	}
 
-	// Any containers that are left over do not exist in the graph
-	for _, container := range containers {
-		// Try to set the default name for a container if it exists prior to links
-		container.Name, err = daemon.generateNewName(container.ID)
-		if err != nil {
-			logrus.Debugf("Setting default id - %s", err)
-		}
+	group := sync.WaitGroup{}
+	for _, c := range containers {
+		group.Add(1)
 
-		if err := daemon.register(container, false); err != nil {
-			logrus.Debugf("Failed to register container %s: %s", container.ID, err)
-		}
+		go func(container *Container, registered bool) {
+			defer group.Done()
 
-		registeredContainers = append(registeredContainers, container)
-	}
+			if !registered {
+				// Try to set the default name for a container if it exists prior to links
+				container.Name, err = daemon.generateNewName(container.ID)
+				if err != nil {
+					logrus.Debugf("Setting default id - %s", err)
+				}
+			}
 
-	// check the restart policy on the containers and restart any container with
-	// the restart policy of "always"
-	if daemon.config.AutoRestart {
-		logrus.Debug("Restarting containers...")
+			if err := daemon.register(container, false); err != nil {
+				logrus.Debugf("Failed to register container %s: %s", container.ID, err)
+			}
 
-		for _, container := range registeredContainers {
-			if container.hostConfig.RestartPolicy.IsAlways() ||
-				(container.hostConfig.RestartPolicy.IsOnFailure() && container.ExitCode != 0) {
+			// check the restart policy on the containers and restart any container with
+			// the restart policy of "always"
+			if daemon.config.AutoRestart && container.ShouldRestart() {
 				logrus.Debugf("Starting container %s", container.ID)
 
 				if err := container.Start(); err != nil {
					logrus.Debugf("Failed to start container %s: %s", container.ID, err)
 				}
 			}
-		}
+		}(c.container, c.registered)
 	}
+	group.Wait()
 
 	if !debug {
 		if logrus.GetLevel() == logrus.InfoLevel {
@@ -535,6 +540,7 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID
 			ExecDriver:   daemon.execDriver.Name(),
 			State:        NewState(),
 			execCommands: newExecStore(),
+			MountPoints:  map[string]*mountPoint{},
 		},
 	}
 	container.root = daemon.containerRoot(container.ID)
@@ -785,15 +791,11 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
 		return nil, err
 	}
 
-	volumesDriver, err := graphdriver.GetDriver("vfs", config.Root, config.GraphOptions)
-	if err != nil {
-		return nil, err
-	}
-
-	volumes, err := volumes.NewRepository(filepath.Join(config.Root, "volumes"), volumesDriver)
+	volumesDriver, err := local.New(filepath.Join(config.Root, defaultVolumesPathName))
 	if err != nil {
 		return nil, err
 	}
+	volumedrivers.Register(volumesDriver, volumesDriver.Name())
 
 	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
 	if err != nil {
@@ -872,7 +874,6 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
 	d.repositories = repositories
 	d.idIndex = truncindex.NewTruncIndex([]string{})
 	d.sysInfo = sysInfo
-	d.volumes = volumes
 	d.config = config
 	d.sysInitPath = sysInitPath
 	d.execDriver = ed
@@ -880,6 +881,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
 	d.defaultLogConfig = config.LogConfig
 	d.RegistryService = registryService
 	d.EventsService = eventsService
+	d.root = config.Root
 
 	if err := d.restore(); err != nil {
 		return nil, err
@@ -1218,6 +1220,10 @@ func (daemon *Daemon) verifyHostConfig(hostConfig *runconfig.HostConfig) ([]stri
 }
 
 func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
+	if err := daemon.registerMountPoints(container, hostConfig); err != nil {
+		return err
+	}
+
 	container.Lock()
 	defer container.Unlock()
 	if err := parseSecurityOpt(container, hostConfig); err != nil {
@@ -1231,6 +1237,5 @@ func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.
 
 	container.hostConfig = hostConfig
 	container.toDisk()
-
 	return nil
 }
@@ -71,21 +71,12 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error
 		}
 		container.LogEvent("destroy")
 		if config.RemoveVolume {
-			daemon.DeleteVolumes(container.VolumePaths())
+			container.RemoveMountPoints()
 		}
 	}
 	return nil
 }
 
-func (daemon *Daemon) DeleteVolumes(volumeIDs map[string]struct{}) {
-	for id := range volumeIDs {
-		if err := daemon.volumes.Delete(id); err != nil {
-			logrus.Infof("%s", err)
-			continue
-		}
-	}
-}
-
 func (daemon *Daemon) Rm(container *Container) (err error) {
 	return daemon.commonRm(container, false)
 }
@@ -134,7 +125,6 @@ func (daemon *Daemon) commonRm(container *Container, forceRemove bool) (err erro
 		}
 	}()
 
-	container.derefVolumes()
 	if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
 		logrus.Debugf("Unable to remove container from link graph: %s", err)
 	}
@@ -162,3 +152,7 @@ func (daemon *Daemon) commonRm(container *Container, forceRemove bool) (err erro
 
 	return nil
 }
+
+func (daemon *Daemon) DeleteVolumes(c *Container) error {
+	return c.RemoveMountPoints()
+}
@@ -10,6 +10,10 @@ import (
 type ContainerJSONRaw struct {
 	*Container
 	HostConfig *runconfig.HostConfig
+
+	// Unused fields for backward compatibility with API versions < 1.12.
+	Volumes   map[string]string
+	VolumesRW map[string]bool
 }
 
 func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error) {
@@ -48,6 +52,14 @@ func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error
 		FinishedAt: container.State.FinishedAt,
 	}
 
+	volumes := make(map[string]string)
+	volumesRW := make(map[string]bool)
+
+	for _, m := range container.MountPoints {
+		volumes[m.Destination] = m.Path()
+		volumesRW[m.Destination] = m.RW
+	}
+
 	contJSON := &types.ContainerJSON{
 		Id:      container.ID,
 		Created: container.Created,
@@ -67,8 +79,8 @@ func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error
 		ExecDriver:      container.ExecDriver,
 		MountLabel:      container.MountLabel,
 		ProcessLabel:    container.ProcessLabel,
-		Volumes:         container.Volumes,
-		VolumesRW:       container.VolumesRW,
+		Volumes:         volumes,
+		VolumesRW:       volumesRW,
 		AppArmorProfile: container.AppArmorProfile,
 		ExecIDs:         container.GetExecIDs(),
 		HostConfig:      &hostConfig,
@@ -1,213 +1,116 @@
 package daemon
 
 import (
+	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"os"
 	"path/filepath"
-	"sort"
 	"strings"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/pkg/chrootarchive"
-	"github.com/docker/docker/pkg/mount"
-	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/volume"
+	volumedrivers "github.com/docker/docker/volume/drivers"
 )
 
-type volumeMount struct {
-	containerPath string
-	hostPath      string
-	writable      bool
-	copyData      bool
-	from          string
-}
-
-func (container *Container) createVolumes() error {
-	mounts := make(map[string]*volumeMount)
-
-	// get the normal volumes
-	for path := range container.Config.Volumes {
-		path = filepath.Clean(path)
-		// skip if there is already a volume for this container path
-		if _, exists := container.Volumes[path]; exists {
-			continue
-		}
-
-		realPath, err := container.GetResourcePath(path)
-		if err != nil {
-			return err
-		}
-		if stat, err := os.Stat(realPath); err == nil {
-			if !stat.IsDir() {
-				return fmt.Errorf("can't mount to container path, file exists - %s", path)
-			}
-		}
-
-		mnt := &volumeMount{
-			containerPath: path,
-			writable:      true,
-			copyData:      true,
-		}
-		mounts[mnt.containerPath] = mnt
-	}
-
-	// Get all the bind mounts
-	// track bind paths separately due to #10618
-	bindPaths := make(map[string]struct{})
-	for _, spec := range container.hostConfig.Binds {
-		mnt, err := parseBindMountSpec(spec)
-		if err != nil {
-			return err
-		}
-
-		// #10618
-		if _, exists := bindPaths[mnt.containerPath]; exists {
-			return fmt.Errorf("Duplicate volume mount %s", mnt.containerPath)
-		}
-
-		bindPaths[mnt.containerPath] = struct{}{}
-		mounts[mnt.containerPath] = mnt
-	}
-
-	// Get volumes from
-	for _, from := range container.hostConfig.VolumesFrom {
-		cID, mode, err := parseVolumesFromSpec(from)
-		if err != nil {
-			return err
-		}
-		if _, exists := container.AppliedVolumesFrom[cID]; exists {
-			// skip since it's already been applied
-			continue
-		}
+var localMountErr = fmt.Errorf("Invalid driver: %s driver doesn't support named volumes", volume.DefaultDriverName)
 
-		c, err := container.daemon.Get(cID)
-		if err != nil {
-			return fmt.Errorf("container %s not found, impossible to mount its volumes", cID)
-		}
+type mountPoint struct {
+	Name        string
+	Destination string
+	Driver      string
+	RW          bool
+	Volume      volume.Volume `json:"-"`
+	Source      string
+}
 
-		for _, mnt := range c.volumeMounts() {
-			mnt.writable = mnt.writable && (mode == "rw")
-			mnt.from = cID
-			mounts[mnt.containerPath] = mnt
-		}
+func (m *mountPoint) Setup() (string, error) {
+	if m.Volume != nil {
+		return m.Volume.Mount()
 	}
 
-	for _, mnt := range mounts {
-		containerMntPath, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, mnt.containerPath), container.basefs)
-		if err != nil {
-			return err
-		}
-
-		// Create the actual volume
-		v, err := container.daemon.volumes.FindOrCreateVolume(mnt.hostPath, mnt.writable)
-		if err != nil {
-			return err
-		}
-
-		container.VolumesRW[mnt.containerPath] = mnt.writable
-		container.Volumes[mnt.containerPath] = v.Path
-		v.AddContainer(container.ID)
-		if mnt.from != "" {
-			container.AppliedVolumesFrom[mnt.from] = struct{}{}
-		}
-
-		if mnt.writable && mnt.copyData {
-			// Copy whatever is in the container at the containerPath to the volume
-			copyExistingContents(containerMntPath, v.Path)
+	if len(m.Source) > 0 {
+		if _, err := os.Stat(m.Source); err != nil {
+			if !os.IsNotExist(err) {
+				return "", err
+			}
+			if err := os.MkdirAll(m.Source, 0755); err != nil {
+				return "", err
+			}
 		}
+		return m.Source, nil
 	}
 
-	return nil
+	return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined")
 }
 
-// sortedVolumeMounts returns the list of container volume mount points sorted in lexicographic order
-func (container *Container) sortedVolumeMounts() []string {
-	var mountPaths []string
-	for path := range container.Volumes {
-		mountPaths = append(mountPaths, path)
+func (m *mountPoint) Path() string {
+	if m.Volume != nil {
+		return m.Volume.Path()
 	}
 
-	sort.Strings(mountPaths)
-	return mountPaths
+	return m.Source
 }
 
-func (container *Container) VolumePaths() map[string]struct{} {
-	var paths = make(map[string]struct{})
-	for _, path := range container.Volumes {
-		paths[path] = struct{}{}
+func parseBindMount(spec string, config *runconfig.Config) (*mountPoint, error) {
+	bind := &mountPoint{
+		RW: true,
 	}
-	return paths
-}
-
-func (container *Container) registerVolumes() {
-	for path := range container.VolumePaths() {
-		if v := container.daemon.volumes.Get(path); v != nil {
-			v.AddContainer(container.ID)
-			continue
-		}
+	arr := strings.Split(spec, ":")
 
-		// if container was created with an old daemon, this volume may not be registered so we need to make sure it gets registered
-		writable := true
-		if rw, exists := container.VolumesRW[path]; exists {
-			writable = rw
-		}
-		v, err := container.daemon.volumes.FindOrCreateVolume(path, writable)
-		if err != nil {
-			logrus.Debugf("error registering volume %s: %v", path, err)
-			continue
+	switch len(arr) {
+	case 2:
+		bind.Destination = arr[1]
+	case 3:
+		bind.Destination = arr[1]
+		if !validMountMode(arr[2]) {
+			return nil, fmt.Errorf("invalid mode for volumes-from: %s", arr[2])
		}
-		v.AddContainer(container.ID)
+		bind.RW = arr[2] == "rw"
+	default:
+		return nil, fmt.Errorf("Invalid volume specification: %s", spec)
 	}
-}
 
-func (container *Container) derefVolumes() {
-	for path := range container.VolumePaths() {
-		vol := container.daemon.volumes.Get(path)
-		if vol == nil {
-			logrus.Debugf("Volume %s was not found and could not be dereferenced", path)
-			continue
+	if !filepath.IsAbs(arr[0]) {
+		bind.Driver, bind.Name = parseNamedVolumeInfo(arr[0], config)
+		if bind.Driver == volume.DefaultDriverName {
+			return nil, localMountErr
 		}
-		vol.RemoveContainer(container.ID)
+	} else {
+		bind.Source = filepath.Clean(arr[0])
 	}
-}
 
-func parseBindMountSpec(spec string) (*volumeMount, error) {
-	arr := strings.Split(spec, ":")
+	bind.Destination = filepath.Clean(bind.Destination)
+	return bind, nil
+}
 
-	mnt := &volumeMount{}
-	switch len(arr) {
+func parseNamedVolumeInfo(info string, config *runconfig.Config) (driver string, name string) {
+	p := strings.SplitN(info, "/", 2)
+	switch len(p) {
 	case 2:
-		mnt.hostPath = arr[0]
-		mnt.containerPath = arr[1]
-		mnt.writable = true
-	case 3:
-		mnt.hostPath = arr[0]
-		mnt.containerPath = arr[1]
-		mnt.writable = validMountMode(arr[2]) && arr[2] == "rw"
+		driver = p[0]
+		name = p[1]
 	default:
-		return nil, fmt.Errorf("Invalid volume specification: %s", spec)
-	}
-
-	if !filepath.IsAbs(mnt.hostPath) {
-		return nil, fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", mnt.hostPath)
+		if driver = config.VolumeDriver; len(driver) == 0 {
+			driver = volume.DefaultDriverName
+		}
+		name = p[0]
 	}
 
-	mnt.hostPath = filepath.Clean(mnt.hostPath)
-	mnt.containerPath = filepath.Clean(mnt.containerPath)
-	return mnt, nil
+	return
 }
 
-func parseVolumesFromSpec(spec string) (string, string, error) {
-	specParts := strings.SplitN(spec, ":", 2)
-	if len(specParts) == 0 {
+func parseVolumesFrom(spec string) (string, string, error) {
+	if len(spec) == 0 {
 		return "", "", fmt.Errorf("malformed volumes-from specification: %s", spec)
 	}
 
-	var (
-		id   = specParts[0]
-		mode = "rw"
-	)
+	specParts := strings.SplitN(spec, ":", 2)
+	id := specParts[0]
+	mode := "rw"
+
 	if len(specParts) == 2 {
 		mode = specParts[1]
 		if !validMountMode(mode) {
@@ -222,7 +125,6 @@ func validMountMode(mode string) bool {
 		"rw": true,
 		"ro": true,
 	}
-
 	return validModes[mode]
 }
 
@@ -240,34 +142,16 @@ func (container *Container) specialMounts() []execdriver.Mount {
 	return mounts
 }
 
-func (container *Container) volumeMounts() map[string]*volumeMount {
-	mounts := make(map[string]*volumeMount)
-
-	for containerPath, path := range container.Volumes {
-		v := container.daemon.volumes.Get(path)
-		if v == nil {
-			// This should never happen
-			logrus.Debugf("reference by container %s to non-existent volume path %s", container.ID, path)
-			continue
-		}
-		mounts[containerPath] = &volumeMount{hostPath: path, containerPath: containerPath, writable: container.VolumesRW[containerPath]}
-	}
-
-	return mounts
-}
-
 func copyExistingContents(source, destination string) error {
 	volList, err := ioutil.ReadDir(source)
 	if err != nil {
 		return err
 	}
-
 	if len(volList) > 0 {
 		srcList, err := ioutil.ReadDir(destination)
 		if err != nil {
 			return err
 		}
-
 		if len(srcList) == 0 {
 			// If the source volume is empty copy files from the root into the volume
 			if err := chrootarchive.CopyWithTar(source, destination); err != nil {
@@ -275,60 +159,145 @@ func copyExistingContents(source, destination string) error {
 			}
 		}
 	}
-
 	return copyOwnership(source, destination)
 }
 
-func (container *Container) mountVolumes() error {
-	for dest, source := range container.Volumes {
-		v := container.daemon.volumes.Get(source)
-		if v == nil {
-			return fmt.Errorf("could not find volume for %s:%s, impossible to mount", source, dest)
+// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
+// It follows the next sequence to decide what to mount in each final destination:
+//
+// 1. Select the previously configured mount points for the containers, if any.
+// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination.
+// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
+func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error {
+	binds := map[string]bool{}
+	mountPoints := map[string]*mountPoint{}
+
+	// 1. Read already configured mount points.
+	for name, point := range container.MountPoints {
+		mountPoints[name] = point
+	}
+
+	// 2. Read volumes from other containers.
+	for _, v := range hostConfig.VolumesFrom {
+		containerID, mode, err := parseVolumesFrom(v)
+		if err != nil {
+			return err
 		}
 
-		destPath, err := container.GetResourcePath(dest)
+		c, err := daemon.Get(containerID)
 		if err != nil {
 			return err
 		}
 
-		if err := mount.Mount(source, destPath, "bind", "rbind,rw"); err != nil {
-			return fmt.Errorf("error while mounting volume %s: %v", source, err)
+		for _, m := range c.MountPoints {
+			cp := m
+			cp.RW = m.RW && mode != "ro"
+
+			if len(m.Source) == 0 {
+				v, err := createVolume(m.Name, m.Driver)
+				if err != nil {
+					return err
+				}
+				cp.Volume = v
+			}
+
+			mountPoints[cp.Destination] = cp
 		}
 	}
 
-	for _, mnt := range container.specialMounts() {
-		destPath, err := container.GetResourcePath(mnt.Destination)
+	// 3. Read bind mounts
+	for _, b := range hostConfig.Binds {
+		// #10618
+		bind, err := parseBindMount(b, container.Config)
 		if err != nil {
 			return err
 		}
-		if err := mount.Mount(mnt.Source, destPath, "bind", "bind,rw"); err != nil {
-			return fmt.Errorf("error while mounting volume %s: %v", mnt.Source, err)
+
+		if binds[bind.Destination] {
+			return fmt.Errorf("Duplicate bind mount %s", bind.Destination)
 		}
+
+		if len(bind.Name) > 0 && len(bind.Driver) > 0 {
+			v, err := createVolume(bind.Name, bind.Driver)
+			if err != nil {
+				return err
+			}
+			bind.Volume = v
+		}
+
+		binds[bind.Destination] = true
+		mountPoints[bind.Destination] = bind
 	}
+
+	container.MountPoints = mountPoints
+
 	return nil
 }
 
-func (container *Container) unmountVolumes() {
-	for dest := range container.Volumes {
-		destPath, err := container.GetResourcePath(dest)
-		if err != nil {
-			logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
-			continue
-		}
-		if err := mount.ForceUnmount(destPath); err != nil {
-			logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
-			continue
+// verifyOldVolumesInfo ports volumes configured for the containers pre docker 1.7.
+// It reads the container configuration and creates valid mount points for the old volumes.
+func (daemon *Daemon) verifyOldVolumesInfo(container *Container) error {
+	jsonPath, err := container.jsonPath()
+	if err != nil {
+		return err
+	}
+	f, err := os.Open(jsonPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
 		}
+		return err
 	}
 
-	for _, mnt := range container.specialMounts() {
-		destPath, err := container.GetResourcePath(mnt.Destination)
-		if err != nil {
-			logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
-			continue
-		}
-		if err := mount.ForceUnmount(destPath); err != nil {
-			logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
+	type oldContVolCfg struct {
+		Volumes   map[string]string
+		VolumesRW map[string]bool
+	}
+
+	vols := oldContVolCfg{
+		Volumes:   make(map[string]string),
+		VolumesRW: make(map[string]bool),
+	}
+	if err := json.NewDecoder(f).Decode(&vols); err != nil {
+		return err
+	}
+
+	for destination, hostPath := range vols.Volumes {
+		vfsPath := filepath.Join(daemon.root, "vfs", "dir")
+
+		if strings.HasPrefix(hostPath, vfsPath) {
+			id := filepath.Base(hostPath)
+
+			container.AddLocalMountPoint(id, destination, vols.VolumesRW[destination])
 		}
 	}
+
+	return container.ToDisk()
+}
+
+func createVolume(name, driverName string) (volume.Volume, error) {
+	vd, err := getVolumeDriver(driverName)
+	if err != nil {
+		return nil, err
+	}
+	return vd.Create(name)
+}
+
+func removeVolume(v volume.Volume) error {
+	vd, err := getVolumeDriver(v.DriverName())
+	if err != nil {
+		return nil
+	}
+	return vd.Remove(v)
+}
+
+func getVolumeDriver(name string) (volume.Driver, error) {
+	if name == "" {
+		name = volume.DefaultDriverName
+	}
+	vd := volumedrivers.Lookup(name)
+	if vd == nil {
+		return nil, fmt.Errorf("Volumes Driver %s isn't registered", name)
+	}
+	return vd, nil
 }
@@ -4,6 +4,9 @@ package daemon
 
 import (
 	"os"
+	"path/filepath"
+	"sort"
+	"strings"
 
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/pkg/system"
@@ -24,36 +27,44 @@ func copyOwnership(source, destination string) error {
 	return os.Chmod(destination, os.FileMode(stat.Mode()))
 }
 
-func (container *Container) prepareVolumes() error {
-	if container.Volumes == nil || len(container.Volumes) == 0 {
-		container.Volumes = make(map[string]string)
-		container.VolumesRW = make(map[string]bool)
-	}
+func (container *Container) setupMounts() ([]execdriver.Mount, error) {
+	var mounts []execdriver.Mount
+	for _, m := range container.MountPoints {
+		path, err := m.Setup()
+		if err != nil {
+			return nil, err
+		}
 
-	if len(container.hostConfig.VolumesFrom) > 0 && container.AppliedVolumesFrom == nil {
-		container.AppliedVolumesFrom = make(map[string]struct{})
+		mounts = append(mounts, execdriver.Mount{
+			Source:      path,
+			Destination: m.Destination,
+			Writable:    m.RW,
+		})
 	}
-	return container.createVolumes()
+
+	mounts = sortMounts(mounts)
+	return append(mounts, container.networkMounts()...), nil
 }
 
-func (container *Container) setupMounts() error {
-	mounts := []execdriver.Mount{}
+func sortMounts(m []execdriver.Mount) []execdriver.Mount {
+	sort.Sort(mounts(m))
+	return m
+}
 
-	// Mount user specified volumes
-	// Note, these are not private because you may want propagation of (un)mounts from host
-	// volumes. For instance if you use -v /usr:/usr and the host later mounts /usr/share you
-	// want this new mount in the container
-	// These mounts must be ordered based on the length of the path that it is being mounted to (lexicographic)
-	for _, path := range container.sortedVolumeMounts() {
-		mounts = append(mounts, execdriver.Mount{
-			Source:      container.Volumes[path],
-			Destination: path,
-			Writable:    container.VolumesRW[path],
-		})
-	}
+type mounts []execdriver.Mount
+
+func (m mounts) Len() int {
+	return len(m)
+}
 
-	mounts = append(mounts, container.specialMounts()...)
+func (m mounts) Less(i, j int) bool {
+	return m.parts(i) < m.parts(j)
+}
+
+func (m mounts) Swap(i, j int) {
+	m[i], m[j] = m[j], m[i]
+}
 
-	container.command.Mounts = mounts
-	return nil
+func (m mounts) parts(i int) int {
+	return len(strings.Split(filepath.Clean(m[i].Destination), string(os.PathSeparator)))
 }
 
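(Illustration, not part of the diff: how the path-depth sort above orders mount destinations so parent directories are mounted before their children; the Mount type here is only a stand-in for execdriver.Mount.)

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"strings"
)

// Mount is a stand-in for execdriver.Mount, just enough to show the ordering.
type Mount struct{ Destination string }

type mounts []Mount

func (m mounts) Len() int           { return len(m) }
func (m mounts) Swap(i, j int)      { m[i], m[j] = m[j], m[i] }
func (m mounts) Less(i, j int) bool { return m.parts(i) < m.parts(j) }

// parts counts path components, like the daemon helper above.
func (m mounts) parts(i int) int {
	return len(strings.Split(filepath.Clean(m[i].Destination), string(os.PathSeparator)))
}

func main() {
	ms := mounts{{"/foo/bar/baz"}, {"/foo"}, {"/foo/bar"}}
	sort.Sort(ms)
	fmt.Println(ms) // [{/foo} {/foo/bar} {/foo/bar/baz}]
}
```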
new file mode 100644
@@ -0,0 +1,146 @@
+package daemon
+
+import (
+	"testing"
+
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/volume"
+	volumedrivers "github.com/docker/docker/volume/drivers"
+)
+
+func TestParseNamedVolumeInfo(t *testing.T) {
+	cases := []struct {
+		driver    string
+		name      string
+		expDriver string
+		expName   string
+	}{
+		{"", "name", "local", "name"},
+		{"external", "name", "external", "name"},
+		{"", "external/name", "external", "name"},
+		{"ignored", "external/name", "external", "name"},
+	}
+
+	for _, c := range cases {
+		conf := &runconfig.Config{VolumeDriver: c.driver}
+		driver, name := parseNamedVolumeInfo(c.name, conf)
+
+		if driver != c.expDriver {
+			t.Fatalf("Expected %s, was %s\n", c.expDriver, driver)
+		}
+
+		if name != c.expName {
+			t.Fatalf("Expected %s, was %s\n", c.expName, name)
+		}
+	}
+}
+
+func TestParseBindMount(t *testing.T) {
+	cases := []struct {
+		bind      string
+		driver    string
+		expDest   string
+		expSource string
+		expName   string
+		expDriver string
+		expRW     bool
+		fail      bool
+	}{
+		{"/tmp:/tmp", "", "/tmp", "/tmp", "", "", true, false},
+		{"/tmp:/tmp:ro", "", "/tmp", "/tmp", "", "", false, false},
+		{"/tmp:/tmp:rw", "", "/tmp", "/tmp", "", "", true, false},
+		{"/tmp:/tmp:foo", "", "/tmp", "/tmp", "", "", false, true},
+		{"name:/tmp", "", "", "", "", "", false, true},
+		{"name:/tmp", "external", "/tmp", "", "name", "external", true, false},
+		{"external/name:/tmp:rw", "", "/tmp", "", "name", "external", true, false},
+		{"external/name:/tmp:ro", "", "/tmp", "", "name", "external", false, false},
+		{"external/name:/tmp:foo", "", "/tmp", "", "name", "external", false, true},
+		{"name:/tmp", "local", "", "", "", "", false, true},
+		{"local/name:/tmp:rw", "", "", "", "", "", true, true},
+	}
+
+	for _, c := range cases {
+		conf := &runconfig.Config{VolumeDriver: c.driver}
+		m, err := parseBindMount(c.bind, conf)
+		if c.fail {
+			if err == nil {
+				t.Fatalf("Expected error, was nil, for spec %s\n", c.bind)
+			}
+			continue
+		}
+
+		if m.Destination != c.expDest {
+			t.Fatalf("Expected destination %s, was %s, for spec %s\n", c.expDest, m.Destination, c.bind)
+		}
+
+		if m.Source != c.expSource {
+			t.Fatalf("Expected source %s, was %s, for spec %s\n", c.expSource, m.Source, c.bind)
+		}
+
+		if m.Name != c.expName {
+			t.Fatalf("Expected name %s, was %s for spec %s\n", c.expName, m.Name, c.bind)
+		}
+
+		if m.Driver != c.expDriver {
+			t.Fatalf("Expected driver %s, was %s, for spec %s\n", c.expDriver, m.Driver, c.bind)
+		}
+
+		if m.RW != c.expRW {
+			t.Fatalf("Expected RW %v, was %v for spec %s\n", c.expRW, m.RW, c.bind)
+		}
+	}
+}
+
+func TestParseVolumeFrom(t *testing.T) {
+	cases := []struct {
+		spec    string
+		expId   string
+		expMode string
+		fail    bool
+	}{
+		{"", "", "", true},
+		{"foobar", "foobar", "rw", false},
+		{"foobar:rw", "foobar", "rw", false},
+		{"foobar:ro", "foobar", "ro", false},
+		{"foobar:baz", "", "", true},
+	}
+
+	for _, c := range cases {
+		id, mode, err := parseVolumesFrom(c.spec)
+		if c.fail {
+			if err == nil {
+				t.Fatalf("Expected error, was nil, for spec %s\n", c.spec)
+			}
+			continue
+		}
+
+		if id != c.expId {
+			t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expId, id, c.spec)
+		}
+		if mode != c.expMode {
+			t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec)
+		}
+	}
+}
+
+type fakeDriver struct{}
+
+func (fakeDriver) Name() string                              { return "fake" }
+func (fakeDriver) Create(name string) (volume.Volume, error) { return nil, nil }
+func (fakeDriver) Remove(v volume.Volume) error              { return nil }
+
+func TestGetVolumeDriver(t *testing.T) {
+	_, err := getVolumeDriver("missing")
+	if err == nil {
+		t.Fatal("Expected error, was nil")
+	}
+
+	volumedrivers.Register(fakeDriver{}, "fake")
+	d, err := getVolumeDriver("fake")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if d.Name() != "fake" {
+		t.Fatalf("Expected fake driver, got %s\n", d.Name())
+	}
+}
@@ -2,15 +2,13 @@ |
| 2 | 2 |
|
| 3 | 3 |
package daemon |
| 4 | 4 |
|
| 5 |
+import "github.com/docker/docker/daemon/execdriver" |
|
| 6 |
+ |
|
| 5 | 7 |
// Not supported on Windows |
| 6 | 8 |
func copyOwnership(source, destination string) error {
|
| 7 |
- return nil |
|
| 8 |
-} |
|
| 9 |
- |
|
| 10 |
-func (container *Container) prepareVolumes() error {
|
|
| 11 |
- return nil |
|
| 9 |
+ return nil, nil |
|
| 12 | 10 |
} |
| 13 | 11 |
|
| 14 |
-func (container *Container) setupMounts() error {
|
|
| 12 |
+func (container *Container) setupMounts() ([]execdriver.Mount, error) {
|
|
| 15 | 13 |
return nil |
| 16 | 14 |
} |
| ... | ... |
@@ -73,6 +73,7 @@ pages: |
| 73 | 73 |
- ['machine/index.md', 'User Guide', 'Docker Machine' ] |
| 74 | 74 |
- ['swarm/index.md', 'User Guide', 'Docker Swarm' ] |
| 75 | 75 |
- ['kitematic/userguide.md', 'User Guide', 'Kitematic'] |
| 76 |
+- ['userguide/plugins.md', 'User Guide', 'Docker Plugins'] |
|
| 76 | 77 |
|
| 77 | 78 |
# Docker Hub docs: |
| 78 | 79 |
- ['docker-hub/index.md', 'Docker Hub', 'Docker Hub' ] |
| ... | ... |
@@ -185,6 +186,7 @@ pages: |
| 185 | 185 |
- ['reference/api/docker_remote_api_v1.0.md', '**HIDDEN**'] |
| 186 | 186 |
- ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API client libraries'] |
| 187 | 187 |
- ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker Hub accounts API'] |
| 188 |
+- ['reference/api/plugin_api.md', 'Reference', 'Docker Plugin API'] |
|
| 188 | 189 |
- ['kitematic/faq.md', 'Reference', 'Kitematic: FAQ'] |
| 189 | 190 |
- ['kitematic/known-issues.md', 'Reference', 'Kitematic: Known issues'] |
| 190 | 191 |
|
| ... | ... |
@@ -73,6 +73,13 @@ are now returned as boolean instead of as an int. |
| 73 | 73 |
In addition, the end point now returns the new boolean fields |
| 74 | 74 |
`CpuCfsPeriod`, `CpuCfsQuota`, and `OomKillDisable`. |
| 75 | 75 |
|
| 76 |
+**New!** |
|
| 77 |
+ |
|
| 78 |
+You can now specify a volume plugin in `/v1.19/containers/create`, for example |
|
| 79 |
+`"HostConfig": {"Binds": ["flocker/name:/data"]}` where `flocker` is the name
|
|
| 80 |
+of the plugin, `name` is the user-facing name of the volume (passed to the |
|
| 81 |
+volume plugin) and `/data` is the mountpoint inside the container. |
|
| 82 |
+ |
|
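For illustration, a minimal request body for `POST /v1.19/containers/create` using such a bind might look like the following sketch; the plugin name `flocker` and volume name `name` are taken from the example above, and every other field is omitted for brevity:

```
{
    "Image": "busybox",
    "HostConfig": {
        "Binds": ["flocker/name:/data"]
    }
}
```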
| 76 | 83 |
## v1.18 |
| 77 | 84 |
|
| 78 | 85 |
### Full documentation |
| ... | ... |
@@ -226,8 +226,11 @@ Json Parameters: |
| 226 | 226 |
- **Binds** – A list of volume bindings for this container. Each volume |
| 227 | 227 |
binding is a string of the form `container_path` (to create a new |
| 228 | 228 |
volume for the container), `host_path:container_path` (to bind-mount |
| 229 |
- a host path into the container), or `host_path:container_path:ro` |
|
| 230 |
- (to make the bind-mount read-only inside the container). |
|
| 229 |
+ a host path into the container), `host_path:container_path:ro` |
|
| 230 |
+ (to make the bind-mount read-only inside the container), or |
|
| 231 |
+ `volume_plugin/volume_name:container_path` (to provision a |
|
| 232 |
+ volume named `volume_name` from a [volume plugin](/userguide/plugins) |
|
| 233 |
+ named `volume_plugin`). |
|
| 231 | 234 |
- **Links** - A list of links for the container. Each link entry should be |
| 232 | 235 |
in the form of `container_name:alias`. |
| 233 | 236 |
- **LxcConf** - LXC specific configurations. These configurations will only |
| 234 | 237 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,223 @@ |
| 0 |
+page_title: Plugin API documentation |
|
| 1 |
+page_description: Documentation for writing a Docker plugin. |
|
| 2 |
+page_keywords: docker, plugins, api, extensions |
|
| 3 |
+ |
|
| 4 |
+# Docker Plugin API |
|
| 5 |
+ |
|
| 6 |
+Docker plugins are out-of-process extensions which add capabilities to the |
|
| 7 |
+Docker Engine. |
|
| 8 |
+ |
|
| 9 |
+This page is intended for people who want to develop their own Docker plugin. |
|
| 10 |
+If you just want to learn about or use Docker plugins, look |
|
| 11 |
+[here](/userguide/plugins). |
|
| 12 |
+ |
|
| 13 |
+## What plugins are |
|
| 14 |
+ |
|
| 15 |
+A plugin is a process running on the same Docker host as the Docker daemon, |
|
| 16 |
+which registers itself by placing a file in `/usr/share/docker/plugins` (the |
|
| 17 |
+"plugin directory"). |
|
| 18 |
+ |
|
| 19 |
+Plugins have human-readable names, which are short, lowercase strings. For |
|
| 20 |
+example, `flocker` or `weave`. |
|
| 21 |
+ |
|
| 22 |
+Plugins can run inside or outside containers. Currently running them outside |
|
| 23 |
+containers is recommended. |
|
| 24 |
+ |
|
| 25 |
+## Plugin discovery |
|
| 26 |
+ |
|
| 27 |
+Docker discovers plugins by looking for them in the plugin directory whenever a |
|
| 28 |
+user or container tries to use one by name. |
|
| 29 |
+ |
|
| 30 |
+There are two types of files which can be put in the plugin directory. |
|
| 31 |
+ |
|
| 32 |
+* `.sock` files are UNIX domain sockets. |
|
| 33 |
+* `.spec` files are text files containing a URL, such as `unix:///other.sock`. |
|
| 34 |
+ |
|
| 35 |
+The name of the file (excluding the extension) determines the plugin name. |
|
| 36 |
+ |
|
| 37 |
+For example, the `flocker` plugin might create a UNIX socket at |
|
| 38 |
+`/usr/share/docker/plugins/flocker.sock`. |
|
| 39 |
+ |
|
| 40 |
+Plugins must be run locally on the same machine as the Docker daemon. UNIX |
|
| 41 |
+domain sockets are strongly encouraged for security reasons. |
|
| 42 |
+ |
|
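As a sketch of the second mechanism, a hypothetical plugin named `myplugin` (the name and socket path here are illustrative only) could be registered by writing the URL of its socket into a `.spec` file:

```
$ mkdir -p /usr/share/docker/plugins
$ echo "unix:///run/myplugin.sock" > /usr/share/docker/plugins/myplugin.spec
```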
| 43 |
+## Plugin lifecycle |
|
| 44 |
+ |
|
| 45 |
+Plugins should be started before Docker, and stopped after Docker. For |
|
| 46 |
+example, when packaging a plugin for a platform which supports `systemd`, you |
|
| 47 |
+might use [`systemd` dependencies]( |
|
| 48 |
+http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to |
|
| 49 |
+manage startup and shutdown order. |
|
| 50 |
+ |
|
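For example, a sketch of a `systemd` unit for a hypothetical `myplugin` binary, assuming the daemon runs as `docker.service`, could declare the ordering like this:

```
# illustrative only; paths and names depend on your plugin
[Unit]
Description=My Docker volume plugin
Before=docker.service

[Service]
ExecStart=/usr/bin/myplugin

[Install]
WantedBy=multi-user.target
```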
| 51 |
+When upgrading a plugin, you should first stop the Docker daemon, upgrade the |
|
| 52 |
+plugin, then start Docker again. |
|
| 53 |
+ |
|
| 54 |
+If a plugin is packaged as a container, this may cause issues. Plugins as |
|
| 55 |
+containers are currently considered experimental due to these shutdown/startup |
|
| 56 |
+ordering issues. These issues are mitigated by plugin retries (see below). |
|
| 57 |
+ |
|
| 58 |
+## Plugin activation |
|
| 59 |
+ |
|
| 60 |
+When a plugin is first referred to -- either by a user referring to it by name |
|
| 61 |
+(e.g. `docker run --volume-driver=foo`) or a container already configured to |
|
| 62 |
+use a plugin being started -- Docker looks for the named plugin in the plugin |
|
| 63 |
+directory and activates it with a handshake. See Handshake API below. |
|
| 64 |
+ |
|
| 65 |
+Plugins are *not* activated automatically at Docker daemon startup. Rather, |
|
| 66 |
+they are activated only lazily, or on-demand, when they are needed. |
|
| 67 |
+ |
|
| 68 |
+## API design |
|
| 69 |
+ |
|
| 70 |
+The Plugin API is RPC-style JSON over HTTP, much like webhooks. |
|
| 71 |
+ |
|
| 72 |
+Requests flow *from* the Docker daemon *to* the plugin. So the plugin needs to |
|
| 73 |
+implement an HTTP server and bind this to the UNIX socket mentioned in the |
|
| 74 |
+"plugin discovery" section. |
|
| 75 |
+ |
|
| 76 |
+All requests are HTTP `POST` requests. |
|
| 77 |
+ |
|
| 78 |
+The API is versioned via an Accept header, which currently is always set to |
|
| 79 |
+`application/vnd.docker.plugins.v1+json`. |
|
| 80 |
+ |
|
| 81 |
+## Handshake API |
|
| 82 |
+ |
|
| 83 |
+Plugins are activated via the following "handshake" API call. |
|
| 84 |
+ |
|
| 85 |
+### /Plugin.Activate |
|
| 86 |
+ |
|
| 87 |
+**Request:** empty body |
|
| 88 |
+ |
|
| 89 |
+**Response:** |
|
| 90 |
+``` |
|
| 91 |
+{
|
|
| 92 |
+ "Implements": ["VolumeDriver"] |
|
| 93 |
+} |
|
| 94 |
+``` |
|
| 95 |
+ |
|
| 96 |
+Responds with a list of Docker subsystems which this plugin implements. |
|
| 97 |
+After activation, the plugin will then be sent events from this subsystem. |
|
| 98 |
+ |
|
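Putting discovery and the handshake together, a minimal Go sketch of a plugin's activation endpoint might look like the following; the plugin name `myplugin` is hypothetical, and error handling is reduced to a bare panic for brevity:

```
// A minimal sketch of a plugin's activation endpoint, not a complete plugin.
package main

import (
	"fmt"
	"net"
	"net/http"
)

func main() {
	// Register the plugin by listening on a socket in the plugin directory.
	l, err := net.Listen("unix", "/usr/share/docker/plugins/myplugin.sock")
	if err != nil {
		panic(err)
	}
	// Answer the handshake with the subsystems this plugin implements.
	http.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
		fmt.Fprintln(w, `{"Implements": ["VolumeDriver"]}`)
	})
	http.Serve(l, nil)
}
```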
| 99 |
+## Volume API |
|
| 100 |
+ |
|
| 101 |
+If a plugin registers itself as a `VolumeDriver` (see above) then it is |
|
| 102 |
+expected to provide writeable paths on the host filesystem that the Docker |
|
| 103 |
+daemon can make available to containers. |
|
| 104 |
+ |
|
| 105 |
+The Docker daemon handles bind-mounting the provided paths into user |
|
| 106 |
+containers. |
|
| 107 |
+ |
|
| 108 |
+### /VolumeDriver.Create |
|
| 109 |
+ |
|
| 110 |
+**Request**: |
|
| 111 |
+``` |
|
| 112 |
+{
|
|
| 113 |
+ "Name": "volume_name" |
|
| 114 |
+} |
|
| 115 |
+``` |
|
| 116 |
+ |
|
| 117 |
+Instruct the plugin that the user wants to create a volume, given a user |
|
| 118 |
+specified volume name. The plugin does not need to actually manifest the |
|
| 119 |
+volume on the filesystem yet (until Mount is called). |
|
| 120 |
+ |
|
| 121 |
+**Response**: |
|
| 122 |
+``` |
|
| 123 |
+{
|
|
| 124 |
+ "Err": null |
|
| 125 |
+} |
|
| 126 |
+``` |
|
| 127 |
+ |
|
| 128 |
+Respond with a string error if an error occurred. |
|
| 129 |
+ |
|
| 130 |
+### /VolumeDriver.Remove |
|
| 131 |
+ |
|
| 132 |
+**Request**: |
|
| 133 |
+``` |
|
| 134 |
+{
|
|
| 135 |
+ "Name": "volume_name" |
|
| 136 |
+} |
|
| 137 |
+``` |
|
| 138 |
+ |
|
| 139 |
+Remove a volume, given a user specified volume name. |
|
| 140 |
+ |
|
| 141 |
+**Response**: |
|
| 142 |
+``` |
|
| 143 |
+{
|
|
| 144 |
+ "Err": null |
|
| 145 |
+} |
|
| 146 |
+``` |
|
| 147 |
+ |
|
| 148 |
+Respond with a string error if an error occurred. |
|
| 149 |
+ |
|
| 150 |
+### /VolumeDriver.Mount |
|
| 151 |
+ |
|
| 152 |
+**Request**: |
|
| 153 |
+``` |
|
| 154 |
+{
|
|
| 155 |
+ "Name": "volume_name" |
|
| 156 |
+} |
|
| 157 |
+``` |
|
| 158 |
+ |
|
| 159 |
+Docker requires the plugin to provide a volume, given a user specified volume |
|
| 160 |
+name. This is called once per container start. |
|
| 161 |
+ |
|
| 162 |
+**Response**: |
|
| 163 |
+``` |
|
| 164 |
+{
|
|
| 165 |
+ "Mountpoint": "/path/to/directory/on/host", |
|
| 166 |
+ "Err": null |
|
| 167 |
+} |
|
| 168 |
+``` |
|
| 169 |
+ |
|
| 170 |
+Respond with the path on the host filesystem where the volume has been made |
|
| 171 |
+available, and/or a string error if an error occurred. |
|
| 172 |
+ |
|
| 173 |
+### /VolumeDriver.Path |
|
| 174 |
+ |
|
| 175 |
+**Request**: |
|
| 176 |
+``` |
|
| 177 |
+{
|
|
| 178 |
+ "Name": "volume_name" |
|
| 179 |
+} |
|
| 180 |
+``` |
|
| 181 |
+ |
|
| 182 |
+Docker needs reminding of the path to the volume on the host. |
|
| 183 |
+ |
|
| 184 |
+**Response**: |
|
| 185 |
+``` |
|
| 186 |
+{
|
|
| 187 |
+ "Mountpoint": "/path/to/directory/on/host", |
|
| 188 |
+ "Err": null |
|
| 189 |
+} |
|
| 190 |
+``` |
|
| 191 |
+ |
|
| 192 |
+Respond with the path on the host filesystem where the volume has been made |
|
| 193 |
+available, and/or a string error if an error occurred. |
|
| 194 |
+ |
|
| 195 |
+### /VolumeDriver.Unmount |
|
| 196 |
+ |
|
| 197 |
+**Request**: |
|
| 198 |
+``` |
|
| 199 |
+{
|
|
| 200 |
+ "Name": "volume_name" |
|
| 201 |
+} |
|
| 202 |
+``` |
|
| 203 |
+ |
|
| 204 |
+Indication that Docker no longer uses the named volume. This is called once |
|
| 205 |
+per container stop. The plugin may deduce that it is safe to deprovision the |
|
| 206 |
+volume at this point. |
|
| 207 |
+ |
|
| 208 |
+**Response**: |
|
| 209 |
+``` |
|
| 210 |
+{
|
|
| 211 |
+ "Err": null |
|
| 212 |
+} |
|
| 213 |
+``` |
|
| 214 |
+ |
|
| 215 |
+Respond with a string error if an error occurred. |
|
| 216 |
+ |
|
| 217 |
+## Plugin retries |
|
| 218 |
+ |
|
| 219 |
+Attempts to call a method on a plugin are retried with an exponential backoff |
|
| 220 |
+for up to 30 seconds. This may help when packaging plugins as containers, since |
|
| 221 |
+it gives plugin containers a chance to start up before failing any user |
|
| 222 |
+containers which depend on them. |
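As a sketch of the schedule involved, assuming delays that double from one second and the 30 second cap described above (consistent with the `pkg/plugins` client in this change), the retry delay could be computed like this:

```
package main

import (
	"fmt"
	"time"
)

// backoff returns a capped, doubling retry delay: 1s, 2s, 4s, 8s, 16s, then 30s.
func backoff(retries int) time.Duration {
	b, max := 1, 30
	for b < max && retries > 0 {
		b *= 2
		retries--
	}
	if b > max {
		b = max
	}
	return time.Duration(b) * time.Second
}

func main() {
	for i := 0; i < 7; i++ {
		fmt.Printf("retry %d: wait %v\n", i, backoff(i))
	}
}
```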
| ... | ... |
@@ -1000,7 +1000,8 @@ Creates a new container. |
| 1000 | 1000 |
--security-opt=[] Security options |
| 1001 | 1001 |
-t, --tty=false Allocate a pseudo-TTY |
| 1002 | 1002 |
-u, --user="" Username or UID |
| 1003 |
- -v, --volume=[] Bind mount a volume |
|
| 1003 |
+ -v, --volume=[] Bind mount a volume, or specify name for volume plugin |
|
| 1004 |
+ --volume-driver= Optional volume driver (plugin name) for the container |
|
| 1004 | 1005 |
--volumes-from=[] Mount volumes from the specified container(s) |
| 1005 | 1006 |
-w, --workdir="" Working directory inside the container |
| 1006 | 1007 |
|
| ... | ... |
@@ -1970,7 +1971,8 @@ To remove an image using its digest: |
| 1970 | 1970 |
--sig-proxy=true Proxy received signals to the process |
| 1971 | 1971 |
-t, --tty=false Allocate a pseudo-TTY |
| 1972 | 1972 |
-u, --user="" Username or UID (format: <name|uid>[:<group|gid>]) |
| 1973 |
- -v, --volume=[] Bind mount a volume |
|
| 1973 |
+ -v, --volume=[] Bind mount a volume, or specify name for volume plugin |
|
| 1974 |
+ --volume-driver= Optional volume driver (plugin name) for the container |
|
| 1974 | 1975 |
--volumes-from=[] Mount volumes from the specified container(s) |
| 1975 | 1976 |
-w, --workdir="" Working directory inside the container |
| 1976 | 1977 |
|
| ... | ... |
@@ -2066,6 +2068,18 @@ binary (such as that provided by [https://get.docker.com]( |
| 2066 | 2066 |
https://get.docker.com)), you give the container the full access to create and |
| 2067 | 2067 |
manipulate the host's Docker daemon. |
| 2068 | 2068 |
|
| 2069 |
+ $ docker run -ti -v volumename:/data --volume-driver=flocker busybox sh |
|
| 2070 |
+ |
|
| 2071 |
+By specifying a volume name in conjunction with a volume driver, volume plugins |
|
| 2072 |
+such as [Flocker](https://clusterhq.com/docker-plugin/), once installed, can be |
|
| 2073 |
+used to manage volumes external to a single host, such as those on EBS. In this |
|
| 2074 |
+example, "volumename" is passed through to the volume plugin as a user-given |
|
| 2075 |
+name for the volume, which allows the plugin to associate it with an external |
|
| 2076 |
+volume beyond the lifetime of a single container or container host. This can be |
|
| 2077 |
+used, for example, to move a stateful container from one server to another. |
|
| 2078 |
+ |
|
| 2079 |
+The `volumename` must not begin with a `/`. |
|
| 2080 |
+ |
|
| 2069 | 2081 |
$ docker run -p 127.0.0.1:80:8080 ubuntu bash |
| 2070 | 2082 |
|
| 2071 | 2083 |
This binds port `8080` of the container to port `80` on `127.0.0.1` of |
| ... | ... |
@@ -210,6 +210,14 @@ Then un-tar the backup file in the new container's data volume. |
| 210 | 210 |
You can use the techniques above to automate backup, migration and |
| 211 | 211 |
restore testing using your preferred tools. |
| 212 | 212 |
|
| 213 |
+## Integrating Docker with external storage systems |
|
| 214 |
+ |
|
| 215 |
+Docker volume plugins such as [Flocker](https://clusterhq.com/docker-plugin/) |
|
| 216 |
+enable Docker deployments to be integrated with external storage systems, such |
|
| 217 |
+as Amazon EBS, and allow data volumes to persist beyond the lifetime of a |
|
| 218 |
+single Docker host. See the [plugin section of the user |
|
| 219 |
+guide](/userguide/plugins) for more information. |
|
| 220 |
+ |
|
| 213 | 221 |
# Next steps |
| 214 | 222 |
|
| 215 | 223 |
Now we've learned a bit more about how to use Docker we're going to see how to |
| ... | ... |
@@ -105,6 +105,12 @@ works with Docker can now transparently scale up to multiple hosts. |
| 105 | 105 |
|
| 106 | 106 |
Go to [Docker Swarm user guide](/swarm/). |
| 107 | 107 |
|
| 108 |
+## Docker Plugins |
|
| 109 |
+ |
|
| 110 |
+Docker plugins allow you to extend the capabilities of the Docker Engine. |
|
| 111 |
+ |
|
| 112 |
+Go to [Docker Plugins](/userguide/plugins). |
|
| 113 |
+ |
|
| 108 | 114 |
## Getting help |
| 109 | 115 |
|
| 110 | 116 |
* [Docker homepage](http://www.docker.com/) |
| 111 | 117 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,51 @@ |
| 0 |
+page_title: Docker Plugins |
|
| 1 |
+page_description: Learn what Docker Plugins are and how to use them. |
|
| 2 |
+page_keywords: plugins, extensions, extensibility |
|
| 3 |
+ |
|
| 4 |
+# Understanding Docker Plugins |
|
| 5 |
+ |
|
| 6 |
+You can extend the capabilities of the Docker Engine by loading third-party |
|
| 7 |
+plugins. |
|
| 8 |
+ |
|
| 9 |
+## Types of plugins |
|
| 10 |
+ |
|
| 11 |
+Plugins extend Docker's functionality. They come in specific types. For |
|
| 12 |
+example, a **volume plugin** might enable Docker volumes to persist across |
|
| 13 |
+multiple Docker hosts. |
|
| 14 |
+ |
|
| 15 |
+Currently Docker supports **volume plugins**. In the future it will support |
|
| 16 |
+additional plugin types. |
|
| 17 |
+ |
|
| 18 |
+## Installing a plugin |
|
| 19 |
+ |
|
| 20 |
+Follow the instructions in the plugin's documentation. |
|
| 21 |
+ |
|
| 22 |
+## Finding a plugin |
|
| 23 |
+ |
|
| 24 |
+The following plugins exist: |
|
| 25 |
+ |
|
| 26 |
+* The [Flocker plugin](https://clusterhq.com/docker-plugin/) is a volume plugin |
|
| 27 |
+ which provides multi-host portable volumes for Docker, enabling you to run |
|
| 28 |
+ databases and other stateful containers and move them around across a cluster |
|
| 29 |
+ of machines. |
|
| 30 |
+ |
|
| 31 |
+## Using a plugin |
|
| 32 |
+ |
|
| 33 |
+Depending on the plugin type, there are additional arguments to `docker` CLI |
|
| 34 |
+commands. |
|
| 35 |
+ |
|
| 36 |
+* For example `docker run` has a [`--volume-driver` argument]( |
|
| 37 |
+ /reference/commandline/cli/#run). |
|
| 38 |
+ |
|
| 39 |
+You can also use plugins via the [Docker Remote API]( |
|
| 40 |
+/reference/api/docker_remote_api/). |
|
| 41 |
+ |
|
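For instance, once a volume plugin such as Flocker is installed, it can be selected per container (this mirrors the `docker run` example in the CLI reference):

```
$ docker run -ti -v volumename:/data --volume-driver=flocker busybox sh
```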
| 42 |
+## Troubleshooting a plugin |
|
| 43 |
+ |
|
| 44 |
+If you are having problems with Docker after loading a plugin, ask the authors |
|
| 45 |
+of the plugin for help. The Docker team may not be able to assist you. |
|
| 46 |
+ |
|
| 47 |
+## Writing a plugin |
|
| 48 |
+ |
|
| 49 |
+If you are interested in writing a plugin for Docker, or seeing how they work |
|
| 50 |
+under the hood, see the [docker plugins reference](/reference/api/plugin_api). |
| ... | ... |
@@ -166,7 +166,7 @@ func (s *DockerSuite) TestContainerApiStartDupVolumeBinds(c *check.C) {
|
| 166 | 166 |
c.Assert(status, check.Equals, http.StatusInternalServerError) |
| 167 | 167 |
c.Assert(err, check.IsNil) |
| 168 | 168 |
|
| 169 |
- if !strings.Contains(string(body), "Duplicate volume") {
|
|
| 169 |
+ if !strings.Contains(string(body), "Duplicate bind") {
|
|
| 170 | 170 |
c.Fatalf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(body), err)
|
| 171 | 171 |
} |
| 172 | 172 |
} |
| ... | ... |
@@ -210,49 +210,6 @@ func (s *DockerSuite) TestContainerApiStartVolumesFrom(c *check.C) {
|
| 210 | 210 |
} |
| 211 | 211 |
} |
| 212 | 212 |
|
| 213 |
-// Ensure that volumes-from has priority over binds/anything else |
|
| 214 |
-// This is pretty much the same as TestRunApplyVolumesFromBeforeVolumes, except with passing the VolumesFrom and the bind on start |
|
| 215 |
-func (s *DockerSuite) TestVolumesFromHasPriority(c *check.C) {
|
|
| 216 |
- volName := "voltst2" |
|
| 217 |
- volPath := "/tmp" |
|
| 218 |
- |
|
| 219 |
- if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", volName, "-v", volPath, "busybox")); err != nil {
|
|
| 220 |
- c.Fatal(out, err) |
|
| 221 |
- } |
|
| 222 |
- |
|
| 223 |
- name := "testing" |
|
| 224 |
- config := map[string]interface{}{
|
|
| 225 |
- "Image": "busybox", |
|
| 226 |
- "Volumes": map[string]struct{}{volPath: {}},
|
|
| 227 |
- } |
|
| 228 |
- |
|
| 229 |
- status, _, err := sockRequest("POST", "/containers/create?name="+name, config)
|
|
| 230 |
- c.Assert(status, check.Equals, http.StatusCreated) |
|
| 231 |
- c.Assert(err, check.IsNil) |
|
| 232 |
- |
|
| 233 |
- bindPath := randomUnixTmpDirPath("test")
|
|
| 234 |
- config = map[string]interface{}{
|
|
| 235 |
- "VolumesFrom": []string{volName},
|
|
| 236 |
- "Binds": []string{bindPath + ":/tmp"},
|
|
| 237 |
- } |
|
| 238 |
- status, _, err = sockRequest("POST", "/containers/"+name+"/start", config)
|
|
| 239 |
- c.Assert(status, check.Equals, http.StatusNoContent) |
|
| 240 |
- c.Assert(err, check.IsNil) |
|
| 241 |
- |
|
| 242 |
- pth, err := inspectFieldMap(name, "Volumes", volPath) |
|
| 243 |
- if err != nil {
|
|
| 244 |
- c.Fatal(err) |
|
| 245 |
- } |
|
| 246 |
- pth2, err := inspectFieldMap(volName, "Volumes", volPath) |
|
| 247 |
- if err != nil {
|
|
| 248 |
- c.Fatal(err) |
|
| 249 |
- } |
|
| 250 |
- |
|
| 251 |
- if pth != pth2 {
|
|
| 252 |
- c.Fatalf("expected volume host path to be %s, got %s", pth, pth2)
|
|
| 253 |
- } |
|
| 254 |
-} |
|
| 255 |
- |
|
| 256 | 213 |
func (s *DockerSuite) TestGetContainerStats(c *check.C) {
|
| 257 | 214 |
var ( |
| 258 | 215 |
name = "statscontainer" |
| ... | ... |
@@ -284,35 +284,6 @@ func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) {
|
| 284 | 284 |
} |
| 285 | 285 |
} |
| 286 | 286 |
|
| 287 |
-// #9629 |
|
| 288 |
-func (s *DockerDaemonSuite) TestDaemonVolumesBindsRefs(c *check.C) {
|
|
| 289 |
- if err := s.d.StartWithBusybox(); err != nil {
|
|
| 290 |
- c.Fatal(err) |
|
| 291 |
- } |
|
| 292 |
- |
|
| 293 |
- tmp, err := ioutil.TempDir(os.TempDir(), "") |
|
| 294 |
- if err != nil {
|
|
| 295 |
- c.Fatal(err) |
|
| 296 |
- } |
|
| 297 |
- defer os.RemoveAll(tmp) |
|
| 298 |
- |
|
| 299 |
- if err := ioutil.WriteFile(tmp+"/test", []byte("testing"), 0655); err != nil {
|
|
| 300 |
- c.Fatal(err) |
|
| 301 |
- } |
|
| 302 |
- |
|
| 303 |
- if out, err := s.d.Cmd("create", "-v", tmp+":/foo", "--name=voltest", "busybox"); err != nil {
|
|
| 304 |
- c.Fatal(err, out) |
|
| 305 |
- } |
|
| 306 |
- |
|
| 307 |
- if err := s.d.Restart(); err != nil {
|
|
| 308 |
- c.Fatal(err) |
|
| 309 |
- } |
|
| 310 |
- |
|
| 311 |
- if out, err := s.d.Cmd("run", "--volumes-from=voltest", "--name=consumer", "busybox", "/bin/sh", "-c", "[ -f /foo/test ]"); err != nil {
|
|
| 312 |
- c.Fatal(err, out) |
|
| 313 |
- } |
|
| 314 |
-} |
|
| 315 |
- |
|
| 316 | 287 |
func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) {
|
| 317 | 288 |
// TODO: skip or update for Windows daemon |
| 318 | 289 |
os.Remove("/etc/docker/key.json")
|
| ... | ... |
@@ -360,76 +331,6 @@ func (s *DockerDaemonSuite) TestDaemonKeyMigration(c *check.C) {
|
| 360 | 360 |
} |
| 361 | 361 |
} |
| 362 | 362 |
|
| 363 |
-// Simulate an older daemon (pre 1.3) coming up with volumes specified in containers |
|
| 364 |
-// without corresponding volume json |
|
| 365 |
-func (s *DockerDaemonSuite) TestDaemonUpgradeWithVolumes(c *check.C) {
|
|
| 366 |
- graphDir := filepath.Join(os.TempDir(), "docker-test") |
|
| 367 |
- defer os.RemoveAll(graphDir) |
|
| 368 |
- if err := s.d.StartWithBusybox("-g", graphDir); err != nil {
|
|
| 369 |
- c.Fatal(err) |
|
| 370 |
- } |
|
| 371 |
- |
|
| 372 |
- tmpDir := filepath.Join(os.TempDir(), "test") |
|
| 373 |
- defer os.RemoveAll(tmpDir) |
|
| 374 |
- |
|
| 375 |
- if out, err := s.d.Cmd("create", "-v", tmpDir+":/foo", "--name=test", "busybox"); err != nil {
|
|
| 376 |
- c.Fatal(err, out) |
|
| 377 |
- } |
|
| 378 |
- |
|
| 379 |
- if err := s.d.Stop(); err != nil {
|
|
| 380 |
- c.Fatal(err) |
|
| 381 |
- } |
|
| 382 |
- |
|
| 383 |
- // Remove this since we're expecting the daemon to re-create it too |
|
| 384 |
- if err := os.RemoveAll(tmpDir); err != nil {
|
|
| 385 |
- c.Fatal(err) |
|
| 386 |
- } |
|
| 387 |
- |
|
| 388 |
- configDir := filepath.Join(graphDir, "volumes") |
|
| 389 |
- |
|
| 390 |
- if err := os.RemoveAll(configDir); err != nil {
|
|
| 391 |
- c.Fatal(err) |
|
| 392 |
- } |
|
| 393 |
- |
|
| 394 |
- if err := s.d.Start("-g", graphDir); err != nil {
|
|
| 395 |
- c.Fatal(err) |
|
| 396 |
- } |
|
| 397 |
- |
|
| 398 |
- if _, err := os.Stat(tmpDir); os.IsNotExist(err) {
|
|
| 399 |
- c.Fatalf("expected volume path %s to exist but it does not", tmpDir)
|
|
| 400 |
- } |
|
| 401 |
- |
|
| 402 |
- dir, err := ioutil.ReadDir(configDir) |
|
| 403 |
- if err != nil {
|
|
| 404 |
- c.Fatal(err) |
|
| 405 |
- } |
|
| 406 |
- if len(dir) == 0 {
|
|
| 407 |
- c.Fatalf("expected volumes config dir to contain data for new volume")
|
|
| 408 |
- } |
|
| 409 |
- |
|
| 410 |
- // Now with just removing the volume config and not the volume data |
|
| 411 |
- if err := s.d.Stop(); err != nil {
|
|
| 412 |
- c.Fatal(err) |
|
| 413 |
- } |
|
| 414 |
- |
|
| 415 |
- if err := os.RemoveAll(configDir); err != nil {
|
|
| 416 |
- c.Fatal(err) |
|
| 417 |
- } |
|
| 418 |
- |
|
| 419 |
- if err := s.d.Start("-g", graphDir); err != nil {
|
|
| 420 |
- c.Fatal(err) |
|
| 421 |
- } |
|
| 422 |
- |
|
| 423 |
- dir, err = ioutil.ReadDir(configDir) |
|
| 424 |
- if err != nil {
|
|
| 425 |
- c.Fatal(err) |
|
| 426 |
- } |
|
| 427 |
- |
|
| 428 |
- if len(dir) == 0 {
|
|
| 429 |
- c.Fatalf("expected volumes config dir to contain data for new volume")
|
|
| 430 |
- } |
|
| 431 |
-} |
|
| 432 |
- |
|
| 433 | 363 |
// GH#11320 - verify that the daemon exits on failure properly |
| 434 | 364 |
// Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means
|
| 435 | 365 |
// to get a daemon init failure; no other tests for -b/--bip conflict are therefore required |
| ... | ... |
@@ -395,21 +395,6 @@ func (s *DockerSuite) TestRunModeNetContainerHostname(c *check.C) {
|
| 395 | 395 |
} |
| 396 | 396 |
} |
| 397 | 397 |
|
| 398 |
-// Regression test for #4741 |
|
| 399 |
-func (s *DockerSuite) TestRunWithVolumesAsFiles(c *check.C) {
|
|
| 400 |
- runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/etc/hosts:/target-file", "busybox", "true") |
|
| 401 |
- out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd) |
|
| 402 |
- if err != nil && exitCode != 0 {
|
|
| 403 |
- c.Fatal("1", out, stderr, err)
|
|
| 404 |
- } |
|
| 405 |
- |
|
| 406 |
- runCmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-data", "busybox", "cat", "/target-file") |
|
| 407 |
- out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd) |
|
| 408 |
- if err != nil && exitCode != 0 {
|
|
| 409 |
- c.Fatal("2", out, stderr, err)
|
|
| 410 |
- } |
|
| 411 |
-} |
|
| 412 |
- |
|
| 413 | 398 |
// Regression test for #4979 |
| 414 | 399 |
func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) {
|
| 415 | 400 |
runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file") |
| ... | ... |
@@ -536,7 +521,7 @@ func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) {
|
| 536 | 536 |
if out, _, err := runCommandWithOutput(cmd); err == nil {
|
| 537 | 537 |
c.Fatal("Expected error about duplicate volume definitions")
|
| 538 | 538 |
} else {
|
| 539 |
- if !strings.Contains(out, "Duplicate volume") {
|
|
| 539 |
+ if !strings.Contains(out, "Duplicate bind mount") {
|
|
| 540 | 540 |
c.Fatalf("Expected 'duplicate volume' error, got %v", err)
|
| 541 | 541 |
} |
| 542 | 542 |
} |
| ... | ... |
@@ -2333,7 +2318,13 @@ func (s *DockerSuite) TestRunMountOrdering(c *check.C) {
|
| 2333 | 2333 |
c.Fatal(err) |
| 2334 | 2334 |
} |
| 2335 | 2335 |
|
| 2336 |
- cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp", tmpDir), "-v", fmt.Sprintf("%s:/tmp/foo", fooDir), "-v", fmt.Sprintf("%s:/tmp/tmp2", tmpDir2), "-v", fmt.Sprintf("%s:/tmp/tmp2/foo", fooDir), "busybox:latest", "sh", "-c", "ls /tmp/touch-me && ls /tmp/foo/touch-me && ls /tmp/tmp2/touch-me && ls /tmp/tmp2/foo/touch-me")
|
|
| 2336 |
+ cmd := exec.Command(dockerBinary, "run", |
|
| 2337 |
+ "-v", fmt.Sprintf("%s:/tmp", tmpDir),
|
|
| 2338 |
+ "-v", fmt.Sprintf("%s:/tmp/foo", fooDir),
|
|
| 2339 |
+ "-v", fmt.Sprintf("%s:/tmp/tmp2", tmpDir2),
|
|
| 2340 |
+ "-v", fmt.Sprintf("%s:/tmp/tmp2/foo", fooDir),
|
|
| 2341 |
+ "busybox:latest", "sh", "-c", |
|
| 2342 |
+ "ls /tmp/touch-me && ls /tmp/foo/touch-me && ls /tmp/tmp2/touch-me && ls /tmp/tmp2/foo/touch-me") |
|
| 2337 | 2343 |
out, _, err := runCommandWithOutput(cmd) |
| 2338 | 2344 |
if err != nil {
|
| 2339 | 2345 |
c.Fatal(out, err) |
| ... | ... |
@@ -2427,41 +2418,6 @@ func (s *DockerSuite) TestVolumesNoCopyData(c *check.C) {
|
| 2427 | 2427 |
} |
| 2428 | 2428 |
} |
| 2429 | 2429 |
|
| 2430 |
-func (s *DockerSuite) TestRunVolumesNotRecreatedOnStart(c *check.C) {
|
|
| 2431 |
- testRequires(c, SameHostDaemon) |
|
| 2432 |
- |
|
| 2433 |
- // Clear out any remnants from other tests |
|
| 2434 |
- info, err := ioutil.ReadDir(volumesConfigPath) |
|
| 2435 |
- if err != nil {
|
|
| 2436 |
- c.Fatal(err) |
|
| 2437 |
- } |
|
| 2438 |
- if len(info) > 0 {
|
|
| 2439 |
- for _, f := range info {
|
|
| 2440 |
- if err := os.RemoveAll(volumesConfigPath + "/" + f.Name()); err != nil {
|
|
| 2441 |
- c.Fatal(err) |
|
| 2442 |
- } |
|
| 2443 |
- } |
|
| 2444 |
- } |
|
| 2445 |
- |
|
| 2446 |
- cmd := exec.Command(dockerBinary, "run", "-v", "/foo", "--name", "lone_starr", "busybox") |
|
| 2447 |
- if _, err := runCommand(cmd); err != nil {
|
|
| 2448 |
- c.Fatal(err) |
|
| 2449 |
- } |
|
| 2450 |
- |
|
| 2451 |
- cmd = exec.Command(dockerBinary, "start", "lone_starr") |
|
| 2452 |
- if _, err := runCommand(cmd); err != nil {
|
|
| 2453 |
- c.Fatal(err) |
|
| 2454 |
- } |
|
| 2455 |
- |
|
| 2456 |
- info, err = ioutil.ReadDir(volumesConfigPath) |
|
| 2457 |
- if err != nil {
|
|
| 2458 |
- c.Fatal(err) |
|
| 2459 |
- } |
|
| 2460 |
- if len(info) != 1 {
|
|
| 2461 |
- c.Fatalf("Expected only 1 volume have %v", len(info))
|
|
| 2462 |
- } |
|
| 2463 |
-} |
|
| 2464 |
- |
|
| 2465 | 2430 |
func (s *DockerSuite) TestRunNoOutputFromPullInStdout(c *check.C) {
|
| 2466 | 2431 |
// just run with unknown image |
| 2467 | 2432 |
cmd := exec.Command(dockerBinary, "run", "asdfsg") |
| ... | ... |
@@ -2496,7 +2452,7 @@ func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) {
|
| 2496 | 2496 |
|
| 2497 | 2497 |
out, err = inspectFieldMap("dark_helmet", "Volumes", "/foo")
|
| 2498 | 2498 |
c.Assert(err, check.IsNil) |
| 2499 |
- if !strings.Contains(out, volumesStoragePath) {
|
|
| 2499 |
+ if !strings.Contains(out, volumesConfigPath) {
|
|
| 2500 | 2500 |
c.Fatalf("Volume was not defined for /foo\n%q", out)
|
| 2501 | 2501 |
} |
| 2502 | 2502 |
|
| ... | ... |
@@ -2507,7 +2463,7 @@ func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) {
|
| 2507 | 2507 |
} |
| 2508 | 2508 |
out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar")
|
| 2509 | 2509 |
c.Assert(err, check.IsNil) |
| 2510 |
- if !strings.Contains(out, volumesStoragePath) {
|
|
| 2510 |
+ if !strings.Contains(out, volumesConfigPath) {
|
|
| 2511 | 2511 |
c.Fatalf("Volume was not defined for /bar\n%q", out)
|
| 2512 | 2512 |
} |
| 2513 | 2513 |
} |
| ... | ... |
@@ -126,32 +126,6 @@ func (s *DockerSuite) TestStartRecordError(c *check.C) {
|
| 126 | 126 |
|
| 127 | 127 |
} |
| 128 | 128 |
|
| 129 |
-// gh#8726: a failed Start() breaks --volumes-from on subsequent Start()'s |
|
| 130 |
-func (s *DockerSuite) TestStartVolumesFromFailsCleanly(c *check.C) {
|
|
| 131 |
- |
|
| 132 |
- // Create the first data volume |
|
| 133 |
- dockerCmd(c, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox") |
|
| 134 |
- |
|
| 135 |
- // Expect this to fail because the data test after contaienr doesn't exist yet |
|
| 136 |
- if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "consumer", "--volumes-from", "data_before", "--volumes-from", "data_after", "busybox")); err == nil {
|
|
| 137 |
- c.Fatal("Expected error but got none")
|
|
| 138 |
- } |
|
| 139 |
- |
|
| 140 |
- // Create the second data volume |
|
| 141 |
- dockerCmd(c, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox") |
|
| 142 |
- |
|
| 143 |
- // Now, all the volumes should be there |
|
| 144 |
- dockerCmd(c, "start", "consumer") |
|
| 145 |
- |
|
| 146 |
- // Check that we have the volumes we want |
|
| 147 |
- out, _ := dockerCmd(c, "inspect", "--format='{{ len .Volumes }}'", "consumer")
|
|
| 148 |
- nVolumes := strings.Trim(out, " \r\n'") |
|
| 149 |
- if nVolumes != "2" {
|
|
| 150 |
- c.Fatalf("Missing volumes: expected 2, got %s", nVolumes)
|
|
| 151 |
- } |
|
| 152 |
- |
|
| 153 |
-} |
|
| 154 |
- |
|
| 155 | 129 |
func (s *DockerSuite) TestStartPausedContainer(c *check.C) {
|
| 156 | 130 |
defer unpauseAllContainers() |
| 157 | 131 |
|
| 158 | 132 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,150 @@ |
| 0 |
+// +build !windows |
|
| 1 |
+ |
|
| 2 |
+package main |
|
| 3 |
+ |
|
| 4 |
+import ( |
|
| 5 |
+ "encoding/json" |
|
| 6 |
+ "fmt" |
|
| 7 |
+ "io/ioutil" |
|
| 8 |
+ "net/http" |
|
| 9 |
+ "net/http/httptest" |
|
| 10 |
+ "os" |
|
| 11 |
+ "os/exec" |
|
| 12 |
+ "path/filepath" |
|
| 13 |
+ "strings" |
|
| 14 |
+ |
|
| 15 |
+ "github.com/go-check/check" |
|
| 16 |
+) |
|
| 17 |
+ |
|
| 18 |
+func init() {
|
|
| 19 |
+ check.Suite(&ExternalVolumeSuite{
|
|
| 20 |
+ ds: &DockerSuite{},
|
|
| 21 |
+ }) |
|
| 22 |
+} |
|
| 23 |
+ |
|
| 24 |
+type ExternalVolumeSuite struct {
|
|
| 25 |
+ server *httptest.Server |
|
| 26 |
+ ds *DockerSuite |
|
| 27 |
+} |
|
| 28 |
+ |
|
| 29 |
+func (s *ExternalVolumeSuite) SetUpTest(c *check.C) {
|
|
| 30 |
+ s.ds.SetUpTest(c) |
|
| 31 |
+} |
|
| 32 |
+ |
|
| 33 |
+func (s *ExternalVolumeSuite) TearDownTest(c *check.C) {
|
|
| 34 |
+ s.ds.TearDownTest(c) |
|
| 35 |
+} |
|
| 36 |
+ |
|
| 37 |
+func (s *ExternalVolumeSuite) SetUpSuite(c *check.C) {
|
|
| 38 |
+ mux := http.NewServeMux() |
|
| 39 |
+ s.server = httptest.NewServer(mux) |
|
| 40 |
+ |
|
| 41 |
+ type pluginRequest struct {
|
|
| 42 |
+ name string |
|
| 43 |
+ } |
|
| 44 |
+ |
|
| 45 |
+ hostVolumePath := func(name string) string {
|
|
| 46 |
+ return fmt.Sprintf("/var/lib/docker/volumes/%s", name)
|
|
| 47 |
+ } |
|
| 48 |
+ |
|
| 49 |
+ mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
|
|
| 50 |
+ w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json")
|
|
| 51 |
+ fmt.Fprintln(w, `{"Implements": ["VolumeDriver"]}`)
|
|
| 52 |
+ }) |
|
| 53 |
+ |
|
| 54 |
+ mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) {
|
|
| 55 |
+ w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json")
|
|
| 56 |
+ fmt.Fprintln(w, `{}`)
|
|
| 57 |
+ }) |
|
| 58 |
+ |
|
| 59 |
+ mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) {
|
|
| 60 |
+ w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json")
|
|
| 61 |
+ fmt.Fprintln(w, `{}`)
|
|
| 62 |
+ }) |
|
| 63 |
+ |
|
| 64 |
+ mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) {
|
|
| 65 |
+ var pr pluginRequest |
|
| 66 |
+ if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
|
|
| 67 |
+ http.Error(w, err.Error(), 500) |
|
| 68 |
+ } |
|
| 69 |
+ |
|
| 70 |
+ p := hostVolumePath(pr.name) |
|
| 71 |
+ |
|
| 72 |
+ w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json")
|
|
| 73 |
+ fmt.Fprintln(w, fmt.Sprintf("{\"Mountpoint\": \"%s\"}", p))
|
|
| 74 |
+ }) |
|
| 75 |
+ |
|
| 76 |
+ mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) {
|
|
| 77 |
+ var pr pluginRequest |
|
| 78 |
+ if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
|
|
| 79 |
+ http.Error(w, err.Error(), 500) |
|
| 80 |
+ } |
|
| 81 |
+ |
|
| 82 |
+ p := hostVolumePath(pr.name) |
|
| 83 |
+ if err := os.MkdirAll(p, 0755); err != nil {
|
|
| 84 |
+ http.Error(w, err.Error(), 500) |
|
| 85 |
+ } |
|
| 86 |
+ |
|
| 87 |
+ if err := ioutil.WriteFile(filepath.Join(p, "test"), []byte(s.server.URL), 0644); err != nil {
|
|
| 88 |
+ http.Error(w, err.Error(), 500) |
|
| 89 |
+ } |
|
| 90 |
+ |
|
| 91 |
+ w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json")
|
|
| 92 |
+ fmt.Fprintln(w, fmt.Sprintf("{\"Mountpoint\": \"%s\"}", p))
|
|
| 93 |
+ }) |
|
| 94 |
+ |
|
| 95 |
+ mux.HandleFunc("/VolumeDriver.Umount", func(w http.ResponseWriter, r *http.Request) {
|
|
| 96 |
+ var pr pluginRequest |
|
| 97 |
+ if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
|
|
| 98 |
+ http.Error(w, err.Error(), 500) |
|
| 99 |
+ } |
|
| 100 |
+ |
|
| 101 |
+ p := hostVolumePath(pr.name) |
|
| 102 |
+ if err := os.RemoveAll(p); err != nil {
|
|
| 103 |
+ http.Error(w, err.Error(), 500) |
|
| 104 |
+ } |
|
| 105 |
+ |
|
| 106 |
+ w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json")
|
|
| 107 |
+ fmt.Fprintln(w, `{}`)
|
|
| 108 |
+ }) |
|
| 109 |
+ |
|
| 110 |
+ if err := os.MkdirAll("/usr/share/docker/plugins", 0755); err != nil {
|
|
| 111 |
+ c.Fatal(err) |
|
| 112 |
+ } |
|
| 113 |
+ |
|
| 114 |
+ if err := ioutil.WriteFile("/usr/share/docker/plugins/test-external-volume-driver.spec", []byte(s.server.URL), 0644); err != nil {
|
|
| 115 |
+ c.Fatal(err) |
|
| 116 |
+ } |
|
| 117 |
+} |
|
| 118 |
+ |
|
| 119 |
+func (s *ExternalVolumeSuite) TearDownSuite(c *check.C) {
|
|
| 120 |
+ s.server.Close() |
|
| 121 |
+ |
|
| 122 |
+ if err := os.RemoveAll("/usr/share/docker/plugins"); err != nil {
|
|
| 123 |
+ c.Fatal(err) |
|
| 124 |
+ } |
|
| 125 |
+} |
|
| 126 |
+ |
|
| 127 |
+func (s *ExternalVolumeSuite) TestStartExternalVolumeDriver(c *check.C) {
|
|
| 128 |
+ runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver", "busybox:latest", "cat", "/tmp/external-volume-test/test") |
|
| 129 |
+ out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd) |
|
| 130 |
+ if err != nil && exitCode != 0 {
|
|
| 131 |
+ c.Fatal(out, stderr, err) |
|
| 132 |
+ } |
|
| 133 |
+ |
|
| 134 |
+ if !strings.Contains(out, s.server.URL) {
|
|
| 135 |
+ c.Fatalf("External volume mount failed. Output: %s\n", out)
|
|
| 136 |
+ } |
|
| 137 |
+} |
|
| 138 |
+ |
|
| 139 |
+func (s *ExternalVolumeSuite) TestStartExternalVolumeNamedDriver(c *check.C) {
|
|
| 140 |
+ runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "-v", "test-external-volume-driver/volume-1:/tmp/external-volume-test", "busybox:latest", "cat", "/tmp/external-volume-test/test") |
|
| 141 |
+ out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd) |
|
| 142 |
+ if err != nil && exitCode != 0 {
|
|
| 143 |
+ c.Fatal(out, stderr, err) |
|
| 144 |
+ } |
|
| 145 |
+ |
|
| 146 |
+ if !strings.Contains(out, s.server.URL) {
|
|
| 147 |
+ c.Fatalf("External volume mount failed. Output: %s\n", out)
|
|
| 148 |
+ } |
|
| 149 |
+} |
| ... | ... |
@@ -31,6 +31,10 @@ type Client struct {
|
| 31 | 31 |
} |
| 32 | 32 |
|
| 33 | 33 |
func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error {
|
| 34 |
+ return c.callWithRetry(serviceMethod, args, ret, true) |
|
| 35 |
+} |
|
| 36 |
+ |
|
| 37 |
+func (c *Client) callWithRetry(serviceMethod string, args interface{}, ret interface{}, retry bool) error {
|
|
| 34 | 38 |
var buf bytes.Buffer |
| 35 | 39 |
if err := json.NewEncoder(&buf).Encode(args); err != nil {
|
| 36 | 40 |
return err |
| ... | ... |
@@ -50,12 +54,16 @@ func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) e
|
| 50 | 50 |
for {
|
| 51 | 51 |
resp, err := c.http.Do(req) |
| 52 | 52 |
if err != nil {
|
| 53 |
+ if !retry {
|
|
| 54 |
+ return err |
|
| 55 |
+ } |
|
| 56 |
+ |
|
| 53 | 57 |
timeOff := backoff(retries) |
| 54 |
- if timeOff+time.Since(start) > defaultTimeOut {
|
|
| 58 |
+ if abort(start, timeOff) {
|
|
| 55 | 59 |
return err |
| 56 | 60 |
} |
| 57 | 61 |
retries++ |
| 58 |
- logrus.Warn("Unable to connect to plugin: %s, retrying in %ds\n", c.addr, timeOff)
|
|
| 62 |
+ logrus.Warnf("Unable to connect to plugin: %s, retrying in %v", c.addr, timeOff)
|
|
| 59 | 63 |
time.Sleep(timeOff) |
| 60 | 64 |
continue |
| 61 | 65 |
} |
| ... | ... |
@@ -73,7 +81,7 @@ func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) e
|
| 73 | 73 |
} |
| 74 | 74 |
|
| 75 | 75 |
func backoff(retries int) time.Duration {
|
| 76 |
- b, max := float64(1), float64(defaultTimeOut) |
|
| 76 |
+ b, max := 1, defaultTimeOut |
|
| 77 | 77 |
for b < max && retries > 0 {
|
| 78 | 78 |
b *= 2 |
| 79 | 79 |
retries-- |
| ... | ... |
@@ -81,7 +89,11 @@ func backoff(retries int) time.Duration {
|
| 81 | 81 |
if b > max {
|
| 82 | 82 |
b = max |
| 83 | 83 |
} |
| 84 |
- return time.Duration(b) |
|
| 84 |
+ return time.Duration(b) * time.Second |
|
| 85 |
+} |
|
| 86 |
+ |
|
| 87 |
+func abort(start time.Time, timeOff time.Duration) bool {
|
|
| 88 |
+ return timeOff+time.Since(start) > time.Duration(defaultTimeOut)*time.Second |
|
| 85 | 89 |
} |
| 86 | 90 |
|
| 87 | 91 |
func configureTCPTransport(tr *http.Transport, proto, addr string) {
|
| ... | ... |
@@ -6,6 +6,7 @@ import ( |
| 6 | 6 |
"net/http/httptest" |
| 7 | 7 |
"reflect" |
| 8 | 8 |
"testing" |
| 9 |
+ "time" |
|
| 9 | 10 |
) |
| 10 | 11 |
|
| 11 | 12 |
var ( |
| ... | ... |
@@ -27,7 +28,7 @@ func teardownRemotePluginServer() {
|
| 27 | 27 |
|
| 28 | 28 |
func TestFailedConnection(t *testing.T) {
|
| 29 | 29 |
c := NewClient("tcp://127.0.0.1:1")
|
| 30 |
- err := c.Call("Service.Method", nil, nil)
|
|
| 30 |
+ err := c.callWithRetry("Service.Method", nil, nil, false)
|
|
| 31 | 31 |
if err == nil {
|
| 32 | 32 |
t.Fatal("Unexpected successful connection")
|
| 33 | 33 |
} |
| ... | ... |
@@ -61,3 +62,44 @@ func TestEchoInputOutput(t *testing.T) {
|
| 61 | 61 |
t.Fatalf("Expected %v, was %v\n", m, output)
|
| 62 | 62 |
} |
| 63 | 63 |
} |
| 64 |
+ |
|
| 65 |
+func TestBackoff(t *testing.T) {
|
|
| 66 |
+ cases := []struct {
|
|
| 67 |
+ retries int |
|
| 68 |
+ expTimeOff time.Duration |
|
| 69 |
+ }{
|
|
| 70 |
+ {0, time.Duration(1)},
|
|
| 71 |
+ {1, time.Duration(2)},
|
|
| 72 |
+ {2, time.Duration(4)},
|
|
| 73 |
+ {4, time.Duration(16)},
|
|
| 74 |
+ {6, time.Duration(30)},
|
|
| 75 |
+ {10, time.Duration(30)},
|
|
| 76 |
+ } |
|
| 77 |
+ |
|
| 78 |
+ for _, c := range cases {
|
|
| 79 |
+ s := c.expTimeOff * time.Second |
|
| 80 |
+ if d := backoff(c.retries); d != s {
|
|
| 81 |
+ t.Fatalf("Retry %v, expected %v, was %v\n", c.retries, s, d)
|
|
| 82 |
+ } |
|
| 83 |
+ } |
|
| 84 |
+} |
|
| 85 |
+ |
|
| 86 |
+func TestAbortRetry(t *testing.T) {
|
|
| 87 |
+ cases := []struct {
|
|
| 88 |
+ timeOff time.Duration |
|
| 89 |
+ expAbort bool |
|
| 90 |
+ }{
|
|
| 91 |
+ {time.Duration(1), false},
|
|
| 92 |
+ {time.Duration(2), false},
|
|
| 93 |
+ {time.Duration(10), false},
|
|
| 94 |
+ {time.Duration(30), true},
|
|
| 95 |
+ {time.Duration(40), true},
|
|
| 96 |
+ } |
|
| 97 |
+ |
|
| 98 |
+ for _, c := range cases {
|
|
| 99 |
+ s := c.timeOff * time.Second |
|
| 100 |
+ if a := abort(time.Now(), s); a != c.expAbort {
|
|
| 101 |
+ t.Fatalf("Duration %v, expected %v, was %v\n", c.timeOff, s, a)
|
|
| 102 |
+ } |
|
| 103 |
+ } |
|
| 104 |
+} |
| ... | ... |
@@ -122,6 +122,7 @@ type Config struct {
|
| 122 | 122 |
Cmd *Command |
| 123 | 123 |
Image string // Name of the image as it was passed by the operator (eg. could be symbolic) |
| 124 | 124 |
Volumes map[string]struct{}
|
| 125 |
+ VolumeDriver string |
|
| 125 | 126 |
WorkingDir string |
| 126 | 127 |
Entrypoint *Entrypoint |
| 127 | 128 |
NetworkDisabled bool |
| ... | ... |
@@ -77,6 +77,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe |
| 77 | 77 |
flReadonlyRootfs = cmd.Bool([]string{"-read-only"}, false, "Mount the container's root filesystem as read only")
|
| 78 | 78 |
flLoggingDriver = cmd.String([]string{"-log-driver"}, "", "Logging driver for container")
|
| 79 | 79 |
flCgroupParent = cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")
|
| 80 |
+ flVolumeDriver = cmd.String([]string{"-volume-driver"}, "", "Optional volume driver for the container")
|
|
| 80 | 81 |
) |
| 81 | 82 |
|
| 82 | 83 |
cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to STDIN, STDOUT or STDERR")
|
| ... | ... |
@@ -317,6 +318,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe |
| 317 | 317 |
Entrypoint: entrypoint, |
| 318 | 318 |
WorkingDir: *flWorkingDir, |
| 319 | 319 |
Labels: convertKVStringsToMap(labels), |
| 320 |
+ VolumeDriver: *flVolumeDriver, |
|
| 320 | 321 |
} |
| 321 | 322 |
|
| 322 | 323 |
hostConfig := &HostConfig{
|
| 323 | 324 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,22 @@ |
| 0 |
+package utils |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "net" |
|
| 4 |
+ "net/http" |
|
| 5 |
+ "time" |
|
| 6 |
+) |
|
| 7 |
+ |
|
| 8 |
+func ConfigureTCPTransport(tr *http.Transport, proto, addr string) {
|
|
| 9 |
+ // Why 32? See https://github.com/docker/docker/pull/8035. |
|
| 10 |
+ timeout := 32 * time.Second |
|
| 11 |
+ if proto == "unix" {
|
|
| 12 |
+ // No need for compression in local communications. |
|
| 13 |
+ tr.DisableCompression = true |
|
| 14 |
+ tr.Dial = func(_, _ string) (net.Conn, error) {
|
|
| 15 |
+ return net.DialTimeout(proto, addr, timeout) |
|
| 16 |
+ } |
|
| 17 |
+ } else {
|
|
| 18 |
+ tr.Proxy = http.ProxyFromEnvironment |
|
| 19 |
+ tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
|
|
| 20 |
+ } |
|
| 21 |
+} |
| 0 | 22 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,51 @@ |
| 0 |
+package volumedrivers |
|
| 1 |
+ |
|
| 2 |
+import "github.com/docker/docker/volume" |
|
| 3 |
+ |
|
| 4 |
+type volumeDriverAdapter struct {
|
|
| 5 |
+ name string |
|
| 6 |
+ proxy *volumeDriverProxy |
|
| 7 |
+} |
|
| 8 |
+ |
|
| 9 |
+func (a *volumeDriverAdapter) Name() string {
|
|
| 10 |
+ return a.name |
|
| 11 |
+} |
|
| 12 |
+ |
|
| 13 |
+func (a *volumeDriverAdapter) Create(name string) (volume.Volume, error) {
|
|
| 14 |
+ err := a.proxy.Create(name) |
|
| 15 |
+ if err != nil {
|
|
| 16 |
+ return nil, err |
|
| 17 |
+ } |
|
| 18 |
+ return &volumeAdapter{a.proxy, name, a.name}, nil
|
|
| 19 |
+} |
|
| 20 |
+ |
|
| 21 |
+func (a *volumeDriverAdapter) Remove(v volume.Volume) error {
|
|
| 22 |
+ return a.proxy.Remove(v.Name()) |
|
| 23 |
+} |
|
| 24 |
+ |
|
| 25 |
+type volumeAdapter struct {
|
|
| 26 |
+ proxy *volumeDriverProxy |
|
| 27 |
+ name string |
|
| 28 |
+ driverName string |
|
| 29 |
+} |
|
| 30 |
+ |
|
| 31 |
+func (a *volumeAdapter) Name() string {
|
|
| 32 |
+ return a.name |
|
| 33 |
+} |
|
| 34 |
+ |
|
| 35 |
+func (a *volumeAdapter) DriverName() string {
|
|
| 36 |
+ return a.driverName |
|
| 37 |
+} |
|
| 38 |
+ |
|
| 39 |
+func (a *volumeAdapter) Path() string {
|
|
| 40 |
+ m, _ := a.proxy.Path(a.name) |
|
| 41 |
+ return m |
|
| 42 |
+} |
|
| 43 |
+ |
|
| 44 |
+func (a *volumeAdapter) Mount() (string, error) {
|
|
| 45 |
+ return a.proxy.Mount(a.name) |
|
| 46 |
+} |
|
| 47 |
+ |
|
| 48 |
+func (a *volumeAdapter) Unmount() error {
|
|
| 49 |
+ return a.proxy.Unmount(a.name) |
|
| 50 |
+} |
| 0 | 51 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,20 @@ |
| 0 |
+package volumedrivers |
|
| 1 |
+ |
|
| 2 |
+import "github.com/docker/docker/volume" |
|
| 3 |
+ |
|
| 4 |
+type client interface {
|
|
| 5 |
+ Call(string, interface{}, interface{}) error
|
|
| 6 |
+} |
|
| 7 |
+ |
|
| 8 |
+func NewVolumeDriver(name string, c client) volume.Driver {
|
|
| 9 |
+ proxy := &volumeDriverProxy{c}
|
|
| 10 |
+ return &volumeDriverAdapter{name, proxy}
|
|
| 11 |
+} |
|
| 12 |
+ |
|
| 13 |
+type VolumeDriver interface {
|
|
| 14 |
+ Create(name string) (err error) |
|
| 15 |
+ Remove(name string) (err error) |
|
| 16 |
+ Path(name string) (mountpoint string, err error) |
|
| 17 |
+ Mount(name string) (mountpoint string, err error) |
|
| 18 |
+ Unmount(name string) (err error) |
|
| 19 |
+} |
| 0 | 20 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,61 @@ |
| 0 |
+package volumedrivers |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "sync" |
|
| 4 |
+ |
|
| 5 |
+ "github.com/Sirupsen/logrus" |
|
| 6 |
+ "github.com/docker/docker/pkg/plugins" |
|
| 7 |
+ "github.com/docker/docker/volume" |
|
| 8 |
+) |
|
| 9 |
+ |
|
| 10 |
+// currently created by hand. generation tool would generate this like: |
|
| 11 |
+// $ extpoint-gen Driver > volume/extpoint.go |
|
| 12 |
+ |
|
| 13 |
+var drivers = &driverExtpoint{extensions: make(map[string]volume.Driver)}
|
|
| 14 |
+ |
|
| 15 |
+type driverExtpoint struct {
|
|
| 16 |
+ extensions map[string]volume.Driver |
|
| 17 |
+ sync.Mutex |
|
| 18 |
+} |
|
| 19 |
+ |
|
| 20 |
+func Register(extension volume.Driver, name string) bool {
|
|
| 21 |
+ drivers.Lock() |
|
| 22 |
+ defer drivers.Unlock() |
|
| 23 |
+ if name == "" {
|
|
| 24 |
+ return false |
|
| 25 |
+ } |
|
| 26 |
+ _, exists := drivers.extensions[name] |
|
| 27 |
+ if exists {
|
|
| 28 |
+ return false |
|
| 29 |
+ } |
|
| 30 |
+ drivers.extensions[name] = extension |
|
| 31 |
+ return true |
|
| 32 |
+} |
|
| 33 |
+ |
|
| 34 |
+func Unregister(name string) bool {
|
|
| 35 |
+ drivers.Lock() |
|
| 36 |
+ defer drivers.Unlock() |
|
| 37 |
+ _, exists := drivers.extensions[name] |
|
| 38 |
+ if !exists {
|
|
| 39 |
+ return false |
|
| 40 |
+ } |
|
| 41 |
+ delete(drivers.extensions, name) |
|
| 42 |
+ return true |
|
| 43 |
+} |
|
| 44 |
+ |
|
| 45 |
+func Lookup(name string) volume.Driver {
|
|
| 46 |
+ drivers.Lock() |
|
| 47 |
+ defer drivers.Unlock() |
|
| 48 |
+ ext, ok := drivers.extensions[name] |
|
| 49 |
+ if ok {
|
|
| 50 |
+ return ext |
|
| 51 |
+ } |
|
| 52 |
+ pl, err := plugins.Get(name, "VolumeDriver") |
|
| 53 |
+ if err != nil {
|
|
| 54 |
+ logrus.Errorf("Error: %v", err)
|
|
| 55 |
+ return nil |
|
| 56 |
+ } |
|
| 57 |
+ d := NewVolumeDriver(name, pl.Client) |
|
| 58 |
+ drivers.extensions[name] = d |
|
| 59 |
+ return d |
|
| 60 |
+} |
| 0 | 61 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,65 @@ |
| 0 |
+package volumedrivers |
|
| 1 |
+ |
|
| 2 |
+// currently created by hand. generation tool would generate this like: |
|
| 3 |
+// $ rpc-gen volume/drivers/api.go VolumeDriver > volume/drivers/proxy.go |
|
| 4 |
+ |
|
| 5 |
+type volumeDriverRequest struct {
|
|
| 6 |
+ Name string |
|
| 7 |
+} |
|
| 8 |
+ |
|
| 9 |
+type volumeDriverResponse struct {
|
|
| 10 |
+ Mountpoint string `json:",ommitempty"` |
|
| 11 |
+ Err error `json:",ommitempty"` |
|
| 12 |
+} |
|
| 13 |
+ |
|
| 14 |
+type volumeDriverProxy struct {
|
|
| 15 |
+ c client |
|
| 16 |
+} |
|
| 17 |
+ |
|
| 18 |
+func (pp *volumeDriverProxy) Create(name string) error {
|
|
| 19 |
+ args := volumeDriverRequest{name}
|
|
| 20 |
+ var ret volumeDriverResponse |
|
| 21 |
+ err := pp.c.Call("VolumeDriver.Create", args, &ret)
|
|
| 22 |
+ if err != nil {
|
|
| 23 |
+ return err |
|
| 24 |
+ } |
|
| 25 |
+ return ret.Err |
|
| 26 |
+} |
|
| 27 |
+ |
|
| 28 |
+func (pp *volumeDriverProxy) Remove(name string) error {
|
|
| 29 |
+ args := volumeDriverRequest{name}
|
|
| 30 |
+ var ret volumeDriverResponse |
|
| 31 |
+ err := pp.c.Call("VolumeDriver.Remove", args, &ret)
|
|
| 32 |
+ if err != nil {
|
|
| 33 |
+ return err |
|
| 34 |
+ } |
|
| 35 |
+ return ret.Err |
|
| 36 |
+} |
|
| 37 |
+ |
|
| 38 |
+func (pp *volumeDriverProxy) Path(name string) (string, error) {
|
|
| 39 |
+ args := volumeDriverRequest{name}
|
|
| 40 |
+ var ret volumeDriverResponse |
|
| 41 |
+ if err := pp.c.Call("VolumeDriver.Path", args, &ret); err != nil {
|
|
| 42 |
+ return "", err |
|
| 43 |
+ } |
|
| 44 |
+ return ret.Mountpoint, ret.Err |
|
| 45 |
+} |
|
| 46 |
+ |
|
| 47 |
+func (pp *volumeDriverProxy) Mount(name string) (string, error) {
|
|
| 48 |
+ args := volumeDriverRequest{name}
|
|
| 49 |
+ var ret volumeDriverResponse |
|
| 50 |
+ if err := pp.c.Call("VolumeDriver.Mount", args, &ret); err != nil {
|
|
| 51 |
+ return "", err |
|
| 52 |
+ } |
|
| 53 |
+ return ret.Mountpoint, ret.Err |
|
| 54 |
+} |
|
| 55 |
+ |
|
| 56 |
+func (pp *volumeDriverProxy) Unmount(name string) error {
|
|
| 57 |
+ args := volumeDriverRequest{name}
|
|
| 58 |
+ var ret volumeDriverResponse |
|
| 59 |
+ err := pp.c.Call("VolumeDriver.Unmount", args, &ret)
|
|
| 60 |
+ if err != nil {
|
|
| 61 |
+ return err |
|
| 62 |
+ } |
|
| 63 |
+ return ret.Err |
|
| 64 |
+} |
new file mode 100644
@@ -0,0 +1,126 @@
+package local
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/docker/docker/volume"
+)
+
+func New(rootDirectory string) (*Root, error) {
+	if err := os.MkdirAll(rootDirectory, 0700); err != nil {
+		return nil, err
+	}
+	r := &Root{
+		path:    rootDirectory,
+		volumes: make(map[string]*Volume),
+	}
+	dirs, err := ioutil.ReadDir(rootDirectory)
+	if err != nil {
+		return nil, err
+	}
+	for _, d := range dirs {
+		name := filepath.Base(d.Name())
+		r.volumes[name] = &Volume{
+			driverName: r.Name(),
+			name:       name,
+			path:       filepath.Join(rootDirectory, name),
+		}
+	}
+	return r, nil
+}
+
+type Root struct {
+	m       sync.Mutex
+	path    string
+	volumes map[string]*Volume
+}
+
+func (r *Root) Name() string {
+	return "local"
+}
+
+func (r *Root) Create(name string) (volume.Volume, error) {
+	r.m.Lock()
+	defer r.m.Unlock()
+	v, exists := r.volumes[name]
+	if !exists {
+		path := filepath.Join(r.path, name)
+		if err := os.Mkdir(path, 0755); err != nil {
+			if os.IsExist(err) {
+				return nil, fmt.Errorf("volume already exists under %s", path)
+			}
+			return nil, err
+		}
+		v = &Volume{
+			driverName: r.Name(),
+			name:       name,
+			path:       path,
+		}
+		r.volumes[name] = v
+	}
+	v.use()
+	return v, nil
+}
+
+func (r *Root) Remove(v volume.Volume) error {
+	r.m.Lock()
+	defer r.m.Unlock()
+	lv, ok := v.(*Volume)
+	if !ok {
+		return errors.New("unknown volume type")
+	}
+	lv.release()
+	if lv.usedCount == 0 {
+		delete(r.volumes, lv.name)
+		return os.RemoveAll(lv.path)
+	}
+	return nil
+}
+
+type Volume struct {
+	m         sync.Mutex
+	usedCount int
+	// unique name of the volume
+	name string
+	// path is the path on the host where the data lives
+	path string
+	// driverName is the name of the driver that created the volume.
+	driverName string
+}
+
+func (v *Volume) Name() string {
+	return v.name
+}
+
+func (v *Volume) DriverName() string {
+	return v.driverName
+}
+
+func (v *Volume) Path() string {
+	return v.path
+}
+
+func (v *Volume) Mount() (string, error) {
+	return v.path, nil
+}
+
+func (v *Volume) Unmount() error {
+	return nil
+}
+
+func (v *Volume) use() {
+	v.m.Lock()
+	v.usedCount++
+	v.m.Unlock()
+}
+
+func (v *Volume) release() {
+	v.m.Lock()
+	v.usedCount--
+	v.m.Unlock()
+}
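
For illustration only (not part of the diff): a minimal sketch of how a caller might drive the local driver above. The import path volume/local and the root directory used here are assumptions for the example; the diff itself does not name the file or the directory the daemon would pass in.

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/volume/local" // assumed package path for the new file above
)

func main() {
	// Placeholder root directory; the daemon would use its own volumes dir.
	r, err := local.New("/tmp/docker-volumes")
	if err != nil {
		log.Fatal(err)
	}

	// Create (or look up) a named volume; this bumps its use count.
	v, err := r.Create("mydata")
	if err != nil {
		log.Fatal(err)
	}

	// Mount is a no-op for the local driver: the host path is returned as-is.
	path, _ := v.Mount()
	fmt.Println(v.DriverName(), v.Name(), path)

	// Remove releases the reference; the directory is only deleted once the
	// use count reaches zero.
	if err := r.Remove(v); err != nil {
		log.Fatal(err)
	}
}

Note that Create and Remove come in matched pairs: Create increments the volume's use count, and Remove decrements it and removes the backing directory only when the count drops to zero.
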
new file mode 100644
@@ -0,0 +1,26 @@
+package volume
+
+const DefaultDriverName = "local"
+
+type Driver interface {
+	// Name returns the name of the volume driver.
+	Name() string
+	// Create makes a new volume with the given id.
+	Create(string) (Volume, error)
+	// Remove deletes the volume.
+	Remove(Volume) error
+}
+
+type Volume interface {
+	// Name returns the name of the volume
+	Name() string
+	// DriverName returns the name of the driver which owns this volume.
+	DriverName() string
+	// Path returns the absolute path to the volume.
+	Path() string
+	// Mount mounts the volume and returns the absolute path to
+	// where it can be consumed.
+	Mount() (string, error)
+	// Unmount unmounts the volume when it is no longer in use.
+	Unmount() error
+}
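
To make the contract concrete, here is a throwaway in-memory driver that satisfies these interfaces. The package name, type names, and the fake path are invented for the example; nothing here is proposed by this change.

package memory

import (
	"errors"
	"sync"

	"github.com/docker/docker/volume"
)

// Driver is a toy driver used only to show the surface area of
// volume.Driver and volume.Volume; it keeps everything in memory.
type Driver struct {
	mu      sync.Mutex
	volumes map[string]*vol
}

func NewDriver() *Driver {
	return &Driver{volumes: make(map[string]*vol)}
}

func (d *Driver) Name() string { return "memory" }

func (d *Driver) Create(name string) (volume.Volume, error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	v, ok := d.volumes[name]
	if !ok {
		// The path is a placeholder; a real driver would provision storage here.
		v = &vol{name: name, path: "/nonexistent/" + name}
		d.volumes[name] = v
	}
	return v, nil
}

func (d *Driver) Remove(v volume.Volume) error {
	d.mu.Lock()
	defer d.mu.Unlock()
	if _, ok := d.volumes[v.Name()]; !ok {
		return errors.New("no such volume")
	}
	delete(d.volumes, v.Name())
	return nil
}

type vol struct {
	name, path string
}

func (v *vol) Name() string           { return v.name }
func (v *vol) DriverName() string     { return "memory" }
func (v *vol) Path() string           { return v.path }
func (v *vol) Mount() (string, error) { return v.path, nil }
func (v *vol) Unmount() error         { return nil }

Any implementation that provides Name/Create/Remove on the driver and the five Volume methods can stand in for the local driver, which is what splitting the interfaces out into their own package is meant to enable.
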
deleted file mode 100644
@@ -1,193 +0,0 @@
-package volumes
-
-import (
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"sync"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/daemon/graphdriver"
-	"github.com/docker/docker/pkg/stringid"
-)
-
-type Repository struct {
-	configPath string
-	driver     graphdriver.Driver
-	volumes    map[string]*Volume
-	lock       sync.Mutex
-}
-
-func NewRepository(configPath string, driver graphdriver.Driver) (*Repository, error) {
-	abspath, err := filepath.Abs(configPath)
-	if err != nil {
-		return nil, err
-	}
-
-	// Create the config path
-	if err := os.MkdirAll(abspath, 0700); err != nil && !os.IsExist(err) {
-		return nil, err
-	}
-
-	repo := &Repository{
-		driver:     driver,
-		configPath: abspath,
-		volumes:    make(map[string]*Volume),
-	}
-
-	return repo, repo.restore()
-}
-
-func (r *Repository) newVolume(path string, writable bool) (*Volume, error) {
-	var (
-		isBindMount bool
-		err         error
-		id          = stringid.GenerateRandomID()
-	)
-	if path != "" {
-		isBindMount = true
-	}
-
-	if path == "" {
-		path, err = r.createNewVolumePath(id)
-		if err != nil {
-			return nil, err
-		}
-	}
-	path = filepath.Clean(path)
-
-	// Ignore the error here since the path may not exist
-	// Really just want to make sure the path we are using is real(or nonexistent)
-	if cleanPath, err := filepath.EvalSymlinks(path); err == nil {
-		path = cleanPath
-	}
-
-	v := &Volume{
-		ID:          id,
-		Path:        path,
-		repository:  r,
-		Writable:    writable,
-		containers:  make(map[string]struct{}),
-		configPath:  r.configPath + "/" + id,
-		IsBindMount: isBindMount,
-	}
-
-	if err := v.initialize(); err != nil {
-		return nil, err
-	}
-
-	r.add(v)
-	return v, nil
-}
-
-func (r *Repository) restore() error {
-	dir, err := ioutil.ReadDir(r.configPath)
-	if err != nil {
-		return err
-	}
-
-	for _, v := range dir {
-		id := v.Name()
-		vol := &Volume{
-			ID:         id,
-			configPath: r.configPath + "/" + id,
-			containers: make(map[string]struct{}),
-		}
-		if err := vol.FromDisk(); err != nil {
-			if !os.IsNotExist(err) {
-				logrus.Debugf("Error restoring volume: %v", err)
-				continue
-			}
-			if err := vol.initialize(); err != nil {
-				logrus.Debugf("%s", err)
-				continue
-			}
-		}
-		r.add(vol)
-	}
-	return nil
-}
-
-func (r *Repository) Get(path string) *Volume {
-	r.lock.Lock()
-	vol := r.get(path)
-	r.lock.Unlock()
-	return vol
-}
-
-func (r *Repository) get(path string) *Volume {
-	path, err := filepath.EvalSymlinks(path)
-	if err != nil {
-		return nil
-	}
-	return r.volumes[filepath.Clean(path)]
-}
-
-func (r *Repository) add(volume *Volume) {
-	if vol := r.get(volume.Path); vol != nil {
-		return
-	}
-	r.volumes[volume.Path] = volume
-}
-
-func (r *Repository) Delete(path string) error {
-	r.lock.Lock()
-	defer r.lock.Unlock()
-	path, err := filepath.EvalSymlinks(path)
-	if err != nil {
-		return err
-	}
-	volume := r.get(filepath.Clean(path))
-	if volume == nil {
-		return fmt.Errorf("Volume %s does not exist", path)
-	}
-
-	containers := volume.Containers()
-	if len(containers) > 0 {
-		return fmt.Errorf("Volume %s is being used and cannot be removed: used by containers %s", volume.Path, containers)
-	}
-
-	if err := os.RemoveAll(volume.configPath); err != nil {
-		return err
-	}
-
-	if !volume.IsBindMount {
-		if err := r.driver.Remove(volume.ID); err != nil {
-			if !os.IsNotExist(err) {
-				return err
-			}
-		}
-	}
-
-	delete(r.volumes, volume.Path)
-	return nil
-}
-
-func (r *Repository) createNewVolumePath(id string) (string, error) {
-	if err := r.driver.Create(id, ""); err != nil {
-		return "", err
-	}
-
-	path, err := r.driver.Get(id, "")
-	if err != nil {
-		return "", fmt.Errorf("Driver %s failed to get volume rootfs %s: %v", r.driver, id, err)
-	}
-
-	return path, nil
-}
-
-func (r *Repository) FindOrCreateVolume(path string, writable bool) (*Volume, error) {
-	r.lock.Lock()
-	defer r.lock.Unlock()
-
-	if path == "" {
-		return r.newVolume(path, writable)
-	}
-
-	if v := r.get(path); v != nil {
-		return v, nil
-	}
-
-	return r.newVolume(path, writable)
-}
deleted file mode 100644
@@ -1,164 +0,0 @@
-package volumes
-
-import (
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"testing"
-
-	"github.com/docker/docker/daemon/graphdriver"
-	_ "github.com/docker/docker/daemon/graphdriver/vfs"
-)
-
-func TestRepositoryFindOrCreate(t *testing.T) {
-	root, err := ioutil.TempDir(os.TempDir(), "volumes")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(root)
-	repo, err := newRepo(root)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// no path
-	v, err := repo.FindOrCreateVolume("", true)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// FIXME: volumes are heavily dependent on the vfs driver, but this should not be so!
-	expected := filepath.Join(root, "repo-graph", "vfs", "dir", v.ID)
-	if v.Path != expected {
-		t.Fatalf("expected new path to be created in %s, got %s", expected, v.Path)
-	}
-
-	// with a non-existant path
-	dir := filepath.Join(root, "doesntexist")
-	v, err = repo.FindOrCreateVolume(dir, true)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if v.Path != dir {
-		t.Fatalf("expected new path to be created in %s, got %s", dir, v.Path)
-	}
-
-	if _, err := os.Stat(v.Path); err != nil {
-		t.Fatal(err)
-	}
-
-	// with a pre-existing path
-	// can just use the same path from above since it now exists
-	v, err = repo.FindOrCreateVolume(dir, true)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if v.Path != dir {
-		t.Fatalf("expected new path to be created in %s, got %s", dir, v.Path)
-	}
-
-}
-
-func TestRepositoryGet(t *testing.T) {
-	root, err := ioutil.TempDir(os.TempDir(), "volumes")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(root)
-	repo, err := newRepo(root)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	v, err := repo.FindOrCreateVolume("", true)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	v2 := repo.Get(v.Path)
-	if v2 == nil {
-		t.Fatalf("expected to find volume but didn't")
-	}
-	if v2 != v {
-		t.Fatalf("expected get to return same volume")
-	}
-}
-
-func TestRepositoryDelete(t *testing.T) {
-	root, err := ioutil.TempDir(os.TempDir(), "volumes")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(root)
-	repo, err := newRepo(root)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// with a normal volume
-	v, err := repo.FindOrCreateVolume("", true)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := repo.Delete(v.Path); err != nil {
-		t.Fatal(err)
-	}
-
-	if v := repo.Get(v.Path); v != nil {
-		t.Fatalf("expected volume to not exist")
-	}
-
-	if _, err := os.Stat(v.Path); err == nil {
-		t.Fatalf("expected volume files to be removed")
-	}
-
-	// with a bind mount
-	dir := filepath.Join(root, "test")
-	v, err = repo.FindOrCreateVolume(dir, true)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := repo.Delete(v.Path); err != nil {
-		t.Fatal(err)
-	}
-
-	if v := repo.Get(v.Path); v != nil {
-		t.Fatalf("expected volume to not exist")
-	}
-
-	if _, err := os.Stat(v.Path); err != nil && os.IsNotExist(err) {
-		t.Fatalf("expected bind volume data to persist after destroying volume")
-	}
-
-	// with container refs
-	dir = filepath.Join(root, "test")
-	v, err = repo.FindOrCreateVolume(dir, true)
-	if err != nil {
-		t.Fatal(err)
-	}
-	v.AddContainer("1234")
-
-	if err := repo.Delete(v.Path); err == nil {
-		t.Fatalf("expected volume delete to fail due to container refs")
-	}
-
-	v.RemoveContainer("1234")
-	if err := repo.Delete(v.Path); err != nil {
-		t.Fatal(err)
-	}
-
-}
-
-func newRepo(root string) (*Repository, error) {
-	configPath := filepath.Join(root, "repo-config")
-	graphDir := filepath.Join(root, "repo-graph")
-
-	driver, err := graphdriver.GetDriver("vfs", graphDir, []string{})
-	if err != nil {
-		return nil, err
-	}
-	return NewRepository(configPath, driver)
-}
deleted file mode 100644
@@ -1,152 +0,0 @@
-package volumes
-
-import (
-	"encoding/json"
-	"os"
-	"path/filepath"
-	"sync"
-
-	"github.com/docker/docker/pkg/symlink"
-)
-
-type Volume struct {
-	ID          string
-	Path        string
-	IsBindMount bool
-	Writable    bool
-	containers  map[string]struct{}
-	configPath  string
-	repository  *Repository
-	lock        sync.Mutex
-}
-
-func (v *Volume) IsDir() (bool, error) {
-	stat, err := os.Stat(v.Path)
-	if err != nil {
-		return false, err
-	}
-
-	return stat.IsDir(), nil
-}
-
-func (v *Volume) Containers() []string {
-	v.lock.Lock()
-
-	var containers []string
-	for c := range v.containers {
-		containers = append(containers, c)
-	}
-
-	v.lock.Unlock()
-	return containers
-}
-
-func (v *Volume) RemoveContainer(containerId string) {
-	v.lock.Lock()
-	delete(v.containers, containerId)
-	v.lock.Unlock()
-}
-
-func (v *Volume) AddContainer(containerId string) {
-	v.lock.Lock()
-	v.containers[containerId] = struct{}{}
-	v.lock.Unlock()
-}
-
-func (v *Volume) initialize() error {
-	v.lock.Lock()
-	defer v.lock.Unlock()
-
-	if _, err := os.Stat(v.Path); err != nil {
-		if !os.IsNotExist(err) {
-			return err
-		}
-		if err := os.MkdirAll(v.Path, 0755); err != nil {
-			return err
-		}
-	}
-
-	if err := os.MkdirAll(v.configPath, 0755); err != nil {
-		return err
-	}
-
-	return v.toDisk()
-}
-
-func (v *Volume) ToDisk() error {
-	v.lock.Lock()
-	defer v.lock.Unlock()
-	return v.toDisk()
-}
-
-func (v *Volume) toDisk() error {
-	jsonPath, err := v.jsonPath()
-	if err != nil {
-		return err
-	}
-	f, err := os.OpenFile(jsonPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
-	if err != nil {
-		return err
-	}
-	if err := json.NewEncoder(f).Encode(v); err != nil {
-		f.Close()
-		return err
-	}
-	return f.Close()
-}
-
-func (v *Volume) FromDisk() error {
-	v.lock.Lock()
-	defer v.lock.Unlock()
-	pth, err := v.jsonPath()
-	if err != nil {
-		return err
-	}
-
-	jsonSource, err := os.Open(pth)
-	if err != nil {
-		return err
-	}
-	defer jsonSource.Close()
-
-	dec := json.NewDecoder(jsonSource)
-
-	return dec.Decode(v)
-}
-
-func (v *Volume) jsonPath() (string, error) {
-	return v.GetRootResourcePath("config.json")
-}
-
-// Evalutes `path` in the scope of the volume's root path, with proper path
-// sanitisation. Symlinks are all scoped to the root of the volume, as
-// though the volume's root was `/`.
-//
-// The volume's root path is the host-facing path of the root of the volume's
-// mountpoint inside a container.
-//
-// NOTE: The returned path is *only* safely scoped inside the volume's root
-// if no component of the returned path changes (such as a component
-// symlinking to a different path) between using this method and using the
-// path. See symlink.FollowSymlinkInScope for more details.
-func (v *Volume) GetResourcePath(path string) (string, error) {
-	cleanPath := filepath.Join("/", path)
-	return symlink.FollowSymlinkInScope(filepath.Join(v.Path, cleanPath), v.Path)
-}
-
-// Evalutes `path` in the scope of the volume's config path, with proper path
-// sanitisation. Symlinks are all scoped to the root of the config path, as
-// though the config path was `/`.
-//
-// The config path of a volume is not exposed to the container and is just used
-// to store volume configuration options and other internal information. If in
-// doubt, you probably want to just use v.GetResourcePath.
-//
-// NOTE: The returned path is *only* safely scoped inside the volume's config
-// path if no component of the returned path changes (such as a component
-// symlinking to a different path) between using this method and using the
-// path. See symlink.FollowSymlinkInScope for more details.
-func (v *Volume) GetRootResourcePath(path string) (string, error) {
-	cleanPath := filepath.Join("/", path)
-	return symlink.FollowSymlinkInScope(filepath.Join(v.configPath, cleanPath), v.configPath)
-}
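
The GetResourcePath/GetRootResourcePath helpers removed above were the old code's guard against a path escaping the volume root: ".." components are stripped lexically first, then symlink resolution is confined to the root. For reference, a small self-contained sketch of that same pattern; the function and variable names here are placeholders, not identifiers from the diff.

package main

import (
	"fmt"
	"log"
	"path/filepath"

	"github.com/docker/docker/pkg/symlink"
)

// scopedPath mirrors the pattern of the deleted GetResourcePath helper:
// joining onto "/" drops any ".." components, and FollowSymlinkInScope then
// resolves symlinks as if volRoot were the filesystem root.
func scopedPath(volRoot, userPath string) (string, error) {
	cleanPath := filepath.Join("/", userPath)
	return symlink.FollowSymlinkInScope(filepath.Join(volRoot, cleanPath), volRoot)
}

func main() {
	// "/tmp/vol" is a placeholder root for the example.
	p, err := scopedPath("/tmp/vol", "../../etc/passwd")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(p) // stays inside /tmp/vol, e.g. /tmp/vol/etc/passwd
}
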
deleted file mode 100644
@@ -1,55 +0,0 @@
-package volumes
-
-import (
-	"os"
-	"testing"
-
-	"github.com/docker/docker/pkg/stringutils"
-)
-
-func TestContainers(t *testing.T) {
-	v := &Volume{containers: make(map[string]struct{})}
-	id := "1234"
-
-	v.AddContainer(id)
-
-	if v.Containers()[0] != id {
-		t.Fatalf("adding a container ref failed")
-	}
-
-	v.RemoveContainer(id)
-	if len(v.Containers()) != 0 {
-		t.Fatalf("removing container failed")
-	}
-}
-
-// os.Stat(v.Path) is returning ErrNotExist, initialize catch it and try to
-// mkdir v.Path but it dies and correctly returns the error
-func TestInitializeCannotMkdirOnNonExistentPath(t *testing.T) {
-	v := &Volume{Path: "nonexistentpath"}
-
-	err := v.initialize()
-	if err == nil {
-		t.Fatal("Expected not to initialize volume with a non existent path")
-	}
-
-	if !os.IsNotExist(err) {
-		t.Fatalf("Expected to get ErrNotExist error, got %s", err)
-	}
-}
-
-// os.Stat(v.Path) is NOT returning ErrNotExist so skip and return error from
-// initialize
-func TestInitializeCannotStatPathFileNameTooLong(t *testing.T) {
-	// ENAMETOOLONG
-	v := &Volume{Path: stringutils.GenerateRandomAlphaOnlyString(300)}
-
-	err := v.initialize()
-	if err == nil {
-		t.Fatal("Expected not to initialize volume with a non existent path")
-	}
-
-	if os.IsNotExist(err) {
-		t.Fatal("Expected to not get ErrNotExist")
-	}
-}