4f0d95fa |
package daemon // import "github.com/docker/docker/daemon" |
bd54d40f |
import ( |
e4b6adc8 |
"context" |
bd54d40f |
"os"
"path/filepath" |
3cf18596 |
"reflect" |
bd54d40f |
"strings" |
a46f757c |
"time" |
4994b0fe |
|
91e197d6 |
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container" |
589a0afa |
"github.com/docker/docker/api/types/mount" |
fc7b904d |
mounttypes "github.com/docker/docker/api/types/mount" |
6bb0d181 |
"github.com/docker/docker/container" |
d453fe35 |
"github.com/docker/docker/errdefs" |
81fa9feb |
"github.com/docker/docker/volume" |
6a70fd22 |
volumemounts "github.com/docker/docker/volume/mounts" |
e4b6adc8 |
"github.com/docker/docker/volume/service"
volumeopts "github.com/docker/docker/volume/service/opts" |
ebcb7d6b |
"github.com/pkg/errors" |
1009e6a4 |
"github.com/sirupsen/logrus" |
bd54d40f |
)
|
var (
	// ErrVolumeReadonly is used to signal an error when trying to copy data into
	// a volume mount that is not writable. Sentinel error: compare with
	// errors.Is / ==, do not wrap a different message around it at this layer.
	ErrVolumeReadonly = errors.New("mounted volume is marked read-only")
)
c32dde5b |
|
// mounts is a sortable collection of container mount points. It implements
// sort.Interface (Len/Less/Swap); ordering is by the number of path
// components in each mount's destination, so shallower paths sort first.
type mounts []container.Mount
f3680e74 |
|
// Len returns the number of mounts. Used in sorting.
func (m mounts) Len() int {
	return len(m)
}
45407cf0 |
|
a7e686a7 |
// Less returns true if the number of parts (a/b/c would be 3 parts) in the
// mount indexed by parameter 1 is less than that of the mount indexed by
// parameter 2. Used in sorting.
func (m mounts) Less(i, j int) bool {
return m.parts(i) < m.parts(j) |
f3680e74 |
}
|
// Swap swaps two items in an array of mounts. Used in sorting.
func (m mounts) Swap(i, j int) {
	m[i], m[j] = m[j], m[i]
}
c32dde5b |
|
a7e686a7 |
// parts returns the number of parts in the destination of a mount. Used in sorting.
func (m mounts) parts(i int) int {
return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator)) |
c32dde5b |
}
|
// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
// It follows the next sequence to decide what to mount in each final destination:
//
// 1. Select the previously configured mount points for the containers, if any.
// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination.
// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
// 4. Cleanup old volumes that are about to be reassigned.
//
// Every volume resolved here takes a reference (container.ID) in the volumes
// service; the deferred cleanup below releases those references if this
// function returns an error, so callers never leak refs on failure.
func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) {
	binds := map[string]bool{}
	mountPoints := map[string]*volumemounts.MountPoint{}
	parser := volumemounts.NewParser(container.OS)

	ctx := context.TODO()
	defer func() {
		// clean up the container mountpoints once return with error
		if retErr != nil {
			for _, m := range mountPoints {
				if m.Volume == nil {
					continue
				}
				daemon.volumes.Release(ctx, m.Volume.Name(), container.ID)
			}
		}
	}()

	// dereferenceIfExists releases the volume reference held by a mount point
	// that is about to be overridden at the same destination, so the override
	// does not leave a dangling reference on the replaced volume.
	dereferenceIfExists := func(destination string) {
		if v, ok := mountPoints[destination]; ok {
			logrus.Debugf("Duplicate mount point '%s'", destination)
			if v.Volume != nil {
				daemon.volumes.Release(ctx, v.Volume.Name(), container.ID)
			}
		}
	}

	// 1. Read already configured mount points.
	for destination, point := range container.MountPoints {
		mountPoints[destination] = point
	}

	// 2. Read volumes from other containers.
	for _, v := range hostConfig.VolumesFrom {
		containerID, mode, err := parser.ParseVolumesFrom(v)
		if err != nil {
			return err
		}

		c, err := daemon.GetContainer(containerID)
		if err != nil {
			return err
		}
		for _, m := range c.MountPoints {
			// Copy the source container's mount point; the copy is read-only
			// if either the source mount or the volumes-from mode says so.
			cp := &volumemounts.MountPoint{
				Type:        m.Type,
				Name:        m.Name,
				Source:      m.Source,
				RW:          m.RW && parser.ReadWrite(mode),
				Driver:      m.Driver,
				Destination: m.Destination,
				Propagation: m.Propagation,
				Spec:        m.Spec,
				CopyData:    false,
			}
			if len(cp.Source) == 0 {
				// No host source path: this is a named volume, so resolve it
				// (taking a reference for this container) from the service.
				v, err := daemon.volumes.Get(ctx, cp.Name, volumeopts.WithGetDriver(cp.Driver), volumeopts.WithGetReference(container.ID))
				if err != nil {
					return err
				}
				cp.Volume = &volumeWrapper{v: v, s: daemon.volumes}
			}
			dereferenceIfExists(cp.Destination)
			mountPoints[cp.Destination] = cp
		}
	}
	// 3. Read bind mounts
	for _, b := range hostConfig.Binds {
		bind, err := parser.ParseMountRaw(b, hostConfig.VolumeDriver)
		if err != nil {
			return err
		}
		// Binds over the daemon root must be made rslave so unmounts under
		// the daemon root propagate into the container's view.
		needsSlavePropagation, err := daemon.validateBindDaemonRoot(bind.Spec)
		if err != nil {
			return err
		}
		if needsSlavePropagation {
			bind.Propagation = mount.PropagationRSlave
		}

		// #10618 — reject duplicate destinations between binds and tmpfs.
		_, tmpfsExists := hostConfig.Tmpfs[bind.Destination]
		if binds[bind.Destination] || tmpfsExists {
			return duplicateMountPointError(bind.Destination)
		}

		if bind.Type == mounttypes.TypeVolume {
			// create the volume
			v, err := daemon.volumes.Create(ctx, bind.Name, bind.Driver, volumeopts.WithCreateReference(container.ID))
			if err != nil {
				return err
			}
			bind.Volume = &volumeWrapper{v: v, s: daemon.volumes}
			bind.Source = v.Mountpoint
			// bind.Name is an already existing volume, we need to use that here
			bind.Driver = v.Driver
			if bind.Driver == volume.DefaultDriverName {
				setBindModeIfNull(bind)
			}
		}

		binds[bind.Destination] = true
		dereferenceIfExists(bind.Destination)
		mountPoints[bind.Destination] = bind
	}

	// Mounts from the (1.13+) typed Mounts API; processed last so they win
	// over legacy Binds/VolumesFrom at the same destination.
	for _, cfg := range hostConfig.Mounts {
		mp, err := parser.ParseMountSpec(cfg)
		if err != nil {
			return errdefs.InvalidParameter(err)
		}
		needsSlavePropagation, err := daemon.validateBindDaemonRoot(mp.Spec)
		if err != nil {
			return err
		}
		if needsSlavePropagation {
			mp.Propagation = mount.PropagationRSlave
		}
		if binds[mp.Destination] {
			// NOTE(review): error reports cfg.Target while the check uses
			// mp.Destination — presumably equivalent after parsing; confirm.
			return duplicateMountPointError(cfg.Target)
		}
		if mp.Type == mounttypes.TypeVolume {
			var v *types.Volume
			if cfg.VolumeOptions != nil {
				var driverOpts map[string]string
				if cfg.VolumeOptions.DriverConfig != nil {
					driverOpts = cfg.VolumeOptions.DriverConfig.Options
				}
				v, err = daemon.volumes.Create(ctx,
					mp.Name,
					mp.Driver,
					volumeopts.WithCreateReference(container.ID),
					volumeopts.WithCreateOptions(driverOpts),
					volumeopts.WithCreateLabels(cfg.VolumeOptions.Labels),
				)
			} else {
				v, err = daemon.volumes.Create(ctx, mp.Name, mp.Driver, volumeopts.WithCreateReference(container.ID))
			}
			if err != nil {
				return err
			}

			mp.Volume = &volumeWrapper{v: v, s: daemon.volumes}
			mp.Name = v.Name
			mp.Driver = v.Driver

			// need to selinux-relabel local mounts
			mp.Source = v.Mountpoint
			if mp.Driver == volume.DefaultDriverName {
				setBindModeIfNull(mp)
			}
		}

		if mp.Type == mounttypes.TypeBind {
			// Typed bind mounts must not auto-create the host directory.
			mp.SkipMountpointCreation = true
		}

		binds[mp.Destination] = true
		dereferenceIfExists(mp.Destination)
		mountPoints[mp.Destination] = mp
	}

	container.Lock()

	// 4. Cleanup old volumes that are about to be reassigned.
	for _, m := range mountPoints {
		if parser.IsBackwardCompatible(m) {
			if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {
				daemon.volumes.Release(ctx, mp.Volume.Name(), container.ID)
			}
		}
	}
	container.MountPoints = mountPoints
	container.Unlock()
	return nil
}
aab35963 |
// lazyInitializeVolume initializes a mountpoint's volume if needed.
// This happens after a daemon restart. |
6a70fd22 |
func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volumemounts.MountPoint) error { |
aab35963 |
if len(m.Driver) > 0 && m.Volume == nil { |
e4b6adc8 |
v, err := daemon.volumes.Get(context.TODO(), m.Name, volumeopts.WithGetDriver(m.Driver), volumeopts.WithGetReference(containerID)) |
aab35963 |
if err != nil {
return err
} |
e4b6adc8 |
m.Volume = &volumeWrapper{v: v, s: daemon.volumes} |
aab35963 |
}
return nil
} |
29b1c1da |
|
// backportMountSpec resolves mount specs (introduced in 1.13) from pre-1.13
// mount configurations.
// The container lock should not be held when calling this function.
// Changes are only made in-memory and may make changes to containers referenced
// by `container.HostConfig.VolumesFrom`.
//
// NOTE(review): this recurses into volumes-from containers while holding this
// container's lock; assumes volumes-from references are acyclic and never
// self-referencing, otherwise the Lock calls could deadlock — confirm.
func (daemon *Daemon) backportMountSpec(container *container.Container) {
	container.Lock()
	defer container.Unlock()

	parser := volumemounts.NewParser(container.OS)

	// Destinations whose mount points are missing spec data (no Spec.Source
	// or no Type) and therefore may need backporting.
	maybeUpdate := make(map[string]bool)
	for _, mp := range container.MountPoints {
		if mp.Spec.Source != "" && mp.Type != "" {
			continue
		}
		maybeUpdate[mp.Destination] = true
	}
	if len(maybeUpdate) == 0 {
		return
	}

	// Destinations already covered by the typed hostconfig.Mounts API:
	// those need no backporting.
	mountSpecs := make(map[string]bool, len(container.HostConfig.Mounts))
	for _, m := range container.HostConfig.Mounts {
		mountSpecs[m.Target] = true
	}

	// Re-parse the legacy raw bind strings so we can recover their specs.
	// Parse failures are logged and skipped rather than aborting the backport.
	binds := make(map[string]*volumemounts.MountPoint, len(container.HostConfig.Binds))
	for _, rawSpec := range container.HostConfig.Binds {
		mp, err := parser.ParseMountRaw(rawSpec, container.HostConfig.VolumeDriver)
		if err != nil {
			logrus.WithError(err).Error("Got unexpected error while re-parsing raw volume spec during spec backport")
			continue
		}
		binds[mp.Destination] = mp
	}

	// Collect mount points inherited via volumes-from, backporting each
	// source container first so their specs are authoritative.
	volumesFrom := make(map[string]volumemounts.MountPoint)
	for _, fromSpec := range container.HostConfig.VolumesFrom {
		from, _, err := parser.ParseVolumesFrom(fromSpec)
		if err != nil {
			logrus.WithError(err).WithField("id", container.ID).Error("Error reading volumes-from spec during mount spec backport")
			continue
		}
		fromC, err := daemon.GetContainer(from)
		if err != nil {
			logrus.WithError(err).WithField("from-container", from).Error("Error looking up volumes-from container")
			continue
		}
		// make sure from container's specs have been backported
		daemon.backportMountSpec(fromC)
		fromC.Lock()
		for t, mp := range fromC.MountPoints {
			volumesFrom[t] = *mp
		}
		fromC.Unlock()
	}

	// needsUpdate reports whether the container's mount point disagrees with
	// the recovered spec in type or spec contents.
	needsUpdate := func(containerMount, other *volumemounts.MountPoint) bool {
		if containerMount.Type != other.Type || !reflect.DeepEqual(containerMount.Spec, other.Spec) {
			return true
		}
		return false
	}
	// main
	for _, cm := range container.MountPoints {
		if !maybeUpdate[cm.Destination] {
			continue
		}
		// nothing to backport if from hostconfig.Mounts
		if mountSpecs[cm.Destination] {
			continue
		}
		if mp, exists := binds[cm.Destination]; exists {
			if needsUpdate(cm, mp) {
				cm.Spec = mp.Spec
				cm.Type = mp.Type
			}
			continue
		}
		if cm.Name != "" {
			if mp, exists := volumesFrom[cm.Destination]; exists {
				if needsUpdate(cm, &mp) {
					cm.Spec = mp.Spec
					cm.Type = mp.Type
				}
				continue
			}
			if cm.Type != "" {
				// probably specified via the hostconfig.Mounts
				continue
			}
			// anon volume
			cm.Type = mounttypes.TypeVolume
			cm.Spec.Type = mounttypes.TypeVolume
		} else {
			if cm.Type != "" {
				// already updated
				continue
			}
			// nameless mount point: treat as a bind mount.
			cm.Type = mounttypes.TypeBind
			cm.Spec.Type = mounttypes.TypeBind
			cm.Spec.Source = cm.Source
			if cm.Propagation != "" {
				cm.Spec.BindOptions = &mounttypes.BindOptions{
					Propagation: cm.Propagation,
				}
			}
		}

		// Common fields for every backported spec.
		cm.Spec.Target = cm.Destination
		cm.Spec.ReadOnly = !cm.RW
	}
}
// VolumesService is used to perform volume operations.
// It exposes the daemon's volumes service to callers outside this package.
func (daemon *Daemon) VolumesService() *service.VolumesService {
	return daemon.volumes
}
// volumeMounter is the minimal subset of the volumes service that
// volumeWrapper needs to mount and unmount a volume on behalf of a reference.
type volumeMounter interface {
	Mount(ctx context.Context, v *types.Volume, ref string) (string, error)
	Unmount(ctx context.Context, v *types.Volume, ref string) error
}
// volumeWrapper pairs an API-level volume description with the service
// needed to mount/unmount it, so it can be stored as a mount point's Volume.
type volumeWrapper struct {
	v *types.Volume // the wrapped API volume (name, driver, mountpoint, status)
	s volumeMounter // service used to perform the actual mount/unmount
}
// Name returns the name of the wrapped volume.
func (v *volumeWrapper) Name() string {
	return v.v.Name
}
// DriverName returns the name of the driver managing the wrapped volume.
func (v *volumeWrapper) DriverName() string {
	return v.v.Driver
}
// Path returns the host path where the wrapped volume is (or will be) mounted.
func (v *volumeWrapper) Path() string {
	return v.v.Mountpoint
}
// Mount mounts the wrapped volume for the given reference and returns the
// path at which it was mounted.
func (v *volumeWrapper) Mount(ref string) (string, error) {
	mountPath, err := v.s.Mount(context.TODO(), v.v, ref)
	return mountPath, err
}
// Unmount unmounts the wrapped volume for the given reference.
func (v *volumeWrapper) Unmount(ref string) error {
	err := v.s.Unmount(context.TODO(), v.v, ref)
	return err
}
// CreatedAt returns the creation time of the wrapped volume. The API volume
// carries its creation time as an RFC3339-formatted string; when the driver
// did not report one (empty string), the previous "not implemented" error is
// preserved as the fallback so existing callers see unchanged behavior.
func (v *volumeWrapper) CreatedAt() (time.Time, error) {
	if v.v.CreatedAt == "" {
		return time.Time{}, errors.New("not implemented")
	}
	return time.Parse(time.RFC3339, v.v.CreatedAt)
}
// Status returns the driver-specific status map of the wrapped volume.
// The map is returned as-is (not copied), so callers share the underlying data.
func (v *volumeWrapper) Status() map[string]interface{} {
	return v.v.Status
}