// Package daemon exposes the functions that occur on the host server
// on which the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
package daemon

import (
	"context"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/docker/docker/api/types"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/container"
	"github.com/docker/docker/daemon/config"
	"github.com/docker/docker/daemon/discovery"
	"github.com/docker/docker/daemon/events"
	"github.com/docker/docker/daemon/exec"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/daemon/network"
	"github.com/docker/docker/errdefs"
	"github.com/sirupsen/logrus"
	// register graph drivers
	_ "github.com/docker/docker/daemon/graphdriver/register"
	"github.com/docker/docker/daemon/initlayer"
	"github.com/docker/docker/daemon/stats"
	dmetadata "github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/libcontainerd"
	"github.com/docker/docker/migrate/v1"
	"github.com/docker/docker/pkg/containerfs"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/plugingetter"
	"github.com/docker/docker/pkg/sysinfo"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/truncindex"
	"github.com/docker/docker/plugin"
	pluginexec "github.com/docker/docker/plugin/executor/containerd"
	refstore "github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/runconfig"
	volumedrivers "github.com/docker/docker/volume/drivers"
	"github.com/docker/docker/volume/local"
	"github.com/docker/docker/volume/store"
	"github.com/docker/libnetwork"
	"github.com/docker/libnetwork/cluster"
	nwconfig "github.com/docker/libnetwork/config"
	"github.com/docker/libtrust"
	"github.com/pkg/errors"
)

// ContainersNamespace is the name of the namespace used for users' containers
const ContainersNamespace = "moby"

var (
	errSystemNotSupported = errors.New("the Docker daemon is not supported on this platform")
)

// Daemon holds information about the Docker daemon.
type Daemon struct {
	ID string
	repository string
	containers container.Store
	containersReplica container.ViewDB
	execCommands *exec.Store
	downloadManager *xfer.LayerDownloadManager
	uploadManager *xfer.LayerUploadManager
	trustKey libtrust.PrivateKey
	idIndex *truncindex.TruncIndex
	configStore *config.Config
	statsCollector *stats.Collector
	defaultLogConfig containertypes.LogConfig
	RegistryService registry.Service
	EventsService *events.Events
	netController libnetwork.NetworkController
	volumes *store.VolumeStore
	discoveryWatcher discovery.Reloader
	root string
	seccompEnabled bool
	apparmorEnabled bool
	shutdown bool
	idMappings *idtools.IDMappings
	graphDrivers map[string]string // By operating system
	referenceStore refstore.Store
	imageStore image.Store
	imageRoot string
	layerStores map[string]layer.Store // By operating system
	distributionMetadataStore dmetadata.Store
	PluginStore *plugin.Store // todo: remove
	pluginManager *plugin.Manager
	linkIndex *linkIndex
	containerd libcontainerd.Client
	containerdRemote libcontainerd.Remote
	defaultIsolation containertypes.Isolation // Default isolation mode on Windows
	clusterProvider cluster.Provider
	cluster Cluster
	genericResources []swarm.GenericResource
	metricsPluginListener net.Listener

	machineMemory uint64

	seccompProfile []byte
	seccompProfilePath string

	diskUsageRunning int32
	pruneRunning int32
	hosts map[string]bool // hosts stores the addresses the daemon is listening on
	startupDone chan struct{}

	attachmentStore network.AttachmentStore
}

// StoreHosts stores the addresses the daemon is listening on
func (daemon *Daemon) StoreHosts(hosts []string) {
	if daemon.hosts == nil {
		daemon.hosts = make(map[string]bool)
	}
	for _, h := range hosts {
		daemon.hosts[h] = true
	}
}

// HasExperimental returns whether the experimental features of the daemon are enabled or not
func (daemon *Daemon) HasExperimental() bool {
	return daemon.configStore != nil && daemon.configStore.Experimental
}

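// restore loads the containers stored in the daemon's repository directory,
// registers them, and reconciles their state with containerd: live containers
// are re-attached (or stopped when live restore is disabled), containers with
// a restart policy are restarted, and auto-remove containers are deleted.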
func (daemon *Daemon) restore() error {
	containers := make(map[string]*container.Container)

	logrus.Info("Loading containers: start.")

	dir, err := ioutil.ReadDir(daemon.repository)
	if err != nil {
		return err
	}

	for _, v := range dir {
		id := v.Name()
		container, err := daemon.load(id)
		if err != nil {
			logrus.Errorf("Failed to load container %v: %v", id, err)
			continue
		}
		if !system.IsOSSupported(container.OS) {
			logrus.Errorf("Failed to load container %v: %s (%q)", id, system.ErrNotSupportedOperatingSystem, container.OS)
			continue
		}
		// Ignore the container if it was created with a graph driver other than the one currently in use
		currentDriverForContainerOS := daemon.graphDrivers[container.OS]
		if (container.Driver == "" && currentDriverForContainerOS == "aufs") || container.Driver == currentDriverForContainerOS {
			rwlayer, err := daemon.layerStores[container.OS].GetRWLayer(container.ID)
			if err != nil {
				logrus.Errorf("Failed to load container mount %v: %v", id, err)
				continue
			}
			container.RWLayer = rwlayer
			logrus.Debugf("Loaded container %v, isRunning: %v", container.ID, container.IsRunning())

			containers[container.ID] = container
		} else {
			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
		}
	}

	removeContainers := make(map[string]*container.Container)
	restartContainers := make(map[*container.Container]chan struct{})
	activeSandboxes := make(map[string]interface{})
	for id, c := range containers {
		if err := daemon.registerName(c); err != nil {
			logrus.Errorf("Failed to register container name %s: %s", c.ID, err)
			delete(containers, id)
			continue
		}
		// verify that all volumes are valid and have been migrated from the pre-1.7 layout
		if err := daemon.verifyVolumesInfo(c); err != nil {
			// don't skip the container due to error
			logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err)
		}
		if err := daemon.Register(c); err != nil {
			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
			delete(containers, id)
			continue
		}

		// The LogConfig.Type is empty if the container was created before docker 1.12 with the default log driver.
		// We should rewrite it to use the daemon defaults.
		// Fixes https://github.com/docker/docker/issues/22536
		if c.HostConfig.LogConfig.Type == "" {
			if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
				logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
				continue
			}
		}
	}

	var (
		wg sync.WaitGroup
		mapLock sync.Mutex
	)
	for _, c := range containers {
		wg.Add(1)
		go func(c *container.Container) {
			defer wg.Done()
			daemon.backportMountSpec(c)
			if err := daemon.checkpointAndSave(c); err != nil {
				logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk")
			}

			daemon.setStateCounter(c)
			logrus.WithFields(logrus.Fields{
				"container": c.ID,
				"running": c.IsRunning(),
				"paused": c.IsPaused(),
			}).Debug("restoring container")
			var (
				err error
				alive bool
				ec uint32
				exitedAt time.Time
			)
			alive, _, err = daemon.containerd.Restore(context.Background(), c.ID, c.InitializeStdio)
			if err != nil && !errdefs.IsNotFound(err) {
				logrus.Errorf("Failed to restore container %s with containerd: %s", c.ID, err)
				return
			}
			if !alive {
				ec, exitedAt, err = daemon.containerd.DeleteTask(context.Background(), c.ID)
				if err != nil && !errdefs.IsNotFound(err) {
					logrus.WithError(err).Errorf("Failed to delete container %s from containerd", c.ID)
					return
				}
			} else if !daemon.configStore.LiveRestoreEnabled {
				if err := daemon.kill(c, c.StopSignal()); err != nil && !errdefs.IsNotFound(err) {
					logrus.WithError(err).WithField("container", c.ID).Error("error shutting down container")
					return
				}
			}

			if c.IsRunning() || c.IsPaused() {
				c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking
				if c.IsPaused() && alive {
					s, err := daemon.containerd.Status(context.Background(), c.ID)
					if err != nil {
						logrus.WithError(err).WithField("container", c.ID).
							Errorf("Failed to get container status")
					} else {
						logrus.WithField("container", c.ID).WithField("state", s).
							Info("restored container paused")
						switch s {
						case libcontainerd.StatusPaused, libcontainerd.StatusPausing:
							// nothing to do
						case libcontainerd.StatusStopped:
							alive = false
						case libcontainerd.StatusUnknown:
							logrus.WithField("container", c.ID).
								Error("Unknown status for container during restore")
						default:
							// running
							c.Lock()
							c.Paused = false
							daemon.setStateCounter(c)
							if err := c.CheckpointTo(daemon.containersReplica); err != nil {
								logrus.WithError(err).WithField("container", c.ID).
									Error("Failed to update stopped container state")
							}
							c.Unlock()
						}
					}
				}
				if !alive {
					c.Lock()
					c.SetStopped(&container.ExitStatus{ExitCode: int(ec), ExitedAt: exitedAt})
					daemon.Cleanup(c)
					if err := c.CheckpointTo(daemon.containersReplica); err != nil {
						logrus.Errorf("Failed to update stopped container %s state: %v", c.ID, err)
					}
					c.Unlock()
				}
				// we call Mount and then Unmount to get BaseFs of the container
				if err := daemon.Mount(c); err != nil {
					// The mount is unlikely to fail. However, in case mount fails
					// the container should be allowed to restore here. Some functionality
					// (like docker exec -u user) might be missing, but the container can
					// still be stopped/restarted/removed.
					// See #29365 for related information.
					// The error is only logged here.
					logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err)
				} else {
					if err := daemon.Unmount(c); err != nil {
						logrus.Warnf("Failed to umount container on getting BaseFs path %v: %v", c.ID, err)
					}
				}

				c.ResetRestartManager(false)
				if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
					options, err := daemon.buildSandboxOptions(c)
					if err != nil {
						logrus.Warnf("Failed to build sandbox options to restore container %s: %v", c.ID, err)
					}
					mapLock.Lock()
					activeSandboxes[c.NetworkSettings.SandboxID] = options
					mapLock.Unlock()
				}
			}
			// get the list of containers we need to restart
			// Do not autostart containers which have endpoints in a swarm-scope
			// network yet, since the cluster is not yet initialized; we will
			// start them after the cluster is initialized.
			if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
				mapLock.Lock()
				restartContainers[c] = make(chan struct{})
				mapLock.Unlock()
			} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
				mapLock.Lock()
				removeContainers[c.ID] = c
				mapLock.Unlock()
			}

			c.Lock()
			if c.RemovalInProgress {
				// We probably crashed in the middle of a removal, reset
				// the flag.
				//
				// We DO NOT remove the container here as we do not
				// know if the user had requested for either the
				// associated volumes, network links or both to also
				// be removed. So we put the container in the "dead"
				// state and leave further processing up to them.
				logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
				c.RemovalInProgress = false
				c.Dead = true
				if err := c.CheckpointTo(daemon.containersReplica); err != nil {
					logrus.Errorf("Failed to update RemovalInProgress container %s state: %v", c.ID, err)
				}
			}
			c.Unlock()
		}(c)
	}
	wg.Wait()
	daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
	if err != nil {
		return fmt.Errorf("Error initializing network controller: %v", err)
	}

	// Now that all the containers are registered, register the links
	for _, c := range containers {
		if err := daemon.registerLinks(c, c.HostConfig); err != nil {
			logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
		}
	}

	group := sync.WaitGroup{}
	for c, notifier := range restartContainers {
		group.Add(1)
		go func(c *container.Container, chNotify chan struct{}) {
			defer group.Done()
			logrus.Debugf("Starting container %s", c.ID)

			// ignore errors here as this is a best effort to wait for children to be
			// running before we try to start the container
			children := daemon.children(c)
			timeout := time.After(5 * time.Second)
			for _, child := range children {
				if notifier, exists := restartContainers[child]; exists {
					select {
					case <-notifier:
					case <-timeout:
					}
				}
			}
			// Make sure networks are available before starting
			daemon.waitForNetworks(c)
			if err := daemon.containerStart(c, "", "", true); err != nil {
				logrus.Errorf("Failed to start container %s: %s", c.ID, err)
			}
			close(chNotify)
		}(c, notifier)
	}
	group.Wait()

	removeGroup := sync.WaitGroup{}
	for id := range removeContainers {
		removeGroup.Add(1)
		go func(cid string) {
			if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
				logrus.Errorf("Failed to remove container %s: %s", cid, err)
			}
			removeGroup.Done()
		}(id)
	}
	removeGroup.Wait()

	// any containers that were started above would already have had this done,
	// however we need to now prepare the mountpoints for the rest of the containers as well.
	// This shouldn't cause any issue running on the containers that already had this run.
	// This must be run after any containers with a restart policy so that containerized plugins
	// can have a chance to be running before we try to initialize them.
	for _, c := range containers {
		// if the container has a restart policy, do not
		// prepare the mountpoints since it has been done on restarting.
		// This is to speed up the daemon start when a restarting container
		// has a volume and the volume driver is not available.
		if _, ok := restartContainers[c]; ok {
			continue
		} else if _, ok := removeContainers[c.ID]; ok {
			// container is automatically removed, skip it.
			continue
		}

		group.Add(1)
		go func(c *container.Container) {
			defer group.Done()
			if err := daemon.prepareMountPoints(c); err != nil {
				logrus.Error(err)
			}
		}(c)
	}
	group.Wait()

	logrus.Info("Loading containers: done.")

	return nil
}

// RestartSwarmContainers restarts any autostart container which has a
// swarm endpoint.
func (daemon *Daemon) RestartSwarmContainers() {
	group := sync.WaitGroup{}
	for _, c := range daemon.List() {
		if !c.IsRunning() && !c.IsPaused() {
			// Autostart all the containers which have a
			// swarm endpoint now that the cluster is
			// initialized.
			if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint {
				group.Add(1)
				go func(c *container.Container) {
					defer group.Done()
					if err := daemon.containerStart(c, "", "", true); err != nil {
						logrus.Error(err)
					}
				}(c)
			}
		}
	}
	group.Wait()
}

// waitForNetworks is used during daemon initialization when starting up containers.
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use a network that requires discovery.
func (daemon *Daemon) waitForNetworks(c *container.Container) {
	if daemon.discoveryWatcher == nil {
		return
	}
	// Make sure if the container has a network that requires discovery that the discovery service is available before starting
	for netName := range c.NetworkSettings.Networks {
		// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
		// Most likely this is because the K/V store used for discovery is in a container and needs to be started
		if _, err := daemon.netController.NetworkByName(netName); err != nil {
			if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
				continue
			}
			// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
			// FIXME: why is this slow???
			logrus.Debugf("Container %s waiting for network to be ready", c.Name)
			select {
			case <-daemon.discoveryWatcher.ReadyCh():
			case <-time.After(60 * time.Second):
			}
			return
		}
	}
}

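// children returns the child containers of the container c, as recorded in the
// daemon's link index.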
func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
	return daemon.linkIndex.children(c)
}

// parents returns the parent containers of the container c, as recorded in the
// daemon's link index.
func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
	return daemon.linkIndex.parents(c)
}

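// registerLink reserves the link name (the parent's name joined with the alias)
// for the child container and records the relationship in the daemon's link index.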
func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
	fullName := path.Join(parent.Name, alias)
	if err := daemon.containersReplica.ReserveName(fullName, child.ID); err != nil {
		if err == container.ErrNameReserved {
			logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
			return nil
		}
		return err
	}
	daemon.linkIndex.link(parent, child, fullName)
	return nil
}

// DaemonJoinsCluster informs the daemon that it has joined the cluster and
// provides the handler to query the cluster component
func (daemon *Daemon) DaemonJoinsCluster(clusterProvider cluster.Provider) {
	daemon.setClusterProvider(clusterProvider)
}

// DaemonLeavesCluster informs the daemon that it has left the cluster
func (daemon *Daemon) DaemonLeavesCluster() {
	// Daemon is in charge of removing the attachable networks with
	// connected containers when the node leaves the swarm
	daemon.clearAttachableNetworks()
	// We no longer need the cluster provider, stop it now so that
	// the network agent will stop listening to cluster events.
	daemon.setClusterProvider(nil)
	// Wait for the networking cluster agent to stop
	daemon.netController.AgentStopWait()
	// Daemon is in charge of removing the ingress network when the
	// node leaves the swarm. Wait for job to be done or timeout.
	// This is called also on graceful daemon shutdown. We need to
	// wait, because the ingress release has to happen before the
	// network controller is stopped.
	if done, err := daemon.ReleaseIngress(); err == nil {
		select {
		case <-done:
		case <-time.After(5 * time.Second):
			logrus.Warnf("timeout while waiting for ingress network removal")
		}
	} else {
		logrus.Warnf("failed to initiate ingress network removal: %v", err)
	}

	daemon.attachmentStore.ClearAttachments()
}

// setClusterProvider sets a component for querying the current cluster state.
func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) {
	daemon.clusterProvider = clusterProvider
	daemon.netController.SetClusterProvider(clusterProvider)
}

// IsSwarmCompatible verifies if the current daemon
// configuration is compatible with the swarm mode
func (daemon *Daemon) IsSwarmCompatible() error {
	if daemon.configStore == nil {
		return nil
	}
	return daemon.configStore.IsSwarmCompatible()
}

// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *config.Config, registryService registry.Service, containerdRemote libcontainerd.Remote, pluginStore *plugin.Store) (daemon *Daemon, err error) {
	setDefaultMtu(config)

	// Ensure that we have a correct root key limit for launching containers.
	if err := ModifyRootKeyLimit(); err != nil {
		logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
	}

	// Ensure we have compatible and valid configuration options
	if err := verifyDaemonSettings(config); err != nil {
		return nil, err
	}
	// Do we have a disabled network?
	config.DisableBridge = isBridgeNetworkDisabled(config)

	// Verify the platform is supported as a daemon
	if !platformSupported {
		return nil, errSystemNotSupported
	}
	// Validate platform-specific requirements
	if err := checkSystem(); err != nil {
		return nil, err
	}

	idMappings, err := setupRemappedRoot(config)
	if err != nil {
		return nil, err
	}
	rootIDs := idMappings.RootPair()
	if err := setupDaemonProcess(config); err != nil {
		return nil, err
	}

	// set up the tmpDir to use a canonical path
	tmp, err := prepareTempDir(config.Root, rootIDs)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
	}
	realTmp, err := getRealPath(tmp)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
	}
	if runtime.GOOS == "windows" {
		if _, err := os.Stat(realTmp); err != nil && os.IsNotExist(err) {
			if err := system.MkdirAll(realTmp, 0700, ""); err != nil {
				return nil, fmt.Errorf("Unable to create the TempDir (%s): %s", realTmp, err)
			}
		}
		os.Setenv("TEMP", realTmp)
		os.Setenv("TMP", realTmp)
	} else {
		os.Setenv("TMPDIR", realTmp)
	}

	d := &Daemon{
		configStore: config,
		PluginStore: pluginStore,
		startupDone: make(chan struct{}),
	}
	// Ensure the daemon is properly shut down if there is a failure during
	// initialization
	defer func() {
		if err != nil {
			if err := d.Shutdown(); err != nil {
				logrus.Error(err)
			}
		}
	}()

	if err := d.setGenericResources(config); err != nil {
		return nil, err
	}
	// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
	// on Windows to dump Go routine stacks
	stackDumpDir := config.Root
	if execRoot := config.GetExecRoot(); execRoot != "" {
		stackDumpDir = execRoot
	}
	d.setupDumpStackTrap(stackDumpDir)

	if err := d.setupSeccompProfile(); err != nil {
		return nil, err
	}

	// Set the default isolation mode (only applicable on Windows)
	if err := d.setDefaultIsolation(); err != nil {
		return nil, fmt.Errorf("error setting default isolation mode: %v", err)
	}

	logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)

	if err := configureMaxThreads(config); err != nil {
		logrus.Warnf("Failed to configure golang's threads limit: %v", err)
	}

	if err := ensureDefaultAppArmorProfile(); err != nil {
		logrus.Errorf(err.Error())
	}

	daemonRepo := filepath.Join(config.Root, "containers")
	if err := idtools.MkdirAllAndChown(daemonRepo, 0700, rootIDs); err != nil {
		return nil, err
	}

	// Create the directory where we'll store the runtime scripts (i.e. in
	// order to support runtimeArgs)
	daemonRuntimes := filepath.Join(config.Root, "runtimes")
	if err := system.MkdirAll(daemonRuntimes, 0700, ""); err != nil {
		return nil, err
	}
	if err := d.loadRuntimes(); err != nil {
		return nil, err
	}

	if runtime.GOOS == "windows" {
		if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0, ""); err != nil {
			return nil, err
		}
	}

	// On Windows we don't support the environment variable, or a user supplied graphdriver,
	// as Windows has no choice in terms of which graphdriver to use: Windows containers
	// on Windows use windowsfilter, and Linux containers on Windows use lcow. Unix platforms,
	// however, run a single graphdriver for all containers, and it can be set through an
	// environment variable, a daemon start parameter, or chosen through initialization of
	// the layerstore through driver priority order, for example.
	d.graphDrivers = make(map[string]string)
	d.layerStores = make(map[string]layer.Store)
	if runtime.GOOS == "windows" {
		d.graphDrivers[runtime.GOOS] = "windowsfilter"
		if system.LCOWSupported() {
			d.graphDrivers["linux"] = "lcow"
		}
	} else {
		driverName := os.Getenv("DOCKER_DRIVER")
		if driverName == "" {
			driverName = config.GraphDriver
		} else {
			logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName)
		}
		d.graphDrivers[runtime.GOOS] = driverName // May still be empty. Layerstore init determines instead.
	}

	d.RegistryService = registryService
	logger.RegisterPluginGetter(d.PluginStore)

	metricsSockPath, err := d.listenMetricsSock()
	if err != nil {
		return nil, err
	}
	registerMetricsPluginCallback(d.PluginStore, metricsSockPath)

	createPluginExec := func(m *plugin.Manager) (plugin.Executor, error) {
		return pluginexec.New(getPluginExecRoot(config.Root), containerdRemote, m)
	}

	// Plugin system initialization should happen before restore. Do not change order.
	d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
		Root: filepath.Join(config.Root, "plugins"),
		ExecRoot: getPluginExecRoot(config.Root),
		Store: d.PluginStore,
		CreateExecutor: createPluginExec,
		RegistryService: registryService,
		LiveRestoreEnabled: config.LiveRestoreEnabled,
		LogPluginEvent: d.LogPluginEvent, // todo: make private
		AuthzMiddleware: config.AuthzMiddleware,
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create plugin manager")
	}

	for operatingSystem, gd := range d.graphDrivers {
		d.layerStores[operatingSystem], err = layer.NewStoreFromOptions(layer.StoreOptions{
			Root: config.Root,
			MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
			GraphDriver: gd,
			GraphDriverOptions: config.GraphOptions,
			IDMappings: idMappings,
			PluginGetter: d.PluginStore,
			ExperimentalEnabled: config.Experimental,
			OS: operatingSystem,
		})
		if err != nil {
			return nil, err
		}
	}

	// As layerstore initialization may set the driver, refresh the recorded
	// graphdriver names from the layer stores.
	for os := range d.graphDrivers {
		d.graphDrivers[os] = d.layerStores[os].DriverName()
	}
	// Configure and validate the kernel's security support. Note this is a Linux/FreeBSD
	// operation only, so it is safe to pass *just* the runtime OS graphdriver.
	if err := configureKernelSecuritySupport(config, d.graphDrivers[runtime.GOOS]); err != nil {
		return nil, err
	}

	logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
	d.downloadManager = xfer.NewLayerDownloadManager(d.layerStores, *config.MaxConcurrentDownloads)
	logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
	d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)

	d.imageRoot = filepath.Join(config.Root, "image", d.graphDrivers[runtime.GOOS])
	ifs, err := image.NewFSStoreBackend(filepath.Join(d.imageRoot, "imagedb"))
	if err != nil {
		return nil, err
	}
	lgrMap := make(map[string]image.LayerGetReleaser)
	for os, ls := range d.layerStores {
		lgrMap[os] = ls
	}
	d.imageStore, err = image.NewImageStore(ifs, lgrMap)
	if err != nil {
		return nil, err
	}

	// Configure the volumes driver
	volStore, err := d.configureVolumes(rootIDs)
	if err != nil {
		return nil, err
	}

	trustKey, err := loadOrCreateTrustKey(config.TrustKeyPath)
	if err != nil {
		return nil, err
	}

	trustDir := filepath.Join(config.Root, "trust")

	if err := system.MkdirAll(trustDir, 0700, ""); err != nil {
		return nil, err
	}

	eventsService := events.New()

	// We have a single tag/reference store for the daemon globally. However, it's
	// stored under the graphdriver. On host platforms which only support a single
	// container OS, but multiple selectable graphdrivers, this means depending on which
	// graphdriver is chosen, the global reference store is under there. For
	// platforms which support multiple container operating systems, this is slightly
	// more problematic, as it is not obvious where the global ref store should be located.
	// Fortunately, for Windows, which is currently the only daemon supporting multiple
	// container operating systems, the list of graphdrivers available isn't user
	// configurable. For backwards compatibility, we just put it under the windowsfilter
	// directory regardless.
	refStoreLocation := filepath.Join(d.imageRoot, `repositories.json`)
	rs, err := refstore.NewReferenceStore(refStoreLocation)
	if err != nil {
		return nil, fmt.Errorf("Couldn't create reference store repository: %s", err)
	}
	d.referenceStore = rs

	d.distributionMetadataStore, err = dmetadata.NewFSMetadataStore(filepath.Join(d.imageRoot, "distribution"))
	if err != nil {
		return nil, err
	}

	// No content-addressability migration on Windows as it never supported pre-CA
	if runtime.GOOS != "windows" {
		migrationStart := time.Now()
		if err := v1.Migrate(config.Root, d.graphDrivers[runtime.GOOS], d.layerStores[runtime.GOOS], d.imageStore, rs, d.distributionMetadataStore); err != nil {
			logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
		}
		logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
	}

	// Discovery is only enabled when the daemon is launched with an address to advertise. When
	// initialized, the daemon is registered and we can store the discovery backend as it's read-only
	if err := d.initDiscovery(config); err != nil {
		return nil, err
	}

	sysInfo := sysinfo.New(false)
	// Check if Devices cgroup is mounted; it is a hard requirement for container
	// security on Linux.
	if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
		return nil, errors.New("Devices cgroup isn't mounted")
	}

	d.ID = trustKey.PublicKey().KeyID()
	d.repository = daemonRepo
	d.containers = container.NewMemoryStore()
	if d.containersReplica, err = container.NewViewDB(); err != nil {
		return nil, err
	}
	d.execCommands = exec.NewStore()
	d.trustKey = trustKey
	d.idIndex = truncindex.NewTruncIndex([]string{})
	d.statsCollector = d.newStatsCollector(1 * time.Second)
	d.defaultLogConfig = containertypes.LogConfig{
		Type: config.LogConfig.Type,
		Config: config.LogConfig.Config,
	}
	d.EventsService = eventsService
	d.volumes = volStore
	d.root = config.Root
	d.idMappings = idMappings
	d.seccompEnabled = sysInfo.Seccomp
	d.apparmorEnabled = sysInfo.AppArmor
	d.containerdRemote = containerdRemote

	d.linkIndex = newLinkIndex()

	go d.execCommandGC()

	d.containerd, err = containerdRemote.NewClient(ContainersNamespace, d)
	if err != nil {
		return nil, err
	}

	if err := d.restore(); err != nil {
		return nil, err
	}
	close(d.startupDone)

	// FIXME: this method never returns an error
	info, _ := d.SystemInfo()

	engineInfo.WithValues(
		dockerversion.Version,
		dockerversion.GitCommit,
		info.Architecture,
		info.Driver,
		info.KernelVersion,
		info.OperatingSystem,
		info.OSType,
		info.ID,
	).Set(1)
	engineCpus.Set(float64(info.NCPU))
	engineMemory.Set(float64(info.MemTotal))

	gd := ""
	for os, driver := range d.graphDrivers {
		if len(gd) > 0 {
			gd += ", "
		}
		gd += driver
		if len(d.graphDrivers) > 1 {
			gd = fmt.Sprintf("%s (%s)", gd, os)
		}
	}
	logrus.WithFields(logrus.Fields{
		"version": dockerversion.Version,
		"commit": dockerversion.GitCommit,
		"graphdriver(s)": gd,
	}).Info("Docker daemon")

	return d, nil
}

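// waitForStartupDone blocks until the daemon has finished restoring containers
// at startup.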
func (daemon *Daemon) waitForStartupDone() {
	<-daemon.startupDone
}

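// shutdownContainer stops the container and waits until it is no longer running.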
func (daemon *Daemon) shutdownContainer(c *container.Container) error {
	stopTimeout := c.StopTimeout()

	// If the container fails to exit within stopTimeout seconds of SIGTERM, it is forcibly killed
	if err := daemon.containerStop(c, stopTimeout); err != nil {
		return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
	}

	// Wait without timeout for the container to exit.
	// Ignore the result.
	<-c.Wait(context.Background(), container.WaitConditionNotRunning)
	return nil
}

// ShutdownTimeout returns the shutdown timeout based on the max stopTimeout of the containers,
// and is limited by the daemon's configured ShutdownTimeout.
func (daemon *Daemon) ShutdownTimeout() int {
	// By default we use the daemon's ShutdownTimeout.
	shutdownTimeout := daemon.configStore.ShutdownTimeout

	graceTimeout := 5
	if daemon.containers != nil {
		for _, c := range daemon.containers.List() {
			if shutdownTimeout >= 0 {
				stopTimeout := c.StopTimeout()
				if stopTimeout < 0 {
					shutdownTimeout = -1
				} else {
					if stopTimeout+graceTimeout > shutdownTimeout {
						shutdownTimeout = stopTimeout + graceTimeout
					}
				}
			}
		}
	}
	return shutdownTimeout
}

// Shutdown stops the daemon.
func (daemon *Daemon) Shutdown() error {
	daemon.shutdown = true
	// Keep mounts and networking running on daemon shutdown if
	// we are to keep containers running and restore them.

	if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
		// check if there are any running containers, if none we should do some cleanup
		if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
			// metrics plugins still need some cleanup
			daemon.cleanupMetricsPlugins()
			return nil
		}
	}

	if daemon.containers != nil {
		logrus.Debugf("daemon configured with a %d seconds minimum shutdown timeout", daemon.configStore.ShutdownTimeout)
		logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.ShutdownTimeout())
		daemon.containers.ApplyAll(func(c *container.Container) {
			if !c.IsRunning() {
				return
			}
			logrus.Debugf("stopping %s", c.ID)
			if err := daemon.shutdownContainer(c); err != nil {
				logrus.Errorf("Stop container error: %v", err)
				return
			}
			if mountid, err := daemon.layerStores[c.OS].GetMountID(c.ID); err == nil {
				daemon.cleanupMountsByID(mountid)
			}
			logrus.Debugf("container stopped %s", c.ID)
		})
	}

	if daemon.volumes != nil {
		if err := daemon.volumes.Shutdown(); err != nil {
			logrus.Errorf("Error shutting down volume store: %v", err)
		}
	}

	for os, ls := range daemon.layerStores {
		if ls != nil {
			if err := ls.Cleanup(); err != nil {
				logrus.Errorf("Error during layer Store.Cleanup(): %v %s", err, os)
			}
		}
	}

	// If we are part of a cluster, clean up cluster's stuff
	if daemon.clusterProvider != nil {
		logrus.Debugf("start clean shutdown of cluster resources...")
		daemon.DaemonLeavesCluster()
	}

	daemon.cleanupMetricsPlugins()

	// Shutdown plugins after containers and layerstore. Don't change the order.
	daemon.pluginShutdown()

	// trigger libnetwork Stop only if it's initialized
	if daemon.netController != nil {
		daemon.netController.Stop()
	}

	return daemon.cleanupMounts()
}

// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
	dir, err := container.RWLayer.Mount(container.GetMountLabel())
	if err != nil {
		return err
	}
	logrus.Debugf("container mounted via layerStore: %v", dir)

	if container.BaseFS != nil && container.BaseFS.Path() != dir.Path() {
		// The mount path reported by the graph driver should always be trusted on Windows, since the
		// volume path for a given mounted layer may change over time. This should only be an error
		// on non-Windows operating systems.
		if runtime.GOOS != "windows" {
			daemon.Unmount(container)
			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
				daemon.GraphDriverName(container.OS), container.ID, container.BaseFS, dir)
		}
	}
	container.BaseFS = dir // TODO: combine these fields
	return nil
}

// Unmount unsets the container base filesystem
func (daemon *Daemon) Unmount(container *container.Container) error {
	if err := container.RWLayer.Unmount(); err != nil {
		logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
		return err
	}

	return nil
}

// Subnets returns the IPv4 and IPv6 subnets of networks that are managed by Docker.
func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) {
	var v4Subnets []net.IPNet
	var v6Subnets []net.IPNet
	managedNetworks := daemon.netController.Networks()
	for _, managedNetwork := range managedNetworks {
		v4infos, v6infos := managedNetwork.Info().IpamInfo()
		for _, info := range v4infos {
			if info.IPAMData.Pool != nil {
				v4Subnets = append(v4Subnets, *info.IPAMData.Pool)
			}
		}
		for _, info := range v6infos {
			if info.IPAMData.Pool != nil {
				v6Subnets = append(v6Subnets, *info.IPAMData.Pool)
			}
		}
	}

	return v4Subnets, v6Subnets
}

// GraphDriverName returns the name of the graph driver used by the layer.Store
// for the given operating system.
func (daemon *Daemon) GraphDriverName(os string) string {
	return daemon.layerStores[os].DriverName()
}

// prepareTempDir prepares and returns the default directory to use
// for temporary files.
// If it doesn't exist, it is created. If it exists, its content is removed.
func prepareTempDir(rootDir string, rootIDs idtools.IDPair) (string, error) {
	var tmpDir string
	if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
		tmpDir = filepath.Join(rootDir, "tmp")
		newName := tmpDir + "-old"
		if err := os.Rename(tmpDir, newName); err == nil {
			go func() {
				if err := os.RemoveAll(newName); err != nil {
					logrus.Warnf("failed to delete old tmp directory: %s", newName)
				}
			}()
		} else if !os.IsNotExist(err) {
			logrus.Warnf("failed to rename %s for background deletion: %s. Deleting synchronously", tmpDir, err)
			if err := os.RemoveAll(tmpDir); err != nil {
				logrus.Warnf("failed to delete old tmp directory: %s", tmpDir)
			}
		}
	}
	// We don't remove the content of tmpdir if it's not the default,
	// it may hold things that do not belong to us.
	return tmpDir, idtools.MkdirAllAndChown(tmpDir, 0700, rootIDs)
}

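// setupInitLayer populates the given init layer path, using the daemon's
// remapped root user and group IDs for ownership.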
func (daemon *Daemon) setupInitLayer(initPath containerfs.ContainerFS) error {
	rootIDs := daemon.idMappings.RootPair()
	return initlayer.Setup(initPath, rootIDs)
}

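// setGenericResources parses the node generic resources from the daemon
// configuration and stores them on the daemon.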
func (daemon *Daemon) setGenericResources(conf *config.Config) error {
	genericResources, err := config.ParseGenericResources(conf.NodeGenericResources)
	if err != nil {
		return err
	}
	daemon.genericResources = genericResources
	return nil
}

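// setDefaultMtu sets the default network MTU if none was configured.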
func setDefaultMtu(conf *config.Config) {
	// do nothing if the config does not have the default 0 value.
	if conf.Mtu != 0 {
		return
	}
	conf.Mtu = config.DefaultNetworkMtu
}

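// configureVolumes registers the local volume driver and returns the volume
// store rooted at the daemon's root directory.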
func (daemon *Daemon) configureVolumes(rootIDs idtools.IDPair) (*store.VolumeStore, error) {
	volumesDriver, err := local.New(daemon.configStore.Root, rootIDs)
	if err != nil {
		return nil, err
	}

	volumedrivers.RegisterPluginGetter(daemon.PluginStore)

	if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) {
		return nil, errors.New("local volume driver could not be registered")
	}
	return store.New(daemon.configStore.Root)
}

// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
	return daemon.shutdown
}

// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(conf *config.Config) error {
	advertise, err := config.ParseClusterAdvertiseSettings(conf.ClusterStore, conf.ClusterAdvertise)
	if err != nil {
		if err == discovery.ErrDiscoveryDisabled {
			return nil
		}
		return err
	}

	conf.ClusterAdvertise = advertise
	discoveryWatcher, err := discovery.Init(conf.ClusterStore, conf.ClusterAdvertise, conf.ClusterOpts)
	if err != nil {
		return fmt.Errorf("discovery initialization failed (%v)", err)
	}
	daemon.discoveryWatcher = discoveryWatcher
	return nil
}

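// isBridgeNetworkDisabled reports whether the default bridge network has been
// disabled in the daemon configuration.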
func isBridgeNetworkDisabled(conf *config.Config) bool {
	return conf.BridgeConfig.Iface == config.DisableNetworkBridge
}

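// networkOptions translates the daemon's network-related configuration into
// libnetwork controller options.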
func (daemon *Daemon) networkOptions(dconfig *config.Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
	options := []nwconfig.Option{}
	if dconfig == nil {
		return options, nil
	}

	options = append(options, nwconfig.OptionExperimental(dconfig.Experimental))
	options = append(options, nwconfig.OptionDataDir(dconfig.Root))
	options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))
	dd := runconfig.DefaultDaemonNetworkMode()
	dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
	options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
	options = append(options, nwconfig.OptionDefaultNetwork(dn))
	if strings.TrimSpace(dconfig.ClusterStore) != "" {
		kv := strings.Split(dconfig.ClusterStore, "://")
		if len(kv) != 2 {
			return nil, errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
		}
		options = append(options, nwconfig.OptionKVProvider(kv[0]))
		options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
	}
	if len(dconfig.ClusterOpts) > 0 {
		options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
	}
	if daemon.discoveryWatcher != nil {
		options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
	}
	if dconfig.ClusterAdvertise != "" {
		options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
	}
	options = append(options, nwconfig.OptionLabels(dconfig.Labels))
	options = append(options, driverOptions(dconfig)...)

	if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 {
		options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
	}

	if pg != nil {
		options = append(options, nwconfig.OptionPluginGetter(pg))
	}

	options = append(options, nwconfig.OptionNetworkControlPlaneMTU(dconfig.NetworkControlPlaneMTU))

	return options, nil
}

// GetCluster returns the cluster
func (daemon *Daemon) GetCluster() Cluster {
	return daemon.cluster
}

// SetCluster sets the cluster
func (daemon *Daemon) SetCluster(cluster Cluster) {
	daemon.cluster = cluster
}

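// pluginShutdown shuts down the plugin manager, if one has been initialized.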
func (daemon *Daemon) pluginShutdown() {
	manager := daemon.pluginManager
	// Check for a valid manager object. In error conditions, daemon init can fail
	// and shutdown can be called before the plugin manager is initialized.
	if manager != nil {
		manager.Shutdown()
	}
}

// PluginManager returns the current pluginManager associated with the daemon
func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method
	return daemon.pluginManager
}

// PluginGetter returns the current pluginStore associated with the daemon
func (daemon *Daemon) PluginGetter() *plugin.Store {
	return daemon.PluginStore
}

// CreateDaemonRoot creates the root for the daemon
func CreateDaemonRoot(config *config.Config) error {
	// get the canonical path to the Docker root directory
	var realRoot string
	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
		realRoot = config.Root
	} else {
		realRoot, err = getRealPath(config.Root)
		if err != nil {
			return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
		}
	}

	idMappings, err := setupRemappedRoot(config)
	if err != nil {
		return err
	}
	return setupDaemonRoot(config, realRoot, idMappings.RootPair())
}

// checkpointAndSave grabs a container lock to safely call container.CheckpointTo
func (daemon *Daemon) checkpointAndSave(container *container.Container) error {
	container.Lock()
	defer container.Unlock()
	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
		return fmt.Errorf("Error saving container state: %v", err)
	}
	return nil
}

// fixMemorySwappiness clears the memory swappiness on the server side, because
// the CLI sends a -1 when it wants to unset the value.
func fixMemorySwappiness(resources *containertypes.Resources) {
	if resources.MemorySwappiness != nil && *resources.MemorySwappiness == -1 {
		resources.MemorySwappiness = nil
	}
}

// GetAttachmentStore returns the current attachment store associated with the daemon
func (daemon *Daemon) GetAttachmentStore() *network.AttachmentStore {
	return &daemon.attachmentStore
}