daemon/daemon.go
 // Package daemon exposes the functions that occur on the host server
 // where the Docker daemon is running.
 //
 // In implementing the various functions of the daemon, there is often
 // a method-specific struct for configuring the runtime behavior.
 package daemon
 
 import (
 	"encoding/json"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"net"
 	"os"
 	"path"
 	"path/filepath"
 	"runtime"
 	"strings"
 	"sync"
 	"syscall"
 	"time"
 
 	"github.com/Sirupsen/logrus"
 	containerd "github.com/docker/containerd/api/grpc/types"
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/daemon/events"
 	"github.com/docker/docker/daemon/exec"
 	"github.com/docker/engine-api/types"
 	containertypes "github.com/docker/engine-api/types/container"
 	"github.com/docker/libnetwork/cluster"
 	// register graph drivers
 	_ "github.com/docker/docker/daemon/graphdriver/register"
 	dmetadata "github.com/docker/docker/distribution/metadata"
 	"github.com/docker/docker/distribution/xfer"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/libcontainerd"
 	"github.com/docker/docker/migrate/v1"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/graphdb"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/progress"
 	"github.com/docker/docker/pkg/registrar"
 	"github.com/docker/docker/pkg/signal"
 	"github.com/docker/docker/pkg/streamformatter"
 	"github.com/docker/docker/pkg/sysinfo"
 	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/truncindex"
 	"github.com/docker/docker/reference"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/utils"
 	volumedrivers "github.com/docker/docker/volume/drivers"
 	"github.com/docker/docker/volume/local"
 	"github.com/docker/docker/volume/store"
 	"github.com/docker/libnetwork"
 	nwconfig "github.com/docker/libnetwork/config"
 	"github.com/docker/libtrust"
 )
 
 var (
 	// DefaultRuntimeBinary is the default runtime to be used by
 	// containerd if none is specified
 	DefaultRuntimeBinary = "docker-runc"
 
 	errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.")
 )
 
 // Daemon holds information about the Docker daemon.
 type Daemon struct {
 	ID                        string
 	repository                string
 	containers                container.Store
 	execCommands              *exec.Store
 	referenceStore            reference.Store
 	downloadManager           *xfer.LayerDownloadManager
 	uploadManager             *xfer.LayerUploadManager
 	distributionMetadataStore dmetadata.Store
 	trustKey                  libtrust.PrivateKey
 	idIndex                   *truncindex.TruncIndex
 	configStore               *Config
 	statsCollector            *statsCollector
 	defaultLogConfig          containertypes.LogConfig
 	RegistryService           registry.Service
 	EventsService             *events.Events
 	netController             libnetwork.NetworkController
 	volumes                   *store.VolumeStore
 	discoveryWatcher          discoveryReloader
 	root                      string
 	seccompEnabled            bool
 	shutdown                  bool
 	uidMaps                   []idtools.IDMap
 	gidMaps                   []idtools.IDMap
 	layerStore                layer.Store
 	imageStore                image.Store
 	nameIndex                 *registrar.Registrar
 	linkIndex                 *linkIndex
 	containerd                libcontainerd.Client
 	containerdRemote          libcontainerd.Remote
 	defaultIsolation          containertypes.Isolation // Default isolation mode on Windows
 	clusterProvider           cluster.Provider
 }
 
 func (daemon *Daemon) restore() error {
 	var (
 		debug         = utils.IsDebugEnabled()
 		currentDriver = daemon.GraphDriverName()
 		containers    = make(map[string]*container.Container)
 	)
 
 	if !debug {
 		logrus.Info("Loading containers: start.")
 	}
 	dir, err := ioutil.ReadDir(daemon.repository)
 	if err != nil {
 		return err
 	}
 
 	for _, v := range dir {
 		id := v.Name()
 		container, err := daemon.load(id)
 		if !debug && logrus.GetLevel() == logrus.InfoLevel {
 			fmt.Print(".")
 		}
 		if err != nil {
 			logrus.Errorf("Failed to load container %v: %v", id, err)
 			continue
 		}
 
 		// Ignore the container if it was created with a graph driver other than the one currently in use.
 		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
 			rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
 			if err != nil {
 				logrus.Errorf("Failed to load container mount %v: %v", id, err)
 				continue
 			}
 			container.RWLayer = rwlayer
 			logrus.Debugf("Loaded container %v", container.ID)
 
 			containers[container.ID] = container
 		} else {
 			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
 		}
 	}
 
 	var migrateLegacyLinks bool
 	restartContainers := make(map[*container.Container]chan struct{})
 	activeSandboxes := make(map[string]interface{})
 	for _, c := range containers {
 		if err := daemon.registerName(c); err != nil {
 			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
 			continue
 		}
 		if err := daemon.Register(c); err != nil {
 			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
 			continue
 		}
 
 		// The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver.
 		// We should rewrite it to use the daemon defaults.
 		// Fixes https://github.com/docker/docker/issues/22536
 		if c.HostConfig.LogConfig.Type == "" {
 			if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
 				logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
 				continue
 			}
 		}
 	}
 	var wg sync.WaitGroup
 	var mapLock sync.Mutex
 	for _, c := range containers {
 		wg.Add(1)
 		go func(c *container.Container) {
 			defer wg.Done()
 			rm := c.RestartManager(false)
 			if c.IsRunning() || c.IsPaused() {
 				if err := daemon.containerd.Restore(c.ID, libcontainerd.WithRestartManager(rm)); err != nil {
 					logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err)
 					return
 				}
 				if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
 					options, err := daemon.buildSandboxOptions(c)
 					if err != nil {
 						logrus.Warnf("Failed build sandbox option to restore container %s: %v", c.ID, err)
 					}
 					mapLock.Lock()
 					activeSandboxes[c.NetworkSettings.SandboxID] = options
 					mapLock.Unlock()
 				}
 
 			}
 			// fixme: only if not running
 			// get list of containers we need to restart
 			if daemon.configStore.AutoRestart && !c.IsRunning() && !c.IsPaused() && c.ShouldRestart() {
 				mapLock.Lock()
 				restartContainers[c] = make(chan struct{})
 				mapLock.Unlock()
 			}
 
 			if c.RemovalInProgress {
 				// We probably crashed in the middle of a removal, reset
 				// the flag.
 				//
 				// We DO NOT remove the container here as we do not
 				// know if the user had requested for either the
 				// associated volumes, network links or both to also
 				// be removed. So we put the container in the "dead"
 				// state and leave further processing up to them.
 				logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
 				c.ResetRemovalInProgress()
 				c.SetDead()
 				c.ToDisk()
 			}
 
 			// if c.hostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated
 			if c.HostConfig != nil && c.HostConfig.Links == nil {
 				migrateLegacyLinks = true
 			}
 		}(c)
 	}
 	wg.Wait()
 	daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
 	if err != nil {
 		return fmt.Errorf("Error initializing network controller: %v", err)
 	}
 
 	// migrate any legacy links from sqlite
 	linkdbFile := filepath.Join(daemon.root, "linkgraph.db")
 	var legacyLinkDB *graphdb.Database
 	if migrateLegacyLinks {
 		legacyLinkDB, err = graphdb.NewSqliteConn(linkdbFile)
 		if err != nil {
 			return fmt.Errorf("error connecting to legacy link graph DB %s, container links may be lost: %v", linkdbFile, err)
 		}
 		defer legacyLinkDB.Close()
 	}
 
 	// Now that all the containers are registered, register the links
 	for _, c := range containers {
 		if migrateLegacyLinks {
 			if err := daemon.migrateLegacySqliteLinks(legacyLinkDB, c); err != nil {
 				return err
 			}
 		}
 		if err := daemon.registerLinks(c, c.HostConfig); err != nil {
 			logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
 		}
 	}
 
 	group := sync.WaitGroup{}
 	for c, notifier := range restartContainers {
 		group.Add(1)
 
 		go func(c *container.Container, chNotify chan struct{}) {
 			defer group.Done()
 
 			logrus.Debugf("Starting container %s", c.ID)
 
 			// Ignore errors here, as this is a best effort to wait for children
 			// to be running before we try to start the container.
 			children := daemon.children(c)
 			timeout := time.After(5 * time.Second)
 			for _, child := range children {
 				if notifier, exists := restartContainers[child]; exists {
 					select {
 					case <-notifier:
 					case <-timeout:
 					}
 				}
 			}
 
 			// Make sure networks are available before starting
 			daemon.waitForNetworks(c)
 			if err := daemon.containerStart(c); err != nil {
 				logrus.Errorf("Failed to start container %s: %s", c.ID, err)
 			}
 			close(chNotify)
 		}(c, notifier)
 
 	}
 	group.Wait()
 
 	// Any containers that were started above have already had this done;
 	// however, we now need to prepare the mount points for the rest of the
 	// containers as well. This shouldn't cause any issues for containers
 	// that already had it run. This must be run after any containers with a
 	// restart policy so that containerized plugins can have a chance to be
 	// running before we try to initialize them.
 	for _, c := range containers {
 		// If the container has a restart policy, do not prepare the mount
 		// points, since that was already done when it was restarted. This
 		// speeds up daemon start when a restarting container has a volume
 		// whose volume driver is not available.
 		if _, ok := restartContainers[c]; ok {
 			continue
 		}
 		group.Add(1)
 		go func(c *container.Container) {
 			defer group.Done()
 			if err := daemon.prepareMountPoints(c); err != nil {
 				logrus.Error(err)
 			}
 		}(c)
 	}
 
 	group.Wait()
 
 	if !debug {
 		if logrus.GetLevel() == logrus.InfoLevel {
 			fmt.Println()
 		}
 		logrus.Info("Loading containers: done.")
 	}
 
 	return nil
 }
 
 // waitForNetworks is used during daemon initialization when starting up containers.
 // It ensures that all of a container's networks are available before the daemon tries to start the container.
 // In practice it just makes sure the discovery service is available for containers which use a network that requires discovery.
 func (daemon *Daemon) waitForNetworks(c *container.Container) {
 	if daemon.discoveryWatcher == nil {
 		return
 	}
 	// Make sure if the container has a network that requires discovery that the discovery service is available before starting
 	for netName := range c.NetworkSettings.Networks {
 		// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
 		// Most likely this is because the K/V store used for discovery is in a container and needs to be started
 		if _, err := daemon.netController.NetworkByName(netName); err != nil {
 			if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
 				continue
 			}
 			// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
 			// FIXME: why is this slow???
 			logrus.Debugf("Container %s waiting for network to be ready", c.Name)
 			select {
 			case <-daemon.discoveryWatcher.ReadyCh():
 			case <-time.After(60 * time.Second):
 			}
 			return
 		}
 	}
 }
 
 func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
 	return daemon.linkIndex.children(c)
 }
 
 // parents returns the names of the parent containers of the container
 // with the given name.
 func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
 	return daemon.linkIndex.parents(c)
 }
 
 func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
 	fullName := path.Join(parent.Name, alias)
 	if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil {
 		if err == registrar.ErrNameReserved {
 			logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
 			return nil
 		}
 		return err
 	}
 	daemon.linkIndex.link(parent, child, fullName)
 	return nil
 }
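 
 // A minimal sketch (names hypothetical) of the naming scheme registerLink
 // relies on: linking a child container into parent "/web" under alias "db"
 // reserves the fully qualified name "/web/db" for the child's ID, and a
 // second reservation of the same name yields registrar.ErrNameReserved,
 // which is logged and ignored above:
 //
 //	fullName := path.Join("/web", "db") // "/web/db"
 //	if err := daemon.nameIndex.Reserve(fullName, childID); err == registrar.ErrNameReserved {
 //		// already linked; nothing to do
 //	}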
 
 // SetClusterProvider sets a component for querying the current cluster state.
 func (daemon *Daemon) SetClusterProvider(clusterProvider cluster.Provider) {
 	daemon.clusterProvider = clusterProvider
 	daemon.netController.SetClusterProvider(clusterProvider)
 }
 
 // IsSwarmCompatible verifies if the current daemon
 // configuration is compatible with the swarm mode
 func (daemon *Daemon) IsSwarmCompatible() error {
 	if daemon.configStore == nil {
 		return nil
 	}
 	return daemon.configStore.isSwarmCompatible()
 }
 
 // NewDaemon sets up everything for the daemon to be able to service
 // requests from the webserver.
 func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) {
 	setDefaultMtu(config)
 
 	// Ensure that we have a correct root key limit for launching containers.
 	if err := ModifyRootKeyLimit(); err != nil {
 		logrus.Warnf("unable to modify root key limit, number of containers could be limitied by this quota: %v", err)
 	}
 
 	// Ensure we have compatible and valid configuration options
 	if err := verifyDaemonSettings(config); err != nil {
 		return nil, err
 	}
 
 	// Do we have a disabled network?
 	config.DisableBridge = isBridgeNetworkDisabled(config)
 
 	// Verify the platform is supported as a daemon
 	if !platformSupported {
 		return nil, errSystemNotSupported
 	}
 
 	// Validate platform-specific requirements
 	if err := checkSystem(); err != nil {
 		return nil, err
 	}
 
 	// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
 	// on Windows to dump Go routine stacks
 	setupDumpStackTrap()
 
 	uidMaps, gidMaps, err := setupRemappedRoot(config)
 	if err != nil {
 		return nil, err
 	}
 	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
 	if err != nil {
 		return nil, err
 	}
 
 	// get the canonical path to the Docker root directory
 	var realRoot string
 	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
 		realRoot = config.Root
 	} else {
 		realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
 		if err != nil {
 			return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
 		}
 	}
 
 	if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil {
 		return nil, err
 	}
 
 	if err := setupDaemonProcess(config); err != nil {
 		return nil, err
 	}
 
 	// set up the tmpDir to use a canonical path
 	tmp, err := tempDir(config.Root, rootUID, rootGID)
 	if err != nil {
 		return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
 	}
 	realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
 	if err != nil {
 		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
 	}
 	os.Setenv("TMPDIR", realTmp)
 
 	d := &Daemon{configStore: config}
 	// Ensure the daemon is properly shutdown if there is a failure during
 	// initialization
 	defer func() {
 		if err != nil {
 			if err := d.Shutdown(); err != nil {
 				logrus.Error(err)
 			}
 		}
 	}()
 
 	// Set the default isolation mode (only applicable on Windows)
 	if err := d.setDefaultIsolation(); err != nil {
 		return nil, fmt.Errorf("error setting default isolation mode: %v", err)
 	}
 
 	logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)
 
 	if err := configureMaxThreads(config); err != nil {
 		logrus.Warnf("Failed to configure golang's threads limit: %v", err)
 	}
 
 	installDefaultAppArmorProfile()
 	daemonRepo := filepath.Join(config.Root, "containers")
 	if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
 		return nil, err
 	}
 
 	driverName := os.Getenv("DOCKER_DRIVER")
 	if driverName == "" {
 		driverName = config.GraphDriver
 	}
 	d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
 		StorePath:                 config.Root,
 		MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
 		GraphDriver:               driverName,
 		GraphDriverOptions:        config.GraphOptions,
 		UIDMaps:                   uidMaps,
 		GIDMaps:                   gidMaps,
 	})
 	if err != nil {
 		return nil, err
 	}
 
 	graphDriver := d.layerStore.DriverName()
 	imageRoot := filepath.Join(config.Root, "image", graphDriver)
 
 	// Configure and validate the kernel's security support
 	if err := configureKernelSecuritySupport(config, graphDriver); err != nil {
 		return nil, err
 	}
 
 	logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
 	d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads)
 	logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
 	d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)
 
 	ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
 	if err != nil {
 		return nil, err
 	}
 
 	d.imageStore, err = image.NewImageStore(ifs, d.layerStore)
 	if err != nil {
 		return nil, err
 	}
 
 	// Configure the volumes driver
 	volStore, err := d.configureVolumes(rootUID, rootGID)
 	if err != nil {
 		return nil, err
 	}
 
 	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
 	if err != nil {
 		return nil, err
 	}
 
 	trustDir := filepath.Join(config.Root, "trust")
 
 	if err := system.MkdirAll(trustDir, 0700); err != nil {
 		return nil, err
 	}
 
 	distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
 	if err != nil {
 		return nil, err
 	}
 
 	eventsService := events.New()
 
 	referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json"))
 	if err != nil {
 		return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err)
 	}
 
 	if err := restoreCustomImage(d.imageStore, d.layerStore, referenceStore); err != nil {
 		return nil, fmt.Errorf("Couldn't restore custom images: %s", err)
 	}
 
 	migrationStart := time.Now()
 	if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil {
 		logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
 	}
 	logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
 
 	// Discovery is only enabled when the daemon is launched with an address to advertise. When
 	// initialized, the daemon is registered and we can store the discovery backend as it's read-only.
 	if err := d.initDiscovery(config); err != nil {
 		return nil, err
139ea5b7
 	}
 
 	sysInfo := sysinfo.New(false)
 	// On Linux, check that the Devices cgroup is mounted; it is a hard
 	// requirement for container security.
 	if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
 		return nil, fmt.Errorf("Devices cgroup isn't mounted")
 	}
 
 	d.ID = trustKey.PublicKey().KeyID()
 	d.repository = daemonRepo
 	d.containers = container.NewMemoryStore()
 	d.execCommands = exec.NewStore()
 	d.referenceStore = referenceStore
 	d.distributionMetadataStore = distributionMetadataStore
 	d.trustKey = trustKey
 	d.idIndex = truncindex.NewTruncIndex([]string{})
 	d.statsCollector = d.newStatsCollector(1 * time.Second)
 	d.defaultLogConfig = containertypes.LogConfig{
 		Type:   config.LogConfig.Type,
 		Config: config.LogConfig.Config,
 	}
 	d.RegistryService = registryService
 	d.EventsService = eventsService
 	d.volumes = volStore
 	d.root = config.Root
 	d.uidMaps = uidMaps
 	d.gidMaps = gidMaps
 	d.seccompEnabled = sysInfo.Seccomp
 
 	d.nameIndex = registrar.NewRegistrar()
 	d.linkIndex = newLinkIndex()
 	d.containerdRemote = containerdRemote
 
 	go d.execCommandGC()
 
 	d.containerd, err = containerdRemote.Client(d)
 	if err != nil {
 		return nil, err
 	}
 
 	if err := d.restore(); err != nil {
 		return nil, err
 	}
 
 	return d, nil
 }
 
 func (daemon *Daemon) shutdownContainer(c *container.Container) error {
 	// TODO(windows): Handle docker restart with paused containers
 	if c.IsPaused() {
 		// To terminate a process in the freezer cgroup, we should send
 		// SIGTERM to the process, then unfreeze it; the process is then
 		// forced to terminate immediately.
 		logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID)
 		sig, ok := signal.SignalMap["TERM"]
 		if !ok {
 			return fmt.Errorf("System does not support SIGTERM")
 		}
 		if err := daemon.kill(c, int(sig)); err != nil {
 			return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
 		}
 		if err := daemon.containerUnpause(c); err != nil {
 			return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
 		}
 		if _, err := c.WaitStop(10 * time.Second); err != nil {
 			logrus.Debugf("container %s failed to exit in 10 seconds of SIGTERM, sending SIGKILL to force", c.ID)
 			sig, ok := signal.SignalMap["KILL"]
 			if !ok {
 				return fmt.Errorf("System does not support SIGKILL")
 			}
 			if err := daemon.kill(c, int(sig)); err != nil {
 				logrus.Errorf("Failed to SIGKILL container %s", c.ID)
 			}
 			c.WaitStop(-1 * time.Second)
 			return err
 		}
 	}
 	// Stop the container; if it has not exited within 10 seconds of SIGTERM, it is forcibly killed.
 	if err := daemon.containerStop(c, 10); err != nil {
 		return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
 	}
 
 	c.WaitStop(-1 * time.Second)
 	return nil
 }
 
 // Shutdown stops the daemon.
 func (daemon *Daemon) Shutdown() error {
 	daemon.shutdown = true
 	// Keep mounts and networking running on daemon shutdown if
 	// we are to keep containers running and restore them.
 
 	pluginShutdown()
 
 	if daemon.configStore.LiveRestore && daemon.containers != nil {
 		// Check if there are any running containers; if none, we should do some cleanup.
 		if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
 			return nil
 		}
 	}
 
 	if daemon.containers != nil {
 		logrus.Debug("starting clean shutdown of all containers...")
 		daemon.containers.ApplyAll(func(c *container.Container) {
 			if !c.IsRunning() {
 				return
 			}
 			logrus.Debugf("stopping %s", c.ID)
 			if err := daemon.shutdownContainer(c); err != nil {
 				logrus.Errorf("Stop container error: %v", err)
 				return
 			}
 			if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil {
 				daemon.cleanupMountsByID(mountid)
 			}
 			logrus.Debugf("container stopped %s", c.ID)
 		})
 	}
 
 	// trigger libnetwork Stop only if it's initialized
 	if daemon.netController != nil {
 		daemon.netController.Stop()
 	}
 
 	if daemon.layerStore != nil {
 		if err := daemon.layerStore.Cleanup(); err != nil {
 			logrus.Errorf("Error during layer Store.Cleanup(): %v", err)
 		}
 	}
 
 	if err := daemon.cleanupMounts(); err != nil {
 		return err
 	}
 
 	return nil
 }
 
 // Mount sets container.BaseFS
 // (is it not set coming in? why is it unset?)
 func (daemon *Daemon) Mount(container *container.Container) error {
 	dir, err := container.RWLayer.Mount(container.GetMountLabel())
 	if err != nil {
 		return err
 	}
 	logrus.Debugf("container mounted via layerStore: %v", dir)
 
 	if container.BaseFS != dir {
 		// The mount path reported by the graph driver should always be trusted on Windows, since the
 		// volume path for a given mounted layer may change over time.  This should only be an error
 		// on non-Windows operating systems.
 		if container.BaseFS != "" && runtime.GOOS != "windows" {
 			daemon.Unmount(container)
 			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
 				daemon.GraphDriverName(), container.ID, container.BaseFS, dir)
 		}
 	}
 	container.BaseFS = dir // TODO: combine these fields
 	return nil
 }
 
 // Unmount unsets the container base filesystem
 func (daemon *Daemon) Unmount(container *container.Container) error {
 	if err := container.RWLayer.Unmount(); err != nil {
 		logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
 		return err
 	}
 	return nil
 }
 
 // V4Subnets returns the IPv4 subnets of networks that are managed by Docker.
 func (daemon *Daemon) V4Subnets() []net.IPNet {
 	var subnets []net.IPNet
 
 	managedNetworks := daemon.netController.Networks()
 
 	for _, managedNetwork := range managedNetworks {
 		v4Infos, _ := managedNetwork.Info().IpamInfo()
 		for _, v4Info := range v4Infos {
 			if v4Info.IPAMData.Pool != nil {
 				subnets = append(subnets, *v4Info.IPAMData.Pool)
 			}
 		}
 	}
 
 	return subnets
 }
 
 // V6Subnets returns the IPv6 subnets of networks that are managed by Docker.
 func (daemon *Daemon) V6Subnets() []net.IPNet {
 	var subnets []net.IPNet
 
 	managedNetworks := daemon.netController.Networks()
 
 	for _, managedNetwork := range managedNetworks {
 		_, v6Infos := managedNetwork.Info().IpamInfo()
 		for _, v6Info := range v6Infos {
 			if v6Info.IPAMData.Pool != nil {
 				subnets = append(subnets, *v6Info.IPAMData.Pool)
 			}
 		}
 	}
 
 	return subnets
 }
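 
 // A hedged usage sketch (not from the original source): callers such as
 // firewall or routing setup can enumerate every Docker-managed pool in one
 // pass:
 //
 //	for _, s := range daemon.V4Subnets() {
 //		logrus.Debugf("docker-managed v4 subnet: %s", s.String())
 //	}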
 
 func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) {
 	progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false)
 	operationCancelled := false
 
 	for prog := range progressChan {
 		if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled {
 			// don't log broken pipe errors as this is the normal case when a client aborts
 			if isBrokenPipe(err) {
 				logrus.Info("Pull session cancelled")
 			} else {
 				logrus.Errorf("error writing progress to client: %v", err)
 			}
 			cancelFunc()
 			operationCancelled = true
 			// Don't return, because we need to continue draining
 			// progressChan until it's closed to avoid a deadlock.
 		}
 	}
 }
 
 func isBrokenPipe(e error) bool {
 	if netErr, ok := e.(*net.OpError); ok {
 		e = netErr.Err
 		if sysErr, ok := netErr.Err.(*os.SyscallError); ok {
 			e = sysErr.Err
 		}
 	}
 	return e == syscall.EPIPE
 }
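 
 // For illustration, a broken pipe from a client disconnect typically arrives
 // wrapped as sketched below (hypothetical error value); isBrokenPipe unwraps
 // the chain net.OpError -> os.SyscallError -> syscall.EPIPE:
 //
 //	err := &net.OpError{
 //		Op:  "write",
 //		Err: os.NewSyscallError("write", syscall.EPIPE),
 //	}
 //	isBrokenPipe(err) // true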
 
 // GraphDriverName returns the name of the graph driver used by the layer.Store
 func (daemon *Daemon) GraphDriverName() string {
 	return daemon.layerStore.DriverName()
 }
 
 // GetUIDGIDMaps returns the current daemon's user namespace settings
 // for the full uid and gid maps which will be applied to containers
 // started in this instance.
 func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) {
 	return daemon.uidMaps, daemon.gidMaps
 }
 
 // GetRemappedUIDGID returns the current daemon's uid and gid values
 // if user namespaces are in use for this daemon instance.  If not
 // this function will return "real" root values of 0, 0.
 func (daemon *Daemon) GetRemappedUIDGID() (int, int) {
 	uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
 	return uid, gid
 }
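 
 // A hedged sketch of the remapping behavior, assuming a hypothetical userns
 // configuration whose first mapping entry maps container ID 0 to host ID
 // 100000 (gid maps configured likewise):
 //
 //	daemon.uidMaps = []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
 //	uid, gid := daemon.GetRemappedUIDGID() // 100000, 100000
 //
 // With no maps configured, the "real" root values 0, 0 are returned.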
 
 // tempDir returns the default directory to use for temporary files.
 func tempDir(rootDir string, rootUID, rootGID int) (string, error) {
 	var tmpDir string
 	if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
 		tmpDir = filepath.Join(rootDir, "tmp")
 	}
 	return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID)
 }
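 
 // For example (paths hypothetical), DOCKER_TMPDIR takes precedence over the
 // default <rootDir>/tmp location:
 //
 //	os.Setenv("DOCKER_TMPDIR", "/mnt/scratch")
 //	dir, err := tempDir("/var/lib/docker", 0, 0) // dir == "/mnt/scratch"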
 
 func (daemon *Daemon) setupInitLayer(initPath string) error {
 	rootUID, rootGID := daemon.GetRemappedUIDGID()
 	return setupInitLayer(initPath, rootUID, rootGID)
 }
 
 func setDefaultMtu(config *Config) {
 	// Do nothing if an MTU was explicitly configured; 0 is the unset default.
 	if config.Mtu != 0 {
 		return
 	}
 	config.Mtu = defaultNetworkMtu
 }
 
 func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) {
 	volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID)
 	if err != nil {
 		return nil, err
 	}
 
 	if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) {
 		return nil, fmt.Errorf("local volume driver could not be registered")
 	}
 	return store.New(daemon.configStore.Root)
 }
 
 // IsShuttingDown tells whether the daemon is shutting down or not
 func (daemon *Daemon) IsShuttingDown() bool {
 	return daemon.shutdown
 }
 
 // initDiscovery initializes the discovery watcher for this daemon.
 func (daemon *Daemon) initDiscovery(config *Config) error {
 	advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise)
 	if err != nil {
 		if err == errDiscoveryDisabled {
 			return nil
 		}
 		return err
 	}
 
 	config.ClusterAdvertise = advertise
 	discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts)
 	if err != nil {
 		return fmt.Errorf("discovery initialization failed (%v)", err)
 	}
 
 	daemon.discoveryWatcher = discoveryWatcher
 	return nil
 }
 
 // Reload reads configuration changes and modifies the
 // daemon according to those changes.
 // These are the settings that Reload changes:
 // - Daemon labels.
 // - Daemon debug log level.
 // - Daemon max concurrent downloads
 // - Daemon max concurrent uploads
 // - Cluster discovery (reconfigure and restart).
 // - Daemon live restore
 func (daemon *Daemon) Reload(config *Config) error {
 	var err error
 	// used to hold reloaded changes
 	attributes := map[string]string{}
 
 	// We need defer here to ensure the lock is released as
 	// daemon.SystemInfo() will try to get it too
 	defer func() {
 		if err == nil {
 			daemon.LogDaemonEventWithAttributes("reload", attributes)
 		}
 	}()
 
 	daemon.configStore.reloadLock.Lock()
 	defer daemon.configStore.reloadLock.Unlock()
 
 	daemon.platformReload(config, &attributes)
 
 	if err = daemon.reloadClusterDiscovery(config); err != nil {
 		return err
 	}
 
 	if config.IsValueSet("labels") {
 		daemon.configStore.Labels = config.Labels
 	}
 	if config.IsValueSet("debug") {
 		daemon.configStore.Debug = config.Debug
 	}
 	if config.IsValueSet("live-restore") {
 		daemon.configStore.LiveRestore = config.LiveRestore
 		if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(config.LiveRestore)); err != nil {
 			return err
 		}
 
 	}
 
 	// If no value is set for max-concurrent-downloads we assume it is the default value
 	// We always "reset" as the cost is lightweight and easy to maintain.
 	if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil {
 		*daemon.configStore.MaxConcurrentDownloads = *config.MaxConcurrentDownloads
 	} else {
 		maxConcurrentDownloads := defaultMaxConcurrentDownloads
 		daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads
 	}
 	logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads)
 	if daemon.downloadManager != nil {
 		daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads)
 	}
 
 	// If no value is set for max-concurrent-uploads we assume it is the default value
 	// We always "reset" as the cost is lightweight and easy to maintain.
 	if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil {
 		*daemon.configStore.MaxConcurrentUploads = *config.MaxConcurrentUploads
 	} else {
 		maxConcurrentUploads := defaultMaxConcurrentUploads
 		daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads
 	}
 	logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads)
 	if daemon.uploadManager != nil {
 		daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads)
 	}
 
 	// We emit a daemon reload event here with the updatable configuration values
 	attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug)
 	attributes["cluster-store"] = daemon.configStore.ClusterStore
 	if daemon.configStore.ClusterOpts != nil {
 		opts, _ := json.Marshal(daemon.configStore.ClusterOpts)
 		attributes["cluster-store-opts"] = string(opts)
 	} else {
 		attributes["cluster-store-opts"] = "{}"
 	}
 	attributes["cluster-advertise"] = daemon.configStore.ClusterAdvertise
 	if daemon.configStore.Labels != nil {
 		labels, _ := json.Marshal(daemon.configStore.Labels)
 		attributes["labels"] = string(labels)
 	} else {
 		attributes["labels"] = "[]"
 	}
 	attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads)
 	attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads)
 
 	return nil
 }
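 
 // A hedged usage sketch: a SIGHUP handler would re-parse the daemon
 // configuration file and hand the result to Reload; loadReloadConfig is a
 // hypothetical helper, not part of this file. Only fields reported as set
 // by config.IsValueSet are applied.
 //
 //	newConfig, err := loadReloadConfig(configFile)
 //	if err == nil {
 //		err = daemon.Reload(newConfig)
 //	}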
 
 func (daemon *Daemon) reloadClusterDiscovery(config *Config) error {
 	var err error
 	newAdvertise := daemon.configStore.ClusterAdvertise
 	newClusterStore := daemon.configStore.ClusterStore
 	if config.IsValueSet("cluster-advertise") {
 		if config.IsValueSet("cluster-store") {
 			newClusterStore = config.ClusterStore
 		}
 		newAdvertise, err = parseClusterAdvertiseSettings(newClusterStore, config.ClusterAdvertise)
 		if err != nil && err != errDiscoveryDisabled {
 			return err
 		}
 	}
 
 	if daemon.clusterProvider != nil {
 		if err := config.isSwarmCompatible(); err != nil {
 			return err
 		}
 	}
 
 	// check discovery modifications
 	if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, newClusterStore, config.ClusterOpts) {
 		return nil
 	}
 
 	// enable discovery for the first time if it was not previously enabled
 	if daemon.discoveryWatcher == nil {
 		discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts)
 		if err != nil {
 			return fmt.Errorf("discovery initialization failed (%v)", err)
 		}
 		daemon.discoveryWatcher = discoveryWatcher
 	} else {
 		if err == errDiscoveryDisabled {
 			// disable discovery if it was previously enabled and it's disabled now
 			daemon.discoveryWatcher.Stop()
 		} else {
 			// reload discovery
 			if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil {
 				return err
 			}
 		}
 	}
 
 	daemon.configStore.ClusterStore = newClusterStore
 	daemon.configStore.ClusterOpts = config.ClusterOpts
 	daemon.configStore.ClusterAdvertise = newAdvertise
 
 	if daemon.netController == nil {
 		return nil
 	}
 	netOptions, err := daemon.networkOptions(daemon.configStore, nil)
 	if err != nil {
 		logrus.Warnf("Failed to reload configuration with network controller: %v", err)
 		return nil
 	}
 	err = daemon.netController.ReloadConfiguration(netOptions...)
 	if err != nil {
 		logrus.Warnf("Failed to reload configuration with network controller: %v", err)
 	}
 
 	return nil
 }
 
 func isBridgeNetworkDisabled(config *Config) bool {
 	return config.bridgeConfig.Iface == disableNetworkBridge
 }
 
 func (daemon *Daemon) networkOptions(dconfig *Config, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
 	options := []nwconfig.Option{}
 	if dconfig == nil {
 		return options, nil
 	}
 
 	options = append(options, nwconfig.OptionDataDir(dconfig.Root))
 	options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))
 
 	dd := runconfig.DefaultDaemonNetworkMode()
 	dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
 	options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
 	options = append(options, nwconfig.OptionDefaultNetwork(dn))
 
 	if strings.TrimSpace(dconfig.ClusterStore) != "" {
 		kv := strings.Split(dconfig.ClusterStore, "://")
 		if len(kv) != 2 {
 			return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
 		}
 		options = append(options, nwconfig.OptionKVProvider(kv[0]))
 		options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
 	}
 	if len(dconfig.ClusterOpts) > 0 {
 		options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
 	}
 
 	if daemon.discoveryWatcher != nil {
 		options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
 	}
 
 	if dconfig.ClusterAdvertise != "" {
 		options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
 	}
 
 	options = append(options, nwconfig.OptionLabels(dconfig.Labels))
 	options = append(options, driverOptions(dconfig)...)
 
 	if daemon.configStore != nil && daemon.configStore.LiveRestore && len(activeSandboxes) != 0 {
 		options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
 	}
 
 	return options, nil
 }
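 
 // The cluster store is expected in KV-PROVIDER://KV-URL form; a hedged
 // sketch of the split performed above (example address hypothetical):
 //
 //	kv := strings.Split("consul://127.0.0.1:8500", "://")
 //	// kv[0] == "consul" (provider), kv[1] == "127.0.0.1:8500" (URL)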
 
 func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry {
 	out := make([]types.BlkioStatEntry, len(entries))
 	for i, re := range entries {
 		out[i] = types.BlkioStatEntry{
 			Major: re.Major,
 			Minor: re.Minor,
 			Op:    re.Op,
 			Value: re.Value,
 		}
 	}
 	return out
 }