Signed-off-by: John Howard <jhoward@microsoft.com>
@@ -17,7 +17,7 @@ import (
 // ImageComponent provides an interface for working with images
 type ImageComponent interface {
 	SquashImage(from string, to string) (string, error)
-	TagImageWithReference(image.ID, reference.Named) error
+	TagImageWithReference(image.ID, string, reference.Named) error
 }
 
 // Backend provides build functionality to the API router
@@ -3,9 +3,11 @@ package build
 import (
 	"fmt"
 	"io"
+	"runtime"
 
 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/system"
 	"github.com/pkg/errors"
 )
 
@@ -33,7 +35,12 @@ func NewTagger(backend ImageComponent, stdout io.Writer, names []string) (*Tagge
 // TagImages creates image tags for the imageID
 func (bt *Tagger) TagImages(imageID image.ID) error {
 	for _, rt := range bt.repoAndTags {
-		if err := bt.imageComponent.TagImageWithReference(imageID, rt); err != nil {
+		// TODO @jhowardmsft LCOW support. Will need revisiting.
+		platform := runtime.GOOS
+		if platform == "windows" && system.LCOWSupported() {
+			platform = "linux"
+		}
+		if err := bt.imageComponent.TagImageWithReference(imageID, platform, rt); err != nil {
 			return err
 		}
 		fmt.Fprintf(bt.stdout, "Successfully tagged %s\n", reference.FamiliarString(rt))
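For context, the rule the builder applies above when choosing the platform argument is: default to the host OS, except on a Windows daemon with LCOW support enabled, where images are treated as Linux for now (hence the "will need revisiting" TODO). A minimal sketch of that rule as a standalone helper — the function name is illustrative and not part of this patch:

```go
package build

import (
	"runtime"

	"github.com/docker/docker/pkg/system"
)

// effectiveTagPlatform mirrors the platform-defaulting used in TagImages above:
// a Windows daemon with LCOW support tags images as Linux; everything else
// tags images for the host OS.
func effectiveTagPlatform() string {
	if runtime.GOOS == "windows" && system.LCOWSupported() {
		return "linux"
	}
	return runtime.GOOS
}
```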
@@ -267,12 +267,6 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 
 	logrus.Info("Daemon has completed initialization")
 
-	logrus.WithFields(logrus.Fields{
-		"version":     dockerversion.Version,
-		"commit":      dockerversion.GitCommit,
-		"graphdriver": d.GraphDriverName(),
-	}).Info("Docker daemon")
-
 	cli.d = d
 
 	initRouter(api, d, c)
@@ -2,6 +2,7 @@ package daemon
 
 import (
 	"io"
+	"runtime"
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution/reference"
@@ -147,7 +148,8 @@ func (daemon *Daemon) GetImageAndReleasableLayer(ctx context.Context, refOrID st
 		image, _ := daemon.GetImage(refOrID)
 		// TODO: shouldn't we error out if error is different from "not found" ?
 		if image != nil {
-			layer, err := newReleasableLayerForImage(image, daemon.layerStore)
+			// TODO LCOW @jhowardmsft. For now using runtime.GOOS for this, will need enhancing for platform when porting the builder
+			layer, err := newReleasableLayerForImage(image, daemon.stores[runtime.GOOS].layerStore)
 			return image, layer, err
 		}
 	}
@@ -156,7 +158,8 @@ func (daemon *Daemon) GetImageAndReleasableLayer(ctx context.Context, refOrID st
 	if err != nil {
 		return nil, nil, err
 	}
-	layer, err := newReleasableLayerForImage(image, daemon.layerStore)
+	// TODO LCOW @jhowardmsft. For now using runtime.GOOS for this, will need enhancing for platform when porting the builder
+	layer, err := newReleasableLayerForImage(image, daemon.stores[runtime.GOOS].layerStore)
 	return image, layer, err
 }
 
@@ -1,6 +1,8 @@
 package daemon
 
 import (
+	"runtime"
+
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/image/cache"
@@ -9,10 +11,12 @@ import (
 // MakeImageCache creates a stateful image cache.
 func (daemon *Daemon) MakeImageCache(sourceRefs []string) builder.ImageCache {
 	if len(sourceRefs) == 0 {
-		return cache.NewLocal(daemon.imageStore)
+		// TODO @jhowardmsft LCOW. For now, assume it is the OS of the host
+		return cache.NewLocal(daemon.stores[runtime.GOOS].imageStore)
 	}
 
-	cache := cache.New(daemon.imageStore)
+	// TODO @jhowardmsft LCOW. For now, assume it is the OS of the host
+	cache := cache.New(daemon.stores[runtime.GOOS].imageStore)
 
 	for _, ref := range sourceRefs {
 		img, err := daemon.GetImage(ref)
@@ -160,26 +160,21 @@ func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (str
 	}()
 
 	var parent *image.Image
-	os := runtime.GOOS
 	if container.ImageID == "" {
 		parent = new(image.Image)
 		parent.RootFS = image.NewRootFS()
 	} else {
-		parent, err = daemon.imageStore.Get(container.ImageID)
+		parent, err = daemon.stores[container.Platform].imageStore.Get(container.ImageID)
 		if err != nil {
 			return "", err
 		}
-		// To support LCOW, Windows needs to pass the platform in when registering the layer in the store
-		if runtime.GOOS == "windows" {
-			os = parent.OS
-		}
 	}
 
-	l, err := daemon.layerStore.Register(rwTar, parent.RootFS.ChainID(), layer.Platform(os))
+	l, err := daemon.stores[container.Platform].layerStore.Register(rwTar, rootFS.ChainID(), layer.Platform(container.Platform))
 	if err != nil {
 		return "", err
 	}
-	defer layer.ReleaseAndLog(daemon.layerStore, l)
+	defer layer.ReleaseAndLog(daemon.stores[container.Platform].layerStore, l)
 
 	containerConfig := c.ContainerConfig
 	if containerConfig == nil {
@@ -198,13 +193,13 @@ func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (str
 		return "", err
 	}
 
-	id, err := daemon.imageStore.Create(config)
+	id, err := daemon.stores[container.Platform].imageStore.Create(config)
 	if err != nil {
 		return "", err
 	}
 
 	if container.ImageID != "" {
-		if err := daemon.imageStore.SetParent(id, container.ImageID); err != nil {
+		if err := daemon.stores[container.Platform].imageStore.SetParent(id, container.ImageID); err != nil {
 			return "", err
 		}
 	}
@@ -223,7 +218,7 @@ func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (str
 				return "", err
 			}
 		}
-		if err := daemon.TagImageWithReference(id, newTag); err != nil {
+		if err := daemon.TagImageWithReference(id, container.Platform, newTag); err != nil {
 			return "", err
 		}
 		imageRef = reference.FamiliarString(newTag)
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"runtime"
 	"time"
 
 	"github.com/docker/docker/api/errors"
@@ -144,8 +145,10 @@ func (daemon *Daemon) newContainer(name string, config *containertypes.Config, h
 	base.ImageID = imgID
 	base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName}
 	base.Name = name
-	base.Driver = daemon.GraphDriverName()
-
+	// TODO @jhowardmsft LCOW - Get it from the platform of the container. For now, assume it is the OS of the host
+	base.Driver = daemon.GraphDriverName(runtime.GOOS)
+	// TODO @jhowardmsft LCOW - Similarly on this field. To solve this it will need a CLI/REST change in a subsequent PR during LCOW development
+	base.Platform = runtime.GOOS
 	return base, err
 }
 
@@ -215,7 +215,7 @@ func (daemon *Daemon) generateSecurityOpt(hostConfig *containertypes.HostConfig)
 func (daemon *Daemon) setRWLayer(container *container.Container) error {
 	var layerID layer.ChainID
 	if container.ImageID != "" {
-		img, err := daemon.imageStore.Get(container.ImageID)
+		img, err := daemon.stores[container.Platform].imageStore.Get(container.ImageID)
 		if err != nil {
 			return err
 		}
@@ -228,7 +228,7 @@ func (daemon *Daemon) setRWLayer(container *container.Container) error {
 		StorageOpt: container.HostConfig.StorageOpt,
 	}
 
-	rwLayer, err := daemon.layerStore.CreateRWLayer(container.ID, layerID, rwLayerOpts)
+	rwLayer, err := daemon.stores[container.Platform].layerStore.CreateRWLayer(container.ID, layerID, rwLayerOpts)
 	if err != nil {
 		return err
 	}
@@ -69,43 +69,49 @@ var (
 	errSystemNotSupported = errors.New("The Docker daemon is not supported on this platform.")
 )
 
+type daemonStore struct {
+	graphDriver               string
+	imageRoot                 string
+	imageStore                image.Store
+	layerStore                layer.Store
+	distributionMetadataStore dmetadata.Store
+	referenceStore            refstore.Store
+}
+
 // Daemon holds information about the Docker daemon.
 type Daemon struct {
-	ID                        string
-	repository                string
-	containers                container.Store
-	execCommands              *exec.Store
-	referenceStore            refstore.Store
-	downloadManager           *xfer.LayerDownloadManager
-	uploadManager             *xfer.LayerUploadManager
-	distributionMetadataStore dmetadata.Store
-	trustKey                  libtrust.PrivateKey
-	idIndex                   *truncindex.TruncIndex
-	configStore               *config.Config
-	statsCollector            *stats.Collector
-	defaultLogConfig          containertypes.LogConfig
-	RegistryService           registry.Service
-	EventsService             *events.Events
-	netController             libnetwork.NetworkController
-	volumes                   *store.VolumeStore
-	discoveryWatcher          discovery.Reloader
-	root                      string
-	seccompEnabled            bool
-	apparmorEnabled           bool
-	shutdown                  bool
-	idMappings                *idtools.IDMappings
-	layerStore                layer.Store
-	imageStore                image.Store
-	PluginStore               *plugin.Store // todo: remove
-	pluginManager             *plugin.Manager
-	nameIndex                 *registrar.Registrar
-	linkIndex                 *linkIndex
-	containerd                libcontainerd.Client
-	containerdRemote          libcontainerd.Remote
-	defaultIsolation          containertypes.Isolation // Default isolation mode on Windows
-	clusterProvider           cluster.Provider
-	cluster                   Cluster
-	metricsPluginListener     net.Listener
+	ID                    string
+	repository            string
+	containers            container.Store
+	execCommands          *exec.Store
+	downloadManager       *xfer.LayerDownloadManager
+	uploadManager         *xfer.LayerUploadManager
+	trustKey              libtrust.PrivateKey
+	idIndex               *truncindex.TruncIndex
+	configStore           *config.Config
+	statsCollector        *stats.Collector
+	defaultLogConfig      containertypes.LogConfig
+	RegistryService       registry.Service
+	EventsService         *events.Events
+	netController         libnetwork.NetworkController
+	volumes               *store.VolumeStore
+	discoveryWatcher      discovery.Reloader
+	root                  string
+	seccompEnabled        bool
+	apparmorEnabled       bool
+	shutdown              bool
+	idMappings            *idtools.IDMappings
+	stores                map[string]daemonStore // By container target platform
+	PluginStore           *plugin.Store          // todo: remove
+	pluginManager         *plugin.Manager
+	nameIndex             *registrar.Registrar
+	linkIndex             *linkIndex
+	containerd            libcontainerd.Client
+	containerdRemote      libcontainerd.Remote
+	defaultIsolation      containertypes.Isolation // Default isolation mode on Windows
+	clusterProvider       cluster.Provider
+	cluster               Cluster
+	metricsPluginListener net.Listener
 
 	machineMemory uint64
 
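The single layerStore, imageStore, referenceStore and distributionMetadataStore fields are replaced by the stores map, keyed by the target platform of the container or image. A minimal sketch of the lookup pattern the rest of this patch follows — the helper itself is illustrative and not part of the change, and the empty-platform fallback is an assumption:

```go
// layerStoreFor is a hypothetical convenience wrapper around the new stores map.
// Callers pass the recorded target platform ("windows", "linux", ...) and fall
// back to the host OS when no platform has been recorded.
func (daemon *Daemon) layerStoreFor(platform string) layer.Store {
	if platform == "" {
		platform = runtime.GOOS // assumption: an unset platform means the host OS
	}
	return daemon.stores[platform].layerStore
}
```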
@@ -137,10 +143,7 @@ func (daemon *Daemon) HasExperimental() bool {
 }
 
 func (daemon *Daemon) restore() error {
-	var (
-		currentDriver = daemon.GraphDriverName()
-		containers    = make(map[string]*container.Container)
-	)
+	containers := make(map[string]*container.Container)
 
 	logrus.Info("Loading containers: start.")
 
@@ -158,8 +161,9 @@ func (daemon *Daemon) restore() error {
 		}
 
 		// Ignore the container if it does not support the current driver being used by the graph
-		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
-			rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
+		currentDriverForContainerPlatform := daemon.stores[container.Platform].graphDriver
+		if (container.Driver == "" && currentDriverForContainerPlatform == "aufs") || container.Driver == currentDriverForContainerPlatform {
+			rwlayer, err := daemon.stores[container.Platform].layerStore.GetRWLayer(container.ID)
 			if err != nil {
 				logrus.Errorf("Failed to load container mount %v: %v", id, err)
 				continue
@@ -595,9 +599,24 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 		}
 	}
 
-	driverName := os.Getenv("DOCKER_DRIVER")
-	if driverName == "" {
-		driverName = config.GraphDriver
+	// On Windows we don't support the environment variable, or a user supplied graphdriver
+	// as Windows has no choice in terms of which graphdrivers to use. It's a case of
+	// running Windows containers on Windows - windowsfilter, running Linux containers on Windows,
+	// lcow. Unix platforms however run a single graphdriver for all containers, and it can
+	// be set through an environment variable, a daemon start parameter, or chosen through
+	// initialization of the layerstore through driver priority order for example.
+	d.stores = make(map[string]daemonStore)
+	if runtime.GOOS == "windows" {
+		d.stores["windows"] = daemonStore{graphDriver: "windowsfilter"}
+		if system.LCOWSupported() {
+			d.stores["linux"] = daemonStore{graphDriver: "lcow"}
+		}
+	} else {
+		driverName := os.Getenv("DOCKER_DRIVER")
+		if driverName == "" {
+			driverName = config.GraphDriver
+		}
+		d.stores[runtime.GOOS] = daemonStore{graphDriver: driverName} // May still be empty. Layerstore init determines instead.
 	}
 
 	d.RegistryService = registryService
@@ -625,42 +644,55 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 		return nil, errors.Wrap(err, "couldn't create plugin manager")
 	}
 
-	d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
-		StorePath:                 config.Root,
-		MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
-		GraphDriver:               driverName,
-		GraphDriverOptions:        config.GraphOptions,
-		IDMappings:                idMappings,
-		PluginGetter:              d.PluginStore,
-		ExperimentalEnabled:       config.Experimental,
-	})
-	if err != nil {
-		return nil, err
+	var graphDrivers []string
+	for platform, ds := range d.stores {
+		ls, err := layer.NewStoreFromOptions(layer.StoreOptions{
+			StorePath:                 config.Root,
+			MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
+			GraphDriver:               ds.graphDriver,
+			GraphDriverOptions:        config.GraphOptions,
+			IDMappings:                idMappings,
+			PluginGetter:              d.PluginStore,
+			ExperimentalEnabled:       config.Experimental,
+		})
+		if err != nil {
+			return nil, err
+		}
+		ds.graphDriver = ls.DriverName() // As layerstore may set the driver
+		ds.layerStore = ls
+		d.stores[platform] = ds
+		graphDrivers = append(graphDrivers, ls.DriverName())
 	}
 
-	graphDriver := d.layerStore.DriverName()
-	imageRoot := filepath.Join(config.Root, "image", graphDriver)
-
 	// Configure and validate the kernels security support
-	if err := configureKernelSecuritySupport(config, graphDriver); err != nil {
+	if err := configureKernelSecuritySupport(config, graphDrivers); err != nil {
 		return nil, err
 	}
 
 	logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
-	d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads)
+	lsMap := make(map[string]layer.Store)
+	for platform, ds := range d.stores {
+		lsMap[platform] = ds.layerStore
+	}
+	d.downloadManager = xfer.NewLayerDownloadManager(lsMap, *config.MaxConcurrentDownloads)
 	logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
 	d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)
 
-	ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
-	if err != nil {
-		return nil, err
-	}
+	for platform, ds := range d.stores {
+		imageRoot := filepath.Join(config.Root, "image", ds.graphDriver)
+		ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
+		if err != nil {
+			return nil, err
+		}
 
-	// TODO LCOW @jhowardmsft. For now assume it's the runtime OS. This will be modified
-	// as the stores are split in a follow-up commit.
-	d.imageStore, err = image.NewImageStore(ifs, runtime.GOOS, d.layerStore)
-	if err != nil {
-		return nil, err
+		var is image.Store
+		is, err = image.NewImageStore(ifs, platform, ds.layerStore)
+		if err != nil {
+			return nil, err
+		}
+		ds.imageRoot = imageRoot
+		ds.imageStore = is
+		d.stores[platform] = ds
 	}
 
 	// Configure the volumes driver
@@ -680,23 +712,31 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 		return nil, err
 	}
 
-	distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
-	if err != nil {
-		return nil, err
-	}
-
 	eventsService := events.New()
 
-	referenceStore, err := refstore.NewReferenceStore(filepath.Join(imageRoot, "repositories.json"))
-	if err != nil {
-		return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err)
-	}
+	for platform, ds := range d.stores {
+		dms, err := dmetadata.NewFSMetadataStore(filepath.Join(ds.imageRoot, "distribution"), platform)
+		if err != nil {
+			return nil, err
+		}
 
-	migrationStart := time.Now()
-	if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil {
-		logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
+		rs, err := refstore.NewReferenceStore(filepath.Join(ds.imageRoot, "repositories.json"), platform)
+		if err != nil {
+			return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err)
+		}
+		ds.distributionMetadataStore = dms
+		ds.referenceStore = rs
+		d.stores[platform] = ds
+
+		// No content-addressability migration on Windows as it never supported pre-CA
+		if runtime.GOOS != "windows" {
+			migrationStart := time.Now()
+			if err := v1.Migrate(config.Root, ds.graphDriver, ds.layerStore, ds.imageStore, rs, dms); err != nil {
+				logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
+			}
+			logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
+		}
 	}
-	logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
 
 	// Discovery is only enabled when the daemon is launched with an address to advertise. When
 	// initialized, the daemon is registered and we can store the discovery backend as it's read-only
@@ -715,8 +755,6 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 	d.repository = daemonRepo
 	d.containers = container.NewMemoryStore()
 	d.execCommands = exec.NewStore()
-	d.referenceStore = referenceStore
-	d.distributionMetadataStore = distributionMetadataStore
 	d.trustKey = trustKey
 	d.idIndex = truncindex.NewTruncIndex([]string{})
 	d.statsCollector = d.newStatsCollector(1 * time.Second)
@@ -763,6 +801,22 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 	engineCpus.Set(float64(info.NCPU))
 	engineMemory.Set(float64(info.MemTotal))
 
+	gd := ""
+	for platform, ds := range d.stores {
+		if len(gd) > 0 {
+			gd += ", "
+		}
+		gd += ds.graphDriver
+		if len(d.stores) > 1 {
+			gd = fmt.Sprintf("%s (%s)", gd, platform)
+		}
+	}
+	logrus.WithFields(logrus.Fields{
+		"version":        dockerversion.Version,
+		"commit":         dockerversion.GitCommit,
+		"graphdriver(s)": gd,
+	}).Info("Docker daemon")
+
 	return d, nil
 }
 
@@ -869,7 +923,7 @@ func (daemon *Daemon) Shutdown() error {
 				logrus.Errorf("Stop container error: %v", err)
 				return
 			}
-			if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil {
+			if mountid, err := daemon.stores[c.Platform].layerStore.GetMountID(c.ID); err == nil {
 				daemon.cleanupMountsByID(mountid)
 			}
 			logrus.Debugf("container stopped %s", c.ID)
@@ -882,9 +936,11 @@ func (daemon *Daemon) Shutdown() error {
 		}
 	}
 
-	if daemon.layerStore != nil {
-		if err := daemon.layerStore.Cleanup(); err != nil {
-			logrus.Errorf("Error during layer Store.Cleanup(): %v", err)
+	for platform, ds := range daemon.stores {
+		if ds.layerStore != nil {
+			if err := ds.layerStore.Cleanup(); err != nil {
+				logrus.Errorf("Error during layer Store.Cleanup(): %v %s", err, platform)
+			}
 		}
 	}
 
@@ -927,7 +983,7 @@ func (daemon *Daemon) Mount(container *container.Container) error {
 		if container.BaseFS != "" && runtime.GOOS != "windows" {
 			daemon.Unmount(container)
 			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
-				daemon.GraphDriverName(), container.ID, container.BaseFS, dir)
+				daemon.GraphDriverName(container.Platform), container.ID, container.BaseFS, dir)
 		}
 	}
 	container.BaseFS = dir // TODO: combine these fields
@@ -969,8 +1025,8 @@ func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) {
 }
 
 // GraphDriverName returns the name of the graph driver used by the layer.Store
-func (daemon *Daemon) GraphDriverName() string {
-	return daemon.layerStore.DriverName()
+func (daemon *Daemon) GraphDriverName(platform string) string {
+	return daemon.stores[platform].layerStore.DriverName()
 }
 
 // prepareTempDir prepares and returns the default directory to use
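GraphDriverName now takes the platform whose layer store is meant; every caller in this patch passes either the container's recorded platform or runtime.GOOS. Illustrative call sites, assuming code inside the daemon package:

```go
hostDriver := daemon.GraphDriverName(runtime.GOOS)      // e.g. "overlay2" or "windowsfilter"
ctrDriver := daemon.GraphDriverName(container.Platform) // driver backing that container's platform
```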
@@ -353,7 +353,7 @@ func configureMaxThreads(config *Config) error {
 }
 
 // configureKernelSecuritySupport configures and validate security support for the kernel
-func configureKernelSecuritySupport(config *Config, driverName string) error {
+func configureKernelSecuritySupport(config *config.Config, driverNames []string) error {
 	return nil
 }
 
@@ -702,14 +702,22 @@ func overlaySupportsSelinux() (bool, error) {
 }
 
 // configureKernelSecuritySupport configures and validates security support for the kernel
-func configureKernelSecuritySupport(config *config.Config, driverName string) error {
+func configureKernelSecuritySupport(config *config.Config, driverNames []string) error {
 	if config.EnableSelinuxSupport {
 		if !selinuxEnabled() {
 			logrus.Warn("Docker could not enable SELinux on the host system")
 			return nil
 		}
 
-		if driverName == "overlay" || driverName == "overlay2" {
+		overlayFound := false
+		for _, d := range driverNames {
+			if d == "overlay" || d == "overlay2" {
+				overlayFound = true
+				break
+			}
+		}
+
+		if overlayFound {
 			// If driver is overlay or overlay2, make sure kernel
 			// supports selinux with overlay.
 			supported, err := overlaySupportsSelinux()
@@ -718,7 +726,7 @@ func configureKernelSecuritySupport(config *config.Config, driverName string) er
 			}
 
 			if !supported {
-				logrus.Warnf("SELinux is not supported with the %s graph driver on this kernel", driverName)
+				logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverNames)
 			}
 		}
 	} else {
@@ -260,7 +260,7 @@ func checkSystem() error {
 }
 
 // configureKernelSecuritySupport configures and validate security support for the kernel
-func configureKernelSecuritySupport(config *config.Config, driverName string) error {
+func configureKernelSecuritySupport(config *config.Config, driverNames []string) error {
 	return nil
 }
 
@@ -115,10 +115,10 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemo
 	// When container creation fails and `RWLayer` has not been created yet, we
 	// do not call `ReleaseRWLayer`
 	if container.RWLayer != nil {
-		metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer)
+		metadata, err := daemon.stores[container.Platform].layerStore.ReleaseRWLayer(container.RWLayer)
 		layer.LogReleaseMetadata(metadata)
 		if err != nil && err != layer.ErrMountDoesNotExist {
-			return errors.Wrapf(err, "driver %q failed to remove root filesystem for %s", daemon.GraphDriverName(), container.ID)
+			return errors.Wrapf(err, "driver %q failed to remove root filesystem for %s", daemon.GraphDriverName(container.Platform), container.ID)
 		}
 	}
 
@@ -15,12 +15,12 @@ import (
 	"github.com/opencontainers/go-digest"
 )
 
-func (daemon *Daemon) getLayerRefs() map[layer.ChainID]int {
-	tmpImages := daemon.imageStore.Map()
+func (daemon *Daemon) getLayerRefs(platform string) map[layer.ChainID]int {
+	tmpImages := daemon.stores[platform].imageStore.Map()
 	layerRefs := map[layer.ChainID]int{}
 	for id, img := range tmpImages {
 		dgst := digest.Digest(id)
-		if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 {
+		if len(daemon.stores[platform].referenceStore.References(dgst)) == 0 && len(daemon.stores[platform].imageStore.Children(id)) != 0 {
 			continue
 		}
 
@@ -53,6 +53,7 @@ func (daemon *Daemon) SystemDiskUsage(ctx context.Context) (*types.DiskUsage, er
 	}
 
 	// Get all top images with extra attributes
+	// TODO @jhowardmsft LCOW. This may need revisiting
 	allImages, err := daemon.Images(filters.NewArgs(), false, true)
 	if err != nil {
 		return nil, fmt.Errorf("failed to retrieve image list: %v", err)
@@ -94,23 +95,26 @@ func (daemon *Daemon) SystemDiskUsage(ctx context.Context) (*types.DiskUsage, er
 	}
 
 	// Get total layers size on disk
-	layerRefs := daemon.getLayerRefs()
-	allLayers := daemon.layerStore.Map()
 	var allLayersSize int64
-	for _, l := range allLayers {
-		select {
-		case <-ctx.Done():
-			return nil, ctx.Err()
-		default:
-			size, err := l.DiffSize()
-			if err == nil {
-				if _, ok := layerRefs[l.ChainID()]; ok {
-					allLayersSize += size
+	for platform := range daemon.stores {
+		layerRefs := daemon.getLayerRefs(platform)
+		allLayers := daemon.stores[platform].layerStore.Map()
+		var allLayersSize int64
+		for _, l := range allLayers {
+			select {
+			case <-ctx.Done():
+				return nil, ctx.Err()
+			default:
+				size, err := l.DiffSize()
+				if err == nil {
+					if _, ok := layerRefs[l.ChainID()]; ok {
+						allLayersSize += size
+					} else {
+						logrus.Warnf("found leaked image layer %v platform %s", l.ChainID(), platform)
+					}
 				} else {
-					logrus.Warnf("found leaked image layer %v", l.ChainID())
+					logrus.Warnf("failed to get diff size for layer %v %s", l.ChainID(), platform)
 				}
-			} else {
-				logrus.Warnf("failed to get diff size for layer %v", l.ChainID())
 			}
 		}
 	}
@@ -3,6 +3,8 @@
 package daemon
 
 import (
+	"runtime"
+
 	"github.com/Sirupsen/logrus"
 )
 
@@ -13,17 +15,17 @@ func (daemon *Daemon) getSize(containerID string) (int64, int64) {
 		err error
 	)
 
-	rwlayer, err := daemon.layerStore.GetRWLayer(containerID)
+	rwlayer, err := daemon.stores[runtime.GOOS].layerStore.GetRWLayer(containerID)
 	if err != nil {
 		logrus.Errorf("Failed to compute size of container rootfs %v: %v", containerID, err)
 		return sizeRw, sizeRootfs
 	}
-	defer daemon.layerStore.ReleaseRWLayer(rwlayer)
+	defer daemon.stores[runtime.GOOS].layerStore.ReleaseRWLayer(rwlayer)
 
 	sizeRw, err = rwlayer.Size()
 	if err != nil {
 		logrus.Errorf("Driver %s couldn't return diff size of container %s: %s",
-			daemon.GraphDriverName(), containerID, err)
+			daemon.GraphDriverName(runtime.GOOS), containerID, err)
 		// FIXME: GetSize should return an error. Not changing it now in case
 		// there is a side-effect.
 		sizeRw = -1
@@ -94,6 +94,10 @@ func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap)
 		return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home)
 	}
 
+	if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil {
+		return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err)
+	}
+
 	d := &Driver{
 		info: hcsshim.DriverInfo{
 			HomeDir: home,
@@ -21,37 +21,43 @@ func (e ErrImageDoesNotExist) Error() string {
 	return fmt.Sprintf("No such image: %s", reference.FamiliarString(ref))
 }
 
-// GetImageID returns an image ID corresponding to the image referred to by
+// GetImageIDAndPlatform returns an image ID and platform corresponding to the image referred to by
 // refOrID.
-func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) {
+func (daemon *Daemon) GetImageIDAndPlatform(refOrID string) (image.ID, string, error) {
 	ref, err := reference.ParseAnyReference(refOrID)
 	if err != nil {
-		return "", err
+		return "", "", err
 	}
 	namedRef, ok := ref.(reference.Named)
 	if !ok {
 		digested, ok := ref.(reference.Digested)
 		if !ok {
-			return "", ErrImageDoesNotExist{ref}
+			return "", "", ErrImageDoesNotExist{ref}
 		}
 		id := image.IDFromDigest(digested.Digest())
-		if _, err := daemon.imageStore.Get(id); err != nil {
-			return "", ErrImageDoesNotExist{ref}
+		for platform := range daemon.stores {
+			if _, err = daemon.stores[platform].imageStore.Get(id); err == nil {
+				return id, platform, nil
+			}
 		}
-		return id, nil
+		return "", "", ErrImageDoesNotExist{ref}
 	}
 
-	if id, err := daemon.referenceStore.Get(namedRef); err == nil {
-		return image.IDFromDigest(id), nil
+	for platform := range daemon.stores {
+		if id, err := daemon.stores[platform].referenceStore.Get(namedRef); err == nil {
+			return image.IDFromDigest(id), platform, nil
+		}
 	}
 
 	// deprecated: repo:shortid https://github.com/docker/docker/pull/799
 	if tagged, ok := namedRef.(reference.Tagged); ok {
 		if tag := tagged.Tag(); stringid.IsShortID(stringid.TruncateID(tag)) {
-			if id, err := daemon.imageStore.Search(tag); err == nil {
-				for _, storeRef := range daemon.referenceStore.References(id.Digest()) {
-					if storeRef.Name() == namedRef.Name() {
-						return id, nil
+			for platform := range daemon.stores {
+				if id, err := daemon.stores[platform].imageStore.Search(tag); err == nil {
+					for _, storeRef := range daemon.stores[platform].referenceStore.References(id.Digest()) {
+						if storeRef.Name() == namedRef.Name() {
+							return id, platform, nil
+						}
 					}
 				}
 			}
@@ -59,18 +65,20 @@ func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) {
 	}
 
 	// Search based on ID
-	if id, err := daemon.imageStore.Search(refOrID); err == nil {
-		return id, nil
+	for platform := range daemon.stores {
+		if id, err := daemon.stores[platform].imageStore.Search(refOrID); err == nil {
+			return id, platform, nil
+		}
 	}
 
-	return "", ErrImageDoesNotExist{ref}
+	return "", "", ErrImageDoesNotExist{ref}
 }
 
 // GetImage returns an image corresponding to the image referred to by refOrID.
 func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) {
-	imgID, err := daemon.GetImageID(refOrID)
+	imgID, platform, err := daemon.GetImageIDAndPlatform(refOrID)
 	if err != nil {
 		return nil, err
 	}
-	return daemon.imageStore.Get(imgID)
+	return daemon.stores[platform].imageStore.Get(imgID)
 }
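Callers that previously used GetImageID now receive the platform as well and are expected to address the matching per-platform store with it. A minimal sketch of the calling pattern inside the daemon package (the reference string is just an example):

```go
imgID, platform, err := daemon.GetImageIDAndPlatform("busybox:latest")
if err != nil {
	return nil, daemon.imageNotExistToErrcode(err)
}
img, err := daemon.stores[platform].imageStore.Get(imgID)
refs := daemon.stores[platform].referenceStore.References(imgID.Digest())
```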
@@ -65,12 +65,12 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
 	start := time.Now()
 	records := []types.ImageDeleteResponseItem{}
 
-	imgID, err := daemon.GetImageID(imageRef)
+	imgID, platform, err := daemon.GetImageIDAndPlatform(imageRef)
 	if err != nil {
 		return nil, daemon.imageNotExistToErrcode(err)
 	}
 
-	repoRefs := daemon.referenceStore.References(imgID.Digest())
+	repoRefs := daemon.stores[platform].referenceStore.References(imgID.Digest())
 
 	var removedRepositoryRef bool
 	if !isImageIDPrefix(imgID.String(), imageRef) {
@@ -94,7 +94,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
 			return nil, err
 		}
 
-		parsedRef, err = daemon.removeImageRef(parsedRef)
+		parsedRef, err = daemon.removeImageRef(platform, parsedRef)
 		if err != nil {
 			return nil, err
 		}
@@ -104,7 +104,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
 		daemon.LogImageEvent(imgID.String(), imgID.String(), "untag")
 		records = append(records, untaggedRecord)
 
-		repoRefs = daemon.referenceStore.References(imgID.Digest())
+		repoRefs = daemon.stores[platform].referenceStore.References(imgID.Digest())
 
 		// If a tag reference was removed and the only remaining
 		// references to the same repository are digest references,
@@ -122,7 +122,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
 			remainingRefs := []reference.Named{}
 			for _, repoRef := range repoRefs {
 				if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() {
-					if _, err := daemon.removeImageRef(repoRef); err != nil {
+					if _, err := daemon.removeImageRef(platform, repoRef); err != nil {
 						return records, err
 					}
 
@@ -152,12 +152,12 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
 		if !force {
 			c |= conflictSoft &^ conflictActiveReference
 		}
-		if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil {
+		if conflict := daemon.checkImageDeleteConflict(imgID, platform, c); conflict != nil {
 			return nil, conflict
 		}
 
 		for _, repoRef := range repoRefs {
-			parsedRef, err := daemon.removeImageRef(repoRef)
+			parsedRef, err := daemon.removeImageRef(platform, repoRef)
 			if err != nil {
 				return nil, err
 			}
@@ -170,7 +170,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
 		}
 	}
 
-	if err := daemon.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef); err != nil {
+	if err := daemon.imageDeleteHelper(imgID, platform, &records, force, prune, removedRepositoryRef); err != nil {
 		return nil, err
 	}
 
@@ -231,13 +231,13 @@ func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Contai
 // repositoryRef must not be an image ID but a repository name followed by an
 // optional tag or digest reference. If tag or digest is omitted, the default
 // tag is used. Returns the resolved image reference and an error.
-func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, error) {
+func (daemon *Daemon) removeImageRef(platform string, ref reference.Named) (reference.Named, error) {
 	ref = reference.TagNameOnly(ref)
 
 	// Ignore the boolean value returned, as far as we're concerned, this
 	// is an idempotent operation and it's okay if the reference didn't
 	// exist in the first place.
-	_, err := daemon.referenceStore.Delete(ref)
+	_, err := daemon.stores[platform].referenceStore.Delete(ref)
 
 	return ref, err
 }
@@ -247,11 +247,11 @@ func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, erro
 // on the first encountered error. Removed references are logged to this
 // daemon's event service. An "Untagged" types.ImageDeleteResponseItem is added to the
 // given list of records.
-func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDeleteResponseItem) error {
-	imageRefs := daemon.referenceStore.References(imgID.Digest())
+func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, platform string, records *[]types.ImageDeleteResponseItem) error {
+	imageRefs := daemon.stores[platform].referenceStore.References(imgID.Digest())
 
 	for _, imageRef := range imageRefs {
-		parsedRef, err := daemon.removeImageRef(imageRef)
+		parsedRef, err := daemon.removeImageRef(platform, imageRef)
 		if err != nil {
 			return err
 		}
@@ -296,15 +296,15 @@ func (idc *imageDeleteConflict) Error() string {
 // conflict is encountered, it will be returned immediately without deleting
 // the image. If quiet is true, any encountered conflicts will be ignored and
 // the function will return nil immediately without deleting the image.
-func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDeleteResponseItem, force, prune, quiet bool) error {
+func (daemon *Daemon) imageDeleteHelper(imgID image.ID, platform string, records *[]types.ImageDeleteResponseItem, force, prune, quiet bool) error {
 	// First, determine if this image has any conflicts. Ignore soft conflicts
 	// if force is true.
 	c := conflictHard
 	if !force {
 		c |= conflictSoft
 	}
-	if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil {
-		if quiet && (!daemon.imageIsDangling(imgID) || conflict.used) {
+	if conflict := daemon.checkImageDeleteConflict(imgID, platform, c); conflict != nil {
+		if quiet && (!daemon.imageIsDangling(imgID, platform) || conflict.used) {
 			// Ignore conflicts UNLESS the image is "dangling" or not being used in
 			// which case we want the user to know.
 			return nil
@@ -315,18 +315,18 @@ func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDe
 		return conflict
 	}
 
-	parent, err := daemon.imageStore.GetParent(imgID)
+	parent, err := daemon.stores[platform].imageStore.GetParent(imgID)
 	if err != nil {
 		// There may be no parent
 		parent = ""
 	}
 
 	// Delete all repository tag/digest references to this image.
-	if err := daemon.removeAllReferencesToImageID(imgID, records); err != nil {
+	if err := daemon.removeAllReferencesToImageID(imgID, platform, records); err != nil {
 		return err
 	}
 
-	removedLayers, err := daemon.imageStore.Delete(imgID)
+	removedLayers, err := daemon.stores[platform].imageStore.Delete(imgID)
 	if err != nil {
 		return err
 	}
@@ -346,7 +346,7 @@ func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDe
 	// either running or stopped).
 	// Do not force prunings, but do so quietly (stopping on any encountered
 	// conflicts).
-	return daemon.imageDeleteHelper(parent, records, false, true, true)
+	return daemon.imageDeleteHelper(parent, platform, records, false, true, true)
 }
 
 // checkImageDeleteConflict determines whether there are any conflicts
@@ -355,9 +355,9 @@ func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDe
 // using the image. A soft conflict is any tags/digest referencing the given
 // image or any stopped container using the image. If ignoreSoftConflicts is
 // true, this function will not check for soft conflict conditions.
-func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict {
+func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, platform string, mask conflictType) *imageDeleteConflict {
 	// Check if the image has any descendant images.
-	if mask&conflictDependentChild != 0 && len(daemon.imageStore.Children(imgID)) > 0 {
+	if mask&conflictDependentChild != 0 && len(daemon.stores[platform].imageStore.Children(imgID)) > 0 {
 		return &imageDeleteConflict{
 			hard:    true,
 			imgID:   imgID,
@@ -381,7 +381,7 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType
 	}
 
 	// Check if any repository tags/digest reference this image.
-	if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID.Digest())) > 0 {
+	if mask&conflictActiveReference != 0 && len(daemon.stores[platform].referenceStore.References(imgID.Digest())) > 0 {
 		return &imageDeleteConflict{
 			imgID:   imgID,
 			message: "image is referenced in multiple repositories",
@@ -408,6 +408,6 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType
 // imageIsDangling returns whether the given image is "dangling" which means
 // that there are no repository references to the given image and it has no
 // child images.
-func (daemon *Daemon) imageIsDangling(imgID image.ID) bool {
-	return !(len(daemon.referenceStore.References(imgID.Digest())) > 0 || len(daemon.imageStore.Children(imgID)) > 0)
+func (daemon *Daemon) imageIsDangling(imgID image.ID, platform string) bool {
+	return !(len(daemon.stores[platform].referenceStore.References(imgID.Digest())) > 0 || len(daemon.stores[platform].imageStore.Children(imgID)) > 0)
 }
@@ -2,6 +2,7 @@ package daemon
 
 import (
 	"io"
+	"runtime"
 
 	"github.com/docker/docker/image/tarexport"
 )
@@ -12,7 +13,8 @@ import (
 // the same tag are exported. names is the set of tags to export, and
 // outStream is the writer which the images are written to.
 func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error {
-	imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon)
+	// TODO @jhowardmsft LCOW. For now, assume it is the OS of the host
+	imageExporter := tarexport.NewTarExporter(daemon.stores[runtime.GOOS].imageStore, daemon.stores[runtime.GOOS].layerStore, daemon.stores[runtime.GOOS].referenceStore, daemon)
 	return imageExporter.Save(names, outStream)
 }
 
@@ -20,6 +22,7 @@ func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error {
 // complement of ImageExport. The input stream is an uncompressed tar
 // ball containing images and metadata.
 func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error {
-	imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon)
+	// TODO @jhowardmsft LCOW. For now, assume it is the OS of the host
+	imageExporter := tarexport.NewTarExporter(daemon.stores[runtime.GOOS].imageStore, daemon.stores[runtime.GOOS].layerStore, daemon.stores[runtime.GOOS].referenceStore, daemon)
 	return imageExporter.Load(inTar, outStream, quiet)
 }
@@ -2,6 +2,7 @@ package daemon
 
 import (
 	"fmt"
+	"runtime"
 	"time"
 
 	"github.com/docker/distribution/reference"
@@ -18,6 +19,12 @@ func (daemon *Daemon) ImageHistory(name string) ([]*image.HistoryResponseItem, e
 		return nil, err
 	}
 
+	// If the image OS isn't set, assume it's the host OS
+	platform := img.OS
+	if platform == "" {
+		platform = runtime.GOOS
+	}
+
 	history := []*image.HistoryResponseItem{}
 
 	layerCounter := 0
@@ -33,12 +40,12 @@ func (daemon *Daemon) ImageHistory(name string) ([]*image.HistoryResponseItem, e
 		}
 
 		rootFS.Append(img.RootFS.DiffIDs[layerCounter])
-		l, err := daemon.layerStore.Get(rootFS.ChainID())
+		l, err := daemon.stores[platform].layerStore.Get(rootFS.ChainID())
 		if err != nil {
 			return nil, err
 		}
 		layerSize, err = l.DiffSize()
-		layer.ReleaseAndLog(daemon.layerStore, l)
+		layer.ReleaseAndLog(daemon.stores[platform].layerStore, l)
 		if err != nil {
 			return nil, err
 		}
@@ -62,7 +69,7 @@ func (daemon *Daemon) ImageHistory(name string) ([]*image.HistoryResponseItem, e
 		h.ID = id.String()
 
 		var tags []string
-		for _, r := range daemon.referenceStore.References(id.Digest()) {
+		for _, r := range daemon.stores[platform].referenceStore.References(id.Digest()) {
 			if _, ok := r.(reference.NamedTagged); ok {
 				tags = append(tags, reference.FamiliarString(r))
 			}
@@ -1,6 +1,7 @@
 package daemon
 
 import (
+	"runtime"
 	"time"
 
 	"github.com/docker/distribution/reference"
@@ -17,7 +18,13 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
 		return nil, errors.Wrapf(err, "no such image: %s", name)
 	}
 
-	refs := daemon.referenceStore.References(img.ID().Digest())
+	// If the image OS isn't set, assume it's the host OS
+	platform := img.OS
+	if platform == "" {
+		platform = runtime.GOOS
+	}
+
+	refs := daemon.stores[platform].referenceStore.References(img.ID().Digest())
 	repoTags := []string{}
 	repoDigests := []string{}
 	for _, ref := range refs {
@@ -33,11 +40,11 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
 	var layerMetadata map[string]string
 	layerID := img.RootFS.ChainID()
 	if layerID != "" {
-		l, err := daemon.layerStore.Get(layerID)
+		l, err := daemon.stores[platform].layerStore.Get(layerID)
 		if err != nil {
 			return nil, err
 		}
-		defer layer.ReleaseAndLog(daemon.layerStore, l)
+		defer layer.ReleaseAndLog(daemon.stores[platform].layerStore, l)
 		size, err = l.Size()
 		if err != nil {
 			return nil, err
@@ -67,15 +74,14 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
 		Author:       img.Author,
 		Config:       img.Config,
 		Architecture: img.Architecture,
-		Os:           img.OS,
+		Os:           platform,
 		OsVersion:    img.OSVersion,
 		Size:         size,
 		VirtualSize:  size, // TODO: field unused, deprecate
 		RootFS:       rootFSToAPIType(img.RootFS),
 	}
 
-	imageInspect.GraphDriver.Name = daemon.GraphDriverName()
-
+	imageInspect.GraphDriver.Name = daemon.GraphDriverName(platform)
 	imageInspect.GraphDriver.Data = layerMetadata
 
 	return imageInspect, nil
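ImageHistory and LookupImage apply the same fallback: images created before the OS field existed may not record one, so the host OS is assumed. That rule as a small standalone helper — illustrative, not introduced by this patch:

```go
// platformOfImage returns the platform whose stores hold the image's layers,
// defaulting to the host OS for images with no recorded OS.
func platformOfImage(img *image.Image) string {
	if img.OS == "" {
		return runtime.GOOS
	}
	return img.OS
}
```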
@@ -2,6 +2,7 @@ package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 | 4 |
"io" |
| 5 |
+ "runtime" |
|
| 5 | 6 |
"strings" |
| 6 | 7 |
|
| 7 | 8 |
dist "github.com/docker/distribution" |
| ... | ... |
@@ -59,6 +60,12 @@ func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference. |
| 59 | 59 |
close(writesDone) |
| 60 | 60 |
}() |
| 61 | 61 |
|
| 62 |
+ // ------------------------------------------------------------------------------ |
|
| 63 |
+ // TODO @jhowardmsft LCOW. For now, use just the store for the host OS. This will |
|
| 64 |
+ // need some work to complete - we won't know the platform until after metadata |
|
| 65 |
+ // is pulled from the repository. This affects plugins as well.
|
| 66 |
+ // ------------------------------------------------------------------------------ |
|
| 67 |
+ |
|
| 62 | 68 |
imagePullConfig := &distribution.ImagePullConfig{
|
| 63 | 69 |
Config: distribution.Config{
|
| 64 | 70 |
MetaHeaders: metaHeaders, |
| ... | ... |
@@ -66,9 +73,9 @@ func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference. |
| 66 | 66 |
ProgressOutput: progress.ChanOutput(progressChan), |
| 67 | 67 |
RegistryService: daemon.RegistryService, |
| 68 | 68 |
ImageEventLogger: daemon.LogImageEvent, |
| 69 |
- MetadataStore: daemon.distributionMetadataStore, |
|
| 70 |
- ImageStore: distribution.NewImageConfigStoreFromStore(daemon.imageStore), |
|
| 71 |
- ReferenceStore: daemon.referenceStore, |
|
| 69 |
+ MetadataStore: daemon.stores[runtime.GOOS].distributionMetadataStore, |
|
| 70 |
+ ImageStore: distribution.NewImageConfigStoreFromStore(daemon.stores[runtime.GOOS].imageStore), |
|
| 71 |
+ ReferenceStore: daemon.stores[runtime.GOOS].referenceStore, |
|
| 72 | 72 |
}, |
| 73 | 73 |
DownloadManager: daemon.downloadManager, |
| 74 | 74 |
Schema2Types: distribution.ImageTypes, |
| ... | ... |
@@ -2,6 +2,7 @@ package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 | 4 |
"io" |
| 5 |
+ "runtime" |
|
| 5 | 6 |
|
| 6 | 7 |
"github.com/docker/distribution/manifest/schema2" |
| 7 | 8 |
"github.com/docker/distribution/reference" |
| ... | ... |
@@ -39,6 +40,11 @@ func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHead |
| 39 | 39 |
close(writesDone) |
| 40 | 40 |
}() |
| 41 | 41 |
|
| 42 |
+ // ------------------------------------------------------------------------------ |
|
| 43 |
+ // TODO @jhowardmsft LCOW. For now, use just the store for the host OS. This will |
|
| 44 |
+ // need some work to complete. |
|
| 45 |
+ // ------------------------------------------------------------------------------ |
|
| 46 |
+ |
|
| 42 | 47 |
imagePushConfig := &distribution.ImagePushConfig{
|
| 43 | 48 |
Config: distribution.Config{
|
| 44 | 49 |
MetaHeaders: metaHeaders, |
| ... | ... |
@@ -46,12 +52,12 @@ func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHead |
| 46 | 46 |
ProgressOutput: progress.ChanOutput(progressChan), |
| 47 | 47 |
RegistryService: daemon.RegistryService, |
| 48 | 48 |
ImageEventLogger: daemon.LogImageEvent, |
| 49 |
- MetadataStore: daemon.distributionMetadataStore, |
|
| 50 |
- ImageStore: distribution.NewImageConfigStoreFromStore(daemon.imageStore), |
|
| 51 |
- ReferenceStore: daemon.referenceStore, |
|
| 49 |
+ MetadataStore: daemon.stores[runtime.GOOS].distributionMetadataStore, |
|
| 50 |
+ ImageStore: distribution.NewImageConfigStoreFromStore(daemon.stores[runtime.GOOS].imageStore), |
|
| 51 |
+ ReferenceStore: daemon.stores[runtime.GOOS].referenceStore, |
|
| 52 | 52 |
}, |
| 53 | 53 |
ConfigMediaType: schema2.MediaTypeImageConfig, |
| 54 |
- LayerStore: distribution.NewLayerProviderFromStore(daemon.layerStore), |
|
| 54 |
+ LayerStore: distribution.NewLayerProviderFromStore(daemon.stores[runtime.GOOS].layerStore), |
|
| 55 | 55 |
TrustKey: daemon.trustKey, |
| 56 | 56 |
UploadManager: daemon.uploadManager, |
| 57 | 57 |
} |
| ... | ... |
@@ -8,7 +8,7 @@ import ( |
| 8 | 8 |
// TagImage creates the tag specified by newTag, pointing to the image named |
| 9 | 9 |
// imageName (alternatively, imageName can also be an image ID). |
| 10 | 10 |
func (daemon *Daemon) TagImage(imageName, repository, tag string) error {
|
| 11 |
- imageID, err := daemon.GetImageID(imageName) |
|
| 11 |
+ imageID, platform, err := daemon.GetImageIDAndPlatform(imageName) |
|
| 12 | 12 |
if err != nil {
|
| 13 | 13 |
return err |
| 14 | 14 |
} |
| ... | ... |
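GetImageID becomes GetImageIDAndPlatform, so callers also learn which per-platform store resolved the name. That function is not included in this diff; a hedged sketch of the probing it implies (resolveImage is a hypothetical stand-in, not the daemon method, and tag/digest resolution through the reference store is omitted):

    package lcowsketch

    import (
        "github.com/docker/docker/image"
        "github.com/pkg/errors"
    )

    // resolveImage probes each platform's image store for a bare image ID and
    // reports the platform that matched, mirroring the extra return value a
    // GetImageIDAndPlatform-style lookup has to provide.
    func resolveImage(imageStores map[string]image.Store, refOrID string) (image.ID, string, error) {
        for platform, is := range imageStores {
            if img, err := is.Get(image.ID(refOrID)); err == nil {
                return img.ID(), platform, nil
            }
        }
        return "", "", errors.Errorf("no such image: %s", refOrID)
    }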
@@ -23,12 +23,12 @@ func (daemon *Daemon) TagImage(imageName, repository, tag string) error {
|
| 23 | 23 |
} |
| 24 | 24 |
} |
| 25 | 25 |
|
| 26 |
- return daemon.TagImageWithReference(imageID, newTag) |
|
| 26 |
+ return daemon.TagImageWithReference(imageID, platform, newTag) |
|
| 27 | 27 |
} |
| 28 | 28 |
|
| 29 | 29 |
// TagImageWithReference adds the given reference to the image ID provided. |
| 30 |
-func (daemon *Daemon) TagImageWithReference(imageID image.ID, newTag reference.Named) error {
|
|
| 31 |
- if err := daemon.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil {
|
|
| 30 |
+func (daemon *Daemon) TagImageWithReference(imageID image.ID, platform string, newTag reference.Named) error {
|
|
| 31 |
+ if err := daemon.stores[platform].referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil {
|
|
| 32 | 32 |
return err |
| 33 | 33 |
} |
| 34 | 34 |
|
| ... | ... |
@@ -15,6 +15,7 @@ import ( |
| 15 | 15 |
"github.com/docker/docker/container" |
| 16 | 16 |
"github.com/docker/docker/image" |
| 17 | 17 |
"github.com/docker/docker/layer" |
| 18 |
+ "github.com/docker/docker/pkg/system" |
|
| 18 | 19 |
) |
| 19 | 20 |
|
| 20 | 21 |
var acceptedImageFilterTags = map[string]bool{
|
| ... | ... |
@@ -35,7 +36,12 @@ func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created }
|
| 35 | 35 |
|
| 36 | 36 |
// Map returns a map of all images in the ImageStore |
| 37 | 37 |
func (daemon *Daemon) Map() map[image.ID]*image.Image {
|
| 38 |
- return daemon.imageStore.Map() |
|
| 38 |
+ // TODO @jhowardmsft LCOW. This will need work to enumerate the stores for all platforms. |
|
| 39 |
+ platform := runtime.GOOS |
|
| 40 |
+ if platform == "windows" && system.LCOWSupported() {
|
|
| 41 |
+ platform = "linux" |
|
| 42 |
+ } |
|
| 43 |
+ return daemon.stores[platform].imageStore.Map() |
|
| 39 | 44 |
} |
| 40 | 45 |
|
| 41 | 46 |
// Images returns a filtered list of images. filterArgs is a JSON-encoded set |
| ... | ... |
@@ -44,6 +50,13 @@ func (daemon *Daemon) Map() map[image.ID]*image.Image {
|
| 44 | 44 |
// named all controls whether all images in the graph are filtered, or just |
| 45 | 45 |
// the heads. |
| 46 | 46 |
func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) {
|
| 47 |
+ |
|
| 48 |
+ // TODO @jhowardmsft LCOW. This will need work to enumerate the stores for all platforms. |
|
| 49 |
+ platform := runtime.GOOS |
|
| 50 |
+ if platform == "windows" && system.LCOWSupported() {
|
|
| 51 |
+ platform = "linux" |
|
| 52 |
+ } |
|
| 53 |
+ |
|
| 47 | 54 |
var ( |
| 48 | 55 |
allImages map[image.ID]*image.Image |
| 49 | 56 |
err error |
| ... | ... |
@@ -62,9 +75,9 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs |
| 62 | 62 |
} |
| 63 | 63 |
} |
| 64 | 64 |
if danglingOnly {
|
| 65 |
- allImages = daemon.imageStore.Heads() |
|
| 65 |
+ allImages = daemon.stores[platform].imageStore.Heads() |
|
| 66 | 66 |
} else {
|
| 67 |
- allImages = daemon.imageStore.Map() |
|
| 67 |
+ allImages = daemon.stores[platform].imageStore.Map() |
|
| 68 | 68 |
} |
| 69 | 69 |
|
| 70 | 70 |
var beforeFilter, sinceFilter *image.Image |
| ... | ... |
@@ -117,7 +130,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs |
| 117 | 117 |
layerID := img.RootFS.ChainID() |
| 118 | 118 |
var size int64 |
| 119 | 119 |
if layerID != "" {
|
| 120 |
- l, err := daemon.layerStore.Get(layerID) |
|
| 120 |
+ l, err := daemon.stores[platform].layerStore.Get(layerID) |
|
| 121 | 121 |
if err != nil {
|
| 122 | 122 |
// The layer may have been deleted between the call to `Map()` or |
| 123 | 123 |
// `Heads()` and the call to `Get()`, so we just ignore this error |
| ... | ... |
@@ -128,7 +141,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs |
| 128 | 128 |
} |
| 129 | 129 |
|
| 130 | 130 |
size, err = l.Size() |
| 131 |
- layer.ReleaseAndLog(daemon.layerStore, l) |
|
| 131 |
+ layer.ReleaseAndLog(daemon.stores[platform].layerStore, l) |
|
| 132 | 132 |
if err != nil {
|
| 133 | 133 |
return nil, err |
| 134 | 134 |
} |
| ... | ... |
@@ -136,7 +149,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs |
| 136 | 136 |
|
| 137 | 137 |
newImage := newImage(img, size) |
| 138 | 138 |
|
| 139 |
- for _, ref := range daemon.referenceStore.References(id.Digest()) {
|
|
| 139 |
+ for _, ref := range daemon.stores[platform].referenceStore.References(id.Digest()) {
|
|
| 140 | 140 |
if imageFilters.Include("reference") {
|
| 141 | 141 |
var found bool |
| 142 | 142 |
var matchErr error |
| ... | ... |
@@ -158,7 +171,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs |
| 158 | 158 |
} |
| 159 | 159 |
} |
| 160 | 160 |
if newImage.RepoDigests == nil && newImage.RepoTags == nil {
|
| 161 |
- if all || len(daemon.imageStore.Children(id)) == 0 {
|
|
| 161 |
+ if all || len(daemon.stores[platform].imageStore.Children(id)) == 0 {
|
|
| 162 | 162 |
|
| 163 | 163 |
if imageFilters.Include("dangling") && !danglingOnly {
|
| 164 | 164 |
//dangling=false case, so dangling image is not needed |
| ... | ... |
@@ -180,7 +193,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs |
| 180 | 180 |
// lazily init variables |
| 181 | 181 |
if imagesMap == nil {
|
| 182 | 182 |
allContainers = daemon.List() |
| 183 |
- allLayers = daemon.layerStore.Map() |
|
| 183 |
+ allLayers = daemon.stores[platform].layerStore.Map() |
|
| 184 | 184 |
imagesMap = make(map[*image.Image]*types.ImageSummary) |
| 185 | 185 |
layerRefs = make(map[layer.ChainID]int) |
| 186 | 186 |
} |
| ... | ... |
@@ -243,7 +256,16 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs |
| 243 | 243 |
// The existing image(s) is not destroyed. |
| 244 | 244 |
// If no parent is specified, a new image with the diff of all the specified image's layers merged into a new layer that has no parents. |
| 245 | 245 |
func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
|
| 246 |
- img, err := daemon.imageStore.Get(image.ID(id)) |
|
| 246 |
+ |
|
| 247 |
+ var ( |
|
| 248 |
+ img *image.Image |
|
| 249 |
+ err error |
|
| 250 |
+ ) |
|
| 251 |
+ for _, ds := range daemon.stores {
|
|
| 252 |
+ if img, err = ds.imageStore.Get(image.ID(id)); err == nil {
|
|
| 253 |
+ break |
|
| 254 |
+ } |
|
| 255 |
+ } |
|
| 247 | 256 |
if err != nil {
|
| 248 | 257 |
return "", err |
| 249 | 258 |
} |
| ... | ... |
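Because SquashImage is handed only a bare image ID, there is no platform hint to select a store with, hence the probe over every entry in daemon.stores above; once the image is found, the remaining hunks key the layer and image-store accesses off img.Platform() instead.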
@@ -251,7 +273,7 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
|
| 251 | 251 |
var parentImg *image.Image |
| 252 | 252 |
var parentChainID layer.ChainID |
| 253 | 253 |
if len(parent) != 0 {
|
| 254 |
- parentImg, err = daemon.imageStore.Get(image.ID(parent)) |
|
| 254 |
+ parentImg, err = daemon.stores[img.Platform()].imageStore.Get(image.ID(parent)) |
|
| 255 | 255 |
if err != nil {
|
| 256 | 256 |
return "", errors.Wrap(err, "error getting specified parent layer") |
| 257 | 257 |
} |
| ... | ... |
@@ -261,11 +283,11 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
|
| 261 | 261 |
parentImg = &image.Image{RootFS: rootFS}
|
| 262 | 262 |
} |
| 263 | 263 |
|
| 264 |
- l, err := daemon.layerStore.Get(img.RootFS.ChainID()) |
|
| 264 |
+ l, err := daemon.stores[img.Platform()].layerStore.Get(img.RootFS.ChainID()) |
|
| 265 | 265 |
if err != nil {
|
| 266 | 266 |
return "", errors.Wrap(err, "error getting image layer") |
| 267 | 267 |
} |
| 268 |
- defer daemon.layerStore.Release(l) |
|
| 268 |
+ defer daemon.stores[img.Platform()].layerStore.Release(l) |
|
| 269 | 269 |
|
| 270 | 270 |
ts, err := l.TarStreamFrom(parentChainID) |
| 271 | 271 |
if err != nil {
|
| ... | ... |
@@ -273,17 +295,11 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
|
| 273 | 273 |
} |
| 274 | 274 |
defer ts.Close() |
| 275 | 275 |
|
| 276 |
- // To support LCOW, Windows needs to pass the platform into the store when registering the layer. |
|
| 277 |
- platform := layer.Platform("")
|
|
| 278 |
- if runtime.GOOS == "windows" {
|
|
| 279 |
- platform = l.Platform() |
|
| 280 |
- } |
|
| 281 |
- |
|
| 282 |
- newL, err := daemon.layerStore.Register(ts, parentChainID, platform) |
|
| 276 |
+ newL, err := daemon.stores[img.Platform()].layerStore.Register(ts, parentChainID, layer.Platform(img.Platform())) |
|
| 283 | 277 |
if err != nil {
|
| 284 | 278 |
return "", errors.Wrap(err, "error registering layer") |
| 285 | 279 |
} |
| 286 |
- defer daemon.layerStore.Release(newL) |
|
| 280 |
+ defer daemon.stores[img.Platform()].layerStore.Release(newL) |
|
| 287 | 281 |
|
| 288 | 282 |
var newImage image.Image |
| 289 | 283 |
newImage = *img |
| ... | ... |
@@ -320,7 +336,7 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
|
| 320 | 320 |
return "", errors.Wrap(err, "error marshalling image config") |
| 321 | 321 |
} |
| 322 | 322 |
|
| 323 |
- newImgID, err := daemon.imageStore.Create(b) |
|
| 323 |
+ newImgID, err := daemon.stores[img.Platform()].imageStore.Create(b) |
|
| 324 | 324 |
if err != nil {
|
| 325 | 325 |
return "", errors.Wrap(err, "error creating new image after squash") |
| 326 | 326 |
} |
| ... | ... |
@@ -91,11 +91,11 @@ func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string |
| 91 | 91 |
// but for Linux images, there's no reason it couldn't. However it |
| 92 | 92 |
// would need another CLI flag as there's no meta-data indicating |
| 93 | 93 |
// the OS of the thing being imported. |
| 94 |
- l, err := daemon.layerStore.Register(inflatedLayerData, "", "") |
|
| 94 |
+ l, err := daemon.stores[runtime.GOOS].layerStore.Register(inflatedLayerData, "", "") |
|
| 95 | 95 |
if err != nil {
|
| 96 | 96 |
return err |
| 97 | 97 |
} |
| 98 |
- defer layer.ReleaseAndLog(daemon.layerStore, l) |
|
| 98 |
+ defer layer.ReleaseAndLog(daemon.stores[runtime.GOOS].layerStore, l) // TODO LCOW @jhowardmsft as for above comment |
|
| 99 | 99 |
|
| 100 | 100 |
created := time.Now().UTC() |
| 101 | 101 |
imgConfig, err := json.Marshal(&image.Image{
|
| ... | ... |
@@ -103,7 +103,7 @@ func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string |
| 103 | 103 |
DockerVersion: dockerversion.Version, |
| 104 | 104 |
Config: config, |
| 105 | 105 |
Architecture: runtime.GOARCH, |
| 106 |
- OS: runtime.GOOS, |
|
| 106 |
+ OS: runtime.GOOS, // TODO LCOW @jhowardmsft as for above comment
|
| 107 | 107 |
Created: created, |
| 108 | 108 |
Comment: msg, |
| 109 | 109 |
}, |
| ... | ... |
@@ -120,14 +120,16 @@ func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string |
| 120 | 120 |
return err |
| 121 | 121 |
} |
| 122 | 122 |
|
| 123 |
- id, err := daemon.imageStore.Create(imgConfig) |
|
| 123 |
+ // TODO @jhowardmsft LCOW - Again, assume the OS of the host for now |
|
| 124 |
+ id, err := daemon.stores[runtime.GOOS].imageStore.Create(imgConfig) |
|
| 124 | 125 |
if err != nil {
|
| 125 | 126 |
return err |
| 126 | 127 |
} |
| 127 | 128 |
|
| 128 | 129 |
// FIXME: connect with commit code and call refstore directly |
| 129 | 130 |
if newRef != nil {
|
| 130 |
- if err := daemon.TagImageWithReference(id, newRef); err != nil {
|
|
| 131 |
+ // TODO @jhowardmsft LCOW - Again, assume the OS of the host for now |
|
| 132 |
+ if err := daemon.TagImageWithReference(id, runtime.GOOS, newRef); err != nil {
|
|
| 131 | 133 |
return err |
| 132 | 134 |
} |
| 133 | 135 |
} |
| ... | ... |
@@ -4,6 +4,7 @@ import ( |
| 4 | 4 |
"fmt" |
| 5 | 5 |
"os" |
| 6 | 6 |
"runtime" |
| 7 |
+ "strings" |
|
| 7 | 8 |
"time" |
| 8 | 9 |
|
| 9 | 10 |
"github.com/Sirupsen/logrus" |
| ... | ... |
@@ -77,15 +78,32 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
|
| 77 | 77 |
securityOptions = append(securityOptions, "name=userns") |
| 78 | 78 |
} |
| 79 | 79 |
|
| 80 |
+ imageCount := 0 |
|
| 81 |
+ drivers := "" |
|
| 82 |
+ for p, ds := range daemon.stores {
|
|
| 83 |
+ imageCount += len(ds.imageStore.Map()) |
|
| 84 |
+ drivers += daemon.GraphDriverName(p) |
|
| 85 |
+ if len(daemon.stores) > 1 {
|
|
| 86 |
+ drivers += fmt.Sprintf(" (%s) ", p)
|
|
| 87 |
+ } |
|
| 88 |
+ } |
|
| 89 |
+ |
|
| 90 |
+ // TODO @jhowardmsft LCOW support. For now, hard-code the platform shown for the driver status |
|
| 91 |
+ p := runtime.GOOS |
|
| 92 |
+ if p == "windows" && system.LCOWSupported() {
|
|
| 93 |
+ p = "linux" |
|
| 94 |
+ } |
|
| 95 |
+ |
|
| 96 |
+ drivers = strings.TrimSpace(drivers) |
|
| 80 | 97 |
v := &types.Info{
|
| 81 | 98 |
ID: daemon.ID, |
| 82 | 99 |
Containers: int(cRunning + cPaused + cStopped), |
| 83 | 100 |
ContainersRunning: int(cRunning), |
| 84 | 101 |
ContainersPaused: int(cPaused), |
| 85 | 102 |
ContainersStopped: int(cStopped), |
| 86 |
- Images: len(daemon.imageStore.Map()), |
|
| 87 |
- Driver: daemon.GraphDriverName(), |
|
| 88 |
- DriverStatus: daemon.layerStore.DriverStatus(), |
|
| 103 |
+ Images: imageCount, |
|
| 104 |
+ Driver: drivers, |
|
| 105 |
+ DriverStatus: daemon.stores[p].layerStore.DriverStatus(), |
|
| 89 | 106 |
Plugins: daemon.showPluginsInfo(), |
| 90 | 107 |
IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, |
| 91 | 108 |
BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled, |
| ... | ... |
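On an LCOW-enabled Windows daemon with both stores present, the aggregated Driver field would therefore read along the lines of "windowsfilter (windows) lcow (linux)" (driver names shown for illustration only), while DriverStatus still reflects just the single platform hard-coded above.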
@@ -170,6 +170,7 @@ func (daemon *Daemon) getInspectData(container *container.Container) (*types.Con |
| 170 | 170 |
Name: container.Name, |
| 171 | 171 |
RestartCount: container.RestartCount, |
| 172 | 172 |
Driver: container.Driver, |
| 173 |
+ Platform: container.Platform, |
|
| 173 | 174 |
MountLabel: container.MountLabel, |
| 174 | 175 |
ProcessLabel: container.ProcessLabel, |
| 175 | 176 |
ExecIDs: container.GetExecIDs(), |
| ... | ... |
@@ -317,7 +317,7 @@ func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listConte |
| 317 | 317 |
if psFilters.Include("ancestor") {
|
| 318 | 318 |
ancestorFilter = true |
| 319 | 319 |
psFilters.WalkValues("ancestor", func(ancestor string) error {
|
| 320 |
- id, err := daemon.GetImageID(ancestor) |
|
| 320 |
+ id, platform, err := daemon.GetImageIDAndPlatform(ancestor) |
|
| 321 | 321 |
if err != nil {
|
| 322 | 322 |
logrus.Warnf("Error while looking up for image %v", ancestor)
|
| 323 | 323 |
return nil |
| ... | ... |
@@ -327,7 +327,7 @@ func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listConte |
| 327 | 327 |
return nil |
| 328 | 328 |
} |
| 329 | 329 |
// Then walk down the graph and put the imageIds in imagesFilter |
| 330 |
- populateImageFilterByParents(imagesFilter, id, daemon.imageStore.Children) |
|
| 330 |
+ populateImageFilterByParents(imagesFilter, id, daemon.stores[platform].imageStore.Children) |
|
| 331 | 331 |
return nil |
| 332 | 332 |
}) |
| 333 | 333 |
} |
| ... | ... |
@@ -558,7 +558,7 @@ func (daemon *Daemon) transformContainer(container *container.Container, ctx *li |
| 558 | 558 |
|
| 559 | 559 |
image := container.Config.Image // if possible keep the original ref |
| 560 | 560 |
if image != container.ImageID.String() {
|
| 561 |
- id, err := daemon.GetImageID(image) |
|
| 561 |
+ id, _, err := daemon.GetImageIDAndPlatform(image) |
|
| 562 | 562 |
if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE {
|
| 563 | 563 |
return nil, err |
| 564 | 564 |
} |
| ... | ... |
@@ -3,6 +3,7 @@ package daemon |
| 3 | 3 |
import ( |
| 4 | 4 |
"fmt" |
| 5 | 5 |
"regexp" |
| 6 |
+ "runtime" |
|
| 6 | 7 |
"sync/atomic" |
| 7 | 8 |
"time" |
| 8 | 9 |
|
| ... | ... |
@@ -14,6 +15,7 @@ import ( |
| 14 | 14 |
"github.com/docker/docker/image" |
| 15 | 15 |
"github.com/docker/docker/layer" |
| 16 | 16 |
"github.com/docker/docker/pkg/directory" |
| 17 |
+ "github.com/docker/docker/pkg/system" |
|
| 17 | 18 |
"github.com/docker/docker/runconfig" |
| 18 | 19 |
"github.com/docker/docker/volume" |
| 19 | 20 |
"github.com/docker/libnetwork" |
| ... | ... |
@@ -157,6 +159,12 @@ func (daemon *Daemon) VolumesPrune(ctx context.Context, pruneFilters filters.Arg |
| 157 | 157 |
|
| 158 | 158 |
// ImagesPrune removes unused images |
| 159 | 159 |
func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) {
|
| 160 |
+ // TODO @jhowardmsft LCOW Support: This will need revisiting later. |
|
| 161 |
+ platform := runtime.GOOS |
|
| 162 |
+ if platform == "windows" && system.LCOWSupported() {
|
|
| 163 |
+ platform = "linux" |
|
| 164 |
+ } |
|
| 165 |
+ |
|
| 160 | 166 |
if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) {
|
| 161 | 167 |
return nil, errPruneRunning |
| 162 | 168 |
} |
| ... | ... |
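The same host-OS/LCOW fallback appears verbatim in Map, Images, SystemInfo and here in ImagesPrune. The PR repeats it inline; as a sketch only, the shared idiom amounts to a helper like:

    package lcowsketch

    import (
        "runtime"

        "github.com/docker/docker/pkg/system"
    )

    // defaultPlatform returns the store key these call sites fall back to while
    // the platform cannot yet be derived from the operation itself: the host OS,
    // except on a Windows daemon with LCOW enabled, where the Linux store is used.
    func defaultPlatform() string {
        platform := runtime.GOOS
        if platform == "windows" && system.LCOWSupported() {
            platform = "linux"
        }
        return platform
    }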
@@ -186,9 +194,9 @@ func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args |
| 186 | 186 |
|
| 187 | 187 |
var allImages map[image.ID]*image.Image |
| 188 | 188 |
if danglingOnly {
|
| 189 |
- allImages = daemon.imageStore.Heads() |
|
| 189 |
+ allImages = daemon.stores[platform].imageStore.Heads() |
|
| 190 | 190 |
} else {
|
| 191 |
- allImages = daemon.imageStore.Map() |
|
| 191 |
+ allImages = daemon.stores[platform].imageStore.Map() |
|
| 192 | 192 |
} |
| 193 | 193 |
allContainers := daemon.List() |
| 194 | 194 |
imageRefs := map[string]bool{}
|
| ... | ... |
@@ -202,7 +210,7 @@ func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args |
| 202 | 202 |
} |
| 203 | 203 |
|
| 204 | 204 |
// Filter intermediary images and get their unique size |
| 205 |
- allLayers := daemon.layerStore.Map() |
|
| 205 |
+ allLayers := daemon.stores[platform].layerStore.Map() |
|
| 206 | 206 |
topImages := map[image.ID]*image.Image{}
|
| 207 | 207 |
for id, img := range allImages {
|
| 208 | 208 |
select {
|
| ... | ... |
@@ -210,7 +218,7 @@ func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args |
| 210 | 210 |
return nil, ctx.Err() |
| 211 | 211 |
default: |
| 212 | 212 |
dgst := digest.Digest(id) |
| 213 |
- if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 {
|
|
| 213 |
+ if len(daemon.stores[platform].referenceStore.References(dgst)) == 0 && len(daemon.stores[platform].imageStore.Children(id)) != 0 {
|
|
| 214 | 214 |
continue |
| 215 | 215 |
} |
| 216 | 216 |
if !until.IsZero() && img.Created.After(until) {
|
| ... | ... |
@@ -241,7 +249,7 @@ deleteImagesLoop: |
| 241 | 241 |
} |
| 242 | 242 |
|
| 243 | 243 |
deletedImages := []types.ImageDeleteResponseItem{}
|
| 244 |
- refs := daemon.referenceStore.References(dgst) |
|
| 244 |
+ refs := daemon.stores[platform].referenceStore.References(dgst) |
|
| 245 | 245 |
if len(refs) > 0 {
|
| 246 | 246 |
shouldDelete := !danglingOnly |
| 247 | 247 |
if !shouldDelete {
|
| ... | ... |
@@ -207,7 +207,7 @@ func (daemon *Daemon) Cleanup(container *container.Container) {
|
| 207 | 207 |
if err := daemon.conditionalUnmountOnCleanup(container); err != nil {
|
| 208 | 208 |
// FIXME: remove once reference counting for graphdrivers has been refactored |
| 209 | 209 |
// Ensure that all the mounts are gone |
| 210 |
- if mountid, err := daemon.layerStore.GetMountID(container.ID); err == nil {
|
|
| 210 |
+ if mountid, err := daemon.stores[container.Platform].layerStore.GetMountID(container.ID); err == nil {
|
|
| 211 | 211 |
daemon.cleanupMountsByID(mountid) |
| 212 | 212 |
} |
| 213 | 213 |
} |
| ... | ... |
@@ -41,7 +41,7 @@ func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Contain |
| 41 | 41 |
layerOpts.LayerFolderPath = m["dir"] |
| 42 | 42 |
|
| 43 | 43 |
// Generate the layer paths of the layer options |
| 44 |
- img, err := daemon.imageStore.Get(container.ImageID) |
|
| 44 |
+ img, err := daemon.stores[container.Platform].imageStore.Get(container.ImageID) |
|
| 45 | 45 |
if err != nil {
|
| 46 | 46 |
return nil, fmt.Errorf("failed to graph.Get on ImageID %s - %s", container.ImageID, err)
|
| 47 | 47 |
} |
| ... | ... |
@@ -49,9 +49,9 @@ func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Contain |
| 49 | 49 |
max := len(img.RootFS.DiffIDs) |
| 50 | 50 |
for i := 1; i <= max; i++ {
|
| 51 | 51 |
img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i] |
| 52 |
- layerPath, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID()) |
|
| 52 |
+ layerPath, err := layer.GetLayerPath(daemon.stores[container.Platform].layerStore, img.RootFS.ChainID()) |
|
| 53 | 53 |
if err != nil {
|
| 54 |
- return nil, fmt.Errorf("failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err)
|
|
| 54 |
+ return nil, fmt.Errorf("failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.stores[container.Platform].layerStore, img.RootFS.ChainID(), err)
|
|
| 55 | 55 |
} |
| 56 | 56 |
// Reverse order, expecting parent most first |
| 57 | 57 |
layerOpts.LayerPaths = append([]string{layerPath}, layerOpts.LayerPaths...)
|
| ... | ... |
@@ -26,15 +26,17 @@ type Store interface {
|
| 26 | 26 |
type FSMetadataStore struct {
|
| 27 | 27 |
sync.RWMutex |
| 28 | 28 |
basePath string |
| 29 |
+ platform string |
|
| 29 | 30 |
} |
| 30 | 31 |
|
| 31 | 32 |
// NewFSMetadataStore creates a new filesystem-based metadata store. |
| 32 |
-func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) {
|
|
| 33 |
+func NewFSMetadataStore(basePath, platform string) (*FSMetadataStore, error) {
|
|
| 33 | 34 |
if err := os.MkdirAll(basePath, 0700); err != nil {
|
| 34 | 35 |
return nil, err |
| 35 | 36 |
} |
| 36 | 37 |
return &FSMetadataStore{
|
| 37 | 38 |
basePath: basePath, |
| 39 |
+ platform: platform, |
|
| 38 | 40 |
}, nil |
| 39 | 41 |
} |
| 40 | 42 |
|
| ... | ... |
@@ -3,6 +3,7 @@ package metadata |
| 3 | 3 |
import ( |
| 4 | 4 |
"io/ioutil" |
| 5 | 5 |
"os" |
| 6 |
+ "runtime" |
|
| 6 | 7 |
"testing" |
| 7 | 8 |
|
| 8 | 9 |
"github.com/docker/docker/layer" |
| ... | ... |
@@ -15,7 +16,7 @@ func TestV1IDService(t *testing.T) {
|
| 15 | 15 |
} |
| 16 | 16 |
defer os.RemoveAll(tmpDir) |
| 17 | 17 |
|
| 18 |
- metadataStore, err := NewFSMetadataStore(tmpDir) |
|
| 18 |
+ metadataStore, err := NewFSMetadataStore(tmpDir, runtime.GOOS) |
|
| 19 | 19 |
if err != nil {
|
| 20 | 20 |
t.Fatalf("could not create metadata store: %v", err)
|
| 21 | 21 |
} |
| ... | ... |
@@ -6,6 +6,7 @@ import ( |
| 6 | 6 |
"math/rand" |
| 7 | 7 |
"os" |
| 8 | 8 |
"reflect" |
| 9 |
+ "runtime" |
|
| 9 | 10 |
"testing" |
| 10 | 11 |
|
| 11 | 12 |
"github.com/docker/docker/layer" |
| ... | ... |
@@ -19,7 +20,7 @@ func TestV2MetadataService(t *testing.T) {
|
| 19 | 19 |
} |
| 20 | 20 |
defer os.RemoveAll(tmpDir) |
| 21 | 21 |
|
| 22 |
- metadataStore, err := NewFSMetadataStore(tmpDir) |
|
| 22 |
+ metadataStore, err := NewFSMetadataStore(tmpDir, runtime.GOOS) |
|
| 23 | 23 |
if err != nil {
|
| 24 | 24 |
t.Fatalf("could not create metadata store: %v", err)
|
| 25 | 25 |
} |
| ... | ... |
@@ -4,6 +4,7 @@ import ( |
| 4 | 4 |
"errors" |
| 5 | 5 |
"fmt" |
| 6 | 6 |
"io" |
| 7 |
+ "runtime" |
|
| 7 | 8 |
"time" |
| 8 | 9 |
|
| 9 | 10 |
"github.com/Sirupsen/logrus" |
| ... | ... |
@@ -22,7 +23,7 @@ const maxDownloadAttempts = 5 |
| 22 | 22 |
// registers and downloads those, taking into account dependencies between |
| 23 | 23 |
// layers. |
| 24 | 24 |
type LayerDownloadManager struct {
|
| 25 |
- layerStore layer.Store |
|
| 25 |
+ layerStores map[string]layer.Store |
|
| 26 | 26 |
tm TransferManager |
| 27 | 27 |
waitDuration time.Duration |
| 28 | 28 |
} |
| ... | ... |
@@ -33,9 +34,9 @@ func (ldm *LayerDownloadManager) SetConcurrency(concurrency int) {
|
| 33 | 33 |
} |
| 34 | 34 |
|
| 35 | 35 |
// NewLayerDownloadManager returns a new LayerDownloadManager. |
| 36 |
-func NewLayerDownloadManager(layerStore layer.Store, concurrencyLimit int, options ...func(*LayerDownloadManager)) *LayerDownloadManager {
|
|
| 36 |
+func NewLayerDownloadManager(layerStores map[string]layer.Store, concurrencyLimit int, options ...func(*LayerDownloadManager)) *LayerDownloadManager {
|
|
| 37 | 37 |
manager := LayerDownloadManager{
|
| 38 |
- layerStore: layerStore, |
|
| 38 |
+ layerStores: layerStores, |
|
| 39 | 39 |
tm: NewTransferManager(concurrencyLimit), |
| 40 | 40 |
waitDuration: time.Second, |
| 41 | 41 |
} |
| ... | ... |
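NewLayerDownloadManager now takes the full map of per-platform layer stores rather than a single store. A hypothetical caller-side sketch (store values and the concurrency limit are placeholders, not taken from this PR):

    package main

    import (
        "github.com/docker/docker/distribution/xfer"
        "github.com/docker/docker/layer"
    )

    func main() {
        // Placeholders only: the daemon would populate these from its real
        // per-platform layer stores; the "linux" entry exists only when LCOW
        // support is enabled on a Windows host.
        var windowsStore, linuxStore layer.Store
        layerStores := map[string]layer.Store{
            "windows": windowsStore,
            "linux":   linuxStore,
        }
        _ = xfer.NewLayerDownloadManager(layerStores, 3)
    }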
@@ -104,6 +105,11 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima |
| 104 | 104 |
downloadsByKey = make(map[string]*downloadTransfer) |
| 105 | 105 |
) |
| 106 | 106 |
|
| 107 |
+ // Assume that the platform is the host OS if blank |
|
| 108 |
+ if platform == "" {
|
|
| 109 |
+ platform = layer.Platform(runtime.GOOS) |
|
| 110 |
+ } |
|
| 111 |
+ |
|
| 107 | 112 |
rootFS := initialRootFS |
| 108 | 113 |
for _, descriptor := range layers {
|
| 109 | 114 |
key := descriptor.Key() |
| ... | ... |
@@ -115,13 +121,13 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima |
| 115 | 115 |
if err == nil {
|
| 116 | 116 |
getRootFS := rootFS |
| 117 | 117 |
getRootFS.Append(diffID) |
| 118 |
- l, err := ldm.layerStore.Get(getRootFS.ChainID()) |
|
| 118 |
+ l, err := ldm.layerStores[string(platform)].Get(getRootFS.ChainID()) |
|
| 119 | 119 |
if err == nil {
|
| 120 | 120 |
// Layer already exists. |
| 121 | 121 |
logrus.Debugf("Layer already exists: %s", descriptor.ID())
|
| 122 | 122 |
progress.Update(progressOutput, descriptor.ID(), "Already exists") |
| 123 | 123 |
if topLayer != nil {
|
| 124 |
- layer.ReleaseAndLog(ldm.layerStore, topLayer) |
|
| 124 |
+ layer.ReleaseAndLog(ldm.layerStores[string(platform)], topLayer) |
|
| 125 | 125 |
} |
| 126 | 126 |
topLayer = l |
| 127 | 127 |
missingLayer = false |
| ... | ... |
@@ -165,7 +171,7 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima |
| 165 | 165 |
if topDownload == nil {
|
| 166 | 166 |
return rootFS, func() {
|
| 167 | 167 |
if topLayer != nil {
|
| 168 |
- layer.ReleaseAndLog(ldm.layerStore, topLayer) |
|
| 168 |
+ layer.ReleaseAndLog(ldm.layerStores[string(platform)], topLayer) |
|
| 169 | 169 |
} |
| 170 | 170 |
}, nil |
| 171 | 171 |
} |
| ... | ... |
@@ -176,7 +182,7 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima |
| 176 | 176 |
|
| 177 | 177 |
defer func() {
|
| 178 | 178 |
if topLayer != nil {
|
| 179 |
- layer.ReleaseAndLog(ldm.layerStore, topLayer) |
|
| 179 |
+ layer.ReleaseAndLog(ldm.layerStores[string(platform)], topLayer) |
|
| 180 | 180 |
} |
| 181 | 181 |
}() |
| 182 | 182 |
|
| ... | ... |
@@ -216,7 +222,7 @@ func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, |
| 216 | 216 |
return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
|
| 217 | 217 |
d := &downloadTransfer{
|
| 218 | 218 |
Transfer: NewTransfer(), |
| 219 |
- layerStore: ldm.layerStore, |
|
| 219 |
+ layerStore: ldm.layerStores[string(platform)], |
|
| 220 | 220 |
} |
| 221 | 221 |
|
| 222 | 222 |
go func() {
|
| ... | ... |
@@ -380,7 +386,7 @@ func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor Downloa |
| 380 | 380 |
return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
|
| 381 | 381 |
d := &downloadTransfer{
|
| 382 | 382 |
Transfer: NewTransfer(), |
| 383 |
- layerStore: ldm.layerStore, |
|
| 383 |
+ layerStore: ldm.layerStores[string(platform)], |
|
| 384 | 384 |
} |
| 385 | 385 |
|
| 386 | 386 |
go func() {
|
| ... | ... |
@@ -46,6 +46,9 @@ type store struct {
|
| 46 | 46 |
// referencesByIDCache is a cache of references indexed by ID, to speed |
| 47 | 47 |
// up References. |
| 48 | 48 |
referencesByIDCache map[digest.Digest]map[string]reference.Named |
| 49 |
+ // platform is the container target platform for this store (which may be |
|
| 50 |
+ // different from the host operating system)
|
| 51 |
+ platform string |
|
| 49 | 52 |
} |
| 50 | 53 |
|
| 51 | 54 |
// Repository maps tags to digests. The key is a stringified Reference, |
| ... | ... |
@@ -70,7 +73,7 @@ func (a lexicalAssociations) Less(i, j int) bool {
|
| 70 | 70 |
|
| 71 | 71 |
// NewReferenceStore creates a new reference store, tied to a file path where |
| 72 | 72 |
// the set of references are serialized in JSON format. |
| 73 |
-func NewReferenceStore(jsonPath string) (Store, error) {
|
|
| 73 |
+func NewReferenceStore(jsonPath, platform string) (Store, error) {
|
|
| 74 | 74 |
abspath, err := filepath.Abs(jsonPath) |
| 75 | 75 |
if err != nil {
|
| 76 | 76 |
return nil, err |
| ... | ... |
@@ -80,6 +83,7 @@ func NewReferenceStore(jsonPath string) (Store, error) {
|
| 80 | 80 |
jsonPath: abspath, |
| 81 | 81 |
Repositories: make(map[string]repository), |
| 82 | 82 |
referencesByIDCache: make(map[digest.Digest]map[string]reference.Named), |
| 83 |
+ platform: platform, |
|
| 83 | 84 |
} |
| 84 | 85 |
// Load the json file if it exists, otherwise create it. |
| 85 | 86 |
if err := store.reload(); os.IsNotExist(err) {
|
| ... | ... |
@@ -5,6 +5,7 @@ import ( |
| 5 | 5 |
"io/ioutil" |
| 6 | 6 |
"os" |
| 7 | 7 |
"path/filepath" |
| 8 |
+ "runtime" |
|
| 8 | 9 |
"strings" |
| 9 | 10 |
"testing" |
| 10 | 11 |
|
| ... | ... |
@@ -40,7 +41,7 @@ func TestLoad(t *testing.T) {
|
| 40 | 40 |
} |
| 41 | 41 |
jsonFile.Close() |
| 42 | 42 |
|
| 43 |
- store, err := NewReferenceStore(jsonFile.Name()) |
|
| 43 |
+ store, err := NewReferenceStore(jsonFile.Name(), runtime.GOOS) |
|
| 44 | 44 |
if err != nil {
|
| 45 | 45 |
t.Fatalf("error creating tag store: %v", err)
|
| 46 | 46 |
} |
| ... | ... |
@@ -69,7 +70,7 @@ func TestSave(t *testing.T) {
|
| 69 | 69 |
jsonFile.Close() |
| 70 | 70 |
defer os.RemoveAll(jsonFile.Name()) |
| 71 | 71 |
|
| 72 |
- store, err := NewReferenceStore(jsonFile.Name()) |
|
| 72 |
+ store, err := NewReferenceStore(jsonFile.Name(), runtime.GOOS) |
|
| 73 | 73 |
if err != nil {
|
| 74 | 74 |
t.Fatalf("error creating tag store: %v", err)
|
| 75 | 75 |
} |
| ... | ... |
@@ -111,7 +112,7 @@ func TestAddDeleteGet(t *testing.T) {
|
| 111 | 111 |
jsonFile.Close() |
| 112 | 112 |
defer os.RemoveAll(jsonFile.Name()) |
| 113 | 113 |
|
| 114 |
- store, err := NewReferenceStore(jsonFile.Name()) |
|
| 114 |
+ store, err := NewReferenceStore(jsonFile.Name(), runtime.GOOS) |
|
| 115 | 115 |
if err != nil {
|
| 116 | 116 |
t.Fatalf("error creating tag store: %v", err)
|
| 117 | 117 |
} |
| ... | ... |
@@ -328,7 +329,7 @@ func TestInvalidTags(t *testing.T) {
|
| 328 | 328 |
tmpDir, err := ioutil.TempDir("", "tag-store-test")
|
| 329 | 329 |
defer os.RemoveAll(tmpDir) |
| 330 | 330 |
|
| 331 |
- store, err := NewReferenceStore(filepath.Join(tmpDir, "repositories.json")) |
|
| 331 |
+ store, err := NewReferenceStore(filepath.Join(tmpDir, "repositories.json"), runtime.GOOS) |
|
| 332 | 332 |
if err != nil {
|
| 333 | 333 |
t.Fatalf("error creating tag store: %v", err)
|
| 334 | 334 |
} |