Signed-off-by: John Howard <jhoward@microsoft.com>
This re-coalesces the daemon stores, which were split as part of the
original LCOW implementation.
This is part of the work discussed in https://github.com/moby/moby/issues/34617,
in particular see the document linked to in that issue.
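In short: where the daemon previously kept one per-platform bundle of stores and looked them up as `daemon.stores[platform]`, it now holds a single image store, layer store and distribution metadata store directly on the `Daemon` struct, keeping only the graph-driver names keyed by operating system. A minimal sketch of the shape of the change, using hypothetical simplified stand-in types rather than the real moby ones (the exact code is in the diff below):

```go
package main

import (
	"fmt"
	"runtime"
)

// Hypothetical, simplified stand-ins for the real stores, for illustration only.
type imageStore struct{}
type layerStore struct{}

// Before this change: one bundle of stores per container target platform.
type daemonStoreBefore struct {
	graphDriver string
	imageStore  imageStore
	layerStore  layerStore
}
type daemonBefore struct {
	stores map[string]daemonStoreBefore // keyed by platform
}

// After this change: single stores on the daemon; only the graph-driver
// name remains keyed by operating system.
type daemonAfter struct {
	graphDrivers map[string]string
	imageStore   imageStore
	layerStore   layerStore
}

func main() {
	before := daemonBefore{stores: map[string]daemonStoreBefore{
		runtime.GOOS: {graphDriver: "overlay2"},
	}}
	after := daemonAfter{graphDrivers: map[string]string{runtime.GOOS: "overlay2"}}

	// Call sites change from daemon.stores[os].imageStore to daemon.imageStore.
	fmt.Println(before.stores[runtime.GOOS].graphDriver, after.graphDrivers[runtime.GOOS])
}
```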
@@ -17,7 +17,7 @@ import (
 // ImageComponent provides an interface for working with images
 type ImageComponent interface {
     SquashImage(from string, to string) (string, error)
-    TagImageWithReference(image.ID, string, reference.Named) error
+    TagImageWithReference(image.ID, reference.Named) error
 }
 
 // Builder defines interface for running a build
@@ -3,11 +3,9 @@ package build
 import (
     "fmt"
     "io"
-    "runtime"
 
     "github.com/docker/distribution/reference"
     "github.com/docker/docker/image"
-    "github.com/docker/docker/pkg/system"
     "github.com/pkg/errors"
 )
 
@@ -35,12 +33,7 @@ func NewTagger(backend ImageComponent, stdout io.Writer, names []string) (*Tagge
 // TagImages creates image tags for the imageID
 func (bt *Tagger) TagImages(imageID image.ID) error {
     for _, rt := range bt.repoAndTags {
-        // TODO @jhowardmsft LCOW support. Will need revisiting.
-        platform := runtime.GOOS
-        if system.LCOWSupported() {
-            platform = "linux"
-        }
-        if err := bt.imageComponent.TagImageWithReference(imageID, platform, rt); err != nil {
+        if err := bt.imageComponent.TagImageWithReference(imageID, rt); err != nil {
             return err
         }
         fmt.Fprintf(bt.stdout, "Successfully tagged %s\n", reference.FamiliarString(rt))
@@ -44,7 +44,7 @@ type Backend interface {
     // ContainerCreateWorkdir creates the workdir
     ContainerCreateWorkdir(containerID string) error
 
-    CreateImage(config []byte, parent string, platform string) (Image, error)
+    CreateImage(config []byte, parent string) (Image, error)
 
     ImageCacheBuilder
 }
@@ -79,7 +79,7 @@ type Result struct {
 // ImageCacheBuilder represents a generator for stateful image cache.
 type ImageCacheBuilder interface {
     // MakeImageCache creates a stateful image cache.
-    MakeImageCache(cacheFrom []string, platform string) ImageCache
+    MakeImageCache(cacheFrom []string) ImageCache
 }
 
 // ImageCache abstracts an image cache.
@@ -123,7 +123,7 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (
         PathCache:  bm.pathCache,
         IDMappings: bm.idMappings,
     }
-    return newBuilder(ctx, builderOptions, os).build(source, dockerfile)
+    return newBuilder(ctx, builderOptions).build(source, dockerfile)
 }
 
 func (bm *BuildManager) initializeClientSession(ctx context.Context, cancel func(), options *types.ImageBuildOptions) (builder.Source, error) {
@@ -190,7 +190,7 @@ type Builder struct {
 }
 
 // newBuilder creates a new Dockerfile builder from an optional dockerfile and a Options.
-func newBuilder(clientCtx context.Context, options builderOptions, os string) *Builder {
+func newBuilder(clientCtx context.Context, options builderOptions) *Builder {
     config := options.Options
     if config == nil {
         config = new(types.ImageBuildOptions)
@@ -207,7 +207,7 @@ func newBuilder(clientCtx context.Context, options builderOptions, os string) *B
         idMappings:       options.IDMappings,
         imageSources:     newImageSources(clientCtx, options),
         pathCache:        options.PathCache,
-        imageProber:      newImageProber(options.Backend, config.CacheFrom, os, config.NoCache),
+        imageProber:      newImageProber(options.Backend, config.CacheFrom, config.NoCache),
         containerManager: newContainerManager(options.Backend),
     }
 
@@ -367,14 +367,9 @@ func BuildFromConfig(config *container.Config, changes []string) (*container.Con
         return nil, errdefs.InvalidParameter(err)
     }
 
-    os := runtime.GOOS
-    if dockerfile.OS != "" {
-        os = dockerfile.OS
-    }
-
     b := newBuilder(context.Background(), builderOptions{
         Options: &types.ImageBuildOptions{NoCache: true},
-    }, os)
+    })
 
     // ensure that the commands are valid
     for _, n := range dockerfile.AST.Children {
@@ -31,7 +31,7 @@ func newBuilderWithMockBackend() *Builder {
             Options: &types.ImageBuildOptions{Platform: runtime.GOOS},
             Backend: mockBackend,
         }),
-        imageProber:      newImageProber(mockBackend, nil, runtime.GOOS, false),
+        imageProber:      newImageProber(mockBackend, nil, false),
         containerManager: newContainerManager(mockBackend),
     }
     return b
@@ -427,10 +427,10 @@ func TestRunWithBuildArgs(t *testing.T) {
     }
 
     mockBackend := b.docker.(*MockBackend)
-    mockBackend.makeImageCacheFunc = func(_ []string, _ string) builder.ImageCache {
+    mockBackend.makeImageCacheFunc = func(_ []string) builder.ImageCache {
         return imageCache
     }
-    b.imageProber = newImageProber(mockBackend, nil, runtime.GOOS, false)
+    b.imageProber = newImageProber(mockBackend, nil, false)
     mockBackend.getImageFunc = func(_ string) (builder.Image, builder.ReleaseableLayer, error) {
         return &mockImage{
             id: "abcdef",
@@ -19,13 +19,13 @@ type imageProber struct {
     cacheBusted bool
 }
 
-func newImageProber(cacheBuilder builder.ImageCacheBuilder, cacheFrom []string, platform string, noCache bool) ImageProber {
+func newImageProber(cacheBuilder builder.ImageCacheBuilder, cacheFrom []string, noCache bool) ImageProber {
     if noCache {
         return &nopProber{}
     }
 
     reset := func() builder.ImageCache {
-        return cacheBuilder.MakeImageCache(cacheFrom, platform)
+        return cacheBuilder.MakeImageCache(cacheFrom)
     }
     return &imageProber{cache: reset(), reset: reset}
 }
@@ -154,7 +154,7 @@ func (b *Builder) exportImage(state *dispatchState, imageMount *imageMount, runC
         return errors.Wrap(err, "failed to encode image config")
     }
 
-    exportedImage, err := b.docker.CreateImage(config, state.imageID, parentImage.OS)
+    exportedImage, err := b.docker.CreateImage(config, state.imageID)
     if err != nil {
         return errors.Wrapf(err, "failed to export image")
     }
@@ -20,7 +20,7 @@ type MockBackend struct {
     containerCreateFunc func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
     commitFunc          func(string, *backend.ContainerCommitConfig) (string, error)
    getImageFunc        func(string) (builder.Image, builder.ReleaseableLayer, error)
-    makeImageCacheFunc  func(cacheFrom []string, platform string) builder.ImageCache
+    makeImageCacheFunc  func(cacheFrom []string) builder.ImageCache
 }
 
 func (m *MockBackend) ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error {
@@ -73,14 +73,14 @@ func (m *MockBackend) GetImageAndReleasableLayer(ctx context.Context, refOrID st
     return &mockImage{id: "theid"}, &mockLayer{}, nil
 }
 
-func (m *MockBackend) MakeImageCache(cacheFrom []string, platform string) builder.ImageCache {
+func (m *MockBackend) MakeImageCache(cacheFrom []string) builder.ImageCache {
     if m.makeImageCacheFunc != nil {
-        return m.makeImageCacheFunc(cacheFrom, platform)
+        return m.makeImageCacheFunc(cacheFrom)
     }
     return nil
 }
 
-func (m *MockBackend) CreateImage(config []byte, parent string, platform string) (builder.Image, error) {
+func (m *MockBackend) CreateImage(config []byte, parent string) (builder.Image, error) {
     return nil, nil
 }
 
@@ -2,7 +2,6 @@ package daemon
 
 import (
     "io"
-    "runtime"
 
     "github.com/docker/distribution/reference"
     "github.com/docker/docker/api/types"
@@ -24,6 +23,7 @@ type releaseableLayer struct {
     layerStore layer.Store
     roLayer    layer.Layer
     rwLayer    layer.RWLayer
+    os         string
 }
 
 func (rl *releaseableLayer) Mount() (containerfs.ContainerFS, error) {
@@ -35,7 +35,7 @@ func (rl *releaseableLayer) Mount() (containerfs.ContainerFS, error) {
     }
 
     mountID := stringid.GenerateRandomID()
-    rl.rwLayer, err = rl.layerStore.CreateRWLayer(mountID, chainID, nil)
+    rl.rwLayer, err = rl.layerStore.CreateRWLayer(mountID, chainID, rl.os, nil)
     if err != nil {
         return nil, errors.Wrap(err, "failed to create rwlayer")
     }
@@ -67,12 +67,12 @@ func (rl *releaseableLayer) Commit(os string) (builder.ReleaseableLayer, error)
     }
     defer stream.Close()
 
-    newLayer, err := rl.layerStore.Register(stream, chainID, layer.OS(os))
+    newLayer, err := rl.layerStore.Register(stream, chainID, os)
     if err != nil {
         return nil, err
     }
-    // TODO: An optimization would be to handle empty layers before returning
-    return &releaseableLayer{layerStore: rl.layerStore, roLayer: newLayer}, nil
+    // TODO: An optimization would be to handle empty layers before returning
+    return &releaseableLayer{layerStore: rl.layerStore, roLayer: newLayer, os: os}, nil
 }
 
 func (rl *releaseableLayer) DiffID() layer.DiffID {
@@ -128,9 +128,9 @@ func (rl *releaseableLayer) releaseROLayer() error {
     return err
 }
 
-func newReleasableLayerForImage(img *image.Image, layerStore layer.Store) (builder.ReleaseableLayer, error) {
+func newReleasableLayerForImage(img *image.Image, layerStore layer.Store, os string) (builder.ReleaseableLayer, error) {
     if img == nil || img.RootFS.ChainID() == "" {
-        return &releaseableLayer{layerStore: layerStore}, nil
+        return &releaseableLayer{layerStore: layerStore, os: os}, nil
     }
     // Hold a reference to the image layer so that it can't be removed before
     // it is released
@@ -138,11 +138,11 @@ func newReleasableLayerForImage(img *image.Image, layerStore layer.Store) (build
     if err != nil {
         return nil, errors.Wrapf(err, "failed to get layer for image %s", img.ImageID())
     }
-    return &releaseableLayer{layerStore: layerStore, roLayer: roLayer}, nil
+    return &releaseableLayer{layerStore: layerStore, roLayer: roLayer, os: os}, nil
 }
 
 // TODO: could this use the regular daemon PullImage ?
-func (daemon *Daemon) pullForBuilder(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer, platform string) (*image.Image, error) {
+func (daemon *Daemon) pullForBuilder(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer, os string) (*image.Image, error) {
     ref, err := reference.ParseNormalizedNamed(name)
     if err != nil {
         return nil, err
@@ -161,7 +161,7 @@ func (daemon *Daemon) pullForBuilder(ctx context.Context, name string, authConfi
         pullRegistryAuth = &resolvedConfig
     }
 
-    if err := daemon.pullImageWithReference(ctx, ref, platform, nil, pullRegistryAuth, output); err != nil {
+    if err := daemon.pullImageWithReference(ctx, ref, os, nil, pullRegistryAuth, output); err != nil {
         return nil, err
     }
     return daemon.GetImage(name)
@@ -172,7 +172,7 @@ func (daemon *Daemon) pullForBuilder(ctx context.Context, name string, authConfi
 // leaking of layers.
 func (daemon *Daemon) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ReleaseableLayer, error) {
     if refOrID == "" {
-        layer, err := newReleasableLayerForImage(nil, daemon.stores[opts.OS].layerStore)
+        layer, err := newReleasableLayerForImage(nil, daemon.layerStore, opts.OS)
         return nil, layer, err
     }
 
@@ -183,7 +183,7 @@ func (daemon *Daemon) GetImageAndReleasableLayer(ctx context.Context, refOrID st
         }
         // TODO: shouldn't we error out if error is different from "not found" ?
         if image != nil {
-            layer, err := newReleasableLayerForImage(image, daemon.stores[opts.OS].layerStore)
+            layer, err := newReleasableLayerForImage(image, daemon.layerStore, image.OperatingSystem())
             return image, layer, err
         }
     }
@@ -192,29 +192,26 @@ func (daemon *Daemon) GetImageAndReleasableLayer(ctx context.Context, refOrID st
     if err != nil {
         return nil, nil, err
     }
-    layer, err := newReleasableLayerForImage(image, daemon.stores[opts.OS].layerStore)
+    layer, err := newReleasableLayerForImage(image, daemon.layerStore, image.OperatingSystem())
     return image, layer, err
 }
 
 // CreateImage creates a new image by adding a config and ID to the image store.
 // This is similar to LoadImage() except that it receives JSON encoded bytes of
 // an image instead of a tar archive.
-func (daemon *Daemon) CreateImage(config []byte, parent string, platform string) (builder.Image, error) {
-    if platform == "" {
-        platform = runtime.GOOS
-    }
-    id, err := daemon.stores[platform].imageStore.Create(config)
+func (daemon *Daemon) CreateImage(config []byte, parent string) (builder.Image, error) {
+    id, err := daemon.imageStore.Create(config)
     if err != nil {
         return nil, errors.Wrapf(err, "failed to create image")
     }
 
     if parent != "" {
-        if err := daemon.stores[platform].imageStore.SetParent(id, image.ID(parent)); err != nil {
+        if err := daemon.imageStore.SetParent(id, image.ID(parent)); err != nil {
            return nil, errors.Wrapf(err, "failed to set parent %s", parent)
         }
     }
 
-    return daemon.stores[platform].imageStore.Get(id)
+    return daemon.imageStore.Get(id)
 }
 
 // IDMappings returns uid/gid mappings for the builder
@@ -7,12 +7,12 @@ import (
 )
 
 // MakeImageCache creates a stateful image cache.
-func (daemon *Daemon) MakeImageCache(sourceRefs []string, platform string) builder.ImageCache {
+func (daemon *Daemon) MakeImageCache(sourceRefs []string) builder.ImageCache {
     if len(sourceRefs) == 0 {
-        return cache.NewLocal(daemon.stores[platform].imageStore)
+        return cache.NewLocal(daemon.imageStore)
     }
 
-    cache := cache.New(daemon.stores[platform].imageStore)
+    cache := cache.New(daemon.imageStore)
 
     for _, ref := range sourceRefs {
         img, err := daemon.GetImage(ref)
@@ -180,17 +180,17 @@ func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (str
         parent = new(image.Image)
         parent.RootFS = image.NewRootFS()
     } else {
-        parent, err = daemon.stores[container.OS].imageStore.Get(container.ImageID)
+        parent, err = daemon.imageStore.Get(container.ImageID)
         if err != nil {
             return "", err
         }
     }
 
-    l, err := daemon.stores[container.OS].layerStore.Register(rwTar, parent.RootFS.ChainID(), layer.OS(container.OS))
+    l, err := daemon.layerStore.Register(rwTar, parent.RootFS.ChainID(), container.OS)
     if err != nil {
         return "", err
     }
-    defer layer.ReleaseAndLog(daemon.stores[container.OS].layerStore, l)
+    defer layer.ReleaseAndLog(daemon.layerStore, l)
 
     containerConfig := c.ContainerConfig
     if containerConfig == nil {
@@ -209,13 +209,13 @@ func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (str
         return "", err
     }
 
-    id, err := daemon.stores[container.OS].imageStore.Create(config)
+    id, err := daemon.imageStore.Create(config)
     if err != nil {
         return "", err
     }
 
     if container.ImageID != "" {
-        if err := daemon.stores[container.OS].imageStore.SetParent(id, container.ImageID); err != nil {
+        if err := daemon.imageStore.SetParent(id, container.ImageID); err != nil {
             return "", err
         }
     }
@@ -234,7 +234,7 @@ func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (str
             return "", err
         }
     }
-    if err := daemon.TagImageWithReference(id, container.OS, newTag); err != nil {
+    if err := daemon.TagImageWithReference(id, newTag); err != nil {
         return "", err
     }
     imageRef = reference.FamiliarString(newTag)
@@ -257,7 +257,7 @@ func (daemon *Daemon) generateSecurityOpt(hostConfig *containertypes.HostConfig)
 func (daemon *Daemon) setRWLayer(container *container.Container) error {
     var layerID layer.ChainID
     if container.ImageID != "" {
-        img, err := daemon.stores[container.OS].imageStore.Get(container.ImageID)
+        img, err := daemon.imageStore.Get(container.ImageID)
         if err != nil {
             return err
         }
@@ -270,7 +270,7 @@ func (daemon *Daemon) setRWLayer(container *container.Container) error {
         StorageOpt: container.HostConfig.StorageOpt,
     }
 
-    rwLayer, err := daemon.stores[container.OS].layerStore.CreateRWLayer(container.ID, layerID, rwLayerOpts)
+    rwLayer, err := daemon.layerStore.CreateRWLayer(container.ID, layerID, container.OS, rwLayerOpts)
     if err != nil {
         return err
     }
@@ -69,50 +69,46 @@ var ( |
| 69 | 69 |
errSystemNotSupported = errors.New("the Docker daemon is not supported on this platform")
|
| 70 | 70 |
) |
| 71 | 71 |
|
| 72 |
-type daemonStore struct {
|
|
| 73 |
- graphDriver string |
|
| 74 |
- imageRoot string |
|
| 72 |
+// Daemon holds information about the Docker daemon. |
|
| 73 |
+type Daemon struct {
|
|
| 74 |
+ ID string |
|
| 75 |
+ repository string |
|
| 76 |
+ containers container.Store |
|
| 77 |
+ containersReplica container.ViewDB |
|
| 78 |
+ execCommands *exec.Store |
|
| 79 |
+ downloadManager *xfer.LayerDownloadManager |
|
| 80 |
+ uploadManager *xfer.LayerUploadManager |
|
| 81 |
+ trustKey libtrust.PrivateKey |
|
| 82 |
+ idIndex *truncindex.TruncIndex |
|
| 83 |
+ configStore *config.Config |
|
| 84 |
+ statsCollector *stats.Collector |
|
| 85 |
+ defaultLogConfig containertypes.LogConfig |
|
| 86 |
+ RegistryService registry.Service |
|
| 87 |
+ EventsService *events.Events |
|
| 88 |
+ netController libnetwork.NetworkController |
|
| 89 |
+ volumes *store.VolumeStore |
|
| 90 |
+ discoveryWatcher discovery.Reloader |
|
| 91 |
+ root string |
|
| 92 |
+ seccompEnabled bool |
|
| 93 |
+ apparmorEnabled bool |
|
| 94 |
+ shutdown bool |
|
| 95 |
+ idMappings *idtools.IDMappings |
|
| 96 |
+ graphDrivers map[string]string // By operating system |
|
| 97 |
+ referenceStore refstore.Store |
|
| 75 | 98 |
imageStore image.Store |
| 99 |
+ imageRoot string |
|
| 76 | 100 |
layerStore layer.Store |
| 77 | 101 |
distributionMetadataStore dmetadata.Store |
| 78 |
-} |
|
| 79 |
- |
|
| 80 |
-// Daemon holds information about the Docker daemon. |
|
| 81 |
-type Daemon struct {
|
|
| 82 |
- ID string |
|
| 83 |
- repository string |
|
| 84 |
- containers container.Store |
|
| 85 |
- containersReplica container.ViewDB |
|
| 86 |
- execCommands *exec.Store |
|
| 87 |
- downloadManager *xfer.LayerDownloadManager |
|
| 88 |
- uploadManager *xfer.LayerUploadManager |
|
| 89 |
- trustKey libtrust.PrivateKey |
|
| 90 |
- idIndex *truncindex.TruncIndex |
|
| 91 |
- configStore *config.Config |
|
| 92 |
- statsCollector *stats.Collector |
|
| 93 |
- defaultLogConfig containertypes.LogConfig |
|
| 94 |
- RegistryService registry.Service |
|
| 95 |
- EventsService *events.Events |
|
| 96 |
- netController libnetwork.NetworkController |
|
| 97 |
- volumes *store.VolumeStore |
|
| 98 |
- discoveryWatcher discovery.Reloader |
|
| 99 |
- root string |
|
| 100 |
- seccompEnabled bool |
|
| 101 |
- apparmorEnabled bool |
|
| 102 |
- shutdown bool |
|
| 103 |
- idMappings *idtools.IDMappings |
|
| 104 |
- stores map[string]daemonStore // By container target platform |
|
| 105 |
- referenceStore refstore.Store |
|
| 106 |
- PluginStore *plugin.Store // todo: remove |
|
| 107 |
- pluginManager *plugin.Manager |
|
| 108 |
- linkIndex *linkIndex |
|
| 109 |
- containerd libcontainerd.Client |
|
| 110 |
- containerdRemote libcontainerd.Remote |
|
| 111 |
- defaultIsolation containertypes.Isolation // Default isolation mode on Windows |
|
| 112 |
- clusterProvider cluster.Provider |
|
| 113 |
- cluster Cluster |
|
| 114 |
- genericResources []swarm.GenericResource |
|
| 115 |
- metricsPluginListener net.Listener |
|
| 102 |
+ PluginStore *plugin.Store // todo: remove |
|
| 103 |
+ pluginManager *plugin.Manager |
|
| 104 |
+ linkIndex *linkIndex |
|
| 105 |
+ containerd libcontainerd.Client |
|
| 106 |
+ containerdRemote libcontainerd.Remote |
|
| 107 |
+ defaultIsolation containertypes.Isolation // Default isolation mode on Windows |
|
| 108 |
+ clusterProvider cluster.Provider |
|
| 109 |
+ cluster Cluster |
|
| 110 |
+ genericResources []swarm.GenericResource |
|
| 111 |
+ metricsPluginListener net.Listener |
|
| 116 | 112 |
|
| 117 | 113 |
machineMemory uint64 |
| 118 | 114 |
|
@@ -161,9 +157,9 @@ func (daemon *Daemon) restore() error {
         }
 
         // Ignore the container if it does not support the current driver being used by the graph
-        currentDriverForContainerOS := daemon.stores[container.OS].graphDriver
+        currentDriverForContainerOS := daemon.graphDrivers[container.OS]
         if (container.Driver == "" && currentDriverForContainerOS == "aufs") || container.Driver == currentDriverForContainerOS {
-            rwlayer, err := daemon.stores[container.OS].layerStore.GetRWLayer(container.ID)
+            rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
             if err != nil {
                 logrus.Errorf("Failed to load container mount %v: %v", id, err)
                 continue
@@ -706,11 +702,11 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
     // lcow. Unix platforms however run a single graphdriver for all containers, and it can
     // be set through an environment variable, a daemon start parameter, or chosen through
     // initialization of the layerstore through driver priority order for example.
-    d.stores = make(map[string]daemonStore)
+    d.graphDrivers = make(map[string]string)
     if runtime.GOOS == "windows" {
-        d.stores["windows"] = daemonStore{graphDriver: "windowsfilter"}
+        d.graphDrivers[runtime.GOOS] = "windowsfilter"
         if system.LCOWSupported() {
-            d.stores["linux"] = daemonStore{graphDriver: "lcow"}
+            d.graphDrivers["linux"] = "lcow"
         }
     } else {
         driverName := os.Getenv("DOCKER_DRIVER")
@@ -719,7 +715,7 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
         } else {
             logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName)
         }
-        d.stores[runtime.GOOS] = daemonStore{graphDriver: driverName} // May still be empty. Layerstore init determines instead.
+        d.graphDrivers[runtime.GOOS] = driverName // May still be empty. Layerstore init determines instead.
     }
 
     d.RegistryService = registryService
@@ -750,55 +746,43 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
         return nil, errors.Wrap(err, "couldn't create plugin manager")
     }
 
-    var graphDrivers []string
-    for operatingSystem, ds := range d.stores {
-        ls, err := layer.NewStoreFromOptions(layer.StoreOptions{
-            StorePath:                 config.Root,
-            MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
-            GraphDriver:               ds.graphDriver,
-            GraphDriverOptions:        config.GraphOptions,
-            IDMappings:                idMappings,
-            PluginGetter:              d.PluginStore,
-            ExperimentalEnabled:       config.Experimental,
-            OS:                        operatingSystem,
-        })
-        if err != nil {
-            return nil, err
-        }
-        ds.graphDriver = ls.DriverName() // As layerstore may set the driver
-        ds.layerStore = ls
-        d.stores[operatingSystem] = ds
-        graphDrivers = append(graphDrivers, ls.DriverName())
+    d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
+        Root:                      config.Root,
+        MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
+        GraphDrivers:              d.graphDrivers,
+        GraphDriverOptions:        config.GraphOptions,
+        IDMappings:                idMappings,
+        PluginGetter:              d.PluginStore,
+        ExperimentalEnabled:       config.Experimental,
+    })
+    if err != nil {
+        return nil, err
     }
 
-    // Configure and validate the kernels security support
-    if err := configureKernelSecuritySupport(config, graphDrivers); err != nil {
+    // As layerstore may set the driver
+    for os := range d.graphDrivers {
+        d.graphDrivers[os] = d.layerStore.DriverName(os)
+    }
+
+    // Configure and validate the kernels security support. Note this is a Linux/FreeBSD
+    // operation only, so it is safe to pass *just* the runtime OS graphdriver.
+    if err := configureKernelSecuritySupport(config, d.graphDrivers[runtime.GOOS]); err != nil {
         return nil, err
     }
 
     logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
-    lsMap := make(map[string]layer.Store)
-    for operatingSystem, ds := range d.stores {
-        lsMap[operatingSystem] = ds.layerStore
-    }
-    d.downloadManager = xfer.NewLayerDownloadManager(lsMap, *config.MaxConcurrentDownloads)
+    d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads)
     logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
     d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)
-    for operatingSystem, ds := range d.stores {
-        imageRoot := filepath.Join(config.Root, "image", ds.graphDriver)
-        ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
-        if err != nil {
-            return nil, err
-        }
 
-        var is image.Store
-        is, err = image.NewImageStore(ifs, operatingSystem, ds.layerStore)
-        if err != nil {
-            return nil, err
-        }
-        ds.imageRoot = imageRoot
-        ds.imageStore = is
-        d.stores[operatingSystem] = ds
+    d.imageRoot = filepath.Join(config.Root, "image", d.graphDrivers[runtime.GOOS])
+    ifs, err := image.NewFSStoreBackend(filepath.Join(d.imageRoot, "imagedb"))
+    if err != nil {
+        return nil, err
+    }
+    d.imageStore, err = image.NewImageStore(ifs, d.layerStore)
+    if err != nil {
+        return nil, err
     }
 
     // Configure the volumes driver
@@ -830,30 +814,25 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
     // operating systems, the list of graphdrivers available isn't user configurable.
     // For backwards compatibility, we just put it under the windowsfilter
     // directory regardless.
-    refStoreLocation := filepath.Join(d.stores[runtime.GOOS].imageRoot, `repositories.json`)
+    refStoreLocation := filepath.Join(d.imageRoot, `repositories.json`)
     rs, err := refstore.NewReferenceStore(refStoreLocation)
     if err != nil {
         return nil, fmt.Errorf("Couldn't create reference store repository: %s", err)
     }
     d.referenceStore = rs
 
-    for platform, ds := range d.stores {
-        dms, err := dmetadata.NewFSMetadataStore(filepath.Join(ds.imageRoot, "distribution"), platform)
-        if err != nil {
-            return nil, err
-        }
-
-        ds.distributionMetadataStore = dms
-        d.stores[platform] = ds
+    d.distributionMetadataStore, err = dmetadata.NewFSMetadataStore(filepath.Join(d.imageRoot, "distribution"))
+    if err != nil {
+        return nil, err
+    }
 
-        // No content-addressability migration on Windows as it never supported pre-CA
-        if runtime.GOOS != "windows" {
-            migrationStart := time.Now()
-            if err := v1.Migrate(config.Root, ds.graphDriver, ds.layerStore, ds.imageStore, rs, dms); err != nil {
-                logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
-            }
-            logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
+    // No content-addressability migration on Windows as it never supported pre-CA
+    if runtime.GOOS != "windows" {
+        migrationStart := time.Now()
+        if err := v1.Migrate(config.Root, d.graphDrivers[runtime.GOOS], d.layerStore, d.imageStore, rs, d.distributionMetadataStore); err != nil {
+            logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
         }
+        logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
     }
 
     // Discovery is only enabled when the daemon is launched with an address to advertise. When
@@ -922,13 +901,13 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
     engineMemory.Set(float64(info.MemTotal))
 
     gd := ""
-    for platform, ds := range d.stores {
+    for os, driver := range d.graphDrivers {
         if len(gd) > 0 {
             gd += ", "
         }
-        gd += ds.graphDriver
-        if len(d.stores) > 1 {
-            gd = fmt.Sprintf("%s (%s)", gd, platform)
+        gd += driver
+        if len(d.graphDrivers) > 1 {
+            gd = fmt.Sprintf("%s (%s)", gd, os)
         }
     }
     logrus.WithFields(logrus.Fields{
@@ -1009,7 +988,7 @@ func (daemon *Daemon) Shutdown() error {
                 logrus.Errorf("Stop container error: %v", err)
                 return
             }
-            if mountid, err := daemon.stores[c.OS].layerStore.GetMountID(c.ID); err == nil {
+            if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil {
                 daemon.cleanupMountsByID(mountid)
             }
             logrus.Debugf("container stopped %s", c.ID)
@@ -1022,12 +1001,8 @@ func (daemon *Daemon) Shutdown() error {
         }
     }
 
-    for platform, ds := range daemon.stores {
-        if ds.layerStore != nil {
-            if err := ds.layerStore.Cleanup(); err != nil {
-                logrus.Errorf("Error during layer Store.Cleanup(): %v %s", err, platform)
-            }
-        }
+    if err := daemon.layerStore.Cleanup(); err != nil {
+        logrus.Errorf("Error during layer Store.Cleanup(): %v", err)
     }
 
     // If we are part of a cluster, clean up cluster's stuff
@@ -1107,8 +1082,8 @@ func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) {
 }
 
 // GraphDriverName returns the name of the graph driver used by the layer.Store
-func (daemon *Daemon) GraphDriverName(platform string) string {
-    return daemon.stores[platform].layerStore.DriverName()
+func (daemon *Daemon) GraphDriverName(os string) string {
+    return daemon.layerStore.DriverName(os)
 }
 
 // prepareTempDir prepares and returns the default directory to use
@@ -814,22 +814,14 @@ func overlaySupportsSelinux() (bool, error) {
 }
 
 // configureKernelSecuritySupport configures and validates security support for the kernel
-func configureKernelSecuritySupport(config *config.Config, driverNames []string) error {
+func configureKernelSecuritySupport(config *config.Config, driverName string) error {
     if config.EnableSelinuxSupport {
         if !selinuxEnabled() {
             logrus.Warn("Docker could not enable SELinux on the host system")
             return nil
         }
 
-        overlayFound := false
-        for _, d := range driverNames {
-            if d == "overlay" || d == "overlay2" {
-                overlayFound = true
-                break
-            }
-        }
-
-        if overlayFound {
+        if driverName == "overlay" || driverName == "overlay2" {
             // If driver is overlay or overlay2, make sure kernel
             // supports selinux with overlay.
             supported, err := overlaySupportsSelinux()
@@ -838,7 +830,7 @@ func configureKernelSecuritySupport(config *config.Config, driverNames []string)
             }
 
             if !supported {
-                logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverNames)
+                logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName)
             }
         }
     } else {
@@ -262,7 +262,7 @@ func ensureServicesInstalled(services []string) error {
 }
 
 // configureKernelSecuritySupport configures and validate security support for the kernel
-func configureKernelSecuritySupport(config *config.Config, driverNames []string) error {
+func configureKernelSecuritySupport(config *config.Config, driverName string) error {
     return nil
 }
 
@@ -118,7 +118,7 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemo
     // When container creation fails and `RWLayer` has not been created yet, we
    // do not call `ReleaseRWLayer`
     if container.RWLayer != nil {
-        metadata, err := daemon.stores[container.OS].layerStore.ReleaseRWLayer(container.RWLayer)
+        metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer)
         layer.LogReleaseMetadata(metadata)
         if err != nil && err != layer.ErrMountDoesNotExist && !os.IsNotExist(errors.Cause(err)) {
             e := errors.Wrapf(err, "driver %q failed to remove root filesystem for %s", daemon.GraphDriverName(container.OS), container.ID)
@@ -15,12 +15,12 @@ import (
     "github.com/sirupsen/logrus"
 )
 
-func (daemon *Daemon) getLayerRefs(platform string) map[layer.ChainID]int {
-    tmpImages := daemon.stores[platform].imageStore.Map()
+func (daemon *Daemon) getLayerRefs() map[layer.ChainID]int {
+    tmpImages := daemon.imageStore.Map()
     layerRefs := map[layer.ChainID]int{}
     for id, img := range tmpImages {
         dgst := digest.Digest(id)
-        if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.stores[platform].imageStore.Children(id)) != 0 {
+        if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 {
             continue
         }
 
@@ -53,7 +53,6 @@ func (daemon *Daemon) SystemDiskUsage(ctx context.Context) (*types.DiskUsage, er
     }
 
     // Get all top images with extra attributes
-    // TODO @jhowardmsft LCOW. This may need revisiting
     allImages, err := daemon.Images(filters.NewArgs(), false, true)
     if err != nil {
         return nil, fmt.Errorf("failed to retrieve image list: %v", err)
@@ -96,24 +95,22 @@ func (daemon *Daemon) SystemDiskUsage(ctx context.Context) (*types.DiskUsage, er
 
     // Get total layers size on disk
     var allLayersSize int64
-    for platform := range daemon.stores {
-        layerRefs := daemon.getLayerRefs(platform)
-        allLayers := daemon.stores[platform].layerStore.Map()
-        for _, l := range allLayers {
-            select {
-            case <-ctx.Done():
-                return nil, ctx.Err()
-            default:
-                size, err := l.DiffSize()
-                if err == nil {
-                    if _, ok := layerRefs[l.ChainID()]; ok {
-                        allLayersSize += size
-                    } else {
-                        logrus.Warnf("found leaked image layer %v platform %s", l.ChainID(), platform)
-                    }
+    layerRefs := daemon.getLayerRefs()
+    allLayers := daemon.layerStore.Map()
+    for _, l := range allLayers {
+        select {
+        case <-ctx.Done():
+            return nil, ctx.Err()
+        default:
+            size, err := l.DiffSize()
+            if err == nil {
+                if _, ok := layerRefs[l.ChainID()]; ok {
+                    allLayersSize += size
                 } else {
-                    logrus.Warnf("failed to get diff size for layer %v %s", l.ChainID(), platform)
+                    logrus.Warnf("found leaked image layer %v", l.ChainID())
                 }
+            } else {
+                logrus.Warnf("failed to get diff size for layer %v", l.ChainID())
             }
         }
     }
@@ -15,12 +15,12 @@ func (daemon *Daemon) getSize(containerID string) (int64, int64) {
         err error
     )
 
-    rwlayer, err := daemon.stores[runtime.GOOS].layerStore.GetRWLayer(containerID)
+    rwlayer, err := daemon.layerStore.GetRWLayer(containerID)
     if err != nil {
         logrus.Errorf("Failed to compute size of container rootfs %v: %v", containerID, err)
         return sizeRw, sizeRootfs
     }
-    defer daemon.stores[runtime.GOOS].layerStore.ReleaseRWLayer(rwlayer)
+    defer daemon.layerStore.ReleaseRWLayer(rwlayer)
 
     sizeRw, err = rwlayer.Size()
     if err != nil {
@@ -38,32 +38,31 @@ func (daemon *Daemon) GetImageIDAndOS(refOrID string) (image.ID, string, error) |
| 38 | 38 |
return "", "", errImageDoesNotExist{ref}
|
| 39 | 39 |
} |
| 40 | 40 |
id := image.IDFromDigest(digested.Digest()) |
| 41 |
- for platform := range daemon.stores {
|
|
| 42 |
- if _, err = daemon.stores[platform].imageStore.Get(id); err == nil {
|
|
| 43 |
- return id, platform, nil |
|
| 44 |
- } |
|
| 41 |
+ if img, err := daemon.imageStore.Get(id); err == nil {
|
|
| 42 |
+ return id, img.OperatingSystem(), nil |
|
| 45 | 43 |
} |
| 46 | 44 |
return "", "", errImageDoesNotExist{ref}
|
| 47 | 45 |
} |
| 48 | 46 |
|
| 49 | 47 |
if digest, err := daemon.referenceStore.Get(namedRef); err == nil {
|
| 50 | 48 |
// Search the image stores to get the operating system, defaulting to host OS. |
| 51 |
- imageOS := runtime.GOOS |
|
| 52 | 49 |
id := image.IDFromDigest(digest) |
| 53 |
- for os := range daemon.stores {
|
|
| 54 |
- if img, err := daemon.stores[os].imageStore.Get(id); err == nil {
|
|
| 55 |
- imageOS = img.OperatingSystem() |
|
| 56 |
- break |
|
| 57 |
- } |
|
| 50 |
+ if img, err := daemon.imageStore.Get(id); err == nil {
|
|
| 51 |
+ return id, img.OperatingSystem(), nil |
|
| 58 | 52 |
} |
| 59 |
- return id, imageOS, nil |
|
| 60 | 53 |
} |
| 61 | 54 |
|
| 62 | 55 |
// Search based on ID |
| 63 |
- for os := range daemon.stores {
|
|
| 64 |
- if id, err := daemon.stores[os].imageStore.Search(refOrID); err == nil {
|
|
| 65 |
- return id, os, nil |
|
| 56 |
+ if id, err := daemon.imageStore.Search(refOrID); err == nil {
|
|
| 57 |
+ img, err := daemon.imageStore.Get(id) |
|
| 58 |
+ if err != nil {
|
|
| 59 |
+ return "", "", errImageDoesNotExist{ref}
|
|
| 66 | 60 |
} |
| 61 |
+ imageOS := img.OperatingSystem() |
|
| 62 |
+ if imageOS == "" {
|
|
| 63 |
+ imageOS = runtime.GOOS |
|
| 64 |
+ } |
|
| 65 |
+ return id, imageOS, nil |
|
| 67 | 66 |
} |
| 68 | 67 |
|
| 69 | 68 |
return "", "", errImageDoesNotExist{ref}
|
| ... | ... |
@@ -71,9 +70,9 @@ func (daemon *Daemon) GetImageIDAndOS(refOrID string) (image.ID, string, error) |
| 71 | 71 |
|
| 72 | 72 |
// GetImage returns an image corresponding to the image referred to by refOrID. |
| 73 | 73 |
func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) {
|
| 74 |
- imgID, os, err := daemon.GetImageIDAndOS(refOrID) |
|
| 74 |
+ imgID, _, err := daemon.GetImageIDAndOS(refOrID) |
|
| 75 | 75 |
if err != nil {
|
| 76 | 76 |
return nil, err |
| 77 | 77 |
} |
| 78 |
- return daemon.stores[os].imageStore.Get(imgID) |
|
| 78 |
+ return daemon.imageStore.Get(imgID) |
|
| 79 | 79 |
} |
@@ -66,7 +66,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
     start := time.Now()
     records := []types.ImageDeleteResponseItem{}
 
-    imgID, os, err := daemon.GetImageIDAndOS(imageRef)
+    imgID, _, err := daemon.GetImageIDAndOS(imageRef)
     if err != nil {
         return nil, err
     }
@@ -95,7 +95,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
         return nil, err
     }
 
-    parsedRef, err = daemon.removeImageRef(os, parsedRef)
+    parsedRef, err = daemon.removeImageRef(parsedRef)
     if err != nil {
         return nil, err
     }
@@ -123,7 +123,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
     remainingRefs := []reference.Named{}
     for _, repoRef := range repoRefs {
         if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() {
-            if _, err := daemon.removeImageRef(os, repoRef); err != nil {
+            if _, err := daemon.removeImageRef(repoRef); err != nil {
                 return records, err
             }
 
@@ -153,12 +153,12 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
     if !force {
         c |= conflictSoft &^ conflictActiveReference
     }
-    if conflict := daemon.checkImageDeleteConflict(imgID, os, c); conflict != nil {
+    if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil {
         return nil, conflict
     }
 
     for _, repoRef := range repoRefs {
-        parsedRef, err := daemon.removeImageRef(os, repoRef)
+        parsedRef, err := daemon.removeImageRef(repoRef)
         if err != nil {
             return nil, err
         }
@@ -171,7 +171,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
         }
     }
 
-    if err := daemon.imageDeleteHelper(imgID, os, &records, force, prune, removedRepositoryRef); err != nil {
+    if err := daemon.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef); err != nil {
         return nil, err
     }
 
@@ -232,7 +232,7 @@ func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Contai
 // repositoryRef must not be an image ID but a repository name followed by an
 // optional tag or digest reference. If tag or digest is omitted, the default
 // tag is used. Returns the resolved image reference and an error.
-func (daemon *Daemon) removeImageRef(platform string, ref reference.Named) (reference.Named, error) {
+func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, error) {
     ref = reference.TagNameOnly(ref)
 
     // Ignore the boolean value returned, as far as we're concerned, this
@@ -248,11 +248,11 @@ func (daemon *Daemon) removeImageRef(platform string, ref reference.Named) (refe
 // on the first encountered error. Removed references are logged to this
 // daemon's event service. An "Untagged" types.ImageDeleteResponseItem is added to the
 // given list of records.
-func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, platform string, records *[]types.ImageDeleteResponseItem) error {
+func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDeleteResponseItem) error {
     imageRefs := daemon.referenceStore.References(imgID.Digest())
 
     for _, imageRef := range imageRefs {
-        parsedRef, err := daemon.removeImageRef(platform, imageRef)
+        parsedRef, err := daemon.removeImageRef(imageRef)
         if err != nil {
             return err
         }
@@ -299,15 +299,15 @@ func (idc *imageDeleteConflict) Conflict() {}
 // conflict is encountered, it will be returned immediately without deleting
 // the image. If quiet is true, any encountered conflicts will be ignored and
 // the function will return nil immediately without deleting the image.
-func (daemon *Daemon) imageDeleteHelper(imgID image.ID, platform string, records *[]types.ImageDeleteResponseItem, force, prune, quiet bool) error {
+func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDeleteResponseItem, force, prune, quiet bool) error {
     // First, determine if this image has any conflicts. Ignore soft conflicts
     // if force is true.
     c := conflictHard
     if !force {
         c |= conflictSoft
     }
-    if conflict := daemon.checkImageDeleteConflict(imgID, platform, c); conflict != nil {
-        if quiet && (!daemon.imageIsDangling(imgID, platform) || conflict.used) {
+    if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil {
+        if quiet && (!daemon.imageIsDangling(imgID) || conflict.used) {
            // Ignore conflicts UNLESS the image is "dangling" or not being used in
            // which case we want the user to know.
            return nil
@@ -318,18 +318,18 @@ func (daemon *Daemon) imageDeleteHelper(imgID image.ID, platform string, records
         return conflict
     }
 
-    parent, err := daemon.stores[platform].imageStore.GetParent(imgID)
+    parent, err := daemon.imageStore.GetParent(imgID)
     if err != nil {
         // There may be no parent
         parent = ""
     }
 
     // Delete all repository tag/digest references to this image.
-    if err := daemon.removeAllReferencesToImageID(imgID, platform, records); err != nil {
+    if err := daemon.removeAllReferencesToImageID(imgID, records); err != nil {
        return err
     }
 
-    removedLayers, err := daemon.stores[platform].imageStore.Delete(imgID)
+    removedLayers, err := daemon.imageStore.Delete(imgID)
     if err != nil {
         return err
     }
@@ -349,7 +349,7 @@ func (daemon *Daemon) imageDeleteHelper(imgID image.ID, platform string, records
     // either running or stopped).
    // Do not force prunings, but do so quietly (stopping on any encountered
    // conflicts).
-    return daemon.imageDeleteHelper(parent, platform, records, false, true, true)
+    return daemon.imageDeleteHelper(parent, records, false, true, true)
 }
 
 // checkImageDeleteConflict determines whether there are any conflicts
@@ -358,9 +358,9 @@ func (daemon *Daemon) imageDeleteHelper(imgID image.ID, platform string, records
 // using the image. A soft conflict is any tags/digest referencing the given
 // image or any stopped container using the image. If ignoreSoftConflicts is
 // true, this function will not check for soft conflict conditions.
-func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, platform string, mask conflictType) *imageDeleteConflict {
+func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict {
     // Check if the image has any descendant images.
-    if mask&conflictDependentChild != 0 && len(daemon.stores[platform].imageStore.Children(imgID)) > 0 {
+    if mask&conflictDependentChild != 0 && len(daemon.imageStore.Children(imgID)) > 0 {
         return &imageDeleteConflict{
             hard:  true,
             imgID: imgID,
@@ -411,6 +411,6 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, platform string,
 // imageIsDangling returns whether the given image is "dangling" which means
 // that there are no repository references to the given image and it has no
 // child images.
-func (daemon *Daemon) imageIsDangling(imgID image.ID, platform string) bool {
-    return !(len(daemon.referenceStore.References(imgID.Digest())) > 0 || len(daemon.stores[platform].imageStore.Children(imgID)) > 0)
+func (daemon *Daemon) imageIsDangling(imgID image.ID) bool {
+    return !(len(daemon.referenceStore.References(imgID.Digest())) > 0 || len(daemon.imageStore.Children(imgID)) > 0)
 }
@@ -2,10 +2,8 @@ package daemon
 
 import (
     "io"
-    "runtime"
 
     "github.com/docker/docker/image/tarexport"
-    "github.com/docker/docker/pkg/system"
 )
 
 // ExportImage exports a list of images to the given output stream. The
@@ -14,12 +12,7 @@ import (
 // the same tag are exported. names is the set of tags to export, and
 // outStream is the writer which the images are written to.
 func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error {
-    // TODO @jhowardmsft LCOW. This will need revisiting later.
-    platform := runtime.GOOS
-    if system.LCOWSupported() {
-        platform = "linux"
-    }
-    imageExporter := tarexport.NewTarExporter(daemon.stores[platform].imageStore, daemon.stores[platform].layerStore, daemon.referenceStore, daemon)
+    imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon)
     return imageExporter.Save(names, outStream)
 }
 
@@ -27,11 +20,6 @@ func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error {
 // complement of ImageExport. The input stream is an uncompressed tar
 // ball containing images and metadata.
 func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error {
-    // TODO @jhowardmsft LCOW. This will need revisiting later.
-    platform := runtime.GOOS
-    if system.LCOWSupported() {
-        platform = "linux"
-    }
-    imageExporter := tarexport.NewTarExporter(daemon.stores[platform].imageStore, daemon.stores[platform].layerStore, daemon.referenceStore, daemon)
+    imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon)
     return imageExporter.Load(inTar, outStream, quiet)
 }
@@ -2,7 +2,6 @@ package daemon
 
 import (
     "fmt"
-    "runtime"
     "time"
 
     "github.com/docker/distribution/reference"
@@ -19,12 +18,6 @@ func (daemon *Daemon) ImageHistory(name string) ([]*image.HistoryResponseItem, e
         return nil, err
     }
 
-    // If the image OS isn't set, assume it's the host OS
-    platform := img.OS
-    if platform == "" {
-        platform = runtime.GOOS
-    }
-
     history := []*image.HistoryResponseItem{}
 
     layerCounter := 0
@@ -40,12 +33,12 @@ func (daemon *Daemon) ImageHistory(name string) ([]*image.HistoryResponseItem, e
         }
 
         rootFS.Append(img.RootFS.DiffIDs[layerCounter])
-        l, err := daemon.stores[platform].layerStore.Get(rootFS.ChainID())
+        l, err := daemon.layerStore.Get(rootFS.ChainID())
         if err != nil {
             return nil, err
         }
         layerSize, err = l.DiffSize()
-        layer.ReleaseAndLog(daemon.stores[platform].layerStore, l)
+        layer.ReleaseAndLog(daemon.layerStore, l)
         if err != nil {
             return nil, err
         }
@@ -19,9 +19,9 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
     }
 
     // If the image OS isn't set, assume it's the host OS
-    platform := img.OS
-    if platform == "" {
-        platform = runtime.GOOS
+    os := img.OS
+    if os == "" {
+        os = runtime.GOOS
     }
 
     refs := daemon.referenceStore.References(img.ID().Digest())
@@ -40,11 +40,11 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
     var layerMetadata map[string]string
     layerID := img.RootFS.ChainID()
     if layerID != "" {
-        l, err := daemon.stores[platform].layerStore.Get(layerID)
+        l, err := daemon.layerStore.Get(layerID)
         if err != nil {
             return nil, err
         }
-        defer layer.ReleaseAndLog(daemon.stores[platform].layerStore, l)
+        defer layer.ReleaseAndLog(daemon.layerStore, l)
         size, err = l.Size()
         if err != nil {
             return nil, err
@@ -61,7 +61,7 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
         comment = img.History[len(img.History)-1].Comment
     }
 
-    lastUpdated, err := daemon.stores[platform].imageStore.GetLastUpdated(img.ID())
+    lastUpdated, err := daemon.imageStore.GetLastUpdated(img.ID())
     if err != nil {
         return nil, err
     }
@@ -79,7 +79,7 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
         Author:        img.Author,
         Config:        img.Config,
         Architecture:  img.Architecture,
-        Os:            platform,
+        Os:            os,
         OsVersion:     img.OSVersion,
         Size:          size,
         VirtualSize:   size, // TODO: field unused, deprecate
@@ -89,7 +89,7 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
|
| 89 | 89 |
}, |
| 90 | 90 |
} |
| 91 | 91 |
|
| 92 |
- imageInspect.GraphDriver.Name = daemon.GraphDriverName(platform) |
|
| 92 |
+ imageInspect.GraphDriver.Name = daemon.GraphDriverName(os) |
|
| 93 | 93 |
imageInspect.GraphDriver.Data = layerMetadata |
| 94 | 94 |
|
| 95 | 95 |
return imageInspect, nil |
| ... | ... |
@@ -19,7 +19,7 @@ import ( |
| 19 | 19 |
|
| 20 | 20 |
// PullImage initiates a pull operation. image is the repository name to pull, and |
| 21 | 21 |
// tag may be either empty, or indicate a specific tag to pull. |
| 22 |
-func (daemon *Daemon) PullImage(ctx context.Context, image, tag, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
|
|
| 22 |
+func (daemon *Daemon) PullImage(ctx context.Context, image, tag, os string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
|
|
| 23 | 23 |
// Special case: "pull -a" may send an image name with a |
| 24 | 24 |
// trailing :. This is ugly, but let's not break API |
| 25 | 25 |
// compatibility. |
| ... | ... |
@@ -44,10 +44,10 @@ func (daemon *Daemon) PullImage(ctx context.Context, image, tag, platform string |
| 44 | 44 |
} |
| 45 | 45 |
} |
| 46 | 46 |
|
| 47 |
- return daemon.pullImageWithReference(ctx, ref, platform, metaHeaders, authConfig, outStream) |
|
| 47 |
+ return daemon.pullImageWithReference(ctx, ref, os, metaHeaders, authConfig, outStream) |
|
| 48 | 48 |
} |
| 49 | 49 |
|
| 50 |
-func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.Named, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
|
|
| 50 |
+func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.Named, os string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
|
|
| 51 | 51 |
// Include a buffer so that slow client connections don't affect |
| 52 | 52 |
// transfer performance. |
| 53 | 53 |
progressChan := make(chan progress.Progress, 100) |
| ... | ... |
@@ -62,8 +62,8 @@ func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference. |
| 62 | 62 |
}() |
| 63 | 63 |
|
| 64 | 64 |
// Default to the host OS platform in case it hasn't been populated with an explicit value. |
| 65 |
- if platform == "" {
|
|
| 66 |
- platform = runtime.GOOS |
|
| 65 |
+ if os == "" {
|
|
| 66 |
+ os = runtime.GOOS |
|
| 67 | 67 |
} |
| 68 | 68 |
|
| 69 | 69 |
imagePullConfig := &distribution.ImagePullConfig{
|
| ... | ... |
@@ -73,13 +73,13 @@ func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference. |
| 73 | 73 |
ProgressOutput: progress.ChanOutput(progressChan), |
| 74 | 74 |
RegistryService: daemon.RegistryService, |
| 75 | 75 |
ImageEventLogger: daemon.LogImageEvent, |
| 76 |
- MetadataStore: daemon.stores[platform].distributionMetadataStore, |
|
| 77 |
- ImageStore: distribution.NewImageConfigStoreFromStore(daemon.stores[platform].imageStore), |
|
| 76 |
+ MetadataStore: daemon.distributionMetadataStore, |
|
| 77 |
+ ImageStore: distribution.NewImageConfigStoreFromStore(daemon.imageStore), |
|
| 78 | 78 |
ReferenceStore: daemon.referenceStore, |
| 79 | 79 |
}, |
| 80 | 80 |
DownloadManager: daemon.downloadManager, |
| 81 | 81 |
Schema2Types: distribution.ImageTypes, |
| 82 |
- Platform: platform, |
|
| 82 |
+ OS: os, |
|
| 83 | 83 |
} |
| 84 | 84 |
|
| 85 | 85 |
err := distribution.Pull(ctx, ref, imagePullConfig) |
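Note: callers now pass an operating system string (not a platform) down the pull path, and an empty value is defaulted to runtime.GOOS in pullImageWithReference above. A hypothetical host-OS pull; the nil metaHeaders and authConfig arguments are purely illustrative:

// Pull "alpine:3.6" for the host operating system.
if err := daemon.PullImage(ctx, "alpine", "3.6", "", nil, nil, outStream); err != nil {
	return err
}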
| ... | ... |
@@ -2,7 +2,6 @@ package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 | 4 |
"io" |
| 5 |
- "runtime" |
|
| 6 | 5 |
|
| 7 | 6 |
"github.com/docker/distribution/manifest/schema2" |
| 8 | 7 |
"github.com/docker/distribution/reference" |
| ... | ... |
@@ -10,7 +9,6 @@ import ( |
| 10 | 10 |
"github.com/docker/docker/distribution" |
| 11 | 11 |
progressutils "github.com/docker/docker/distribution/utils" |
| 12 | 12 |
"github.com/docker/docker/pkg/progress" |
| 13 |
- "github.com/docker/docker/pkg/system" |
|
| 14 | 13 |
"golang.org/x/net/context" |
| 15 | 14 |
) |
| 16 | 15 |
|
| ... | ... |
@@ -41,12 +39,6 @@ func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHead |
| 41 | 41 |
close(writesDone) |
| 42 | 42 |
}() |
| 43 | 43 |
|
| 44 |
- // TODO @jhowardmsft LCOW Support. This will require revisiting. For now, hard-code. |
|
| 45 |
- platform := runtime.GOOS |
|
| 46 |
- if system.LCOWSupported() {
|
|
| 47 |
- platform = "linux" |
|
| 48 |
- } |
|
| 49 |
- |
|
| 50 | 44 |
imagePushConfig := &distribution.ImagePushConfig{
|
| 51 | 45 |
Config: distribution.Config{
|
| 52 | 46 |
MetaHeaders: metaHeaders, |
| ... | ... |
@@ -54,12 +46,12 @@ func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHead |
| 54 | 54 |
ProgressOutput: progress.ChanOutput(progressChan), |
| 55 | 55 |
RegistryService: daemon.RegistryService, |
| 56 | 56 |
ImageEventLogger: daemon.LogImageEvent, |
| 57 |
- MetadataStore: daemon.stores[platform].distributionMetadataStore, |
|
| 58 |
- ImageStore: distribution.NewImageConfigStoreFromStore(daemon.stores[platform].imageStore), |
|
| 57 |
+ MetadataStore: daemon.distributionMetadataStore, |
|
| 58 |
+ ImageStore: distribution.NewImageConfigStoreFromStore(daemon.imageStore), |
|
| 59 | 59 |
ReferenceStore: daemon.referenceStore, |
| 60 | 60 |
}, |
| 61 | 61 |
ConfigMediaType: schema2.MediaTypeImageConfig, |
| 62 |
- LayerStore: distribution.NewLayerProviderFromStore(daemon.stores[platform].layerStore), |
|
| 62 |
+ LayerStore: distribution.NewLayerProviderFromStore(daemon.layerStore), |
|
| 63 | 63 |
TrustKey: daemon.trustKey, |
| 64 | 64 |
UploadManager: daemon.uploadManager, |
| 65 | 65 |
} |
| ... | ... |
@@ -8,7 +8,7 @@ import ( |
| 8 | 8 |
// TagImage creates the tag specified by newTag, pointing to the image named |
| 9 | 9 |
// imageName (alternatively, imageName can also be an image ID). |
| 10 | 10 |
func (daemon *Daemon) TagImage(imageName, repository, tag string) error {
|
| 11 |
- imageID, os, err := daemon.GetImageIDAndOS(imageName) |
|
| 11 |
+ imageID, _, err := daemon.GetImageIDAndOS(imageName) |
|
| 12 | 12 |
if err != nil {
|
| 13 | 13 |
return err |
| 14 | 14 |
} |
| ... | ... |
@@ -23,16 +23,16 @@ func (daemon *Daemon) TagImage(imageName, repository, tag string) error {
|
| 23 | 23 |
} |
| 24 | 24 |
} |
| 25 | 25 |
|
| 26 |
- return daemon.TagImageWithReference(imageID, os, newTag) |
|
| 26 |
+ return daemon.TagImageWithReference(imageID, newTag) |
|
| 27 | 27 |
} |
| 28 | 28 |
|
| 29 | 29 |
// TagImageWithReference adds the given reference to the image ID provided. |
| 30 |
-func (daemon *Daemon) TagImageWithReference(imageID image.ID, os string, newTag reference.Named) error {
|
|
| 30 |
+func (daemon *Daemon) TagImageWithReference(imageID image.ID, newTag reference.Named) error {
|
|
| 31 | 31 |
if err := daemon.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil {
|
| 32 | 32 |
return err |
| 33 | 33 |
} |
| 34 | 34 |
|
| 35 |
- if err := daemon.stores[os].imageStore.SetLastUpdated(imageID); err != nil {
|
|
| 35 |
+ if err := daemon.imageStore.SetLastUpdated(imageID); err != nil {
|
|
| 36 | 36 |
return err |
| 37 | 37 |
} |
| 38 | 38 |
daemon.LogImageEvent(imageID.String(), reference.FamiliarString(newTag), "tag") |
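Note: with the image store coalesced, tagging no longer needs an OS. A hypothetical caller under the new two-argument signature (reference parsing shown only for completeness):

named, err := reference.ParseNormalizedNamed("example.com/app:v1")
if err != nil {
	return err
}
// Tag the freshly built image ID with the parsed reference.
if err := daemon.TagImageWithReference(imageID, reference.TagNameOnly(named)); err != nil {
	return err
}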
| ... | ... |
@@ -3,7 +3,6 @@ package daemon |
| 3 | 3 |
import ( |
| 4 | 4 |
"encoding/json" |
| 5 | 5 |
"fmt" |
| 6 |
- "runtime" |
|
| 7 | 6 |
"sort" |
| 8 | 7 |
"time" |
| 9 | 8 |
|
| ... | ... |
@@ -15,7 +14,6 @@ import ( |
| 15 | 15 |
"github.com/docker/docker/container" |
| 16 | 16 |
"github.com/docker/docker/image" |
| 17 | 17 |
"github.com/docker/docker/layer" |
| 18 |
- "github.com/docker/docker/pkg/system" |
|
| 19 | 18 |
) |
| 20 | 19 |
|
| 21 | 20 |
var acceptedImageFilterTags = map[string]bool{
|
| ... | ... |
@@ -36,12 +34,7 @@ func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created }
|
| 36 | 36 |
|
| 37 | 37 |
// Map returns a map of all images in the ImageStore |
| 38 | 38 |
func (daemon *Daemon) Map() map[image.ID]*image.Image {
|
| 39 |
- // TODO @jhowardmsft LCOW. This can be removed when imagestores are coalesced |
|
| 40 |
- platform := runtime.GOOS |
|
| 41 |
- if system.LCOWSupported() {
|
|
| 42 |
- platform = "linux" |
|
| 43 |
- } |
|
| 44 |
- return daemon.stores[platform].imageStore.Map() |
|
| 39 |
+ return daemon.imageStore.Map() |
|
| 45 | 40 |
} |
| 46 | 41 |
|
| 47 | 42 |
// Images returns a filtered list of images. filterArgs is a JSON-encoded set |
| ... | ... |
@@ -50,13 +43,6 @@ func (daemon *Daemon) Map() map[image.ID]*image.Image {
|
| 50 | 50 |
// named all controls whether all images in the graph are filtered, or just |
| 51 | 51 |
// the heads. |
| 52 | 52 |
func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) {
|
| 53 |
- |
|
| 54 |
- // TODO @jhowardmsft LCOW. This can be removed when imagestores are coalesced |
|
| 55 |
- platform := runtime.GOOS |
|
| 56 |
- if system.LCOWSupported() {
|
|
| 57 |
- platform = "linux" |
|
| 58 |
- } |
|
| 59 |
- |
|
| 60 | 53 |
var ( |
| 61 | 54 |
allImages map[image.ID]*image.Image |
| 62 | 55 |
err error |
| ... | ... |
@@ -75,9 +61,9 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs |
| 75 | 75 |
} |
| 76 | 76 |
} |
| 77 | 77 |
if danglingOnly {
|
| 78 |
- allImages = daemon.stores[platform].imageStore.Heads() |
|
| 78 |
+ allImages = daemon.imageStore.Heads() |
|
| 79 | 79 |
} else {
|
| 80 |
- allImages = daemon.stores[platform].imageStore.Map() |
|
| 80 |
+ allImages = daemon.imageStore.Map() |
|
| 81 | 81 |
} |
| 82 | 82 |
|
| 83 | 83 |
var beforeFilter, sinceFilter *image.Image |
| ... | ... |
@@ -130,7 +116,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs |
| 130 | 130 |
layerID := img.RootFS.ChainID() |
| 131 | 131 |
var size int64 |
| 132 | 132 |
if layerID != "" {
|
| 133 |
- l, err := daemon.stores[platform].layerStore.Get(layerID) |
|
| 133 |
+ l, err := daemon.layerStore.Get(layerID) |
|
| 134 | 134 |
if err != nil {
|
| 135 | 135 |
// The layer may have been deleted between the call to `Map()` or |
| 136 | 136 |
// `Heads()` and the call to `Get()`, so we just ignore this error |
| ... | ... |
@@ -141,7 +127,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs |
| 141 | 141 |
} |
| 142 | 142 |
|
| 143 | 143 |
size, err = l.Size() |
| 144 |
- layer.ReleaseAndLog(daemon.stores[platform].layerStore, l) |
|
| 144 |
+ layer.ReleaseAndLog(daemon.layerStore, l) |
|
| 145 | 145 |
if err != nil {
|
| 146 | 146 |
return nil, err |
| 147 | 147 |
} |
| ... | ... |
@@ -171,7 +157,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs |
| 171 | 171 |
} |
| 172 | 172 |
} |
| 173 | 173 |
if newImage.RepoDigests == nil && newImage.RepoTags == nil {
|
| 174 |
- if all || len(daemon.stores[platform].imageStore.Children(id)) == 0 {
|
|
| 174 |
+ if all || len(daemon.imageStore.Children(id)) == 0 {
|
|
| 175 | 175 |
|
| 176 | 176 |
if imageFilters.Contains("dangling") && !danglingOnly {
|
| 177 | 177 |
//dangling=false case, so dangling image is not needed |
| ... | ... |
@@ -193,7 +179,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs |
| 193 | 193 |
// lazily init variables |
| 194 | 194 |
if imagesMap == nil {
|
| 195 | 195 |
allContainers = daemon.List() |
| 196 |
- allLayers = daemon.stores[platform].layerStore.Map() |
|
| 196 |
+ allLayers = daemon.layerStore.Map() |
|
| 197 | 197 |
imagesMap = make(map[*image.Image]*types.ImageSummary) |
| 198 | 198 |
layerRefs = make(map[layer.ChainID]int) |
| 199 | 199 |
} |
| ... | ... |
@@ -261,19 +247,14 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
|
| 261 | 261 |
img *image.Image |
| 262 | 262 |
err error |
| 263 | 263 |
) |
| 264 |
- for _, ds := range daemon.stores {
|
|
| 265 |
- if img, err = ds.imageStore.Get(image.ID(id)); err == nil {
|
|
| 266 |
- break |
|
| 267 |
- } |
|
| 268 |
- } |
|
| 269 |
- if err != nil {
|
|
| 264 |
+ if img, err = daemon.imageStore.Get(image.ID(id)); err != nil {
|
|
| 270 | 265 |
return "", err |
| 271 | 266 |
} |
| 272 | 267 |
|
| 273 | 268 |
var parentImg *image.Image |
| 274 | 269 |
var parentChainID layer.ChainID |
| 275 | 270 |
if len(parent) != 0 {
|
| 276 |
- parentImg, err = daemon.stores[img.OperatingSystem()].imageStore.Get(image.ID(parent)) |
|
| 271 |
+ parentImg, err = daemon.imageStore.Get(image.ID(parent)) |
|
| 277 | 272 |
if err != nil {
|
| 278 | 273 |
return "", errors.Wrap(err, "error getting specified parent layer") |
| 279 | 274 |
} |
| ... | ... |
@@ -283,11 +264,11 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
|
| 283 | 283 |
parentImg = &image.Image{RootFS: rootFS}
|
| 284 | 284 |
} |
| 285 | 285 |
|
| 286 |
- l, err := daemon.stores[img.OperatingSystem()].layerStore.Get(img.RootFS.ChainID()) |
|
| 286 |
+ l, err := daemon.layerStore.Get(img.RootFS.ChainID()) |
|
| 287 | 287 |
if err != nil {
|
| 288 | 288 |
return "", errors.Wrap(err, "error getting image layer") |
| 289 | 289 |
} |
| 290 |
- defer daemon.stores[img.OperatingSystem()].layerStore.Release(l) |
|
| 290 |
+ defer daemon.layerStore.Release(l) |
|
| 291 | 291 |
|
| 292 | 292 |
ts, err := l.TarStreamFrom(parentChainID) |
| 293 | 293 |
if err != nil {
|
| ... | ... |
@@ -295,11 +276,11 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
|
| 295 | 295 |
} |
| 296 | 296 |
defer ts.Close() |
| 297 | 297 |
|
| 298 |
- newL, err := daemon.stores[img.OperatingSystem()].layerStore.Register(ts, parentChainID, layer.OS(img.OperatingSystem())) |
|
| 298 |
+ newL, err := daemon.layerStore.Register(ts, parentChainID, img.OperatingSystem()) |
|
| 299 | 299 |
if err != nil {
|
| 300 | 300 |
return "", errors.Wrap(err, "error registering layer") |
| 301 | 301 |
} |
| 302 |
- defer daemon.stores[img.OperatingSystem()].layerStore.Release(newL) |
|
| 302 |
+ defer daemon.layerStore.Release(newL) |
|
| 303 | 303 |
|
| 304 | 304 |
newImage := *img |
| 305 | 305 |
newImage.RootFS = nil |
| ... | ... |
@@ -334,7 +315,7 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
|
| 334 | 334 |
return "", errors.Wrap(err, "error marshalling image config") |
| 335 | 335 |
} |
| 336 | 336 |
|
| 337 |
- newImgID, err := daemon.stores[img.OperatingSystem()].imageStore.Create(b) |
|
| 337 |
+ newImgID, err := daemon.imageStore.Create(b) |
|
| 338 | 338 |
if err != nil {
|
| 339 | 339 |
return "", errors.Wrap(err, "error creating new image after squash") |
| 340 | 340 |
} |
| ... | ... |
@@ -91,11 +91,11 @@ func (daemon *Daemon) ImportImage(src string, repository, os string, tag string, |
| 91 | 91 |
if err != nil {
|
| 92 | 92 |
return err |
| 93 | 93 |
} |
| 94 |
- l, err := daemon.stores[os].layerStore.Register(inflatedLayerData, "", layer.OS(os)) |
|
| 94 |
+ l, err := daemon.layerStore.Register(inflatedLayerData, "", os) |
|
| 95 | 95 |
if err != nil {
|
| 96 | 96 |
return err |
| 97 | 97 |
} |
| 98 |
- defer layer.ReleaseAndLog(daemon.stores[os].layerStore, l) |
|
| 98 |
+ defer layer.ReleaseAndLog(daemon.layerStore, l) |
|
| 99 | 99 |
|
| 100 | 100 |
created := time.Now().UTC() |
| 101 | 101 |
imgConfig, err := json.Marshal(&image.Image{
|
| ... | ... |
@@ -120,14 +120,14 @@ func (daemon *Daemon) ImportImage(src string, repository, os string, tag string, |
| 120 | 120 |
return err |
| 121 | 121 |
} |
| 122 | 122 |
|
| 123 |
- id, err := daemon.stores[os].imageStore.Create(imgConfig) |
|
| 123 |
+ id, err := daemon.imageStore.Create(imgConfig) |
|
| 124 | 124 |
if err != nil {
|
| 125 | 125 |
return err |
| 126 | 126 |
} |
| 127 | 127 |
|
| 128 | 128 |
// FIXME: connect with commit code and call refstore directly |
| 129 | 129 |
if newRef != nil {
|
| 130 |
- if err := daemon.TagImageWithReference(id, os, newRef); err != nil {
|
|
| 130 |
+ if err := daemon.TagImageWithReference(id, newRef); err != nil {
|
|
| 131 | 131 |
return err |
| 132 | 132 |
} |
| 133 | 133 |
} |
| ... | ... |
@@ -78,32 +78,26 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
|
| 78 | 78 |
securityOptions = append(securityOptions, "name=userns") |
| 79 | 79 |
} |
| 80 | 80 |
|
| 81 |
- imageCount := 0 |
|
| 81 |
+ var ds [][2]string |
|
| 82 | 82 |
drivers := "" |
| 83 |
- for p, ds := range daemon.stores {
|
|
| 84 |
- imageCount += len(ds.imageStore.Map()) |
|
| 85 |
- drivers += daemon.GraphDriverName(p) |
|
| 86 |
- if len(daemon.stores) > 1 {
|
|
| 87 |
- drivers += fmt.Sprintf(" (%s) ", p)
|
|
| 83 |
+ for os, gd := range daemon.graphDrivers {
|
|
| 84 |
+ ds = append(ds, daemon.layerStore.DriverStatus(os)...) |
|
| 85 |
+ drivers += gd |
|
| 86 |
+ if len(daemon.graphDrivers) > 1 {
|
|
| 87 |
+ drivers += fmt.Sprintf(" (%s) ", os)
|
|
| 88 | 88 |
} |
| 89 | 89 |
} |
| 90 |
- |
|
| 91 |
- // TODO @jhowardmsft LCOW support. For now, hard-code the platform shown for the driver status |
|
| 92 |
- p := runtime.GOOS |
|
| 93 |
- if system.LCOWSupported() {
|
|
| 94 |
- p = "linux" |
|
| 95 |
- } |
|
| 96 |
- |
|
| 97 | 90 |
drivers = strings.TrimSpace(drivers) |
| 91 |
+ |
|
| 98 | 92 |
v := &types.Info{
|
| 99 | 93 |
ID: daemon.ID, |
| 100 | 94 |
Containers: cRunning + cPaused + cStopped, |
| 101 | 95 |
ContainersRunning: cRunning, |
| 102 | 96 |
ContainersPaused: cPaused, |
| 103 | 97 |
ContainersStopped: cStopped, |
| 104 |
- Images: imageCount, |
|
| 98 |
+ Images: len(daemon.imageStore.Map()), |
|
| 105 | 99 |
Driver: drivers, |
| 106 |
- DriverStatus: daemon.stores[p].layerStore.DriverStatus(), |
|
| 100 |
+ DriverStatus: ds, |
|
| 107 | 101 |
Plugins: daemon.showPluginsInfo(), |
| 108 | 102 |
IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, |
| 109 | 103 |
BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled, |
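Note: the Driver and DriverStatus fields above are now aggregated per operating system from daemon.graphDrivers and the single layer store. A small illustrative helper with the same formatting as the loop above (the helper and the example driver names are hypothetical):

// driverSummary renders e.g. "windowsfilter (windows) lcow (linux)" when more
// than one graph driver is registered, or just the bare driver name otherwise.
func driverSummary(graphDrivers map[string]string) string {
	out := ""
	for os, gd := range graphDrivers {
		out += gd
		if len(graphDrivers) > 1 {
			out += fmt.Sprintf(" (%s) ", os)
		}
	}
	return strings.TrimSpace(out)
}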
| ... | ... |
@@ -323,7 +323,7 @@ func (daemon *Daemon) foldFilter(view container.View, config *types.ContainerLis |
| 323 | 323 |
if psFilters.Contains("ancestor") {
|
| 324 | 324 |
ancestorFilter = true |
| 325 | 325 |
psFilters.WalkValues("ancestor", func(ancestor string) error {
|
| 326 |
- id, os, err := daemon.GetImageIDAndOS(ancestor) |
|
| 326 |
+ id, _, err := daemon.GetImageIDAndOS(ancestor) |
|
| 327 | 327 |
if err != nil {
|
| 328 | 328 |
logrus.Warnf("Error while looking up for image %v", ancestor)
|
| 329 | 329 |
return nil |
| ... | ... |
@@ -333,7 +333,7 @@ func (daemon *Daemon) foldFilter(view container.View, config *types.ContainerLis |
| 333 | 333 |
return nil |
| 334 | 334 |
} |
| 335 | 335 |
// Then walk down the graph and put the imageIds in imagesFilter |
| 336 |
- populateImageFilterByParents(imagesFilter, id, daemon.stores[os].imageStore.Children) |
|
| 336 |
+ populateImageFilterByParents(imagesFilter, id, daemon.imageStore.Children) |
|
| 337 | 337 |
return nil |
| 338 | 338 |
}) |
| 339 | 339 |
} |
| ... | ... |
@@ -138,9 +138,9 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) {
|
| 138 | 138 |
max := len(img.RootFS.DiffIDs) |
| 139 | 139 |
for i := 1; i <= max; i++ {
|
| 140 | 140 |
img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i] |
| 141 |
- layerPath, err := layer.GetLayerPath(daemon.stores[c.OS].layerStore, img.RootFS.ChainID()) |
|
| 141 |
+ layerPath, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID()) |
|
| 142 | 142 |
if err != nil {
|
| 143 |
- return nil, fmt.Errorf("failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.stores[c.OS].layerStore, img.RootFS.ChainID(), err)
|
|
| 143 |
+ return nil, fmt.Errorf("failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err)
|
|
| 144 | 144 |
} |
| 145 | 145 |
// Reverse order, expecting parent most first |
| 146 | 146 |
s.Windows.LayerFolders = append([]string{layerPath}, s.Windows.LayerFolders...)
|
| ... | ... |
@@ -210,15 +210,18 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) {
|
| 210 | 210 |
NetworkSharedContainerName: networkSharedContainerID, |
| 211 | 211 |
} |
| 212 | 212 |
|
| 213 |
- if img.OS == "windows" {
|
|
| 213 |
+ switch img.OS {
|
|
| 214 |
+ case "windows": |
|
| 214 | 215 |
if err := daemon.createSpecWindowsFields(c, &s, isHyperV); err != nil {
|
| 215 | 216 |
return nil, err |
| 216 | 217 |
} |
| 217 |
- } else {
|
|
| 218 |
- // TODO @jhowardmsft LCOW Support. Modify this check when running in dual-mode |
|
| 219 |
- if system.LCOWSupported() && img.OS == "linux" {
|
|
| 220 |
- daemon.createSpecLinuxFields(c, &s) |
|
| 218 |
+ case "linux": |
|
| 219 |
+ if !system.LCOWSupported() {
|
|
| 220 |
+ return nil, fmt.Errorf("Linux containers on Windows are not supported")
|
|
| 221 | 221 |
} |
| 222 |
+ daemon.createSpecLinuxFields(c, &s) |
|
| 223 |
+ default: |
|
| 224 |
+ return nil, fmt.Errorf("Unsupported platform %q", img.OS)
|
|
| 222 | 225 |
} |
| 223 | 226 |
|
| 224 | 227 |
return (*specs.Spec)(&s), nil |
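Note: the OS check is now an explicit switch that rejects Linux images outright when LCOW is not supported and fails on anything else. The same logic isolated as a standalone sketch (the helper name is hypothetical; system.LCOWSupported is the existing helper used above):

func validateSpecOS(imgOS string) error {
	switch imgOS {
	case "windows":
		return nil
	case "linux":
		if !system.LCOWSupported() {
			return fmt.Errorf("Linux containers on Windows are not supported")
		}
		return nil
	default:
		return fmt.Errorf("Unsupported platform %q", imgOS)
	}
}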
| ... | ... |
@@ -3,7 +3,6 @@ package daemon |
| 3 | 3 |
import ( |
| 4 | 4 |
"fmt" |
| 5 | 5 |
"regexp" |
| 6 |
- "runtime" |
|
| 7 | 6 |
"sync/atomic" |
| 8 | 7 |
"time" |
| 9 | 8 |
|
| ... | ... |
@@ -14,7 +13,6 @@ import ( |
| 14 | 14 |
"github.com/docker/docker/image" |
| 15 | 15 |
"github.com/docker/docker/layer" |
| 16 | 16 |
"github.com/docker/docker/pkg/directory" |
| 17 |
- "github.com/docker/docker/pkg/system" |
|
| 18 | 17 |
"github.com/docker/docker/runconfig" |
| 19 | 18 |
"github.com/docker/docker/volume" |
| 20 | 19 |
"github.com/docker/libnetwork" |
| ... | ... |
@@ -162,12 +160,6 @@ func (daemon *Daemon) VolumesPrune(ctx context.Context, pruneFilters filters.Arg |
| 162 | 162 |
|
| 163 | 163 |
// ImagesPrune removes unused images |
| 164 | 164 |
func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) {
|
| 165 |
- // TODO @jhowardmsft LCOW Support: This will need revisiting later. |
|
| 166 |
- platform := runtime.GOOS |
|
| 167 |
- if system.LCOWSupported() {
|
|
| 168 |
- platform = "linux" |
|
| 169 |
- } |
|
| 170 |
- |
|
| 171 | 165 |
if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) {
|
| 172 | 166 |
return nil, errPruneRunning |
| 173 | 167 |
} |
| ... | ... |
@@ -197,9 +189,9 @@ func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args |
| 197 | 197 |
|
| 198 | 198 |
var allImages map[image.ID]*image.Image |
| 199 | 199 |
if danglingOnly {
|
| 200 |
- allImages = daemon.stores[platform].imageStore.Heads() |
|
| 200 |
+ allImages = daemon.imageStore.Heads() |
|
| 201 | 201 |
} else {
|
| 202 |
- allImages = daemon.stores[platform].imageStore.Map() |
|
| 202 |
+ allImages = daemon.imageStore.Map() |
|
| 203 | 203 |
} |
| 204 | 204 |
allContainers := daemon.List() |
| 205 | 205 |
imageRefs := map[string]bool{}
|
| ... | ... |
@@ -213,7 +205,7 @@ func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args |
| 213 | 213 |
} |
| 214 | 214 |
|
| 215 | 215 |
// Filter intermediary images and get their unique size |
| 216 |
- allLayers := daemon.stores[platform].layerStore.Map() |
|
| 216 |
+ allLayers := daemon.layerStore.Map() |
|
| 217 | 217 |
topImages := map[image.ID]*image.Image{}
|
| 218 | 218 |
for id, img := range allImages {
|
| 219 | 219 |
select {
|
| ... | ... |
@@ -221,7 +213,7 @@ func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args |
| 221 | 221 |
return nil, ctx.Err() |
| 222 | 222 |
default: |
| 223 | 223 |
dgst := digest.Digest(id) |
| 224 |
- if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.stores[platform].imageStore.Children(id)) != 0 {
|
|
| 224 |
+ if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 {
|
|
| 225 | 225 |
continue |
| 226 | 226 |
} |
| 227 | 227 |
if !until.IsZero() && img.Created.After(until) {
|
| ... | ... |
@@ -222,7 +222,7 @@ func (daemon *Daemon) Cleanup(container *container.Container) {
|
| 222 | 222 |
if err := daemon.conditionalUnmountOnCleanup(container); err != nil {
|
| 223 | 223 |
// FIXME: remove once reference counting for graphdrivers has been refactored |
| 224 | 224 |
// Ensure that all the mounts are gone |
| 225 |
- if mountid, err := daemon.stores[container.OS].layerStore.GetMountID(container.ID); err == nil {
|
|
| 225 |
+ if mountid, err := daemon.layerStore.GetMountID(container.ID); err == nil {
|
|
| 226 | 226 |
daemon.cleanupMountsByID(mountid) |
| 227 | 227 |
} |
| 228 | 228 |
} |
| ... | ... |
@@ -59,9 +59,9 @@ type ImagePullConfig struct {
|
| 59 | 59 |
// Schema2Types is the valid schema2 configuration types allowed |
| 60 | 60 |
// by the pull operation. |
| 61 | 61 |
Schema2Types []string |
| 62 |
- // Platform is the requested platform of the image being pulled to ensure it can be validated |
|
| 63 |
- // when the host platform supports multiple image operating systems. |
|
| 64 |
- Platform string |
|
| 62 |
+ // OS is the requested operating system of the image being pulled to ensure it can be validated |
|
| 63 |
+ // when the host OS supports multiple image operating systems. |
|
| 64 |
+ OS string |
|
| 65 | 65 |
} |
| 66 | 66 |
|
| 67 | 67 |
// ImagePushConfig stores push configuration. |
| ... | ... |
@@ -86,7 +86,7 @@ type ImagePushConfig struct {
|
| 86 | 86 |
type ImageConfigStore interface {
|
| 87 | 87 |
Put([]byte) (digest.Digest, error) |
| 88 | 88 |
Get(digest.Digest) ([]byte, error) |
| 89 |
- RootFSAndOSFromConfig([]byte) (*image.RootFS, layer.OS, error) |
|
| 89 |
+ RootFSAndOSFromConfig([]byte) (*image.RootFS, string, error) |
|
| 90 | 90 |
} |
| 91 | 91 |
|
| 92 | 92 |
// PushLayerProvider provides layers to be pushed by ChainID. |
| ... | ... |
@@ -112,7 +112,7 @@ type RootFSDownloadManager interface {
|
| 112 | 112 |
// returns the final rootfs. |
| 113 | 113 |
// Given progress output to track download progress |
| 114 | 114 |
// Returns function to release download resources |
| 115 |
- Download(ctx context.Context, initialRootFS image.RootFS, os layer.OS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) |
|
| 115 |
+ Download(ctx context.Context, initialRootFS image.RootFS, os string, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) |
|
| 116 | 116 |
} |
| 117 | 117 |
|
| 118 | 118 |
type imageConfigStore struct {
|
| ... | ... |
@@ -140,7 +140,7 @@ func (s *imageConfigStore) Get(d digest.Digest) ([]byte, error) {
|
| 140 | 140 |
return img.RawJSON(), nil |
| 141 | 141 |
} |
| 142 | 142 |
|
| 143 |
-func (s *imageConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, layer.OS, error) {
|
|
| 143 |
+func (s *imageConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, string, error) {
|
|
| 144 | 144 |
var unmarshalledConfig image.Image |
| 145 | 145 |
if err := json.Unmarshal(c, &unmarshalledConfig); err != nil {
|
| 146 | 146 |
return nil, "", err |
| ... | ... |
@@ -154,11 +154,11 @@ func (s *imageConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, layer |
| 154 | 154 |
return nil, "", fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS)
|
| 155 | 155 |
} |
| 156 | 156 |
|
| 157 |
- os := "" |
|
| 158 |
- if runtime.GOOS == "windows" {
|
|
| 159 |
- os = unmarshalledConfig.OS |
|
| 157 |
+ os := unmarshalledConfig.OS |
|
| 158 |
+ if os == "" {
|
|
| 159 |
+ os = runtime.GOOS |
|
| 160 | 160 |
} |
| 161 |
- return unmarshalledConfig.RootFS, layer.OS(os), nil |
|
| 161 |
+ return unmarshalledConfig.RootFS, os, nil |
|
| 162 | 162 |
} |
| 163 | 163 |
|
| 164 | 164 |
type storeLayerProvider struct {
|
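Note: RootFSAndOSFromConfig now hands back the operating system as a plain string, already defaulted to runtime.GOOS when the image config leaves it empty. A hypothetical caller (the store and config variables are placeholders):

rootFS, imgOS, err := configStore.RootFSAndOSFromConfig(configJSON)
if err != nil {
	return err
}
// imgOS is never empty here, so it can be used directly for layer registration.
fmt.Printf("config targets %s with %d layers\n", imgOS, len(rootFS.DiffIDs))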
| ... | ... |
@@ -26,17 +26,15 @@ type Store interface {
|
| 26 | 26 |
type FSMetadataStore struct {
|
| 27 | 27 |
sync.RWMutex |
| 28 | 28 |
basePath string |
| 29 |
- platform string |
|
| 30 | 29 |
} |
| 31 | 30 |
|
| 32 | 31 |
// NewFSMetadataStore creates a new filesystem-based metadata store. |
| 33 |
-func NewFSMetadataStore(basePath, platform string) (*FSMetadataStore, error) {
|
|
| 32 |
+func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) {
|
|
| 34 | 33 |
if err := os.MkdirAll(basePath, 0700); err != nil {
|
| 35 | 34 |
return nil, err |
| 36 | 35 |
} |
| 37 | 36 |
return &FSMetadataStore{
|
| 38 | 37 |
basePath: basePath, |
| 39 |
- platform: platform, |
|
| 40 | 38 |
}, nil |
| 41 | 39 |
} |
| 42 | 40 |
|
| ... | ... |
@@ -3,7 +3,6 @@ package metadata |
| 3 | 3 |
import ( |
| 4 | 4 |
"io/ioutil" |
| 5 | 5 |
"os" |
| 6 |
- "runtime" |
|
| 7 | 6 |
"testing" |
| 8 | 7 |
|
| 9 | 8 |
"github.com/docker/docker/layer" |
| ... | ... |
@@ -17,7 +16,7 @@ func TestV1IDService(t *testing.T) {
|
| 17 | 17 |
} |
| 18 | 18 |
defer os.RemoveAll(tmpDir) |
| 19 | 19 |
|
| 20 |
- metadataStore, err := NewFSMetadataStore(tmpDir, runtime.GOOS) |
|
| 20 |
+ metadataStore, err := NewFSMetadataStore(tmpDir) |
|
| 21 | 21 |
if err != nil {
|
| 22 | 22 |
t.Fatalf("could not create metadata store: %v", err)
|
| 23 | 23 |
} |
| ... | ... |
@@ -6,7 +6,6 @@ import ( |
| 6 | 6 |
"math/rand" |
| 7 | 7 |
"os" |
| 8 | 8 |
"reflect" |
| 9 |
- "runtime" |
|
| 10 | 9 |
"testing" |
| 11 | 10 |
|
| 12 | 11 |
"github.com/docker/docker/layer" |
| ... | ... |
@@ -20,7 +19,7 @@ func TestV2MetadataService(t *testing.T) {
|
| 20 | 20 |
} |
| 21 | 21 |
defer os.RemoveAll(tmpDir) |
| 22 | 22 |
|
| 23 |
- metadataStore, err := NewFSMetadataStore(tmpDir, runtime.GOOS) |
|
| 23 |
+ metadataStore, err := NewFSMetadataStore(tmpDir) |
|
| 24 | 24 |
if err != nil {
|
| 25 | 25 |
t.Fatalf("could not create metadata store: %v", err)
|
| 26 | 26 |
} |
| ... | ... |
@@ -21,7 +21,7 @@ type Puller interface {
|
| 21 | 21 |
// Pull tries to pull the image referenced by `tag` |
| 22 | 22 |
// Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint. |
| 23 | 23 |
// |
| 24 |
- Pull(ctx context.Context, ref reference.Named, platform string) error |
|
| 24 |
+ Pull(ctx context.Context, ref reference.Named, os string) error |
|
| 25 | 25 |
} |
| 26 | 26 |
|
| 27 | 27 |
// newPuller returns a Puller interface that will pull from either a v1 or v2 |
| ... | ... |
@@ -115,12 +115,12 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo |
| 115 | 115 |
continue |
| 116 | 116 |
} |
| 117 | 117 |
|
| 118 |
- // Make sure we default the platform if it hasn't been supplied |
|
| 119 |
- if imagePullConfig.Platform == "" {
|
|
| 120 |
- imagePullConfig.Platform = runtime.GOOS |
|
| 118 |
+ // Make sure we default the OS if it hasn't been supplied |
|
| 119 |
+ if imagePullConfig.OS == "" {
|
|
| 120 |
+ imagePullConfig.OS = runtime.GOOS |
|
| 121 | 121 |
} |
| 122 | 122 |
|
| 123 |
- if err := puller.Pull(ctx, ref, imagePullConfig.Platform); err != nil {
|
|
| 123 |
+ if err := puller.Pull(ctx, ref, imagePullConfig.OS); err != nil {
|
|
| 124 | 124 |
// Was this pull cancelled? If so, don't try to fall |
| 125 | 125 |
// back. |
| 126 | 126 |
fallback := false |
| ... | ... |
@@ -36,7 +36,7 @@ type v1Puller struct {
|
| 36 | 36 |
session *registry.Session |
| 37 | 37 |
} |
| 38 | 38 |
|
| 39 |
-func (p *v1Puller) Pull(ctx context.Context, ref reference.Named, platform string) error {
|
|
| 39 |
+func (p *v1Puller) Pull(ctx context.Context, ref reference.Named, os string) error {
|
|
| 40 | 40 |
if _, isCanonical := ref.(reference.Canonical); isCanonical {
|
| 41 | 41 |
// Allowing fallback, because HTTPS v1 is before HTTP v2 |
| 42 | 42 |
return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}}
|
| ... | ... |
@@ -62,7 +62,7 @@ type v2Puller struct {
|
| 62 | 62 |
confirmedV2 bool |
| 63 | 63 |
} |
| 64 | 64 |
|
| 65 |
-func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, platform string) (err error) {
|
|
| 65 |
+func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, os string) (err error) {
|
|
| 66 | 66 |
// TODO(tiborvass): was ReceiveTimeout |
| 67 | 67 |
p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") |
| 68 | 68 |
if err != nil {
|
| ... | ... |
@@ -70,7 +70,7 @@ func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, platform strin |
| 70 | 70 |
return err |
| 71 | 71 |
} |
| 72 | 72 |
|
| 73 |
- if err = p.pullV2Repository(ctx, ref, platform); err != nil {
|
|
| 73 |
+ if err = p.pullV2Repository(ctx, ref, os); err != nil {
|
|
| 74 | 74 |
if _, ok := err.(fallbackError); ok {
|
| 75 | 75 |
return err |
| 76 | 76 |
} |
| ... | ... |
@@ -85,10 +85,10 @@ func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, platform strin |
| 85 | 85 |
return err |
| 86 | 86 |
} |
| 87 | 87 |
|
| 88 |
-func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named, platform string) (err error) {
|
|
| 88 |
+func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named, os string) (err error) {
|
|
| 89 | 89 |
var layersDownloaded bool |
| 90 | 90 |
if !reference.IsNameOnly(ref) {
|
| 91 |
- layersDownloaded, err = p.pullV2Tag(ctx, ref, platform) |
|
| 91 |
+ layersDownloaded, err = p.pullV2Tag(ctx, ref, os) |
|
| 92 | 92 |
if err != nil {
|
| 93 | 93 |
return err |
| 94 | 94 |
} |
| ... | ... |
@@ -110,7 +110,7 @@ func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named, pl |
| 110 | 110 |
if err != nil {
|
| 111 | 111 |
return err |
| 112 | 112 |
} |
| 113 |
- pulledNew, err := p.pullV2Tag(ctx, tagRef, platform) |
|
| 113 |
+ pulledNew, err := p.pullV2Tag(ctx, tagRef, os) |
|
| 114 | 114 |
if err != nil {
|
| 115 | 115 |
// Since this is the pull-all-tags case, don't |
| 116 | 116 |
// allow an error pulling a particular tag to |
| ... | ... |
@@ -488,9 +488,9 @@ func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unv |
| 488 | 488 |
descriptors = append(descriptors, layerDescriptor) |
| 489 | 489 |
} |
| 490 | 490 |
|
| 491 |
- // The v1 manifest itself doesn't directly contain a platform. However, |
|
| 491 |
+ // The v1 manifest itself doesn't directly contain an OS. However, |
|
| 492 | 492 |
// the history does, but unfortunately that's a string, so search through |
| 493 |
- // all the history until hopefully we find one which indicates the os. |
|
| 493 |
+ // all the history until hopefully we find one which indicates the OS. |
|
| 494 | 494 |
// supertest2014/nyan is an example of a registry image with schemav1. |
| 495 | 495 |
configOS := runtime.GOOS |
| 496 | 496 |
if system.LCOWSupported() {
|
| ... | ... |
@@ -514,7 +514,7 @@ func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unv |
| 514 | 514 |
return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS)
|
| 515 | 515 |
} |
| 516 | 516 |
|
| 517 |
- resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, layer.OS(configOS), descriptors, p.config.ProgressOutput) |
|
| 517 |
+ resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, configOS, descriptors, p.config.ProgressOutput) |
|
| 518 | 518 |
if err != nil {
|
| 519 | 519 |
return "", "", err |
| 520 | 520 |
} |
| ... | ... |
@@ -588,7 +588,7 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s |
| 588 | 588 |
downloadedRootFS *image.RootFS // rootFS from registered layers |
| 589 | 589 |
configRootFS *image.RootFS // rootFS from configuration |
| 590 | 590 |
release func() // release resources from rootFS download |
| 591 |
- configOS layer.OS // for LCOW when registering downloaded layers |
|
| 591 |
+ configOS string // for LCOW when registering downloaded layers |
|
| 592 | 592 |
) |
| 593 | 593 |
|
| 594 | 594 |
// https://github.com/docker/docker/issues/24766 - Err on the side of caution, |
| ... | ... |
@@ -615,7 +615,7 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s |
| 615 | 615 |
|
| 616 | 616 |
// Early bath if the requested OS doesn't match that of the configuration. |
| 617 | 617 |
// This avoids doing the download, only to potentially fail later. |
| 618 |
- if !strings.EqualFold(string(configOS), requestedOS) {
|
|
| 618 |
+ if !strings.EqualFold(configOS, requestedOS) {
|
|
| 619 | 619 |
return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS)
|
| 620 | 620 |
} |
| 621 | 621 |
|
| ... | ... |
@@ -633,7 +633,7 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s |
| 633 | 633 |
rootFS image.RootFS |
| 634 | 634 |
) |
| 635 | 635 |
downloadRootFS := *image.NewRootFS() |
| 636 |
- rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, layer.OS(requestedOS), descriptors, p.config.ProgressOutput) |
|
| 636 |
+ rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, requestedOS, descriptors, p.config.ProgressOutput) |
|
| 637 | 637 |
if err != nil {
|
| 638 | 638 |
// Intentionally do not cancel the config download here |
| 639 | 639 |
// as the error from config download (if there is one) |
| ... | ... |
@@ -698,7 +698,7 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s |
| 698 | 698 |
return imageID, manifestDigest, nil |
| 699 | 699 |
} |
| 700 | 700 |
|
| 701 |
-func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, layer.OS, error) {
|
|
| 701 |
+func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, string, error) {
|
|
| 702 | 702 |
select {
|
| 703 | 703 |
case configJSON := <-configChan: |
| 704 | 704 |
rootfs, os, err := s.RootFSAndOSFromConfig(configJSON) |
| ... | ... |
@@ -4,7 +4,6 @@ import ( |
| 4 | 4 |
"errors" |
| 5 | 5 |
"fmt" |
| 6 | 6 |
"io" |
| 7 |
- "runtime" |
|
| 8 | 7 |
"time" |
| 9 | 8 |
|
| 10 | 9 |
"github.com/docker/distribution" |
| ... | ... |
@@ -23,7 +22,7 @@ const maxDownloadAttempts = 5 |
| 23 | 23 |
// registers and downloads those, taking into account dependencies between |
| 24 | 24 |
// layers. |
| 25 | 25 |
type LayerDownloadManager struct {
|
| 26 |
- layerStores map[string]layer.Store |
|
| 26 |
+ layerStore layer.Store |
|
| 27 | 27 |
tm TransferManager |
| 28 | 28 |
waitDuration time.Duration |
| 29 | 29 |
} |
| ... | ... |
@@ -34,9 +33,9 @@ func (ldm *LayerDownloadManager) SetConcurrency(concurrency int) {
|
| 34 | 34 |
} |
| 35 | 35 |
|
| 36 | 36 |
// NewLayerDownloadManager returns a new LayerDownloadManager. |
| 37 |
-func NewLayerDownloadManager(layerStores map[string]layer.Store, concurrencyLimit int, options ...func(*LayerDownloadManager)) *LayerDownloadManager {
|
|
| 37 |
+func NewLayerDownloadManager(layerStore layer.Store, concurrencyLimit int, options ...func(*LayerDownloadManager)) *LayerDownloadManager {
|
|
| 38 | 38 |
manager := LayerDownloadManager{
|
| 39 |
- layerStores: layerStores, |
|
| 39 |
+ layerStore: layerStore, |
|
| 40 | 40 |
tm: NewTransferManager(concurrencyLimit), |
| 41 | 41 |
waitDuration: time.Second, |
| 42 | 42 |
} |
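Note: the download manager is now constructed over a single layer store, and Download takes the target operating system as a plain string rather than a layer.OS. A minimal sketch of the wiring, mirroring the updated tests later in this change (variable names are illustrative):

ldm := xfer.NewLayerDownloadManager(layerStore, maxDownloadConcurrency)
rootFS, release, err := ldm.Download(ctx, *image.NewRootFS(), runtime.GOOS, descriptors, progressOutput)
if err != nil {
	return err
}
defer release()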
| ... | ... |
@@ -95,7 +94,7 @@ type DownloadDescriptorWithRegistered interface {
|
| 95 | 95 |
// Download method is called to get the layer tar data. Layers are then |
| 96 | 96 |
// registered in the appropriate order. The caller must call the returned |
| 97 | 97 |
// release function once it is done with the returned RootFS object. |
| 98 |
-func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, os layer.OS, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
|
|
| 98 |
+func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, os string, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
|
|
| 99 | 99 |
var ( |
| 100 | 100 |
topLayer layer.Layer |
| 101 | 101 |
topDownload *downloadTransfer |
| ... | ... |
@@ -105,11 +104,6 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima |
| 105 | 105 |
downloadsByKey = make(map[string]*downloadTransfer) |
| 106 | 106 |
) |
| 107 | 107 |
|
| 108 |
- // Assume that the operating system is the host OS if blank |
|
| 109 |
- if os == "" {
|
|
| 110 |
- os = layer.OS(runtime.GOOS) |
|
| 111 |
- } |
|
| 112 |
- |
|
| 113 | 108 |
rootFS := initialRootFS |
| 114 | 109 |
for _, descriptor := range layers {
|
| 115 | 110 |
key := descriptor.Key() |
| ... | ... |
@@ -121,20 +115,20 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima |
| 121 | 121 |
if err == nil {
|
| 122 | 122 |
getRootFS := rootFS |
| 123 | 123 |
getRootFS.Append(diffID) |
| 124 |
- l, err := ldm.layerStores[string(os)].Get(getRootFS.ChainID()) |
|
| 124 |
+ l, err := ldm.layerStore.Get(getRootFS.ChainID()) |
|
| 125 | 125 |
if err == nil {
|
| 126 | 126 |
// Layer already exists. |
| 127 | 127 |
logrus.Debugf("Layer already exists: %s", descriptor.ID())
|
| 128 | 128 |
progress.Update(progressOutput, descriptor.ID(), "Already exists") |
| 129 | 129 |
if topLayer != nil {
|
| 130 |
- layer.ReleaseAndLog(ldm.layerStores[string(os)], topLayer) |
|
| 130 |
+ layer.ReleaseAndLog(ldm.layerStore, topLayer) |
|
| 131 | 131 |
} |
| 132 | 132 |
topLayer = l |
| 133 | 133 |
missingLayer = false |
| 134 | 134 |
rootFS.Append(diffID) |
| 135 | 135 |
// Register this repository as a source of this layer. |
| 136 | 136 |
withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) |
| 137 |
- if hasRegistered {
|
|
| 137 |
+ if hasRegistered { // As layerstore may set the driver
|
|
| 138 | 138 |
withRegistered.Registered(diffID) |
| 139 | 139 |
} |
| 140 | 140 |
continue |
| ... | ... |
@@ -171,7 +165,7 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima |
| 171 | 171 |
if topDownload == nil {
|
| 172 | 172 |
return rootFS, func() {
|
| 173 | 173 |
if topLayer != nil {
|
| 174 |
- layer.ReleaseAndLog(ldm.layerStores[string(os)], topLayer) |
|
| 174 |
+ layer.ReleaseAndLog(ldm.layerStore, topLayer) |
|
| 175 | 175 |
} |
| 176 | 176 |
}, nil |
| 177 | 177 |
} |
| ... | ... |
@@ -182,7 +176,7 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima |
| 182 | 182 |
|
| 183 | 183 |
defer func() {
|
| 184 | 184 |
if topLayer != nil {
|
| 185 |
- layer.ReleaseAndLog(ldm.layerStores[string(os)], topLayer) |
|
| 185 |
+ layer.ReleaseAndLog(ldm.layerStore, topLayer) |
|
| 186 | 186 |
} |
| 187 | 187 |
}() |
| 188 | 188 |
|
| ... | ... |
@@ -218,11 +212,11 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima |
| 218 | 218 |
// complete before the registration step, and registers the downloaded data |
| 219 | 219 |
// on top of parentDownload's resulting layer. Otherwise, it registers the |
| 220 | 220 |
// layer on top of the ChainID given by parentLayer. |
| 221 |
-func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer, os layer.OS) DoFunc {
|
|
| 221 |
+func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer, os string) DoFunc {
|
|
| 222 | 222 |
return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
|
| 223 | 223 |
d := &downloadTransfer{
|
| 224 | 224 |
Transfer: NewTransfer(), |
| 225 |
- layerStore: ldm.layerStores[string(os)], |
|
| 225 |
+ layerStore: ldm.layerStore, |
|
| 226 | 226 |
} |
| 227 | 227 |
|
| 228 | 228 |
go func() {
|
| ... | ... |
@@ -382,11 +376,11 @@ func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, |
| 382 | 382 |
// parentDownload. This function does not log progress output because it would |
| 383 | 383 |
// interfere with the progress reporting for sourceDownload, which has the same |
| 384 | 384 |
// Key. |
| 385 |
-func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer, os layer.OS) DoFunc {
|
|
| 385 |
+func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer, os string) DoFunc {
|
|
| 386 | 386 |
return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
|
| 387 | 387 |
d := &downloadTransfer{
|
| 388 | 388 |
Transfer: NewTransfer(), |
| 389 |
- layerStore: ldm.layerStores[string(os)], |
|
| 389 |
+ layerStore: ldm.layerStore, |
|
| 390 | 390 |
} |
| 391 | 391 |
|
| 392 | 392 |
go func() {
|
| ... | ... |
@@ -26,7 +26,7 @@ type mockLayer struct {
|
| 26 | 26 |
diffID layer.DiffID |
| 27 | 27 |
chainID layer.ChainID |
| 28 | 28 |
parent layer.Layer |
| 29 |
- os layer.OS |
|
| 29 |
+ os string |
|
| 30 | 30 |
} |
| 31 | 31 |
|
| 32 | 32 |
func (ml *mockLayer) TarStream() (io.ReadCloser, error) {
|
| ... | ... |
@@ -57,7 +57,7 @@ func (ml *mockLayer) DiffSize() (size int64, err error) {
|
| 57 | 57 |
return 0, nil |
| 58 | 58 |
} |
| 59 | 59 |
|
| 60 |
-func (ml *mockLayer) OS() layer.OS {
|
|
| 60 |
+func (ml *mockLayer) OS() string {
|
|
| 61 | 61 |
return ml.os |
| 62 | 62 |
} |
| 63 | 63 |
|
| ... | ... |
@@ -91,7 +91,7 @@ func (ls *mockLayerStore) Map() map[layer.ChainID]layer.Layer {
|
| 91 | 91 |
return layers |
| 92 | 92 |
} |
| 93 | 93 |
|
| 94 |
-func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID, os layer.OS) (layer.Layer, error) {
|
|
| 94 |
+func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID, os string) (layer.Layer, error) {
|
|
| 95 | 95 |
return ls.RegisterWithDescriptor(reader, parentID, distribution.Descriptor{})
|
| 96 | 96 |
} |
| 97 | 97 |
|
| ... | ... |
@@ -131,7 +131,7 @@ func (ls *mockLayerStore) Get(chainID layer.ChainID) (layer.Layer, error) {
|
| 131 | 131 |
func (ls *mockLayerStore) Release(l layer.Layer) ([]layer.Metadata, error) {
|
| 132 | 132 |
return []layer.Metadata{}, nil
|
| 133 | 133 |
} |
| 134 |
-func (ls *mockLayerStore) CreateRWLayer(string, layer.ChainID, *layer.CreateRWLayerOpts) (layer.RWLayer, error) {
|
|
| 134 |
+func (ls *mockLayerStore) CreateRWLayer(string, layer.ChainID, string, *layer.CreateRWLayerOpts) (layer.RWLayer, error) {
|
|
| 135 | 135 |
return nil, errors.New("not implemented")
|
| 136 | 136 |
} |
| 137 | 137 |
|
| ... | ... |
@@ -150,11 +150,11 @@ func (ls *mockLayerStore) Cleanup() error {
|
| 150 | 150 |
return nil |
| 151 | 151 |
} |
| 152 | 152 |
|
| 153 |
-func (ls *mockLayerStore) DriverStatus() [][2]string {
|
|
| 153 |
+func (ls *mockLayerStore) DriverStatus(string) [][2]string {
|
|
| 154 | 154 |
return [][2]string{}
|
| 155 | 155 |
} |
| 156 | 156 |
|
| 157 |
-func (ls *mockLayerStore) DriverName() string {
|
|
| 157 |
+func (ls *mockLayerStore) DriverName(string) string {
|
|
| 158 | 158 |
return "mock" |
| 159 | 159 |
} |
| 160 | 160 |
|
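Note: with a single layer store serving every operating system, the layer.Store methods that report driver information now take the OS as an argument, as the mock above reflects. A hypothetical query against the daemon's store:

name := daemon.layerStore.DriverName("linux")     // e.g. "lcow" on an LCOW-enabled Windows host
status := daemon.layerStore.DriverStatus("linux") // [][2]string of driver key/value pairs
fmt.Println(name, status)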
| ... | ... |
@@ -272,9 +272,7 @@ func TestSuccessfulDownload(t *testing.T) {
|
| 272 | 272 |
} |
| 273 | 273 |
|
| 274 | 274 |
layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)}
|
| 275 |
- lsMap := make(map[string]layer.Store) |
|
| 276 |
- lsMap[runtime.GOOS] = layerStore |
|
| 277 |
- ldm := NewLayerDownloadManager(lsMap, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond })
|
|
| 275 |
+ ldm := NewLayerDownloadManager(layerStore, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond })
|
|
| 278 | 276 |
|
| 279 | 277 |
progressChan := make(chan progress.Progress) |
| 280 | 278 |
progressDone := make(chan struct{})
|
| ... | ... |
@@ -293,13 +291,13 @@ func TestSuccessfulDownload(t *testing.T) {
|
| 293 | 293 |
firstDescriptor := descriptors[0].(*mockDownloadDescriptor) |
| 294 | 294 |
|
| 295 | 295 |
// Pre-register the first layer to simulate an already-existing layer |
| 296 |
- l, err := layerStore.Register(firstDescriptor.mockTarStream(), "", layer.OS(runtime.GOOS)) |
|
| 296 |
+ l, err := layerStore.Register(firstDescriptor.mockTarStream(), "", runtime.GOOS) |
|
| 297 | 297 |
if err != nil {
|
| 298 | 298 |
t.Fatal(err) |
| 299 | 299 |
} |
| 300 | 300 |
firstDescriptor.diffID = l.DiffID() |
| 301 | 301 |
|
| 302 |
- rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), layer.OS(runtime.GOOS), descriptors, progress.ChanOutput(progressChan)) |
|
| 302 |
+ rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), runtime.GOOS, descriptors, progress.ChanOutput(progressChan)) |
|
| 303 | 303 |
if err != nil {
|
| 304 | 304 |
t.Fatalf("download error: %v", err)
|
| 305 | 305 |
} |
| ... | ... |
@@ -336,9 +334,7 @@ func TestSuccessfulDownload(t *testing.T) {
|
| 336 | 336 |
|
| 337 | 337 |
func TestCancelledDownload(t *testing.T) {
|
| 338 | 338 |
layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)}
|
| 339 |
- lsMap := make(map[string]layer.Store) |
|
| 340 |
- lsMap[runtime.GOOS] = layerStore |
|
| 341 |
- ldm := NewLayerDownloadManager(lsMap, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond })
|
|
| 339 |
+ ldm := NewLayerDownloadManager(layerStore, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond })
|
|
| 342 | 340 |
|
| 343 | 341 |
progressChan := make(chan progress.Progress) |
| 344 | 342 |
progressDone := make(chan struct{})
|
| ... | ... |
@@ -357,7 +353,7 @@ func TestCancelledDownload(t *testing.T) {
|
| 357 | 357 |
}() |
| 358 | 358 |
|
| 359 | 359 |
descriptors := downloadDescriptors(nil) |
| 360 |
- _, _, err := ldm.Download(ctx, *image.NewRootFS(), layer.OS(runtime.GOOS), descriptors, progress.ChanOutput(progressChan)) |
|
| 360 |
+ _, _, err := ldm.Download(ctx, *image.NewRootFS(), runtime.GOOS, descriptors, progress.ChanOutput(progressChan)) |
|
| 361 | 361 |
if err != context.Canceled {
|
| 362 | 362 |
t.Fatal("expected download to be cancelled")
|
| 363 | 363 |
} |
| ... | ... |
@@ -3,13 +3,11 @@ package image |
| 3 | 3 |
import ( |
| 4 | 4 |
"encoding/json" |
| 5 | 5 |
"fmt" |
| 6 |
- "strings" |
|
| 7 | 6 |
"sync" |
| 8 | 7 |
"time" |
| 9 | 8 |
|
| 10 | 9 |
"github.com/docker/distribution/digestset" |
| 11 | 10 |
"github.com/docker/docker/layer" |
| 12 |
- "github.com/docker/docker/pkg/system" |
|
| 13 | 11 |
"github.com/opencontainers/go-digest" |
| 14 | 12 |
"github.com/pkg/errors" |
| 15 | 13 |
"github.com/sirupsen/logrus" |
| ... | ... |
@@ -47,17 +45,15 @@ type store struct {
|
| 47 | 47 |
images map[ID]*imageMeta |
| 48 | 48 |
fs StoreBackend |
| 49 | 49 |
digestSet *digestset.Set |
| 50 |
- os string |
|
| 51 | 50 |
} |
| 52 | 51 |
|
| 53 | 52 |
// NewImageStore returns new store object for given layer store |
| 54 |
-func NewImageStore(fs StoreBackend, os string, ls LayerGetReleaser) (Store, error) {
|
|
| 53 |
+func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) {
|
|
| 55 | 54 |
is := &store{
|
| 56 | 55 |
ls: ls, |
| 57 | 56 |
images: make(map[ID]*imageMeta), |
| 58 | 57 |
fs: fs, |
| 59 | 58 |
digestSet: digestset.NewSet(), |
| 60 |
- os: os, |
|
| 61 | 59 |
} |
| 62 | 60 |
|
| 63 | 61 |
// load all current images and retain layers |
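Note: the image store no longer carries an operating system of its own; one store covers every image. A hypothetical construction under the new signature, as also exercised by the updated tests below:

imageStore, err := image.NewImageStore(fsBackend, layerGetReleaser)
if err != nil {
	return err
}
_ = imageStore.Map() // one map covering every image, regardless of OS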
| ... | ... |
@@ -118,14 +114,6 @@ func (is *store) Create(config []byte) (ID, error) {
|
| 118 | 118 |
return "", err |
| 119 | 119 |
} |
| 120 | 120 |
|
| 121 |
- // TODO @jhowardmsft - LCOW Support. This will need revisiting when coalescing the image stores. |
|
| 122 |
- // Integrity check - ensure we are creating something for the correct platform |
|
| 123 |
- if system.LCOWSupported() {
|
|
| 124 |
- if strings.ToLower(img.OperatingSystem()) != strings.ToLower(is.os) {
|
|
| 125 |
- return "", fmt.Errorf("cannot create entry for operating system %q in image store for operating system %q", img.OperatingSystem(), is.os)
|
|
| 126 |
- } |
|
| 127 |
- } |
|
| 128 |
- |
|
| 129 | 121 |
// Must reject any config that references diffIDs from the history |
| 130 | 122 |
// which aren't among the rootfs layers. |
| 131 | 123 |
rootFSLayers := make(map[layer.DiffID]struct{})
|
| ... | ... |
@@ -1,7 +1,6 @@ |
| 1 | 1 |
package image |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
- "runtime" |
|
| 5 | 4 |
"testing" |
| 6 | 5 |
|
| 7 | 6 |
"github.com/docker/docker/internal/testutil" |
| ... | ... |
@@ -26,7 +25,7 @@ func TestRestore(t *testing.T) {
|
| 26 | 26 |
err = fs.SetMetadata(id2, "parent", []byte(id1)) |
| 27 | 27 |
assert.NoError(t, err) |
| 28 | 28 |
|
| 29 |
- is, err := NewImageStore(fs, runtime.GOOS, &mockLayerGetReleaser{})
|
|
| 29 |
+ is, err := NewImageStore(fs, &mockLayerGetReleaser{})
|
|
| 30 | 30 |
assert.NoError(t, err) |
| 31 | 31 |
|
| 32 | 32 |
assert.Len(t, is.Map(), 2) |
| ... | ... |
@@ -143,7 +142,7 @@ func TestParentReset(t *testing.T) {
|
| 143 | 143 |
func defaultImageStore(t *testing.T) (Store, func()) {
|
| 144 | 144 |
fsBackend, cleanup := defaultFSStoreBackend(t) |
| 145 | 145 |
|
| 146 |
- store, err := NewImageStore(fsBackend, runtime.GOOS, &mockLayerGetReleaser{})
|
|
| 146 |
+ store, err := NewImageStore(fsBackend, &mockLayerGetReleaser{})
|
|
| 147 | 147 |
assert.NoError(t, err) |
| 148 | 148 |
|
| 149 | 149 |
return store, cleanup |
| ... | ... |
@@ -90,11 +90,11 @@ func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) |
| 90 | 90 |
} |
| 91 | 91 |
|
| 92 | 92 |
// On Windows, validate the platform, defaulting to windows if not present. |
| 93 |
- os := layer.OS(img.OS) |
|
| 93 |
+ os := img.OS |
|
| 94 |
+ if os == "" {
|
|
| 95 |
+ os = runtime.GOOS |
|
| 96 |
+ } |
|
| 94 | 97 |
if runtime.GOOS == "windows" {
|
| 95 |
- if os == "" {
|
|
| 96 |
- os = "windows" |
|
| 97 |
- } |
|
| 98 | 98 |
if (os != "windows") && (os != "linux") {
|
| 99 | 99 |
return fmt.Errorf("configuration for this image has an unsupported operating system: %s", os)
|
| 100 | 100 |
} |
| ... | ... |
@@ -176,7 +176,7 @@ func (l *tarexporter) setParentID(id, parentID image.ID) error {
|
| 176 | 176 |
return l.is.SetParent(id, parentID) |
| 177 | 177 |
} |
| 178 | 178 |
|
| 179 |
-func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, os layer.OS, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) {
|
|
| 179 |
+func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, os string, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) {
|
|
| 180 | 180 |
// We use system.OpenSequential to use sequential file access on Windows, avoiding |
| 181 | 181 |
// depleting the standby list. On Linux, this equates to a regular os.Open. |
| 182 | 182 |
rawTar, err := system.OpenSequential(filename) |
| ... | ... |
@@ -409,19 +409,18 @@ func checkValidParent(img, parent *image.Image) bool {
|
| 409 | 409 |
return true |
| 410 | 410 |
} |
| 411 | 411 |
|
| 412 |
-func checkCompatibleOS(os string) error {
|
|
| 413 |
- // TODO @jhowardmsft LCOW - revisit for simultaneous platforms |
|
| 414 |
- platform := runtime.GOOS |
|
| 415 |
- if system.LCOWSupported() {
|
|
| 416 |
- platform = "linux" |
|
| 417 |
- } |
|
| 418 |
- // always compatible if the OS matches; also match an empty OS |
|
| 419 |
- if os == platform || os == "" {
|
|
| 412 |
+func checkCompatibleOS(imageOS string) error {
|
|
| 413 |
+ // always compatible if the image's OS matches the host OS; also match an empty image OS |
|
| 414 |
+ if imageOS == runtime.GOOS || imageOS == "" {
|
|
| 420 | 415 |
return nil |
| 421 | 416 |
} |
| 422 |
- // for compatibility, only fail if the image or runtime OS is Windows |
|
| 423 |
- if os == "windows" || platform == "windows" {
|
|
| 424 |
- return fmt.Errorf("cannot load %s image on %s", os, platform)
|
|
| 417 |
+ // On non-Windows hosts, for compatibility, fail if the image is Windows. |
|
| 418 |
+ if runtime.GOOS != "windows" && imageOS == "windows" {
|
|
| 419 |
+ return fmt.Errorf("cannot load %s image on %s", imageOS, runtime.GOOS)
|
|
| 420 |
+ } |
|
| 421 |
+ // Finally, check the image OS is supported for the platform. |
|
| 422 |
+ if err := system.ValidatePlatform(system.ParsePlatform(imageOS)); err != nil {
|
|
| 423 |
+ return fmt.Errorf("cannot load %s image on %s: %s", imageOS, runtime.GOOS, err)
|
|
| 425 | 424 |
} |
| 426 | 425 |
return nil |
| 427 | 426 |
} |
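A test-style sketch (hypothetical, in the same package, relying only on the checkCompatibleOS signature above plus the runtime and testing imports) of what the rewritten check accepts and rejects:

    func TestCheckCompatibleOSSketch(t *testing.T) {
        // An image OS matching the host, or an empty image OS, is always accepted.
        if err := checkCompatibleOS(runtime.GOOS); err != nil {
            t.Fatalf("expected host OS to be compatible: %v", err)
        }
        if err := checkCompatibleOS(""); err != nil {
            t.Fatalf("expected empty image OS to be compatible: %v", err)
        }
        // A windows image is still rejected on non-windows hosts.
        if runtime.GOOS != "windows" {
            if err := checkCompatibleOS("windows"); err == nil {
                t.Fatal("expected a windows image to be rejected on a non-windows host")
            }
        }
    }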
| ... | ... |
@@ -6,6 +6,7 @@ import ( |
| 6 | 6 |
"fmt" |
| 7 | 7 |
"io" |
| 8 | 8 |
"io/ioutil" |
| 9 |
+ "runtime" |
|
| 9 | 10 |
) |
| 10 | 11 |
|
| 11 | 12 |
// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - |
| ... | ... |
@@ -55,8 +56,8 @@ func (el *emptyLayer) Metadata() (map[string]string, error) {
|
| 55 | 55 |
return make(map[string]string), nil |
| 56 | 56 |
} |
| 57 | 57 |
|
| 58 |
-func (el *emptyLayer) OS() OS {
|
|
| 59 |
- return "" |
|
| 58 |
+func (el *emptyLayer) OS() string {
|
|
| 59 |
+ return runtime.GOOS |
|
| 60 | 60 |
} |
| 61 | 61 |
|
| 62 | 62 |
// IsEmpty returns true if the layer is an EmptyLayer |
| ... | ... |
@@ -2,12 +2,14 @@ |
| 2 | 2 |
|
| 3 | 3 |
package layer |
| 4 | 4 |
|
| 5 |
+import "runtime" |
|
| 6 |
+ |
|
| 5 | 7 |
// SetOS writes the "os" file to the layer filestore |
| 6 |
-func (fm *fileMetadataTransaction) SetOS(os OS) error {
|
|
| 8 |
+func (fm *fileMetadataTransaction) SetOS(os string) error {
|
|
| 7 | 9 |
return nil |
| 8 | 10 |
} |
| 9 | 11 |
|
| 10 | 12 |
// GetOS reads the "os" file from the layer filestore |
| 11 |
-func (fms *fileMetadataStore) GetOS(layer ChainID) (OS, error) {
|
|
| 12 |
- return "", nil |
|
| 13 |
+func (fms *fileMetadataStore) GetOS(layer ChainID) (string, error) {
|
|
| 14 |
+ return runtime.GOOS, nil |
|
| 13 | 15 |
} |
| ... | ... |
@@ -8,7 +8,7 @@ import ( |
| 8 | 8 |
) |
| 9 | 9 |
|
| 10 | 10 |
// SetOS writes the "os" file to the layer filestore |
| 11 |
-func (fm *fileMetadataTransaction) SetOS(os OS) error {
|
|
| 11 |
+func (fm *fileMetadataTransaction) SetOS(os string) error {
|
|
| 12 | 12 |
if os == "" {
|
| 13 | 13 |
return nil |
| 14 | 14 |
} |
| ... | ... |
@@ -16,7 +16,7 @@ func (fm *fileMetadataTransaction) SetOS(os OS) error {
|
| 16 | 16 |
} |
| 17 | 17 |
|
| 18 | 18 |
// GetOS reads the "os" file from the layer filestore |
| 19 |
-func (fms *fileMetadataStore) GetOS(layer ChainID) (OS, error) {
|
|
| 19 |
+func (fms *fileMetadataStore) GetOS(layer ChainID) (string, error) {
|
|
| 20 | 20 |
contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "os")) |
| 21 | 21 |
if err != nil {
|
| 22 | 22 |
// For backwards compatibility, the os file may not exist. Default to "windows" if missing. |
| ... | ... |
@@ -31,5 +31,5 @@ func (fms *fileMetadataStore) GetOS(layer ChainID) (OS, error) {
|
| 31 | 31 |
return "", fmt.Errorf("invalid operating system value: %s", content)
|
| 32 | 32 |
} |
| 33 | 33 |
|
| 34 |
- return OS(content), nil |
|
| 34 |
+ return content, nil |
|
| 35 | 35 |
} |
| ... | ... |
@@ -65,14 +65,6 @@ func (id ChainID) String() string {
|
| 65 | 65 |
return string(id) |
| 66 | 66 |
} |
| 67 | 67 |
|
| 68 |
-// OS is the operating system of a layer |
|
| 69 |
-type OS string |
|
| 70 |
- |
|
| 71 |
-// String returns a string rendition of layers target operating system |
|
| 72 |
-func (id OS) String() string {
|
|
| 73 |
- return string(id) |
|
| 74 |
-} |
|
| 75 |
- |
|
| 76 | 68 |
// DiffID is the hash of an individual layer tar. |
| 77 | 69 |
type DiffID digest.Digest |
| 78 | 70 |
|
| ... | ... |
@@ -109,7 +101,7 @@ type Layer interface {
|
| 109 | 109 |
Parent() Layer |
| 110 | 110 |
|
| 111 | 111 |
// OS returns the operating system of the layer |
| 112 |
- OS() OS |
|
| 112 |
+ OS() string |
|
| 113 | 113 |
|
| 114 | 114 |
// Size returns the size of the entire layer chain. The size |
| 115 | 115 |
// is calculated from the total size of all files in the layers. |
| ... | ... |
@@ -156,6 +148,9 @@ type RWLayer interface {
|
| 156 | 156 |
|
| 157 | 157 |
// Metadata returns the low level metadata for the mutable layer |
| 158 | 158 |
Metadata() (map[string]string, error) |
| 159 |
+ |
|
| 160 |
+ // OS returns the operating system of the writable layer |
|
| 161 |
+ OS() string |
|
| 159 | 162 |
} |
| 160 | 163 |
|
| 161 | 164 |
// Metadata holds information about a |
| ... | ... |
@@ -191,25 +186,25 @@ type CreateRWLayerOpts struct {
|
| 191 | 191 |
// Store represents a backend for managing both |
| 192 | 192 |
// read-only and read-write layers. |
| 193 | 193 |
type Store interface {
|
| 194 |
- Register(io.Reader, ChainID, OS) (Layer, error) |
|
| 194 |
+ Register(io.Reader, ChainID, string) (Layer, error) |
|
| 195 | 195 |
Get(ChainID) (Layer, error) |
| 196 | 196 |
Map() map[ChainID]Layer |
| 197 | 197 |
Release(Layer) ([]Metadata, error) |
| 198 | 198 |
|
| 199 |
- CreateRWLayer(id string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) |
|
| 199 |
+ CreateRWLayer(id string, parent ChainID, os string, opts *CreateRWLayerOpts) (RWLayer, error) |
|
| 200 | 200 |
GetRWLayer(id string) (RWLayer, error) |
| 201 | 201 |
GetMountID(id string) (string, error) |
| 202 | 202 |
ReleaseRWLayer(RWLayer) ([]Metadata, error) |
| 203 | 203 |
|
| 204 | 204 |
Cleanup() error |
| 205 |
- DriverStatus() [][2]string |
|
| 206 |
- DriverName() string |
|
| 205 |
+ DriverStatus(os string) [][2]string |
|
| 206 |
+ DriverName(os string) string |
|
| 207 | 207 |
} |
| 208 | 208 |
|
| 209 | 209 |
// DescribableStore represents a layer store capable of storing |
| 210 | 210 |
// descriptors for layers. |
| 211 | 211 |
type DescribableStore interface {
|
| 212 |
- RegisterWithDescriptor(io.Reader, ChainID, OS, distribution.Descriptor) (Layer, error) |
|
| 212 |
+ RegisterWithDescriptor(io.Reader, ChainID, string, distribution.Descriptor) (Layer, error) |
|
| 213 | 213 |
} |
| 214 | 214 |
|
| 215 | 215 |
// MetadataTransaction represents functions for setting layer metadata |
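Callers of the Store interface changes above now pass the operating system explicitly when registering read-only layers and creating writable ones. A hedged sketch of a caller using the host OS (createContainerLayer, tarReader and parent are illustrative names; imports of io, runtime and github.com/docker/docker/layer assumed):

    func createContainerLayer(ls layer.Store, tarReader io.Reader, parent layer.ChainID, containerID string) (layer.RWLayer, error) {
        // Register the read-only layer against the host operating system's graph driver.
        ro, err := ls.Register(tarReader, parent, runtime.GOOS)
        if err != nil {
            return nil, err
        }
        // Create the container's writable layer on top of it, scoped to the same OS.
        return ls.CreateRWLayer(containerID, ro.ChainID(), runtime.GOOS, nil)
    }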
| ... | ... |
@@ -220,7 +215,7 @@ type MetadataTransaction interface {
|
| 220 | 220 |
SetDiffID(DiffID) error |
| 221 | 221 |
SetCacheID(string) error |
| 222 | 222 |
SetDescriptor(distribution.Descriptor) error |
| 223 |
- SetOS(OS) error |
|
| 223 |
+ SetOS(string) error |
|
| 224 | 224 |
TarSplitWriter(compressInput bool) (io.WriteCloser, error) |
| 225 | 225 |
|
| 226 | 226 |
Commit(ChainID) error |
| ... | ... |
@@ -241,7 +236,7 @@ type MetadataStore interface {
|
| 241 | 241 |
GetDiffID(ChainID) (DiffID, error) |
| 242 | 242 |
GetCacheID(ChainID) (string, error) |
| 243 | 243 |
GetDescriptor(ChainID) (distribution.Descriptor, error) |
| 244 |
- GetOS(ChainID) (OS, error) |
|
| 244 |
+ GetOS(ChainID) (string, error) |
|
| 245 | 245 |
TarSplitReader(ChainID) (io.ReadCloser, error) |
| 246 | 246 |
|
| 247 | 247 |
SetMountID(string, string) error |
| ... | ... |
@@ -5,7 +5,7 @@ import ( |
| 5 | 5 |
"fmt" |
| 6 | 6 |
"io" |
| 7 | 7 |
"io/ioutil" |
| 8 |
- "strings" |
|
| 8 |
+ "runtime" |
|
| 9 | 9 |
"sync" |
| 10 | 10 |
|
| 11 | 11 |
"github.com/docker/distribution" |
| ... | ... |
@@ -28,70 +28,77 @@ import ( |
| 28 | 28 |
const maxLayerDepth = 125 |
| 29 | 29 |
|
| 30 | 30 |
type layerStore struct {
|
| 31 |
- store MetadataStore |
|
| 32 |
- driver graphdriver.Driver |
|
| 31 |
+ store MetadataStore |
|
| 32 |
+ drivers map[string]graphdriver.Driver |
|
| 33 |
+ useTarSplit map[string]bool |
|
| 33 | 34 |
|
| 34 | 35 |
layerMap map[ChainID]*roLayer |
| 35 | 36 |
layerL sync.Mutex |
| 36 | 37 |
|
| 37 | 38 |
mounts map[string]*mountedLayer |
| 38 | 39 |
mountL sync.Mutex |
| 39 |
- |
|
| 40 |
- useTarSplit bool |
|
| 41 |
- |
|
| 42 |
- os string |
|
| 43 | 40 |
} |
| 44 | 41 |
|
| 45 | 42 |
// StoreOptions are the options used to create a new Store instance |
| 46 | 43 |
type StoreOptions struct {
|
| 47 |
- StorePath string |
|
| 44 |
+ Root string |
|
| 45 |
+ GraphDrivers map[string]string |
|
| 48 | 46 |
MetadataStorePathTemplate string |
| 49 |
- GraphDriver string |
|
| 50 | 47 |
GraphDriverOptions []string |
| 51 | 48 |
IDMappings *idtools.IDMappings |
| 52 | 49 |
PluginGetter plugingetter.PluginGetter |
| 53 | 50 |
ExperimentalEnabled bool |
| 54 |
- OS string |
|
| 55 | 51 |
} |
| 56 | 52 |
|
| 57 | 53 |
// NewStoreFromOptions creates a new Store instance |
| 58 | 54 |
func NewStoreFromOptions(options StoreOptions) (Store, error) {
|
| 59 |
- driver, err := graphdriver.New(options.GraphDriver, options.PluginGetter, graphdriver.Options{
|
|
| 60 |
- Root: options.StorePath, |
|
| 61 |
- DriverOptions: options.GraphDriverOptions, |
|
| 62 |
- UIDMaps: options.IDMappings.UIDs(), |
|
| 63 |
- GIDMaps: options.IDMappings.GIDs(), |
|
| 64 |
- ExperimentalEnabled: options.ExperimentalEnabled, |
|
| 65 |
- }) |
|
| 66 |
- if err != nil {
|
|
| 67 |
- return nil, fmt.Errorf("error initializing graphdriver: %v", err)
|
|
| 55 |
+ drivers := make(map[string]graphdriver.Driver) |
|
| 56 |
+ for os, drivername := range options.GraphDrivers {
|
|
| 57 |
+ var err error |
|
| 58 |
+ drivers[os], err = graphdriver.New(drivername, |
|
| 59 |
+ options.PluginGetter, |
|
| 60 |
+ graphdriver.Options{
|
|
| 61 |
+ Root: options.Root, |
|
| 62 |
+ DriverOptions: options.GraphDriverOptions, |
|
| 63 |
+ UIDMaps: options.IDMappings.UIDs(), |
|
| 64 |
+ GIDMaps: options.IDMappings.GIDs(), |
|
| 65 |
+ ExperimentalEnabled: options.ExperimentalEnabled, |
|
| 66 |
+ }) |
|
| 67 |
+ if err != nil {
|
|
| 68 |
+ return nil, fmt.Errorf("error initializing graphdriver: %v", err)
|
|
| 69 |
+ } |
|
| 70 |
+ logrus.Debugf("Initialized graph driver %s", drivername)
|
|
| 68 | 71 |
} |
| 69 |
- logrus.Debugf("Using graph driver %s", driver)
|
|
| 70 | 72 |
|
| 71 |
- fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver)) |
|
| 73 |
+ fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, options.GraphDrivers[runtime.GOOS])) |
|
| 72 | 74 |
if err != nil {
|
| 73 | 75 |
return nil, err |
| 74 | 76 |
} |
| 75 | 77 |
|
| 76 |
- return NewStoreFromGraphDriver(fms, driver, options.OS) |
|
| 78 |
+ return NewStoreFromGraphDrivers(fms, drivers) |
|
| 77 | 79 |
} |
| 78 | 80 |
|
| 79 |
-// NewStoreFromGraphDriver creates a new Store instance using the provided |
|
| 80 |
-// metadata store and graph driver. The metadata store will be used to restore |
|
| 81 |
+// NewStoreFromGraphDrivers creates a new Store instance using the provided |
|
| 82 |
+// metadata store and graph drivers. The metadata store will be used to restore |
|
| 81 | 83 |
// the Store. |
| 82 |
-func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver, os string) (Store, error) {
|
|
| 83 |
- caps := graphdriver.Capabilities{}
|
|
| 84 |
- if capDriver, ok := driver.(graphdriver.CapabilityDriver); ok {
|
|
| 85 |
- caps = capDriver.Capabilities() |
|
| 84 |
+func NewStoreFromGraphDrivers(store MetadataStore, drivers map[string]graphdriver.Driver) (Store, error) {
|
|
| 85 |
+ |
|
| 86 |
+ useTarSplit := make(map[string]bool) |
|
| 87 |
+ for os, driver := range drivers {
|
|
| 88 |
+ |
|
| 89 |
+ caps := graphdriver.Capabilities{}
|
|
| 90 |
+ if capDriver, ok := driver.(graphdriver.CapabilityDriver); ok {
|
|
| 91 |
+ caps = capDriver.Capabilities() |
|
| 92 |
+ } |
|
| 93 |
+ useTarSplit[os] = !caps.ReproducesExactDiffs |
|
| 86 | 94 |
} |
| 87 | 95 |
|
| 88 | 96 |
ls := &layerStore{
|
| 89 | 97 |
store: store, |
| 90 |
- driver: driver, |
|
| 98 |
+ drivers: drivers, |
|
| 91 | 99 |
layerMap: map[ChainID]*roLayer{},
|
| 92 | 100 |
mounts: map[string]*mountedLayer{},
|
| 93 |
- useTarSplit: !caps.ReproducesExactDiffs, |
|
| 94 |
- os: os, |
|
| 101 |
+ useTarSplit: useTarSplit, |
|
| 95 | 102 |
} |
| 96 | 103 |
|
| 97 | 104 |
ids, mounts, err := store.List() |
| ... | ... |
@@ -227,7 +234,7 @@ func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent stri |
| 227 | 227 |
tr := io.TeeReader(ts, digester.Hash()) |
| 228 | 228 |
|
| 229 | 229 |
rdr := tr |
| 230 |
- if ls.useTarSplit {
|
|
| 230 |
+ if ls.useTarSplit[layer.os] {
|
|
| 231 | 231 |
tsw, err := tx.TarSplitWriter(true) |
| 232 | 232 |
if err != nil {
|
| 233 | 233 |
return err |
| ... | ... |
@@ -243,7 +250,7 @@ func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent stri |
| 243 | 243 |
} |
| 244 | 244 |
} |
| 245 | 245 |
|
| 246 |
- applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, rdr) |
|
| 246 |
+ applySize, err := ls.drivers[layer.os].ApplyDiff(layer.cacheID, parent, rdr) |
|
| 247 | 247 |
if err != nil {
|
| 248 | 248 |
return err |
| 249 | 249 |
} |
| ... | ... |
@@ -259,11 +266,11 @@ func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent stri |
| 259 | 259 |
return nil |
| 260 | 260 |
} |
| 261 | 261 |
|
| 262 |
-func (ls *layerStore) Register(ts io.Reader, parent ChainID, os OS) (Layer, error) {
|
|
| 262 |
+func (ls *layerStore) Register(ts io.Reader, parent ChainID, os string) (Layer, error) {
|
|
| 263 | 263 |
return ls.registerWithDescriptor(ts, parent, os, distribution.Descriptor{})
|
| 264 | 264 |
} |
| 265 | 265 |
|
| 266 |
-func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, os OS, descriptor distribution.Descriptor) (Layer, error) {
|
|
| 266 |
+func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, os string, descriptor distribution.Descriptor) (Layer, error) {
|
|
| 267 | 267 |
// err is used to hold the error which will always trigger |
| 268 | 268 |
// cleanup of creates sources but may not be an error returned |
| 269 | 269 |
// to the caller (already exists). |
| ... | ... |
@@ -271,13 +278,6 @@ func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, os OS |
| 271 | 271 |
var pid string |
| 272 | 272 |
var p *roLayer |
| 273 | 273 |
|
| 274 |
- // Integrity check - ensure we are creating something for the correct operating system |
|
| 275 |
- if system.LCOWSupported() {
|
|
| 276 |
- if strings.ToLower(ls.os) != strings.ToLower(string(os)) {
|
|
| 277 |
- return nil, fmt.Errorf("cannot create entry for operating system %q in layer store for operating system %q", os, ls.os)
|
|
| 278 |
- } |
|
| 279 |
- } |
|
| 280 |
- |
|
| 281 | 274 |
if string(parent) != "" {
|
| 282 | 275 |
p = ls.get(parent) |
| 283 | 276 |
if p == nil {
|
| ... | ... |
@@ -298,6 +298,14 @@ func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, os OS |
| 298 | 298 |
} |
| 299 | 299 |
} |
| 300 | 300 |
|
| 301 |
+ // Ensure the operating system is valid, defaulting to the host OS if not supplied |
|
| 302 |
+ if os == "" {
|
|
| 303 |
+ os = runtime.GOOS |
|
| 304 |
+ } |
|
| 305 |
+ if err := system.ValidatePlatform(system.ParsePlatform(os)); err != nil {
|
|
| 306 |
+ return nil, err |
|
| 307 |
+ } |
|
| 308 |
+ |
|
| 301 | 309 |
// Create new roLayer |
| 302 | 310 |
layer := &roLayer{
|
| 303 | 311 |
parent: p, |
| ... | ... |
@@ -309,7 +317,7 @@ func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, os OS |
| 309 | 309 |
os: os, |
| 310 | 310 |
} |
| 311 | 311 |
|
| 312 |
- if err = ls.driver.Create(layer.cacheID, pid, nil); err != nil {
|
|
| 312 |
+ if err = ls.drivers[os].Create(layer.cacheID, pid, nil); err != nil {
|
|
| 313 | 313 |
return nil, err |
| 314 | 314 |
} |
| 315 | 315 |
|
| ... | ... |
@@ -321,7 +329,7 @@ func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, os OS |
| 321 | 321 |
defer func() {
|
| 322 | 322 |
if err != nil {
|
| 323 | 323 |
logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err)
|
| 324 |
- if err := ls.driver.Remove(layer.cacheID); err != nil {
|
|
| 324 |
+ if err := ls.drivers[os].Remove(layer.cacheID); err != nil {
|
|
| 325 | 325 |
logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err)
|
| 326 | 326 |
} |
| 327 | 327 |
if err := tx.Cancel(); err != nil {
|
| ... | ... |
@@ -405,7 +413,7 @@ func (ls *layerStore) Map() map[ChainID]Layer {
|
| 405 | 405 |
} |
| 406 | 406 |
|
| 407 | 407 |
func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error {
|
| 408 |
- err := ls.driver.Remove(layer.cacheID) |
|
| 408 |
+ err := ls.drivers[layer.os].Remove(layer.cacheID) |
|
| 409 | 409 |
if err != nil {
|
| 410 | 410 |
return err |
| 411 | 411 |
} |
| ... | ... |
@@ -475,7 +483,7 @@ func (ls *layerStore) Release(l Layer) ([]Metadata, error) {
|
| 475 | 475 |
return ls.releaseLayer(layer) |
| 476 | 476 |
} |
| 477 | 477 |
|
| 478 |
-func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) {
|
|
| 478 |
+func (ls *layerStore) CreateRWLayer(name string, parent ChainID, os string, opts *CreateRWLayerOpts) (RWLayer, error) {
|
|
| 479 | 479 |
var ( |
| 480 | 480 |
storageOpt map[string]string |
| 481 | 481 |
initFunc MountInit |
| ... | ... |
@@ -515,16 +523,21 @@ func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWL |
| 515 | 515 |
}() |
| 516 | 516 |
} |
| 517 | 517 |
|
| 518 |
+ // Ensure the operating system is set to the host OS if not populated. |
|
| 519 |
+ if os == "" {
|
|
| 520 |
+ os = runtime.GOOS |
|
| 521 |
+ } |
|
| 518 | 522 |
m = &mountedLayer{
|
| 519 | 523 |
name: name, |
| 520 | 524 |
parent: p, |
| 521 | 525 |
mountID: ls.mountID(name), |
| 522 | 526 |
layerStore: ls, |
| 523 | 527 |
references: map[RWLayer]*referencedRWLayer{},
|
| 528 |
+ os: os, |
|
| 524 | 529 |
} |
| 525 | 530 |
|
| 526 | 531 |
if initFunc != nil {
|
| 527 |
- pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt) |
|
| 532 |
+ pid, err = ls.initMount(m.mountID, m.os, pid, mountLabel, initFunc, storageOpt) |
|
| 528 | 533 |
if err != nil {
|
| 529 | 534 |
return nil, err |
| 530 | 535 |
} |
| ... | ... |
@@ -535,7 +548,7 @@ func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWL |
| 535 | 535 |
StorageOpt: storageOpt, |
| 536 | 536 |
} |
| 537 | 537 |
|
| 538 |
- if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil {
|
|
| 538 |
+ if err = ls.drivers[os].CreateReadWrite(m.mountID, pid, createOpts); err != nil {
|
|
| 539 | 539 |
return nil, err |
| 540 | 540 |
} |
| 541 | 541 |
if err = ls.saveMount(m); err != nil {
|
| ... | ... |
@@ -584,14 +597,14 @@ func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) {
|
| 584 | 584 |
return []Metadata{}, nil
|
| 585 | 585 |
} |
| 586 | 586 |
|
| 587 |
- if err := ls.driver.Remove(m.mountID); err != nil {
|
|
| 587 |
+ if err := ls.drivers[l.OS()].Remove(m.mountID); err != nil {
|
|
| 588 | 588 |
logrus.Errorf("Error removing mounted layer %s: %s", m.name, err)
|
| 589 | 589 |
m.retakeReference(l) |
| 590 | 590 |
return nil, err |
| 591 | 591 |
} |
| 592 | 592 |
|
| 593 | 593 |
if m.initID != "" {
|
| 594 |
- if err := ls.driver.Remove(m.initID); err != nil {
|
|
| 594 |
+ if err := ls.drivers[l.OS()].Remove(m.initID); err != nil {
|
|
| 595 | 595 |
logrus.Errorf("Error removing init layer %s: %s", m.name, err)
|
| 596 | 596 |
m.retakeReference(l) |
| 597 | 597 |
return nil, err |
| ... | ... |
@@ -637,7 +650,7 @@ func (ls *layerStore) saveMount(mount *mountedLayer) error {
|
| 637 | 637 |
return nil |
| 638 | 638 |
} |
| 639 | 639 |
|
| 640 |
-func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) {
|
|
| 640 |
+func (ls *layerStore) initMount(graphID, os, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) {
|
|
| 641 | 641 |
// Use "<graph-id>-init" to maintain compatibility with graph drivers |
| 642 | 642 |
// which are expecting this layer with this special name. If all |
| 643 | 643 |
// graph drivers can be updated to not rely on knowing about this layer |
| ... | ... |
@@ -649,20 +662,20 @@ func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc Mou |
| 649 | 649 |
StorageOpt: storageOpt, |
| 650 | 650 |
} |
| 651 | 651 |
|
| 652 |
- if err := ls.driver.CreateReadWrite(initID, parent, createOpts); err != nil {
|
|
| 652 |
+ if err := ls.drivers[os].CreateReadWrite(initID, parent, createOpts); err != nil {
|
|
| 653 | 653 |
return "", err |
| 654 | 654 |
} |
| 655 |
- p, err := ls.driver.Get(initID, "") |
|
| 655 |
+ p, err := ls.drivers[os].Get(initID, "") |
|
| 656 | 656 |
if err != nil {
|
| 657 | 657 |
return "", err |
| 658 | 658 |
} |
| 659 | 659 |
|
| 660 | 660 |
if err := initFunc(p); err != nil {
|
| 661 |
- ls.driver.Put(initID) |
|
| 661 |
+ ls.drivers[os].Put(initID) |
|
| 662 | 662 |
return "", err |
| 663 | 663 |
} |
| 664 | 664 |
|
| 665 |
- if err := ls.driver.Put(initID); err != nil {
|
|
| 665 |
+ if err := ls.drivers[os].Put(initID); err != nil {
|
|
| 666 | 666 |
return "", err |
| 667 | 667 |
} |
| 668 | 668 |
|
| ... | ... |
@@ -670,13 +683,13 @@ func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc Mou |
| 670 | 670 |
} |
| 671 | 671 |
|
| 672 | 672 |
func (ls *layerStore) getTarStream(rl *roLayer) (io.ReadCloser, error) {
|
| 673 |
- if !ls.useTarSplit {
|
|
| 673 |
+ if !ls.useTarSplit[rl.os] {
|
|
| 674 | 674 |
var parentCacheID string |
| 675 | 675 |
if rl.parent != nil {
|
| 676 | 676 |
parentCacheID = rl.parent.cacheID |
| 677 | 677 |
} |
| 678 | 678 |
|
| 679 |
- return ls.driver.Diff(rl.cacheID, parentCacheID) |
|
| 679 |
+ return ls.drivers[rl.os].Diff(rl.cacheID, parentCacheID) |
|
| 680 | 680 |
} |
| 681 | 681 |
|
| 682 | 682 |
r, err := ls.store.TarSplitReader(rl.chainID) |
| ... | ... |
@@ -686,7 +699,7 @@ func (ls *layerStore) getTarStream(rl *roLayer) (io.ReadCloser, error) {
|
| 686 | 686 |
|
| 687 | 687 |
pr, pw := io.Pipe() |
| 688 | 688 |
go func() {
|
| 689 |
- err := ls.assembleTarTo(rl.cacheID, r, nil, pw) |
|
| 689 |
+ err := ls.assembleTarTo(rl.cacheID, rl.os, r, nil, pw) |
|
| 690 | 690 |
if err != nil {
|
| 691 | 691 |
pw.CloseWithError(err) |
| 692 | 692 |
} else {
|
| ... | ... |
@@ -697,10 +710,10 @@ func (ls *layerStore) getTarStream(rl *roLayer) (io.ReadCloser, error) {
|
| 697 | 697 |
return pr, nil |
| 698 | 698 |
} |
| 699 | 699 |
|
| 700 |
-func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error {
|
|
| 701 |
- diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver) |
|
| 700 |
+func (ls *layerStore) assembleTarTo(graphID, os string, metadata io.ReadCloser, size *int64, w io.Writer) error {
|
|
| 701 |
+ diffDriver, ok := ls.drivers[os].(graphdriver.DiffGetterDriver) |
|
| 702 | 702 |
if !ok {
|
| 703 |
- diffDriver = &naiveDiffPathDriver{ls.driver}
|
|
| 703 |
+ diffDriver = &naiveDiffPathDriver{ls.drivers[os]}
|
|
| 704 | 704 |
} |
| 705 | 705 |
|
| 706 | 706 |
defer metadata.Close() |
| ... | ... |
@@ -719,15 +732,27 @@ func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size |
| 719 | 719 |
} |
| 720 | 720 |
|
| 721 | 721 |
func (ls *layerStore) Cleanup() error {
|
| 722 |
- return ls.driver.Cleanup() |
|
| 722 |
+ var err error |
|
| 723 |
+ for _, driver := range ls.drivers {
|
|
| 724 |
+ if e := driver.Cleanup(); e != nil {
|
|
| 725 |
+ err = fmt.Errorf("%v - %v", err, e)
|
|
| 726 |
+ } |
|
| 727 |
+ } |
|
| 728 |
+ return err |
|
| 723 | 729 |
} |
| 724 | 730 |
|
| 725 |
-func (ls *layerStore) DriverStatus() [][2]string {
|
|
| 726 |
- return ls.driver.Status() |
|
| 731 |
+func (ls *layerStore) DriverStatus(os string) [][2]string {
|
|
| 732 |
+ if os == "" {
|
|
| 733 |
+ os = runtime.GOOS |
|
| 734 |
+ } |
|
| 735 |
+ return ls.drivers[os].Status() |
|
| 727 | 736 |
} |
| 728 | 737 |
|
| 729 |
-func (ls *layerStore) DriverName() string {
|
|
| 730 |
- return ls.driver.String() |
|
| 738 |
+func (ls *layerStore) DriverName(os string) string {
|
|
| 739 |
+ if os == "" {
|
|
| 740 |
+ os = runtime.GOOS |
|
| 741 |
+ } |
|
| 742 |
+ return ls.drivers[os].String() |
|
| 731 | 743 |
} |
| 732 | 744 |
|
| 733 | 745 |
type naiveDiffPathDriver struct {
|
| ... | ... |
@@ -6,6 +6,6 @@ import ( |
| 6 | 6 |
"github.com/docker/distribution" |
| 7 | 7 |
) |
| 8 | 8 |
|
| 9 |
-func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, os OS, descriptor distribution.Descriptor) (Layer, error) {
|
|
| 9 |
+func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, os string, descriptor distribution.Descriptor) (Layer, error) {
|
|
| 10 | 10 |
return ls.registerWithDescriptor(ts, parent, os, descriptor) |
| 11 | 11 |
} |
| ... | ... |
@@ -73,7 +73,9 @@ func newTestStore(t *testing.T) (Store, string, func()) {
|
| 73 | 73 |
if err != nil {
|
| 74 | 74 |
t.Fatal(err) |
| 75 | 75 |
} |
| 76 |
- ls, err := NewStoreFromGraphDriver(fms, graph, runtime.GOOS) |
|
| 76 |
+ graphs := make(map[string]graphdriver.Driver) |
|
| 77 |
+ graphs[runtime.GOOS] = graph |
|
| 78 |
+ ls, err := NewStoreFromGraphDrivers(fms, graphs) |
|
| 77 | 79 |
if err != nil {
|
| 78 | 80 |
t.Fatal(err) |
| 79 | 81 |
} |
| ... | ... |
@@ -88,7 +90,7 @@ type layerInit func(root containerfs.ContainerFS) error |
| 88 | 88 |
|
| 89 | 89 |
func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) {
|
| 90 | 90 |
containerID := stringid.GenerateRandomID() |
| 91 |
- mount, err := ls.CreateRWLayer(containerID, parent, nil) |
|
| 91 |
+ mount, err := ls.CreateRWLayer(containerID, parent, runtime.GOOS, nil) |
|
| 92 | 92 |
if err != nil {
|
| 93 | 93 |
return nil, err |
| 94 | 94 |
} |
| ... | ... |
@@ -108,7 +110,7 @@ func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) {
|
| 108 | 108 |
} |
| 109 | 109 |
defer ts.Close() |
| 110 | 110 |
|
| 111 |
- layer, err := ls.Register(ts, parent, OS(runtime.GOOS)) |
|
| 111 |
+ layer, err := ls.Register(ts, parent, runtime.GOOS) |
|
| 112 | 112 |
if err != nil {
|
| 113 | 113 |
return nil, err |
| 114 | 114 |
} |
| ... | ... |
@@ -277,7 +279,7 @@ func TestMountAndRegister(t *testing.T) {
|
| 277 | 277 |
size, _ := layer.Size() |
| 278 | 278 |
t.Logf("Layer size: %d", size)
|
| 279 | 279 |
|
| 280 |
- mount2, err := ls.CreateRWLayer("new-test-mount", layer.ChainID(), nil)
|
|
| 280 |
+ mount2, err := ls.CreateRWLayer("new-test-mount", layer.ChainID(), runtime.GOOS, nil)
|
|
| 281 | 281 |
if err != nil {
|
| 282 | 282 |
t.Fatal(err) |
| 283 | 283 |
} |
| ... | ... |
@@ -385,7 +387,7 @@ func TestStoreRestore(t *testing.T) {
|
| 385 | 385 |
t.Fatal(err) |
| 386 | 386 |
} |
| 387 | 387 |
|
| 388 |
- m, err := ls.CreateRWLayer("some-mount_name", layer3.ChainID(), nil)
|
|
| 388 |
+ m, err := ls.CreateRWLayer("some-mount_name", layer3.ChainID(), runtime.GOOS, nil)
|
|
| 389 | 389 |
if err != nil {
|
| 390 | 390 |
t.Fatal(err) |
| 391 | 391 |
} |
| ... | ... |
@@ -403,7 +405,7 @@ func TestStoreRestore(t *testing.T) {
|
| 403 | 403 |
t.Fatal(err) |
| 404 | 404 |
} |
| 405 | 405 |
|
| 406 |
- ls2, err := NewStoreFromGraphDriver(ls.(*layerStore).store, ls.(*layerStore).driver, runtime.GOOS) |
|
| 406 |
+ ls2, err := NewStoreFromGraphDrivers(ls.(*layerStore).store, ls.(*layerStore).drivers) |
|
| 407 | 407 |
if err != nil {
|
| 408 | 408 |
t.Fatal(err) |
| 409 | 409 |
} |
| ... | ... |
@@ -416,7 +418,7 @@ func TestStoreRestore(t *testing.T) {
|
| 416 | 416 |
assertLayerEqual(t, layer3b, layer3) |
| 417 | 417 |
|
| 418 | 418 |
// Create again with same name, should return error |
| 419 |
- if _, err := ls2.CreateRWLayer("some-mount_name", layer3b.ChainID(), nil); err == nil {
|
|
| 419 |
+ if _, err := ls2.CreateRWLayer("some-mount_name", layer3b.ChainID(), runtime.GOOS, nil); err == nil {
|
|
| 420 | 420 |
t.Fatal("Expected error creating mount with same name")
|
| 421 | 421 |
} else if err != ErrMountNameConflict {
|
| 422 | 422 |
t.Fatal(err) |
| ... | ... |
@@ -498,13 +500,13 @@ func TestTarStreamStability(t *testing.T) {
|
| 498 | 498 |
t.Fatal(err) |
| 499 | 499 |
} |
| 500 | 500 |
|
| 501 |
- layer1, err := ls.Register(bytes.NewReader(tar1), "", OS(runtime.GOOS)) |
|
| 501 |
+ layer1, err := ls.Register(bytes.NewReader(tar1), "", runtime.GOOS) |
|
| 502 | 502 |
if err != nil {
|
| 503 | 503 |
t.Fatal(err) |
| 504 | 504 |
} |
| 505 | 505 |
|
| 506 | 506 |
// hack layer to add file |
| 507 |
- p, err := ls.(*layerStore).driver.Get(layer1.(*referencedCacheLayer).cacheID, "") |
|
| 507 |
+ p, err := ls.(*layerStore).drivers[runtime.GOOS].Get(layer1.(*referencedCacheLayer).cacheID, "") |
|
| 508 | 508 |
if err != nil {
|
| 509 | 509 |
t.Fatal(err) |
| 510 | 510 |
} |
| ... | ... |
@@ -513,11 +515,11 @@ func TestTarStreamStability(t *testing.T) {
|
| 513 | 513 |
t.Fatal(err) |
| 514 | 514 |
} |
| 515 | 515 |
|
| 516 |
- if err := ls.(*layerStore).driver.Put(layer1.(*referencedCacheLayer).cacheID); err != nil {
|
|
| 516 |
+ if err := ls.(*layerStore).drivers[runtime.GOOS].Put(layer1.(*referencedCacheLayer).cacheID); err != nil {
|
|
| 517 | 517 |
t.Fatal(err) |
| 518 | 518 |
} |
| 519 | 519 |
|
| 520 |
- layer2, err := ls.Register(bytes.NewReader(tar2), layer1.ChainID(), OS(runtime.GOOS)) |
|
| 520 |
+ layer2, err := ls.Register(bytes.NewReader(tar2), layer1.ChainID(), runtime.GOOS) |
|
| 521 | 521 |
if err != nil {
|
| 522 | 522 |
t.Fatal(err) |
| 523 | 523 |
} |
| ... | ... |
@@ -685,12 +687,12 @@ func TestRegisterExistingLayer(t *testing.T) {
|
| 685 | 685 |
t.Fatal(err) |
| 686 | 686 |
} |
| 687 | 687 |
|
| 688 |
- layer2a, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID(), OS(runtime.GOOS)) |
|
| 688 |
+ layer2a, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID(), runtime.GOOS) |
|
| 689 | 689 |
if err != nil {
|
| 690 | 690 |
t.Fatal(err) |
| 691 | 691 |
} |
| 692 | 692 |
|
| 693 |
- layer2b, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID(), OS(runtime.GOOS)) |
|
| 693 |
+ layer2b, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID(), runtime.GOOS) |
|
| 694 | 694 |
if err != nil {
|
| 695 | 695 |
t.Fatal(err) |
| 696 | 696 |
} |
| ... | ... |
@@ -725,12 +727,12 @@ func TestTarStreamVerification(t *testing.T) {
|
| 725 | 725 |
t.Fatal(err) |
| 726 | 726 |
} |
| 727 | 727 |
|
| 728 |
- layer1, err := ls.Register(bytes.NewReader(tar1), "", OS(runtime.GOOS)) |
|
| 728 |
+ layer1, err := ls.Register(bytes.NewReader(tar1), "", runtime.GOOS) |
|
| 729 | 729 |
if err != nil {
|
| 730 | 730 |
t.Fatal(err) |
| 731 | 731 |
} |
| 732 | 732 |
|
| 733 |
- layer2, err := ls.Register(bytes.NewReader(tar2), "", OS(runtime.GOOS)) |
|
| 733 |
+ layer2, err := ls.Register(bytes.NewReader(tar2), "", runtime.GOOS) |
|
| 734 | 734 |
if err != nil {
|
| 735 | 735 |
t.Fatal(err) |
| 736 | 736 |
} |
| ... | ... |
@@ -2,7 +2,10 @@ |
| 2 | 2 |
|
| 3 | 3 |
package layer |
| 4 | 4 |
|
| 5 |
-import "testing" |
|
| 5 |
+import ( |
|
| 6 |
+ "runtime" |
|
| 7 |
+ "testing" |
|
| 8 |
+) |
|
| 6 | 9 |
|
| 7 | 10 |
func graphDiffSize(ls Store, l Layer) (int64, error) {
|
| 8 | 11 |
cl := getCachedLayer(l) |
| ... | ... |
@@ -10,7 +13,7 @@ func graphDiffSize(ls Store, l Layer) (int64, error) {
|
| 10 | 10 |
if cl.parent != nil {
|
| 11 | 11 |
parent = cl.parent.cacheID |
| 12 | 12 |
} |
| 13 |
- return ls.(*layerStore).driver.DiffSize(cl.cacheID, parent) |
|
| 13 |
+ return ls.(*layerStore).drivers[runtime.GOOS].DiffSize(cl.cacheID, parent) |
|
| 14 | 14 |
} |
| 15 | 15 |
|
| 16 | 16 |
// Unix as Windows graph driver does not support Changes which is indirectly |
| ... | ... |
@@ -25,16 +25,15 @@ func GetLayerPath(s Store, layer ChainID) (string, error) {
|
| 25 | 25 |
return "", ErrLayerDoesNotExist |
| 26 | 26 |
} |
| 27 | 27 |
|
| 28 |
- if layerGetter, ok := ls.driver.(Getter); ok {
|
|
| 28 |
+ if layerGetter, ok := ls.drivers[rl.os].(Getter); ok {
|
|
| 29 | 29 |
return layerGetter.GetLayerPath(rl.cacheID) |
| 30 | 30 |
} |
| 31 |
- |
|
| 32 |
- path, err := ls.driver.Get(rl.cacheID, "") |
|
| 31 |
+ path, err := ls.drivers[rl.os].Get(rl.cacheID, "") |
|
| 33 | 32 |
if err != nil {
|
| 34 | 33 |
return "", err |
| 35 | 34 |
} |
| 36 | 35 |
|
| 37 |
- if err := ls.driver.Put(rl.cacheID); err != nil {
|
|
| 36 |
+ if err := ls.drivers[rl.os].Put(rl.cacheID); err != nil {
|
|
| 38 | 37 |
return "", err |
| 39 | 38 |
} |
| 40 | 39 |
|
| ... | ... |
@@ -6,6 +6,7 @@ import ( |
| 6 | 6 |
"fmt" |
| 7 | 7 |
"io" |
| 8 | 8 |
"os" |
| 9 |
+ "runtime" |
|
| 9 | 10 |
|
| 10 | 11 |
"github.com/opencontainers/go-digest" |
| 11 | 12 |
"github.com/sirupsen/logrus" |
| ... | ... |
@@ -16,7 +17,7 @@ import ( |
| 16 | 16 |
// CreateRWLayerByGraphID creates a RWLayer in the layer store using |
| 17 | 17 |
// the provided name with the given graphID. To get the RWLayer |
| 18 | 18 |
// after migration the layer may be retrieved by the given name. |
| 19 |
-func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) {
|
|
| 19 |
+func (ls *layerStore) CreateRWLayerByGraphID(name, graphID, os string, parent ChainID) (err error) {
|
|
| 20 | 20 |
ls.mountL.Lock() |
| 21 | 21 |
defer ls.mountL.Unlock() |
| 22 | 22 |
m, ok := ls.mounts[name] |
| ... | ... |
@@ -31,7 +32,11 @@ func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent |
| 31 | 31 |
return nil |
| 32 | 32 |
} |
| 33 | 33 |
|
| 34 |
- if !ls.driver.Exists(graphID) {
|
|
| 34 |
+ // Ensure the operating system is set to the host OS if not populated. |
|
| 35 |
+ if os == "" {
|
|
| 36 |
+ os = runtime.GOOS |
|
| 37 |
+ } |
|
| 38 |
+ if !ls.drivers[os].Exists(graphID) {
|
|
| 35 | 39 |
return fmt.Errorf("graph ID does not exist: %q", graphID)
|
| 36 | 40 |
} |
| 37 | 41 |
|
| ... | ... |
@@ -60,11 +65,12 @@ func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent |
| 60 | 60 |
mountID: graphID, |
| 61 | 61 |
layerStore: ls, |
| 62 | 62 |
references: map[RWLayer]*referencedRWLayer{},
|
| 63 |
+ os: os, |
|
| 63 | 64 |
} |
| 64 | 65 |
|
| 65 | 66 |
// Check for existing init layer |
| 66 | 67 |
initID := fmt.Sprintf("%s-init", graphID)
|
| 67 |
- if ls.driver.Exists(initID) {
|
|
| 68 |
+ if ls.drivers[os].Exists(initID) {
|
|
| 68 | 69 |
m.initID = initID |
| 69 | 70 |
} |
| 70 | 71 |
|
| ... | ... |
@@ -95,7 +101,10 @@ func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataP |
| 95 | 95 |
} |
| 96 | 96 |
|
| 97 | 97 |
dgst := digest.Canonical.Digester() |
| 98 |
- err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) |
|
| 98 |
+ // Note - we use the host OS here. This is a safe assumption as it's during migration, and |
|
| 99 |
+ // no host OS which supports migration also supports multiple image OSes. In other words, |
|
| 100 |
+ // it's only on Linux, not on Windows. |
|
| 101 |
+ err = ls.assembleTarTo(id, runtime.GOOS, uncompressed, &size, dgst.Hash()) |
|
| 99 | 102 |
if err != nil {
|
| 100 | 103 |
return |
| 101 | 104 |
} |
| ... | ... |
@@ -111,7 +120,10 @@ func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataP |
| 111 | 111 |
} |
| 112 | 112 |
|
| 113 | 113 |
func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) {
|
| 114 |
- rawarchive, err := ls.driver.Diff(id, parent) |
|
| 114 |
+ // Note - we use the host OS here. This is a safe assumption as it's during migration, and |
|
| 115 |
+ // no host OS which supports migration also supports multiple image OSes. In other words, |
|
| 116 |
+ // it's only on Linux, not on Windows. |
|
| 117 |
+ rawarchive, err := ls.drivers[runtime.GOOS].Diff(id, parent) |
|
| 115 | 118 |
if err != nil {
|
| 116 | 119 |
return |
| 117 | 120 |
} |
| ... | ... |
@@ -94,7 +94,9 @@ func TestLayerMigration(t *testing.T) {
|
| 94 | 94 |
if err != nil {
|
| 95 | 95 |
t.Fatal(err) |
| 96 | 96 |
} |
| 97 |
- ls, err := NewStoreFromGraphDriver(fms, graph, runtime.GOOS) |
|
| 97 |
+ graphs := make(map[string]graphdriver.Driver) |
|
| 98 |
+ graphs[runtime.GOOS] = graph |
|
| 99 |
+ ls, err := NewStoreFromGraphDrivers(fms, graphs) |
|
| 98 | 100 |
if err != nil {
|
| 99 | 101 |
t.Fatal(err) |
| 100 | 102 |
} |
| ... | ... |
@@ -110,14 +112,14 @@ func TestLayerMigration(t *testing.T) {
|
| 110 | 110 |
t.Fatal(err) |
| 111 | 111 |
} |
| 112 | 112 |
|
| 113 |
- layer1b, err := ls.Register(bytes.NewReader(tar1), "", OS(runtime.GOOS)) |
|
| 113 |
+ layer1b, err := ls.Register(bytes.NewReader(tar1), "", runtime.GOOS) |
|
| 114 | 114 |
if err != nil {
|
| 115 | 115 |
t.Fatal(err) |
| 116 | 116 |
} |
| 117 | 117 |
|
| 118 | 118 |
assertReferences(t, layer1a, layer1b) |
| 119 | 119 |
// Attempt register, should be same |
| 120 |
- layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID(), OS(runtime.GOOS)) |
|
| 120 |
+ layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID(), runtime.GOOS) |
|
| 121 | 121 |
if err != nil {
|
| 122 | 122 |
t.Fatal(err) |
| 123 | 123 |
} |
| ... | ... |
@@ -222,7 +224,9 @@ func TestLayerMigrationNoTarsplit(t *testing.T) {
|
| 222 | 222 |
if err != nil {
|
| 223 | 223 |
t.Fatal(err) |
| 224 | 224 |
} |
| 225 |
- ls, err := NewStoreFromGraphDriver(fms, graph, runtime.GOOS) |
|
| 225 |
+ graphs := make(map[string]graphdriver.Driver) |
|
| 226 |
+ graphs[runtime.GOOS] = graph |
|
| 227 |
+ ls, err := NewStoreFromGraphDrivers(fms, graphs) |
|
| 226 | 228 |
if err != nil {
|
| 227 | 229 |
t.Fatal(err) |
| 228 | 230 |
} |
| ... | ... |
@@ -238,7 +242,7 @@ func TestLayerMigrationNoTarsplit(t *testing.T) {
|
| 238 | 238 |
t.Fatal(err) |
| 239 | 239 |
} |
| 240 | 240 |
|
| 241 |
- layer1b, err := ls.Register(bytes.NewReader(tar1), "", OS(runtime.GOOS)) |
|
| 241 |
+ layer1b, err := ls.Register(bytes.NewReader(tar1), "", runtime.GOOS) |
|
| 242 | 242 |
if err != nil {
|
| 243 | 243 |
t.Fatal(err) |
| 244 | 244 |
} |
| ... | ... |
@@ -246,7 +250,7 @@ func TestLayerMigrationNoTarsplit(t *testing.T) {
|
| 246 | 246 |
assertReferences(t, layer1a, layer1b) |
| 247 | 247 |
|
| 248 | 248 |
// Attempt register, should be same |
| 249 |
- layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID(), OS(runtime.GOOS)) |
|
| 249 |
+ layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID(), runtime.GOOS) |
|
| 250 | 250 |
if err != nil {
|
| 251 | 251 |
t.Fatal(err) |
| 252 | 252 |
} |
| ... | ... |
@@ -308,7 +312,7 @@ func TestMountMigration(t *testing.T) {
|
| 308 | 308 |
t.Fatal(err) |
| 309 | 309 |
} |
| 310 | 310 |
|
| 311 |
- graph := ls.(*layerStore).driver |
|
| 311 |
+ graph := ls.(*layerStore).drivers[runtime.GOOS] |
|
| 312 | 312 |
|
| 313 | 313 |
layer1, err := createLayer(ls, "", initWithFiles(baseFiles...)) |
| 314 | 314 |
if err != nil {
|
| ... | ... |
@@ -334,7 +338,7 @@ func TestMountMigration(t *testing.T) {
|
| 334 | 334 |
t.Fatal(err) |
| 335 | 335 |
} |
| 336 | 336 |
|
| 337 |
- if err := ls.(*layerStore).CreateRWLayerByGraphID("migration-mount", containerID, layer1.ChainID()); err != nil {
|
|
| 337 |
+ if err := ls.(*layerStore).CreateRWLayerByGraphID("migration-mount", containerID, runtime.GOOS, layer1.ChainID()); err != nil {
|
|
| 338 | 338 |
t.Fatal(err) |
| 339 | 339 |
} |
| 340 | 340 |
|
| ... | ... |
@@ -380,7 +384,7 @@ func TestMountMigration(t *testing.T) {
|
| 380 | 380 |
Kind: archive.ChangeAdd, |
| 381 | 381 |
}) |
| 382 | 382 |
|
| 383 |
- if _, err := ls.CreateRWLayer("migration-mount", layer1.ChainID(), nil); err == nil {
|
|
| 383 |
+ if _, err := ls.CreateRWLayer("migration-mount", layer1.ChainID(), runtime.GOOS, nil); err == nil {
|
|
| 384 | 384 |
t.Fatal("Expected error creating mount with same name")
|
| 385 | 385 |
} else if err != ErrMountNameConflict {
|
| 386 | 386 |
t.Fatal(err) |
| ... | ... |
@@ -35,7 +35,7 @@ func TestMountInit(t *testing.T) {
|
| 35 | 35 |
rwLayerOpts := &CreateRWLayerOpts{
|
| 36 | 36 |
InitFunc: mountInit, |
| 37 | 37 |
} |
| 38 |
- m, err := ls.CreateRWLayer("fun-mount", layer.ChainID(), rwLayerOpts)
|
|
| 38 |
+ m, err := ls.CreateRWLayer("fun-mount", layer.ChainID(), runtime.GOOS, rwLayerOpts)
|
|
| 39 | 39 |
if err != nil {
|
| 40 | 40 |
t.Fatal(err) |
| 41 | 41 |
} |
| ... | ... |
@@ -95,7 +95,7 @@ func TestMountSize(t *testing.T) {
|
| 95 | 95 |
InitFunc: mountInit, |
| 96 | 96 |
} |
| 97 | 97 |
|
| 98 |
- m, err := ls.CreateRWLayer("mount-size", layer.ChainID(), rwLayerOpts)
|
|
| 98 |
+ m, err := ls.CreateRWLayer("mount-size", layer.ChainID(), runtime.GOOS, rwLayerOpts)
|
|
| 99 | 99 |
if err != nil {
|
| 100 | 100 |
t.Fatal(err) |
| 101 | 101 |
} |
| ... | ... |
@@ -147,7 +147,7 @@ func TestMountChanges(t *testing.T) {
|
| 147 | 147 |
InitFunc: mountInit, |
| 148 | 148 |
} |
| 149 | 149 |
|
| 150 |
- m, err := ls.CreateRWLayer("mount-changes", layer.ChainID(), rwLayerOpts)
|
|
| 150 |
+ m, err := ls.CreateRWLayer("mount-changes", layer.ChainID(), runtime.GOOS, rwLayerOpts)
|
|
| 151 | 151 |
if err != nil {
|
| 152 | 152 |
t.Fatal(err) |
| 153 | 153 |
} |
| ... | ... |
@@ -2,6 +2,7 @@ package layer |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 | 4 |
"io" |
| 5 |
+ "runtime" |
|
| 5 | 6 |
|
| 6 | 7 |
"github.com/docker/docker/pkg/archive" |
| 7 | 8 |
"github.com/docker/docker/pkg/containerfs" |
| ... | ... |
@@ -14,6 +15,7 @@ type mountedLayer struct {
|
| 14 | 14 |
parent *roLayer |
| 15 | 15 |
path string |
| 16 | 16 |
layerStore *layerStore |
| 17 |
+ os string |
|
| 17 | 18 |
|
| 18 | 19 |
references map[RWLayer]*referencedRWLayer |
| 19 | 20 |
} |
| ... | ... |
@@ -29,7 +31,7 @@ func (ml *mountedLayer) cacheParent() string {
|
| 29 | 29 |
} |
| 30 | 30 |
|
| 31 | 31 |
func (ml *mountedLayer) TarStream() (io.ReadCloser, error) {
|
| 32 |
- return ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) |
|
| 32 |
+ return ml.layerStore.drivers[ml.OS()].Diff(ml.mountID, ml.cacheParent()) |
|
| 33 | 33 |
} |
| 34 | 34 |
|
| 35 | 35 |
func (ml *mountedLayer) Name() string {
|
| ... | ... |
@@ -46,16 +48,24 @@ func (ml *mountedLayer) Parent() Layer {
|
| 46 | 46 |
return nil |
| 47 | 47 |
} |
| 48 | 48 |
|
| 49 |
+func (ml *mountedLayer) OS() string {
|
|
| 50 |
+ // For backwards compatibility, return the host OS if not set. |
|
| 51 |
+ if ml.os == "" {
|
|
| 52 |
+ return runtime.GOOS |
|
| 53 |
+ } |
|
| 54 |
+ return ml.os |
|
| 55 |
+} |
|
| 56 |
+ |
|
| 49 | 57 |
func (ml *mountedLayer) Size() (int64, error) {
|
| 50 |
- return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) |
|
| 58 |
+ return ml.layerStore.drivers[ml.OS()].DiffSize(ml.mountID, ml.cacheParent()) |
|
| 51 | 59 |
} |
| 52 | 60 |
|
| 53 | 61 |
func (ml *mountedLayer) Changes() ([]archive.Change, error) {
|
| 54 |
- return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent()) |
|
| 62 |
+ return ml.layerStore.drivers[ml.OS()].Changes(ml.mountID, ml.cacheParent()) |
|
| 55 | 63 |
} |
| 56 | 64 |
|
| 57 | 65 |
func (ml *mountedLayer) Metadata() (map[string]string, error) {
|
| 58 |
- return ml.layerStore.driver.GetMetadata(ml.mountID) |
|
| 66 |
+ return ml.layerStore.drivers[ml.OS()].GetMetadata(ml.mountID) |
|
| 59 | 67 |
} |
| 60 | 68 |
|
| 61 | 69 |
func (ml *mountedLayer) getReference() RWLayer {
|
| ... | ... |
@@ -90,11 +100,11 @@ type referencedRWLayer struct {
|
| 90 | 90 |
} |
| 91 | 91 |
|
| 92 | 92 |
func (rl *referencedRWLayer) Mount(mountLabel string) (containerfs.ContainerFS, error) {
|
| 93 |
- return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel) |
|
| 93 |
+ return rl.layerStore.drivers[rl.OS()].Get(rl.mountedLayer.mountID, mountLabel) |
|
| 94 | 94 |
} |
| 95 | 95 |
|
| 96 | 96 |
// Unmount decrements the activity count and unmounts the underlying layer |
| 97 | 97 |
// Callers should only call `Unmount` once per call to `Mount`, even on error. |
| 98 | 98 |
func (rl *referencedRWLayer) Unmount() error {
|
| 99 |
- return rl.layerStore.driver.Put(rl.mountedLayer.mountID) |
|
| 99 |
+ return rl.layerStore.drivers[rl.OS()].Put(rl.mountedLayer.mountID) |
|
| 100 | 100 |
} |
| ... | ... |
@@ -16,7 +16,7 @@ type roLayer struct {
|
| 16 | 16 |
size int64 |
| 17 | 17 |
layerStore *layerStore |
| 18 | 18 |
descriptor distribution.Descriptor |
| 19 |
- os OS |
|
| 19 |
+ os string |
|
| 20 | 20 |
|
| 21 | 21 |
referenceCount int |
| 22 | 22 |
references map[Layer]struct{}
|
| ... | ... |
@@ -52,7 +52,7 @@ func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) {
|
| 52 | 52 |
if parent != ChainID("") && parentCacheID == "" {
|
| 53 | 53 |
return nil, fmt.Errorf("layer ID '%s' is not a parent of the specified layer: cannot provide diff to non-parent", parent)
|
| 54 | 54 |
} |
| 55 |
- return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID) |
|
| 55 |
+ return rl.layerStore.drivers[rl.OS()].Diff(rl.cacheID, parentCacheID) |
|
| 56 | 56 |
} |
| 57 | 57 |
|
| 58 | 58 |
func (rl *roLayer) ChainID() ChainID {
|
| ... | ... |
@@ -86,7 +86,7 @@ func (rl *roLayer) DiffSize() (size int64, err error) {
|
| 86 | 86 |
} |
| 87 | 87 |
|
| 88 | 88 |
func (rl *roLayer) Metadata() (map[string]string, error) {
|
| 89 |
- return rl.layerStore.driver.GetMetadata(rl.cacheID) |
|
| 89 |
+ return rl.layerStore.drivers[rl.OS()].GetMetadata(rl.cacheID) |
|
| 90 | 90 |
} |
| 91 | 91 |
|
| 92 | 92 |
type referencedCacheLayer struct {
|
| ... | ... |
@@ -94,7 +94,7 @@ func TestMigrateContainers(t *testing.T) {
|
| 94 | 94 |
t.Fatal(err) |
| 95 | 95 |
} |
| 96 | 96 |
|
| 97 |
- is, err := image.NewImageStore(ifs, runtime.GOOS, ls) |
|
| 97 |
+ is, err := image.NewImageStore(ifs, ls) |
|
| 98 | 98 |
if err != nil {
|
| 99 | 99 |
t.Fatal(err) |
| 100 | 100 |
} |
| ... | ... |
@@ -172,12 +172,12 @@ func TestMigrateImages(t *testing.T) {
|
| 172 | 172 |
t.Fatal(err) |
| 173 | 173 |
} |
| 174 | 174 |
|
| 175 |
- is, err := image.NewImageStore(ifs, runtime.GOOS, ls) |
|
| 175 |
+ is, err := image.NewImageStore(ifs, ls) |
|
| 176 | 176 |
if err != nil {
|
| 177 | 177 |
t.Fatal(err) |
| 178 | 178 |
} |
| 179 | 179 |
|
| 180 |
- ms, err := metadata.NewFSMetadataStore(filepath.Join(tmpdir, "distribution"), runtime.GOOS) |
|
| 180 |
+ ms, err := metadata.NewFSMetadataStore(filepath.Join(tmpdir, "distribution")) |
|
| 181 | 181 |
if err != nil {
|
| 182 | 182 |
t.Fatal(err) |
| 183 | 183 |
} |
| ... | ... |
@@ -430,7 +430,7 @@ func (l *mockLayer) DiffSize() (int64, error) {
|
| 430 | 430 |
return 0, nil |
| 431 | 431 |
} |
| 432 | 432 |
|
| 433 |
-func (l *mockLayer) OS() layer.OS {
|
|
| 433 |
+func (l *mockLayer) OS() string {
|
|
| 434 | 434 |
return "" |
| 435 | 435 |
} |
| 436 | 436 |
|
| ... | ... |
@@ -11,7 +11,7 @@ var lcowSupported = false |
| 11 | 11 |
// 2. Remove the getenv check when image-store is coalesced as shouldn't be needed anymore. |
| 12 | 12 |
func InitLCOW(experimental bool) {
|
| 13 | 13 |
v := GetOSVersion() |
| 14 |
- if experimental && v.Build > 16270 && os.Getenv("LCOW_SUPPORTED") != "" {
|
|
| 14 |
+ if experimental && v.Build > 16278 && os.Getenv("LCOW_SUPPORTED") != "" {
|
|
| 15 | 15 |
lcowSupported = true |
| 16 | 16 |
} |
| 17 | 17 |
} |
| ... | ... |
@@ -145,7 +145,7 @@ func (s *tempConfigStore) Get(d digest.Digest) ([]byte, error) {
|
| 145 | 145 |
return s.config, nil |
| 146 | 146 |
} |
| 147 | 147 |
|
| 148 |
-func (s *tempConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, layer.OS, error) {
|
|
| 148 |
+func (s *tempConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, string, error) {
|
|
| 149 | 149 |
return configToRootFS(c) |
| 150 | 150 |
} |
| 151 | 151 |
|
| ... | ... |
@@ -532,7 +532,7 @@ func (s *pluginConfigStore) Get(d digest.Digest) ([]byte, error) {
|
| 532 | 532 |
return ioutil.ReadAll(rwc) |
| 533 | 533 |
} |
| 534 | 534 |
|
| 535 |
-func (s *pluginConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, layer.OS, error) {
|
|
| 535 |
+func (s *pluginConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, string, error) {
|
|
| 536 | 536 |
return configToRootFS(c) |
| 537 | 537 |
} |
| 538 | 538 |
|
| ... | ... |
@@ -126,8 +126,7 @@ type downloadManager struct {
|
| 126 | 126 |
configDigest digest.Digest |
| 127 | 127 |
} |
| 128 | 128 |
|
| 129 |
-func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, os layer.OS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
|
|
| 130 |
- // TODO @jhowardmsft LCOW: May need revisiting. |
|
| 129 |
+func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, os string, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
|
|
| 131 | 130 |
for _, l := range layers {
|
| 132 | 131 |
b, err := dm.blobStore.New() |
| 133 | 132 |
if err != nil {
|
| ... | ... |
@@ -179,6 +178,6 @@ func (dm *downloadManager) Put(dt []byte) (digest.Digest, error) {
|
| 179 | 179 |
func (dm *downloadManager) Get(d digest.Digest) ([]byte, error) {
|
| 180 | 180 |
return nil, fmt.Errorf("digest not found")
|
| 181 | 181 |
} |
| 182 |
-func (dm *downloadManager) RootFSAndOSFromConfig(c []byte) (*image.RootFS, layer.OS, error) {
|
|
| 182 |
+func (dm *downloadManager) RootFSAndOSFromConfig(c []byte) (*image.RootFS, string, error) {
|
|
| 183 | 183 |
return configToRootFS(c) |
| 184 | 184 |
} |
| ... | ... |
@@ -375,12 +375,9 @@ func isEqualPrivilege(a, b types.PluginPrivilege) bool {
|
| 375 | 375 |
return reflect.DeepEqual(a.Value, b.Value) |
| 376 | 376 |
} |
| 377 | 377 |
|
| 378 |
-func configToRootFS(c []byte) (*image.RootFS, layer.OS, error) {
|
|
| 379 |
- // TODO @jhowardmsft LCOW - Will need to revisit this. For now, calculate the operating system. |
|
| 380 |
- os := layer.OS(runtime.GOOS) |
|
| 381 |
- if system.LCOWSupported() {
|
|
| 382 |
- os = "linux" |
|
| 383 |
- } |
|
| 378 |
+func configToRootFS(c []byte) (*image.RootFS, string, error) {
|
|
| 379 |
+ // TODO @jhowardmsft LCOW - Will need to revisit this. |
|
| 380 |
+ os := runtime.GOOS |
|
| 384 | 381 |
var pluginConfig types.PluginConfig |
| 385 | 382 |
if err := json.Unmarshal(c, &pluginConfig); err != nil {
|
| 386 | 383 |
return nil, "", err |