Signed-off-by: Daniel Nephin <dnephin@docker.com>
Daniel Nephin authored on 2018/02/08 05:52:471 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,27 +0,0 @@ |
1 |
-package daemon // import "github.com/docker/docker/daemon" |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "github.com/docker/docker/builder" |
|
5 |
- "github.com/docker/docker/image/cache" |
|
6 |
- "github.com/sirupsen/logrus" |
|
7 |
-) |
|
8 |
- |
|
9 |
-// MakeImageCache creates a stateful image cache. |
|
10 |
-func (i *imageService) MakeImageCache(sourceRefs []string) builder.ImageCache { |
|
11 |
- if len(sourceRefs) == 0 { |
|
12 |
- return cache.NewLocal(i.imageStore) |
|
13 |
- } |
|
14 |
- |
|
15 |
- cache := cache.New(i.imageStore) |
|
16 |
- |
|
17 |
- for _, ref := range sourceRefs { |
|
18 |
- img, err := i.GetImage(ref) |
|
19 |
- if err != nil { |
|
20 |
- logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err) |
|
21 |
- continue |
|
22 |
- } |
|
23 |
- cache.Populate(img) |
|
24 |
- } |
|
25 |
- |
|
26 |
- return cache |
|
27 |
-} |
... | ... |
@@ -62,6 +62,7 @@ type Backend interface { |
62 | 62 |
GetAttachmentStore() *networkSettings.AttachmentStore |
63 | 63 |
} |
64 | 64 |
|
65 |
+// ImageBackend is used by an executor to perform image operations |
|
65 | 66 |
type ImageBackend interface { |
66 | 67 |
PullImage(ctx context.Context, image, tag, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error |
67 | 68 |
GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error) |
... | ... |
@@ -64,12 +64,6 @@ func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) { |
64 | 64 |
return nil |
65 | 65 |
} |
66 | 66 |
|
67 |
-// getSize returns real size & virtual size |
|
68 |
-func (daemon *Daemon) getSize(containerID string) (int64, int64) { |
|
69 |
- // TODO Windows |
|
70 |
- return 0, 0 |
|
71 |
-} |
|
72 |
- |
|
73 | 67 |
func (daemon *Daemon) setupIpcDirs(container *container.Container) error { |
74 | 68 |
return nil |
75 | 69 |
} |
... | ... |
@@ -157,7 +157,7 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) ( |
157 | 157 |
} |
158 | 158 |
|
159 | 159 |
// Set RWLayer for container after mount labels have been set |
160 |
- rwLayer, err := daemon.imageService.GetRWLayer(container, setupInitLayer(daemon.idMappings)) |
|
160 |
+ rwLayer, err := daemon.imageService.CreateLayer(container, setupInitLayer(daemon.idMappings)) |
|
161 | 161 |
if err != nil { |
162 | 162 |
return nil, errdefs.System(err) |
163 | 163 |
} |
... | ... |
@@ -21,21 +21,21 @@ import ( |
21 | 21 |
"github.com/docker/docker/api/types" |
22 | 22 |
containertypes "github.com/docker/docker/api/types/container" |
23 | 23 |
"github.com/docker/docker/api/types/swarm" |
24 |
+ "github.com/docker/docker/builder" |
|
24 | 25 |
"github.com/docker/docker/container" |
25 | 26 |
"github.com/docker/docker/daemon/config" |
26 | 27 |
"github.com/docker/docker/daemon/discovery" |
27 | 28 |
"github.com/docker/docker/daemon/events" |
28 | 29 |
"github.com/docker/docker/daemon/exec" |
30 |
+ "github.com/docker/docker/daemon/images" |
|
29 | 31 |
"github.com/docker/docker/daemon/logger" |
30 | 32 |
"github.com/docker/docker/daemon/network" |
31 | 33 |
"github.com/docker/docker/errdefs" |
32 | 34 |
"github.com/sirupsen/logrus" |
33 | 35 |
// register graph drivers |
34 |
- "github.com/docker/docker/builder" |
|
35 | 36 |
_ "github.com/docker/docker/daemon/graphdriver/register" |
36 | 37 |
"github.com/docker/docker/daemon/stats" |
37 | 38 |
dmetadata "github.com/docker/docker/distribution/metadata" |
38 |
- "github.com/docker/docker/distribution/xfer" |
|
39 | 39 |
"github.com/docker/docker/dockerversion" |
40 | 40 |
"github.com/docker/docker/image" |
41 | 41 |
"github.com/docker/docker/layer" |
... | ... |
@@ -75,23 +75,21 @@ type Daemon struct { |
75 | 75 |
containers container.Store |
76 | 76 |
containersReplica container.ViewDB |
77 | 77 |
execCommands *exec.Store |
78 |
- |
|
79 |
- imageService *imageService |
|
80 |
- |
|
81 |
- idIndex *truncindex.TruncIndex |
|
82 |
- configStore *config.Config |
|
83 |
- statsCollector *stats.Collector |
|
84 |
- defaultLogConfig containertypes.LogConfig |
|
85 |
- RegistryService registry.Service |
|
86 |
- EventsService *events.Events |
|
87 |
- netController libnetwork.NetworkController |
|
88 |
- volumes *store.VolumeStore |
|
89 |
- discoveryWatcher discovery.Reloader |
|
90 |
- root string |
|
91 |
- seccompEnabled bool |
|
92 |
- apparmorEnabled bool |
|
93 |
- shutdown bool |
|
94 |
- idMappings *idtools.IDMappings |
|
78 |
+ imageService *images.ImageService |
|
79 |
+ idIndex *truncindex.TruncIndex |
|
80 |
+ configStore *config.Config |
|
81 |
+ statsCollector *stats.Collector |
|
82 |
+ defaultLogConfig containertypes.LogConfig |
|
83 |
+ RegistryService registry.Service |
|
84 |
+ EventsService *events.Events |
|
85 |
+ netController libnetwork.NetworkController |
|
86 |
+ volumes *store.VolumeStore |
|
87 |
+ discoveryWatcher discovery.Reloader |
|
88 |
+ root string |
|
89 |
+ seccompEnabled bool |
|
90 |
+ apparmorEnabled bool |
|
91 |
+ shutdown bool |
|
92 |
+ idMappings *idtools.IDMappings |
|
95 | 93 |
// TODO: move graphDrivers field to an InfoService |
96 | 94 |
graphDrivers map[string]string // By operating system |
97 | 95 |
|
... | ... |
@@ -158,7 +156,7 @@ func (daemon *Daemon) restore() error { |
158 | 158 |
// Ignore the container if it does not support the current driver being used by the graph |
159 | 159 |
currentDriverForContainerOS := daemon.graphDrivers[container.OS] |
160 | 160 |
if (container.Driver == "" && currentDriverForContainerOS == "aufs") || container.Driver == currentDriverForContainerOS { |
161 |
- rwlayer, err := daemon.imageService.GetRWLayerByID(container.ID, container.OS) |
|
161 |
+ rwlayer, err := daemon.imageService.GetLayerByID(container.ID, container.OS) |
|
162 | 162 |
if err != nil { |
163 | 163 |
logrus.Errorf("Failed to load container mount %v: %v", id, err) |
164 | 164 |
continue |
... | ... |
@@ -808,8 +806,6 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe |
808 | 808 |
return nil, err |
809 | 809 |
} |
810 | 810 |
|
811 |
- eventsService := events.New() |
|
812 |
- |
|
813 | 811 |
// We have a single tag/reference store for the daemon globally. However, it's |
814 | 812 |
// stored under the graphdriver. On host platforms which only support a single |
815 | 813 |
// container OS, but multiple selectable graphdrivers, this means depending on which |
... | ... |
@@ -863,7 +859,7 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe |
863 | 863 |
d.idIndex = truncindex.NewTruncIndex([]string{}) |
864 | 864 |
d.statsCollector = d.newStatsCollector(1 * time.Second) |
865 | 865 |
|
866 |
- d.EventsService = eventsService |
|
866 |
+ d.EventsService = events.New() |
|
867 | 867 |
d.volumes = volStore |
868 | 868 |
d.root = config.Root |
869 | 869 |
d.idMappings = idMappings |
... | ... |
@@ -872,19 +868,21 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe |
872 | 872 |
|
873 | 873 |
d.linkIndex = newLinkIndex() |
874 | 874 |
|
875 |
- logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads) |
|
876 |
- logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads) |
|
877 |
- d.imageService = &imageService{ |
|
878 |
- trustKey: trustKey, |
|
879 |
- uploadManager: xfer.NewLayerUploadManager(*config.MaxConcurrentUploads), |
|
880 |
- downloadManager: xfer.NewLayerDownloadManager(layerStores, *config.MaxConcurrentDownloads), |
|
881 |
- registryService: registryService, |
|
882 |
- referenceStore: rs, |
|
883 |
- distributionMetadataStore: distributionMetadataStore, |
|
884 |
- imageStore: imageStore, |
|
885 |
- eventsService: eventsService, |
|
886 |
- containers: d.containers, |
|
887 |
- } |
|
875 |
+ // TODO: imageStore, distributionMetadataStore, and ReferenceStore are only |
|
876 |
+ // used above to run migration. They could be initialized in ImageService |
|
877 |
+ // if migration is called from daemon/images. layerStore might move as well. |
|
878 |
+ d.imageService = images.NewImageService(images.ImageServiceConfig{ |
|
879 |
+ ContainerStore: d.containers, |
|
880 |
+ DistributionMetadataStore: distributionMetadataStore, |
|
881 |
+ EventsService: d.EventsService, |
|
882 |
+ ImageStore: imageStore, |
|
883 |
+ LayerStores: layerStores, |
|
884 |
+ MaxConcurrentDownloads: *config.MaxConcurrentDownloads, |
|
885 |
+ MaxConcurrentUploads: *config.MaxConcurrentUploads, |
|
886 |
+ ReferenceStore: rs, |
|
887 |
+ RegistryService: registryService, |
|
888 |
+ TrustKey: trustKey, |
|
889 |
+ }) |
|
888 | 890 |
|
889 | 891 |
go d.execCommandGC() |
890 | 892 |
|
... | ... |
@@ -1007,7 +1005,7 @@ func (daemon *Daemon) Shutdown() error { |
1007 | 1007 |
logrus.Errorf("Stop container error: %v", err) |
1008 | 1008 |
return |
1009 | 1009 |
} |
1010 |
- if mountid, err := daemon.imageService.GetContainerMountID(c.ID, c.OS); err == nil { |
|
1010 |
+ if mountid, err := daemon.imageService.GetLayerMountID(c.ID, c.OS); err == nil { |
|
1011 | 1011 |
daemon.cleanupMountsByID(mountid) |
1012 | 1012 |
} |
1013 | 1013 |
logrus.Debugf("container stopped %s", c.ID) |
... | ... |
@@ -1020,7 +1018,9 @@ func (daemon *Daemon) Shutdown() error { |
1020 | 1020 |
} |
1021 | 1021 |
} |
1022 | 1022 |
|
1023 |
- daemon.imageService.Cleanup() |
|
1023 |
+ if daemon.imageService != nil { |
|
1024 |
+ daemon.imageService.Cleanup() |
|
1025 |
+ } |
|
1024 | 1026 |
|
1025 | 1027 |
// If we are part of a cluster, clean up cluster's stuff |
1026 | 1028 |
if daemon.clusterProvider != nil { |
... | ... |
@@ -1320,14 +1320,15 @@ func (daemon *Daemon) IDMappings() *idtools.IDMappings { |
1320 | 1320 |
return daemon.idMappings |
1321 | 1321 |
} |
1322 | 1322 |
|
1323 |
-func (daemon *Daemon) ImageService() *imageService { |
|
1323 |
+// ImageService returns the Daemon's ImageService |
|
1324 |
+func (daemon *Daemon) ImageService() *images.ImageService { |
|
1324 | 1325 |
return daemon.imageService |
1325 | 1326 |
} |
1326 | 1327 |
|
1327 |
-// TODO: tmp hack to merge interfaces |
|
1328 |
+// BuilderBackend returns the backend used by builder |
|
1328 | 1329 |
func (daemon *Daemon) BuilderBackend() builder.Backend { |
1329 | 1330 |
return struct { |
1330 | 1331 |
*Daemon |
1331 |
- *imageService |
|
1332 |
+ *images.ImageService |
|
1332 | 1333 |
}{daemon, daemon.imageService} |
1333 | 1334 |
} |
... | ... |
@@ -120,7 +120,7 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemo |
120 | 120 |
// When container creation fails and `RWLayer` has not been created yet, we |
121 | 121 |
// do not call `ReleaseRWLayer` |
122 | 122 |
if container.RWLayer != nil { |
123 |
- err := daemon.imageService.ReleaseContainerLayer(container.RWLayer, container.OS) |
|
123 |
+ err := daemon.imageService.ReleaseLayer(container.RWLayer, container.OS) |
|
124 | 124 |
if err != nil { |
125 | 125 |
err = errors.Wrapf(err, "container %s", container.ID) |
126 | 126 |
container.SetRemovalError(err) |
... | ... |
@@ -51,13 +51,13 @@ func (daemon *Daemon) containerExport(container *container.Container) (arch io.R |
51 | 51 |
if !system.IsOSSupported(container.OS) { |
52 | 52 |
return nil, fmt.Errorf("cannot export %s: %s ", container.ID, system.ErrNotSupportedOperatingSystem) |
53 | 53 |
} |
54 |
- rwlayer, err := daemon.imageService.GetRWLayerByID(container.ID, container.OS) |
|
54 |
+ rwlayer, err := daemon.imageService.GetLayerByID(container.ID, container.OS) |
|
55 | 55 |
if err != nil { |
56 | 56 |
return nil, err |
57 | 57 |
} |
58 | 58 |
defer func() { |
59 | 59 |
if err != nil { |
60 |
- daemon.imageService.ReleaseContainerLayer(rwlayer, container.OS) |
|
60 |
+ daemon.imageService.ReleaseLayer(rwlayer, container.OS) |
|
61 | 61 |
} |
62 | 62 |
}() |
63 | 63 |
|
... | ... |
@@ -78,7 +78,7 @@ func (daemon *Daemon) containerExport(container *container.Container) (arch io.R |
78 | 78 |
arch = ioutils.NewReadCloserWrapper(archive, func() error { |
79 | 79 |
err := archive.Close() |
80 | 80 |
rwlayer.Unmount() |
81 |
- daemon.imageService.ReleaseContainerLayer(rwlayer, container.OS) |
|
81 |
+ daemon.imageService.ReleaseLayer(rwlayer, container.OS) |
|
82 | 82 |
return err |
83 | 83 |
}) |
84 | 84 |
daemon.LogContainerEvent(container, "export") |
85 | 85 |
deleted file mode 100644 |
... | ... |
@@ -1,266 +0,0 @@ |
1 |
-package daemon // import "github.com/docker/docker/daemon" |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "context" |
|
5 |
- "fmt" |
|
6 |
- "os" |
|
7 |
- |
|
8 |
- "github.com/docker/distribution/reference" |
|
9 |
- "github.com/docker/docker/api/types/events" |
|
10 |
- "github.com/docker/docker/container" |
|
11 |
- daemonevents "github.com/docker/docker/daemon/events" |
|
12 |
- "github.com/docker/docker/distribution/metadata" |
|
13 |
- "github.com/docker/docker/distribution/xfer" |
|
14 |
- "github.com/docker/docker/errdefs" |
|
15 |
- "github.com/docker/docker/image" |
|
16 |
- "github.com/docker/docker/layer" |
|
17 |
- dockerreference "github.com/docker/docker/reference" |
|
18 |
- "github.com/docker/docker/registry" |
|
19 |
- "github.com/docker/libtrust" |
|
20 |
- "github.com/opencontainers/go-digest" |
|
21 |
- "github.com/pkg/errors" |
|
22 |
- "github.com/sirupsen/logrus" |
|
23 |
-) |
|
24 |
- |
|
25 |
-// errImageDoesNotExist is error returned when no image can be found for a reference. |
|
26 |
-type errImageDoesNotExist struct { |
|
27 |
- ref reference.Reference |
|
28 |
-} |
|
29 |
- |
|
30 |
-func (e errImageDoesNotExist) Error() string { |
|
31 |
- ref := e.ref |
|
32 |
- if named, ok := ref.(reference.Named); ok { |
|
33 |
- ref = reference.TagNameOnly(named) |
|
34 |
- } |
|
35 |
- return fmt.Sprintf("No such image: %s", reference.FamiliarString(ref)) |
|
36 |
-} |
|
37 |
- |
|
38 |
-func (e errImageDoesNotExist) NotFound() {} |
|
39 |
- |
|
40 |
-// GetImageIDAndOS returns an image ID and operating system corresponding to the image referred to by |
|
41 |
-// refOrID. |
|
42 |
-// called from list.go foldFilter() |
|
43 |
-func (i imageService) GetImageIDAndOS(refOrID string) (image.ID, string, error) { |
|
44 |
- ref, err := reference.ParseAnyReference(refOrID) |
|
45 |
- if err != nil { |
|
46 |
- return "", "", errdefs.InvalidParameter(err) |
|
47 |
- } |
|
48 |
- namedRef, ok := ref.(reference.Named) |
|
49 |
- if !ok { |
|
50 |
- digested, ok := ref.(reference.Digested) |
|
51 |
- if !ok { |
|
52 |
- return "", "", errImageDoesNotExist{ref} |
|
53 |
- } |
|
54 |
- id := image.IDFromDigest(digested.Digest()) |
|
55 |
- if img, err := i.imageStore.Get(id); err == nil { |
|
56 |
- return id, img.OperatingSystem(), nil |
|
57 |
- } |
|
58 |
- return "", "", errImageDoesNotExist{ref} |
|
59 |
- } |
|
60 |
- |
|
61 |
- if digest, err := i.referenceStore.Get(namedRef); err == nil { |
|
62 |
- // Search the image stores to get the operating system, defaulting to host OS. |
|
63 |
- id := image.IDFromDigest(digest) |
|
64 |
- if img, err := i.imageStore.Get(id); err == nil { |
|
65 |
- return id, img.OperatingSystem(), nil |
|
66 |
- } |
|
67 |
- } |
|
68 |
- |
|
69 |
- // Search based on ID |
|
70 |
- if id, err := i.imageStore.Search(refOrID); err == nil { |
|
71 |
- img, err := i.imageStore.Get(id) |
|
72 |
- if err != nil { |
|
73 |
- return "", "", errImageDoesNotExist{ref} |
|
74 |
- } |
|
75 |
- return id, img.OperatingSystem(), nil |
|
76 |
- } |
|
77 |
- |
|
78 |
- return "", "", errImageDoesNotExist{ref} |
|
79 |
-} |
|
80 |
- |
|
81 |
-// GetImage returns an image corresponding to the image referred to by refOrID. |
|
82 |
-func (i *imageService) GetImage(refOrID string) (*image.Image, error) { |
|
83 |
- imgID, _, err := i.GetImageIDAndOS(refOrID) |
|
84 |
- if err != nil { |
|
85 |
- return nil, err |
|
86 |
- } |
|
87 |
- return i.imageStore.Get(imgID) |
|
88 |
-} |
|
89 |
- |
|
90 |
-type containerStore interface { |
|
91 |
- // used by image delete |
|
92 |
- First(container.StoreFilter) *container.Container |
|
93 |
- // used by image prune, and image list |
|
94 |
- List() []*container.Container |
|
95 |
- // TODO: remove, only used for CommitBuildStep |
|
96 |
- Get(string) *container.Container |
|
97 |
-} |
|
98 |
- |
|
99 |
-type imageService struct { |
|
100 |
- eventsService *daemonevents.Events |
|
101 |
- containers containerStore |
|
102 |
- downloadManager *xfer.LayerDownloadManager |
|
103 |
- uploadManager *xfer.LayerUploadManager |
|
104 |
- |
|
105 |
- // TODO: should accept a trust service instead of a key |
|
106 |
- trustKey libtrust.PrivateKey |
|
107 |
- |
|
108 |
- registryService registry.Service |
|
109 |
- referenceStore dockerreference.Store |
|
110 |
- distributionMetadataStore metadata.Store |
|
111 |
- imageStore image.Store |
|
112 |
- layerStores map[string]layer.Store // By operating system |
|
113 |
- |
|
114 |
- pruneRunning int32 |
|
115 |
-} |
|
116 |
- |
|
117 |
-// called from info.go |
|
118 |
-func (i *imageService) CountImages() int { |
|
119 |
- return len(i.imageStore.Map()) |
|
120 |
-} |
|
121 |
- |
|
122 |
-// called from list.go to filter containers |
|
123 |
-func (i *imageService) Children(id image.ID) []image.ID { |
|
124 |
- return i.imageStore.Children(id) |
|
125 |
-} |
|
126 |
- |
|
127 |
-// TODO: accept an opt struct instead of container? |
|
128 |
-// called from create.go |
|
129 |
-func (i *imageService) GetRWLayer(container *container.Container, initFunc layer.MountInit) (layer.RWLayer, error) { |
|
130 |
- var layerID layer.ChainID |
|
131 |
- if container.ImageID != "" { |
|
132 |
- img, err := i.imageStore.Get(container.ImageID) |
|
133 |
- if err != nil { |
|
134 |
- return nil, err |
|
135 |
- } |
|
136 |
- layerID = img.RootFS.ChainID() |
|
137 |
- } |
|
138 |
- |
|
139 |
- rwLayerOpts := &layer.CreateRWLayerOpts{ |
|
140 |
- MountLabel: container.MountLabel, |
|
141 |
- InitFunc: initFunc, |
|
142 |
- StorageOpt: container.HostConfig.StorageOpt, |
|
143 |
- } |
|
144 |
- |
|
145 |
- // Indexing by OS is safe here as validation of OS has already been performed in create() (the only |
|
146 |
- // caller), and guaranteed non-nil |
|
147 |
- return i.layerStores[container.OS].CreateRWLayer(container.ID, layerID, rwLayerOpts) |
|
148 |
-} |
|
149 |
- |
|
150 |
-// called from daemon.go Daemon.restore(), and Daemon.containerExport() |
|
151 |
-func (i *imageService) GetRWLayerByID(cid string, os string) (layer.RWLayer, error) { |
|
152 |
- return i.layerStores[os].GetRWLayer(cid) |
|
153 |
-} |
|
154 |
- |
|
155 |
-// called from info.go |
|
156 |
-func (i *imageService) GraphDriverStatuses() map[string][][2]string { |
|
157 |
- result := make(map[string][][2]string) |
|
158 |
- for os, store := range i.layerStores { |
|
159 |
- result[os] = store.DriverStatus() |
|
160 |
- } |
|
161 |
- return result |
|
162 |
-} |
|
163 |
- |
|
164 |
-// called from daemon.go Daemon.Shutdown(), and Daemon.Cleanup() (cleanup is actually continerCleanup) |
|
165 |
-func (i *imageService) GetContainerMountID(cid string, os string) (string, error) { |
|
166 |
- return i.layerStores[os].GetMountID(cid) |
|
167 |
-} |
|
168 |
- |
|
169 |
-// called from daemon.go Daemon.Shutdown() |
|
170 |
-func (i *imageService) Cleanup() { |
|
171 |
- for os, ls := range i.layerStores { |
|
172 |
- if ls != nil { |
|
173 |
- if err := ls.Cleanup(); err != nil { |
|
174 |
- logrus.Errorf("Error during layer Store.Cleanup(): %v %s", err, os) |
|
175 |
- } |
|
176 |
- } |
|
177 |
- } |
|
178 |
-} |
|
179 |
- |
|
180 |
-// moved from Daemon.GraphDriverName, multiple calls |
|
181 |
-func (i *imageService) GraphDriverForOS(os string) string { |
|
182 |
- return i.layerStores[os].DriverName() |
|
183 |
-} |
|
184 |
- |
|
185 |
-// called from delete.go Daemon.cleanupContainer(), and Daemon.containerExport() |
|
186 |
-func (i *imageService) ReleaseContainerLayer(rwlayer layer.RWLayer, containerOS string) error { |
|
187 |
- metadata, err := i.layerStores[containerOS].ReleaseRWLayer(rwlayer) |
|
188 |
- layer.LogReleaseMetadata(metadata) |
|
189 |
- if err != nil && err != layer.ErrMountDoesNotExist && !os.IsNotExist(errors.Cause(err)) { |
|
190 |
- return errors.Wrapf(err, "driver %q failed to remove root filesystem", |
|
191 |
- i.layerStores[containerOS].DriverName()) |
|
192 |
- } |
|
193 |
- return nil |
|
194 |
-} |
|
195 |
- |
|
196 |
-// called from disk_usage.go |
|
197 |
-func (i *imageService) LayerDiskUsage(ctx context.Context) (int64, error) { |
|
198 |
- var allLayersSize int64 |
|
199 |
- layerRefs := i.getLayerRefs() |
|
200 |
- for _, ls := range i.layerStores { |
|
201 |
- allLayers := ls.Map() |
|
202 |
- for _, l := range allLayers { |
|
203 |
- select { |
|
204 |
- case <-ctx.Done(): |
|
205 |
- return allLayersSize, ctx.Err() |
|
206 |
- default: |
|
207 |
- size, err := l.DiffSize() |
|
208 |
- if err == nil { |
|
209 |
- if _, ok := layerRefs[l.ChainID()]; ok { |
|
210 |
- allLayersSize += size |
|
211 |
- } else { |
|
212 |
- logrus.Warnf("found leaked image layer %v", l.ChainID()) |
|
213 |
- } |
|
214 |
- } else { |
|
215 |
- logrus.Warnf("failed to get diff size for layer %v", l.ChainID()) |
|
216 |
- } |
|
217 |
- } |
|
218 |
- } |
|
219 |
- } |
|
220 |
- return allLayersSize, nil |
|
221 |
-} |
|
222 |
- |
|
223 |
-func (i *imageService) getLayerRefs() map[layer.ChainID]int { |
|
224 |
- tmpImages := i.imageStore.Map() |
|
225 |
- layerRefs := map[layer.ChainID]int{} |
|
226 |
- for id, img := range tmpImages { |
|
227 |
- dgst := digest.Digest(id) |
|
228 |
- if len(i.referenceStore.References(dgst)) == 0 && len(i.imageStore.Children(id)) != 0 { |
|
229 |
- continue |
|
230 |
- } |
|
231 |
- |
|
232 |
- rootFS := *img.RootFS |
|
233 |
- rootFS.DiffIDs = nil |
|
234 |
- for _, id := range img.RootFS.DiffIDs { |
|
235 |
- rootFS.Append(id) |
|
236 |
- chid := rootFS.ChainID() |
|
237 |
- layerRefs[chid]++ |
|
238 |
- } |
|
239 |
- } |
|
240 |
- |
|
241 |
- return layerRefs |
|
242 |
-} |
|
243 |
- |
|
244 |
-// LogImageEvent generates an event related to an image with only the default attributes. |
|
245 |
-func (i *imageService) LogImageEvent(imageID, refName, action string) { |
|
246 |
- i.LogImageEventWithAttributes(imageID, refName, action, map[string]string{}) |
|
247 |
-} |
|
248 |
- |
|
249 |
-// LogImageEventWithAttributes generates an event related to an image with specific given attributes. |
|
250 |
-func (i *imageService) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { |
|
251 |
- img, err := i.GetImage(imageID) |
|
252 |
- if err == nil && img.Config != nil { |
|
253 |
- // image has not been removed yet. |
|
254 |
- // it could be missing if the event is `delete`. |
|
255 |
- copyAttributes(attributes, img.Config.Labels) |
|
256 |
- } |
|
257 |
- if refName != "" { |
|
258 |
- attributes["name"] = refName |
|
259 |
- } |
|
260 |
- actor := events.Actor{ |
|
261 |
- ID: imageID, |
|
262 |
- Attributes: attributes, |
|
263 |
- } |
|
264 |
- |
|
265 |
- i.eventsService.Log(action, events.ImageEventType, actor) |
|
266 |
-} |
267 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,220 +0,0 @@ |
1 |
-package daemon // import "github.com/docker/docker/daemon" |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "io" |
|
5 |
- |
|
6 |
- "github.com/docker/distribution/reference" |
|
7 |
- "github.com/docker/docker/api/types" |
|
8 |
- "github.com/docker/docker/api/types/backend" |
|
9 |
- "github.com/docker/docker/builder" |
|
10 |
- "github.com/docker/docker/image" |
|
11 |
- "github.com/docker/docker/image/cache" |
|
12 |
- "github.com/docker/docker/layer" |
|
13 |
- "github.com/docker/docker/pkg/containerfs" |
|
14 |
- "github.com/docker/docker/pkg/stringid" |
|
15 |
- "github.com/docker/docker/pkg/system" |
|
16 |
- "github.com/docker/docker/registry" |
|
17 |
- "github.com/pkg/errors" |
|
18 |
- "golang.org/x/net/context" |
|
19 |
-) |
|
20 |
- |
|
21 |
-type roLayer struct { |
|
22 |
- released bool |
|
23 |
- layerStore layer.Store |
|
24 |
- roLayer layer.Layer |
|
25 |
-} |
|
26 |
- |
|
27 |
-func (l *roLayer) DiffID() layer.DiffID { |
|
28 |
- if l.roLayer == nil { |
|
29 |
- return layer.DigestSHA256EmptyTar |
|
30 |
- } |
|
31 |
- return l.roLayer.DiffID() |
|
32 |
-} |
|
33 |
- |
|
34 |
-func (l *roLayer) Release() error { |
|
35 |
- if l.released { |
|
36 |
- return nil |
|
37 |
- } |
|
38 |
- if l.roLayer != nil { |
|
39 |
- metadata, err := l.layerStore.Release(l.roLayer) |
|
40 |
- layer.LogReleaseMetadata(metadata) |
|
41 |
- if err != nil { |
|
42 |
- return errors.Wrap(err, "failed to release ROLayer") |
|
43 |
- } |
|
44 |
- } |
|
45 |
- l.roLayer = nil |
|
46 |
- l.released = true |
|
47 |
- return nil |
|
48 |
-} |
|
49 |
- |
|
50 |
-func (l *roLayer) NewRWLayer() (builder.RWLayer, error) { |
|
51 |
- var chainID layer.ChainID |
|
52 |
- if l.roLayer != nil { |
|
53 |
- chainID = l.roLayer.ChainID() |
|
54 |
- } |
|
55 |
- |
|
56 |
- mountID := stringid.GenerateRandomID() |
|
57 |
- newLayer, err := l.layerStore.CreateRWLayer(mountID, chainID, nil) |
|
58 |
- if err != nil { |
|
59 |
- return nil, errors.Wrap(err, "failed to create rwlayer") |
|
60 |
- } |
|
61 |
- |
|
62 |
- rwLayer := &rwLayer{layerStore: l.layerStore, rwLayer: newLayer} |
|
63 |
- |
|
64 |
- fs, err := newLayer.Mount("") |
|
65 |
- if err != nil { |
|
66 |
- rwLayer.Release() |
|
67 |
- return nil, err |
|
68 |
- } |
|
69 |
- |
|
70 |
- rwLayer.fs = fs |
|
71 |
- |
|
72 |
- return rwLayer, nil |
|
73 |
-} |
|
74 |
- |
|
75 |
-type rwLayer struct { |
|
76 |
- released bool |
|
77 |
- layerStore layer.Store |
|
78 |
- rwLayer layer.RWLayer |
|
79 |
- fs containerfs.ContainerFS |
|
80 |
-} |
|
81 |
- |
|
82 |
-func (l *rwLayer) Root() containerfs.ContainerFS { |
|
83 |
- return l.fs |
|
84 |
-} |
|
85 |
- |
|
86 |
-func (l *rwLayer) Commit() (builder.ROLayer, error) { |
|
87 |
- stream, err := l.rwLayer.TarStream() |
|
88 |
- if err != nil { |
|
89 |
- return nil, err |
|
90 |
- } |
|
91 |
- defer stream.Close() |
|
92 |
- |
|
93 |
- var chainID layer.ChainID |
|
94 |
- if parent := l.rwLayer.Parent(); parent != nil { |
|
95 |
- chainID = parent.ChainID() |
|
96 |
- } |
|
97 |
- |
|
98 |
- newLayer, err := l.layerStore.Register(stream, chainID) |
|
99 |
- if err != nil { |
|
100 |
- return nil, err |
|
101 |
- } |
|
102 |
- // TODO: An optimization would be to handle empty layers before returning |
|
103 |
- return &roLayer{layerStore: l.layerStore, roLayer: newLayer}, nil |
|
104 |
-} |
|
105 |
- |
|
106 |
-func (l *rwLayer) Release() error { |
|
107 |
- if l.released { |
|
108 |
- return nil |
|
109 |
- } |
|
110 |
- |
|
111 |
- if l.fs != nil { |
|
112 |
- if err := l.rwLayer.Unmount(); err != nil { |
|
113 |
- return errors.Wrap(err, "failed to unmount RWLayer") |
|
114 |
- } |
|
115 |
- l.fs = nil |
|
116 |
- } |
|
117 |
- |
|
118 |
- metadata, err := l.layerStore.ReleaseRWLayer(l.rwLayer) |
|
119 |
- layer.LogReleaseMetadata(metadata) |
|
120 |
- if err != nil { |
|
121 |
- return errors.Wrap(err, "failed to release RWLayer") |
|
122 |
- } |
|
123 |
- l.released = true |
|
124 |
- return nil |
|
125 |
-} |
|
126 |
- |
|
127 |
-func newROLayerForImage(img *image.Image, layerStore layer.Store) (builder.ROLayer, error) { |
|
128 |
- if img == nil || img.RootFS.ChainID() == "" { |
|
129 |
- return &roLayer{layerStore: layerStore}, nil |
|
130 |
- } |
|
131 |
- // Hold a reference to the image layer so that it can't be removed before |
|
132 |
- // it is released |
|
133 |
- layer, err := layerStore.Get(img.RootFS.ChainID()) |
|
134 |
- if err != nil { |
|
135 |
- return nil, errors.Wrapf(err, "failed to get layer for image %s", img.ImageID()) |
|
136 |
- } |
|
137 |
- return &roLayer{layerStore: layerStore, roLayer: layer}, nil |
|
138 |
-} |
|
139 |
- |
|
140 |
-// TODO: could this use the regular daemon PullImage ? |
|
141 |
-func (i *imageService) pullForBuilder(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer, os string) (*image.Image, error) { |
|
142 |
- ref, err := reference.ParseNormalizedNamed(name) |
|
143 |
- if err != nil { |
|
144 |
- return nil, err |
|
145 |
- } |
|
146 |
- ref = reference.TagNameOnly(ref) |
|
147 |
- |
|
148 |
- pullRegistryAuth := &types.AuthConfig{} |
|
149 |
- if len(authConfigs) > 0 { |
|
150 |
- // The request came with a full auth config, use it |
|
151 |
- repoInfo, err := i.registryService.ResolveRepository(ref) |
|
152 |
- if err != nil { |
|
153 |
- return nil, err |
|
154 |
- } |
|
155 |
- |
|
156 |
- resolvedConfig := registry.ResolveAuthConfig(authConfigs, repoInfo.Index) |
|
157 |
- pullRegistryAuth = &resolvedConfig |
|
158 |
- } |
|
159 |
- |
|
160 |
- if err := i.pullImageWithReference(ctx, ref, os, nil, pullRegistryAuth, output); err != nil { |
|
161 |
- return nil, err |
|
162 |
- } |
|
163 |
- return i.GetImage(name) |
|
164 |
-} |
|
165 |
- |
|
166 |
-// GetImageAndReleasableLayer returns an image and releaseable layer for a reference or ID. |
|
167 |
-// Every call to GetImageAndReleasableLayer MUST call releasableLayer.Release() to prevent |
|
168 |
-// leaking of layers. |
|
169 |
-func (i *imageService) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ROLayer, error) { |
|
170 |
- if refOrID == "" { |
|
171 |
- if !system.IsOSSupported(opts.OS) { |
|
172 |
- return nil, nil, system.ErrNotSupportedOperatingSystem |
|
173 |
- } |
|
174 |
- layer, err := newROLayerForImage(nil, i.layerStores[opts.OS]) |
|
175 |
- return nil, layer, err |
|
176 |
- } |
|
177 |
- |
|
178 |
- if opts.PullOption != backend.PullOptionForcePull { |
|
179 |
- image, err := i.GetImage(refOrID) |
|
180 |
- if err != nil && opts.PullOption == backend.PullOptionNoPull { |
|
181 |
- return nil, nil, err |
|
182 |
- } |
|
183 |
- // TODO: shouldn't we error out if error is different from "not found" ? |
|
184 |
- if image != nil { |
|
185 |
- if !system.IsOSSupported(image.OperatingSystem()) { |
|
186 |
- return nil, nil, system.ErrNotSupportedOperatingSystem |
|
187 |
- } |
|
188 |
- layer, err := newROLayerForImage(image, i.layerStores[image.OperatingSystem()]) |
|
189 |
- return image, layer, err |
|
190 |
- } |
|
191 |
- } |
|
192 |
- |
|
193 |
- image, err := i.pullForBuilder(ctx, refOrID, opts.AuthConfig, opts.Output, opts.OS) |
|
194 |
- if err != nil { |
|
195 |
- return nil, nil, err |
|
196 |
- } |
|
197 |
- if !system.IsOSSupported(image.OperatingSystem()) { |
|
198 |
- return nil, nil, system.ErrNotSupportedOperatingSystem |
|
199 |
- } |
|
200 |
- layer, err := newROLayerForImage(image, i.layerStores[image.OperatingSystem()]) |
|
201 |
- return image, layer, err |
|
202 |
-} |
|
203 |
- |
|
204 |
-// CreateImage creates a new image by adding a config and ID to the image store. |
|
205 |
-// This is similar to LoadImage() except that it receives JSON encoded bytes of |
|
206 |
-// an image instead of a tar archive. |
|
207 |
-func (i *imageService) CreateImage(config []byte, parent string) (builder.Image, error) { |
|
208 |
- id, err := i.imageStore.Create(config) |
|
209 |
- if err != nil { |
|
210 |
- return nil, errors.Wrapf(err, "failed to create image") |
|
211 |
- } |
|
212 |
- |
|
213 |
- if parent != "" { |
|
214 |
- if err := i.imageStore.SetParent(id, image.ID(parent)); err != nil { |
|
215 |
- return nil, errors.Wrapf(err, "failed to set parent %s", parent) |
|
216 |
- } |
|
217 |
- } |
|
218 |
- |
|
219 |
- return i.imageStore.Get(id) |
|
220 |
-} |
221 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,127 +0,0 @@ |
1 |
-package daemon // import "github.com/docker/docker/daemon" |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "encoding/json" |
|
5 |
- "io" |
|
6 |
- |
|
7 |
- "github.com/docker/docker/api/types/backend" |
|
8 |
- "github.com/docker/docker/image" |
|
9 |
- "github.com/docker/docker/layer" |
|
10 |
- "github.com/docker/docker/pkg/ioutils" |
|
11 |
- "github.com/docker/docker/pkg/system" |
|
12 |
- "github.com/pkg/errors" |
|
13 |
-) |
|
14 |
- |
|
15 |
-// CommitImage creates a new image from a commit config |
|
16 |
-func (i *imageService) CommitImage(c backend.CommitConfig) (image.ID, error) { |
|
17 |
- layerStore, ok := i.layerStores[c.ContainerOS] |
|
18 |
- if !ok { |
|
19 |
- return "", system.ErrNotSupportedOperatingSystem |
|
20 |
- } |
|
21 |
- rwTar, err := exportContainerRw(layerStore, c.ContainerID, c.ContainerMountLabel) |
|
22 |
- if err != nil { |
|
23 |
- return "", err |
|
24 |
- } |
|
25 |
- defer func() { |
|
26 |
- if rwTar != nil { |
|
27 |
- rwTar.Close() |
|
28 |
- } |
|
29 |
- }() |
|
30 |
- |
|
31 |
- var parent *image.Image |
|
32 |
- if c.ParentImageID == "" { |
|
33 |
- parent = new(image.Image) |
|
34 |
- parent.RootFS = image.NewRootFS() |
|
35 |
- } else { |
|
36 |
- parent, err = i.imageStore.Get(image.ID(c.ParentImageID)) |
|
37 |
- if err != nil { |
|
38 |
- return "", err |
|
39 |
- } |
|
40 |
- } |
|
41 |
- |
|
42 |
- l, err := layerStore.Register(rwTar, parent.RootFS.ChainID()) |
|
43 |
- if err != nil { |
|
44 |
- return "", err |
|
45 |
- } |
|
46 |
- defer layer.ReleaseAndLog(layerStore, l) |
|
47 |
- |
|
48 |
- cc := image.ChildConfig{ |
|
49 |
- ContainerID: c.ContainerID, |
|
50 |
- Author: c.Author, |
|
51 |
- Comment: c.Comment, |
|
52 |
- ContainerConfig: c.ContainerConfig, |
|
53 |
- Config: c.Config, |
|
54 |
- DiffID: l.DiffID(), |
|
55 |
- } |
|
56 |
- config, err := json.Marshal(image.NewChildImage(parent, cc, c.ContainerOS)) |
|
57 |
- if err != nil { |
|
58 |
- return "", err |
|
59 |
- } |
|
60 |
- |
|
61 |
- id, err := i.imageStore.Create(config) |
|
62 |
- if err != nil { |
|
63 |
- return "", err |
|
64 |
- } |
|
65 |
- |
|
66 |
- if c.ParentImageID != "" { |
|
67 |
- if err := i.imageStore.SetParent(id, image.ID(c.ParentImageID)); err != nil { |
|
68 |
- return "", err |
|
69 |
- } |
|
70 |
- } |
|
71 |
- return id, nil |
|
72 |
-} |
|
73 |
- |
|
74 |
-func exportContainerRw(layerStore layer.Store, id, mountLabel string) (arch io.ReadCloser, err error) { |
|
75 |
- rwlayer, err := layerStore.GetRWLayer(id) |
|
76 |
- if err != nil { |
|
77 |
- return nil, err |
|
78 |
- } |
|
79 |
- defer func() { |
|
80 |
- if err != nil { |
|
81 |
- layerStore.ReleaseRWLayer(rwlayer) |
|
82 |
- } |
|
83 |
- }() |
|
84 |
- |
|
85 |
- // TODO: this mount call is not necessary as we assume that TarStream() should |
|
86 |
- // mount the layer if needed. But the Diff() function for windows requests that |
|
87 |
- // the layer should be mounted when calling it. So we reserve this mount call |
|
88 |
- // until windows driver can implement Diff() interface correctly. |
|
89 |
- _, err = rwlayer.Mount(mountLabel) |
|
90 |
- if err != nil { |
|
91 |
- return nil, err |
|
92 |
- } |
|
93 |
- |
|
94 |
- archive, err := rwlayer.TarStream() |
|
95 |
- if err != nil { |
|
96 |
- rwlayer.Unmount() |
|
97 |
- return nil, err |
|
98 |
- } |
|
99 |
- return ioutils.NewReadCloserWrapper(archive, func() error { |
|
100 |
- archive.Close() |
|
101 |
- err = rwlayer.Unmount() |
|
102 |
- layerStore.ReleaseRWLayer(rwlayer) |
|
103 |
- return err |
|
104 |
- }), |
|
105 |
- nil |
|
106 |
-} |
|
107 |
- |
|
108 |
-// CommitBuildStep is used by the builder to create an image for each step in |
|
109 |
-// the build. |
|
110 |
-// |
|
111 |
-// This method is different from CreateImageFromContainer: |
|
112 |
-// * it doesn't attempt to validate container state |
|
113 |
-// * it doesn't send a commit action to metrics |
|
114 |
-// * it doesn't log a container commit event |
|
115 |
-// |
|
116 |
-// This is a temporary shim. Should be removed when builder stops using commit. |
|
117 |
-func (i *imageService) CommitBuildStep(c backend.CommitConfig) (image.ID, error) { |
|
118 |
- container := i.containers.Get(c.ContainerID) |
|
119 |
- if container == nil { |
|
120 |
- // TODO: use typed error |
|
121 |
- return "", errors.Errorf("container not found: %s", c.ContainerID) |
|
122 |
- } |
|
123 |
- c.ContainerMountLabel = container.MountLabel |
|
124 |
- c.ContainerOS = container.OS |
|
125 |
- c.ParentImageID = string(container.ImageID) |
|
126 |
- return i.CommitImage(c) |
|
127 |
-} |
128 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,413 +0,0 @@ |
1 |
-package daemon // import "github.com/docker/docker/daemon" |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "fmt" |
|
5 |
- "strings" |
|
6 |
- "time" |
|
7 |
- |
|
8 |
- "github.com/docker/distribution/reference" |
|
9 |
- "github.com/docker/docker/api/types" |
|
10 |
- "github.com/docker/docker/container" |
|
11 |
- "github.com/docker/docker/errdefs" |
|
12 |
- "github.com/docker/docker/image" |
|
13 |
- "github.com/docker/docker/pkg/stringid" |
|
14 |
- "github.com/docker/docker/pkg/system" |
|
15 |
- "github.com/pkg/errors" |
|
16 |
-) |
|
17 |
- |
|
18 |
-type conflictType int |
|
19 |
- |
|
20 |
-const ( |
|
21 |
- conflictDependentChild conflictType = (1 << iota) |
|
22 |
- conflictRunningContainer |
|
23 |
- conflictActiveReference |
|
24 |
- conflictStoppedContainer |
|
25 |
- conflictHard = conflictDependentChild | conflictRunningContainer |
|
26 |
- conflictSoft = conflictActiveReference | conflictStoppedContainer |
|
27 |
-) |
|
28 |
- |
|
29 |
-// ImageDelete deletes the image referenced by the given imageRef from this |
|
30 |
-// daemon. The given imageRef can be an image ID, ID prefix, or a repository |
|
31 |
-// reference (with an optional tag or digest, defaulting to the tag name |
|
32 |
-// "latest"). There is differing behavior depending on whether the given |
|
33 |
-// imageRef is a repository reference or not. |
|
34 |
-// |
|
35 |
-// If the given imageRef is a repository reference then that repository |
|
36 |
-// reference will be removed. However, if there exists any containers which |
|
37 |
-// were created using the same image reference then the repository reference |
|
38 |
-// cannot be removed unless either there are other repository references to the |
|
39 |
-// same image or force is true. Following removal of the repository reference, |
|
40 |
-// the referenced image itself will attempt to be deleted as described below |
|
41 |
-// but quietly, meaning any image delete conflicts will cause the image to not |
|
42 |
-// be deleted and the conflict will not be reported. |
|
43 |
-// |
|
44 |
-// There may be conflicts preventing deletion of an image and these conflicts |
|
45 |
-// are divided into two categories grouped by their severity: |
|
46 |
-// |
|
47 |
-// Hard Conflict: |
|
48 |
-// - a pull or build using the image. |
|
49 |
-// - any descendant image. |
|
50 |
-// - any running container using the image. |
|
51 |
-// |
|
52 |
-// Soft Conflict: |
|
53 |
-// - any stopped container using the image. |
|
54 |
-// - any repository tag or digest references to the image. |
|
55 |
-// |
|
56 |
-// The image cannot be removed if there are any hard conflicts and can be |
|
57 |
-// removed if there are soft conflicts only if force is true. |
|
58 |
-// |
|
59 |
-// If prune is true, ancestor images will each attempt to be deleted quietly, |
|
60 |
-// meaning any delete conflicts will cause the image to not be deleted and the |
|
61 |
-// conflict will not be reported. |
|
62 |
-// |
|
63 |
-func (i *imageService) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) { |
|
64 |
- start := time.Now() |
|
65 |
- records := []types.ImageDeleteResponseItem{} |
|
66 |
- |
|
67 |
- imgID, operatingSystem, err := i.GetImageIDAndOS(imageRef) |
|
68 |
- if err != nil { |
|
69 |
- return nil, err |
|
70 |
- } |
|
71 |
- if !system.IsOSSupported(operatingSystem) { |
|
72 |
- return nil, errors.Errorf("unable to delete image: %q", system.ErrNotSupportedOperatingSystem) |
|
73 |
- } |
|
74 |
- |
|
75 |
- repoRefs := i.referenceStore.References(imgID.Digest()) |
|
76 |
- |
|
77 |
- using := func(c *container.Container) bool { |
|
78 |
- return c.ImageID == imgID |
|
79 |
- } |
|
80 |
- |
|
81 |
- var removedRepositoryRef bool |
|
82 |
- if !isImageIDPrefix(imgID.String(), imageRef) { |
|
83 |
- // A repository reference was given and should be removed |
|
84 |
- // first. We can only remove this reference if either force is |
|
85 |
- // true, there are multiple repository references to this |
|
86 |
- // image, or there are no containers using the given reference. |
|
87 |
- if !force && isSingleReference(repoRefs) { |
|
88 |
- if container := i.containers.First(using); container != nil { |
|
89 |
- // If we removed the repository reference then |
|
90 |
- // this image would remain "dangling" and since |
|
91 |
- // we really want to avoid that the client must |
|
92 |
- // explicitly force its removal. |
|
93 |
- err := errors.Errorf("conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String())) |
|
94 |
- return nil, errdefs.Conflict(err) |
|
95 |
- } |
|
96 |
- } |
|
97 |
- |
|
98 |
- parsedRef, err := reference.ParseNormalizedNamed(imageRef) |
|
99 |
- if err != nil { |
|
100 |
- return nil, err |
|
101 |
- } |
|
102 |
- |
|
103 |
- parsedRef, err = i.removeImageRef(parsedRef) |
|
104 |
- if err != nil { |
|
105 |
- return nil, err |
|
106 |
- } |
|
107 |
- |
|
108 |
- untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} |
|
109 |
- |
|
110 |
- i.LogImageEvent(imgID.String(), imgID.String(), "untag") |
|
111 |
- records = append(records, untaggedRecord) |
|
112 |
- |
|
113 |
- repoRefs = i.referenceStore.References(imgID.Digest()) |
|
114 |
- |
|
115 |
- // If a tag reference was removed and the only remaining |
|
116 |
- // references to the same repository are digest references, |
|
117 |
- // then clean up those digest references. |
|
118 |
- if _, isCanonical := parsedRef.(reference.Canonical); !isCanonical { |
|
119 |
- foundRepoTagRef := false |
|
120 |
- for _, repoRef := range repoRefs { |
|
121 |
- if _, repoRefIsCanonical := repoRef.(reference.Canonical); !repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { |
|
122 |
- foundRepoTagRef = true |
|
123 |
- break |
|
124 |
- } |
|
125 |
- } |
|
126 |
- if !foundRepoTagRef { |
|
127 |
- // Remove canonical references from same repository |
|
128 |
- remainingRefs := []reference.Named{} |
|
129 |
- for _, repoRef := range repoRefs { |
|
130 |
- if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { |
|
131 |
- if _, err := i.removeImageRef(repoRef); err != nil { |
|
132 |
- return records, err |
|
133 |
- } |
|
134 |
- |
|
135 |
- untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(repoRef)} |
|
136 |
- records = append(records, untaggedRecord) |
|
137 |
- } else { |
|
138 |
- remainingRefs = append(remainingRefs, repoRef) |
|
139 |
- |
|
140 |
- } |
|
141 |
- } |
|
142 |
- repoRefs = remainingRefs |
|
143 |
- } |
|
144 |
- } |
|
145 |
- |
|
146 |
- // If it has remaining references then the untag finished the remove |
|
147 |
- if len(repoRefs) > 0 { |
|
148 |
- return records, nil |
|
149 |
- } |
|
150 |
- |
|
151 |
- removedRepositoryRef = true |
|
152 |
- } else { |
|
153 |
- // If an ID reference was given AND there is at most one tag |
|
154 |
- // reference to the image AND all references are within one |
|
155 |
- // repository, then remove all references. |
|
156 |
- if isSingleReference(repoRefs) { |
|
157 |
- c := conflictHard |
|
158 |
- if !force { |
|
159 |
- c |= conflictSoft &^ conflictActiveReference |
|
160 |
- } |
|
161 |
- if conflict := i.checkImageDeleteConflict(imgID, c); conflict != nil { |
|
162 |
- return nil, conflict |
|
163 |
- } |
|
164 |
- |
|
165 |
- for _, repoRef := range repoRefs { |
|
166 |
- parsedRef, err := i.removeImageRef(repoRef) |
|
167 |
- if err != nil { |
|
168 |
- return nil, err |
|
169 |
- } |
|
170 |
- |
|
171 |
- untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} |
|
172 |
- |
|
173 |
- i.LogImageEvent(imgID.String(), imgID.String(), "untag") |
|
174 |
- records = append(records, untaggedRecord) |
|
175 |
- } |
|
176 |
- } |
|
177 |
- } |
|
178 |
- |
|
179 |
- if err := i.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef); err != nil { |
|
180 |
- return nil, err |
|
181 |
- } |
|
182 |
- |
|
183 |
- imageActions.WithValues("delete").UpdateSince(start) |
|
184 |
- |
|
185 |
- return records, nil |
|
186 |
-} |
|
187 |
- |
|
188 |
-// isSingleReference returns true when all references are from one repository |
|
189 |
-// and there is at most one tag. Returns false for empty input. |
|
190 |
-func isSingleReference(repoRefs []reference.Named) bool { |
|
191 |
- if len(repoRefs) <= 1 { |
|
192 |
- return len(repoRefs) == 1 |
|
193 |
- } |
|
194 |
- var singleRef reference.Named |
|
195 |
- canonicalRefs := map[string]struct{}{} |
|
196 |
- for _, repoRef := range repoRefs { |
|
197 |
- if _, isCanonical := repoRef.(reference.Canonical); isCanonical { |
|
198 |
- canonicalRefs[repoRef.Name()] = struct{}{} |
|
199 |
- } else if singleRef == nil { |
|
200 |
- singleRef = repoRef |
|
201 |
- } else { |
|
202 |
- return false |
|
203 |
- } |
|
204 |
- } |
|
205 |
- if singleRef == nil { |
|
206 |
- // Just use first canonical ref |
|
207 |
- singleRef = repoRefs[0] |
|
208 |
- } |
|
209 |
- _, ok := canonicalRefs[singleRef.Name()] |
|
210 |
- return len(canonicalRefs) == 1 && ok |
|
211 |
-} |
|
212 |
- |
|
213 |
-// isImageIDPrefix returns whether the given possiblePrefix is a prefix of the |
|
214 |
-// given imageID. |
|
215 |
-func isImageIDPrefix(imageID, possiblePrefix string) bool { |
|
216 |
- if strings.HasPrefix(imageID, possiblePrefix) { |
|
217 |
- return true |
|
218 |
- } |
|
219 |
- |
|
220 |
- if i := strings.IndexRune(imageID, ':'); i >= 0 { |
|
221 |
- return strings.HasPrefix(imageID[i+1:], possiblePrefix) |
|
222 |
- } |
|
223 |
- |
|
224 |
- return false |
|
225 |
-} |
|
226 |
- |
|
227 |
-// removeImageRef attempts to parse and remove the given image reference from |
|
228 |
-// this daemon's store of repository tag/digest references. The given |
|
229 |
-// repositoryRef must not be an image ID but a repository name followed by an |
|
230 |
-// optional tag or digest reference. If tag or digest is omitted, the default |
|
231 |
-// tag is used. Returns the resolved image reference and an error. |
|
232 |
-func (i *imageService) removeImageRef(ref reference.Named) (reference.Named, error) { |
|
233 |
- ref = reference.TagNameOnly(ref) |
|
234 |
- |
|
235 |
- // Ignore the boolean value returned, as far as we're concerned, this |
|
236 |
- // is an idempotent operation and it's okay if the reference didn't |
|
237 |
- // exist in the first place. |
|
238 |
- _, err := i.referenceStore.Delete(ref) |
|
239 |
- |
|
240 |
- return ref, err |
|
241 |
-} |
|
242 |
- |
|
243 |
-// removeAllReferencesToImageID attempts to remove every reference to the given |
|
244 |
-// imgID from this daemon's store of repository tag/digest references. Returns |
|
245 |
-// on the first encountered error. Removed references are logged to this |
|
246 |
-// daemon's event service. An "Untagged" types.ImageDeleteResponseItem is added to the |
|
247 |
-// given list of records. |
|
248 |
-func (i *imageService) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDeleteResponseItem) error { |
|
249 |
- imageRefs := i.referenceStore.References(imgID.Digest()) |
|
250 |
- |
|
251 |
- for _, imageRef := range imageRefs { |
|
252 |
- parsedRef, err := i.removeImageRef(imageRef) |
|
253 |
- if err != nil { |
|
254 |
- return err |
|
255 |
- } |
|
256 |
- |
|
257 |
- untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} |
|
258 |
- |
|
259 |
- i.LogImageEvent(imgID.String(), imgID.String(), "untag") |
|
260 |
- *records = append(*records, untaggedRecord) |
|
261 |
- } |
|
262 |
- |
|
263 |
- return nil |
|
264 |
-} |
|
265 |
- |
|
266 |
-// ImageDeleteConflict holds a soft or hard conflict and an associated error. |
|
267 |
-// Implements the error interface. |
|
268 |
-type imageDeleteConflict struct { |
|
269 |
- hard bool |
|
270 |
- used bool |
|
271 |
- imgID image.ID |
|
272 |
- message string |
|
273 |
-} |
|
274 |
- |
|
275 |
-func (idc *imageDeleteConflict) Error() string { |
|
276 |
- var forceMsg string |
|
277 |
- if idc.hard { |
|
278 |
- forceMsg = "cannot be forced" |
|
279 |
- } else { |
|
280 |
- forceMsg = "must be forced" |
|
281 |
- } |
|
282 |
- |
|
283 |
- return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", stringid.TruncateID(idc.imgID.String()), forceMsg, idc.message) |
|
284 |
-} |
|
285 |
- |
|
286 |
-func (idc *imageDeleteConflict) Conflict() {} |
|
287 |
- |
|
288 |
-// imageDeleteHelper attempts to delete the given image from this daemon. If |
|
289 |
-// the image has any hard delete conflicts (child images or running containers |
|
290 |
-// using the image) then it cannot be deleted. If the image has any soft delete |
|
291 |
-// conflicts (any tags/digests referencing the image or any stopped container |
|
292 |
-// using the image) then it can only be deleted if force is true. If the delete |
|
293 |
-// succeeds and prune is true, the parent images are also deleted if they do |
|
294 |
-// not have any soft or hard delete conflicts themselves. Any deleted images |
|
295 |
-// and untagged references are appended to the given records. If any error or |
|
296 |
-// conflict is encountered, it will be returned immediately without deleting |
|
297 |
-// the image. If quiet is true, any encountered conflicts will be ignored and |
|
298 |
-// the function will return nil immediately without deleting the image. |
|
299 |
-func (i *imageService) imageDeleteHelper(imgID image.ID, records *[]types.ImageDeleteResponseItem, force, prune, quiet bool) error { |
|
300 |
- // First, determine if this image has any conflicts. Ignore soft conflicts |
|
301 |
- // if force is true. |
|
302 |
- c := conflictHard |
|
303 |
- if !force { |
|
304 |
- c |= conflictSoft |
|
305 |
- } |
|
306 |
- if conflict := i.checkImageDeleteConflict(imgID, c); conflict != nil { |
|
307 |
- if quiet && (!i.imageIsDangling(imgID) || conflict.used) { |
|
308 |
- // Ignore conflicts UNLESS the image is "dangling" or not being used in |
|
309 |
- // which case we want the user to know. |
|
310 |
- return nil |
|
311 |
- } |
|
312 |
- |
|
313 |
- // There was a conflict and it's either a hard conflict OR we are not |
|
314 |
- // forcing deletion on soft conflicts. |
|
315 |
- return conflict |
|
316 |
- } |
|
317 |
- |
|
318 |
- parent, err := i.imageStore.GetParent(imgID) |
|
319 |
- if err != nil { |
|
320 |
- // There may be no parent |
|
321 |
- parent = "" |
|
322 |
- } |
|
323 |
- |
|
324 |
- // Delete all repository tag/digest references to this image. |
|
325 |
- if err := i.removeAllReferencesToImageID(imgID, records); err != nil { |
|
326 |
- return err |
|
327 |
- } |
|
328 |
- |
|
329 |
- removedLayers, err := i.imageStore.Delete(imgID) |
|
330 |
- if err != nil { |
|
331 |
- return err |
|
332 |
- } |
|
333 |
- |
|
334 |
- i.LogImageEvent(imgID.String(), imgID.String(), "delete") |
|
335 |
- *records = append(*records, types.ImageDeleteResponseItem{Deleted: imgID.String()}) |
|
336 |
- for _, removedLayer := range removedLayers { |
|
337 |
- *records = append(*records, types.ImageDeleteResponseItem{Deleted: removedLayer.ChainID.String()}) |
|
338 |
- } |
|
339 |
- |
|
340 |
- if !prune || parent == "" { |
|
341 |
- return nil |
|
342 |
- } |
|
343 |
- |
|
344 |
- // We need to prune the parent image. This means delete it if there are |
|
345 |
- // no tags/digests referencing it and there are no containers using it ( |
|
346 |
- // either running or stopped). |
|
347 |
- // Do not force prunings, but do so quietly (stopping on any encountered |
|
348 |
- // conflicts). |
|
349 |
- return i.imageDeleteHelper(parent, records, false, true, true) |
|
350 |
-} |
|
351 |
- |
|
352 |
-// checkImageDeleteConflict determines whether there are any conflicts |
|
353 |
-// preventing deletion of the given image from this daemon. A hard conflict is |
|
354 |
-// any image which has the given image as a parent or any running container |
|
355 |
-// using the image. A soft conflict is any tags/digest referencing the given |
|
356 |
-// image or any stopped container using the image. If ignoreSoftConflicts is |
|
357 |
-// true, this function will not check for soft conflict conditions. |
|
358 |
-func (i *imageService) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict { |
|
359 |
- // Check if the image has any descendant images. |
|
360 |
- if mask&conflictDependentChild != 0 && len(i.imageStore.Children(imgID)) > 0 { |
|
361 |
- return &imageDeleteConflict{ |
|
362 |
- hard: true, |
|
363 |
- imgID: imgID, |
|
364 |
- message: "image has dependent child images", |
|
365 |
- } |
|
366 |
- } |
|
367 |
- |
|
368 |
- if mask&conflictRunningContainer != 0 { |
|
369 |
- // Check if any running container is using the image. |
|
370 |
- running := func(c *container.Container) bool { |
|
371 |
- return c.IsRunning() && c.ImageID == imgID |
|
372 |
- } |
|
373 |
- if container := i.containers.First(running); container != nil { |
|
374 |
- return &imageDeleteConflict{ |
|
375 |
- imgID: imgID, |
|
376 |
- hard: true, |
|
377 |
- used: true, |
|
378 |
- message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)), |
|
379 |
- } |
|
380 |
- } |
|
381 |
- } |
|
382 |
- |
|
383 |
- // Check if any repository tags/digest reference this image. |
|
384 |
- if mask&conflictActiveReference != 0 && len(i.referenceStore.References(imgID.Digest())) > 0 { |
|
385 |
- return &imageDeleteConflict{ |
|
386 |
- imgID: imgID, |
|
387 |
- message: "image is referenced in multiple repositories", |
|
388 |
- } |
|
389 |
- } |
|
390 |
- |
|
391 |
- if mask&conflictStoppedContainer != 0 { |
|
392 |
- // Check if any stopped containers reference this image. |
|
393 |
- stopped := func(c *container.Container) bool { |
|
394 |
- return !c.IsRunning() && c.ImageID == imgID |
|
395 |
- } |
|
396 |
- if container := i.containers.First(stopped); container != nil { |
|
397 |
- return &imageDeleteConflict{ |
|
398 |
- imgID: imgID, |
|
399 |
- used: true, |
|
400 |
- message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), |
|
401 |
- } |
|
402 |
- } |
|
403 |
- } |
|
404 |
- |
|
405 |
- return nil |
|
406 |
-} |
|
407 |
- |
|
408 |
-// imageIsDangling returns whether the given image is "dangling" which means |
|
409 |
-// that there are no repository references to the given image and it has no |
|
410 |
-// child images. |
|
411 |
-func (i *imageService) imageIsDangling(imgID image.ID) bool { |
|
412 |
- return !(len(i.referenceStore.References(imgID.Digest())) > 0 || len(i.imageStore.Children(imgID)) > 0) |
|
413 |
-} |
414 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,29 +0,0 @@ |
1 |
-package daemon // import "github.com/docker/docker/daemon" |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "github.com/docker/docker/api/types/events" |
|
5 |
-) |
|
6 |
- |
|
7 |
-// LogImageEvent generates an event related to an image with only the default attributes. |
|
8 |
-func (daemon *Daemon) LogImageEvent(imageID, refName, action string) { |
|
9 |
- daemon.LogImageEventWithAttributes(imageID, refName, action, map[string]string{}) |
|
10 |
-} |
|
11 |
- |
|
12 |
-// LogImageEventWithAttributes generates an event related to an image with specific given attributes. |
|
13 |
-func (daemon *Daemon) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { |
|
14 |
- img, err := daemon.GetImage(imageID) |
|
15 |
- if err == nil && img.Config != nil { |
|
16 |
- // image has not been removed yet. |
|
17 |
- // it could be missing if the event is `delete`. |
|
18 |
- copyAttributes(attributes, img.Config.Labels) |
|
19 |
- } |
|
20 |
- if refName != "" { |
|
21 |
- attributes["name"] = refName |
|
22 |
- } |
|
23 |
- actor := events.Actor{ |
|
24 |
- ID: imageID, |
|
25 |
- Attributes: attributes, |
|
26 |
- } |
|
27 |
- |
|
28 |
- daemon.EventsService.Log(action, events.ImageEventType, actor) |
|
29 |
-} |
30 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,25 +0,0 @@ |
1 |
-package daemon // import "github.com/docker/docker/daemon" |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "io" |
|
5 |
- |
|
6 |
- "github.com/docker/docker/image/tarexport" |
|
7 |
-) |
|
8 |
- |
|
9 |
-// ExportImage exports a list of images to the given output stream. The |
|
10 |
-// exported images are archived into a tar when written to the output |
|
11 |
-// stream. All images with the given tag and all versions containing |
|
12 |
-// the same tag are exported. names is the set of tags to export, and |
|
13 |
-// outStream is the writer which the images are written to. |
|
14 |
-func (i *imageService) ExportImage(names []string, outStream io.Writer) error { |
|
15 |
- imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStores, i.referenceStore, i) |
|
16 |
- return imageExporter.Save(names, outStream) |
|
17 |
-} |
|
18 |
- |
|
19 |
-// LoadImage uploads a set of images into the repository. This is the |
|
20 |
-// complement of ImageExport. The input stream is an uncompressed tar |
|
21 |
-// ball containing images and metadata. |
|
22 |
-func (i *imageService) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { |
|
23 |
- imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStores, i.referenceStore, i) |
|
24 |
- return imageExporter.Load(inTar, outStream, quiet) |
|
25 |
-} |
26 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,45 +0,0 @@ |
1 |
-// +build linux freebsd |
|
2 |
- |
|
3 |
-package daemon // import "github.com/docker/docker/daemon" |
|
4 |
- |
|
5 |
-import ( |
|
6 |
- "runtime" |
|
7 |
- |
|
8 |
- "github.com/sirupsen/logrus" |
|
9 |
-) |
|
10 |
- |
|
11 |
-// getSize returns the real size & virtual size of the container. |
|
12 |
-func (i *imageService) GetContainerLayerSize(containerID string) (int64, int64) { |
|
13 |
- var ( |
|
14 |
- sizeRw, sizeRootfs int64 |
|
15 |
- err error |
|
16 |
- ) |
|
17 |
- |
|
18 |
- // Safe to index by runtime.GOOS as Unix hosts don't support multiple |
|
19 |
- // container operating systems. |
|
20 |
- rwlayer, err := i.layerStores[runtime.GOOS].GetRWLayer(containerID) |
|
21 |
- if err != nil { |
|
22 |
- logrus.Errorf("Failed to compute size of container rootfs %v: %v", containerID, err) |
|
23 |
- return sizeRw, sizeRootfs |
|
24 |
- } |
|
25 |
- defer i.layerStores[runtime.GOOS].ReleaseRWLayer(rwlayer) |
|
26 |
- |
|
27 |
- sizeRw, err = rwlayer.Size() |
|
28 |
- if err != nil { |
|
29 |
- logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", |
|
30 |
- i.layerStores[runtime.GOOS].DriverName(), containerID, err) |
|
31 |
- // FIXME: GetSize should return an error. Not changing it now in case |
|
32 |
- // there is a side-effect. |
|
33 |
- sizeRw = -1 |
|
34 |
- } |
|
35 |
- |
|
36 |
- if parent := rwlayer.Parent(); parent != nil { |
|
37 |
- sizeRootfs, err = parent.Size() |
|
38 |
- if err != nil { |
|
39 |
- sizeRootfs = -1 |
|
40 |
- } else if sizeRw != -1 { |
|
41 |
- sizeRootfs += sizeRw |
|
42 |
- } |
|
43 |
- } |
|
44 |
- return sizeRw, sizeRootfs |
|
45 |
-} |
46 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,84 +0,0 @@ |
1 |
-package daemon // import "github.com/docker/docker/daemon" |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "fmt" |
|
5 |
- "time" |
|
6 |
- |
|
7 |
- "github.com/docker/distribution/reference" |
|
8 |
- "github.com/docker/docker/api/types/image" |
|
9 |
- "github.com/docker/docker/layer" |
|
10 |
-) |
|
11 |
- |
|
12 |
-// ImageHistory returns a slice of ImageHistory structures for the specified image |
|
13 |
-// name by walking the image lineage. |
|
14 |
-func (i *imageService) ImageHistory(name string) ([]*image.HistoryResponseItem, error) { |
|
15 |
- start := time.Now() |
|
16 |
- img, err := i.GetImage(name) |
|
17 |
- if err != nil { |
|
18 |
- return nil, err |
|
19 |
- } |
|
20 |
- |
|
21 |
- history := []*image.HistoryResponseItem{} |
|
22 |
- |
|
23 |
- layerCounter := 0 |
|
24 |
- rootFS := *img.RootFS |
|
25 |
- rootFS.DiffIDs = nil |
|
26 |
- |
|
27 |
- for _, h := range img.History { |
|
28 |
- var layerSize int64 |
|
29 |
- |
|
30 |
- if !h.EmptyLayer { |
|
31 |
- if len(img.RootFS.DiffIDs) <= layerCounter { |
|
32 |
- return nil, fmt.Errorf("too many non-empty layers in History section") |
|
33 |
- } |
|
34 |
- |
|
35 |
- rootFS.Append(img.RootFS.DiffIDs[layerCounter]) |
|
36 |
- l, err := i.layerStores[img.OperatingSystem()].Get(rootFS.ChainID()) |
|
37 |
- if err != nil { |
|
38 |
- return nil, err |
|
39 |
- } |
|
40 |
- layerSize, err = l.DiffSize() |
|
41 |
- layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l) |
|
42 |
- if err != nil { |
|
43 |
- return nil, err |
|
44 |
- } |
|
45 |
- |
|
46 |
- layerCounter++ |
|
47 |
- } |
|
48 |
- |
|
49 |
- history = append([]*image.HistoryResponseItem{{ |
|
50 |
- ID: "<missing>", |
|
51 |
- Created: h.Created.Unix(), |
|
52 |
- CreatedBy: h.CreatedBy, |
|
53 |
- Comment: h.Comment, |
|
54 |
- Size: layerSize, |
|
55 |
- }}, history...) |
|
56 |
- } |
|
57 |
- |
|
58 |
- // Fill in image IDs and tags |
|
59 |
- histImg := img |
|
60 |
- id := img.ID() |
|
61 |
- for _, h := range history { |
|
62 |
- h.ID = id.String() |
|
63 |
- |
|
64 |
- var tags []string |
|
65 |
- for _, r := range i.referenceStore.References(id.Digest()) { |
|
66 |
- if _, ok := r.(reference.NamedTagged); ok { |
|
67 |
- tags = append(tags, reference.FamiliarString(r)) |
|
68 |
- } |
|
69 |
- } |
|
70 |
- |
|
71 |
- h.Tags = tags |
|
72 |
- |
|
73 |
- id = histImg.Parent |
|
74 |
- if id == "" { |
|
75 |
- break |
|
76 |
- } |
|
77 |
- histImg, err = i.GetImage(id.String()) |
|
78 |
- if err != nil { |
|
79 |
- break |
|
80 |
- } |
|
81 |
- } |
|
82 |
- imageActions.WithValues("history").UpdateSince(start) |
|
83 |
- return history, nil |
|
84 |
-} |
85 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,138 +0,0 @@ |
1 |
-package daemon // import "github.com/docker/docker/daemon" |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "encoding/json" |
|
5 |
- "io" |
|
6 |
- "net/http" |
|
7 |
- "net/url" |
|
8 |
- "runtime" |
|
9 |
- "strings" |
|
10 |
- "time" |
|
11 |
- |
|
12 |
- "github.com/docker/distribution/reference" |
|
13 |
- "github.com/docker/docker/api/types/container" |
|
14 |
- "github.com/docker/docker/builder/dockerfile" |
|
15 |
- "github.com/docker/docker/builder/remotecontext" |
|
16 |
- "github.com/docker/docker/dockerversion" |
|
17 |
- "github.com/docker/docker/errdefs" |
|
18 |
- "github.com/docker/docker/image" |
|
19 |
- "github.com/docker/docker/layer" |
|
20 |
- "github.com/docker/docker/pkg/archive" |
|
21 |
- "github.com/docker/docker/pkg/progress" |
|
22 |
- "github.com/docker/docker/pkg/streamformatter" |
|
23 |
- "github.com/pkg/errors" |
|
24 |
-) |
|
25 |
- |
|
26 |
-// ImportImage imports an image, getting the archived layer data either from |
|
27 |
-// inConfig (if src is "-"), or from a URI specified in src. Progress output is |
|
28 |
-// written to outStream. Repository and tag names can optionally be given in |
|
29 |
-// the repo and tag arguments, respectively. |
|
30 |
-func (i *imageService) ImportImage(src string, repository, os string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { |
|
31 |
- var ( |
|
32 |
- rc io.ReadCloser |
|
33 |
- resp *http.Response |
|
34 |
- newRef reference.Named |
|
35 |
- ) |
|
36 |
- |
|
37 |
- // Default the operating system if not supplied. |
|
38 |
- if os == "" { |
|
39 |
- os = runtime.GOOS |
|
40 |
- } |
|
41 |
- |
|
42 |
- if repository != "" { |
|
43 |
- var err error |
|
44 |
- newRef, err = reference.ParseNormalizedNamed(repository) |
|
45 |
- if err != nil { |
|
46 |
- return errdefs.InvalidParameter(err) |
|
47 |
- } |
|
48 |
- if _, isCanonical := newRef.(reference.Canonical); isCanonical { |
|
49 |
- return errdefs.InvalidParameter(errors.New("cannot import digest reference")) |
|
50 |
- } |
|
51 |
- |
|
52 |
- if tag != "" { |
|
53 |
- newRef, err = reference.WithTag(newRef, tag) |
|
54 |
- if err != nil { |
|
55 |
- return errdefs.InvalidParameter(err) |
|
56 |
- } |
|
57 |
- } |
|
58 |
- } |
|
59 |
- |
|
60 |
- config, err := dockerfile.BuildFromConfig(&container.Config{}, changes, os) |
|
61 |
- if err != nil { |
|
62 |
- return err |
|
63 |
- } |
|
64 |
- if src == "-" { |
|
65 |
- rc = inConfig |
|
66 |
- } else { |
|
67 |
- inConfig.Close() |
|
68 |
- if len(strings.Split(src, "://")) == 1 { |
|
69 |
- src = "http://" + src |
|
70 |
- } |
|
71 |
- u, err := url.Parse(src) |
|
72 |
- if err != nil { |
|
73 |
- return errdefs.InvalidParameter(err) |
|
74 |
- } |
|
75 |
- |
|
76 |
- resp, err = remotecontext.GetWithStatusError(u.String()) |
|
77 |
- if err != nil { |
|
78 |
- return err |
|
79 |
- } |
|
80 |
- outStream.Write(streamformatter.FormatStatus("", "Downloading from %s", u)) |
|
81 |
- progressOutput := streamformatter.NewJSONProgressOutput(outStream, true) |
|
82 |
- rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing") |
|
83 |
- } |
|
84 |
- |
|
85 |
- defer rc.Close() |
|
86 |
- if len(msg) == 0 { |
|
87 |
- msg = "Imported from " + src |
|
88 |
- } |
|
89 |
- |
|
90 |
- inflatedLayerData, err := archive.DecompressStream(rc) |
|
91 |
- if err != nil { |
|
92 |
- return err |
|
93 |
- } |
|
94 |
- l, err := i.layerStores[os].Register(inflatedLayerData, "") |
|
95 |
- if err != nil { |
|
96 |
- return err |
|
97 |
- } |
|
98 |
- defer layer.ReleaseAndLog(i.layerStores[os], l) |
|
99 |
- |
|
100 |
- created := time.Now().UTC() |
|
101 |
- imgConfig, err := json.Marshal(&image.Image{ |
|
102 |
- V1Image: image.V1Image{ |
|
103 |
- DockerVersion: dockerversion.Version, |
|
104 |
- Config: config, |
|
105 |
- Architecture: runtime.GOARCH, |
|
106 |
- OS: os, |
|
107 |
- Created: created, |
|
108 |
- Comment: msg, |
|
109 |
- }, |
|
110 |
- RootFS: &image.RootFS{ |
|
111 |
- Type: "layers", |
|
112 |
- DiffIDs: []layer.DiffID{l.DiffID()}, |
|
113 |
- }, |
|
114 |
- History: []image.History{{ |
|
115 |
- Created: created, |
|
116 |
- Comment: msg, |
|
117 |
- }}, |
|
118 |
- }) |
|
119 |
- if err != nil { |
|
120 |
- return err |
|
121 |
- } |
|
122 |
- |
|
123 |
- id, err := i.imageStore.Create(imgConfig) |
|
124 |
- if err != nil { |
|
125 |
- return err |
|
126 |
- } |
|
127 |
- |
|
128 |
- // FIXME: connect with commit code and call refstore directly |
|
129 |
- if newRef != nil { |
|
130 |
- if err := i.TagImageWithReference(id, newRef); err != nil { |
|
131 |
- return err |
|
132 |
- } |
|
133 |
- } |
|
134 |
- |
|
135 |
- i.LogImageEvent(id.String(), id.String(), "import") |
|
136 |
- outStream.Write(streamformatter.FormatStatus("", id.String())) |
|
137 |
- return nil |
|
138 |
-} |
139 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,104 +0,0 @@ |
1 |
-package daemon // import "github.com/docker/docker/daemon" |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "time" |
|
5 |
- |
|
6 |
- "github.com/docker/distribution/reference" |
|
7 |
- "github.com/docker/docker/api/types" |
|
8 |
- "github.com/docker/docker/image" |
|
9 |
- "github.com/docker/docker/layer" |
|
10 |
- "github.com/docker/docker/pkg/system" |
|
11 |
- "github.com/pkg/errors" |
|
12 |
-) |
|
13 |
- |
|
14 |
-// LookupImage looks up an image by name and returns it as an ImageInspect |
|
15 |
-// structure. |
|
16 |
-func (i *imageService) LookupImage(name string) (*types.ImageInspect, error) { |
|
17 |
- img, err := i.GetImage(name) |
|
18 |
- if err != nil { |
|
19 |
- return nil, errors.Wrapf(err, "no such image: %s", name) |
|
20 |
- } |
|
21 |
- if !system.IsOSSupported(img.OperatingSystem()) { |
|
22 |
- return nil, system.ErrNotSupportedOperatingSystem |
|
23 |
- } |
|
24 |
- refs := i.referenceStore.References(img.ID().Digest()) |
|
25 |
- repoTags := []string{} |
|
26 |
- repoDigests := []string{} |
|
27 |
- for _, ref := range refs { |
|
28 |
- switch ref.(type) { |
|
29 |
- case reference.NamedTagged: |
|
30 |
- repoTags = append(repoTags, reference.FamiliarString(ref)) |
|
31 |
- case reference.Canonical: |
|
32 |
- repoDigests = append(repoDigests, reference.FamiliarString(ref)) |
|
33 |
- } |
|
34 |
- } |
|
35 |
- |
|
36 |
- var size int64 |
|
37 |
- var layerMetadata map[string]string |
|
38 |
- layerID := img.RootFS.ChainID() |
|
39 |
- if layerID != "" { |
|
40 |
- l, err := i.layerStores[img.OperatingSystem()].Get(layerID) |
|
41 |
- if err != nil { |
|
42 |
- return nil, err |
|
43 |
- } |
|
44 |
- defer layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l) |
|
45 |
- size, err = l.Size() |
|
46 |
- if err != nil { |
|
47 |
- return nil, err |
|
48 |
- } |
|
49 |
- |
|
50 |
- layerMetadata, err = l.Metadata() |
|
51 |
- if err != nil { |
|
52 |
- return nil, err |
|
53 |
- } |
|
54 |
- } |
|
55 |
- |
|
56 |
- comment := img.Comment |
|
57 |
- if len(comment) == 0 && len(img.History) > 0 { |
|
58 |
- comment = img.History[len(img.History)-1].Comment |
|
59 |
- } |
|
60 |
- |
|
61 |
- lastUpdated, err := i.imageStore.GetLastUpdated(img.ID()) |
|
62 |
- if err != nil { |
|
63 |
- return nil, err |
|
64 |
- } |
|
65 |
- |
|
66 |
- imageInspect := &types.ImageInspect{ |
|
67 |
- ID: img.ID().String(), |
|
68 |
- RepoTags: repoTags, |
|
69 |
- RepoDigests: repoDigests, |
|
70 |
- Parent: img.Parent.String(), |
|
71 |
- Comment: comment, |
|
72 |
- Created: img.Created.Format(time.RFC3339Nano), |
|
73 |
- Container: img.Container, |
|
74 |
- ContainerConfig: &img.ContainerConfig, |
|
75 |
- DockerVersion: img.DockerVersion, |
|
76 |
- Author: img.Author, |
|
77 |
- Config: img.Config, |
|
78 |
- Architecture: img.Architecture, |
|
79 |
- Os: img.OperatingSystem(), |
|
80 |
- OsVersion: img.OSVersion, |
|
81 |
- Size: size, |
|
82 |
- VirtualSize: size, // TODO: field unused, deprecate |
|
83 |
- RootFS: rootFSToAPIType(img.RootFS), |
|
84 |
- Metadata: types.ImageMetadata{ |
|
85 |
- LastTagTime: lastUpdated, |
|
86 |
- }, |
|
87 |
- } |
|
88 |
- |
|
89 |
- imageInspect.GraphDriver.Name = i.GraphDriverForOS(img.OperatingSystem()) |
|
90 |
- imageInspect.GraphDriver.Data = layerMetadata |
|
91 |
- |
|
92 |
- return imageInspect, nil |
|
93 |
-} |
|
94 |
- |
|
95 |
-func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { |
|
96 |
- var layers []string |
|
97 |
- for _, l := range rootfs.DiffIDs { |
|
98 |
- layers = append(layers, l.String()) |
|
99 |
- } |
|
100 |
- return types.RootFS{ |
|
101 |
- Type: rootfs.Type, |
|
102 |
- Layers: layers, |
|
103 |
- } |
|
104 |
-} |
105 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,168 +0,0 @@ |
1 |
-package daemon // import "github.com/docker/docker/daemon" |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "sync/atomic" |
|
5 |
- |
|
6 |
- "github.com/docker/distribution/reference" |
|
7 |
- "github.com/docker/docker/api/types" |
|
8 |
- "github.com/docker/docker/api/types/filters" |
|
9 |
- "github.com/docker/docker/errdefs" |
|
10 |
- "github.com/docker/docker/image" |
|
11 |
- "github.com/docker/docker/layer" |
|
12 |
- digest "github.com/opencontainers/go-digest" |
|
13 |
- "github.com/sirupsen/logrus" |
|
14 |
- "golang.org/x/net/context" |
|
15 |
-) |
|
16 |
- |
|
17 |
-var imagesAcceptedFilters = map[string]bool{ |
|
18 |
- "dangling": true, |
|
19 |
- "label": true, |
|
20 |
- "label!": true, |
|
21 |
- "until": true, |
|
22 |
-} |
|
23 |
- |
|
24 |
-// ImagesPrune removes unused images |
|
25 |
-func (i *imageService) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) { |
|
26 |
- if !atomic.CompareAndSwapInt32(&i.pruneRunning, 0, 1) { |
|
27 |
- return nil, errPruneRunning |
|
28 |
- } |
|
29 |
- defer atomic.StoreInt32(&i.pruneRunning, 0) |
|
30 |
- |
|
31 |
- // make sure that only accepted filters have been received |
|
32 |
- err := pruneFilters.Validate(imagesAcceptedFilters) |
|
33 |
- if err != nil { |
|
34 |
- return nil, err |
|
35 |
- } |
|
36 |
- |
|
37 |
- rep := &types.ImagesPruneReport{} |
|
38 |
- |
|
39 |
- danglingOnly := true |
|
40 |
- if pruneFilters.Contains("dangling") { |
|
41 |
- if pruneFilters.ExactMatch("dangling", "false") || pruneFilters.ExactMatch("dangling", "0") { |
|
42 |
- danglingOnly = false |
|
43 |
- } else if !pruneFilters.ExactMatch("dangling", "true") && !pruneFilters.ExactMatch("dangling", "1") { |
|
44 |
- return nil, invalidFilter{"dangling", pruneFilters.Get("dangling")} |
|
45 |
- } |
|
46 |
- } |
|
47 |
- |
|
48 |
- until, err := getUntilFromPruneFilters(pruneFilters) |
|
49 |
- if err != nil { |
|
50 |
- return nil, err |
|
51 |
- } |
|
52 |
- |
|
53 |
- var allImages map[image.ID]*image.Image |
|
54 |
- if danglingOnly { |
|
55 |
- allImages = i.imageStore.Heads() |
|
56 |
- } else { |
|
57 |
- allImages = i.imageStore.Map() |
|
58 |
- } |
|
59 |
- |
|
60 |
- // Filter intermediary images and get their unique size |
|
61 |
- allLayers := make(map[layer.ChainID]layer.Layer) |
|
62 |
- for _, ls := range i.layerStores { |
|
63 |
- for k, v := range ls.Map() { |
|
64 |
- allLayers[k] = v |
|
65 |
- } |
|
66 |
- } |
|
67 |
- topImages := map[image.ID]*image.Image{} |
|
68 |
- for id, img := range allImages { |
|
69 |
- select { |
|
70 |
- case <-ctx.Done(): |
|
71 |
- return nil, ctx.Err() |
|
72 |
- default: |
|
73 |
- dgst := digest.Digest(id) |
|
74 |
- if len(i.referenceStore.References(dgst)) == 0 && len(i.imageStore.Children(id)) != 0 { |
|
75 |
- continue |
|
76 |
- } |
|
77 |
- if !until.IsZero() && img.Created.After(until) { |
|
78 |
- continue |
|
79 |
- } |
|
80 |
- if img.Config != nil && !matchLabels(pruneFilters, img.Config.Labels) { |
|
81 |
- continue |
|
82 |
- } |
|
83 |
- topImages[id] = img |
|
84 |
- } |
|
85 |
- } |
|
86 |
- |
|
87 |
- canceled := false |
|
88 |
-deleteImagesLoop: |
|
89 |
- for id := range topImages { |
|
90 |
- select { |
|
91 |
- case <-ctx.Done(): |
|
92 |
- // we still want to calculate freed size and return the data |
|
93 |
- canceled = true |
|
94 |
- break deleteImagesLoop |
|
95 |
- default: |
|
96 |
- } |
|
97 |
- |
|
98 |
- deletedImages := []types.ImageDeleteResponseItem{} |
|
99 |
- refs := i.referenceStore.References(dgst) |
|
100 |
- if len(refs) > 0 { |
|
101 |
- shouldDelete := !danglingOnly |
|
102 |
- if !shouldDelete { |
|
103 |
- hasTag := false |
|
104 |
- for _, ref := range refs { |
|
105 |
- if _, ok := ref.(reference.NamedTagged); ok { |
|
106 |
- hasTag = true |
|
107 |
- break |
|
108 |
- } |
|
109 |
- } |
|
110 |
- |
|
111 |
- // Only delete if it's untagged (i.e. repo:<none>) |
|
112 |
- shouldDelete = !hasTag |
|
113 |
- } |
|
114 |
- |
|
115 |
- if shouldDelete { |
|
116 |
- for _, ref := range refs { |
|
117 |
- imgDel, err := i.ImageDelete(ref.String(), false, true) |
|
118 |
- if imageDeleteFailed(ref.String(), err) { |
|
119 |
- continue |
|
120 |
- } |
|
121 |
- deletedImages = append(deletedImages, imgDel...) |
|
122 |
- } |
|
123 |
- } |
|
124 |
- } else { |
|
125 |
- hex := id.Digest().Hex() |
|
126 |
- imgDel, err := i.ImageDelete(hex, false, true) |
|
127 |
- if imageDeleteFailed(hex, err) { |
|
128 |
- continue |
|
129 |
- } |
|
130 |
- deletedImages = append(deletedImages, imgDel...) |
|
131 |
- } |
|
132 |
- |
|
133 |
- rep.ImagesDeleted = append(rep.ImagesDeleted, deletedImages...) |
|
134 |
- } |
|
135 |
- |
|
136 |
- // Compute how much space was freed |
|
137 |
- for _, d := range rep.ImagesDeleted { |
|
138 |
- if d.Deleted != "" { |
|
139 |
- chid := layer.ChainID(d.Deleted) |
|
140 |
- if l, ok := allLayers[chid]; ok { |
|
141 |
- diffSize, err := l.DiffSize() |
|
142 |
- if err != nil { |
|
143 |
- logrus.Warnf("failed to get layer %s size: %v", chid, err) |
|
144 |
- continue |
|
145 |
- } |
|
146 |
- rep.SpaceReclaimed += uint64(diffSize) |
|
147 |
- } |
|
148 |
- } |
|
149 |
- } |
|
150 |
- |
|
151 |
- if canceled { |
|
152 |
- logrus.Debugf("ImagesPrune operation cancelled: %#v", *rep) |
|
153 |
- } |
|
154 |
- |
|
155 |
- return rep, nil |
|
156 |
-} |
|
157 |
- |
|
158 |
-func imageDeleteFailed(ref string, err error) bool { |
|
159 |
- switch { |
|
160 |
- case err == nil: |
|
161 |
- return false |
|
162 |
- case errdefs.IsConflict(err): |
|
163 |
- return true |
|
164 |
- default: |
|
165 |
- logrus.Warnf("failed to prune image %s: %v", ref, err) |
|
166 |
- return true |
|
167 |
- } |
|
168 |
-} |
169 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,127 +0,0 @@ |
1 |
-package daemon // import "github.com/docker/docker/daemon" |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "io" |
|
5 |
- "runtime" |
|
6 |
- "strings" |
|
7 |
- |
|
8 |
- dist "github.com/docker/distribution" |
|
9 |
- "github.com/docker/distribution/reference" |
|
10 |
- "github.com/docker/docker/api/types" |
|
11 |
- "github.com/docker/docker/distribution" |
|
12 |
- progressutils "github.com/docker/docker/distribution/utils" |
|
13 |
- "github.com/docker/docker/errdefs" |
|
14 |
- "github.com/docker/docker/pkg/progress" |
|
15 |
- "github.com/docker/docker/registry" |
|
16 |
- "github.com/opencontainers/go-digest" |
|
17 |
- "golang.org/x/net/context" |
|
18 |
-) |
|
19 |
- |
|
20 |
-// PullImage initiates a pull operation. image is the repository name to pull, and |
|
21 |
-// tag may be either empty, or indicate a specific tag to pull. |
|
22 |
-func (i *imageService) PullImage(ctx context.Context, image, tag, os string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { |
|
23 |
- // Special case: "pull -a" may send an image name with a |
|
24 |
- // trailing :. This is ugly, but let's not break API |
|
25 |
- // compatibility. |
|
26 |
- image = strings.TrimSuffix(image, ":") |
|
27 |
- |
|
28 |
- ref, err := reference.ParseNormalizedNamed(image) |
|
29 |
- if err != nil { |
|
30 |
- return errdefs.InvalidParameter(err) |
|
31 |
- } |
|
32 |
- |
|
33 |
- if tag != "" { |
|
34 |
- // The "tag" could actually be a digest. |
|
35 |
- var dgst digest.Digest |
|
36 |
- dgst, err = digest.Parse(tag) |
|
37 |
- if err == nil { |
|
38 |
- ref, err = reference.WithDigest(reference.TrimNamed(ref), dgst) |
|
39 |
- } else { |
|
40 |
- ref, err = reference.WithTag(ref, tag) |
|
41 |
- } |
|
42 |
- if err != nil { |
|
43 |
- return errdefs.InvalidParameter(err) |
|
44 |
- } |
|
45 |
- } |
|
46 |
- |
|
47 |
- return i.pullImageWithReference(ctx, ref, os, metaHeaders, authConfig, outStream) |
|
48 |
-} |
|
49 |
- |
|
50 |
-func (i *imageService) pullImageWithReference(ctx context.Context, ref reference.Named, os string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { |
|
51 |
- // Include a buffer so that slow client connections don't affect |
|
52 |
- // transfer performance. |
|
53 |
- progressChan := make(chan progress.Progress, 100) |
|
54 |
- |
|
55 |
- writesDone := make(chan struct{}) |
|
56 |
- |
|
57 |
- ctx, cancelFunc := context.WithCancel(ctx) |
|
58 |
- |
|
59 |
- go func() { |
|
60 |
- progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) |
|
61 |
- close(writesDone) |
|
62 |
- }() |
|
63 |
- |
|
64 |
- // Default to the host OS platform in case it hasn't been populated with an explicit value. |
|
65 |
- if os == "" { |
|
66 |
- os = runtime.GOOS |
|
67 |
- } |
|
68 |
- |
|
69 |
- imagePullConfig := &distribution.ImagePullConfig{ |
|
70 |
- Config: distribution.Config{ |
|
71 |
- MetaHeaders: metaHeaders, |
|
72 |
- AuthConfig: authConfig, |
|
73 |
- ProgressOutput: progress.ChanOutput(progressChan), |
|
74 |
- RegistryService: i.registryService, |
|
75 |
- ImageEventLogger: i.LogImageEvent, |
|
76 |
- MetadataStore: i.distributionMetadataStore, |
|
77 |
- ImageStore: distribution.NewImageConfigStoreFromStore(i.imageStore), |
|
78 |
- ReferenceStore: i.referenceStore, |
|
79 |
- }, |
|
80 |
- DownloadManager: i.downloadManager, |
|
81 |
- Schema2Types: distribution.ImageTypes, |
|
82 |
- OS: os, |
|
83 |
- } |
|
84 |
- |
|
85 |
- err := distribution.Pull(ctx, ref, imagePullConfig) |
|
86 |
- close(progressChan) |
|
87 |
- <-writesDone |
|
88 |
- return err |
|
89 |
-} |
|
90 |
- |
|
91 |
-// GetRepository returns a repository from the registry. |
|
92 |
-func (i *imageService) GetRepository(ctx context.Context, ref reference.Named, authConfig *types.AuthConfig) (dist.Repository, bool, error) { |
|
93 |
- // get repository info |
|
94 |
- repoInfo, err := i.registryService.ResolveRepository(ref) |
|
95 |
- if err != nil { |
|
96 |
- return nil, false, err |
|
97 |
- } |
|
98 |
- // makes sure name is not empty or `scratch` |
|
99 |
- if err := distribution.ValidateRepoName(repoInfo.Name); err != nil { |
|
100 |
- return nil, false, errdefs.InvalidParameter(err) |
|
101 |
- } |
|
102 |
- |
|
103 |
- // get endpoints |
|
104 |
- endpoints, err := i.registryService.LookupPullEndpoints(reference.Domain(repoInfo.Name)) |
|
105 |
- if err != nil { |
|
106 |
- return nil, false, err |
|
107 |
- } |
|
108 |
- |
|
109 |
- // retrieve repository |
|
110 |
- var ( |
|
111 |
- confirmedV2 bool |
|
112 |
- repository dist.Repository |
|
113 |
- lastError error |
|
114 |
- ) |
|
115 |
- |
|
116 |
- for _, endpoint := range endpoints { |
|
117 |
- if endpoint.Version == registry.APIVersion1 { |
|
118 |
- continue |
|
119 |
- } |
|
120 |
- |
|
121 |
- repository, confirmedV2, lastError = distribution.NewV2Repository(ctx, repoInfo, endpoint, nil, authConfig, "pull") |
|
122 |
- if lastError == nil && confirmedV2 { |
|
123 |
- break |
|
124 |
- } |
|
125 |
- } |
|
126 |
- return repository, confirmedV2, lastError |
|
127 |
-} |
128 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,63 +0,0 @@ |
1 |
-package daemon // import "github.com/docker/docker/daemon" |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "io" |
|
5 |
- |
|
6 |
- "github.com/docker/distribution/manifest/schema2" |
|
7 |
- "github.com/docker/distribution/reference" |
|
8 |
- "github.com/docker/docker/api/types" |
|
9 |
- "github.com/docker/docker/distribution" |
|
10 |
- progressutils "github.com/docker/docker/distribution/utils" |
|
11 |
- "github.com/docker/docker/pkg/progress" |
|
12 |
- "golang.org/x/net/context" |
|
13 |
-) |
|
14 |
- |
|
15 |
-// PushImage initiates a push operation on the repository named localName. |
|
16 |
-func (i *imageService) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { |
|
17 |
- ref, err := reference.ParseNormalizedNamed(image) |
|
18 |
- if err != nil { |
|
19 |
- return err |
|
20 |
- } |
|
21 |
- if tag != "" { |
|
22 |
- // Push by digest is not supported, so only tags are supported. |
|
23 |
- ref, err = reference.WithTag(ref, tag) |
|
24 |
- if err != nil { |
|
25 |
- return err |
|
26 |
- } |
|
27 |
- } |
|
28 |
- |
|
29 |
- // Include a buffer so that slow client connections don't affect |
|
30 |
- // transfer performance. |
|
31 |
- progressChan := make(chan progress.Progress, 100) |
|
32 |
- |
|
33 |
- writesDone := make(chan struct{}) |
|
34 |
- |
|
35 |
- ctx, cancelFunc := context.WithCancel(ctx) |
|
36 |
- |
|
37 |
- go func() { |
|
38 |
- progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) |
|
39 |
- close(writesDone) |
|
40 |
- }() |
|
41 |
- |
|
42 |
- imagePushConfig := &distribution.ImagePushConfig{ |
|
43 |
- Config: distribution.Config{ |
|
44 |
- MetaHeaders: metaHeaders, |
|
45 |
- AuthConfig: authConfig, |
|
46 |
- ProgressOutput: progress.ChanOutput(progressChan), |
|
47 |
- RegistryService: i.registryService, |
|
48 |
- ImageEventLogger: i.LogImageEvent, |
|
49 |
- MetadataStore: i.distributionMetadataStore, |
|
50 |
- ImageStore: distribution.NewImageConfigStoreFromStore(i.imageStore), |
|
51 |
- ReferenceStore: i.referenceStore, |
|
52 |
- }, |
|
53 |
- ConfigMediaType: schema2.MediaTypeImageConfig, |
|
54 |
- LayerStores: distribution.NewLayerProvidersFromStores(i.layerStores), |
|
55 |
- TrustKey: i.trustKey, |
|
56 |
- UploadManager: i.uploadManager, |
|
57 |
- } |
|
58 |
- |
|
59 |
- err = distribution.Push(ctx, ref, imagePushConfig) |
|
60 |
- close(progressChan) |
|
61 |
- <-writesDone |
|
62 |
- return err |
|
63 |
-} |
64 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,96 +0,0 @@ |
1 |
-package daemon // import "github.com/docker/docker/daemon" |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "strconv" |
|
5 |
- |
|
6 |
- "golang.org/x/net/context" |
|
7 |
- |
|
8 |
- "github.com/docker/docker/api/types" |
|
9 |
- "github.com/docker/docker/api/types/filters" |
|
10 |
- registrytypes "github.com/docker/docker/api/types/registry" |
|
11 |
- "github.com/docker/docker/dockerversion" |
|
12 |
-) |
|
13 |
- |
|
14 |
-var acceptedSearchFilterTags = map[string]bool{ |
|
15 |
- "is-automated": true, |
|
16 |
- "is-official": true, |
|
17 |
- "stars": true, |
|
18 |
-} |
|
19 |
- |
|
20 |
-// SearchRegistryForImages queries the registry for images matching |
|
21 |
-// term. authConfig is used to login. |
|
22 |
-// |
|
23 |
-// TODO: this could be implemented in a registry service instead of the image |
|
24 |
-// service. |
|
25 |
-func (i *imageService) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, |
|
26 |
- authConfig *types.AuthConfig, |
|
27 |
- headers map[string][]string) (*registrytypes.SearchResults, error) { |
|
28 |
- |
|
29 |
- searchFilters, err := filters.FromJSON(filtersArgs) |
|
30 |
- if err != nil { |
|
31 |
- return nil, err |
|
32 |
- } |
|
33 |
- if err := searchFilters.Validate(acceptedSearchFilterTags); err != nil { |
|
34 |
- return nil, err |
|
35 |
- } |
|
36 |
- |
|
37 |
- var isAutomated, isOfficial bool |
|
38 |
- var hasStarFilter = 0 |
|
39 |
- if searchFilters.Contains("is-automated") { |
|
40 |
- if searchFilters.UniqueExactMatch("is-automated", "true") { |
|
41 |
- isAutomated = true |
|
42 |
- } else if !searchFilters.UniqueExactMatch("is-automated", "false") { |
|
43 |
- return nil, invalidFilter{"is-automated", searchFilters.Get("is-automated")} |
|
44 |
- } |
|
45 |
- } |
|
46 |
- if searchFilters.Contains("is-official") { |
|
47 |
- if searchFilters.UniqueExactMatch("is-official", "true") { |
|
48 |
- isOfficial = true |
|
49 |
- } else if !searchFilters.UniqueExactMatch("is-official", "false") { |
|
50 |
- return nil, invalidFilter{"is-official", searchFilters.Get("is-official")} |
|
51 |
- } |
|
52 |
- } |
|
53 |
- if searchFilters.Contains("stars") { |
|
54 |
- hasStars := searchFilters.Get("stars") |
|
55 |
- for _, hasStar := range hasStars { |
|
56 |
- iHasStar, err := strconv.Atoi(hasStar) |
|
57 |
- if err != nil { |
|
58 |
- return nil, invalidFilter{"stars", hasStar} |
|
59 |
- } |
|
60 |
- if iHasStar > hasStarFilter { |
|
61 |
- hasStarFilter = iHasStar |
|
62 |
- } |
|
63 |
- } |
|
64 |
- } |
|
65 |
- |
|
66 |
- unfilteredResult, err := i.registryService.Search(ctx, term, limit, authConfig, dockerversion.DockerUserAgent(ctx), headers) |
|
67 |
- if err != nil { |
|
68 |
- return nil, err |
|
69 |
- } |
|
70 |
- |
|
71 |
- filteredResults := []registrytypes.SearchResult{} |
|
72 |
- for _, result := range unfilteredResult.Results { |
|
73 |
- if searchFilters.Contains("is-automated") { |
|
74 |
- if isAutomated != result.IsAutomated { |
|
75 |
- continue |
|
76 |
- } |
|
77 |
- } |
|
78 |
- if searchFilters.Contains("is-official") { |
|
79 |
- if isOfficial != result.IsOfficial { |
|
80 |
- continue |
|
81 |
- } |
|
82 |
- } |
|
83 |
- if searchFilters.Contains("stars") { |
|
84 |
- if result.StarCount < hasStarFilter { |
|
85 |
- continue |
|
86 |
- } |
|
87 |
- } |
|
88 |
- filteredResults = append(filteredResults, result) |
|
89 |
- } |
|
90 |
- |
|
91 |
- return ®istrytypes.SearchResults{ |
|
92 |
- Query: unfilteredResult.Query, |
|
93 |
- NumResults: len(filteredResults), |
|
94 |
- Results: filteredResults, |
|
95 |
- }, nil |
|
96 |
-} |
97 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,41 +0,0 @@ |
1 |
-package daemon // import "github.com/docker/docker/daemon" |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "github.com/docker/distribution/reference" |
|
5 |
- "github.com/docker/docker/image" |
|
6 |
-) |
|
7 |
- |
|
8 |
-// TagImage creates the tag specified by newTag, pointing to the image named |
|
9 |
-// imageName (alternatively, imageName can also be an image ID). |
|
10 |
-func (i *imageService) TagImage(imageName, repository, tag string) (string, error) { |
|
11 |
- imageID, _, err := i.GetImageIDAndOS(imageName) |
|
12 |
- if err != nil { |
|
13 |
- return "", err |
|
14 |
- } |
|
15 |
- |
|
16 |
- newTag, err := reference.ParseNormalizedNamed(repository) |
|
17 |
- if err != nil { |
|
18 |
- return "", err |
|
19 |
- } |
|
20 |
- if tag != "" { |
|
21 |
- if newTag, err = reference.WithTag(reference.TrimNamed(newTag), tag); err != nil { |
|
22 |
- return "", err |
|
23 |
- } |
|
24 |
- } |
|
25 |
- |
|
26 |
- err = i.TagImageWithReference(imageID, newTag) |
|
27 |
- return reference.FamiliarString(newTag), err |
|
28 |
-} |
|
29 |
- |
|
30 |
-// TagImageWithReference adds the given reference to the image ID provided. |
|
31 |
-func (i *imageService) TagImageWithReference(imageID image.ID, newTag reference.Named) error { |
|
32 |
- if err := i.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil { |
|
33 |
- return err |
|
34 |
- } |
|
35 |
- |
|
36 |
- if err := i.imageStore.SetLastUpdated(imageID); err != nil { |
|
37 |
- return err |
|
38 |
- } |
|
39 |
- i.LogImageEvent(imageID.String(), reference.FamiliarString(newTag), "tag") |
|
40 |
- return nil |
|
41 |
-} |
42 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,35 +0,0 @@ |
1 |
-package daemon // import "github.com/docker/docker/daemon" |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "github.com/docker/docker/image" |
|
5 |
- "github.com/docker/docker/layer" |
|
6 |
- "github.com/docker/docker/pkg/system" |
|
7 |
- "github.com/pkg/errors" |
|
8 |
-) |
|
9 |
- |
|
10 |
-// GetLayerFolders returns the layer folders from an image RootFS |
|
11 |
-func (daemon *Daemon) GetLayerFolders(img *image.Image, rwLayer layer.RWLayer) ([]string, error) { |
|
12 |
- folders := []string{} |
|
13 |
- max := len(img.RootFS.DiffIDs) |
|
14 |
- for index := 1; index <= max; index++ { |
|
15 |
- // FIXME: why does this mutate the RootFS? |
|
16 |
- img.RootFS.DiffIDs = img.RootFS.DiffIDs[:index] |
|
17 |
- if !system.IsOSSupported(img.OperatingSystem()) { |
|
18 |
- return nil, errors.Wrapf(system.ErrNotSupportedOperatingSystem, "cannot get layerpath for ImageID %s", img.RootFS.ChainID()) |
|
19 |
- } |
|
20 |
- layerPath, err := layer.GetLayerPath(daemon.layerStores[img.OperatingSystem()], img.RootFS.ChainID()) |
|
21 |
- if err != nil { |
|
22 |
- return nil, errors.Wrapf(err, "failed to get layer path from graphdriver %s for ImageID %s", daemon.layerStores[img.OperatingSystem()], img.RootFS.ChainID()) |
|
23 |
- } |
|
24 |
- // Reverse order, expecting parent first |
|
25 |
- folders = append([]string{layerPath}, folders...) |
|
26 |
- } |
|
27 |
- if rwLayer == nil { |
|
28 |
- return nil, errors.New("RWLayer is unexpectedly nil") |
|
29 |
- } |
|
30 |
- m, err := rwLayer.Metadata() |
|
31 |
- if err != nil { |
|
32 |
- return nil, errors.Wrap(err, "failed to get layer metadata") |
|
33 |
- } |
|
34 |
- return append(folders, m["dir"]), nil |
|
35 |
-} |
36 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,346 +0,0 @@ |
1 |
-package daemon // import "github.com/docker/docker/daemon" |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "encoding/json" |
|
5 |
- "fmt" |
|
6 |
- "sort" |
|
7 |
- "time" |
|
8 |
- |
|
9 |
- "github.com/pkg/errors" |
|
10 |
- |
|
11 |
- "github.com/docker/distribution/reference" |
|
12 |
- "github.com/docker/docker/api/types" |
|
13 |
- "github.com/docker/docker/api/types/filters" |
|
14 |
- "github.com/docker/docker/container" |
|
15 |
- "github.com/docker/docker/image" |
|
16 |
- "github.com/docker/docker/layer" |
|
17 |
- "github.com/docker/docker/pkg/system" |
|
18 |
-) |
|
19 |
- |
|
20 |
-var acceptedImageFilterTags = map[string]bool{ |
|
21 |
- "dangling": true, |
|
22 |
- "label": true, |
|
23 |
- "before": true, |
|
24 |
- "since": true, |
|
25 |
- "reference": true, |
|
26 |
-} |
|
27 |
- |
|
28 |
-// byCreated is a temporary type used to sort a list of images by creation |
|
29 |
-// time. |
|
30 |
-type byCreated []*types.ImageSummary |
|
31 |
- |
|
32 |
-func (r byCreated) Len() int { return len(r) } |
|
33 |
-func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } |
|
34 |
-func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } |
|
35 |
- |
|
36 |
-// Map returns a map of all images in the ImageStore |
|
37 |
-func (i *imageService) Map() map[image.ID]*image.Image { |
|
38 |
- return i.imageStore.Map() |
|
39 |
-} |
|
40 |
- |
|
41 |
-// Images returns a filtered list of images. filterArgs is a JSON-encoded set |
|
42 |
-// of filter arguments which will be interpreted by api/types/filters. |
|
43 |
-// filter is a shell glob string applied to repository names. The argument |
|
44 |
-// named all controls whether all images in the graph are filtered, or just |
|
45 |
-// the heads. |
|
46 |
-func (i *imageService) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) { |
|
47 |
- var ( |
|
48 |
- allImages map[image.ID]*image.Image |
|
49 |
- err error |
|
50 |
- danglingOnly = false |
|
51 |
- ) |
|
52 |
- |
|
53 |
- if err := imageFilters.Validate(acceptedImageFilterTags); err != nil { |
|
54 |
- return nil, err |
|
55 |
- } |
|
56 |
- |
|
57 |
- if imageFilters.Contains("dangling") { |
|
58 |
- if imageFilters.ExactMatch("dangling", "true") { |
|
59 |
- danglingOnly = true |
|
60 |
- } else if !imageFilters.ExactMatch("dangling", "false") { |
|
61 |
- return nil, invalidFilter{"dangling", imageFilters.Get("dangling")} |
|
62 |
- } |
|
63 |
- } |
|
64 |
- if danglingOnly { |
|
65 |
- allImages = i.imageStore.Heads() |
|
66 |
- } else { |
|
67 |
- allImages = i.imageStore.Map() |
|
68 |
- } |
|
69 |
- |
|
70 |
- var beforeFilter, sinceFilter *image.Image |
|
71 |
- err = imageFilters.WalkValues("before", func(value string) error { |
|
72 |
- beforeFilter, err = i.GetImage(value) |
|
73 |
- return err |
|
74 |
- }) |
|
75 |
- if err != nil { |
|
76 |
- return nil, err |
|
77 |
- } |
|
78 |
- |
|
79 |
- err = imageFilters.WalkValues("since", func(value string) error { |
|
80 |
- sinceFilter, err = i.GetImage(value) |
|
81 |
- return err |
|
82 |
- }) |
|
83 |
- if err != nil { |
|
84 |
- return nil, err |
|
85 |
- } |
|
86 |
- |
|
87 |
- images := []*types.ImageSummary{} |
|
88 |
- var imagesMap map[*image.Image]*types.ImageSummary |
|
89 |
- var layerRefs map[layer.ChainID]int |
|
90 |
- var allLayers map[layer.ChainID]layer.Layer |
|
91 |
- var allContainers []*container.Container |
|
92 |
- |
|
93 |
- for id, img := range allImages { |
|
94 |
- if beforeFilter != nil { |
|
95 |
- if img.Created.Equal(beforeFilter.Created) || img.Created.After(beforeFilter.Created) { |
|
96 |
- continue |
|
97 |
- } |
|
98 |
- } |
|
99 |
- |
|
100 |
- if sinceFilter != nil { |
|
101 |
- if img.Created.Equal(sinceFilter.Created) || img.Created.Before(sinceFilter.Created) { |
|
102 |
- continue |
|
103 |
- } |
|
104 |
- } |
|
105 |
- |
|
106 |
- if imageFilters.Contains("label") { |
|
107 |
- // Very old image that do not have image.Config (or even labels) |
|
108 |
- if img.Config == nil { |
|
109 |
- continue |
|
110 |
- } |
|
111 |
- // We are now sure image.Config is not nil |
|
112 |
- if !imageFilters.MatchKVList("label", img.Config.Labels) { |
|
113 |
- continue |
|
114 |
- } |
|
115 |
- } |
|
116 |
- |
|
117 |
- // Skip any images with an unsupported operating system to avoid a potential |
|
118 |
- // panic when indexing through the layerstore. Don't error as we want to list |
|
119 |
- // the other images. This should never happen, but here as a safety precaution. |
|
120 |
- if !system.IsOSSupported(img.OperatingSystem()) { |
|
121 |
- continue |
|
122 |
- } |
|
123 |
- |
|
124 |
- layerID := img.RootFS.ChainID() |
|
125 |
- var size int64 |
|
126 |
- if layerID != "" { |
|
127 |
- l, err := i.layerStores[img.OperatingSystem()].Get(layerID) |
|
128 |
- if err != nil { |
|
129 |
- // The layer may have been deleted between the call to `Map()` or |
|
130 |
- // `Heads()` and the call to `Get()`, so we just ignore this error |
|
131 |
- if err == layer.ErrLayerDoesNotExist { |
|
132 |
- continue |
|
133 |
- } |
|
134 |
- return nil, err |
|
135 |
- } |
|
136 |
- |
|
137 |
- size, err = l.Size() |
|
138 |
- layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l) |
|
139 |
- if err != nil { |
|
140 |
- return nil, err |
|
141 |
- } |
|
142 |
- } |
|
143 |
- |
|
144 |
- newImage := newImage(img, size) |
|
145 |
- |
|
146 |
- for _, ref := range i.referenceStore.References(id.Digest()) { |
|
147 |
- if imageFilters.Contains("reference") { |
|
148 |
- var found bool |
|
149 |
- var matchErr error |
|
150 |
- for _, pattern := range imageFilters.Get("reference") { |
|
151 |
- found, matchErr = reference.FamiliarMatch(pattern, ref) |
|
152 |
- if matchErr != nil { |
|
153 |
- return nil, matchErr |
|
154 |
- } |
|
155 |
- } |
|
156 |
- if !found { |
|
157 |
- continue |
|
158 |
- } |
|
159 |
- } |
|
160 |
- if _, ok := ref.(reference.Canonical); ok { |
|
161 |
- newImage.RepoDigests = append(newImage.RepoDigests, reference.FamiliarString(ref)) |
|
162 |
- } |
|
163 |
- if _, ok := ref.(reference.NamedTagged); ok { |
|
164 |
- newImage.RepoTags = append(newImage.RepoTags, reference.FamiliarString(ref)) |
|
165 |
- } |
|
166 |
- } |
|
167 |
- if newImage.RepoDigests == nil && newImage.RepoTags == nil { |
|
168 |
- if all || len(i.imageStore.Children(id)) == 0 { |
|
169 |
- |
|
170 |
- if imageFilters.Contains("dangling") && !danglingOnly { |
|
171 |
- //dangling=false case, so dangling image is not needed |
|
172 |
- continue |
|
173 |
- } |
|
174 |
- if imageFilters.Contains("reference") { // skip images with no references if filtering by reference |
|
175 |
- continue |
|
176 |
- } |
|
177 |
- newImage.RepoDigests = []string{"<none>@<none>"} |
|
178 |
- newImage.RepoTags = []string{"<none>:<none>"} |
|
179 |
- } else { |
|
180 |
- continue |
|
181 |
- } |
|
182 |
- } else if danglingOnly && len(newImage.RepoTags) > 0 { |
|
183 |
- continue |
|
184 |
- } |
|
185 |
- |
|
186 |
- if withExtraAttrs { |
|
187 |
- // lazily init variables |
|
188 |
- if imagesMap == nil { |
|
189 |
- allContainers = i.containers.List() |
|
190 |
- allLayers = i.layerStores[img.OperatingSystem()].Map() |
|
191 |
- imagesMap = make(map[*image.Image]*types.ImageSummary) |
|
192 |
- layerRefs = make(map[layer.ChainID]int) |
|
193 |
- } |
|
194 |
- |
|
195 |
- // Get container count |
|
196 |
- newImage.Containers = 0 |
|
197 |
- for _, c := range allContainers { |
|
198 |
- if c.ImageID == id { |
|
199 |
- newImage.Containers++ |
|
200 |
- } |
|
201 |
- } |
|
202 |
- |
|
203 |
- // count layer references |
|
204 |
- rootFS := *img.RootFS |
|
205 |
- rootFS.DiffIDs = nil |
|
206 |
- for _, id := range img.RootFS.DiffIDs { |
|
207 |
- rootFS.Append(id) |
|
208 |
- chid := rootFS.ChainID() |
|
209 |
- layerRefs[chid]++ |
|
210 |
- if _, ok := allLayers[chid]; !ok { |
|
211 |
- return nil, fmt.Errorf("layer %v was not found (corruption?)", chid) |
|
212 |
- } |
|
213 |
- } |
|
214 |
- imagesMap[img] = newImage |
|
215 |
- } |
|
216 |
- |
|
217 |
- images = append(images, newImage) |
|
218 |
- } |
|
219 |
- |
|
220 |
- if withExtraAttrs { |
|
221 |
- // Get Shared sizes |
|
222 |
- for img, newImage := range imagesMap { |
|
223 |
- rootFS := *img.RootFS |
|
224 |
- rootFS.DiffIDs = nil |
|
225 |
- |
|
226 |
- newImage.SharedSize = 0 |
|
227 |
- for _, id := range img.RootFS.DiffIDs { |
|
228 |
- rootFS.Append(id) |
|
229 |
- chid := rootFS.ChainID() |
|
230 |
- |
|
231 |
- diffSize, err := allLayers[chid].DiffSize() |
|
232 |
- if err != nil { |
|
233 |
- return nil, err |
|
234 |
- } |
|
235 |
- |
|
236 |
- if layerRefs[chid] > 1 { |
|
237 |
- newImage.SharedSize += diffSize |
|
238 |
- } |
|
239 |
- } |
|
240 |
- } |
|
241 |
- } |
|
242 |
- |
|
243 |
- sort.Sort(sort.Reverse(byCreated(images))) |
|
244 |
- |
|
245 |
- return images, nil |
|
246 |
-} |
|
247 |
- |
|
248 |
-// SquashImage creates a new image with the diff of the specified image and the specified parent. |
|
249 |
-// This new image contains only the layers from it's parent + 1 extra layer which contains the diff of all the layers in between. |
|
250 |
-// The existing image(s) is not destroyed. |
|
251 |
-// If no parent is specified, a new image with the diff of all the specified image's layers merged into a new layer that has no parents. |
|
252 |
-func (i *imageService) SquashImage(id, parent string) (string, error) { |
|
253 |
- |
|
254 |
- var ( |
|
255 |
- img *image.Image |
|
256 |
- err error |
|
257 |
- ) |
|
258 |
- if img, err = i.imageStore.Get(image.ID(id)); err != nil { |
|
259 |
- return "", err |
|
260 |
- } |
|
261 |
- |
|
262 |
- var parentImg *image.Image |
|
263 |
- var parentChainID layer.ChainID |
|
264 |
- if len(parent) != 0 { |
|
265 |
- parentImg, err = i.imageStore.Get(image.ID(parent)) |
|
266 |
- if err != nil { |
|
267 |
- return "", errors.Wrap(err, "error getting specified parent layer") |
|
268 |
- } |
|
269 |
- parentChainID = parentImg.RootFS.ChainID() |
|
270 |
- } else { |
|
271 |
- rootFS := image.NewRootFS() |
|
272 |
- parentImg = &image.Image{RootFS: rootFS} |
|
273 |
- } |
|
274 |
- |
|
275 |
- l, err := i.layerStores[img.OperatingSystem()].Get(img.RootFS.ChainID()) |
|
276 |
- if err != nil { |
|
277 |
- return "", errors.Wrap(err, "error getting image layer") |
|
278 |
- } |
|
279 |
- defer i.layerStores[img.OperatingSystem()].Release(l) |
|
280 |
- |
|
281 |
- ts, err := l.TarStreamFrom(parentChainID) |
|
282 |
- if err != nil { |
|
283 |
- return "", errors.Wrapf(err, "error getting tar stream to parent") |
|
284 |
- } |
|
285 |
- defer ts.Close() |
|
286 |
- |
|
287 |
- newL, err := i.layerStores[img.OperatingSystem()].Register(ts, parentChainID) |
|
288 |
- if err != nil { |
|
289 |
- return "", errors.Wrap(err, "error registering layer") |
|
290 |
- } |
|
291 |
- defer i.layerStores[img.OperatingSystem()].Release(newL) |
|
292 |
- |
|
293 |
- newImage := *img |
|
294 |
- newImage.RootFS = nil |
|
295 |
- |
|
296 |
- rootFS := *parentImg.RootFS |
|
297 |
- rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID()) |
|
298 |
- newImage.RootFS = &rootFS |
|
299 |
- |
|
300 |
- for i, hi := range newImage.History { |
|
301 |
- if i >= len(parentImg.History) { |
|
302 |
- hi.EmptyLayer = true |
|
303 |
- } |
|
304 |
- newImage.History[i] = hi |
|
305 |
- } |
|
306 |
- |
|
307 |
- now := time.Now() |
|
308 |
- var historyComment string |
|
309 |
- if len(parent) > 0 { |
|
310 |
- historyComment = fmt.Sprintf("merge %s to %s", id, parent) |
|
311 |
- } else { |
|
312 |
- historyComment = fmt.Sprintf("create new from %s", id) |
|
313 |
- } |
|
314 |
- |
|
315 |
- newImage.History = append(newImage.History, image.History{ |
|
316 |
- Created: now, |
|
317 |
- Comment: historyComment, |
|
318 |
- }) |
|
319 |
- newImage.Created = now |
|
320 |
- |
|
321 |
- b, err := json.Marshal(&newImage) |
|
322 |
- if err != nil { |
|
323 |
- return "", errors.Wrap(err, "error marshalling image config") |
|
324 |
- } |
|
325 |
- |
|
326 |
- newImgID, err := i.imageStore.Create(b) |
|
327 |
- if err != nil { |
|
328 |
- return "", errors.Wrap(err, "error creating new image after squash") |
|
329 |
- } |
|
330 |
- return string(newImgID), nil |
|
331 |
-} |
|
332 |
- |
|
333 |
-func newImage(image *image.Image, size int64) *types.ImageSummary { |
|
334 |
- newImage := new(types.ImageSummary) |
|
335 |
- newImage.ParentID = image.Parent.String() |
|
336 |
- newImage.ID = image.ID().String() |
|
337 |
- newImage.Created = image.Created.Unix() |
|
338 |
- newImage.Size = size |
|
339 |
- newImage.VirtualSize = size |
|
340 |
- newImage.SharedSize = -1 |
|
341 |
- newImage.Containers = -1 |
|
342 |
- if image.Config != nil { |
|
343 |
- newImage.Labels = image.Config.Labels |
|
344 |
- } |
|
345 |
- return newImage |
|
346 |
-} |
347 | 1 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,27 @@ |
0 |
+package images // import "github.com/docker/docker/daemon/images" |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "github.com/docker/docker/builder" |
|
4 |
+ "github.com/docker/docker/image/cache" |
|
5 |
+ "github.com/sirupsen/logrus" |
|
6 |
+) |
|
7 |
+ |
|
8 |
+// MakeImageCache creates a stateful image cache. |
|
9 |
+func (i *ImageService) MakeImageCache(sourceRefs []string) builder.ImageCache { |
|
10 |
+ if len(sourceRefs) == 0 { |
|
11 |
+ return cache.NewLocal(i.imageStore) |
|
12 |
+ } |
|
13 |
+ |
|
14 |
+ cache := cache.New(i.imageStore) |
|
15 |
+ |
|
16 |
+ for _, ref := range sourceRefs { |
|
17 |
+ img, err := i.GetImage(ref) |
|
18 |
+ if err != nil { |
|
19 |
+ logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err) |
|
20 |
+ continue |
|
21 |
+ } |
|
22 |
+ cache.Populate(img) |
|
23 |
+ } |
|
24 |
+ |
|
25 |
+ return cache |
|
26 |
+} |
0 | 27 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,75 @@ |
0 |
+package images // import "github.com/docker/docker/daemon/images" |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "fmt" |
|
4 |
+ |
|
5 |
+ "github.com/docker/distribution/reference" |
|
6 |
+ "github.com/docker/docker/errdefs" |
|
7 |
+ "github.com/docker/docker/image" |
|
8 |
+) |
|
9 |
+ |
|
10 |
+// ErrImageDoesNotExist is error returned when no image can be found for a reference. |
|
11 |
+type ErrImageDoesNotExist struct { |
|
12 |
+ ref reference.Reference |
|
13 |
+} |
|
14 |
+ |
|
15 |
+func (e ErrImageDoesNotExist) Error() string { |
|
16 |
+ ref := e.ref |
|
17 |
+ if named, ok := ref.(reference.Named); ok { |
|
18 |
+ ref = reference.TagNameOnly(named) |
|
19 |
+ } |
|
20 |
+ return fmt.Sprintf("No such image: %s", reference.FamiliarString(ref)) |
|
21 |
+} |
|
22 |
+ |
|
23 |
+// NotFound implements the NotFound interface |
|
24 |
+func (e ErrImageDoesNotExist) NotFound() {} |
|
25 |
+ |
|
26 |
+// GetImageIDAndOS returns an image ID and operating system corresponding to the image referred to by |
|
27 |
+// refOrID. |
|
28 |
+// called from list.go foldFilter() |
|
29 |
+func (i ImageService) GetImageIDAndOS(refOrID string) (image.ID, string, error) { |
|
30 |
+ ref, err := reference.ParseAnyReference(refOrID) |
|
31 |
+ if err != nil { |
|
32 |
+ return "", "", errdefs.InvalidParameter(err) |
|
33 |
+ } |
|
34 |
+ namedRef, ok := ref.(reference.Named) |
|
35 |
+ if !ok { |
|
36 |
+ digested, ok := ref.(reference.Digested) |
|
37 |
+ if !ok { |
|
38 |
+ return "", "", ErrImageDoesNotExist{ref} |
|
39 |
+ } |
|
40 |
+ id := image.IDFromDigest(digested.Digest()) |
|
41 |
+ if img, err := i.imageStore.Get(id); err == nil { |
|
42 |
+ return id, img.OperatingSystem(), nil |
|
43 |
+ } |
|
44 |
+ return "", "", ErrImageDoesNotExist{ref} |
|
45 |
+ } |
|
46 |
+ |
|
47 |
+ if digest, err := i.referenceStore.Get(namedRef); err == nil { |
|
48 |
+ // Search the image stores to get the operating system, defaulting to host OS. |
|
49 |
+ id := image.IDFromDigest(digest) |
|
50 |
+ if img, err := i.imageStore.Get(id); err == nil { |
|
51 |
+ return id, img.OperatingSystem(), nil |
|
52 |
+ } |
|
53 |
+ } |
|
54 |
+ |
|
55 |
+ // Search based on ID |
|
56 |
+ if id, err := i.imageStore.Search(refOrID); err == nil { |
|
57 |
+ img, err := i.imageStore.Get(id) |
|
58 |
+ if err != nil { |
|
59 |
+ return "", "", ErrImageDoesNotExist{ref} |
|
60 |
+ } |
|
61 |
+ return id, img.OperatingSystem(), nil |
|
62 |
+ } |
|
63 |
+ |
|
64 |
+ return "", "", ErrImageDoesNotExist{ref} |
|
65 |
+} |
|
66 |
+ |
|
67 |
+// GetImage returns an image corresponding to the image referred to by refOrID. |
|
68 |
+func (i *ImageService) GetImage(refOrID string) (*image.Image, error) { |
|
69 |
+ imgID, _, err := i.GetImageIDAndOS(refOrID) |
|
70 |
+ if err != nil { |
|
71 |
+ return nil, err |
|
72 |
+ } |
|
73 |
+ return i.imageStore.Get(imgID) |
|
74 |
+} |
0 | 75 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,219 @@ |
0 |
+package images // import "github.com/docker/docker/daemon/images" |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "io" |
|
4 |
+ |
|
5 |
+ "github.com/docker/distribution/reference" |
|
6 |
+ "github.com/docker/docker/api/types" |
|
7 |
+ "github.com/docker/docker/api/types/backend" |
|
8 |
+ "github.com/docker/docker/builder" |
|
9 |
+ "github.com/docker/docker/image" |
|
10 |
+ "github.com/docker/docker/layer" |
|
11 |
+ "github.com/docker/docker/pkg/containerfs" |
|
12 |
+ "github.com/docker/docker/pkg/stringid" |
|
13 |
+ "github.com/docker/docker/pkg/system" |
|
14 |
+ "github.com/docker/docker/registry" |
|
15 |
+ "github.com/pkg/errors" |
|
16 |
+ "golang.org/x/net/context" |
|
17 |
+) |
|
18 |
+ |
|
19 |
+type roLayer struct { |
|
20 |
+ released bool |
|
21 |
+ layerStore layer.Store |
|
22 |
+ roLayer layer.Layer |
|
23 |
+} |
|
24 |
+ |
|
25 |
+func (l *roLayer) DiffID() layer.DiffID { |
|
26 |
+ if l.roLayer == nil { |
|
27 |
+ return layer.DigestSHA256EmptyTar |
|
28 |
+ } |
|
29 |
+ return l.roLayer.DiffID() |
|
30 |
+} |
|
31 |
+ |
|
32 |
+func (l *roLayer) Release() error { |
|
33 |
+ if l.released { |
|
34 |
+ return nil |
|
35 |
+ } |
|
36 |
+ if l.roLayer != nil { |
|
37 |
+ metadata, err := l.layerStore.Release(l.roLayer) |
|
38 |
+ layer.LogReleaseMetadata(metadata) |
|
39 |
+ if err != nil { |
|
40 |
+ return errors.Wrap(err, "failed to release ROLayer") |
|
41 |
+ } |
|
42 |
+ } |
|
43 |
+ l.roLayer = nil |
|
44 |
+ l.released = true |
|
45 |
+ return nil |
|
46 |
+} |
|
47 |
+ |
|
48 |
+func (l *roLayer) NewRWLayer() (builder.RWLayer, error) { |
|
49 |
+ var chainID layer.ChainID |
|
50 |
+ if l.roLayer != nil { |
|
51 |
+ chainID = l.roLayer.ChainID() |
|
52 |
+ } |
|
53 |
+ |
|
54 |
+ mountID := stringid.GenerateRandomID() |
|
55 |
+ newLayer, err := l.layerStore.CreateRWLayer(mountID, chainID, nil) |
|
56 |
+ if err != nil { |
|
57 |
+ return nil, errors.Wrap(err, "failed to create rwlayer") |
|
58 |
+ } |
|
59 |
+ |
|
60 |
+ rwLayer := &rwLayer{layerStore: l.layerStore, rwLayer: newLayer} |
|
61 |
+ |
|
62 |
+ fs, err := newLayer.Mount("") |
|
63 |
+ if err != nil { |
|
64 |
+ rwLayer.Release() |
|
65 |
+ return nil, err |
|
66 |
+ } |
|
67 |
+ |
|
68 |
+ rwLayer.fs = fs |
|
69 |
+ |
|
70 |
+ return rwLayer, nil |
|
71 |
+} |
|
72 |
+ |
|
73 |
+type rwLayer struct { |
|
74 |
+ released bool |
|
75 |
+ layerStore layer.Store |
|
76 |
+ rwLayer layer.RWLayer |
|
77 |
+ fs containerfs.ContainerFS |
|
78 |
+} |
|
79 |
+ |
|
80 |
+func (l *rwLayer) Root() containerfs.ContainerFS { |
|
81 |
+ return l.fs |
|
82 |
+} |
|
83 |
+ |
|
84 |
+func (l *rwLayer) Commit() (builder.ROLayer, error) { |
|
85 |
+ stream, err := l.rwLayer.TarStream() |
|
86 |
+ if err != nil { |
|
87 |
+ return nil, err |
|
88 |
+ } |
|
89 |
+ defer stream.Close() |
|
90 |
+ |
|
91 |
+ var chainID layer.ChainID |
|
92 |
+ if parent := l.rwLayer.Parent(); parent != nil { |
|
93 |
+ chainID = parent.ChainID() |
|
94 |
+ } |
|
95 |
+ |
|
96 |
+ newLayer, err := l.layerStore.Register(stream, chainID) |
|
97 |
+ if err != nil { |
|
98 |
+ return nil, err |
|
99 |
+ } |
|
100 |
+ // TODO: An optimization would be to handle empty layers before returning |
|
101 |
+ return &roLayer{layerStore: l.layerStore, roLayer: newLayer}, nil |
|
102 |
+} |
|
103 |
+ |
|
104 |
+func (l *rwLayer) Release() error { |
|
105 |
+ if l.released { |
|
106 |
+ return nil |
|
107 |
+ } |
|
108 |
+ |
|
109 |
+ if l.fs != nil { |
|
110 |
+ if err := l.rwLayer.Unmount(); err != nil { |
|
111 |
+ return errors.Wrap(err, "failed to unmount RWLayer") |
|
112 |
+ } |
|
113 |
+ l.fs = nil |
|
114 |
+ } |
|
115 |
+ |
|
116 |
+ metadata, err := l.layerStore.ReleaseRWLayer(l.rwLayer) |
|
117 |
+ layer.LogReleaseMetadata(metadata) |
|
118 |
+ if err != nil { |
|
119 |
+ return errors.Wrap(err, "failed to release RWLayer") |
|
120 |
+ } |
|
121 |
+ l.released = true |
|
122 |
+ return nil |
|
123 |
+} |
|
124 |
+ |
|
125 |
+func newROLayerForImage(img *image.Image, layerStore layer.Store) (builder.ROLayer, error) { |
|
126 |
+ if img == nil || img.RootFS.ChainID() == "" { |
|
127 |
+ return &roLayer{layerStore: layerStore}, nil |
|
128 |
+ } |
|
129 |
+ // Hold a reference to the image layer so that it can't be removed before |
|
130 |
+ // it is released |
|
131 |
+ layer, err := layerStore.Get(img.RootFS.ChainID()) |
|
132 |
+ if err != nil { |
|
133 |
+ return nil, errors.Wrapf(err, "failed to get layer for image %s", img.ImageID()) |
|
134 |
+ } |
|
135 |
+ return &roLayer{layerStore: layerStore, roLayer: layer}, nil |
|
136 |
+} |
|
137 |
+ |
|
138 |
+// TODO: could this use the regular daemon PullImage ? |
|
139 |
+func (i *ImageService) pullForBuilder(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer, os string) (*image.Image, error) { |
|
140 |
+ ref, err := reference.ParseNormalizedNamed(name) |
|
141 |
+ if err != nil { |
|
142 |
+ return nil, err |
|
143 |
+ } |
|
144 |
+ ref = reference.TagNameOnly(ref) |
|
145 |
+ |
|
146 |
+ pullRegistryAuth := &types.AuthConfig{} |
|
147 |
+ if len(authConfigs) > 0 { |
|
148 |
+ // The request came with a full auth config, use it |
|
149 |
+ repoInfo, err := i.registryService.ResolveRepository(ref) |
|
150 |
+ if err != nil { |
|
151 |
+ return nil, err |
|
152 |
+ } |
|
153 |
+ |
|
154 |
+ resolvedConfig := registry.ResolveAuthConfig(authConfigs, repoInfo.Index) |
|
155 |
+ pullRegistryAuth = &resolvedConfig |
|
156 |
+ } |
|
157 |
+ |
|
158 |
+ if err := i.pullImageWithReference(ctx, ref, os, nil, pullRegistryAuth, output); err != nil { |
|
159 |
+ return nil, err |
|
160 |
+ } |
|
161 |
+ return i.GetImage(name) |
|
162 |
+} |
|
163 |
+ |
|
164 |
+// GetImageAndReleasableLayer returns an image and releaseable layer for a reference or ID. |
|
165 |
+// Every call to GetImageAndReleasableLayer MUST call releasableLayer.Release() to prevent |
|
166 |
+// leaking of layers. |
|
167 |
+func (i *ImageService) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ROLayer, error) { |
|
168 |
+ if refOrID == "" { |
|
169 |
+ if !system.IsOSSupported(opts.OS) { |
|
170 |
+ return nil, nil, system.ErrNotSupportedOperatingSystem |
|
171 |
+ } |
|
172 |
+ layer, err := newROLayerForImage(nil, i.layerStores[opts.OS]) |
|
173 |
+ return nil, layer, err |
|
174 |
+ } |
|
175 |
+ |
|
176 |
+ if opts.PullOption != backend.PullOptionForcePull { |
|
177 |
+ image, err := i.GetImage(refOrID) |
|
178 |
+ if err != nil && opts.PullOption == backend.PullOptionNoPull { |
|
179 |
+ return nil, nil, err |
|
180 |
+ } |
|
181 |
+ // TODO: shouldn't we error out if error is different from "not found" ? |
|
182 |
+ if image != nil { |
|
183 |
+ if !system.IsOSSupported(image.OperatingSystem()) { |
|
184 |
+ return nil, nil, system.ErrNotSupportedOperatingSystem |
|
185 |
+ } |
|
186 |
+ layer, err := newROLayerForImage(image, i.layerStores[image.OperatingSystem()]) |
|
187 |
+ return image, layer, err |
|
188 |
+ } |
|
189 |
+ } |
|
190 |
+ |
|
191 |
+ image, err := i.pullForBuilder(ctx, refOrID, opts.AuthConfig, opts.Output, opts.OS) |
|
192 |
+ if err != nil { |
|
193 |
+ return nil, nil, err |
|
194 |
+ } |
|
195 |
+ if !system.IsOSSupported(image.OperatingSystem()) { |
|
196 |
+ return nil, nil, system.ErrNotSupportedOperatingSystem |
|
197 |
+ } |
|
198 |
+ layer, err := newROLayerForImage(image, i.layerStores[image.OperatingSystem()]) |
|
199 |
+ return image, layer, err |
|
200 |
+} |
|
201 |
+ |
|
202 |
+// CreateImage creates a new image by adding a config and ID to the image store. |
|
203 |
+// This is similar to LoadImage() except that it receives JSON encoded bytes of |
|
204 |
+// an image instead of a tar archive. |
|
205 |
+func (i *ImageService) CreateImage(config []byte, parent string) (builder.Image, error) { |
|
206 |
+ id, err := i.imageStore.Create(config) |
|
207 |
+ if err != nil { |
|
208 |
+ return nil, errors.Wrapf(err, "failed to create image") |
|
209 |
+ } |
|
210 |
+ |
|
211 |
+ if parent != "" { |
|
212 |
+ if err := i.imageStore.SetParent(id, image.ID(parent)); err != nil { |
|
213 |
+ return nil, errors.Wrapf(err, "failed to set parent %s", parent) |
|
214 |
+ } |
|
215 |
+ } |
|
216 |
+ |
|
217 |
+ return i.imageStore.Get(id) |
|
218 |
+} |
0 | 219 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,127 @@ |
0 |
+package images // import "github.com/docker/docker/daemon/images" |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "encoding/json" |
|
4 |
+ "io" |
|
5 |
+ |
|
6 |
+ "github.com/docker/docker/api/types/backend" |
|
7 |
+ "github.com/docker/docker/image" |
|
8 |
+ "github.com/docker/docker/layer" |
|
9 |
+ "github.com/docker/docker/pkg/ioutils" |
|
10 |
+ "github.com/docker/docker/pkg/system" |
|
11 |
+ "github.com/pkg/errors" |
|
12 |
+) |
|
13 |
+ |
|
14 |
+// CommitImage creates a new image from a commit config |
|
15 |
+func (i *ImageService) CommitImage(c backend.CommitConfig) (image.ID, error) { |
|
16 |
+ layerStore, ok := i.layerStores[c.ContainerOS] |
|
17 |
+ if !ok { |
|
18 |
+ return "", system.ErrNotSupportedOperatingSystem |
|
19 |
+ } |
|
20 |
+ rwTar, err := exportContainerRw(layerStore, c.ContainerID, c.ContainerMountLabel) |
|
21 |
+ if err != nil { |
|
22 |
+ return "", err |
|
23 |
+ } |
|
24 |
+ defer func() { |
|
25 |
+ if rwTar != nil { |
|
26 |
+ rwTar.Close() |
|
27 |
+ } |
|
28 |
+ }() |
|
29 |
+ |
|
30 |
+ var parent *image.Image |
|
31 |
+ if c.ParentImageID == "" { |
|
32 |
+ parent = new(image.Image) |
|
33 |
+ parent.RootFS = image.NewRootFS() |
|
34 |
+ } else { |
|
35 |
+ parent, err = i.imageStore.Get(image.ID(c.ParentImageID)) |
|
36 |
+ if err != nil { |
|
37 |
+ return "", err |
|
38 |
+ } |
|
39 |
+ } |
|
40 |
+ |
|
41 |
+ l, err := layerStore.Register(rwTar, parent.RootFS.ChainID()) |
|
42 |
+ if err != nil { |
|
43 |
+ return "", err |
|
44 |
+ } |
|
45 |
+ defer layer.ReleaseAndLog(layerStore, l) |
|
46 |
+ |
|
47 |
+ cc := image.ChildConfig{ |
|
48 |
+ ContainerID: c.ContainerID, |
|
49 |
+ Author: c.Author, |
|
50 |
+ Comment: c.Comment, |
|
51 |
+ ContainerConfig: c.ContainerConfig, |
|
52 |
+ Config: c.Config, |
|
53 |
+ DiffID: l.DiffID(), |
|
54 |
+ } |
|
55 |
+ config, err := json.Marshal(image.NewChildImage(parent, cc, c.ContainerOS)) |
|
56 |
+ if err != nil { |
|
57 |
+ return "", err |
|
58 |
+ } |
|
59 |
+ |
|
60 |
+ id, err := i.imageStore.Create(config) |
|
61 |
+ if err != nil { |
|
62 |
+ return "", err |
|
63 |
+ } |
|
64 |
+ |
|
65 |
+ if c.ParentImageID != "" { |
|
66 |
+ if err := i.imageStore.SetParent(id, image.ID(c.ParentImageID)); err != nil { |
|
67 |
+ return "", err |
|
68 |
+ } |
|
69 |
+ } |
|
70 |
+ return id, nil |
|
71 |
+} |
|
72 |
+ |
|
73 |
+func exportContainerRw(layerStore layer.Store, id, mountLabel string) (arch io.ReadCloser, err error) { |
|
74 |
+ rwlayer, err := layerStore.GetRWLayer(id) |
|
75 |
+ if err != nil { |
|
76 |
+ return nil, err |
|
77 |
+ } |
|
78 |
+ defer func() { |
|
79 |
+ if err != nil { |
|
80 |
+ layerStore.ReleaseRWLayer(rwlayer) |
|
81 |
+ } |
|
82 |
+ }() |
|
83 |
+ |
|
84 |
+ // TODO: this mount call is not necessary as we assume that TarStream() should |
|
85 |
+ // mount the layer if needed. But the Diff() function for windows requests that |
|
86 |
+ // the layer should be mounted when calling it. So we reserve this mount call |
|
87 |
+ // until windows driver can implement Diff() interface correctly. |
|
88 |
+ _, err = rwlayer.Mount(mountLabel) |
|
89 |
+ if err != nil { |
|
90 |
+ return nil, err |
|
91 |
+ } |
|
92 |
+ |
|
93 |
+ archive, err := rwlayer.TarStream() |
|
94 |
+ if err != nil { |
|
95 |
+ rwlayer.Unmount() |
|
96 |
+ return nil, err |
|
97 |
+ } |
|
98 |
+ return ioutils.NewReadCloserWrapper(archive, func() error { |
|
99 |
+ archive.Close() |
|
100 |
+ err = rwlayer.Unmount() |
|
101 |
+ layerStore.ReleaseRWLayer(rwlayer) |
|
102 |
+ return err |
|
103 |
+ }), |
|
104 |
+ nil |
|
105 |
+} |
|
106 |
+ |
|
107 |
+// CommitBuildStep is used by the builder to create an image for each step in |
|
108 |
+// the build. |
|
109 |
+// |
|
110 |
+// This method is different from CreateImageFromContainer: |
|
111 |
+// * it doesn't attempt to validate container state |
|
112 |
+// * it doesn't send a commit action to metrics |
|
113 |
+// * it doesn't log a container commit event |
|
114 |
+// |
|
115 |
+// This is a temporary shim. Should be removed when builder stops using commit. |
|
116 |
+func (i *ImageService) CommitBuildStep(c backend.CommitConfig) (image.ID, error) { |
|
117 |
+ container := i.containers.Get(c.ContainerID) |
|
118 |
+ if container == nil { |
|
119 |
+ // TODO: use typed error |
|
120 |
+ return "", errors.Errorf("container not found: %s", c.ContainerID) |
|
121 |
+ } |
|
122 |
+ c.ContainerMountLabel = container.MountLabel |
|
123 |
+ c.ContainerOS = container.OS |
|
124 |
+ c.ParentImageID = string(container.ImageID) |
|
125 |
+ return i.CommitImage(c) |
|
126 |
+} |
0 | 127 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,413 @@ |
0 |
+package images // import "github.com/docker/docker/daemon/images" |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "fmt" |
|
4 |
+ "strings" |
|
5 |
+ "time" |
|
6 |
+ |
|
7 |
+ "github.com/docker/distribution/reference" |
|
8 |
+ "github.com/docker/docker/api/types" |
|
9 |
+ "github.com/docker/docker/container" |
|
10 |
+ "github.com/docker/docker/errdefs" |
|
11 |
+ "github.com/docker/docker/image" |
|
12 |
+ "github.com/docker/docker/pkg/stringid" |
|
13 |
+ "github.com/docker/docker/pkg/system" |
|
14 |
+ "github.com/pkg/errors" |
|
15 |
+) |
|
16 |
+ |
|
17 |
// conflictType is a bitmask describing the conditions that can prevent an
// image from being deleted. Individual conditions combine into the two
// severity groups consumed by checkImageDeleteConflict.
type conflictType int

const (
	conflictDependentChild conflictType = (1 << iota)
	conflictRunningContainer
	conflictActiveReference
	conflictStoppedContainer
	// conflictHard conditions can never be overridden, even with force.
	conflictHard = conflictDependentChild | conflictRunningContainer
	// conflictSoft conditions may be overridden when force is true.
	conflictSoft = conflictActiveReference | conflictStoppedContainer
)
|
27 |
+ |
|
28 |
// ImageDelete deletes the image referenced by the given imageRef from this
// daemon. The given imageRef can be an image ID, ID prefix, or a repository
// reference (with an optional tag or digest, defaulting to the tag name
// "latest"). There is differing behavior depending on whether the given
// imageRef is a repository reference or not.
//
// If the given imageRef is a repository reference then that repository
// reference will be removed. However, if there exists any containers which
// were created using the same image reference then the repository reference
// cannot be removed unless either there are other repository references to the
// same image or force is true. Following removal of the repository reference,
// the referenced image itself will attempt to be deleted as described below
// but quietly, meaning any image delete conflicts will cause the image to not
// be deleted and the conflict will not be reported.
//
// There may be conflicts preventing deletion of an image and these conflicts
// are divided into two categories grouped by their severity:
//
// Hard Conflict:
//  - a pull or build using the image.
//  - any descendant image.
//  - any running container using the image.
//
// Soft Conflict:
//  - any stopped container using the image.
//  - any repository tag or digest references to the image.
//
// The image cannot be removed if there are any hard conflicts and can be
// removed if there are soft conflicts only if force is true.
//
// If prune is true, ancestor images will each attempt to be deleted quietly,
// meaning any delete conflicts will cause the image to not be deleted and the
// conflict will not be reported.
func (i *ImageService) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) {
	start := time.Now()
	records := []types.ImageDeleteResponseItem{}

	imgID, operatingSystem, err := i.GetImageIDAndOS(imageRef)
	if err != nil {
		return nil, err
	}
	if !system.IsOSSupported(operatingSystem) {
		return nil, errors.Errorf("unable to delete image: %q", system.ErrNotSupportedOperatingSystem)
	}

	repoRefs := i.referenceStore.References(imgID.Digest())

	// using matches any container (running or stopped) backed by this image.
	using := func(c *container.Container) bool {
		return c.ImageID == imgID
	}

	var removedRepositoryRef bool
	if !isImageIDPrefix(imgID.String(), imageRef) {
		// A repository reference was given and should be removed
		// first. We can only remove this reference if either force is
		// true, there are multiple repository references to this
		// image, or there are no containers using the given reference.
		if !force && isSingleReference(repoRefs) {
			if container := i.containers.First(using); container != nil {
				// If we removed the repository reference then
				// this image would remain "dangling" and since
				// we really want to avoid that the client must
				// explicitly force its removal.
				err := errors.Errorf("conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String()))
				return nil, errdefs.Conflict(err)
			}
		}

		parsedRef, err := reference.ParseNormalizedNamed(imageRef)
		if err != nil {
			return nil, err
		}

		parsedRef, err = i.removeImageRef(parsedRef)
		if err != nil {
			return nil, err
		}

		untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)}

		i.LogImageEvent(imgID.String(), imgID.String(), "untag")
		records = append(records, untaggedRecord)

		// Re-read references: the deletion above may have changed them.
		repoRefs = i.referenceStore.References(imgID.Digest())

		// If a tag reference was removed and the only remaining
		// references to the same repository are digest references,
		// then clean up those digest references.
		if _, isCanonical := parsedRef.(reference.Canonical); !isCanonical {
			foundRepoTagRef := false
			for _, repoRef := range repoRefs {
				if _, repoRefIsCanonical := repoRef.(reference.Canonical); !repoRefIsCanonical && parsedRef.Name() == repoRef.Name() {
					foundRepoTagRef = true
					break
				}
			}
			if !foundRepoTagRef {
				// Remove canonical references from same repository
				remainingRefs := []reference.Named{}
				for _, repoRef := range repoRefs {
					if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() {
						if _, err := i.removeImageRef(repoRef); err != nil {
							return records, err
						}

						untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(repoRef)}
						records = append(records, untaggedRecord)
					} else {
						remainingRefs = append(remainingRefs, repoRef)

					}
				}
				repoRefs = remainingRefs
			}
		}

		// If it has remaining references then the untag finished the remove
		if len(repoRefs) > 0 {
			return records, nil
		}

		removedRepositoryRef = true
	} else {
		// If an ID reference was given AND there is at most one tag
		// reference to the image AND all references are within one
		// repository, then remove all references.
		if isSingleReference(repoRefs) {
			c := conflictHard
			if !force {
				// Without force, also block on stopped containers, but not on
				// active references — we are about to remove them ourselves.
				c |= conflictSoft &^ conflictActiveReference
			}
			if conflict := i.checkImageDeleteConflict(imgID, c); conflict != nil {
				return nil, conflict
			}

			for _, repoRef := range repoRefs {
				parsedRef, err := i.removeImageRef(repoRef)
				if err != nil {
					return nil, err
				}

				untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)}

				i.LogImageEvent(imgID.String(), imgID.String(), "untag")
				records = append(records, untaggedRecord)
			}
		}
	}

	if err := i.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef); err != nil {
		return nil, err
	}

	imageActions.WithValues("delete").UpdateSince(start)

	return records, nil
}
|
186 |
+ |
|
187 |
+// isSingleReference returns true when all references are from one repository |
|
188 |
+// and there is at most one tag. Returns false for empty input. |
|
189 |
+func isSingleReference(repoRefs []reference.Named) bool { |
|
190 |
+ if len(repoRefs) <= 1 { |
|
191 |
+ return len(repoRefs) == 1 |
|
192 |
+ } |
|
193 |
+ var singleRef reference.Named |
|
194 |
+ canonicalRefs := map[string]struct{}{} |
|
195 |
+ for _, repoRef := range repoRefs { |
|
196 |
+ if _, isCanonical := repoRef.(reference.Canonical); isCanonical { |
|
197 |
+ canonicalRefs[repoRef.Name()] = struct{}{} |
|
198 |
+ } else if singleRef == nil { |
|
199 |
+ singleRef = repoRef |
|
200 |
+ } else { |
|
201 |
+ return false |
|
202 |
+ } |
|
203 |
+ } |
|
204 |
+ if singleRef == nil { |
|
205 |
+ // Just use first canonical ref |
|
206 |
+ singleRef = repoRefs[0] |
|
207 |
+ } |
|
208 |
+ _, ok := canonicalRefs[singleRef.Name()] |
|
209 |
+ return len(canonicalRefs) == 1 && ok |
|
210 |
+} |
|
211 |
+ |
|
212 |
// isImageIDPrefix reports whether possiblePrefix is a prefix of imageID,
// matching against either the full ID (including the "algorithm:" part)
// or the portion that follows the algorithm separator.
func isImageIDPrefix(imageID, possiblePrefix string) bool {
	if strings.HasPrefix(imageID, possiblePrefix) {
		return true
	}

	sep := strings.IndexRune(imageID, ':')
	if sep < 0 {
		return false
	}
	return strings.HasPrefix(imageID[sep+1:], possiblePrefix)
}
|
225 |
+ |
|
226 |
// removeImageRef attempts to parse and remove the given image reference from
// this daemon's store of repository tag/digest references. The given
// repositoryRef must not be an image ID but a repository name followed by an
// optional tag or digest reference. If tag or digest is omitted, the default
// tag is used. Returns the resolved (tag-qualified) reference and any error
// from the store.
func (i *ImageService) removeImageRef(ref reference.Named) (reference.Named, error) {
	// Normalize a bare repository name to "repo:latest" before deleting.
	ref = reference.TagNameOnly(ref)

	// Ignore the boolean value returned, as far as we're concerned, this
	// is an idempotent operation and it's okay if the reference didn't
	// exist in the first place.
	_, err := i.referenceStore.Delete(ref)

	return ref, err
}
|
241 |
+ |
|
242 |
+// removeAllReferencesToImageID attempts to remove every reference to the given |
|
243 |
+// imgID from this daemon's store of repository tag/digest references. Returns |
|
244 |
+// on the first encountered error. Removed references are logged to this |
|
245 |
+// daemon's event service. An "Untagged" types.ImageDeleteResponseItem is added to the |
|
246 |
+// given list of records. |
|
247 |
+func (i *ImageService) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDeleteResponseItem) error { |
|
248 |
+ imageRefs := i.referenceStore.References(imgID.Digest()) |
|
249 |
+ |
|
250 |
+ for _, imageRef := range imageRefs { |
|
251 |
+ parsedRef, err := i.removeImageRef(imageRef) |
|
252 |
+ if err != nil { |
|
253 |
+ return err |
|
254 |
+ } |
|
255 |
+ |
|
256 |
+ untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} |
|
257 |
+ |
|
258 |
+ i.LogImageEvent(imgID.String(), imgID.String(), "untag") |
|
259 |
+ *records = append(*records, untaggedRecord) |
|
260 |
+ } |
|
261 |
+ |
|
262 |
+ return nil |
|
263 |
+} |
|
264 |
+ |
|
265 |
// imageDeleteConflict holds a soft or hard conflict and an associated error.
// It implements the error interface.
type imageDeleteConflict struct {
	hard    bool     // true when the conflict cannot be overridden with force
	used    bool     // true when a container (running or stopped) uses the image
	imgID   image.ID // the image the conflict applies to
	message string   // human-readable description of the conflicting condition
}
|
273 |
+ |
|
274 |
+func (idc *imageDeleteConflict) Error() string { |
|
275 |
+ var forceMsg string |
|
276 |
+ if idc.hard { |
|
277 |
+ forceMsg = "cannot be forced" |
|
278 |
+ } else { |
|
279 |
+ forceMsg = "must be forced" |
|
280 |
+ } |
|
281 |
+ |
|
282 |
+ return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", stringid.TruncateID(idc.imgID.String()), forceMsg, idc.message) |
|
283 |
+} |
|
284 |
+ |
|
285 |
// Conflict is a no-op marker method; its presence lets the errdefs package
// classify this error as a conflict error.
func (idc *imageDeleteConflict) Conflict() {}
|
286 |
+ |
|
287 |
// imageDeleteHelper attempts to delete the given image from this daemon. If
// the image has any hard delete conflicts (child images or running containers
// using the image) then it cannot be deleted. If the image has any soft delete
// conflicts (any tags/digests referencing the image or any stopped container
// using the image) then it can only be deleted if force is true. If the delete
// succeeds and prune is true, the parent images are also deleted if they do
// not have any soft or hard delete conflicts themselves. Any deleted images
// and untagged references are appended to the given records. If any error or
// conflict is encountered, it will be returned immediately without deleting
// the image. If quiet is true, any encountered conflicts will be ignored and
// the function will return nil immediately without deleting the image.
func (i *ImageService) imageDeleteHelper(imgID image.ID, records *[]types.ImageDeleteResponseItem, force, prune, quiet bool) error {
	// First, determine if this image has any conflicts. Ignore soft conflicts
	// if force is true.
	c := conflictHard
	if !force {
		c |= conflictSoft
	}
	if conflict := i.checkImageDeleteConflict(imgID, c); conflict != nil {
		if quiet && (!i.imageIsDangling(imgID) || conflict.used) {
			// Ignore conflicts UNLESS the image is "dangling" or not being used in
			// which case we want the user to know.
			return nil
		}

		// There was a conflict and it's either a hard conflict OR we are not
		// forcing deletion on soft conflicts.
		return conflict
	}

	parent, err := i.imageStore.GetParent(imgID)
	if err != nil {
		// There may be no parent
		parent = ""
	}

	// Delete all repository tag/digest references to this image.
	if err := i.removeAllReferencesToImageID(imgID, records); err != nil {
		return err
	}

	removedLayers, err := i.imageStore.Delete(imgID)
	if err != nil {
		return err
	}

	i.LogImageEvent(imgID.String(), imgID.String(), "delete")
	*records = append(*records, types.ImageDeleteResponseItem{Deleted: imgID.String()})
	// Also report each layer removed along with the image.
	for _, removedLayer := range removedLayers {
		*records = append(*records, types.ImageDeleteResponseItem{Deleted: removedLayer.ChainID.String()})
	}

	if !prune || parent == "" {
		return nil
	}

	// We need to prune the parent image. This means delete it if there are
	// no tags/digests referencing it and there are no containers using it (
	// either running or stopped).
	// Do not force prunings, but do so quietly (stopping on any encountered
	// conflicts).
	return i.imageDeleteHelper(parent, records, false, true, true)
}
|
350 |
+ |
|
351 |
// checkImageDeleteConflict determines whether there are any conflicts
// preventing deletion of the given image from this daemon. A hard conflict is
// any image which has the given image as a parent or any running container
// using the image. A soft conflict is any tags/digest referencing the given
// image or any stopped container using the image. Only the conflict
// categories selected by mask are checked; the first matching conflict is
// returned (hard conflicts are checked before soft ones), or nil when none
// apply.
func (i *ImageService) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict {
	// Check if the image has any descendant images.
	if mask&conflictDependentChild != 0 && len(i.imageStore.Children(imgID)) > 0 {
		return &imageDeleteConflict{
			hard:    true,
			imgID:   imgID,
			message: "image has dependent child images",
		}
	}

	if mask&conflictRunningContainer != 0 {
		// Check if any running container is using the image.
		running := func(c *container.Container) bool {
			return c.IsRunning() && c.ImageID == imgID
		}
		if container := i.containers.First(running); container != nil {
			return &imageDeleteConflict{
				imgID:   imgID,
				hard:    true,
				used:    true,
				message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)),
			}
		}
	}

	// Check if any repository tags/digest reference this image.
	if mask&conflictActiveReference != 0 && len(i.referenceStore.References(imgID.Digest())) > 0 {
		return &imageDeleteConflict{
			imgID:   imgID,
			message: "image is referenced in multiple repositories",
		}
	}

	if mask&conflictStoppedContainer != 0 {
		// Check if any stopped containers reference this image.
		stopped := func(c *container.Container) bool {
			return !c.IsRunning() && c.ImageID == imgID
		}
		if container := i.containers.First(stopped); container != nil {
			return &imageDeleteConflict{
				imgID:   imgID,
				used:    true,
				message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)),
			}
		}
	}

	return nil
}
|
406 |
+ |
|
407 |
+// imageIsDangling returns whether the given image is "dangling" which means |
|
408 |
+// that there are no repository references to the given image and it has no |
|
409 |
+// child images. |
|
410 |
+func (i *ImageService) imageIsDangling(imgID image.ID) bool { |
|
411 |
+ return !(len(i.referenceStore.References(imgID.Digest())) > 0 || len(i.imageStore.Children(imgID)) > 0) |
|
412 |
+} |
0 | 413 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,39 @@ |
0 |
+package images // import "github.com/docker/docker/daemon/images" |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "github.com/docker/docker/api/types/events" |
|
4 |
+) |
|
5 |
+ |
|
6 |
// LogImageEvent generates an event related to an image with only the default
// attributes (an empty attribute map, which LogImageEventWithAttributes may
// still populate with the image's labels and reference name).
func (i *ImageService) LogImageEvent(imageID, refName, action string) {
	i.LogImageEventWithAttributes(imageID, refName, action, map[string]string{})
}
|
10 |
+ |
|
11 |
+// LogImageEventWithAttributes generates an event related to an image with specific given attributes. |
|
12 |
+func (i *ImageService) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { |
|
13 |
+ img, err := i.GetImage(imageID) |
|
14 |
+ if err == nil && img.Config != nil { |
|
15 |
+ // image has not been removed yet. |
|
16 |
+ // it could be missing if the event is `delete`. |
|
17 |
+ copyAttributes(attributes, img.Config.Labels) |
|
18 |
+ } |
|
19 |
+ if refName != "" { |
|
20 |
+ attributes["name"] = refName |
|
21 |
+ } |
|
22 |
+ actor := events.Actor{ |
|
23 |
+ ID: imageID, |
|
24 |
+ Attributes: attributes, |
|
25 |
+ } |
|
26 |
+ |
|
27 |
+ i.eventsService.Log(action, events.ImageEventType, actor) |
|
28 |
+} |
|
29 |
+ |
|
30 |
// copyAttributes copies every label into attributes so that event triggers
// never mutate the image's own label map. A nil labels map is a no-op
// (ranging over a nil map iterates zero times).
func copyAttributes(attributes, labels map[string]string) {
	for key, value := range labels {
		attributes[key] = value
	}
}
0 | 39 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,25 @@ |
0 |
+package images // import "github.com/docker/docker/daemon/images" |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "io" |
|
4 |
+ |
|
5 |
+ "github.com/docker/docker/image/tarexport" |
|
6 |
+) |
|
7 |
+ |
|
8 |
// ExportImage exports a list of images to the given output stream. The
// exported images are archived into a tar when written to the output
// stream. All images with the given tag and all versions containing
// the same tag are exported. names is the set of tags to export, and
// outStream is the writer which the images are written to.
func (i *ImageService) ExportImage(names []string, outStream io.Writer) error {
	// The ImageService itself is passed as the exporter's final argument —
	// presumably as the event-logging callback; confirm against
	// tarexport.NewTarExporter's signature.
	imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStores, i.referenceStore, i)
	return imageExporter.Save(names, outStream)
}
|
17 |
+ |
|
18 |
// LoadImage uploads a set of images into the repository. This is the
// complement of ExportImage. The input stream is an uncompressed tar
// ball containing images and metadata. When quiet is true, progress
// output to outStream is suppressed by the exporter.
func (i *ImageService) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error {
	imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStores, i.referenceStore, i)
	return imageExporter.Load(inTar, outStream, quiet)
}
0 | 25 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,84 @@ |
0 |
+package images // import "github.com/docker/docker/daemon/images" |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "fmt" |
|
4 |
+ "time" |
|
5 |
+ |
|
6 |
+ "github.com/docker/distribution/reference" |
|
7 |
+ "github.com/docker/docker/api/types/image" |
|
8 |
+ "github.com/docker/docker/layer" |
|
9 |
+) |
|
10 |
+ |
|
11 |
// ImageHistory returns a slice of ImageHistoryResponseItem structures for the
// specified image name by walking the image lineage. Entries are ordered
// newest-first; entries for ancestor images that can no longer be resolved
// keep the ID "<missing>".
func (i *ImageService) ImageHistory(name string) ([]*image.HistoryResponseItem, error) {
	start := time.Now()
	img, err := i.GetImage(name)
	if err != nil {
		return nil, err
	}

	history := []*image.HistoryResponseItem{}

	// Rebuild the chain of layer IDs incrementally: rootFS starts empty and
	// grows by one diff ID per non-empty history entry.
	layerCounter := 0
	rootFS := *img.RootFS
	rootFS.DiffIDs = nil

	for _, h := range img.History {
		var layerSize int64

		if !h.EmptyLayer {
			if len(img.RootFS.DiffIDs) <= layerCounter {
				return nil, fmt.Errorf("too many non-empty layers in History section")
			}

			rootFS.Append(img.RootFS.DiffIDs[layerCounter])
			l, err := i.layerStores[img.OperatingSystem()].Get(rootFS.ChainID())
			if err != nil {
				return nil, err
			}
			// Release the layer reference before checking the DiffSize error
			// so the reference is never leaked on the error path.
			layerSize, err = l.DiffSize()
			layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l)
			if err != nil {
				return nil, err
			}

			layerCounter++
		}

		// Prepend so the newest entry ends up first.
		history = append([]*image.HistoryResponseItem{{
			ID:        "<missing>",
			Created:   h.Created.Unix(),
			CreatedBy: h.CreatedBy,
			Comment:   h.Comment,
			Size:      layerSize,
		}}, history...)
	}

	// Fill in image IDs and tags by walking up the parent chain; stop as soon
	// as a parent is missing or cannot be loaded.
	histImg := img
	id := img.ID()
	for _, h := range history {
		h.ID = id.String()

		var tags []string
		for _, r := range i.referenceStore.References(id.Digest()) {
			if _, ok := r.(reference.NamedTagged); ok {
				tags = append(tags, reference.FamiliarString(r))
			}
		}

		h.Tags = tags

		id = histImg.Parent
		if id == "" {
			break
		}
		histImg, err = i.GetImage(id.String())
		if err != nil {
			break
		}
	}
	imageActions.WithValues("history").UpdateSince(start)
	return history, nil
}
0 | 84 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,138 @@ |
0 |
+package images // import "github.com/docker/docker/daemon/images" |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "encoding/json" |
|
4 |
+ "io" |
|
5 |
+ "net/http" |
|
6 |
+ "net/url" |
|
7 |
+ "runtime" |
|
8 |
+ "strings" |
|
9 |
+ "time" |
|
10 |
+ |
|
11 |
+ "github.com/docker/distribution/reference" |
|
12 |
+ "github.com/docker/docker/api/types/container" |
|
13 |
+ "github.com/docker/docker/builder/dockerfile" |
|
14 |
+ "github.com/docker/docker/builder/remotecontext" |
|
15 |
+ "github.com/docker/docker/dockerversion" |
|
16 |
+ "github.com/docker/docker/errdefs" |
|
17 |
+ "github.com/docker/docker/image" |
|
18 |
+ "github.com/docker/docker/layer" |
|
19 |
+ "github.com/docker/docker/pkg/archive" |
|
20 |
+ "github.com/docker/docker/pkg/progress" |
|
21 |
+ "github.com/docker/docker/pkg/streamformatter" |
|
22 |
+ "github.com/pkg/errors" |
|
23 |
+) |
|
24 |
+ |
|
25 |
// ImportImage imports an image, getting the archived layer data either from
// inConfig (if src is "-"), or from a URI specified in src. Progress output is
// written to outStream. Repository and tag names can optionally be given in
// the repo and tag arguments, respectively. The optional changes are
// Dockerfile instructions applied to the imported image's config, and msg
// becomes the image's comment (defaulting to "Imported from <src>").
func (i *ImageService) ImportImage(src string, repository, os string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error {
	var (
		rc     io.ReadCloser
		resp   *http.Response
		newRef reference.Named
	)

	// Default the operating system if not supplied.
	if os == "" {
		os = runtime.GOOS
	}

	if repository != "" {
		var err error
		newRef, err = reference.ParseNormalizedNamed(repository)
		if err != nil {
			return errdefs.InvalidParameter(err)
		}
		// An imported image gets a new digest, so tagging by digest is rejected.
		if _, isCanonical := newRef.(reference.Canonical); isCanonical {
			return errdefs.InvalidParameter(errors.New("cannot import digest reference"))
		}

		if tag != "" {
			newRef, err = reference.WithTag(newRef, tag)
			if err != nil {
				return errdefs.InvalidParameter(err)
			}
		}
	}

	// Apply the Dockerfile-style changes to an empty base config.
	config, err := dockerfile.BuildFromConfig(&container.Config{}, changes, os)
	if err != nil {
		return err
	}
	if src == "-" {
		// Layer data arrives on the request body itself.
		rc = inConfig
	} else {
		// Layer data is fetched from a URL; the request body is unused.
		inConfig.Close()
		// Default to http:// when no scheme was given.
		if len(strings.Split(src, "://")) == 1 {
			src = "http://" + src
		}
		u, err := url.Parse(src)
		if err != nil {
			return errdefs.InvalidParameter(err)
		}

		resp, err = remotecontext.GetWithStatusError(u.String())
		if err != nil {
			return err
		}
		outStream.Write(streamformatter.FormatStatus("", "Downloading from %s", u))
		progressOutput := streamformatter.NewJSONProgressOutput(outStream, true)
		rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing")
	}

	defer rc.Close()
	if len(msg) == 0 {
		msg = "Imported from " + src
	}

	// The tar stream may be compressed; decompress transparently before
	// registering it as a layer.
	inflatedLayerData, err := archive.DecompressStream(rc)
	if err != nil {
		return err
	}
	l, err := i.layerStores[os].Register(inflatedLayerData, "")
	if err != nil {
		return err
	}
	// Release the layer reference taken by Register once the image holds it.
	defer layer.ReleaseAndLog(i.layerStores[os], l)

	created := time.Now().UTC()
	imgConfig, err := json.Marshal(&image.Image{
		V1Image: image.V1Image{
			DockerVersion: dockerversion.Version,
			Config:        config,
			Architecture:  runtime.GOARCH,
			OS:            os,
			Created:       created,
			Comment:       msg,
		},
		RootFS: &image.RootFS{
			Type:    "layers",
			DiffIDs: []layer.DiffID{l.DiffID()},
		},
		History: []image.History{{
			Created: created,
			Comment: msg,
		}},
	})
	if err != nil {
		return err
	}

	id, err := i.imageStore.Create(imgConfig)
	if err != nil {
		return err
	}

	// FIXME: connect with commit code and call refstore directly
	if newRef != nil {
		if err := i.TagImageWithReference(id, newRef); err != nil {
			return err
		}
	}

	i.LogImageEvent(id.String(), id.String(), "import")
	outStream.Write(streamformatter.FormatStatus("", id.String()))
	return nil
}
0 | 138 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,104 @@ |
0 |
+package images // import "github.com/docker/docker/daemon/images" |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "time" |
|
4 |
+ |
|
5 |
+ "github.com/docker/distribution/reference" |
|
6 |
+ "github.com/docker/docker/api/types" |
|
7 |
+ "github.com/docker/docker/image" |
|
8 |
+ "github.com/docker/docker/layer" |
|
9 |
+ "github.com/docker/docker/pkg/system" |
|
10 |
+ "github.com/pkg/errors" |
|
11 |
+) |
|
12 |
+ |
|
13 |
// LookupImage looks up an image by name and returns it as an ImageInspect
// structure, including its tags/digests, layer size and metadata, and
// graph-driver information.
func (i *ImageService) LookupImage(name string) (*types.ImageInspect, error) {
	img, err := i.GetImage(name)
	if err != nil {
		return nil, errors.Wrapf(err, "no such image: %s", name)
	}
	if !system.IsOSSupported(img.OperatingSystem()) {
		return nil, system.ErrNotSupportedOperatingSystem
	}
	// Partition the image's references into tags and digests.
	refs := i.referenceStore.References(img.ID().Digest())
	repoTags := []string{}
	repoDigests := []string{}
	for _, ref := range refs {
		switch ref.(type) {
		case reference.NamedTagged:
			repoTags = append(repoTags, reference.FamiliarString(ref))
		case reference.Canonical:
			repoDigests = append(repoDigests, reference.FamiliarString(ref))
		}
	}

	var size int64
	var layerMetadata map[string]string
	// An image with no layers (empty ChainID) reports zero size and no
	// layer metadata.
	layerID := img.RootFS.ChainID()
	if layerID != "" {
		l, err := i.layerStores[img.OperatingSystem()].Get(layerID)
		if err != nil {
			return nil, err
		}
		defer layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l)
		size, err = l.Size()
		if err != nil {
			return nil, err
		}

		layerMetadata, err = l.Metadata()
		if err != nil {
			return nil, err
		}
	}

	// Fall back to the last history entry's comment when the image itself
	// has none.
	comment := img.Comment
	if len(comment) == 0 && len(img.History) > 0 {
		comment = img.History[len(img.History)-1].Comment
	}

	lastUpdated, err := i.imageStore.GetLastUpdated(img.ID())
	if err != nil {
		return nil, err
	}

	imageInspect := &types.ImageInspect{
		ID:              img.ID().String(),
		RepoTags:        repoTags,
		RepoDigests:     repoDigests,
		Parent:          img.Parent.String(),
		Comment:         comment,
		Created:         img.Created.Format(time.RFC3339Nano),
		Container:       img.Container,
		ContainerConfig: &img.ContainerConfig,
		DockerVersion:   img.DockerVersion,
		Author:          img.Author,
		Config:          img.Config,
		Architecture:    img.Architecture,
		Os:              img.OperatingSystem(),
		OsVersion:       img.OSVersion,
		Size:            size,
		VirtualSize:     size, // TODO: field unused, deprecate
		RootFS:          rootFSToAPIType(img.RootFS),
		Metadata: types.ImageMetadata{
			LastTagTime: lastUpdated,
		},
	}

	imageInspect.GraphDriver.Name = i.layerStores[img.OperatingSystem()].DriverName()
	imageInspect.GraphDriver.Data = layerMetadata

	return imageInspect, nil
}
|
93 |
+ |
|
94 |
+func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { |
|
95 |
+ var layers []string |
|
96 |
+ for _, l := range rootfs.DiffIDs { |
|
97 |
+ layers = append(layers, l.String()) |
|
98 |
+ } |
|
99 |
+ return types.RootFS{ |
|
100 |
+ Type: rootfs.Type, |
|
101 |
+ Layers: layers, |
|
102 |
+ } |
|
103 |
+} |
0 | 104 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,210 @@ |
0 |
+package images // import "github.com/docker/docker/daemon/images" |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "fmt" |
|
4 |
+ "sync/atomic" |
|
5 |
+ "time" |
|
6 |
+ |
|
7 |
+ "github.com/docker/distribution/reference" |
|
8 |
+ "github.com/docker/docker/api/types" |
|
9 |
+ "github.com/docker/docker/api/types/filters" |
|
10 |
+ timetypes "github.com/docker/docker/api/types/time" |
|
11 |
+ "github.com/docker/docker/errdefs" |
|
12 |
+ "github.com/docker/docker/image" |
|
13 |
+ "github.com/docker/docker/layer" |
|
14 |
+ digest "github.com/opencontainers/go-digest" |
|
15 |
+ "github.com/sirupsen/logrus" |
|
16 |
+ "golang.org/x/net/context" |
|
17 |
+) |
|
18 |
+ |
|
19 |
+var imagesAcceptedFilters = map[string]bool{ |
|
20 |
+ "dangling": true, |
|
21 |
+ "label": true, |
|
22 |
+ "label!": true, |
|
23 |
+ "until": true, |
|
24 |
+} |
|
25 |
+ |
|
26 |
+// errPruneRunning is returned when a prune request is received while |
|
27 |
+// one is in progress |
|
28 |
+var errPruneRunning = fmt.Errorf("a prune operation is already running") |
|
29 |
+ |
|
30 |
// ImagesPrune removes unused images. pruneFilters supports "dangling",
// "label", "label!" and "until" (see imagesAcceptedFilters). Only one prune
// operation may run at a time; a concurrent call returns errPruneRunning.
// On context cancellation the report accumulated so far is still returned.
func (i *ImageService) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) {
	// Atomic flag serializes prune operations across goroutines.
	if !atomic.CompareAndSwapInt32(&i.pruneRunning, 0, 1) {
		return nil, errPruneRunning
	}
	defer atomic.StoreInt32(&i.pruneRunning, 0)

	// make sure that only accepted filters have been received
	err := pruneFilters.Validate(imagesAcceptedFilters)
	if err != nil {
		return nil, err
	}

	rep := &types.ImagesPruneReport{}

	// "dangling" defaults to true; only an explicit false/0 widens pruning to
	// all unused images. Anything other than true/false/1/0 is rejected.
	danglingOnly := true
	if pruneFilters.Contains("dangling") {
		if pruneFilters.ExactMatch("dangling", "false") || pruneFilters.ExactMatch("dangling", "0") {
			danglingOnly = false
		} else if !pruneFilters.ExactMatch("dangling", "true") && !pruneFilters.ExactMatch("dangling", "1") {
			return nil, invalidFilter{"dangling", pruneFilters.Get("dangling")}
		}
	}

	until, err := getUntilFromPruneFilters(pruneFilters)
	if err != nil {
		return nil, err
	}

	// Heads() excludes intermediate (parented) images; Map() includes all.
	var allImages map[image.ID]*image.Image
	if danglingOnly {
		allImages = i.imageStore.Heads()
	} else {
		allImages = i.imageStore.Map()
	}

	// Filter intermediary images and get their unique size
	allLayers := make(map[layer.ChainID]layer.Layer)
	for _, ls := range i.layerStores {
		for k, v := range ls.Map() {
			allLayers[k] = v
		}
	}
	topImages := map[image.ID]*image.Image{}
	for id, img := range allImages {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		default:
			dgst := digest.Digest(id)
			// Skip unreferenced images that still have children (they are
			// intermediate build layers, removed with their descendants).
			if len(i.referenceStore.References(dgst)) == 0 && len(i.imageStore.Children(id)) != 0 {
				continue
			}
			// Honor the "until" cutoff: keep images created after it.
			if !until.IsZero() && img.Created.After(until) {
				continue
			}
			if img.Config != nil && !matchLabels(pruneFilters, img.Config.Labels) {
				continue
			}
			topImages[id] = img
		}
	}

	canceled := false
deleteImagesLoop:
	for id := range topImages {
		select {
		case <-ctx.Done():
			// we still want to calculate freed size and return the data
			canceled = true
			break deleteImagesLoop
		default:
		}

		deletedImages := []types.ImageDeleteResponseItem{}
		refs := i.referenceStore.References(id.Digest())
		if len(refs) > 0 {
			shouldDelete := !danglingOnly
			if !shouldDelete {
				hasTag := false
				for _, ref := range refs {
					if _, ok := ref.(reference.NamedTagged); ok {
						hasTag = true
						break
					}
				}

				// Only delete if it's untagged (i.e. repo:<none>)
				shouldDelete = !hasTag
			}

			if shouldDelete {
				// Delete each reference individually; conflicts (e.g. image
				// in use) are tolerated and skipped by imageDeleteFailed.
				for _, ref := range refs {
					imgDel, err := i.ImageDelete(ref.String(), false, true)
					if imageDeleteFailed(ref.String(), err) {
						continue
					}
					deletedImages = append(deletedImages, imgDel...)
				}
			}
		} else {
			// No references at all: delete by image ID (hex digest).
			hex := id.Digest().Hex()
			imgDel, err := i.ImageDelete(hex, false, true)
			if imageDeleteFailed(hex, err) {
				continue
			}
			deletedImages = append(deletedImages, imgDel...)
		}

		rep.ImagesDeleted = append(rep.ImagesDeleted, deletedImages...)
	}

	// Compute how much space was freed
	for _, d := range rep.ImagesDeleted {
		if d.Deleted != "" {
			chid := layer.ChainID(d.Deleted)
			if l, ok := allLayers[chid]; ok {
				diffSize, err := l.DiffSize()
				if err != nil {
					logrus.Warnf("failed to get layer %s size: %v", chid, err)
					continue
				}
				rep.SpaceReclaimed += uint64(diffSize)
			}
		}
	}

	if canceled {
		logrus.Debugf("ImagesPrune operation cancelled: %#v", *rep)
	}

	return rep, nil
}
|
163 |
+ |
|
164 |
+func imageDeleteFailed(ref string, err error) bool { |
|
165 |
+ switch { |
|
166 |
+ case err == nil: |
|
167 |
+ return false |
|
168 |
+ case errdefs.IsConflict(err): |
|
169 |
+ return true |
|
170 |
+ default: |
|
171 |
+ logrus.Warnf("failed to prune image %s: %v", ref, err) |
|
172 |
+ return true |
|
173 |
+ } |
|
174 |
+} |
|
175 |
+ |
|
176 |
+func matchLabels(pruneFilters filters.Args, labels map[string]string) bool { |
|
177 |
+ if !pruneFilters.MatchKVList("label", labels) { |
|
178 |
+ return false |
|
179 |
+ } |
|
180 |
+ // By default MatchKVList will return true if field (like 'label!') does not exist |
|
181 |
+ // So we have to add additional Contains("label!") check |
|
182 |
+ if pruneFilters.Contains("label!") { |
|
183 |
+ if pruneFilters.MatchKVList("label!", labels) { |
|
184 |
+ return false |
|
185 |
+ } |
|
186 |
+ } |
|
187 |
+ return true |
|
188 |
+} |
|
189 |
+ |
|
190 |
+func getUntilFromPruneFilters(pruneFilters filters.Args) (time.Time, error) { |
|
191 |
+ until := time.Time{} |
|
192 |
+ if !pruneFilters.Contains("until") { |
|
193 |
+ return until, nil |
|
194 |
+ } |
|
195 |
+ untilFilters := pruneFilters.Get("until") |
|
196 |
+ if len(untilFilters) > 1 { |
|
197 |
+ return until, fmt.Errorf("more than one until filter specified") |
|
198 |
+ } |
|
199 |
+ ts, err := timetypes.GetTimestamp(untilFilters[0], time.Now()) |
|
200 |
+ if err != nil { |
|
201 |
+ return until, err |
|
202 |
+ } |
|
203 |
+ seconds, nanoseconds, err := timetypes.ParseTimestamps(ts, 0) |
|
204 |
+ if err != nil { |
|
205 |
+ return until, err |
|
206 |
+ } |
|
207 |
+ until = time.Unix(seconds, nanoseconds) |
|
208 |
+ return until, nil |
|
209 |
+} |
0 | 210 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,127 @@ |
0 |
+package images // import "github.com/docker/docker/daemon/images" |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "io" |
|
4 |
+ "runtime" |
|
5 |
+ "strings" |
|
6 |
+ |
|
7 |
+ dist "github.com/docker/distribution" |
|
8 |
+ "github.com/docker/distribution/reference" |
|
9 |
+ "github.com/docker/docker/api/types" |
|
10 |
+ "github.com/docker/docker/distribution" |
|
11 |
+ progressutils "github.com/docker/docker/distribution/utils" |
|
12 |
+ "github.com/docker/docker/errdefs" |
|
13 |
+ "github.com/docker/docker/pkg/progress" |
|
14 |
+ "github.com/docker/docker/registry" |
|
15 |
+ "github.com/opencontainers/go-digest" |
|
16 |
+ "golang.org/x/net/context" |
|
17 |
+) |
|
18 |
+ |
|
19 |
+// PullImage initiates a pull operation. image is the repository name to pull, and |
|
20 |
+// tag may be either empty, or indicate a specific tag to pull. |
|
21 |
+func (i *ImageService) PullImage(ctx context.Context, image, tag, os string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { |
|
22 |
+ // Special case: "pull -a" may send an image name with a |
|
23 |
+ // trailing :. This is ugly, but let's not break API |
|
24 |
+ // compatibility. |
|
25 |
+ image = strings.TrimSuffix(image, ":") |
|
26 |
+ |
|
27 |
+ ref, err := reference.ParseNormalizedNamed(image) |
|
28 |
+ if err != nil { |
|
29 |
+ return errdefs.InvalidParameter(err) |
|
30 |
+ } |
|
31 |
+ |
|
32 |
+ if tag != "" { |
|
33 |
+ // The "tag" could actually be a digest. |
|
34 |
+ var dgst digest.Digest |
|
35 |
+ dgst, err = digest.Parse(tag) |
|
36 |
+ if err == nil { |
|
37 |
+ ref, err = reference.WithDigest(reference.TrimNamed(ref), dgst) |
|
38 |
+ } else { |
|
39 |
+ ref, err = reference.WithTag(ref, tag) |
|
40 |
+ } |
|
41 |
+ if err != nil { |
|
42 |
+ return errdefs.InvalidParameter(err) |
|
43 |
+ } |
|
44 |
+ } |
|
45 |
+ |
|
46 |
+ return i.pullImageWithReference(ctx, ref, os, metaHeaders, authConfig, outStream) |
|
47 |
+} |
|
48 |
+ |
|
49 |
// pullImageWithReference performs the pull of the image described by ref,
// streaming progress to outStream. os selects the target operating system
// and defaults to the host OS when empty.
func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference.Named, os string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
	// Include a buffer so that slow client connections don't affect
	// transfer performance.
	progressChan := make(chan progress.Progress, 100)

	writesDone := make(chan struct{})

	ctx, cancelFunc := context.WithCancel(ctx)

	// Writer goroutine: forwards progress messages to the client stream.
	// cancelFunc lets the writer cancel the pull (e.g. if the client goes
	// away); writesDone signals that all output has been flushed.
	go func() {
		progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
		close(writesDone)
	}()

	// Default to the host OS platform in case it hasn't been populated with an explicit value.
	if os == "" {
		os = runtime.GOOS
	}

	imagePullConfig := &distribution.ImagePullConfig{
		Config: distribution.Config{
			MetaHeaders:      metaHeaders,
			AuthConfig:       authConfig,
			ProgressOutput:   progress.ChanOutput(progressChan),
			RegistryService:  i.registryService,
			ImageEventLogger: i.LogImageEvent,
			MetadataStore:    i.distributionMetadataStore,
			ImageStore:       distribution.NewImageConfigStoreFromStore(i.imageStore),
			ReferenceStore:   i.referenceStore,
		},
		DownloadManager: i.downloadManager,
		Schema2Types:    distribution.ImageTypes,
		OS:              os,
	}

	err := distribution.Pull(ctx, ref, imagePullConfig)
	// Close the progress channel so the writer goroutine terminates, then
	// wait for it to finish flushing before returning the pull's result.
	close(progressChan)
	<-writesDone
	return err
}
|
89 |
+ |
|
90 |
// GetRepository returns a repository from the registry for the given
// reference, along with a boolean reporting whether the endpoint was
// confirmed to speak the v2 protocol.
func (i *ImageService) GetRepository(ctx context.Context, ref reference.Named, authConfig *types.AuthConfig) (dist.Repository, bool, error) {
	// get repository info
	repoInfo, err := i.registryService.ResolveRepository(ref)
	if err != nil {
		return nil, false, err
	}
	// makes sure name is not empty or `scratch`
	if err := distribution.ValidateRepoName(repoInfo.Name); err != nil {
		return nil, false, errdefs.InvalidParameter(err)
	}

	// get endpoints
	endpoints, err := i.registryService.LookupPullEndpoints(reference.Domain(repoInfo.Name))
	if err != nil {
		return nil, false, err
	}

	// retrieve repository
	var (
		confirmedV2 bool
		repository  dist.Repository
		lastError   error
	)

	// Try each pull endpoint in order, skipping legacy v1 registries, until
	// one succeeds as a confirmed v2 endpoint. On total failure the last
	// attempted repository/error are returned.
	for _, endpoint := range endpoints {
		if endpoint.Version == registry.APIVersion1 {
			continue
		}

		repository, confirmedV2, lastError = distribution.NewV2Repository(ctx, repoInfo, endpoint, nil, authConfig, "pull")
		if lastError == nil && confirmedV2 {
			break
		}
	}
	return repository, confirmedV2, lastError
}
0 | 127 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,63 @@ |
0 |
+package images // import "github.com/docker/docker/daemon/images" |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "io" |
|
4 |
+ |
|
5 |
+ "github.com/docker/distribution/manifest/schema2" |
|
6 |
+ "github.com/docker/distribution/reference" |
|
7 |
+ "github.com/docker/docker/api/types" |
|
8 |
+ "github.com/docker/docker/distribution" |
|
9 |
+ progressutils "github.com/docker/docker/distribution/utils" |
|
10 |
+ "github.com/docker/docker/pkg/progress" |
|
11 |
+ "golang.org/x/net/context" |
|
12 |
+) |
|
13 |
+ |
|
14 |
// PushImage initiates a push operation on the repository named by image.
// When tag is non-empty the push is restricted to that single tag; push by
// digest is not supported. Progress is streamed to outStream.
func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
	ref, err := reference.ParseNormalizedNamed(image)
	if err != nil {
		return err
	}
	if tag != "" {
		// Push by digest is not supported, so only tags are supported.
		ref, err = reference.WithTag(ref, tag)
		if err != nil {
			return err
		}
	}

	// Include a buffer so that slow client connections don't affect
	// transfer performance.
	progressChan := make(chan progress.Progress, 100)

	writesDone := make(chan struct{})

	ctx, cancelFunc := context.WithCancel(ctx)

	// Writer goroutine: streams progress to the client. cancelFunc lets it
	// cancel the push if writing fails; writesDone signals output is flushed.
	go func() {
		progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
		close(writesDone)
	}()

	imagePushConfig := &distribution.ImagePushConfig{
		Config: distribution.Config{
			MetaHeaders:      metaHeaders,
			AuthConfig:       authConfig,
			ProgressOutput:   progress.ChanOutput(progressChan),
			RegistryService:  i.registryService,
			ImageEventLogger: i.LogImageEvent,
			MetadataStore:    i.distributionMetadataStore,
			ImageStore:       distribution.NewImageConfigStoreFromStore(i.imageStore),
			ReferenceStore:   i.referenceStore,
		},
		ConfigMediaType: schema2.MediaTypeImageConfig,
		LayerStores:     distribution.NewLayerProvidersFromStores(i.layerStores),
		TrustKey:        i.trustKey,
		UploadManager:   i.uploadManager,
	}

	err = distribution.Push(ctx, ref, imagePushConfig)
	// Close the progress channel so the writer goroutine exits, then wait
	// for it to finish flushing before returning the push's result.
	close(progressChan)
	<-writesDone
	return err
}
0 | 63 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,96 @@ |
0 |
+package images // import "github.com/docker/docker/daemon/images" |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "strconv" |
|
4 |
+ |
|
5 |
+ "golang.org/x/net/context" |
|
6 |
+ |
|
7 |
+ "github.com/docker/docker/api/types" |
|
8 |
+ "github.com/docker/docker/api/types/filters" |
|
9 |
+ registrytypes "github.com/docker/docker/api/types/registry" |
|
10 |
+ "github.com/docker/docker/dockerversion" |
|
11 |
+) |
|
12 |
+ |
|
13 |
+var acceptedSearchFilterTags = map[string]bool{ |
|
14 |
+ "is-automated": true, |
|
15 |
+ "is-official": true, |
|
16 |
+ "stars": true, |
|
17 |
+} |
|
18 |
+ |
|
19 |
// SearchRegistryForImages queries the registry for images matching
// term. authConfig is used to login. The "is-automated", "is-official" and
// "stars" filters are applied client-side on the registry's results.
//
// TODO: this could be implemented in a registry service instead of the image
// service.
func (i *ImageService) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int,
	authConfig *types.AuthConfig,
	headers map[string][]string) (*registrytypes.SearchResults, error) {

	// Parse and validate the JSON-encoded filters before hitting the registry.
	searchFilters, err := filters.FromJSON(filtersArgs)
	if err != nil {
		return nil, err
	}
	if err := searchFilters.Validate(acceptedSearchFilterTags); err != nil {
		return nil, err
	}

	var isAutomated, isOfficial bool
	var hasStarFilter = 0
	// "is-automated"/"is-official" must each be exactly one of true/false.
	if searchFilters.Contains("is-automated") {
		if searchFilters.UniqueExactMatch("is-automated", "true") {
			isAutomated = true
		} else if !searchFilters.UniqueExactMatch("is-automated", "false") {
			return nil, invalidFilter{"is-automated", searchFilters.Get("is-automated")}
		}
	}
	if searchFilters.Contains("is-official") {
		if searchFilters.UniqueExactMatch("is-official", "true") {
			isOfficial = true
		} else if !searchFilters.UniqueExactMatch("is-official", "false") {
			return nil, invalidFilter{"is-official", searchFilters.Get("is-official")}
		}
	}
	if searchFilters.Contains("stars") {
		hasStars := searchFilters.Get("stars")
		// When several star thresholds are given, keep the most restrictive
		// (highest) one.
		for _, hasStar := range hasStars {
			iHasStar, err := strconv.Atoi(hasStar)
			if err != nil {
				return nil, invalidFilter{"stars", hasStar}
			}
			if iHasStar > hasStarFilter {
				hasStarFilter = iHasStar
			}
		}
	}

	unfilteredResult, err := i.registryService.Search(ctx, term, limit, authConfig, dockerversion.DockerUserAgent(ctx), headers)
	if err != nil {
		return nil, err
	}

	// Apply the parsed filters to the raw registry results.
	filteredResults := []registrytypes.SearchResult{}
	for _, result := range unfilteredResult.Results {
		if searchFilters.Contains("is-automated") {
			if isAutomated != result.IsAutomated {
				continue
			}
		}
		if searchFilters.Contains("is-official") {
			if isOfficial != result.IsOfficial {
				continue
			}
		}
		if searchFilters.Contains("stars") {
			if result.StarCount < hasStarFilter {
				continue
			}
		}
		filteredResults = append(filteredResults, result)
	}

	return &registrytypes.SearchResults{
		Query:      unfilteredResult.Query,
		NumResults: len(filteredResults),
		Results:    filteredResults,
	}, nil
}
0 | 96 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,358 @@ |
0 |
+package images // import "github.com/docker/docker/daemon/images" |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "errors" |
|
4 |
+ "strings" |
|
5 |
+ "testing" |
|
6 |
+ |
|
7 |
+ "golang.org/x/net/context" |
|
8 |
+ |
|
9 |
+ "github.com/docker/docker/api/types" |
|
10 |
+ registrytypes "github.com/docker/docker/api/types/registry" |
|
11 |
+ "github.com/docker/docker/registry" |
|
12 |
+) |
|
13 |
+ |
|
14 |
// FakeService is a fake registry service used by the search tests: it either
// fails with a fixed error or returns a canned set of search results.
type FakeService struct {
	registry.DefaultService

	// shouldReturnError makes Search fail with a generic error when true.
	shouldReturnError bool

	// term is echoed back as the Query of the fake search results.
	term string
	// results is the canned result set returned by Search.
	results []registrytypes.SearchResult
}
|
22 |
+ |
|
23 |
// Search returns the fake's configured error or canned results. The term,
// limit, auth and header arguments are ignored; Query echoes s.term.
func (s *FakeService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) {
	if s.shouldReturnError {
		return nil, errors.New("Search unknown error")
	}
	return &registrytypes.SearchResults{
		Query:      s.term,
		NumResults: len(s.results),
		Results:    s.results,
	}, nil
}
|
33 |
+ |
|
34 |
// TestSearchRegistryForImagesErrors verifies that SearchRegistryForImages
// propagates registry errors and rejects malformed or unsupported filters.
func TestSearchRegistryForImagesErrors(t *testing.T) {
	errorCases := []struct {
		filtersArgs       string
		shouldReturnError bool
		expectedError     string
	}{
		{
			expectedError:     "Search unknown error",
			shouldReturnError: true,
		},
		{
			filtersArgs:   "invalid json",
			expectedError: "invalid character 'i' looking for beginning of value",
		},
		{
			filtersArgs:   `{"type":{"custom":true}}`,
			expectedError: "Invalid filter 'type'",
		},
		{
			filtersArgs:   `{"is-automated":{"invalid":true}}`,
			expectedError: "Invalid filter 'is-automated=[invalid]'",
		},
		{
			filtersArgs:   `{"is-automated":{"true":true,"false":true}}`,
			expectedError: "Invalid filter 'is-automated",
		},
		{
			filtersArgs:   `{"is-official":{"invalid":true}}`,
			expectedError: "Invalid filter 'is-official=[invalid]'",
		},
		{
			filtersArgs:   `{"is-official":{"true":true,"false":true}}`,
			expectedError: "Invalid filter 'is-official",
		},
		{
			filtersArgs:   `{"stars":{"invalid":true}}`,
			expectedError: "Invalid filter 'stars=invalid'",
		},
		{
			filtersArgs:   `{"stars":{"1":true,"invalid":true}}`,
			expectedError: "Invalid filter 'stars=invalid'",
		},
	}
	for index, e := range errorCases {
		daemon := &ImageService{
			registryService: &FakeService{
				shouldReturnError: e.shouldReturnError,
			},
		}
		_, err := daemon.SearchRegistryForImages(context.Background(), e.filtersArgs, "term", 25, nil, map[string][]string{})
		if err == nil {
			t.Errorf("%d: expected an error, got nothing", index)
		}
		if !strings.Contains(err.Error(), e.expectedError) {
			t.Errorf("%d: expected error to contain %s, got %s", index, e.expectedError, err.Error())
		}
	}
}
|
92 |
+ |
|
93 |
// TestSearchRegistryForImages exercises the client-side filtering applied on
// top of raw registry search results (is-automated, is-official, stars).
func TestSearchRegistryForImages(t *testing.T) {
	term := "term"
	successCases := []struct {
		filtersArgs     string
		registryResults []registrytypes.SearchResult
		expectedResults []registrytypes.SearchResult
	}{
		{
			filtersArgs:     "",
			registryResults: []registrytypes.SearchResult{},
			expectedResults: []registrytypes.SearchResult{},
		},
		{
			filtersArgs: "",
			registryResults: []registrytypes.SearchResult{
				{
					Name:        "name",
					Description: "description",
				},
			},
			expectedResults: []registrytypes.SearchResult{
				{
					Name:        "name",
					Description: "description",
				},
			},
		},
		{
			filtersArgs: `{"is-automated":{"true":true}}`,
			registryResults: []registrytypes.SearchResult{
				{
					Name:        "name",
					Description: "description",
				},
			},
			expectedResults: []registrytypes.SearchResult{},
		},
		{
			filtersArgs: `{"is-automated":{"true":true}}`,
			registryResults: []registrytypes.SearchResult{
				{
					Name:        "name",
					Description: "description",
					IsAutomated: true,
				},
			},
			expectedResults: []registrytypes.SearchResult{
				{
					Name:        "name",
					Description: "description",
					IsAutomated: true,
				},
			},
		},
		{
			filtersArgs: `{"is-automated":{"false":true}}`,
			registryResults: []registrytypes.SearchResult{
				{
					Name:        "name",
					Description: "description",
					IsAutomated: true,
				},
			},
			expectedResults: []registrytypes.SearchResult{},
		},
		{
			filtersArgs: `{"is-automated":{"false":true}}`,
			registryResults: []registrytypes.SearchResult{
				{
					Name:        "name",
					Description: "description",
					IsAutomated: false,
				},
			},
			expectedResults: []registrytypes.SearchResult{
				{
					Name:        "name",
					Description: "description",
					IsAutomated: false,
				},
			},
		},
		{
			filtersArgs: `{"is-official":{"true":true}}`,
			registryResults: []registrytypes.SearchResult{
				{
					Name:        "name",
					Description: "description",
				},
			},
			expectedResults: []registrytypes.SearchResult{},
		},
		{
			filtersArgs: `{"is-official":{"true":true}}`,
			registryResults: []registrytypes.SearchResult{
				{
					Name:        "name",
					Description: "description",
					IsOfficial:  true,
				},
			},
			expectedResults: []registrytypes.SearchResult{
				{
					Name:        "name",
					Description: "description",
					IsOfficial:  true,
				},
			},
		},
		{
			filtersArgs: `{"is-official":{"false":true}}`,
			registryResults: []registrytypes.SearchResult{
				{
					Name:        "name",
					Description: "description",
					IsOfficial:  true,
				},
			},
			expectedResults: []registrytypes.SearchResult{},
		},
		{
			filtersArgs: `{"is-official":{"false":true}}`,
			registryResults: []registrytypes.SearchResult{
				{
					Name:        "name",
					Description: "description",
					IsOfficial:  false,
				},
			},
			expectedResults: []registrytypes.SearchResult{
				{
					Name:        "name",
					Description: "description",
					IsOfficial:  false,
				},
			},
		},
		{
			filtersArgs: `{"stars":{"0":true}}`,
			registryResults: []registrytypes.SearchResult{
				{
					Name:        "name",
					Description: "description",
					StarCount:   0,
				},
			},
			expectedResults: []registrytypes.SearchResult{
				{
					Name:        "name",
					Description: "description",
					StarCount:   0,
				},
			},
		},
		{
			filtersArgs: `{"stars":{"1":true}}`,
			registryResults: []registrytypes.SearchResult{
				{
					Name:        "name",
					Description: "description",
					StarCount:   0,
				},
			},
			expectedResults: []registrytypes.SearchResult{},
		},
		{
			filtersArgs: `{"stars":{"1":true}}`,
			registryResults: []registrytypes.SearchResult{
				{
					Name:        "name0",
					Description: "description0",
					StarCount:   0,
				},
				{
					Name:        "name1",
					Description: "description1",
					StarCount:   1,
				},
			},
			expectedResults: []registrytypes.SearchResult{
				{
					Name:        "name1",
					Description: "description1",
					StarCount:   1,
				},
			},
		},
		{
			filtersArgs: `{"stars":{"1":true}, "is-official":{"true":true}, "is-automated":{"true":true}}`,
			registryResults: []registrytypes.SearchResult{
				{
					Name:        "name0",
					Description: "description0",
					StarCount:   0,
					IsOfficial:  true,
					IsAutomated: true,
				},
				{
					Name:        "name1",
					Description: "description1",
					StarCount:   1,
					IsOfficial:  false,
					IsAutomated: true,
				},
				{
					Name:        "name2",
					Description: "description2",
					StarCount:   1,
					IsOfficial:  true,
					IsAutomated: false,
				},
				{
					Name:        "name3",
					Description: "description3",
					StarCount:   2,
					IsOfficial:  true,
					IsAutomated: true,
				},
			},
			expectedResults: []registrytypes.SearchResult{
				{
					Name:        "name3",
					Description: "description3",
					StarCount:   2,
					IsOfficial:  true,
					IsAutomated: true,
				},
			},
		},
	}
	for index, s := range successCases {
		daemon := &ImageService{
			registryService: &FakeService{
				term:    term,
				results: s.registryResults,
			},
		}
		results, err := daemon.SearchRegistryForImages(context.Background(), s.filtersArgs, term, 25, nil, map[string][]string{})
		if err != nil {
			t.Errorf("%d: %v", index, err)
		}
		if results.Query != term {
			t.Errorf("%d: expected Query to be %s, got %s", index, term, results.Query)
		}
		if results.NumResults != len(s.expectedResults) {
			t.Errorf("%d: expected NumResults to be %d, got %d", index, len(s.expectedResults), results.NumResults)
		}
		// Order-insensitive check: every returned result must match one of
		// the expected results field-for-field.
		for _, result := range results.Results {
			found := false
			for _, expectedResult := range s.expectedResults {
				if expectedResult.Name == result.Name &&
					expectedResult.Description == result.Description &&
					expectedResult.IsAutomated == result.IsAutomated &&
					expectedResult.IsOfficial == result.IsOfficial &&
					expectedResult.StarCount == result.StarCount {
					found = true
					break
				}
			}
			if !found {
				t.Errorf("%d: expected results %v, got %v", index, s.expectedResults, results.Results)
			}
		}
	}
}
0 | 358 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,41 @@ |
0 |
+package images // import "github.com/docker/docker/daemon/images" |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "github.com/docker/distribution/reference" |
|
4 |
+ "github.com/docker/docker/image" |
|
5 |
+) |
|
6 |
+ |
|
7 |
+// TagImage creates the tag specified by newTag, pointing to the image named |
|
8 |
+// imageName (alternatively, imageName can also be an image ID). |
|
9 |
+func (i *ImageService) TagImage(imageName, repository, tag string) (string, error) { |
|
10 |
+ imageID, _, err := i.GetImageIDAndOS(imageName) |
|
11 |
+ if err != nil { |
|
12 |
+ return "", err |
|
13 |
+ } |
|
14 |
+ |
|
15 |
+ newTag, err := reference.ParseNormalizedNamed(repository) |
|
16 |
+ if err != nil { |
|
17 |
+ return "", err |
|
18 |
+ } |
|
19 |
+ if tag != "" { |
|
20 |
+ if newTag, err = reference.WithTag(reference.TrimNamed(newTag), tag); err != nil { |
|
21 |
+ return "", err |
|
22 |
+ } |
|
23 |
+ } |
|
24 |
+ |
|
25 |
+ err = i.TagImageWithReference(imageID, newTag) |
|
26 |
+ return reference.FamiliarString(newTag), err |
|
27 |
+} |
|
28 |
+ |
|
29 |
+// TagImageWithReference adds the given reference to the image ID provided. |
|
30 |
+func (i *ImageService) TagImageWithReference(imageID image.ID, newTag reference.Named) error { |
|
31 |
+ if err := i.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil { |
|
32 |
+ return err |
|
33 |
+ } |
|
34 |
+ |
|
35 |
+ if err := i.imageStore.SetLastUpdated(imageID); err != nil { |
|
36 |
+ return err |
|
37 |
+ } |
|
38 |
+ i.LogImageEvent(imageID.String(), reference.FamiliarString(newTag), "tag") |
|
39 |
+ return nil |
|
40 |
+} |
0 | 41 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,45 @@ |
0 |
+// +build linux freebsd |
|
1 |
+ |
|
2 |
+package images // import "github.com/docker/docker/daemon/images" |
|
3 |
+ |
|
4 |
+import ( |
|
5 |
+ "runtime" |
|
6 |
+ |
|
7 |
+ "github.com/sirupsen/logrus" |
|
8 |
+) |
|
9 |
+ |
|
10 |
+// GetContainerLayerSize returns the real size & virtual size of the container. |
|
11 |
+func (i *ImageService) GetContainerLayerSize(containerID string) (int64, int64) { |
|
12 |
+ var ( |
|
13 |
+ sizeRw, sizeRootfs int64 |
|
14 |
+ err error |
|
15 |
+ ) |
|
16 |
+ |
|
17 |
+ // Safe to index by runtime.GOOS as Unix hosts don't support multiple |
|
18 |
+ // container operating systems. |
|
19 |
+ rwlayer, err := i.layerStores[runtime.GOOS].GetRWLayer(containerID) |
|
20 |
+ if err != nil { |
|
21 |
+ logrus.Errorf("Failed to compute size of container rootfs %v: %v", containerID, err) |
|
22 |
+ return sizeRw, sizeRootfs |
|
23 |
+ } |
|
24 |
+ defer i.layerStores[runtime.GOOS].ReleaseRWLayer(rwlayer) |
|
25 |
+ |
|
26 |
+ sizeRw, err = rwlayer.Size() |
|
27 |
+ if err != nil { |
|
28 |
+ logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", |
|
29 |
+ i.layerStores[runtime.GOOS].DriverName(), containerID, err) |
|
30 |
+ // FIXME: GetSize should return an error. Not changing it now in case |
|
31 |
+ // there is a side-effect. |
|
32 |
+ sizeRw = -1 |
|
33 |
+ } |
|
34 |
+ |
|
35 |
+ if parent := rwlayer.Parent(); parent != nil { |
|
36 |
+ sizeRootfs, err = parent.Size() |
|
37 |
+ if err != nil { |
|
38 |
+ sizeRootfs = -1 |
|
39 |
+ } else if sizeRw != -1 { |
|
40 |
+ sizeRootfs += sizeRw |
|
41 |
+ } |
|
42 |
+ } |
|
43 |
+ return sizeRw, sizeRootfs |
|
44 |
+} |
0 | 45 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,41 @@ |
0 |
+package images |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "github.com/docker/docker/image" |
|
4 |
+ "github.com/docker/docker/layer" |
|
5 |
+ "github.com/docker/docker/pkg/system" |
|
6 |
+ "github.com/pkg/errors" |
|
7 |
+) |
|
8 |
+ |
|
9 |
// GetContainerLayerSize returns real size & virtual size.
// Not implemented on Windows: always returns 0, 0.
func (i *ImageService) GetContainerLayerSize(containerID string) (int64, int64) {
	// TODO Windows
	return 0, 0
}
|
14 |
+ |
|
15 |
+// GetLayerFolders returns the layer folders from an image RootFS |
|
16 |
+func (i *ImageService) GetLayerFolders(img *image.Image, rwLayer layer.RWLayer) ([]string, error) { |
|
17 |
+ folders := []string{} |
|
18 |
+ max := len(img.RootFS.DiffIDs) |
|
19 |
+ for index := 1; index <= max; index++ { |
|
20 |
+ // FIXME: why does this mutate the RootFS? |
|
21 |
+ img.RootFS.DiffIDs = img.RootFS.DiffIDs[:index] |
|
22 |
+ if !system.IsOSSupported(img.OperatingSystem()) { |
|
23 |
+ return nil, errors.Wrapf(system.ErrNotSupportedOperatingSystem, "cannot get layerpath for ImageID %s", img.RootFS.ChainID()) |
|
24 |
+ } |
|
25 |
+ layerPath, err := layer.GetLayerPath(i.layerStores[img.OperatingSystem()], img.RootFS.ChainID()) |
|
26 |
+ if err != nil { |
|
27 |
+ return nil, errors.Wrapf(err, "failed to get layer path from graphdriver %s for ImageID %s", i.layerStores[img.OperatingSystem()], img.RootFS.ChainID()) |
|
28 |
+ } |
|
29 |
+ // Reverse order, expecting parent first |
|
30 |
+ folders = append([]string{layerPath}, folders...) |
|
31 |
+ } |
|
32 |
+ if rwLayer == nil { |
|
33 |
+ return nil, errors.New("RWLayer is unexpectedly nil") |
|
34 |
+ } |
|
35 |
+ m, err := rwLayer.Metadata() |
|
36 |
+ if err != nil { |
|
37 |
+ return nil, errors.Wrap(err, "failed to get layer metadata") |
|
38 |
+ } |
|
39 |
+ return append(folders, m["dir"]), nil |
|
40 |
+} |
0 | 41 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,346 @@ |
0 |
+package images // import "github.com/docker/docker/daemon/images" |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "encoding/json" |
|
4 |
+ "fmt" |
|
5 |
+ "sort" |
|
6 |
+ "time" |
|
7 |
+ |
|
8 |
+ "github.com/pkg/errors" |
|
9 |
+ |
|
10 |
+ "github.com/docker/distribution/reference" |
|
11 |
+ "github.com/docker/docker/api/types" |
|
12 |
+ "github.com/docker/docker/api/types/filters" |
|
13 |
+ "github.com/docker/docker/container" |
|
14 |
+ "github.com/docker/docker/image" |
|
15 |
+ "github.com/docker/docker/layer" |
|
16 |
+ "github.com/docker/docker/pkg/system" |
|
17 |
+) |
|
18 |
+ |
|
19 |
// acceptedImageFilterTags is the set of filter names accepted by Images;
// any other name fails filters.Args.Validate.
var acceptedImageFilterTags = map[string]bool{
	"dangling":  true,
	"label":     true,
	"before":    true,
	"since":     true,
	"reference": true,
}
|
26 |
+ |
|
27 |
// byCreated is a temporary type used to sort a list of images by creation
// time.
type byCreated []*types.ImageSummary

func (r byCreated) Len() int      { return len(r) }
func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
// Less orders ascending by the Created Unix timestamp; callers wrap in
// sort.Reverse to list newest first.
func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created }
|
34 |
+ |
|
35 |
// Map returns a map of all images in the ImageStore, keyed by image ID.
func (i *ImageService) Map() map[image.ID]*image.Image {
	return i.imageStore.Map()
}
|
39 |
+ |
|
40 |
+// Images returns a filtered list of images. filterArgs is a JSON-encoded set |
|
41 |
+// of filter arguments which will be interpreted by api/types/filters. |
|
42 |
+// filter is a shell glob string applied to repository names. The argument |
|
43 |
+// named all controls whether all images in the graph are filtered, or just |
|
44 |
+// the heads. |
|
45 |
+func (i *ImageService) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) { |
|
46 |
+ var ( |
|
47 |
+ allImages map[image.ID]*image.Image |
|
48 |
+ err error |
|
49 |
+ danglingOnly = false |
|
50 |
+ ) |
|
51 |
+ |
|
52 |
+ if err := imageFilters.Validate(acceptedImageFilterTags); err != nil { |
|
53 |
+ return nil, err |
|
54 |
+ } |
|
55 |
+ |
|
56 |
+ if imageFilters.Contains("dangling") { |
|
57 |
+ if imageFilters.ExactMatch("dangling", "true") { |
|
58 |
+ danglingOnly = true |
|
59 |
+ } else if !imageFilters.ExactMatch("dangling", "false") { |
|
60 |
+ return nil, invalidFilter{"dangling", imageFilters.Get("dangling")} |
|
61 |
+ } |
|
62 |
+ } |
|
63 |
+ if danglingOnly { |
|
64 |
+ allImages = i.imageStore.Heads() |
|
65 |
+ } else { |
|
66 |
+ allImages = i.imageStore.Map() |
|
67 |
+ } |
|
68 |
+ |
|
69 |
+ var beforeFilter, sinceFilter *image.Image |
|
70 |
+ err = imageFilters.WalkValues("before", func(value string) error { |
|
71 |
+ beforeFilter, err = i.GetImage(value) |
|
72 |
+ return err |
|
73 |
+ }) |
|
74 |
+ if err != nil { |
|
75 |
+ return nil, err |
|
76 |
+ } |
|
77 |
+ |
|
78 |
+ err = imageFilters.WalkValues("since", func(value string) error { |
|
79 |
+ sinceFilter, err = i.GetImage(value) |
|
80 |
+ return err |
|
81 |
+ }) |
|
82 |
+ if err != nil { |
|
83 |
+ return nil, err |
|
84 |
+ } |
|
85 |
+ |
|
86 |
+ images := []*types.ImageSummary{} |
|
87 |
+ var imagesMap map[*image.Image]*types.ImageSummary |
|
88 |
+ var layerRefs map[layer.ChainID]int |
|
89 |
+ var allLayers map[layer.ChainID]layer.Layer |
|
90 |
+ var allContainers []*container.Container |
|
91 |
+ |
|
92 |
+ for id, img := range allImages { |
|
93 |
+ if beforeFilter != nil { |
|
94 |
+ if img.Created.Equal(beforeFilter.Created) || img.Created.After(beforeFilter.Created) { |
|
95 |
+ continue |
|
96 |
+ } |
|
97 |
+ } |
|
98 |
+ |
|
99 |
+ if sinceFilter != nil { |
|
100 |
+ if img.Created.Equal(sinceFilter.Created) || img.Created.Before(sinceFilter.Created) { |
|
101 |
+ continue |
|
102 |
+ } |
|
103 |
+ } |
|
104 |
+ |
|
105 |
+ if imageFilters.Contains("label") { |
|
106 |
+ // Very old image that do not have image.Config (or even labels) |
|
107 |
+ if img.Config == nil { |
|
108 |
+ continue |
|
109 |
+ } |
|
110 |
+ // We are now sure image.Config is not nil |
|
111 |
+ if !imageFilters.MatchKVList("label", img.Config.Labels) { |
|
112 |
+ continue |
|
113 |
+ } |
|
114 |
+ } |
|
115 |
+ |
|
116 |
+ // Skip any images with an unsupported operating system to avoid a potential |
|
117 |
+ // panic when indexing through the layerstore. Don't error as we want to list |
|
118 |
+ // the other images. This should never happen, but here as a safety precaution. |
|
119 |
+ if !system.IsOSSupported(img.OperatingSystem()) { |
|
120 |
+ continue |
|
121 |
+ } |
|
122 |
+ |
|
123 |
+ layerID := img.RootFS.ChainID() |
|
124 |
+ var size int64 |
|
125 |
+ if layerID != "" { |
|
126 |
+ l, err := i.layerStores[img.OperatingSystem()].Get(layerID) |
|
127 |
+ if err != nil { |
|
128 |
+ // The layer may have been deleted between the call to `Map()` or |
|
129 |
+ // `Heads()` and the call to `Get()`, so we just ignore this error |
|
130 |
+ if err == layer.ErrLayerDoesNotExist { |
|
131 |
+ continue |
|
132 |
+ } |
|
133 |
+ return nil, err |
|
134 |
+ } |
|
135 |
+ |
|
136 |
+ size, err = l.Size() |
|
137 |
+ layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l) |
|
138 |
+ if err != nil { |
|
139 |
+ return nil, err |
|
140 |
+ } |
|
141 |
+ } |
|
142 |
+ |
|
143 |
+ newImage := newImage(img, size) |
|
144 |
+ |
|
145 |
+ for _, ref := range i.referenceStore.References(id.Digest()) { |
|
146 |
+ if imageFilters.Contains("reference") { |
|
147 |
+ var found bool |
|
148 |
+ var matchErr error |
|
149 |
+ for _, pattern := range imageFilters.Get("reference") { |
|
150 |
+ found, matchErr = reference.FamiliarMatch(pattern, ref) |
|
151 |
+ if matchErr != nil { |
|
152 |
+ return nil, matchErr |
|
153 |
+ } |
|
154 |
+ } |
|
155 |
+ if !found { |
|
156 |
+ continue |
|
157 |
+ } |
|
158 |
+ } |
|
159 |
+ if _, ok := ref.(reference.Canonical); ok { |
|
160 |
+ newImage.RepoDigests = append(newImage.RepoDigests, reference.FamiliarString(ref)) |
|
161 |
+ } |
|
162 |
+ if _, ok := ref.(reference.NamedTagged); ok { |
|
163 |
+ newImage.RepoTags = append(newImage.RepoTags, reference.FamiliarString(ref)) |
|
164 |
+ } |
|
165 |
+ } |
|
166 |
+ if newImage.RepoDigests == nil && newImage.RepoTags == nil { |
|
167 |
+ if all || len(i.imageStore.Children(id)) == 0 { |
|
168 |
+ |
|
169 |
+ if imageFilters.Contains("dangling") && !danglingOnly { |
|
170 |
+ //dangling=false case, so dangling image is not needed |
|
171 |
+ continue |
|
172 |
+ } |
|
173 |
+ if imageFilters.Contains("reference") { // skip images with no references if filtering by reference |
|
174 |
+ continue |
|
175 |
+ } |
|
176 |
+ newImage.RepoDigests = []string{"<none>@<none>"} |
|
177 |
+ newImage.RepoTags = []string{"<none>:<none>"} |
|
178 |
+ } else { |
|
179 |
+ continue |
|
180 |
+ } |
|
181 |
+ } else if danglingOnly && len(newImage.RepoTags) > 0 { |
|
182 |
+ continue |
|
183 |
+ } |
|
184 |
+ |
|
185 |
+ if withExtraAttrs { |
|
186 |
+ // lazily init variables |
|
187 |
+ if imagesMap == nil { |
|
188 |
+ allContainers = i.containers.List() |
|
189 |
+ allLayers = i.layerStores[img.OperatingSystem()].Map() |
|
190 |
+ imagesMap = make(map[*image.Image]*types.ImageSummary) |
|
191 |
+ layerRefs = make(map[layer.ChainID]int) |
|
192 |
+ } |
|
193 |
+ |
|
194 |
+ // Get container count |
|
195 |
+ newImage.Containers = 0 |
|
196 |
+ for _, c := range allContainers { |
|
197 |
+ if c.ImageID == id { |
|
198 |
+ newImage.Containers++ |
|
199 |
+ } |
|
200 |
+ } |
|
201 |
+ |
|
202 |
+ // count layer references |
|
203 |
+ rootFS := *img.RootFS |
|
204 |
+ rootFS.DiffIDs = nil |
|
205 |
+ for _, id := range img.RootFS.DiffIDs { |
|
206 |
+ rootFS.Append(id) |
|
207 |
+ chid := rootFS.ChainID() |
|
208 |
+ layerRefs[chid]++ |
|
209 |
+ if _, ok := allLayers[chid]; !ok { |
|
210 |
+ return nil, fmt.Errorf("layer %v was not found (corruption?)", chid) |
|
211 |
+ } |
|
212 |
+ } |
|
213 |
+ imagesMap[img] = newImage |
|
214 |
+ } |
|
215 |
+ |
|
216 |
+ images = append(images, newImage) |
|
217 |
+ } |
|
218 |
+ |
|
219 |
+ if withExtraAttrs { |
|
220 |
+ // Get Shared sizes |
|
221 |
+ for img, newImage := range imagesMap { |
|
222 |
+ rootFS := *img.RootFS |
|
223 |
+ rootFS.DiffIDs = nil |
|
224 |
+ |
|
225 |
+ newImage.SharedSize = 0 |
|
226 |
+ for _, id := range img.RootFS.DiffIDs { |
|
227 |
+ rootFS.Append(id) |
|
228 |
+ chid := rootFS.ChainID() |
|
229 |
+ |
|
230 |
+ diffSize, err := allLayers[chid].DiffSize() |
|
231 |
+ if err != nil { |
|
232 |
+ return nil, err |
|
233 |
+ } |
|
234 |
+ |
|
235 |
+ if layerRefs[chid] > 1 { |
|
236 |
+ newImage.SharedSize += diffSize |
|
237 |
+ } |
|
238 |
+ } |
|
239 |
+ } |
|
240 |
+ } |
|
241 |
+ |
|
242 |
+ sort.Sort(sort.Reverse(byCreated(images))) |
|
243 |
+ |
|
244 |
+ return images, nil |
|
245 |
+} |
|
246 |
+ |
|
247 |
+// SquashImage creates a new image with the diff of the specified image and the specified parent. |
|
248 |
+// This new image contains only the layers from it's parent + 1 extra layer which contains the diff of all the layers in between. |
|
249 |
+// The existing image(s) is not destroyed. |
|
250 |
+// If no parent is specified, a new image with the diff of all the specified image's layers merged into a new layer that has no parents. |
|
251 |
+func (i *ImageService) SquashImage(id, parent string) (string, error) { |
|
252 |
+ |
|
253 |
+ var ( |
|
254 |
+ img *image.Image |
|
255 |
+ err error |
|
256 |
+ ) |
|
257 |
+ if img, err = i.imageStore.Get(image.ID(id)); err != nil { |
|
258 |
+ return "", err |
|
259 |
+ } |
|
260 |
+ |
|
261 |
+ var parentImg *image.Image |
|
262 |
+ var parentChainID layer.ChainID |
|
263 |
+ if len(parent) != 0 { |
|
264 |
+ parentImg, err = i.imageStore.Get(image.ID(parent)) |
|
265 |
+ if err != nil { |
|
266 |
+ return "", errors.Wrap(err, "error getting specified parent layer") |
|
267 |
+ } |
|
268 |
+ parentChainID = parentImg.RootFS.ChainID() |
|
269 |
+ } else { |
|
270 |
+ rootFS := image.NewRootFS() |
|
271 |
+ parentImg = &image.Image{RootFS: rootFS} |
|
272 |
+ } |
|
273 |
+ |
|
274 |
+ l, err := i.layerStores[img.OperatingSystem()].Get(img.RootFS.ChainID()) |
|
275 |
+ if err != nil { |
|
276 |
+ return "", errors.Wrap(err, "error getting image layer") |
|
277 |
+ } |
|
278 |
+ defer i.layerStores[img.OperatingSystem()].Release(l) |
|
279 |
+ |
|
280 |
+ ts, err := l.TarStreamFrom(parentChainID) |
|
281 |
+ if err != nil { |
|
282 |
+ return "", errors.Wrapf(err, "error getting tar stream to parent") |
|
283 |
+ } |
|
284 |
+ defer ts.Close() |
|
285 |
+ |
|
286 |
+ newL, err := i.layerStores[img.OperatingSystem()].Register(ts, parentChainID) |
|
287 |
+ if err != nil { |
|
288 |
+ return "", errors.Wrap(err, "error registering layer") |
|
289 |
+ } |
|
290 |
+ defer i.layerStores[img.OperatingSystem()].Release(newL) |
|
291 |
+ |
|
292 |
+ newImage := *img |
|
293 |
+ newImage.RootFS = nil |
|
294 |
+ |
|
295 |
+ rootFS := *parentImg.RootFS |
|
296 |
+ rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID()) |
|
297 |
+ newImage.RootFS = &rootFS |
|
298 |
+ |
|
299 |
+ for i, hi := range newImage.History { |
|
300 |
+ if i >= len(parentImg.History) { |
|
301 |
+ hi.EmptyLayer = true |
|
302 |
+ } |
|
303 |
+ newImage.History[i] = hi |
|
304 |
+ } |
|
305 |
+ |
|
306 |
+ now := time.Now() |
|
307 |
+ var historyComment string |
|
308 |
+ if len(parent) > 0 { |
|
309 |
+ historyComment = fmt.Sprintf("merge %s to %s", id, parent) |
|
310 |
+ } else { |
|
311 |
+ historyComment = fmt.Sprintf("create new from %s", id) |
|
312 |
+ } |
|
313 |
+ |
|
314 |
+ newImage.History = append(newImage.History, image.History{ |
|
315 |
+ Created: now, |
|
316 |
+ Comment: historyComment, |
|
317 |
+ }) |
|
318 |
+ newImage.Created = now |
|
319 |
+ |
|
320 |
+ b, err := json.Marshal(&newImage) |
|
321 |
+ if err != nil { |
|
322 |
+ return "", errors.Wrap(err, "error marshalling image config") |
|
323 |
+ } |
|
324 |
+ |
|
325 |
+ newImgID, err := i.imageStore.Create(b) |
|
326 |
+ if err != nil { |
|
327 |
+ return "", errors.Wrap(err, "error creating new image after squash") |
|
328 |
+ } |
|
329 |
+ return string(newImgID), nil |
|
330 |
+} |
|
331 |
+ |
|
332 |
+func newImage(image *image.Image, size int64) *types.ImageSummary { |
|
333 |
+ newImage := new(types.ImageSummary) |
|
334 |
+ newImage.ParentID = image.Parent.String() |
|
335 |
+ newImage.ID = image.ID().String() |
|
336 |
+ newImage.Created = image.Created.Unix() |
|
337 |
+ newImage.Size = size |
|
338 |
+ newImage.VirtualSize = size |
|
339 |
+ newImage.SharedSize = -1 |
|
340 |
+ newImage.Containers = -1 |
|
341 |
+ if image.Config != nil { |
|
342 |
+ newImage.Labels = image.Config.Labels |
|
343 |
+ } |
|
344 |
+ return newImage |
|
345 |
+} |
0 | 346 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,32 @@ |
0 |
+package images // import "github.com/docker/docker/daemon/images" |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "fmt" |
|
4 |
+ |
|
5 |
+ metrics "github.com/docker/go-metrics" |
|
6 |
+) |
|
7 |
+ |
|
8 |
// invalidFilter is returned when an image-list filter has an unsupported
// name or value.
type invalidFilter struct {
	filter string
	value  interface{}
}

// Error renders the message as "Invalid filter 'name=value'", omitting the
// "=value" part when no value is attached.
func (e invalidFilter) Error() string {
	if e.value == nil {
		return fmt.Sprintf("Invalid filter '%s'", e.filter)
	}
	return fmt.Sprintf("Invalid filter '%s=%s'", e.filter, e.value)
}

// InvalidParameter marks this error as an invalid-parameter error for errdefs.
func (e invalidFilter) InvalidParameter() {}
|
22 |
+ |
|
23 |
// imageActions times each image operation (pull, push, tag, ...) labeled by
// action name; moved here from daemon/metrics.go.
var imageActions metrics.LabeledTimer

func init() {
	ns := metrics.NewNamespace("engine", "daemon", nil)
	imageActions = ns.NewLabeledTimer("image_actions", "The number of seconds it takes to process each image action", "action")
	// TODO: is it OK to register a namespace with the same name? Or does this
	// need to be exported from somewhere?
	metrics.Register(ns)
}
0 | 32 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,229 @@ |
0 |
+package images // import "github.com/docker/docker/daemon/images" |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "context" |
|
4 |
+ "os" |
|
5 |
+ |
|
6 |
+ "github.com/docker/docker/container" |
|
7 |
+ daemonevents "github.com/docker/docker/daemon/events" |
|
8 |
+ "github.com/docker/docker/distribution/metadata" |
|
9 |
+ "github.com/docker/docker/distribution/xfer" |
|
10 |
+ "github.com/docker/docker/image" |
|
11 |
+ "github.com/docker/docker/layer" |
|
12 |
+ dockerreference "github.com/docker/docker/reference" |
|
13 |
+ "github.com/docker/docker/registry" |
|
14 |
+ "github.com/docker/libtrust" |
|
15 |
+ "github.com/opencontainers/go-digest" |
|
16 |
+ "github.com/pkg/errors" |
|
17 |
+ "github.com/sirupsen/logrus" |
|
18 |
+) |
|
19 |
+ |
|
20 |
// containerStore is the narrow view of the daemon's container store needed
// by image operations (consumer-side interface).
type containerStore interface {
	// used by image delete
	First(container.StoreFilter) *container.Container
	// used by image prune, and image list
	List() []*container.Container
	// TODO: remove, only used for CommitBuildStep
	Get(string) *container.Container
}
|
28 |
+ |
|
29 |
// ImageServiceConfig is the configuration used to create a new ImageService.
// LayerStores is keyed by operating system; MaxConcurrentDownloads/Uploads
// bound the transfer managers created by NewImageService.
type ImageServiceConfig struct {
	ContainerStore            containerStore
	DistributionMetadataStore metadata.Store
	EventsService             *daemonevents.Events
	ImageStore                image.Store
	LayerStores               map[string]layer.Store
	MaxConcurrentDownloads    int
	MaxConcurrentUploads      int
	ReferenceStore            dockerreference.Store
	RegistryService           registry.Service
	TrustKey                  libtrust.PrivateKey
}
|
42 |
+ |
|
43 |
// NewImageService returns a new ImageService from a configuration.
// It constructs the layer download/upload managers from the configured
// concurrency limits; all other fields are copied as-is.
func NewImageService(config ImageServiceConfig) *ImageService {
	logrus.Debugf("Max Concurrent Downloads: %d", config.MaxConcurrentDownloads)
	logrus.Debugf("Max Concurrent Uploads: %d", config.MaxConcurrentUploads)
	return &ImageService{
		containers:                config.ContainerStore,
		distributionMetadataStore: config.DistributionMetadataStore,
		downloadManager:           xfer.NewLayerDownloadManager(config.LayerStores, config.MaxConcurrentDownloads),
		eventsService:             config.EventsService,
		imageStore:                config.ImageStore,
		layerStores:               config.LayerStores,
		referenceStore:            config.ReferenceStore,
		registryService:           config.RegistryService,
		trustKey:                  config.TrustKey,
		uploadManager:             xfer.NewLayerUploadManager(config.MaxConcurrentUploads),
	}
}
|
60 |
+ |
|
61 |
// ImageService provides a backend for image management.
type ImageService struct {
	containers                containerStore
	distributionMetadataStore metadata.Store
	downloadManager           *xfer.LayerDownloadManager
	eventsService             *daemonevents.Events
	imageStore                image.Store
	layerStores               map[string]layer.Store // By operating system
	pruneRunning              int32                  // accessed atomically; guards against concurrent prunes
	referenceStore            dockerreference.Store
	registryService           registry.Service
	trustKey                  libtrust.PrivateKey
	uploadManager             *xfer.LayerUploadManager
}
|
75 |
+ |
|
76 |
// CountImages returns the number of images stored by ImageService
// called from info.go
func (i *ImageService) CountImages() int {
	return len(i.imageStore.Map())
}
|
81 |
+ |
|
82 |
// Children returns the children image.IDs for a parent image.
// called from list.go to filter containers
// TODO: refactor to expose an ancestry for image.ID?
func (i *ImageService) Children(id image.ID) []image.ID {
	return i.imageStore.Children(id)
}
|
88 |
+ |
|
89 |
+// CreateLayer creates a filesystem layer for a container. |
|
90 |
+// called from create.go |
|
91 |
+// TODO: accept an opt struct instead of container? |
|
92 |
+func (i *ImageService) CreateLayer(container *container.Container, initFunc layer.MountInit) (layer.RWLayer, error) { |
|
93 |
+ var layerID layer.ChainID |
|
94 |
+ if container.ImageID != "" { |
|
95 |
+ img, err := i.imageStore.Get(container.ImageID) |
|
96 |
+ if err != nil { |
|
97 |
+ return nil, err |
|
98 |
+ } |
|
99 |
+ layerID = img.RootFS.ChainID() |
|
100 |
+ } |
|
101 |
+ |
|
102 |
+ rwLayerOpts := &layer.CreateRWLayerOpts{ |
|
103 |
+ MountLabel: container.MountLabel, |
|
104 |
+ InitFunc: initFunc, |
|
105 |
+ StorageOpt: container.HostConfig.StorageOpt, |
|
106 |
+ } |
|
107 |
+ |
|
108 |
+ // Indexing by OS is safe here as validation of OS has already been performed in create() (the only |
|
109 |
+ // caller), and guaranteed non-nil |
|
110 |
+ return i.layerStores[container.OS].CreateRWLayer(container.ID, layerID, rwLayerOpts) |
|
111 |
+} |
|
112 |
+ |
|
113 |
// GetLayerByID returns a layer by ID and operating system
// called from daemon.go Daemon.restore(), and Daemon.containerExport()
func (i *ImageService) GetLayerByID(cid string, os string) (layer.RWLayer, error) {
	return i.layerStores[os].GetRWLayer(cid)
}
|
118 |
+ |
|
119 |
+// LayerStoreStatus returns the status for each layer store |
|
120 |
+// called from info.go |
|
121 |
+func (i *ImageService) LayerStoreStatus() map[string][][2]string { |
|
122 |
+ result := make(map[string][][2]string) |
|
123 |
+ for os, store := range i.layerStores { |
|
124 |
+ result[os] = store.DriverStatus() |
|
125 |
+ } |
|
126 |
+ return result |
|
127 |
+} |
|
128 |
+ |
|
129 |
// GetLayerMountID returns the mount ID for a layer
// called from daemon.go Daemon.Shutdown(), and Daemon.Cleanup() (cleanup is actually continerCleanup)
// TODO: needs to be refactored to Unmount (see callers), or removed and replaced
// with GetLayerByID
func (i *ImageService) GetLayerMountID(cid string, os string) (string, error) {
	return i.layerStores[os].GetMountID(cid)
}
|
136 |
+ |
|
137 |
+// Cleanup resources before the process is shutdown. |
|
138 |
+// called from daemon.go Daemon.Shutdown() |
|
139 |
+func (i *ImageService) Cleanup() { |
|
140 |
+ for os, ls := range i.layerStores { |
|
141 |
+ if ls != nil { |
|
142 |
+ if err := ls.Cleanup(); err != nil { |
|
143 |
+ logrus.Errorf("Error during layer Store.Cleanup(): %v %s", err, os) |
|
144 |
+ } |
|
145 |
+ } |
|
146 |
+ } |
|
147 |
+} |
|
148 |
+ |
|
149 |
// GraphDriverForOS returns the name of the graph driver
// moved from Daemon.GraphDriverName, used by:
// - newContainer
// - to report an error in Daemon.Mount(container)
func (i *ImageService) GraphDriverForOS(os string) string {
	return i.layerStores[os].DriverName()
}
|
156 |
+ |
|
157 |
+// ReleaseLayer releases a layer allowing it to be removed |
|
158 |
+// called from delete.go Daemon.cleanupContainer(), and Daemon.containerExport() |
|
159 |
+func (i *ImageService) ReleaseLayer(rwlayer layer.RWLayer, containerOS string) error { |
|
160 |
+ metadata, err := i.layerStores[containerOS].ReleaseRWLayer(rwlayer) |
|
161 |
+ layer.LogReleaseMetadata(metadata) |
|
162 |
+ if err != nil && err != layer.ErrMountDoesNotExist && !os.IsNotExist(errors.Cause(err)) { |
|
163 |
+ return errors.Wrapf(err, "driver %q failed to remove root filesystem", |
|
164 |
+ i.layerStores[containerOS].DriverName()) |
|
165 |
+ } |
|
166 |
+ return nil |
|
167 |
+} |
|
168 |
+ |
|
169 |
// LayerDiskUsage returns the number of bytes used by layer stores
// called from disk_usage.go
//
// Only layers still referenced by an image (per getLayerRefs) are counted;
// unreferenced ("leaked") layers and layers whose diff size cannot be read
// are logged and skipped. Cancellation via ctx aborts with the partial sum.
func (i *ImageService) LayerDiskUsage(ctx context.Context) (int64, error) {
	var allLayersSize int64
	layerRefs := i.getLayerRefs()
	for _, ls := range i.layerStores {
		allLayers := ls.Map()
		for _, l := range allLayers {
			// Check for cancellation between layers; DiffSize can be slow.
			select {
			case <-ctx.Done():
				return allLayersSize, ctx.Err()
			default:
				size, err := l.DiffSize()
				if err == nil {
					if _, ok := layerRefs[l.ChainID()]; ok {
						allLayersSize += size
					} else {
						logrus.Warnf("found leaked image layer %v", l.ChainID())
					}
				} else {
					logrus.Warnf("failed to get diff size for layer %v", l.ChainID())
				}
			}
		}
	}
	return allLayersSize, nil
}
|
196 |
+ |
|
197 |
// getLayerRefs counts, for every chain ID reachable from a retained image,
// how many images reference that layer. Images with no tags/digests that
// still have children (pure intermediate images) are skipped.
func (i *ImageService) getLayerRefs() map[layer.ChainID]int {
	tmpImages := i.imageStore.Map()
	layerRefs := map[layer.ChainID]int{}
	for id, img := range tmpImages {
		dgst := digest.Digest(id)
		if len(i.referenceStore.References(dgst)) == 0 && len(i.imageStore.Children(id)) != 0 {
			continue
		}

		// Rebuild the chain incrementally on a scratch copy so each
		// ancestor's chain ID can be counted, not just the full chain.
		rootFS := *img.RootFS
		rootFS.DiffIDs = nil
		for _, id := range img.RootFS.DiffIDs {
			rootFS.Append(id)
			chid := rootFS.ChainID()
			layerRefs[chid]++
		}
	}

	return layerRefs
}
|
217 |
+ |
|
218 |
+// UpdateConfig values |
|
219 |
+// |
|
220 |
+// called from reload.go |
|
221 |
+func (i *ImageService) UpdateConfig(maxDownloads, maxUploads *int) { |
|
222 |
+ if i.downloadManager != nil && maxDownloads != nil { |
|
223 |
+ i.downloadManager.SetConcurrency(*maxDownloads) |
|
224 |
+ } |
|
225 |
+ if i.uploadManager != nil && maxUploads != nil { |
|
226 |
+ i.uploadManager.SetConcurrency(*maxUploads) |
|
227 |
+ } |
|
228 |
+} |
... | ... |
@@ -80,7 +80,7 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) { |
80 | 80 |
|
81 | 81 |
var ds [][2]string |
82 | 82 |
drivers := "" |
83 |
- statuses := daemon.imageService.GraphDriverStatuses() |
|
83 |
+ statuses := daemon.imageService.LayerStoreStatus() |
|
84 | 84 |
for os, gd := range daemon.graphDrivers { |
85 | 85 |
ds = append(ds, statuses[os]...) |
86 | 86 |
drivers += gd |
... | ... |
@@ -9,6 +9,7 @@ import ( |
9 | 9 |
"github.com/docker/docker/api/types" |
10 | 10 |
"github.com/docker/docker/api/types/filters" |
11 | 11 |
"github.com/docker/docker/container" |
12 |
+ "github.com/docker/docker/daemon/images" |
|
12 | 13 |
"github.com/docker/docker/errdefs" |
13 | 14 |
"github.com/docker/docker/image" |
14 | 15 |
"github.com/docker/docker/volume" |
... | ... |
@@ -592,7 +593,7 @@ func (daemon *Daemon) refreshImage(s *container.Snapshot, ctx *listContext) (*ty |
592 | 592 |
image := s.Image // keep the original ref if still valid (hasn't changed) |
593 | 593 |
if image != s.ImageID { |
594 | 594 |
id, _, err := daemon.imageService.GetImageIDAndOS(image) |
595 |
- if _, isDNE := err.(errImageDoesNotExist); err != nil && !isDNE { |
|
595 |
+ if _, isDNE := err.(images.ErrImageDoesNotExist); err != nil && !isDNE { |
|
596 | 596 |
return nil, err |
597 | 597 |
} |
598 | 598 |
if err != nil || id.String() != s.ImageID { |
... | ... |
@@ -14,7 +14,6 @@ const metricsPluginType = "MetricsCollector" |
14 | 14 |
|
15 | 15 |
var ( |
16 | 16 |
containerActions metrics.LabeledTimer |
17 |
- imageActions metrics.LabeledTimer |
|
18 | 17 |
networkActions metrics.LabeledTimer |
19 | 18 |
engineInfo metrics.LabeledGauge |
20 | 19 |
engineCpus metrics.Gauge |
... | ... |
@@ -52,7 +51,6 @@ func init() { |
52 | 52 |
engineMemory = ns.NewGauge("engine_memory", "The number of bytes of memory that the host system of the engine has", metrics.Bytes) |
53 | 53 |
healthChecksCounter = ns.NewCounter("health_checks", "The total number of health checks") |
54 | 54 |
healthChecksFailedCounter = ns.NewCounter("health_checks_failed", "The total number of failed health checks") |
55 |
- imageActions = ns.NewLabeledTimer("image_actions", "The number of seconds it takes to process each image action", "action") |
|
56 | 55 |
|
57 | 56 |
stateCtr = newStateCounter(ns.NewDesc("container_states", "The count of containers in various states", metrics.Unit("containers"), "state")) |
58 | 57 |
ns.Add(stateCtr) |
... | ... |
@@ -138,7 +138,7 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { |
138 | 138 |
} |
139 | 139 |
} |
140 | 140 |
s.Process.User.Username = c.Config.User |
141 |
- s.Windows.LayerFolders, err = daemon.GetLayerFolders(img, c.RWLayer) |
|
141 |
+ s.Windows.LayerFolders, err = daemon.imageService.GetLayerFolders(img, c.RWLayer) |
|
142 | 142 |
if err != nil { |
143 | 143 |
return nil, errors.Wrapf(err, "container %s", c.ID) |
144 | 144 |
} |
... | ... |
@@ -90,12 +90,6 @@ func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(conf *config.Config |
90 | 90 |
daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads |
91 | 91 |
} |
92 | 92 |
logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads) |
93 |
- if daemon.imageService.downloadManager != nil { |
|
94 |
- daemon.imageService.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads) |
|
95 |
- } |
|
96 |
- |
|
97 |
- // prepare reload event attributes with updatable configurations |
|
98 |
- attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads) |
|
99 | 93 |
|
100 | 94 |
// If no value is set for max-concurrent-upload we assume it is the default value |
101 | 95 |
// We always "reset" as the cost is lightweight and easy to maintain. |
... | ... |
@@ -106,10 +100,10 @@ func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(conf *config.Config |
106 | 106 |
daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads |
107 | 107 |
} |
108 | 108 |
logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads) |
109 |
- if daemon.imageService.uploadManager != nil { |
|
110 |
- daemon.imageService.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads) |
|
111 |
- } |
|
112 | 109 |
|
110 |
+ daemon.imageService.UpdateConfig(conf.MaxConcurrentDownloads, conf.MaxConcurrentUploads) |
|
111 |
+ // prepare reload event attributes with updatable configurations |
|
112 |
+ attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads) |
|
113 | 113 |
// prepare reload event attributes with updatable configurations |
114 | 114 |
attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads) |
115 | 115 |
} |
... | ... |
@@ -7,6 +7,7 @@ import ( |
7 | 7 |
"time" |
8 | 8 |
|
9 | 9 |
"github.com/docker/docker/daemon/config" |
10 |
+ "github.com/docker/docker/daemon/images" |
|
10 | 11 |
"github.com/docker/docker/pkg/discovery" |
11 | 12 |
_ "github.com/docker/docker/pkg/discovery/memory" |
12 | 13 |
"github.com/docker/docker/registry" |
... | ... |
@@ -21,7 +22,7 @@ func TestDaemonReloadLabels(t *testing.T) { |
21 | 21 |
Labels: []string{"foo:bar"}, |
22 | 22 |
}, |
23 | 23 |
}, |
24 |
- imageService: &imageService{}, |
|
24 |
+ imageService: images.NewImageService(images.ImageServiceConfig{}), |
|
25 | 25 |
} |
26 | 26 |
|
27 | 27 |
valuesSets := make(map[string]interface{}) |
... | ... |
@@ -46,7 +47,7 @@ func TestDaemonReloadLabels(t *testing.T) { |
46 | 46 |
func TestDaemonReloadAllowNondistributableArtifacts(t *testing.T) { |
47 | 47 |
daemon := &Daemon{ |
48 | 48 |
configStore: &config.Config{}, |
49 |
- imageService: &imageService{}, |
|
49 |
+ imageService: images.NewImageService(images.ImageServiceConfig{}), |
|
50 | 50 |
} |
51 | 51 |
|
52 | 52 |
var err error |
... | ... |
@@ -101,7 +102,7 @@ func TestDaemonReloadAllowNondistributableArtifacts(t *testing.T) { |
101 | 101 |
|
102 | 102 |
func TestDaemonReloadMirrors(t *testing.T) { |
103 | 103 |
daemon := &Daemon{ |
104 |
- imageService: &imageService{}, |
|
104 |
+ imageService: images.NewImageService(images.ImageServiceConfig{}), |
|
105 | 105 |
} |
106 | 106 |
var err error |
107 | 107 |
daemon.RegistryService, err = registry.NewService(registry.ServiceOptions{ |
... | ... |
@@ -200,7 +201,7 @@ func TestDaemonReloadMirrors(t *testing.T) { |
200 | 200 |
|
201 | 201 |
func TestDaemonReloadInsecureRegistries(t *testing.T) { |
202 | 202 |
daemon := &Daemon{ |
203 |
- imageService: &imageService{}, |
|
203 |
+ imageService: images.NewImageService(images.ImageServiceConfig{}), |
|
204 | 204 |
} |
205 | 205 |
var err error |
206 | 206 |
// initialize daemon with existing insecure registries: "127.0.0.0/8", "10.10.1.11:5000", "10.10.1.22:5000" |
... | ... |
@@ -292,7 +293,7 @@ func TestDaemonReloadInsecureRegistries(t *testing.T) { |
292 | 292 |
|
293 | 293 |
func TestDaemonReloadNotAffectOthers(t *testing.T) { |
294 | 294 |
daemon := &Daemon{ |
295 |
- imageService: &imageService{}, |
|
295 |
+ imageService: images.NewImageService(images.ImageServiceConfig{}), |
|
296 | 296 |
} |
297 | 297 |
daemon.configStore = &config.Config{ |
298 | 298 |
CommonConfig: config.CommonConfig{ |
... | ... |
@@ -326,7 +327,7 @@ func TestDaemonReloadNotAffectOthers(t *testing.T) { |
326 | 326 |
|
327 | 327 |
func TestDaemonDiscoveryReload(t *testing.T) { |
328 | 328 |
daemon := &Daemon{ |
329 |
- imageService: &imageService{}, |
|
329 |
+ imageService: images.NewImageService(images.ImageServiceConfig{}), |
|
330 | 330 |
} |
331 | 331 |
daemon.configStore = &config.Config{ |
332 | 332 |
CommonConfig: config.CommonConfig{ |
... | ... |
@@ -405,7 +406,7 @@ func TestDaemonDiscoveryReload(t *testing.T) { |
405 | 405 |
|
406 | 406 |
func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) { |
407 | 407 |
daemon := &Daemon{ |
408 |
- imageService: &imageService{}, |
|
408 |
+ imageService: images.NewImageService(images.ImageServiceConfig{}), |
|
409 | 409 |
} |
410 | 410 |
daemon.configStore = &config.Config{} |
411 | 411 |
|
... | ... |
@@ -452,7 +453,7 @@ func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) { |
452 | 452 |
|
453 | 453 |
func TestDaemonDiscoveryReloadOnlyClusterAdvertise(t *testing.T) { |
454 | 454 |
daemon := &Daemon{ |
455 |
- imageService: &imageService{}, |
|
455 |
+ imageService: images.NewImageService(images.ImageServiceConfig{}), |
|
456 | 456 |
} |
457 | 457 |
daemon.configStore = &config.Config{ |
458 | 458 |
CommonConfig: config.CommonConfig{ |
... | ... |
@@ -498,7 +499,7 @@ func TestDaemonDiscoveryReloadOnlyClusterAdvertise(t *testing.T) { |
498 | 498 |
|
499 | 499 |
func TestDaemonReloadNetworkDiagnosticPort(t *testing.T) { |
500 | 500 |
daemon := &Daemon{ |
501 |
- imageService: &imageService{}, |
|
501 |
+ imageService: images.NewImageService(images.ImageServiceConfig{}), |
|
502 | 502 |
} |
503 | 503 |
daemon.configStore = &config.Config{} |
504 | 504 |
|
505 | 505 |
deleted file mode 100644 |
... | ... |
@@ -1,358 +0,0 @@ |
1 |
-package daemon // import "github.com/docker/docker/daemon" |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "errors" |
|
5 |
- "strings" |
|
6 |
- "testing" |
|
7 |
- |
|
8 |
- "golang.org/x/net/context" |
|
9 |
- |
|
10 |
- "github.com/docker/docker/api/types" |
|
11 |
- registrytypes "github.com/docker/docker/api/types/registry" |
|
12 |
- "github.com/docker/docker/registry" |
|
13 |
-) |
|
14 |
- |
|
15 |
-type FakeService struct { |
|
16 |
- registry.DefaultService |
|
17 |
- |
|
18 |
- shouldReturnError bool |
|
19 |
- |
|
20 |
- term string |
|
21 |
- results []registrytypes.SearchResult |
|
22 |
-} |
|
23 |
- |
|
24 |
-func (s *FakeService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { |
|
25 |
- if s.shouldReturnError { |
|
26 |
- return nil, errors.New("Search unknown error") |
|
27 |
- } |
|
28 |
- return ®istrytypes.SearchResults{ |
|
29 |
- Query: s.term, |
|
30 |
- NumResults: len(s.results), |
|
31 |
- Results: s.results, |
|
32 |
- }, nil |
|
33 |
-} |
|
34 |
- |
|
35 |
-func TestSearchRegistryForImagesErrors(t *testing.T) { |
|
36 |
- errorCases := []struct { |
|
37 |
- filtersArgs string |
|
38 |
- shouldReturnError bool |
|
39 |
- expectedError string |
|
40 |
- }{ |
|
41 |
- { |
|
42 |
- expectedError: "Search unknown error", |
|
43 |
- shouldReturnError: true, |
|
44 |
- }, |
|
45 |
- { |
|
46 |
- filtersArgs: "invalid json", |
|
47 |
- expectedError: "invalid character 'i' looking for beginning of value", |
|
48 |
- }, |
|
49 |
- { |
|
50 |
- filtersArgs: `{"type":{"custom":true}}`, |
|
51 |
- expectedError: "Invalid filter 'type'", |
|
52 |
- }, |
|
53 |
- { |
|
54 |
- filtersArgs: `{"is-automated":{"invalid":true}}`, |
|
55 |
- expectedError: "Invalid filter 'is-automated=[invalid]'", |
|
56 |
- }, |
|
57 |
- { |
|
58 |
- filtersArgs: `{"is-automated":{"true":true,"false":true}}`, |
|
59 |
- expectedError: "Invalid filter 'is-automated", |
|
60 |
- }, |
|
61 |
- { |
|
62 |
- filtersArgs: `{"is-official":{"invalid":true}}`, |
|
63 |
- expectedError: "Invalid filter 'is-official=[invalid]'", |
|
64 |
- }, |
|
65 |
- { |
|
66 |
- filtersArgs: `{"is-official":{"true":true,"false":true}}`, |
|
67 |
- expectedError: "Invalid filter 'is-official", |
|
68 |
- }, |
|
69 |
- { |
|
70 |
- filtersArgs: `{"stars":{"invalid":true}}`, |
|
71 |
- expectedError: "Invalid filter 'stars=invalid'", |
|
72 |
- }, |
|
73 |
- { |
|
74 |
- filtersArgs: `{"stars":{"1":true,"invalid":true}}`, |
|
75 |
- expectedError: "Invalid filter 'stars=invalid'", |
|
76 |
- }, |
|
77 |
- } |
|
78 |
- for index, e := range errorCases { |
|
79 |
- daemon := &Daemon{ |
|
80 |
- RegistryService: &FakeService{ |
|
81 |
- shouldReturnError: e.shouldReturnError, |
|
82 |
- }, |
|
83 |
- } |
|
84 |
- _, err := daemon.SearchRegistryForImages(context.Background(), e.filtersArgs, "term", 25, nil, map[string][]string{}) |
|
85 |
- if err == nil { |
|
86 |
- t.Errorf("%d: expected an error, got nothing", index) |
|
87 |
- } |
|
88 |
- if !strings.Contains(err.Error(), e.expectedError) { |
|
89 |
- t.Errorf("%d: expected error to contain %s, got %s", index, e.expectedError, err.Error()) |
|
90 |
- } |
|
91 |
- } |
|
92 |
-} |
|
93 |
- |
|
94 |
-func TestSearchRegistryForImages(t *testing.T) { |
|
95 |
- term := "term" |
|
96 |
- successCases := []struct { |
|
97 |
- filtersArgs string |
|
98 |
- registryResults []registrytypes.SearchResult |
|
99 |
- expectedResults []registrytypes.SearchResult |
|
100 |
- }{ |
|
101 |
- { |
|
102 |
- filtersArgs: "", |
|
103 |
- registryResults: []registrytypes.SearchResult{}, |
|
104 |
- expectedResults: []registrytypes.SearchResult{}, |
|
105 |
- }, |
|
106 |
- { |
|
107 |
- filtersArgs: "", |
|
108 |
- registryResults: []registrytypes.SearchResult{ |
|
109 |
- { |
|
110 |
- Name: "name", |
|
111 |
- Description: "description", |
|
112 |
- }, |
|
113 |
- }, |
|
114 |
- expectedResults: []registrytypes.SearchResult{ |
|
115 |
- { |
|
116 |
- Name: "name", |
|
117 |
- Description: "description", |
|
118 |
- }, |
|
119 |
- }, |
|
120 |
- }, |
|
121 |
- { |
|
122 |
- filtersArgs: `{"is-automated":{"true":true}}`, |
|
123 |
- registryResults: []registrytypes.SearchResult{ |
|
124 |
- { |
|
125 |
- Name: "name", |
|
126 |
- Description: "description", |
|
127 |
- }, |
|
128 |
- }, |
|
129 |
- expectedResults: []registrytypes.SearchResult{}, |
|
130 |
- }, |
|
131 |
- { |
|
132 |
- filtersArgs: `{"is-automated":{"true":true}}`, |
|
133 |
- registryResults: []registrytypes.SearchResult{ |
|
134 |
- { |
|
135 |
- Name: "name", |
|
136 |
- Description: "description", |
|
137 |
- IsAutomated: true, |
|
138 |
- }, |
|
139 |
- }, |
|
140 |
- expectedResults: []registrytypes.SearchResult{ |
|
141 |
- { |
|
142 |
- Name: "name", |
|
143 |
- Description: "description", |
|
144 |
- IsAutomated: true, |
|
145 |
- }, |
|
146 |
- }, |
|
147 |
- }, |
|
148 |
- { |
|
149 |
- filtersArgs: `{"is-automated":{"false":true}}`, |
|
150 |
- registryResults: []registrytypes.SearchResult{ |
|
151 |
- { |
|
152 |
- Name: "name", |
|
153 |
- Description: "description", |
|
154 |
- IsAutomated: true, |
|
155 |
- }, |
|
156 |
- }, |
|
157 |
- expectedResults: []registrytypes.SearchResult{}, |
|
158 |
- }, |
|
159 |
- { |
|
160 |
- filtersArgs: `{"is-automated":{"false":true}}`, |
|
161 |
- registryResults: []registrytypes.SearchResult{ |
|
162 |
- { |
|
163 |
- Name: "name", |
|
164 |
- Description: "description", |
|
165 |
- IsAutomated: false, |
|
166 |
- }, |
|
167 |
- }, |
|
168 |
- expectedResults: []registrytypes.SearchResult{ |
|
169 |
- { |
|
170 |
- Name: "name", |
|
171 |
- Description: "description", |
|
172 |
- IsAutomated: false, |
|
173 |
- }, |
|
174 |
- }, |
|
175 |
- }, |
|
176 |
- { |
|
177 |
- filtersArgs: `{"is-official":{"true":true}}`, |
|
178 |
- registryResults: []registrytypes.SearchResult{ |
|
179 |
- { |
|
180 |
- Name: "name", |
|
181 |
- Description: "description", |
|
182 |
- }, |
|
183 |
- }, |
|
184 |
- expectedResults: []registrytypes.SearchResult{}, |
|
185 |
- }, |
|
186 |
- { |
|
187 |
- filtersArgs: `{"is-official":{"true":true}}`, |
|
188 |
- registryResults: []registrytypes.SearchResult{ |
|
189 |
- { |
|
190 |
- Name: "name", |
|
191 |
- Description: "description", |
|
192 |
- IsOfficial: true, |
|
193 |
- }, |
|
194 |
- }, |
|
195 |
- expectedResults: []registrytypes.SearchResult{ |
|
196 |
- { |
|
197 |
- Name: "name", |
|
198 |
- Description: "description", |
|
199 |
- IsOfficial: true, |
|
200 |
- }, |
|
201 |
- }, |
|
202 |
- }, |
|
203 |
- { |
|
204 |
- filtersArgs: `{"is-official":{"false":true}}`, |
|
205 |
- registryResults: []registrytypes.SearchResult{ |
|
206 |
- { |
|
207 |
- Name: "name", |
|
208 |
- Description: "description", |
|
209 |
- IsOfficial: true, |
|
210 |
- }, |
|
211 |
- }, |
|
212 |
- expectedResults: []registrytypes.SearchResult{}, |
|
213 |
- }, |
|
214 |
- { |
|
215 |
- filtersArgs: `{"is-official":{"false":true}}`, |
|
216 |
- registryResults: []registrytypes.SearchResult{ |
|
217 |
- { |
|
218 |
- Name: "name", |
|
219 |
- Description: "description", |
|
220 |
- IsOfficial: false, |
|
221 |
- }, |
|
222 |
- }, |
|
223 |
- expectedResults: []registrytypes.SearchResult{ |
|
224 |
- { |
|
225 |
- Name: "name", |
|
226 |
- Description: "description", |
|
227 |
- IsOfficial: false, |
|
228 |
- }, |
|
229 |
- }, |
|
230 |
- }, |
|
231 |
- { |
|
232 |
- filtersArgs: `{"stars":{"0":true}}`, |
|
233 |
- registryResults: []registrytypes.SearchResult{ |
|
234 |
- { |
|
235 |
- Name: "name", |
|
236 |
- Description: "description", |
|
237 |
- StarCount: 0, |
|
238 |
- }, |
|
239 |
- }, |
|
240 |
- expectedResults: []registrytypes.SearchResult{ |
|
241 |
- { |
|
242 |
- Name: "name", |
|
243 |
- Description: "description", |
|
244 |
- StarCount: 0, |
|
245 |
- }, |
|
246 |
- }, |
|
247 |
- }, |
|
248 |
- { |
|
249 |
- filtersArgs: `{"stars":{"1":true}}`, |
|
250 |
- registryResults: []registrytypes.SearchResult{ |
|
251 |
- { |
|
252 |
- Name: "name", |
|
253 |
- Description: "description", |
|
254 |
- StarCount: 0, |
|
255 |
- }, |
|
256 |
- }, |
|
257 |
- expectedResults: []registrytypes.SearchResult{}, |
|
258 |
- }, |
|
259 |
- { |
|
260 |
- filtersArgs: `{"stars":{"1":true}}`, |
|
261 |
- registryResults: []registrytypes.SearchResult{ |
|
262 |
- { |
|
263 |
- Name: "name0", |
|
264 |
- Description: "description0", |
|
265 |
- StarCount: 0, |
|
266 |
- }, |
|
267 |
- { |
|
268 |
- Name: "name1", |
|
269 |
- Description: "description1", |
|
270 |
- StarCount: 1, |
|
271 |
- }, |
|
272 |
- }, |
|
273 |
- expectedResults: []registrytypes.SearchResult{ |
|
274 |
- { |
|
275 |
- Name: "name1", |
|
276 |
- Description: "description1", |
|
277 |
- StarCount: 1, |
|
278 |
- }, |
|
279 |
- }, |
|
280 |
- }, |
|
281 |
- { |
|
282 |
- filtersArgs: `{"stars":{"1":true}, "is-official":{"true":true}, "is-automated":{"true":true}}`, |
|
283 |
- registryResults: []registrytypes.SearchResult{ |
|
284 |
- { |
|
285 |
- Name: "name0", |
|
286 |
- Description: "description0", |
|
287 |
- StarCount: 0, |
|
288 |
- IsOfficial: true, |
|
289 |
- IsAutomated: true, |
|
290 |
- }, |
|
291 |
- { |
|
292 |
- Name: "name1", |
|
293 |
- Description: "description1", |
|
294 |
- StarCount: 1, |
|
295 |
- IsOfficial: false, |
|
296 |
- IsAutomated: true, |
|
297 |
- }, |
|
298 |
- { |
|
299 |
- Name: "name2", |
|
300 |
- Description: "description2", |
|
301 |
- StarCount: 1, |
|
302 |
- IsOfficial: true, |
|
303 |
- IsAutomated: false, |
|
304 |
- }, |
|
305 |
- { |
|
306 |
- Name: "name3", |
|
307 |
- Description: "description3", |
|
308 |
- StarCount: 2, |
|
309 |
- IsOfficial: true, |
|
310 |
- IsAutomated: true, |
|
311 |
- }, |
|
312 |
- }, |
|
313 |
- expectedResults: []registrytypes.SearchResult{ |
|
314 |
- { |
|
315 |
- Name: "name3", |
|
316 |
- Description: "description3", |
|
317 |
- StarCount: 2, |
|
318 |
- IsOfficial: true, |
|
319 |
- IsAutomated: true, |
|
320 |
- }, |
|
321 |
- }, |
|
322 |
- }, |
|
323 |
- } |
|
324 |
- for index, s := range successCases { |
|
325 |
- daemon := &Daemon{ |
|
326 |
- RegistryService: &FakeService{ |
|
327 |
- term: term, |
|
328 |
- results: s.registryResults, |
|
329 |
- }, |
|
330 |
- } |
|
331 |
- results, err := daemon.SearchRegistryForImages(context.Background(), s.filtersArgs, term, 25, nil, map[string][]string{}) |
|
332 |
- if err != nil { |
|
333 |
- t.Errorf("%d: %v", index, err) |
|
334 |
- } |
|
335 |
- if results.Query != term { |
|
336 |
- t.Errorf("%d: expected Query to be %s, got %s", index, term, results.Query) |
|
337 |
- } |
|
338 |
- if results.NumResults != len(s.expectedResults) { |
|
339 |
- t.Errorf("%d: expected NumResults to be %d, got %d", index, len(s.expectedResults), results.NumResults) |
|
340 |
- } |
|
341 |
- for _, result := range results.Results { |
|
342 |
- found := false |
|
343 |
- for _, expectedResult := range s.expectedResults { |
|
344 |
- if expectedResult.Name == result.Name && |
|
345 |
- expectedResult.Description == result.Description && |
|
346 |
- expectedResult.IsAutomated == result.IsAutomated && |
|
347 |
- expectedResult.IsOfficial == result.IsOfficial && |
|
348 |
- expectedResult.StarCount == result.StarCount { |
|
349 |
- found = true |
|
350 |
- break |
|
351 |
- } |
|
352 |
- } |
|
353 |
- if !found { |
|
354 |
- t.Errorf("%d: expected results %v, got %v", index, s.expectedResults, results.Results) |
|
355 |
- } |
|
356 |
- } |
|
357 |
- } |
|
358 |
-} |
... | ... |
@@ -223,7 +223,7 @@ func (daemon *Daemon) Cleanup(container *container.Container) { |
223 | 223 |
if err := daemon.conditionalUnmountOnCleanup(container); err != nil { |
224 | 224 |
// FIXME: remove once reference counting for graphdrivers has been refactored |
225 | 225 |
// Ensure that all the mounts are gone |
226 |
- if mountid, err := daemon.imageService.GetContainerMountID(container.ID, container.OS); err == nil { |
|
226 |
+ if mountid, err := daemon.imageService.GetLayerMountID(container.ID, container.OS); err == nil { |
|
227 | 227 |
daemon.cleanupMountsByID(mountid) |
228 | 228 |
} |
229 | 229 |
} |