Since docker/distribution has changed substantially, we have to adapt our code to the new version.
* Adapt the code to the changes in the docker/distribution API.
* Add support for Image Manifest Version 2, Schema 2.
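
For reference, the updated docker/distribution manifest API that the registry middleware now has to satisfy looks roughly like the sketch below. This is paraphrased from the method signatures visible in the hunks that follow, not copied from the vendored sources: manifest operations now take a context, `Put` returns the manifest digest, and tags are served through a dedicated `TagService` obtained from `Repository.Tags(ctx)`.

```go
// Paraphrase of the relevant docker/distribution API shape (assumed from the diff below).
import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

type ManifestService interface {
	Exists(ctx context.Context, dgst digest.Digest) (bool, error)
	Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error)
	Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error)
	Delete(ctx context.Context, dgst digest.Digest) error
}
```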
@@ -22195,6 +22195,21 @@
           "$ref": "v1.ImageSignature"
         },
         "description": "Signatures holds all signatures of the image."
+      },
+      "dockerImageSignatures": {
+        "type": "array",
+        "items": {
+          "$ref": "v1.Image.dockerImageSignatures"
+        },
+        "description": "DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1."
+      },
+      "dockerImageManifestMediaType": {
+        "type": "string",
+        "description": "DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2."
+      },
+      "dockerImageConfig": {
+        "type": "string",
+        "description": "DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2."
       }
     }
   },
@@ -22203,7 +22218,8 @@
     "description": "ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none.",
     "required": [
       "name",
-      "size"
+      "size",
+      "mediaType"
     ],
     "properties": {
       "name": {
@@ -22214,6 +22230,10 @@
         "type": "integer",
         "format": "int64",
         "description": "Size of the layer in bytes as defined by the underlying store."
+      },
+      "mediaType": {
+        "type": "string",
+        "description": "MediaType of the referenced object."
       }
     }
   },
@@ -22334,6 +22354,10 @@
       }
     }
   },
+  "v1.Image.dockerImageSignatures": {
+    "id": "v1.Image.dockerImageSignatures",
+    "properties": {}
+  },
   "v1.ImageStreamImage": {
     "id": "v1.ImageStreamImage",
     "description": "ImageStreamImage represents an Image that is retrieved by image name from an ImageStream.",
@@ -14,9 +14,14 @@ auth:
   openshift:
     realm: openshift
 middleware:
+  registry:
+    - name: openshift
   repository:
     - name: openshift
       options:
+        acceptschema2: false
        pullthrough: true
        enforcequota: false
        projectcachettl: 1m
+  storage:
+    - name: openshift
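
The `acceptschema2` option added above can also be toggled at run time through the `REGISTRY_MIDDLEWARE_REPOSITORY_OPENSHIFT_ACCEPTSCHEMA2` environment variable (see the repository middleware hunks further down). A small sketch of the resolution order, with the option map standing in for the parsed `options:` block and `os` assumed to be imported:

```go
// Sketch only: the environment variable, when set, wins over the config option.
func acceptSchema2(options map[string]interface{}) bool {
	if v := os.Getenv("REGISTRY_MIDDLEWARE_REPOSITORY_OPENSHIFT_ACCEPTSCHEMA2"); v != "" {
		return v == "true"
	}
	if b, ok := options["acceptschema2"].(bool); ok {
		return b
	}
	return false
}
```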
@@ -1959,12 +1959,15 @@ func deepCopy_v1beta3_Image(in imageapiv1beta3.Image, out *imageapiv1beta3.Image
     } else {
         out.DockerImageLayers = nil
     }
+    out.DockerImageManifestMediaType = in.DockerImageManifestMediaType
+    out.DockerImageConfig = in.DockerImageConfig
     return nil
 }
 
 func deepCopy_v1beta3_ImageLayer(in imageapiv1beta3.ImageLayer, out *imageapiv1beta3.ImageLayer, c *conversion.Cloner) error {
     out.Name = in.Name
     out.Size = in.Size
+    out.MediaType = in.MediaType
     return nil
 }
 
@@ -32,8 +32,7 @@ import (
     _ "github.com/docker/distribution/registry/storage/driver/gcs"
     _ "github.com/docker/distribution/registry/storage/driver/inmemory"
     _ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront"
-    _ "github.com/docker/distribution/registry/storage/driver/oss"
-    _ "github.com/docker/distribution/registry/storage/driver/s3"
+    _ "github.com/docker/distribution/registry/storage/driver/s3-aws"
     _ "github.com/docker/distribution/registry/storage/driver/swift"
 
     "github.com/openshift/origin/pkg/cmd/server/crypto"
@@ -437,8 +437,8 @@ func GetBootstrapClusterRoles() []authorizationapi.ClusterRole {
         Rules: []authorizationapi.PolicyRule{
             authorizationapi.NewRule("list").Groups(kapiGroup).Resources("limitranges", "resourcequotas").RuleOrDie(),
 
-            authorizationapi.NewRule("get", "delete").Groups(imageGroup).Resources("images").RuleOrDie(),
-            authorizationapi.NewRule("get").Groups(imageGroup).Resources("imagestreamimages", "imagestreamtags", "imagestreams/secrets").RuleOrDie(),
+            authorizationapi.NewRule("get", "delete").Groups(imageGroup).Resources("images", "imagestreamtags").RuleOrDie(),
+            authorizationapi.NewRule("get").Groups(imageGroup).Resources("imagestreamimages", "imagestreams/secrets").RuleOrDie(),
             authorizationapi.NewRule("get", "update").Groups(imageGroup).Resources("imagestreams").RuleOrDie(),
             authorizationapi.NewRule("create").Groups(imageGroup).Resources("imagestreammappings").RuleOrDie(),
         },
@@ -48,13 +48,9 @@ func (bh *blobHandler) Delete(w http.ResponseWriter, req *http.Request) {
         return
     }
 
-    bd, err := storage.RegistryBlobDeleter(bh.Namespace())
-    if err != nil {
-        bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
-        return
-    }
+    vacuum := storage.NewVacuum(bh.Context, dockerStorageDriver)
 
-    err = bd.Delete(bh, bh.Digest)
+    err := vacuum.RemoveBlob(bh.Digest.String())
     if err != nil {
         // ignore not found error
         switch t := err.(type) {
new file mode 100644
@@ -0,0 +1,30 @@
+package server
+
+import (
+    "github.com/docker/distribution"
+    "github.com/docker/distribution/context"
+    "github.com/docker/distribution/digest"
+    "github.com/docker/distribution/registry/middleware/registry"
+    "github.com/docker/distribution/registry/storage"
+)
+
+func init() {
+    middleware.RegisterOptions(storage.BlobDescriptorServiceFactory(&blobDescriptorServiceFactory{}))
+}
+
+// blobDescriptorServiceFactory needs to be able to work with blobs
+// directly without using links. This allows us to ignore the distribution
+// of blobs between repositories.
+type blobDescriptorServiceFactory struct{}
+
+func (bf *blobDescriptorServiceFactory) BlobAccessController(svc distribution.BlobDescriptorService) distribution.BlobDescriptorService {
+    return &blobDescriptorService{svc}
+}
+
+type blobDescriptorService struct {
+    distribution.BlobDescriptorService
+}
+
+func (bs *blobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+    return dockerRegistry.BlobStatter().Stat(ctx, dgst)
+}
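
A hypothetical caller's view of the override above: because `Stat` delegates to the registry-wide blob statter instead of per-repository link files, a descriptor can be resolved through any repository handle. `repo`, `ctx` and `dgst` below are assumed to exist in the surrounding code; this is an illustration, not part of the change.

```go
// Illustration only.
func statAnywhere(ctx context.Context, repo distribution.Repository, dgst digest.Digest) (distribution.Descriptor, error) {
	// With the blobDescriptorService override, this can succeed even if the blob
	// was uploaded through a different repository that shares the same store.
	return repo.Blobs(ctx).Stat(ctx, dgst)
}
```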
@@ -112,10 +112,10 @@ type quotaRestrictedBlobStore struct {
 var _ distribution.BlobStore = &quotaRestrictedBlobStore{}
 
 // Create wraps returned blobWriter with quota guard wrapper.
-func (bs *quotaRestrictedBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) {
+func (bs *quotaRestrictedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
     context.GetLogger(ctx).Debug("(*quotaRestrictedBlobStore).Create: starting")
 
-    bw, err := bs.BlobStore.Create(ctx)
+    bw, err := bs.BlobStore.Create(ctx, options...)
     if err != nil {
         return nil, err
     }
new file mode 100644
@@ -0,0 +1,23 @@
+package server
+
+import (
+    log "github.com/Sirupsen/logrus"
+
+    "github.com/docker/distribution"
+    "github.com/docker/distribution/context"
+    "github.com/docker/distribution/registry/middleware/registry"
+)
+
+// dockerRegistry represents a collection of repositories, addressable by name.
+// This variable holds the object created by docker/distribution. We capture it
+// in our package because there is no other way to access it; everywhere else
+// it is hidden from us.
+var dockerRegistry distribution.Namespace
+
+func init() {
+    middleware.Register("openshift", func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) {
+        log.Info("OpenShift registry middleware initializing")
+        dockerRegistry = registry
+        return dockerRegistry, nil
+    })
+}
@@ -11,6 +11,8 @@ import (
     "github.com/docker/distribution/context"
     "github.com/docker/distribution/digest"
     "github.com/docker/distribution/manifest/schema1"
+    "github.com/docker/distribution/manifest/schema2"
+    regapi "github.com/docker/distribution/registry/api/v2"
     repomw "github.com/docker/distribution/registry/middleware/repository"
     "github.com/docker/libtrust"
 
@@ -40,6 +42,10 @@ const (
     // objects. It takes a valid time duration string (e.g. "2m"). If empty, you get the default timeout. If
     // zero (e.g. "0m"), caching is disabled.
     ProjectCacheTTLEnvVar = "REGISTRY_MIDDLEWARE_REPOSITORY_OPENSHIFT_PROJECTCACHETTL"
+
+    // AcceptSchema2EnvVar is a boolean environment variable that enables acceptance of
+    // manifest schema v2 on manifest put requests.
+    AcceptSchema2EnvVar = "REGISTRY_MIDDLEWARE_REPOSITORY_OPENSHIFT_ACCEPTSCHEMA2"
 )
 
 var (
@@ -68,9 +74,17 @@ func init() {
     // DefaultRegistryClient before starting a registry.
     repomw.Register("openshift",
         func(ctx context.Context, repo distribution.Repository, options map[string]interface{}) (distribution.Repository, error) {
-            registryOSClient, kClient, err := DefaultRegistryClient.Clients()
-            if err != nil {
-                return nil, err
+            if dockerRegistry == nil {
+                panic(fmt.Sprintf("Configuration error: OpenShift registry middleware not activated"))
+            }
+
+            if dockerStorageDriver == nil {
+                panic(fmt.Sprintf("Configuration error: OpenShift storage driver middleware not activated"))
+            }
+
+            registryOSClient, kClient, errClients := DefaultRegistryClient.Clients()
+            if errClients != nil {
+                return nil, errClients
             }
             if quotaEnforcing == nil {
                 quotaEnforcing = newQuotaEnforcingConfig(ctx, os.Getenv(EnforceQuotaEnvVar), os.Getenv(ProjectCacheTTLEnvVar), options)
@@ -102,6 +116,8 @@ type repository struct {
     // if true, the repository will check remote references in the image stream to support pulling "through"
     // from a remote repository
     pullthrough bool
+    // acceptschema2 indicates whether manifests of schema version 2 are accepted (when false they are rejected)
+    acceptschema2 bool
     // cachedLayers remembers a mapping of layer digest to repositories recently seen with that image to avoid
     // having to check every potential upstream repository when a blob request is made. The cache is useful only
     // when session affinity is on for the registry, but in practice the first pull will fill the cache.
@@ -124,16 +140,19 @@ func newRepositoryWithClient(
         return nil, fmt.Errorf("%s is required", DockerRegistryURLEnvVar)
     }
 
-    pullthrough := false
-    if value, ok := options["pullthrough"]; ok {
-        if b, ok := value.(bool); ok {
-            pullthrough = b
-        }
+    pullthrough := getBoolOption("pullthrough", false, options)
+
+    acceptschema2 := false
+
+    if os.Getenv(AcceptSchema2EnvVar) != "" {
+        acceptschema2 = os.Getenv(AcceptSchema2EnvVar) == "true"
+    } else {
+        acceptschema2 = getBoolOption("acceptschema2", false, options)
     }
 
-    nameParts := strings.SplitN(repo.Name(), "/", 2)
+    nameParts := strings.SplitN(repo.Named().Name(), "/", 2)
     if len(nameParts) != 2 {
-        return nil, fmt.Errorf("invalid repository name %q: it must be of the format <project>/<name>", repo.Name())
+        return nil, fmt.Errorf("invalid repository name %q: it must be of the format <project>/<name>", repo.Named().Name())
     }
 
     return &repository{
@@ -147,10 +166,21 @@ func newRepositoryWithClient(
         namespace:     nameParts[0],
         name:          nameParts[1],
         pullthrough:   pullthrough,
+        acceptschema2: acceptschema2,
         cachedLayers:  cachedLayers,
     }, nil
 }
 
+func getBoolOption(name string, defval bool, options map[string]interface{}) bool {
+    if value, ok := options[name]; ok {
+        var b bool
+        if b, ok = value.(bool); ok {
+            return b
+        }
+    }
+    return defval
+}
+
 // Manifests returns r, which implements distribution.ManifestService.
 func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
     if r.ctx == ctx {
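
A quick illustration of the `getBoolOption` helper introduced above: absent keys and values of the wrong type fall back to the supplied default.

```go
// Illustration only.
func exampleOptions() {
	options := map[string]interface{}{
		"pullthrough":   true,
		"acceptschema2": "yes", // wrong type, ignored
	}
	_ = getBoolOption("pullthrough", false, options)   // true
	_ = getBoolOption("acceptschema2", false, options) // false: value is not a bool
	_ = getBoolOption("enforcequota", true, options)   // true: key is absent, default wins
}
```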
@@ -186,25 +216,18 @@ func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
     }
 
     return bs
-
 }
 
-// Tags lists the tags under the named repository.
-func (r *repository) Tags() ([]string, error) {
-    imageStream, err := r.getImageStream()
-    if err != nil {
-        return []string{}, nil
+// Tags returns a reference to this repository tag service.
+func (r *repository) Tags(ctx context.Context) distribution.TagService {
+    return &tagService{
+        TagService: r.Repository.Tags(ctx),
+        repo:       r,
     }
-    tags := []string{}
-    for tag := range imageStream.Status.Tags {
-        tags = append(tags, tag)
-    }
-
-    return tags, nil
 }
 
 // Exists returns true if the manifest specified by dgst exists.
-func (r *repository) Exists(dgst digest.Digest) (bool, error) {
+func (r *repository) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
     image, err := r.getImage(dgst)
     if err != nil {
         return false, err
@@ -212,18 +235,8 @@ func (r *repository) Exists(dgst digest.Digest) (bool, error) {
     return image != nil, nil
 }
 
-// ExistsByTag returns true if the manifest with tag `tag` exists.
-func (r *repository) ExistsByTag(tag string) (bool, error) {
-    imageStream, err := r.getImageStream()
-    if err != nil {
-        return false, err
-    }
-    _, found := imageStream.Status.Tags[tag]
-    return found, nil
-}
-
 // Get retrieves the manifest with digest `dgst`.
-func (r *repository) Get(dgst digest.Digest) (*schema1.SignedManifest, error) {
+func (r *repository) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
     if _, err := r.getImageStreamImage(dgst); err != nil {
         context.GetLogger(r.ctx).Errorf("error retrieving ImageStreamImage %s/%s@%s: %v", r.namespace, r.name, dgst.String(), err)
         return nil, err
@@ -236,137 +249,40 @@ func (r *repository) Get(dgst digest.Digest) (*schema1.SignedManifest, error) {
     }
 
     ref := imageapi.DockerImageReference{Namespace: r.namespace, Name: r.name, Registry: r.registryAddr}
-    return r.manifestFromImageWithCachedLayers(image, ref.DockerClientDefaults().Exact())
-}
+    manifest, err := r.manifestFromImageWithCachedLayers(image, ref.DockerClientDefaults().Exact())
 
-// Enumerate retrieves digests of manifest revisions in particular repository
-func (r *repository) Enumerate() ([]digest.Digest, error) {
-    panic("not implemented")
+    return manifest, err
 }
 
-// GetByTag retrieves the named manifest with the provided tag
-func (r *repository) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) {
-    for _, opt := range options {
-        if err := opt(r); err != nil {
-            return nil, err
-        }
-    }
-
-    // find the image mapped to this tag
-    imageStreamTag, err := r.getImageStreamTag(tag)
-    if err != nil {
-        // TODO: typed errors
-        context.GetLogger(r.ctx).Errorf("error getting ImageStreamTag %q: %v", tag, err)
-        return nil, err
-    }
-    image := &imageStreamTag.Image
-
-    ref, referenceErr := imageapi.ParseDockerImageReference(image.DockerImageReference)
-    if referenceErr == nil {
-        ref.Namespace = r.namespace
-        ref.Name = r.name
-        ref.Registry = r.registryAddr
-    }
-    defaultRef := ref.DockerClientDefaults()
-    cacheName := defaultRef.AsRepository().Exact()
-
-    // if we have a local manifest, use it
-    if len(image.DockerImageManifest) > 0 {
-        return r.manifestFromImageWithCachedLayers(image, cacheName)
-    }
-
-    dgst, err := digest.ParseDigest(imageStreamTag.Image.Name)
-    if err != nil {
-        context.GetLogger(r.ctx).Errorf("error parsing digest %q: %v", imageStreamTag.Image.Name, err)
-        return nil, err
-    }
-
-    if localImage, err := r.getImage(dgst); err != nil {
-        // if the image is managed by OpenShift and we cannot load the image, report an error
-        if image.Annotations[imageapi.ManagedByOpenShiftAnnotation] == "true" {
-            context.GetLogger(r.ctx).Errorf("error getting image %q: %v", dgst.String(), err)
-            return nil, err
-        }
-    } else {
-        // if we have a local manifest, use it
-        if len(localImage.DockerImageManifest) > 0 {
-            return r.manifestFromImageWithCachedLayers(localImage, cacheName)
-        }
-    }
-
-    // allow pullthrough to be disabled
-    if !r.pullthrough {
-        return nil, distribution.ErrManifestBlobUnknown{Digest: dgst}
-    }
-
-    // check the previous error here
-    if referenceErr != nil {
-        context.GetLogger(r.ctx).Errorf("error parsing image %q: %v", image.DockerImageReference, referenceErr)
-        return nil, referenceErr
-    }
-
-    return r.pullthroughGetByTag(image, ref, cacheName, options...)
-}
-
-// pullthroughGetByTag attempts to load the given image manifest from the remote server defined by ref, using cacheName to store any cached layers.
-func (r *repository) pullthroughGetByTag(image *imageapi.Image, ref imageapi.DockerImageReference, cacheName string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) {
-    defaultRef := ref.DockerClientDefaults()
-
-    retriever := r.importContext()
+// Put creates or updates the named manifest.
+func (r *repository) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
+    var canonical []byte
 
-    repo, err := retriever.Repository(r.ctx, defaultRef.RegistryURL(), defaultRef.RepositoryName(), false)
+    // Resolve the payload in the manifest.
+    mediatype, payload, err := manifest.Payload()
     if err != nil {
-        context.GetLogger(r.ctx).Errorf("error getting remote repository for image %q: %v", image.DockerImageReference, err)
-        return nil, err
+        return "", err
     }
 
-    // get a manifest context
-    manifests, err := repo.Manifests(r.ctx)
-    if err != nil {
-        context.GetLogger(r.ctx).Errorf("error getting manifests for image %q: %v", image.DockerImageReference, err)
-        return nil, err
+    switch manifest.(type) {
+    case *schema1.SignedManifest:
+        canonical = manifest.(*schema1.SignedManifest).Canonical
+    case *schema2.DeserializedManifest:
+        canonical = payload
+    default:
+        err = fmt.Errorf("unrecognized manifest type %T", manifest)
+        return "", regapi.ErrorCodeManifestInvalid.WithDetail(err)
     }
 
-    // fetch this by image
-    if len(ref.ID) > 0 {
-        dgst, err := digest.ParseDigest(ref.ID)
-        if err != nil {
-            context.GetLogger(r.ctx).Errorf("error getting manifests for image %q: %v", image.DockerImageReference, err)
-            return nil, err
+    if !r.acceptschema2 {
+        if _, ok := manifest.(*schema1.SignedManifest); !ok {
+            err = fmt.Errorf("schema version 2 disabled")
+            return "", regapi.ErrorCodeManifestInvalid.WithDetail(err)
         }
-        manifest, err := manifests.Get(dgst)
-        if err != nil {
-            context.GetLogger(r.ctx).Errorf("error getting manifest from remote server for image %q: %v", image.DockerImageReference, err)
-            return nil, err
-        }
-        r.rememberLayers(manifest, cacheName)
-        return manifest, nil
-    }
-
-    // fetch this by tag
-    manifest, err := manifests.GetByTag(ref.Tag, options...)
-    if err != nil {
-        context.GetLogger(r.ctx).Errorf("error getting manifest from remote server for image %q: %v", image.DockerImageReference, err)
-        return nil, err
-    }
-
-    r.rememberLayers(manifest, cacheName)
-    return manifest, nil
-}
-
-// Put creates or updates the named manifest.
-func (r *repository) Put(manifest *schema1.SignedManifest) error {
-    // Resolve the payload in the manifest.
-    payload, err := manifest.Payload()
-    if err != nil {
-        return err
     }
 
     // Calculate digest
-    dgst, err := digest.FromBytes(payload)
-    if err != nil {
-        return err
-    }
+    dgst := digest.FromBytes(canonical)
 
     // Upload to openshift
     ism := imageapi.ImageStreamMapping{
@@ -374,7 +290,6 @@ func (r *repository) Put(manifest *schema1.SignedManifest) error {
             Namespace: r.namespace,
             Name:      r.name,
         },
-        Tag: manifest.Tag,
         Image: imageapi.Image{
             ObjectMeta: kapi.ObjectMeta{
                 Name: dgst.String(),
@@ -382,32 +297,40 @@ func (r *repository) Put(manifest *schema1.SignedManifest) error {
                 imageapi.ManagedByOpenShiftAnnotation: "true",
             },
         },
-        DockerImageReference: fmt.Sprintf("%s/%s/%s@%s", r.registryAddr, r.namespace, r.name, dgst.String()),
-        DockerImageManifest:  string(manifest.Raw),
+        DockerImageReference:         fmt.Sprintf("%s/%s/%s@%s", r.registryAddr, r.namespace, r.name, dgst.String()),
+        DockerImageManifest:          string(payload),
+        DockerImageManifestMediaType: mediatype,
         },
     }
 
-    if err := r.fillImageWithMetadata(manifest, &ism.Image); err != nil {
-        return err
+    for _, option := range options {
+        if opt, ok := option.(distribution.WithTagOption); ok {
+            ism.Tag = opt.Tag
+            break
+        }
+    }
+
+    if err = r.fillImageWithMetadata(manifest, &ism.Image); err != nil {
+        return "", err
     }
 
-    if err := r.registryOSClient.ImageStreamMappings(r.namespace).Create(&ism); err != nil {
+    if err = r.registryOSClient.ImageStreamMappings(r.namespace).Create(&ism); err != nil {
         // if the error was that the image stream wasn't found, try to auto provision it
         statusErr, ok := err.(*kerrors.StatusError)
         if !ok {
             context.GetLogger(r.ctx).Errorf("error creating ImageStreamMapping: %s", err)
-            return err
+            return "", err
         }
 
         if quotautil.IsErrorQuotaExceeded(statusErr) {
             context.GetLogger(r.ctx).Errorf("denied creating ImageStreamMapping: %v", statusErr)
-            return distribution.ErrAccessDenied
+            return "", distribution.ErrAccessDenied
         }
 
         status := statusErr.ErrStatus
         if status.Code != http.StatusNotFound || (strings.ToLower(status.Details.Kind) != "imagestream" /*pre-1.2*/ && strings.ToLower(status.Details.Kind) != "imagestreams") || status.Details.Name != r.name {
            context.GetLogger(r.ctx).Errorf("error creating ImageStreamMapping: %s", err)
-            return err
+            return "", err
         }
 
         stream := imageapi.ImageStream{
@@ -416,78 +339,110 @@ func (r *repository) Put(manifest *schema1.SignedManifest) error {
            },
        }
 
-        client, ok := UserClientFrom(r.ctx)
+        uclient, ok := UserClientFrom(r.ctx)
        if !ok {
            context.GetLogger(r.ctx).Errorf("error creating user client to auto provision image stream: Origin user client unavailable")
-            return statusErr
+            return "", statusErr
        }
 
-        if _, err := client.ImageStreams(r.namespace).Create(&stream); err != nil {
+        if _, err := uclient.ImageStreams(r.namespace).Create(&stream); err != nil {
            if quotautil.IsErrorQuotaExceeded(err) {
                context.GetLogger(r.ctx).Errorf("denied creating ImageStream: %v", err)
-                return distribution.ErrAccessDenied
+                return "", distribution.ErrAccessDenied
            }
            context.GetLogger(r.ctx).Errorf("error auto provisioning ImageStream: %s", err)
-            return statusErr
+            return "", statusErr
        }
 
        // try to create the ISM again
        if err := r.registryOSClient.ImageStreamMappings(r.namespace).Create(&ism); err != nil {
            if quotautil.IsErrorQuotaExceeded(err) {
                context.GetLogger(r.ctx).Errorf("denied a creation of ImageStreamMapping: %v", err)
-                return distribution.ErrAccessDenied
+                return "", distribution.ErrAccessDenied
            }
            context.GetLogger(r.ctx).Errorf("error creating ImageStreamMapping: %s", err)
-            return err
+            return "", err
        }
    }
 
-    // Grab each json signature and store them.
-    signatures, err := manifest.Signatures()
-    if err != nil {
-        return err
-    }
+    return dgst, nil
+}
 
-    for _, signature := range signatures {
-        if err := r.Signatures().Put(dgst, signature); err != nil {
-            context.GetLogger(r.ctx).Errorf("error storing signature: %s", err)
-            return err
-        }
+// fillImageWithMetadata fills a given image with metadata.
+func (r *repository) fillImageWithMetadata(manifest distribution.Manifest, image *imageapi.Image) error {
+    if deserializedManifest, ok := manifest.(*schema2.DeserializedManifest); ok {
+        r.deserializedManifestFillImageMetadata(deserializedManifest, image)
+    } else if signedManifest, ok := manifest.(*schema1.SignedManifest); ok {
+        r.signedManifestFillImageMetadata(signedManifest, image)
+    } else {
+        return fmt.Errorf("unrecognized manifest type %T", manifest)
    }
 
+    context.GetLogger(r.ctx).Infof("total size of image %s with docker ref %s: %d", image.Name, image.DockerImageReference, image.DockerImageMetadata.Size)
    return nil
 }
 
-// fillImageWithMetadata fills a given image with metadata. Also correct layer sizes with blob sizes. Newer
+// signedManifestFillImageMetadata fills a given image with metadata. It also corrects layer sizes with blob sizes. Newer
 // Docker client versions don't set layer sizes in the manifest at all. Origin master needs correct layer
 // sizes for proper image quota support. That's why we need to fill the metadata in the registry.
-func (r *repository) fillImageWithMetadata(manifest *schema1.SignedManifest, image *imageapi.Image) error {
+func (r *repository) signedManifestFillImageMetadata(manifest *schema1.SignedManifest, image *imageapi.Image) error {
+    signatures, err := manifest.Signatures()
+    if err != nil {
+        return err
+    }
+
+    for _, signDigest := range signatures {
+        image.DockerImageSignatures = append(image.DockerImageSignatures, signDigest)
+    }
+
    if err := imageapi.ImageWithMetadata(image); err != nil {
        return err
    }
 
+    refs := manifest.References()
+
    layerSet := sets.NewString()
-    size := int64(0)
+    image.DockerImageMetadata.Size = int64(0)
 
    blobs := r.Blobs(r.ctx)
    for i := range image.DockerImageLayers {
        layer := &image.DockerImageLayers[i]
        // DockerImageLayers represents manifest.Manifest.FSLayers in reversed order
-        desc, err := blobs.Stat(r.ctx, manifest.Manifest.FSLayers[len(image.DockerImageLayers)-i-1].BlobSum)
+        desc, err := blobs.Stat(r.ctx, refs[len(image.DockerImageLayers)-i-1].Digest)
        if err != nil {
            context.GetLogger(r.ctx).Errorf("failed to stat blobs %s of image %s", layer.Name, image.DockerImageReference)
            return err
        }
+        if layer.MediaType == "" {
+            if desc.MediaType != "" {
+                layer.MediaType = desc.MediaType
+            } else {
+                layer.MediaType = schema1.MediaTypeManifestLayer
+            }
+        }
        layer.LayerSize = desc.Size
        // count empty layer just once (empty layer may actually have non-zero size)
        if !layerSet.Has(layer.Name) {
-            size += desc.Size
+            image.DockerImageMetadata.Size += desc.Size
            layerSet.Insert(layer.Name)
        }
    }
 
-    image.DockerImageMetadata.Size = size
-    context.GetLogger(r.ctx).Infof("total size of image %s with docker ref %s: %d", image.Name, image.DockerImageReference, size)
+    return nil
+}
+
+// deserializedManifestFillImageMetadata fills a given image with metadata.
+func (r *repository) deserializedManifestFillImageMetadata(manifest *schema2.DeserializedManifest, image *imageapi.Image) error {
+    configBytes, err := r.Blobs(r.ctx).Get(r.ctx, manifest.Config.Digest)
+    if err != nil {
+        context.GetLogger(r.ctx).Errorf("failed to get image config %s: %v", manifest.Config.Digest.String(), err)
+        return err
+    }
+    image.DockerImageConfig = string(configBytes)
+
+    if err := imageapi.ImageWithMetadata(image); err != nil {
+        return err
+    }
 
    return nil
 }
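
To summarize the `Put` changes above: the digest recorded in the ImageStreamMapping is computed over the canonical bytes of the manifest, and those bytes differ by schema. A minimal sketch of that selection (my own summary, using the same vendored packages as the code above):

```go
func canonicalBytes(m distribution.Manifest) ([]byte, digest.Digest, error) {
	_, payload, err := m.Payload()
	if err != nil {
		return nil, "", err
	}
	canonical := payload // schema 2: the payload itself
	if sm, ok := m.(*schema1.SignedManifest); ok {
		canonical = sm.Canonical // schema 1: the signature-stripped form
	}
	return canonical, digest.FromBytes(canonical), nil
}
```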
@@ -495,12 +450,12 @@ func (r *repository) fillImageWithMetadata(manifest *schema1.SignedManifest, ima
 // Delete deletes the manifest with digest `dgst`. Note: Image resources
 // in OpenShift are deleted via 'oadm prune images'. This function deletes
 // the content related to the manifest in the registry's storage (signatures).
-func (r *repository) Delete(dgst digest.Digest) error {
+func (r *repository) Delete(ctx context.Context, dgst digest.Digest) error {
     ms, err := r.Repository.Manifests(r.ctx)
     if err != nil {
         return err
     }
-    return ms.Delete(dgst)
+    return ms.Delete(ctx, dgst)
 }
 
 // importContext loads secrets for this image stream and returns a context for getting distribution
@@ -508,7 +463,7 @@ func (r *repository) Delete(dgst digest.Digest) error {
 func (r *repository) importContext() importer.RepositoryRetriever {
     secrets, err := r.registryOSClient.ImageStreamSecrets(r.namespace).Secrets(r.name, kapi.ListOptions{})
     if err != nil {
-        context.GetLogger(r.ctx).Errorf("error getting secrets for repository %q: %v", r.Name(), err)
+        context.GetLogger(r.ctx).Errorf("error getting secrets for repository %q: %v", r.Named().Name(), err)
         secrets = &kapi.SecretList{}
     }
     credentials := importer.NewCredentialsForSecrets(secrets.Items)
@@ -525,12 +480,6 @@ func (r *repository) getImage(dgst digest.Digest) (*imageapi.Image, error) {
     return r.registryOSClient.Images().Get(dgst.String())
 }
 
-// getImageStreamTag retrieves the Image with tag `tag` for the ImageStream
-// associated with r.
-func (r *repository) getImageStreamTag(tag string) (*imageapi.ImageStreamTag, error) {
-    return r.registryOSClient.ImageStreamTags(r.namespace).Get(r.name, tag)
-}
-
 // getImageStreamImage retrieves the Image with digest `dgst` for the ImageStream
 // associated with r. This ensures the image belongs to the image stream.
 func (r *repository) getImageStreamImage(dgst digest.Digest) (*imageapi.ImageStreamImage, error) {
@@ -538,50 +487,71 @@ func (r *repository) getImageStreamImage(dgst digest.Digest) (*imageapi.ImageStr
 }
 
 // rememberLayers caches the provided layers
-func (r *repository) rememberLayers(manifest *schema1.SignedManifest, cacheName string) {
+func (r *repository) rememberLayers(manifest distribution.Manifest, cacheName string) {
     if !r.pullthrough {
         return
     }
     // remember the layers in the cache as an optimization to avoid searching all remote repositories
-    for _, layer := range manifest.FSLayers {
-        r.cachedLayers.RememberDigest(layer.BlobSum, cacheName)
+    for _, layer := range manifest.References() {
+        r.cachedLayers.RememberDigest(layer.Digest, cacheName)
     }
 }
 
 // manifestFromImageWithCachedLayers loads the image and then caches any located layers
-func (r *repository) manifestFromImageWithCachedLayers(image *imageapi.Image, cacheName string) (*schema1.SignedManifest, error) {
-    manifest, err := r.manifestFromImage(image)
+func (r *repository) manifestFromImageWithCachedLayers(image *imageapi.Image, cacheName string) (manifest distribution.Manifest, err error) {
+    if image.DockerImageManifestMediaType == schema2.MediaTypeManifest {
+        manifest, err = r.deserializedManifestFromImage(image)
+    } else {
+        manifest, err = r.signedManifestFromImage(image)
+    }
+
     if err != nil {
-        return nil, err
+        return
     }
+
     r.rememberLayers(manifest, cacheName)
-    return manifest, nil
+    return
 }
 
-// manifestFromImage converts an Image to a SignedManifest.
-func (r *repository) manifestFromImage(image *imageapi.Image) (*schema1.SignedManifest, error) {
-    dgst, err := digest.ParseDigest(image.Name)
-    if err != nil {
-        return nil, err
+// signedManifestFromImage converts an Image to a SignedManifest.
+func (r *repository) signedManifestFromImage(image *imageapi.Image) (*schema1.SignedManifest, error) {
+    if image.DockerImageManifestMediaType == schema2.MediaTypeManifest {
+        context.GetLogger(r.ctx).Errorf("old client pulling new image %s", image.DockerImageReference)
+        return nil, fmt.Errorf("unable to convert new image to old one")
     }
 
     raw := []byte(image.DockerImageManifest)
-
     // prefer signatures from the manifest
     if _, err := libtrust.ParsePrettySignature(raw, "signatures"); err == nil {
-        sm := schema1.SignedManifest{Raw: raw}
-        if err := json.Unmarshal(raw, &sm); err == nil {
+        sm := schema1.SignedManifest{Canonical: raw}
+        if err = json.Unmarshal(raw, &sm); err == nil {
            return &sm, nil
        }
    }
 
-    // Fetch the signatures for the manifest
-    signatures, err := r.Signatures().Get(dgst)
+    dgst, err := digest.ParseDigest(image.Name)
    if err != nil {
        return nil, err
    }
 
-    jsig, err := libtrust.NewJSONSignature(raw, signatures...)
+    var signBytes [][]byte
+    if len(image.DockerImageSignatures) == 0 {
+        // Fetch the signatures for the manifest
+        signatures, errSign := r.getSignatures(dgst)
+        if errSign != nil {
+            return nil, errSign
+        }
+
+        for _, signatureDigest := range signatures {
+            signBytes = append(signBytes, []byte(signatureDigest))
+        }
+    } else {
+        for _, sign := range image.DockerImageSignatures {
+            signBytes = append(signBytes, sign)
+        }
+    }
+
+    jsig, err := libtrust.NewJSONSignature(raw, signBytes...)
    if err != nil {
        return nil, err
    }
@@ -593,8 +563,39 @@ func (r *repository) manifestFromImage(image *imageapi.Image) (*schema1.SignedMa
 }
 
 var sm schema1.SignedManifest
-    if err := json.Unmarshal(raw, &sm); err != nil {
+    if err = json.Unmarshal(raw, &sm); err != nil {
         return nil, err
     }
     return &sm, err
 }
+
+func (r *repository) getSignatures(dgst digest.Digest) ([]digest.Digest, error) {
+    // We cannot use r.Repository here: docker/distribution wraps every method that
+    // reads or writes blobs for its notifications service. We need a raw
+    // repository, without any wrappers.
+    repository, err := dockerRegistry.Repository(r.ctx, r.Named())
+    if err != nil {
+        return nil, err
+    }
+
+    manifestService, err := repository.Manifests(r.ctx)
+    if err != nil {
+        return nil, err
+    }
+
+    signaturesGetter, ok := manifestService.(distribution.SignaturesGetter)
+    if !ok {
+        return nil, fmt.Errorf("unable to convert ManifestService into SignaturesGetter")
+    }
+
+    return signaturesGetter.GetSignatures(r.ctx, dgst)
+}
+
+// deserializedManifestFromImage converts an Image to a DeserializedManifest.
+func (r *repository) deserializedManifestFromImage(image *imageapi.Image) (*schema2.DeserializedManifest, error) {
+    var manifest schema2.DeserializedManifest
+    if err := json.Unmarshal([]byte(image.DockerImageManifest), &manifest); err != nil {
+        return nil, err
+    }
+    return &manifest, nil
+}
new file mode 100644
@@ -0,0 +1,26 @@
+package server
+
+import (
+    log "github.com/Sirupsen/logrus"
+
+    storagedriver "github.com/docker/distribution/registry/storage/driver"
+    registrystorage "github.com/docker/distribution/registry/storage/driver/middleware"
+)
+
+// dockerStorageDriver gives access to the blob store.
+// This variable holds the object created by docker/distribution. We capture it
+// in our package because there is no other way to access it; everywhere else
+// it is hidden from us.
+var dockerStorageDriver storagedriver.StorageDriver
+
+func init() {
+    registrystorage.Register("openshift", func(driver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) {
+        log.Info("OpenShift middleware for storage driver initializing")
+
+        // We can do this because of the initialization order of the middlewares:
+        // the storage driver is required to create the registry, so this assignment
+        // is guaranteed to happen before the registry and repository middlewares run.
+        dockerStorageDriver = driver
+        return dockerStorageDriver, nil
+    })
+}
| ... | ... |
@@ -0,0 +1,219 @@ |
| 0 |
+package server |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "github.com/docker/distribution" |
|
| 4 |
+ "github.com/docker/distribution/context" |
|
| 5 |
+ "github.com/docker/distribution/digest" |
|
| 6 |
+ |
|
| 7 |
+ kapi "k8s.io/kubernetes/pkg/api" |
|
| 8 |
+ |
|
| 9 |
+ imageapi "github.com/openshift/origin/pkg/image/api" |
|
| 10 |
+ quotautil "github.com/openshift/origin/pkg/quota/util" |
|
| 11 |
+) |
|
| 12 |
+ |
|
| 13 |
+type tagService struct {
|
|
| 14 |
+ distribution.TagService |
|
| 15 |
+ |
|
| 16 |
+ repo *repository |
|
| 17 |
+} |
|
| 18 |
+ |
|
| 19 |
+func (t tagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
|
|
| 20 |
+ imageStream, err := t.repo.getImageStream() |
|
| 21 |
+ if err != nil {
|
|
| 22 |
+ context.GetLogger(ctx).Errorf("error retrieving ImageStream %s/%s: %v", t.repo.namespace, t.repo.name, err)
|
|
| 23 |
+ return distribution.Descriptor{}, distribution.ErrRepositoryUnknown{Name: t.repo.Named().Name()}
|
|
| 24 |
+ } |
|
| 25 |
+ |
|
| 26 |
+ te := imageapi.LatestTaggedImage(imageStream, tag) |
|
| 27 |
+ if te == nil {
|
|
| 28 |
+ return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag}
|
|
| 29 |
+ } |
|
| 30 |
+ dgst, err := digest.ParseDigest(te.Image) |
|
| 31 |
+ if err != nil {
|
|
| 32 |
+ return distribution.Descriptor{}, err
|
|
| 33 |
+ } |
|
| 34 |
+ |
|
| 35 |
+ if !t.repo.pullthrough {
|
|
| 36 |
+ image, err := t.repo.getImage(dgst) |
|
| 37 |
+ if err != nil {
|
|
| 38 |
+ return distribution.Descriptor{}, err
|
|
| 39 |
+ } |
|
| 40 |
+ |
|
| 41 |
+ if !isImageManaged(image) {
|
|
| 42 |
+ return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag}
|
|
| 43 |
+ } |
|
| 44 |
+ } |
|
| 45 |
+ |
|
| 46 |
+ return distribution.Descriptor{Digest: dgst}, nil
|
|
| 47 |
+} |
|
| 48 |
+ |
|
| 49 |
+func (t tagService) All(ctx context.Context) ([]string, error) {
|
|
| 50 |
+ tags := []string{}
|
|
| 51 |
+ |
|
| 52 |
+ imageStream, err := t.repo.getImageStream() |
|
| 53 |
+ if err != nil {
|
|
| 54 |
+ context.GetLogger(ctx).Errorf("error retrieving ImageStream %s/%s: %v", t.repo.namespace, t.repo.name, err)
|
|
| 55 |
+ return tags, distribution.ErrRepositoryUnknown{Name: t.repo.Named().Name()}
|
|
| 56 |
+ } |
|
| 57 |
+ |
|
| 58 |
+ managedImages := make(map[string]bool) |
|
| 59 |
+ |
|
| 60 |
+ for tag, history := range imageStream.Status.Tags {
|
|
| 61 |
+ if len(history.Items) == 0 {
|
|
| 62 |
+ continue |
|
| 63 |
+ } |
|
| 64 |
+ |
|
| 65 |
+ if t.repo.pullthrough {
|
|
| 66 |
+ tags = append(tags, tag) |
|
| 67 |
+ continue |
|
| 68 |
+ } |
|
| 69 |
+ |
|
| 70 |
+ managed, found := managedImages[history.Items[0].Image] |
|
| 71 |
+ if !found {
|
|
| 72 |
+ dgst, err := digest.ParseDigest(history.Items[0].Image) |
|
| 73 |
+ if err != nil {
|
|
| 74 |
+ context.GetLogger(ctx).Errorf("bad digest %s: %v", history.Items[0].Image, err)
|
|
| 75 |
+ continue |
|
| 76 |
+ } |
|
| 77 |
+ |
|
| 78 |
+ image, err := t.repo.getImage(dgst) |
|
| 79 |
+ if err != nil {
|
|
| 80 |
+ context.GetLogger(ctx).Errorf("unable to get image %s/%s %s: %v", t.repo.namespace, t.repo.name, dgst.String(), err)
|
|
| 81 |
+ continue |
|
| 82 |
+ } |
|
| 83 |
+ managed = isImageManaged(image) |
|
| 84 |
+ managedImages[history.Items[0].Image] = managed |
|
| 85 |
+ } |
|
| 86 |
+ |
|
| 87 |
+ if !managed {
|
|
| 88 |
+ continue |
|
| 89 |
+ } |
|
| 90 |
+ |
|
| 91 |
+ tags = append(tags, tag) |
|
| 92 |
+ } |
|
| 93 |
+ return tags, nil |
|
| 94 |
+} |
|
| 95 |
+ |
|
| 96 |
+func (t tagService) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) {
|
|
| 97 |
+ tags := []string{}
|
|
| 98 |
+ |
|
| 99 |
+ imageStream, err := t.repo.getImageStream() |
|
| 100 |
+ if err != nil {
|
|
| 101 |
+ context.GetLogger(ctx).Errorf("error retrieving ImageStream %s/%s: %v", t.repo.namespace, t.repo.name, err)
|
|
| 102 |
+ return tags, distribution.ErrRepositoryUnknown{Name: t.repo.Named().Name()}
|
|
| 103 |
+ } |
|
| 104 |
+ |
|
| 105 |
+ managedImages := make(map[string]bool) |
|
| 106 |
+ |
|
| 107 |
+ for tag, history := range imageStream.Status.Tags {
|
|
| 108 |
+ if len(history.Items) == 0 {
|
|
| 109 |
+ continue |
|
| 110 |
+ } |
|
| 111 |
+ |
|
| 112 |
+ dgst, err := digest.ParseDigest(history.Items[0].Image) |
|
| 113 |
+ if err != nil {
|
|
| 114 |
+ context.GetLogger(ctx).Errorf("bad digest %s: %v", history.Items[0].Image, err)
|
|
| 115 |
+ continue |
|
| 116 |
+ } |
|
| 117 |
+ |
|
| 118 |
+ if dgst != desc.Digest {
|
|
| 119 |
+ continue |
|
| 120 |
+ } |
|
| 121 |
+ |
|
| 122 |
+ if t.repo.pullthrough {
|
|
| 123 |
+ tags = append(tags, tag) |
|
| 124 |
+ continue |
|
| 125 |
+ } |
|
| 126 |
+ |
|
| 127 |
+ managed, found := managedImages[history.Items[0].Image] |
|
| 128 |
+ if !found {
|
|
| 129 |
+ image, err := t.repo.getImage(dgst) |
|
| 130 |
+ if err != nil {
|
|
| 131 |
+ context.GetLogger(ctx).Errorf("unable to get image %s/%s %s: %v", t.repo.namespace, t.repo.name, dgst.String(), err)
|
|
| 132 |
+ continue |
|
| 133 |
+ } |
|
| 134 |
+ managed = isImageManaged(image) |
|
| 135 |
+ managedImages[history.Items[0].Image] = managed |
|
| 136 |
+ } |
|
| 137 |
+ |
|
| 138 |
+ if !managed {
|
|
| 139 |
+ continue |
|
| 140 |
+ } |
|
| 141 |
+ |
|
| 142 |
+ tags = append(tags, tag) |
|
| 143 |
+ } |
|
| 144 |
+ |
|
| 145 |
+ return tags, nil |
|
| 146 |
+} |
|
| 147 |
+ |
|
| 148 |
+func (t tagService) Tag(ctx context.Context, tag string, dgst distribution.Descriptor) error {
|
|
| 149 |
+ imageStream, err := t.repo.getImageStream() |
|
| 150 |
+ if err != nil {
|
|
| 151 |
+ context.GetLogger(ctx).Errorf("error retrieving ImageStream %s/%s: %v", t.repo.namespace, t.repo.name, err)
|
|
| 152 |
+ return distribution.ErrRepositoryUnknown{Name: t.repo.Named().Name()}
|
|
| 153 |
+ } |
|
| 154 |
+ |
|
| 155 |
+ image, err := t.repo.registryOSClient.Images().Get(dgst.Digest.String()) |
|
| 156 |
+ if err != nil {
|
|
| 157 |
+ context.GetLogger(ctx).Errorf("unable to get image: %s", dgst.Digest.String())
|
|
| 158 |
+ return err |
|
| 159 |
+ } |
|
| 160 |
+ image.SetResourceVersion("")
|
|
| 161 |
+ |
|
| 162 |
+ if !t.repo.pullthrough && !isImageManaged(image) {
|
|
| 163 |
+ return distribution.ErrRepositoryUnknown{Name: t.repo.Named().Name()}
|
|
| 164 |
+ } |
|
| 165 |
+ |
|
| 166 |
+ ism := imageapi.ImageStreamMapping{
|
|
| 167 |
+ ObjectMeta: kapi.ObjectMeta{
|
|
| 168 |
+ Namespace: imageStream.Namespace, |
|
| 169 |
+ Name: imageStream.Name, |
|
| 170 |
+ }, |
|
| 171 |
+ Tag: tag, |
|
| 172 |
+ Image: *image, |
|
| 173 |
+ } |
|
| 174 |
+ |
|
| 175 |
+ err = t.repo.registryOSClient.ImageStreamMappings(imageStream.Namespace).Create(&ism) |
|
| 176 |
+ if quotautil.IsErrorQuotaExceeded(err) {
|
|
| 177 |
+ context.GetLogger(ctx).Errorf("denied creating ImageStreamMapping: %v", err)
|
|
| 178 |
+ return distribution.ErrAccessDenied |
|
| 179 |
+ } |
|
| 180 |
+ |
|
| 181 |
+ return err |
|
| 182 |
+} |
|
| 183 |
+ |
|
| 184 |
+func (t tagService) Untag(ctx context.Context, tag string) error {
|
|
| 185 |
+ imageStream, err := t.repo.getImageStream() |
|
| 186 |
+ if err != nil {
|
|
| 187 |
+ context.GetLogger(ctx).Errorf("error retrieving ImageStream %s/%s: %v", t.repo.namespace, t.repo.name, err)
|
|
| 188 |
+ return distribution.ErrRepositoryUnknown{Name: t.repo.Named().Name()}
|
|
| 189 |
+ } |
|
| 190 |
+ |
|
| 191 |
+ te := imageapi.LatestTaggedImage(imageStream, tag) |
|
| 192 |
+ if te == nil {
|
|
| 193 |
+ return distribution.ErrTagUnknown{Tag: tag}
|
|
| 194 |
+ } |
|
| 195 |
+ |
|
| 196 |
+ if !t.repo.pullthrough {
|
|
| 197 |
+ dgst, err := digest.ParseDigest(te.Image) |
|
| 198 |
+ if err != nil {
|
|
| 199 |
+ return err |
|
| 200 |
+ } |
|
| 201 |
+ |
|
| 202 |
+ image, err := t.repo.getImage(dgst) |
|
| 203 |
+ if err != nil {
|
|
| 204 |
+ return err |
|
| 205 |
+ } |
|
| 206 |
+ |
|
| 207 |
+ if !isImageManaged(image) {
|
|
| 208 |
+ return distribution.ErrTagUnknown{Tag: tag}
|
|
| 209 |
+ } |
|
| 210 |
+ } |
|
| 211 |
+ |
|
| 212 |
+ return t.repo.registryOSClient.ImageStreamTags(imageStream.Namespace).Delete(imageStream.Name, tag) |
|
| 213 |
+} |
|
| 214 |
+ |
|
| 215 |
+func isImageManaged(image *imageapi.Image) bool {
|
|
| 216 |
+ managed, ok := image.ObjectMeta.Annotations[imageapi.ManagedByOpenShiftAnnotation] |
|
| 217 |
+ return ok && managed == "true" |
|
| 218 |
+} |
| ... | ... |
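
The tag service above is reached through the standard distribution API. A hypothetical caller (with `repo` being a `distribution.Repository` built by the OpenShift repository middleware) might look like this; it is an illustration, not part of the change:

```go
// Illustration only; repo and ctx are assumed to come from the surrounding server code.
func resolveTag(ctx context.Context, repo distribution.Repository, tag string) (digest.Digest, error) {
	tags := repo.Tags(ctx)          // returns the tagService defined above
	desc, err := tags.Get(ctx, tag) // resolved from the image stream status
	if err != nil {
		return "", err
	}
	return desc.Digest, nil
}
```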
@@ -14,9 +14,12 @@ func init() {
     if err := api.Scheme.AddGeneratedDeepCopyFuncs(
         DeepCopy_api_Descriptor,
         DeepCopy_api_DockerConfig,
+        DeepCopy_api_DockerConfigHistory,
+        DeepCopy_api_DockerConfigRootFS,
         DeepCopy_api_DockerFSLayer,
         DeepCopy_api_DockerHistory,
         DeepCopy_api_DockerImage,
+        DeepCopy_api_DockerImageConfig,
         DeepCopy_api_DockerImageManifest,
         DeepCopy_api_DockerImageReference,
         DeepCopy_api_DockerV1CompatibilityImage,
@@ -167,6 +170,29 @@ func DeepCopy_api_DockerConfig(in DockerConfig, out *DockerConfig, c *conversion
     return nil
 }
 
+func DeepCopy_api_DockerConfigHistory(in DockerConfigHistory, out *DockerConfigHistory, c *conversion.Cloner) error {
+    if err := unversioned.DeepCopy_unversioned_Time(in.Created, &out.Created, c); err != nil {
+        return err
+    }
+    out.Author = in.Author
+    out.CreatedBy = in.CreatedBy
+    out.Comment = in.Comment
+    out.EmptyLayer = in.EmptyLayer
+    return nil
+}
+
+func DeepCopy_api_DockerConfigRootFS(in DockerConfigRootFS, out *DockerConfigRootFS, c *conversion.Cloner) error {
+    out.Type = in.Type
+    if in.DiffIDs != nil {
+        in, out := in.DiffIDs, &out.DiffIDs
+        *out = make([]string, len(in))
+        copy(*out, in)
+    } else {
+        out.DiffIDs = nil
+    }
+    return nil
+}
+
 func DeepCopy_api_DockerFSLayer(in DockerFSLayer, out *DockerFSLayer, c *conversion.Cloner) error {
     out.DockerBlobSum = in.DockerBlobSum
     return nil
@@ -207,6 +233,41 @@ func DeepCopy_api_DockerImage(in DockerImage, out *DockerImage, c *conversion.Cl
     return nil
 }
 
+func DeepCopy_api_DockerImageConfig(in DockerImageConfig, out *DockerImageConfig, c *conversion.Cloner) error {
+    if err := DeepCopy_api_DockerImage(in.DockerImage, &out.DockerImage, c); err != nil {
+        return err
+    }
+    if in.RootFS != nil {
+        in, out := in.RootFS, &out.RootFS
+        *out = new(DockerConfigRootFS)
+        if err := DeepCopy_api_DockerConfigRootFS(*in, *out, c); err != nil {
+            return err
+        }
+    } else {
+        out.RootFS = nil
+    }
+    if in.History != nil {
+        in, out := in.History, &out.History
+        *out = make([]DockerConfigHistory, len(in))
+        for i := range in {
+            if err := DeepCopy_api_DockerConfigHistory(in[i], &(*out)[i], c); err != nil {
+                return err
+            }
+        }
+    } else {
+        out.History = nil
+    }
+    out.OSVersion = in.OSVersion
+    if in.OSFeatures != nil {
+        in, out := in.OSFeatures, &out.OSFeatures
+        *out = make([]string, len(in))
+        copy(*out, in)
+    } else {
+        out.OSFeatures = nil
+    }
+    return nil
+}
+
 func DeepCopy_api_DockerImageManifest(in DockerImageManifest, out *DockerImageManifest, c *conversion.Cloner) error {
     out.SchemaVersion = in.SchemaVersion
     out.MediaType = in.MediaType
@@ -328,6 +389,21 @@ func DeepCopy_api_Image(in Image, out *Image, c *conversion.Cloner) error {
 	} else {
 		out.Signatures = nil
 	}
+	if in.DockerImageSignatures != nil {
+		in, out := in.DockerImageSignatures, &out.DockerImageSignatures
+		*out = make([][]byte, len(in))
+		for i := range in {
+			if newVal, err := c.DeepCopy(in[i]); err != nil {
+				return err
+			} else {
+				(*out)[i] = newVal.([]byte)
+			}
+		}
+	} else {
+		out.DockerImageSignatures = nil
+	}
+	out.DockerImageManifestMediaType = in.DockerImageManifestMediaType
+	out.DockerImageConfig = in.DockerImageConfig
 	return nil
 }
 
| ... | ... |
@@ -371,6 +447,7 @@ func DeepCopy_api_ImageImportStatus(in ImageImportStatus, out *ImageImportStatus
 func DeepCopy_api_ImageLayer(in ImageLayer, out *ImageLayer, c *conversion.Cloner) error {
 	out.Name = in.Name
 	out.LayerSize = in.LayerSize
+	out.MediaType = in.MediaType
 	return nil
 }
 
| ... | ... |
@@ -121,3 +121,27 @@ type DockerV1CompatibilityImage struct {
 type DockerV1CompatibilityImageSize struct {
 	Size int64 `json:"size,omitempty"`
 }
+
+// DockerImageConfig stores the image configuration
+type DockerImageConfig struct {
+	DockerImage `json:",inline"`
+	RootFS      *DockerConfigRootFS   `json:"rootfs,omitempty"`
+	History     []DockerConfigHistory `json:"history,omitempty"`
+	OSVersion   string                `json:"os.version,omitempty"`
+	OSFeatures  []string              `json:"os.features,omitempty"`
+}
+
+// DockerConfigHistory stores build commands that were used to create an image
+type DockerConfigHistory struct {
+	Created    unversioned.Time `json:"created"`
+	Author     string           `json:"author,omitempty"`
+	CreatedBy  string           `json:"created_by,omitempty"`
+	Comment    string           `json:"comment,omitempty"`
+	EmptyLayer bool             `json:"empty_layer,omitempty"`
+}
+
+// DockerConfigRootFS describes images root filesystem
+type DockerConfigRootFS struct {
+	Type    string   `json:"type"`
+	DiffIDs []string `json:"diff_ids,omitempty"`
+}
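The three types added above map directly onto the JSON image configuration that a schema 2 manifest points at. A minimal, self-contained decoding sketch; the import path used for the package that declares DockerImageConfig is an assumption, and the blob below is trimmed and hypothetical:

package main

import (
	"encoding/json"
	"fmt"

	// assumed import path for the package declaring DockerImageConfig
	imageapi "github.com/openshift/origin/pkg/image/api"
)

// sample is a trimmed, hypothetical schema 2 image configuration blob.
const sample = `{
  "architecture": "amd64",
  "os": "linux",
  "rootfs": {"type": "layers", "diff_ids": ["sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"]},
  "history": [{"created_by": "/bin/sh -c #(nop) ADD file in /", "empty_layer": false}]
}`

func main() {
	config := imageapi.DockerImageConfig{}
	if err := json.Unmarshal([]byte(sample), &config); err != nil {
		panic(err)
	}
	// rootfs.type and the history entries land in the new fields
	fmt.Println(config.RootFS.Type, len(config.History), config.History[0].CreatedBy)
}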
| ... | ... |
@@ -15,6 +15,7 @@ import (
 	"github.com/blang/semver"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/manifest/schema1"
+	"github.com/docker/distribution/manifest/schema2"
 	"github.com/golang/glog"
 )
 
| ... | ... |
@@ -382,14 +383,54 @@ func ManifestMatchesImage(image *Image, newManifest []byte) (bool, error) {
 	if err != nil {
 		return false, err
 	}
-	sm := schema1.SignedManifest{Raw: newManifest}
-	raw, err := sm.Payload()
+	var canonical []byte
+
+	switch image.DockerImageManifestMediaType {
+	case schema2.MediaTypeManifest:
+		var m schema2.DeserializedManifest
+		if err := json.Unmarshal(newManifest, &m); err != nil {
+			return false, err
+		}
+		_, canonical, err = m.Payload()
+		if err != nil {
+			return false, err
+		}
+	case schema1.MediaTypeManifest, "":
+		var m schema1.SignedManifest
+		if err := json.Unmarshal(newManifest, &m); err != nil {
+			return false, err
+		}
+		canonical = m.Canonical
+	default:
+		return false, fmt.Errorf("unsupported manifest mediatype: %s", image.DockerImageManifestMediaType)
+	}
+	if _, err := v.Write(canonical); err != nil {
+		return false, err
+	}
+	return v.Verified(), nil
+}
+
+// ImageConfigMatchesImage returns true if the provided image config matches a digest
+// stored in the manifest of the image.
+func ImageConfigMatchesImage(image *Image, imageConfig []byte) (bool, error) {
+	if image.DockerImageManifestMediaType != schema2.MediaTypeManifest {
+		return false, nil
+	}
+
+	var m schema2.DeserializedManifest
+	if err := json.Unmarshal([]byte(image.DockerImageManifest), &m); err != nil {
+		return false, err
+	}
+
+	v, err := digest.NewDigestVerifier(m.Config.Digest)
 	if err != nil {
 		return false, err
 	}
-	if _, err := v.Write(raw); err != nil {
+
+	if _, err := v.Write(imageConfig); err != nil {
 		return false, err
 	}
+
 	return v.Verified(), nil
 }
 
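Taken together, the two helpers let a caller validate both halves of a schema 2 push against a stored Image. A hedged sketch of how they might compose if placed next to them in the same package; the wrapper function and its error messages are hypothetical, not part of this change:

// validatePush checks that a raw manifest matches the digest recorded for the
// image and, for schema 2, that the config blob matches the digest recorded in
// the manifest. Illustrative only.
func validatePush(image *Image, rawManifest, rawConfig []byte) error {
	ok, err := ManifestMatchesImage(image, rawManifest)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("manifest does not match image %q", image.Name)
	}
	if image.DockerImageManifestMediaType == schema2.MediaTypeManifest {
		ok, err = ImageConfigMatchesImage(image, rawConfig)
		if err != nil {
			return err
		}
		if !ok {
			return fmt.Errorf("image config does not match image %q", image.Name)
		}
	}
	return nil
}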
| ... | ... |
@@ -429,6 +470,7 @@ func ImageWithMetadata(image *Image) error {
 
 	image.DockerImageLayers = make([]ImageLayer, len(manifest.FSLayers))
 	for i, layer := range manifest.FSLayers {
+		image.DockerImageLayers[i].MediaType = schema1.MediaTypeManifestLayer
 		image.DockerImageLayers[i].Name = layer.DockerBlobSum
 	}
 	if len(manifest.History) == len(image.DockerImageLayers) {
| ... | ... |
@@ -469,8 +511,41 @@ func ImageWithMetadata(image *Image) error {
 			image.DockerImageMetadata.Size = v1Metadata.Size
 		}
 	case 2:
-		// TODO: need to prepare for this
-		return fmt.Errorf("unrecognized Docker image manifest schema %d for %q (%s)", manifest.SchemaVersion, image.Name, image.DockerImageReference)
+		config := DockerImageConfig{}
+		if err := json.Unmarshal([]byte(image.DockerImageConfig), &config); err != nil {
+			return err
+		}
+
+		image.DockerImageLayers = make([]ImageLayer, len(manifest.Layers))
+		for i, layer := range manifest.Layers {
+			image.DockerImageLayers[i].Name = layer.Digest
+			image.DockerImageLayers[i].LayerSize = layer.Size
+			image.DockerImageLayers[i].MediaType = layer.MediaType
+		}
+		// reverse order of the layers for v1 (lowest = 0, highest = i)
+		for i, j := 0, len(image.DockerImageLayers)-1; i < j; i, j = i+1, j-1 {
+			image.DockerImageLayers[i], image.DockerImageLayers[j] = image.DockerImageLayers[j], image.DockerImageLayers[i]
+		}
+
+		image.DockerImageMetadata.ID = manifest.Config.Digest
+		image.DockerImageMetadata.Parent = config.Parent
+		image.DockerImageMetadata.Comment = config.Comment
+		image.DockerImageMetadata.Created = config.Created
+		image.DockerImageMetadata.Container = config.Container
+		image.DockerImageMetadata.ContainerConfig = config.ContainerConfig
+		image.DockerImageMetadata.DockerVersion = config.DockerVersion
+		image.DockerImageMetadata.Author = config.Author
+		image.DockerImageMetadata.Config = config.Config
+		image.DockerImageMetadata.Architecture = config.Architecture
+		image.DockerImageMetadata.Size = int64(len(image.DockerImageConfig))
+
+		if len(image.DockerImageLayers) > 0 {
+			for _, layer := range image.DockerImageLayers {
+				image.DockerImageMetadata.Size += layer.LayerSize
+			}
+		} else {
+			image.DockerImageMetadata.Size += config.Size
+		}
 	default:
 		return fmt.Errorf("unrecognized Docker image manifest schema %d for %q (%s)", manifest.SchemaVersion, image.Name, image.DockerImageReference)
 	}
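Note the accounting in the schema 2 branch above: the metadata size starts at the length of the config blob and then adds every layer size (or, with no layers listed, whatever size the config reports), while the layer slice is reversed so index 0 is the base layer, matching the v1 ordering. A tiny illustrative helper mirroring that arithmetic (not part of this change):

// sizeOfSchema2 mirrors the size computation above: config bytes plus the sum
// of all layer sizes. Purely illustrative.
func sizeOfSchema2(configLen int64, layerSizes []int64) int64 {
	size := configLen
	for _, s := range layerSizes {
		size += s
	}
	return size
}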
| ... | ... |
@@ -671,11 +671,11 @@ func TestImageWithMetadata(t *testing.T) {
|
| 671 | 671 |
}, |
| 672 | 672 |
DockerImageManifest: validImageWithManifestData().DockerImageManifest, |
| 673 | 673 |
DockerImageLayers: []ImageLayer{
|
| 674 |
- {Name: "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", LayerSize: 0},
|
|
| 675 |
- {Name: "tarsum.dev+sha256:2aaacc362ac6be2b9e9ae8c6029f6f616bb50aec63746521858e47841b90fabd", LayerSize: 188097705},
|
|
| 676 |
- {Name: "tarsum.dev+sha256:c937c4bb1c1a21cc6d94340812262c6472092028972ae69b551b1a70d4276171", LayerSize: 194533},
|
|
| 677 |
- {Name: "tarsum.dev+sha256:b194de3772ebbcdc8f244f663669799ac1cb141834b7cb8b69100285d357a2b0", LayerSize: 1895},
|
|
| 678 |
- {Name: "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", LayerSize: 0},
|
|
| 674 |
+ {Name: "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", LayerSize: 0},
|
|
| 675 |
+ {Name: "tarsum.dev+sha256:2aaacc362ac6be2b9e9ae8c6029f6f616bb50aec63746521858e47841b90fabd", MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", LayerSize: 188097705},
|
|
| 676 |
+ {Name: "tarsum.dev+sha256:c937c4bb1c1a21cc6d94340812262c6472092028972ae69b551b1a70d4276171", MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", LayerSize: 194533},
|
|
| 677 |
+ {Name: "tarsum.dev+sha256:b194de3772ebbcdc8f244f663669799ac1cb141834b7cb8b69100285d357a2b0", MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", LayerSize: 1895},
|
|
| 678 |
+ {Name: "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", LayerSize: 0},
|
|
| 679 | 679 |
}, |
| 680 | 680 |
DockerImageMetadata: DockerImage{
|
| 681 | 681 |
ID: "2d24f826cb16146e2016ff349a8a33ed5830f3b938d45c0f82943f4ab8c097e7", |
| ... | ... |
@@ -68,6 +68,12 @@ type Image struct {
 	DockerImageLayers []ImageLayer
 	// Signatures holds all signatures of the image.
 	Signatures []ImageSignature
+	// DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.
+	DockerImageSignatures [][]byte
+	// DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2.
+	DockerImageManifestMediaType string
+	// DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2.
+	DockerImageConfig string
 }
 
 // ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none.
| ... | ... |
@@ -76,6 +82,8 @@ type ImageLayer struct {
 	Name string
 	// LayerSize of the layer as defined by the underlying store.
 	LayerSize int64
+	// MediaType of the referenced object.
+	MediaType string
 }
 
 const (
| ... | ... |
@@ -21,6 +21,8 @@ func Convert_api_Image_To_v1_Image(in *newer.Image, out *Image, s conversion.Sco |
| 21 | 21 |
|
| 22 | 22 |
out.DockerImageReference = in.DockerImageReference |
| 23 | 23 |
out.DockerImageManifest = in.DockerImageManifest |
| 24 |
+ out.DockerImageManifestMediaType = in.DockerImageManifestMediaType |
|
| 25 |
+ out.DockerImageConfig = in.DockerImageConfig |
|
| 24 | 26 |
|
| 25 | 27 |
gvString := in.DockerImageMetadataVersion |
| 26 | 28 |
if len(gvString) == 0 {
|
| ... | ... |
@@ -44,6 +46,7 @@ func Convert_api_Image_To_v1_Image(in *newer.Image, out *Image, s conversion.Sco |
| 44 | 44 |
if in.DockerImageLayers != nil {
|
| 45 | 45 |
out.DockerImageLayers = make([]ImageLayer, len(in.DockerImageLayers)) |
| 46 | 46 |
for i := range in.DockerImageLayers {
|
| 47 |
+ out.DockerImageLayers[i].MediaType = in.DockerImageLayers[i].MediaType |
|
| 47 | 48 |
out.DockerImageLayers[i].Name = in.DockerImageLayers[i].Name |
| 48 | 49 |
out.DockerImageLayers[i].LayerSize = in.DockerImageLayers[i].LayerSize |
| 49 | 50 |
} |
| ... | ... |
@@ -62,6 +65,15 @@ func Convert_api_Image_To_v1_Image(in *newer.Image, out *Image, s conversion.Sco |
| 62 | 62 |
out.Signatures = nil |
| 63 | 63 |
} |
| 64 | 64 |
|
| 65 |
+ if in.DockerImageSignatures != nil {
|
|
| 66 |
+ out.DockerImageSignatures = nil |
|
| 67 |
+ for _, v := range in.DockerImageSignatures {
|
|
| 68 |
+ out.DockerImageSignatures = append(out.DockerImageSignatures, v) |
|
| 69 |
+ } |
|
| 70 |
+ } else {
|
|
| 71 |
+ out.DockerImageSignatures = nil |
|
| 72 |
+ } |
|
| 73 |
+ |
|
| 65 | 74 |
return nil |
| 66 | 75 |
} |
| 67 | 76 |
|
| ... | ... |
@@ -72,6 +84,8 @@ func Convert_v1_Image_To_api_Image(in *Image, out *newer.Image, s conversion.Sco |
| 72 | 72 |
|
| 73 | 73 |
out.DockerImageReference = in.DockerImageReference |
| 74 | 74 |
out.DockerImageManifest = in.DockerImageManifest |
| 75 |
+ out.DockerImageManifestMediaType = in.DockerImageManifestMediaType |
|
| 76 |
+ out.DockerImageConfig = in.DockerImageConfig |
|
| 75 | 77 |
|
| 76 | 78 |
version := in.DockerImageMetadataVersion |
| 77 | 79 |
if len(version) == 0 {
|
| ... | ... |
@@ -95,6 +109,7 @@ func Convert_v1_Image_To_api_Image(in *Image, out *newer.Image, s conversion.Sco |
| 95 | 95 |
if in.DockerImageLayers != nil {
|
| 96 | 96 |
out.DockerImageLayers = make([]newer.ImageLayer, len(in.DockerImageLayers)) |
| 97 | 97 |
for i := range in.DockerImageLayers {
|
| 98 |
+ out.DockerImageLayers[i].MediaType = in.DockerImageLayers[i].MediaType |
|
| 98 | 99 |
out.DockerImageLayers[i].Name = in.DockerImageLayers[i].Name |
| 99 | 100 |
out.DockerImageLayers[i].LayerSize = in.DockerImageLayers[i].LayerSize |
| 100 | 101 |
} |
| ... | ... |
@@ -113,6 +128,15 @@ func Convert_v1_Image_To_api_Image(in *Image, out *newer.Image, s conversion.Sco |
| 113 | 113 |
out.Signatures = nil |
| 114 | 114 |
} |
| 115 | 115 |
|
| 116 |
+ if in.DockerImageSignatures != nil {
|
|
| 117 |
+ out.DockerImageSignatures = nil |
|
| 118 |
+ for _, v := range in.DockerImageSignatures {
|
|
| 119 |
+ out.DockerImageSignatures = append(out.DockerImageSignatures, v) |
|
| 120 |
+ } |
|
| 121 |
+ } else {
|
|
| 122 |
+ out.DockerImageSignatures = nil |
|
| 123 |
+ } |
|
| 124 |
+ |
|
| 116 | 125 |
return nil |
| 117 | 126 |
} |
| 118 | 127 |
|
| ... | ... |
@@ -202,6 +202,7 @@ func Convert_api_ImageImportStatus_To_v1_ImageImportStatus(in *image_api.ImageIm |
| 202 | 202 |
func autoConvert_v1_ImageLayer_To_api_ImageLayer(in *ImageLayer, out *image_api.ImageLayer, s conversion.Scope) error {
|
| 203 | 203 |
out.Name = in.Name |
| 204 | 204 |
out.LayerSize = in.LayerSize |
| 205 |
+ out.MediaType = in.MediaType |
|
| 205 | 206 |
return nil |
| 206 | 207 |
} |
| 207 | 208 |
|
| ... | ... |
@@ -212,6 +213,7 @@ func Convert_v1_ImageLayer_To_api_ImageLayer(in *ImageLayer, out *image_api.Imag |
| 212 | 212 |
func autoConvert_api_ImageLayer_To_v1_ImageLayer(in *image_api.ImageLayer, out *ImageLayer, s conversion.Scope) error {
|
| 213 | 213 |
out.Name = in.Name |
| 214 | 214 |
out.LayerSize = in.LayerSize |
| 215 |
+ out.MediaType = in.MediaType |
|
| 215 | 216 |
return nil |
| 216 | 217 |
} |
| 217 | 218 |
|
| ... | ... |
@@ -93,6 +93,21 @@ func DeepCopy_v1_Image(in Image, out *Image, c *conversion.Cloner) error {
|
| 93 | 93 |
} else {
|
| 94 | 94 |
out.Signatures = nil |
| 95 | 95 |
} |
| 96 |
+ if in.DockerImageSignatures != nil {
|
|
| 97 |
+ in, out := in.DockerImageSignatures, &out.DockerImageSignatures |
|
| 98 |
+ *out = make([][]byte, len(in)) |
|
| 99 |
+ for i := range in {
|
|
| 100 |
+ if newVal, err := c.DeepCopy(in[i]); err != nil {
|
|
| 101 |
+ return err |
|
| 102 |
+ } else {
|
|
| 103 |
+ (*out)[i] = newVal.([]byte) |
|
| 104 |
+ } |
|
| 105 |
+ } |
|
| 106 |
+ } else {
|
|
| 107 |
+ out.DockerImageSignatures = nil |
|
| 108 |
+ } |
|
| 109 |
+ out.DockerImageManifestMediaType = in.DockerImageManifestMediaType |
|
| 110 |
+ out.DockerImageConfig = in.DockerImageConfig |
|
| 96 | 111 |
return nil |
| 97 | 112 |
} |
| 98 | 113 |
|
| ... | ... |
@@ -136,6 +151,7 @@ func DeepCopy_v1_ImageImportStatus(in ImageImportStatus, out *ImageImportStatus, |
| 136 | 136 |
func DeepCopy_v1_ImageLayer(in ImageLayer, out *ImageLayer, c *conversion.Cloner) error {
|
| 137 | 137 |
out.Name = in.Name |
| 138 | 138 |
out.LayerSize = in.LayerSize |
| 139 |
+ out.MediaType = in.MediaType |
|
| 139 | 140 |
return nil |
| 140 | 141 |
} |
| 141 | 142 |
|
| ... | ... |
@@ -19,14 +19,17 @@ func (DockerImageReference) SwaggerDoc() map[string]string {
|
| 19 | 19 |
} |
| 20 | 20 |
|
| 21 | 21 |
var map_Image = map[string]string{
|
| 22 |
- "": "Image is an immutable representation of a Docker image and metadata at a point in time.", |
|
| 23 |
- "metadata": "Standard object's metadata.", |
|
| 24 |
- "dockerImageReference": "DockerImageReference is the string that can be used to pull this image.", |
|
| 25 |
- "dockerImageMetadata": "DockerImageMetadata contains metadata about this image", |
|
| 26 |
- "dockerImageMetadataVersion": "DockerImageMetadataVersion conveys the version of the object, which if empty defaults to \"1.0\"", |
|
| 27 |
- "dockerImageManifest": "DockerImageManifest is the raw JSON of the manifest", |
|
| 28 |
- "dockerImageLayers": "DockerImageLayers represents the layers in the image. May not be set if the image does not define that data.", |
|
| 29 |
- "signatures": "Signatures holds all signatures of the image.", |
|
| 22 |
+ "": "Image is an immutable representation of a Docker image and metadata at a point in time.", |
|
| 23 |
+ "metadata": "Standard object's metadata.", |
|
| 24 |
+ "dockerImageReference": "DockerImageReference is the string that can be used to pull this image.", |
|
| 25 |
+ "dockerImageMetadata": "DockerImageMetadata contains metadata about this image", |
|
| 26 |
+ "dockerImageMetadataVersion": "DockerImageMetadataVersion conveys the version of the object, which if empty defaults to \"1.0\"", |
|
| 27 |
+ "dockerImageManifest": "DockerImageManifest is the raw JSON of the manifest", |
|
| 28 |
+ "dockerImageLayers": "DockerImageLayers represents the layers in the image. May not be set if the image does not define that data.", |
|
| 29 |
+ "signatures": "Signatures holds all signatures of the image.", |
|
| 30 |
+ "dockerImageSignatures": "DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.", |
|
| 31 |
+ "dockerImageManifestMediaType": "DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2.", |
|
| 32 |
+ "dockerImageConfig": "DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2.", |
|
| 30 | 33 |
} |
| 31 | 34 |
|
| 32 | 35 |
func (Image) SwaggerDoc() map[string]string {
|
| ... | ... |
@@ -57,9 +60,10 @@ func (ImageImportStatus) SwaggerDoc() map[string]string {
|
| 57 | 57 |
} |
| 58 | 58 |
|
| 59 | 59 |
var map_ImageLayer = map[string]string{
|
| 60 |
- "": "ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none.", |
|
| 61 |
- "name": "Name of the layer as defined by the underlying store.", |
|
| 62 |
- "size": "Size of the layer in bytes as defined by the underlying store.", |
|
| 60 |
+ "": "ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none.", |
|
| 61 |
+ "name": "Name of the layer as defined by the underlying store.", |
|
| 62 |
+ "size": "Size of the layer in bytes as defined by the underlying store.", |
|
| 63 |
+ "mediaType": "MediaType of the referenced object.", |
|
| 63 | 64 |
} |
| 64 | 65 |
|
| 65 | 66 |
func (ImageLayer) SwaggerDoc() map[string]string {
|
| ... | ... |
@@ -34,6 +34,12 @@ type Image struct {
|
| 34 | 34 |
DockerImageLayers []ImageLayer `json:"dockerImageLayers"` |
| 35 | 35 |
// Signatures holds all signatures of the image. |
| 36 | 36 |
Signatures []ImageSignature `json:"signatures,omitempty"` |
| 37 |
+ // DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. |
|
| 38 |
+ DockerImageSignatures [][]byte `json:"dockerImageSignatures,omitempty"` |
|
| 39 |
+ // DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2. |
|
| 40 |
+ DockerImageManifestMediaType string `json:"dockerImageManifestMediaType,omitempty"` |
|
| 41 |
+ // DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. |
|
| 42 |
+ DockerImageConfig string `json:"dockerImageConfig,omitempty"` |
|
| 37 | 43 |
} |
| 38 | 44 |
|
| 39 | 45 |
// ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none. |
| ... | ... |
@@ -42,6 +48,8 @@ type ImageLayer struct {
|
| 42 | 42 |
Name string `json:"name"` |
| 43 | 43 |
// Size of the layer in bytes as defined by the underlying store. |
| 44 | 44 |
LayerSize int64 `json:"size"` |
| 45 |
+ // MediaType of the referenced object. |
|
| 46 |
+ MediaType string `json:"mediaType"` |
|
| 45 | 47 |
} |
| 46 | 48 |
|
| 47 | 49 |
// ImageSignature holds a signature of an image. It allows to verify image identity and possibly other claims |
| ... | ... |
@@ -21,6 +21,8 @@ func Convert_api_Image_To_v1beta3_Image(in *newer.Image, out *Image, s conversio |
| 21 | 21 |
|
| 22 | 22 |
out.DockerImageReference = in.DockerImageReference |
| 23 | 23 |
out.DockerImageManifest = in.DockerImageManifest |
| 24 |
+ out.DockerImageManifestMediaType = in.DockerImageManifestMediaType |
|
| 25 |
+ out.DockerImageConfig = in.DockerImageConfig |
|
| 24 | 26 |
|
| 25 | 27 |
gvString := in.DockerImageMetadataVersion |
| 26 | 28 |
if len(gvString) == 0 {
|
| ... | ... |
@@ -51,6 +53,8 @@ func Convert_v1beta3_Image_To_api_Image(in *Image, out *newer.Image, s conversio |
| 51 | 51 |
|
| 52 | 52 |
out.DockerImageReference = in.DockerImageReference |
| 53 | 53 |
out.DockerImageManifest = in.DockerImageManifest |
| 54 |
+ out.DockerImageManifestMediaType = in.DockerImageManifestMediaType |
|
| 55 |
+ out.DockerImageConfig = in.DockerImageConfig |
|
| 54 | 56 |
|
| 55 | 57 |
version := in.DockerImageMetadataVersion |
| 56 | 58 |
if len(version) == 0 {
|
| ... | ... |
@@ -31,6 +31,12 @@ type Image struct {
|
| 31 | 31 |
DockerImageLayers []ImageLayer `json:"dockerImageLayers"` |
| 32 | 32 |
// Signatures holds all signatures of the image. |
| 33 | 33 |
Signatures []ImageSignature |
| 34 |
+ // DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. |
|
| 35 |
+ DockerImageSignatures [][]byte `json:"dockerImageSignatures,omitempty"` |
|
| 36 |
+ // DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2. |
|
| 37 |
+ DockerImageManifestMediaType string `json:"dockerImageManifestMediaType,omitempty"` |
|
| 38 |
+ // DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. |
|
| 39 |
+ DockerImageConfig string `json:"dockerImageConfig,omitempty"` |
|
| 34 | 40 |
} |
| 35 | 41 |
|
| 36 | 42 |
// ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none. |
| ... | ... |
@@ -39,6 +45,8 @@ type ImageLayer struct {
|
| 39 | 39 |
Name string `json:"name"` |
| 40 | 40 |
// Size of the layer as defined by the underlying store. |
| 41 | 41 |
Size int64 `json:"size"` |
| 42 |
+ // MediaType of the referenced object. |
|
| 43 |
+ MediaType string `json:"mediaType"` |
|
| 42 | 44 |
} |
| 43 | 45 |
|
| 44 | 46 |
// ImageSignature holds a signature of an image. It allows to verify image identity and possibly other claims |
| ... | ... |
@@ -16,6 +16,8 @@ import (
 	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/manifest/schema1"
+	"github.com/docker/distribution/manifest/schema2"
+	"github.com/docker/distribution/reference"
 	"github.com/docker/distribution/registry/api/errcode"
 	registryclient "github.com/docker/distribution/registry/client"
 	"github.com/docker/distribution/registry/client/auth"
| ... | ... |
@@ -71,6 +73,11 @@ type repositoryRetriever struct {
 }
 
 func (r *repositoryRetriever) Repository(ctx gocontext.Context, registry *url.URL, repoName string, insecure bool) (distribution.Repository, error) {
+	named, err := reference.ParseNamed(repoName)
+	if err != nil {
+		return nil, err
+	}
+
 	t := r.context.Transport
 	if insecure && r.context.InsecureTransport != nil {
 		t = r.context.InsecureTransport
| ... | ... |
@@ -107,7 +114,7 @@ func (r *repositoryRetriever) Repository(ctx gocontext.Context, registry *url.UR
 		),
 	)
 
-	repo, err := registryclient.NewRepository(context.Context(ctx), repoName, src.String(), rt)
+	repo, err := registryclient.NewRepository(context.Context(ctx), named, src.String(), rt)
 	if err != nil {
 		return nil, err
 	}
| ... | ... |
@@ -159,30 +166,54 @@ func schema1ToImage(manifest *schema1.SignedManifest, d digest.Digest) (*api.Ima
 	if err != nil {
 		return nil, err
 	}
+	mediatype, payload, err := manifest.Payload()
+	if err != nil {
+		return nil, err
+	}
+
 	if len(d) > 0 {
 		dockerImage.ID = d.String()
 	} else {
-		if p, err := manifest.Payload(); err == nil {
-			d, err := digest.FromBytes(p)
-			if err != nil {
-				return nil, fmt.Errorf("unable to create digest from image payload: %v", err)
-			}
-			dockerImage.ID = d.String()
-		} else {
-			d, err := digest.FromBytes(manifest.Raw)
-			if err != nil {
-				return nil, fmt.Errorf("unable to create digest from image bytes: %v", err)
-			}
-			dockerImage.ID = d.String()
-		}
+		dockerImage.ID = digest.FromBytes(manifest.Canonical).String()
 	}
 	image := &api.Image{
 		ObjectMeta: kapi.ObjectMeta{
 			Name: dockerImage.ID,
 		},
-		DockerImageMetadata:        *dockerImage,
-		DockerImageManifest:        string(manifest.Raw),
-		DockerImageMetadataVersion: "1.0",
+		DockerImageMetadata:          *dockerImage,
+		DockerImageManifest:          string(payload),
+		DockerImageManifestMediaType: mediatype,
+		DockerImageMetadataVersion:   "1.0",
+	}
+
+	return image, nil
+}
+
+func schema2ToImage(manifest *schema2.DeserializedManifest, imageConfig []byte, d digest.Digest) (*api.Image, error) {
+	mediatype, payload, err := manifest.Payload()
+	if err != nil {
+		return nil, err
+	}
+
+	dockerImage, err := unmarshalDockerImage(imageConfig)
+	if err != nil {
+		return nil, err
+	}
+	if len(d) > 0 {
+		dockerImage.ID = d.String()
+	} else {
+		dockerImage.ID = digest.FromBytes(payload).String()
+	}
+
+	image := &api.Image{
+		ObjectMeta: kapi.ObjectMeta{
+			Name: dockerImage.ID,
+		},
+		DockerImageMetadata:          *dockerImage,
+		DockerImageManifest:          string(payload),
+		DockerImageConfig:            string(imageConfig),
+		DockerImageManifestMediaType: mediatype,
+		DockerImageMetadataVersion:   "1.0",
 	}
 
 	return image, nil
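schema2ToImage expects a *schema2.DeserializedManifest plus the already-fetched config blob. For reference, such a manifest round-trips through encoding/json and exposes the canonical payload and the config descriptor used above. A standalone sketch with a trimmed, hypothetical manifest:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution/manifest/schema2"
)

// rawManifest is a trimmed, hypothetical schema 2 manifest.
const rawManifest = `{
  "schemaVersion": 2,
  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
  "config": {"mediaType": "application/vnd.docker.container.image.v1+json", "size": 1510,
             "digest": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
  "layers": [{"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 977,
              "digest": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}]
}`

func main() {
	var m schema2.DeserializedManifest
	if err := json.Unmarshal([]byte(rawManifest), &m); err != nil {
		panic(err)
	}
	mediatype, payload, err := m.Payload()
	if err != nil {
		panic(err)
	}
	// the config digest is what the importer fetches before calling schema2ToImage
	fmt.Println(mediatype, len(payload), m.Config.Digest, len(m.Layers))
}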
| ... | ... |
@@ -319,6 +350,11 @@ func (r *retryRepository) Blobs(ctx context.Context) distribution.BlobStore {
|
| 319 | 319 |
return retryBlobStore{BlobStore: r.Repository.Blobs(ctx), repo: r}
|
| 320 | 320 |
} |
| 321 | 321 |
|
| 322 |
+// Tags lists the tags under the named repository. |
|
| 323 |
+func (r *retryRepository) Tags(ctx context.Context) distribution.TagService {
|
|
| 324 |
+ return &retryTags{TagService: r.Repository.Tags(ctx), repo: r}
|
|
| 325 |
+} |
|
| 326 |
+ |
|
| 322 | 327 |
// retryManifest wraps the manifest service and invokes retries on the repo. |
| 323 | 328 |
type retryManifest struct {
|
| 324 | 329 |
distribution.ManifestService |
| ... | ... |
@@ -326,9 +362,9 @@ type retryManifest struct {
|
| 326 | 326 |
} |
| 327 | 327 |
|
| 328 | 328 |
// Exists returns true if the manifest exists. |
| 329 |
-func (r retryManifest) Exists(dgst digest.Digest) (bool, error) {
|
|
| 329 |
+func (r retryManifest) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
|
|
| 330 | 330 |
for {
|
| 331 |
- if exists, err := r.ManifestService.Exists(dgst); r.repo.shouldRetry(err) {
|
|
| 331 |
+ if exists, err := r.ManifestService.Exists(ctx, dgst); r.repo.shouldRetry(err) {
|
|
| 332 | 332 |
continue |
| 333 | 333 |
} else {
|
| 334 | 334 |
return exists, err |
| ... | ... |
@@ -336,10 +372,10 @@ func (r retryManifest) Exists(dgst digest.Digest) (bool, error) {
|
| 336 | 336 |
} |
| 337 | 337 |
} |
| 338 | 338 |
|
| 339 |
-// Get retrieves the identified by the digest, if it exists. |
|
| 340 |
-func (r retryManifest) Get(dgst digest.Digest) (*schema1.SignedManifest, error) {
|
|
| 339 |
+// Get retrieves the manifest identified by the digest, if it exists. |
|
| 340 |
+func (r retryManifest) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
|
|
| 341 | 341 |
for {
|
| 342 |
- if m, err := r.ManifestService.Get(dgst); r.repo.shouldRetry(err) {
|
|
| 342 |
+ if m, err := r.ManifestService.Get(ctx, dgst, options...); r.repo.shouldRetry(err) {
|
|
| 343 | 343 |
continue |
| 344 | 344 |
} else {
|
| 345 | 345 |
return m, err |
| ... | ... |
@@ -347,82 +383,73 @@ func (r retryManifest) Get(dgst digest.Digest) (*schema1.SignedManifest, error) |
| 347 | 347 |
} |
| 348 | 348 |
} |
| 349 | 349 |
|
| 350 |
-// Enumerate returns an array of manifest revisions in repository. |
|
| 351 |
-func (r retryManifest) Enumerate() ([]digest.Digest, error) {
|
|
| 352 |
- for {
|
|
| 353 |
- if d, err := r.ManifestService.Enumerate(); r.repo.shouldRetry(err) {
|
|
| 354 |
- continue |
|
| 355 |
- } else {
|
|
| 356 |
- return d, err |
|
| 357 |
- } |
|
| 358 |
- } |
|
| 350 |
+// retryBlobStore wraps the blob store and invokes retries on the repo. |
|
| 351 |
+type retryBlobStore struct {
|
|
| 352 |
+ distribution.BlobStore |
|
| 353 |
+ repo *retryRepository |
|
| 359 | 354 |
} |
| 360 | 355 |
|
| 361 |
-// Tags lists the tags under the named repository. |
|
| 362 |
-func (r retryManifest) Tags() ([]string, error) {
|
|
| 356 |
+func (r retryBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
|
|
| 363 | 357 |
for {
|
| 364 |
- if t, err := r.ManifestService.Tags(); r.repo.shouldRetry(err) {
|
|
| 358 |
+ if d, err := r.BlobStore.Stat(ctx, dgst); r.repo.shouldRetry(err) {
|
|
| 365 | 359 |
continue |
| 366 | 360 |
} else {
|
| 367 |
- return t, err |
|
| 361 |
+ return d, err |
|
| 368 | 362 |
} |
| 369 | 363 |
} |
| 370 | 364 |
} |
| 371 | 365 |
|
| 372 |
-// ExistsByTag returns true if the manifest exists. |
|
| 373 |
-func (r retryManifest) ExistsByTag(tag string) (bool, error) {
|
|
| 366 |
+func (r retryBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, req *http.Request, dgst digest.Digest) error {
|
|
| 374 | 367 |
for {
|
| 375 |
- if exists, err := r.ManifestService.ExistsByTag(tag); r.repo.shouldRetry(err) {
|
|
| 368 |
+ if err := r.BlobStore.ServeBlob(ctx, w, req, dgst); r.repo.shouldRetry(err) {
|
|
| 376 | 369 |
continue |
| 377 | 370 |
} else {
|
| 378 |
- return exists, err |
|
| 371 |
+ return err |
|
| 379 | 372 |
} |
| 380 | 373 |
} |
| 381 | 374 |
} |
| 382 | 375 |
|
| 383 |
-// GetByTag retrieves the named manifest, if it exists. |
|
| 384 |
-func (r retryManifest) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) {
|
|
| 376 |
+func (r retryBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
|
|
| 385 | 377 |
for {
|
| 386 |
- if m, err := r.ManifestService.GetByTag(tag, options...); r.repo.shouldRetry(err) {
|
|
| 378 |
+ if rsc, err := r.BlobStore.Open(ctx, dgst); r.repo.shouldRetry(err) {
|
|
| 387 | 379 |
continue |
| 388 | 380 |
} else {
|
| 389 |
- return m, err |
|
| 381 |
+ return rsc, err |
|
| 390 | 382 |
} |
| 391 | 383 |
} |
| 392 | 384 |
} |
| 393 | 385 |
|
| 394 |
-// retryManifest wraps the blob store and invokes retries on the repo. |
|
| 395 |
-type retryBlobStore struct {
|
|
| 396 |
- distribution.BlobStore |
|
| 386 |
+type retryTags struct {
|
|
| 387 |
+ distribution.TagService |
|
| 397 | 388 |
repo *retryRepository |
| 398 | 389 |
} |
| 399 | 390 |
|
| 400 |
-func (r retryBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
|
|
| 391 |
+func (r *retryTags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
|
|
| 401 | 392 |
for {
|
| 402 |
- if d, err := r.BlobStore.Stat(ctx, dgst); r.repo.shouldRetry(err) {
|
|
| 393 |
+ if t, err := r.TagService.Get(ctx, tag); r.repo.shouldRetry(err) {
|
|
| 403 | 394 |
continue |
| 404 | 395 |
} else {
|
| 405 |
- return d, err |
|
| 396 |
+ return t, err |
|
| 406 | 397 |
} |
| 407 | 398 |
} |
| 408 | 399 |
} |
| 409 | 400 |
|
| 410 |
-func (r retryBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, req *http.Request, dgst digest.Digest) error {
|
|
| 401 |
+func (r *retryTags) All(ctx context.Context) ([]string, error) {
|
|
| 411 | 402 |
for {
|
| 412 |
- if err := r.BlobStore.ServeBlob(ctx, w, req, dgst); r.repo.shouldRetry(err) {
|
|
| 403 |
+ if t, err := r.TagService.All(ctx); r.repo.shouldRetry(err) {
|
|
| 413 | 404 |
continue |
| 414 | 405 |
} else {
|
| 415 |
- return err |
|
| 406 |
+ return t, err |
|
| 416 | 407 |
} |
| 417 | 408 |
} |
| 418 | 409 |
} |
| 419 | 410 |
|
| 420 |
-func (r retryBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
|
|
| 411 |
+func (r *retryTags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
|
|
| 421 | 412 |
for {
|
| 422 |
- if rsc, err := r.BlobStore.Open(ctx, dgst); r.repo.shouldRetry(err) {
|
|
| 413 |
+ if t, err := r.TagService.Lookup(ctx, digest); r.repo.shouldRetry(err) {
|
|
| 423 | 414 |
continue |
| 424 | 415 |
} else {
|
| 425 |
- return rsc, err |
|
| 416 |
+ return t, err |
|
| 426 | 417 |
} |
| 427 | 418 |
} |
| 428 | 419 |
} |
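With the TagService accessor in place, the retry decorator can also be exercised through tags. A hedged sketch, written as if it sat in the same package as the retry wrapper; the package name, and NewRetryRepository's (repository, attempts, wait) argument order and wait type, are inferred from the tests in this change:

package importer

import (
	"time"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

// allTagsWithRetry is an illustrative helper, not part of this change: it
// wraps any repository in the retry decorator and lists its tags through the
// TagService returned by Tags.
func allTagsWithRetry(ctx context.Context, repo distribution.Repository) ([]string, error) {
	retrying := NewRetryRepository(repo, 2, 100*time.Millisecond)
	return retrying.Tags(ctx).All(ctx)
}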
| ... | ... |
@@ -16,6 +16,7 @@ import ( |
| 16 | 16 |
"github.com/docker/distribution/context" |
| 17 | 17 |
"github.com/docker/distribution/digest" |
| 18 | 18 |
"github.com/docker/distribution/manifest/schema1" |
| 19 |
+ "github.com/docker/distribution/reference" |
|
| 19 | 20 |
"github.com/docker/distribution/registry/api/errcode" |
| 20 | 21 |
|
| 21 | 22 |
kapi "k8s.io/kubernetes/pkg/api" |
| ... | ... |
@@ -36,40 +37,38 @@ func (r *mockRetriever) Repository(ctx gocontext.Context, registry *url.URL, rep |
| 36 | 36 |
} |
| 37 | 37 |
|
| 38 | 38 |
type mockRepository struct {
|
| 39 |
- repoErr, getErr, getByTagErr, tagsErr, err error |
|
| 39 |
+ repoErr, getErr, getTagErr, tagErr, untagErr, allTagErr, err error |
|
| 40 | 40 |
|
| 41 | 41 |
blobs *mockBlobStore |
| 42 | 42 |
|
| 43 | 43 |
manifest *schema1.SignedManifest |
| 44 |
- tags []string |
|
| 44 |
+ tags map[string]string |
|
| 45 | 45 |
} |
| 46 | 46 |
|
| 47 | 47 |
func (r *mockRepository) Name() string { return "test" }
|
| 48 |
+func (r *mockRepository) Named() reference.Named {
|
|
| 49 |
+ named, _ := reference.WithName("test")
|
|
| 50 |
+ return named |
|
| 51 |
+} |
|
| 48 | 52 |
|
| 49 | 53 |
func (r *mockRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
|
| 50 | 54 |
return r, r.repoErr |
| 51 | 55 |
} |
| 52 | 56 |
func (r *mockRepository) Blobs(ctx context.Context) distribution.BlobStore { return r.blobs }
|
| 53 |
-func (r *mockRepository) Signatures() distribution.SignatureService { return nil }
|
|
| 54 |
-func (r *mockRepository) Exists(dgst digest.Digest) (bool, error) {
|
|
| 57 |
+func (r *mockRepository) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
|
|
| 55 | 58 |
return false, r.getErr |
| 56 | 59 |
} |
| 57 |
-func (r *mockRepository) Get(dgst digest.Digest) (*schema1.SignedManifest, error) {
|
|
| 60 |
+func (r *mockRepository) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
|
|
| 58 | 61 |
return r.manifest, r.getErr |
| 59 | 62 |
} |
| 60 |
-func (r *mockRepository) Enumerate() ([]digest.Digest, error) {
|
|
| 61 |
- return nil, r.getErr |
|
| 62 |
-} |
|
| 63 |
-func (r *mockRepository) Delete(dgst digest.Digest) error { return fmt.Errorf("not implemented") }
|
|
| 64 |
-func (r *mockRepository) Put(manifest *schema1.SignedManifest) error {
|
|
| 63 |
+func (r *mockRepository) Delete(ctx context.Context, dgst digest.Digest) error {
|
|
| 65 | 64 |
return fmt.Errorf("not implemented")
|
| 66 | 65 |
} |
| 67 |
-func (r *mockRepository) Tags() ([]string, error) { return r.tags, r.tagsErr }
|
|
| 68 |
-func (r *mockRepository) ExistsByTag(tag string) (bool, error) {
|
|
| 69 |
- return false, r.tagsErr |
|
| 66 |
+func (r *mockRepository) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
|
|
| 67 |
+ return "", fmt.Errorf("not implemented")
|
|
| 70 | 68 |
} |
| 71 |
-func (r *mockRepository) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) {
|
|
| 72 |
- return r.manifest, r.getByTagErr |
|
| 69 |
+func (r *mockRepository) Tags(ctx context.Context) distribution.TagService {
|
|
| 70 |
+ return &mockTagService{repo: r}
|
|
| 73 | 71 |
} |
| 74 | 72 |
|
| 75 | 73 |
type mockBlobStore struct {
|
| ... | ... |
@@ -90,6 +89,48 @@ func (r *mockBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribut |
| 90 | 90 |
return nil, r.openErr |
| 91 | 91 |
} |
| 92 | 92 |
|
| 93 |
+type mockTagService struct {
|
|
| 94 |
+ distribution.TagService |
|
| 95 |
+ |
|
| 96 |
+ repo *mockRepository |
|
| 97 |
+} |
|
| 98 |
+ |
|
| 99 |
+func (r *mockTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
|
|
| 100 |
+ v, ok := r.repo.tags[tag] |
|
| 101 |
+ if !ok {
|
|
| 102 |
+ return distribution.Descriptor{}, r.repo.getTagErr
|
|
| 103 |
+ } |
|
| 104 |
+ dgst, err := digest.ParseDigest(v) |
|
| 105 |
+ if err != nil {
|
|
| 106 |
+ panic(err) |
|
| 107 |
+ } |
|
| 108 |
+ return distribution.Descriptor{Digest: dgst}, r.repo.getTagErr
|
|
| 109 |
+} |
|
| 110 |
+ |
|
| 111 |
+func (r *mockTagService) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
|
|
| 112 |
+ r.repo.tags[tag] = desc.Digest.String() |
|
| 113 |
+ return r.repo.tagErr |
|
| 114 |
+} |
|
| 115 |
+ |
|
| 116 |
+func (r *mockTagService) Untag(ctx context.Context, tag string) error {
|
|
| 117 |
+ if _, ok := r.repo.tags[tag]; ok {
|
|
| 118 |
+ delete(r.repo.tags, tag) |
|
| 119 |
+ } |
|
| 120 |
+ return r.repo.untagErr |
|
| 121 |
+} |
|
| 122 |
+ |
|
| 123 |
+func (r *mockTagService) All(ctx context.Context) (res []string, err error) {
|
|
| 124 |
+ err = r.repo.allTagErr |
|
| 125 |
+ for tag := range r.repo.tags {
|
|
| 126 |
+ res = append(res, tag) |
|
| 127 |
+ } |
|
| 128 |
+ return |
|
| 129 |
+} |
|
| 130 |
+ |
|
| 131 |
+func (r *mockTagService) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
|
|
| 132 |
+ return nil, fmt.Errorf("not implemented")
|
|
| 133 |
+} |
|
| 134 |
+ |
|
| 93 | 135 |
func TestSchema1ToImage(t *testing.T) {
|
| 94 | 136 |
m := &schema1.SignedManifest{}
|
| 95 | 137 |
if err := json.Unmarshal([]byte(etcdManifest), m); err != nil {
|
| ... | ... |
@@ -270,21 +311,19 @@ func TestRetryFailure(t *testing.T) {
|
| 270 | 270 |
} |
| 271 | 271 |
|
| 272 | 272 |
// do not retry on non standard errors |
| 273 |
- repo = &mockRepository{getByTagErr: fmt.Errorf("does not support v2 API")}
|
|
| 273 |
+ repo = &mockRepository{getErr: fmt.Errorf("does not support v2 API")}
|
|
| 274 | 274 |
r = NewRetryRepository(repo, 4, 0).(*retryRepository) |
| 275 | 275 |
m, err := r.Manifests(nil) |
| 276 | 276 |
if err != nil {
|
| 277 | 277 |
t.Fatal(err) |
| 278 | 278 |
} |
| 279 |
- if m, err := m.GetByTag("test"); m != nil || err != repo.getByTagErr || r.retries != 4 {
|
|
| 279 |
+ if _, err := m.Get(nil, digest.Digest("foo")); err != repo.getErr || r.retries != 4 {
|
|
| 280 | 280 |
t.Fatalf("unexpected: %v %v %#v", m, err, r)
|
| 281 | 281 |
} |
| 282 | 282 |
|
| 283 | 283 |
// retry four times |
| 284 | 284 |
repo = &mockRepository{
|
| 285 |
- getByTagErr: errcode.ErrorCodeUnauthorized, |
|
| 286 |
- getErr: errcode.ErrorCodeUnauthorized, |
|
| 287 |
- tagsErr: errcode.ErrorCodeUnauthorized, |
|
| 285 |
+ getErr: errcode.ErrorCodeUnauthorized, |
|
| 288 | 286 |
blobs: &mockBlobStore{
|
| 289 | 287 |
serveErr: errcode.ErrorCodeUnauthorized, |
| 290 | 288 |
statErr: errcode.ErrorCodeUnauthorized, |
| ... | ... |
@@ -295,27 +334,12 @@ func TestRetryFailure(t *testing.T) {
|
| 295 | 295 |
if m, err = r.Manifests(nil); err != nil {
|
| 296 | 296 |
t.Fatal(err) |
| 297 | 297 |
} |
| 298 |
- if m, err := m.GetByTag("test"); m != nil || err != repo.getByTagErr || r.retries != 0 {
|
|
| 299 |
- t.Fatalf("unexpected: %v %v %#v", m, err, r)
|
|
| 300 |
- } |
|
| 301 | 298 |
r.retries = 2 |
| 302 |
- if m, err := m.Get(digest.Digest("foo")); m != nil || err != repo.getErr || r.retries != 0 {
|
|
| 303 |
- t.Fatalf("unexpected: %v %v %#v", m, err, r)
|
|
| 299 |
+ if _, err := m.Get(nil, digest.Digest("foo")); err != repo.getErr || r.retries != 0 {
|
|
| 300 |
+ t.Fatalf("unexpected: %v %#v", err, r)
|
|
| 304 | 301 |
} |
| 305 | 302 |
r.retries = 2 |
| 306 |
- if m, err := m.Exists("foo"); m || err != repo.getErr || r.retries != 0 {
|
|
| 307 |
- t.Fatalf("unexpected: %v %v %#v", m, err, r)
|
|
| 308 |
- } |
|
| 309 |
- r.retries = 2 |
|
| 310 |
- if m, err := m.Enumerate(); m != nil || err != repo.getErr || r.retries != 0 {
|
|
| 311 |
- t.Fatalf("unexpected: %v %v %#v", m, err, r)
|
|
| 312 |
- } |
|
| 313 |
- r.retries = 2 |
|
| 314 |
- if m, err := m.ExistsByTag("foo"); m || err != repo.getErr || r.retries != 0 {
|
|
| 315 |
- t.Fatalf("unexpected: %v %v %#v", m, err, r)
|
|
| 316 |
- } |
|
| 317 |
- r.retries = 2 |
|
| 318 |
- if m, err := m.Tags(); m != nil || err != repo.tagsErr || r.retries != 0 {
|
|
| 303 |
+ if m, err := m.Exists(nil, "foo"); m || err != repo.getErr || r.retries != 0 {
|
|
| 319 | 304 |
t.Fatalf("unexpected: %v %v %#v", m, err, r)
|
| 320 | 305 |
} |
| 321 | 306 |
|
| ... | ... |
@@ -324,15 +348,15 @@ func TestRetryFailure(t *testing.T) {
|
| 324 | 324 |
if err != nil {
|
| 325 | 325 |
t.Fatal(err) |
| 326 | 326 |
} |
| 327 |
- if _, err := b.Stat(nil, digest.Digest("x")); err != repo.getByTagErr || r.retries != 0 {
|
|
| 328 |
- t.Fatalf("unexpected: %v %v %#v", m, err, r)
|
|
| 327 |
+ if _, err := b.Stat(nil, digest.Digest("x")); err != repo.blobs.statErr || r.retries != 0 {
|
|
| 328 |
+ t.Fatalf("unexpected: %v %#v", err, r)
|
|
| 329 | 329 |
} |
| 330 | 330 |
r.retries = 2 |
| 331 |
- if err := b.ServeBlob(nil, nil, nil, digest.Digest("foo")); err != repo.getErr || r.retries != 0 {
|
|
| 332 |
- t.Fatalf("unexpected: %v %v %#v", m, err, r)
|
|
| 331 |
+ if err := b.ServeBlob(nil, nil, nil, digest.Digest("foo")); err != repo.blobs.serveErr || r.retries != 0 {
|
|
| 332 |
+ t.Fatalf("unexpected: %v %#v", err, r)
|
|
| 333 | 333 |
} |
| 334 | 334 |
r.retries = 2 |
| 335 |
- if _, err := b.Open(nil, digest.Digest("foo")); err != repo.getErr || r.retries != 0 {
|
|
| 336 |
- t.Fatalf("unexpected: %v %v %#v", m, err, r)
|
|
| 335 |
+ if _, err := b.Open(nil, digest.Digest("foo")); err != repo.blobs.openErr || r.retries != 0 {
|
|
| 336 |
+ t.Fatalf("unexpected: %v %#v", err, r)
|
|
| 337 | 337 |
} |
| 338 | 338 |
} |
| ... | ... |
@@ -26,6 +26,15 @@ func (s *noopCredentialStore) Basic(url *url.URL) (string, string) {
|
| 26 | 26 |
return "", "" |
| 27 | 27 |
} |
| 28 | 28 |
|
| 29 |
+func (s *noopCredentialStore) RefreshToken(url *url.URL, service string) string {
|
|
| 30 |
+ glog.Infof("asked to provide RefreshToken for %s", url)
|
|
| 31 |
+ return "" |
|
| 32 |
+} |
|
| 33 |
+ |
|
| 34 |
+func (s *noopCredentialStore) SetRefreshToken(url *url.URL, service string, token string) {
|
|
| 35 |
+ glog.Infof("asked to provide SetRefreshToken for %s", url)
|
|
| 36 |
+} |
|
| 37 |
+ |
|
| 29 | 38 |
func NewBasicCredentials() *BasicCredentials {
|
| 30 | 39 |
return &BasicCredentials{}
|
| 31 | 40 |
} |
| ... | ... |
@@ -56,6 +65,13 @@ func (c *BasicCredentials) Basic(url *url.URL) (string, string) {
|
| 56 | 56 |
return "", "" |
| 57 | 57 |
} |
| 58 | 58 |
|
| 59 |
+func (c *BasicCredentials) RefreshToken(url *url.URL, service string) string {
|
|
| 60 |
+ return "" |
|
| 61 |
+} |
|
| 62 |
+ |
|
| 63 |
+func (c *BasicCredentials) SetRefreshToken(url *url.URL, service string, token string) {
|
|
| 64 |
+} |
|
| 65 |
+ |
|
| 59 | 66 |
func NewLocalCredentials() auth.CredentialStore {
|
| 60 | 67 |
return &keyringCredentialStore{credentialprovider.NewDockerKeyring()}
|
| 61 | 68 |
} |
| ... | ... |
@@ -68,6 +84,13 @@ func (s *keyringCredentialStore) Basic(url *url.URL) (string, string) {
|
| 68 | 68 |
return basicCredentialsFromKeyring(s.DockerKeyring, url) |
| 69 | 69 |
} |
| 70 | 70 |
|
| 71 |
+func (s *keyringCredentialStore) RefreshToken(url *url.URL, service string) string {
|
|
| 72 |
+ return "" |
|
| 73 |
+} |
|
| 74 |
+ |
|
| 75 |
+func (s *keyringCredentialStore) SetRefreshToken(url *url.URL, service string, token string) {
|
|
| 76 |
+} |
|
| 77 |
+ |
|
| 71 | 78 |
func NewCredentialsForSecrets(secrets []kapi.Secret) *SecretCredentialStore {
|
| 72 | 79 |
return &SecretCredentialStore{secrets: secrets}
|
| 73 | 80 |
} |
| ... | ... |
@@ -88,6 +111,13 @@ func (s *SecretCredentialStore) Basic(url *url.URL) (string, string) {
|
| 88 | 88 |
return basicCredentialsFromKeyring(s.init(), url) |
| 89 | 89 |
} |
| 90 | 90 |
|
| 91 |
+func (s *SecretCredentialStore) RefreshToken(url *url.URL, service string) string {
|
|
| 92 |
+ return "" |
|
| 93 |
+} |
|
| 94 |
+ |
|
| 95 |
+func (s *SecretCredentialStore) SetRefreshToken(url *url.URL, service string, token string) {
|
|
| 96 |
+} |
|
| 97 |
+ |
|
| 91 | 98 |
func (s *SecretCredentialStore) Err() error {
|
| 92 | 99 |
s.lock.Lock() |
| 93 | 100 |
defer s.lock.Unlock() |
| ... | ... |
@@ -10,6 +10,8 @@ import (
 
 	"github.com/docker/distribution"
 	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest/schema1"
+	"github.com/docker/distribution/manifest/schema2"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/distribution/registry/api/errcode"
 	"github.com/docker/distribution/registry/api/v2"
| ... | ... |
@@ -282,6 +284,21 @@ func applyErrorToRepository(repository *importRepository, err error) {
 	}
 }
 
+func formatRepositoryError(repository *importRepository, refName string, refID string, defErr error) (err error) {
+	err = defErr
+	switch {
+	case isDockerError(err, v2.ErrorCodeManifestUnknown):
+		ref := repository.Ref
+		ref.Tag, ref.ID = refName, refID
+		err = kapierrors.NewNotFound(api.Resource("dockerimage"), ref.Exact())
+	case isDockerError(err, errcode.ErrorCodeUnauthorized):
+		err = kapierrors.NewUnauthorized(fmt.Sprintf("you may not have access to the Docker image %q", repository.Ref.Exact()))
+	case strings.HasSuffix(err.Error(), "no basic auth credentials"):
+		err = kapierrors.NewUnauthorized(fmt.Sprintf("you may not have access to the Docker image %q", repository.Ref.Exact()))
+	}
+	return
+}
+
 // importRepositoryFromDocker loads the tags and images requested in the passed importRepository, obeying the
 // optional rate limiter. Errors are set onto the individual tags and digest objects.
 func importRepositoryFromDocker(ctx gocontext.Context, retriever RepositoryRetriever, repository *importRepository, limiter flowcontrol.RateLimiter) {
| ... | ... |
@@ -325,9 +342,12 @@ func importRepositoryFromDocker(ctx gocontext.Context, retriever RepositoryRetri
 		return
 	}
 
+	// get a blob context
+	b := repo.Blobs(ctx)
+
 	// if repository import is requested (MaximumTags), attempt to load the tags, sort them, and request the first N
 	if count := repository.MaximumTags; count > 0 || count == -1 {
-		tags, err := s.Tags()
+		tags, err := repo.Tags(ctx).All(ctx)
 		if err != nil {
 			glog.V(5).Infof("unable to access tags for repository %#v: %#v", repository, err)
 			switch {
| ... | ... |
@@ -372,27 +392,40 @@ func importRepositoryFromDocker(ctx gocontext.Context, retriever RepositoryRetri
 			continue
 		}
 		limiter.Accept()
-		m, err := s.Get(d)
+		manifest, err := s.Get(ctx, d)
 		if err != nil {
 			glog.V(5).Infof("unable to access digest %q for repository %#v: %#v", d, repository, err)
-			switch {
-			case isDockerError(err, v2.ErrorCodeManifestUnknown):
-				ref := repository.Ref
-				ref.Tag, ref.ID = "", importDigest.Name
-				err = kapierrors.NewNotFound(api.Resource("dockerimage"), ref.Exact())
-			case isDockerError(err, errcode.ErrorCodeUnauthorized):
-				err = kapierrors.NewUnauthorized(fmt.Sprintf("you may not have access to the Docker image %q", repository.Ref.Exact()))
-			case strings.HasSuffix(err.Error(), "no basic auth credentials"):
-				err = kapierrors.NewUnauthorized(fmt.Sprintf("you may not have access to the Docker image %q", repository.Ref.Exact()))
+			importDigest.Err = formatRepositoryError(repository, "", importDigest.Name, err)
+			continue
+		}
+
+		if signedManifest, isSchema1 := manifest.(*schema1.SignedManifest); isSchema1 {
+			importDigest.Image, err = schema1ToImage(signedManifest, d)
+		} else if deserializedManifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 {
+			imageConfig, err := b.Get(ctx, deserializedManifest.Config.Digest)
+			if err != nil {
+				glog.V(5).Infof("unable to access the image config using digest %q for repository %#v: %#v", d, repository, err)
+				if isDockerError(err, v2.ErrorCodeManifestUnknown) {
+					ref := repository.Ref
+					ref.ID = deserializedManifest.Config.Digest.String()
+					importDigest.Err = kapierrors.NewNotFound(api.Resource("dockerimage"), ref.Exact())
+				} else {
+					importDigest.Err = formatRepositoryError(repository, "", importDigest.Name, err)
+				}
+				continue
 			}
-			importDigest.Err = err
+
+			importDigest.Image, err = schema2ToImage(deserializedManifest, imageConfig, d)
+		} else {
+			glog.V(5).Infof("unsupported manifest type: %T", manifest)
 			continue
 		}
-		importDigest.Image, err = schema1ToImage(m, d)
+
 		if err != nil {
 			importDigest.Err = err
 			continue
 		}
+
 		if err := api.ImageWithMetadata(importDigest.Image); err != nil {
 			importDigest.Err = err
 			continue
| ... | ... |
@@ -405,23 +438,34 @@ func importRepositoryFromDocker(ctx gocontext.Context, retriever RepositoryRetri
 			continue
 		}
 		limiter.Accept()
-		m, err := s.GetByTag(importTag.Name)
+		desc, err := repo.Tags(ctx).Get(ctx, importTag.Name)
 		if err != nil {
-			glog.V(5).Infof("unable to access tag %q for repository %#v: %#v", importTag.Name, repository, err)
-			switch {
-			case isDockerError(err, v2.ErrorCodeManifestUnknown):
-				ref := repository.Ref
-				ref.Tag = importTag.Name
-				err = kapierrors.NewNotFound(api.Resource("dockerimage"), ref.Exact())
-			case isDockerError(err, errcode.ErrorCodeUnauthorized):
-				err = kapierrors.NewUnauthorized(fmt.Sprintf("you may not have access to the Docker image %q", repository.Ref.Exact()))
-			case strings.HasSuffix(err.Error(), "no basic auth credentials"):
-				err = kapierrors.NewUnauthorized(fmt.Sprintf("you may not have access to the Docker image %q", repository.Ref.Exact()))
+			glog.V(5).Infof("unable to get tag %q for repository %#v: %#v", importTag.Name, repository, err)
+			importTag.Err = formatRepositoryError(repository, importTag.Name, "", err)
+			continue
+		}
+		manifest, err := s.Get(ctx, desc.Digest)
+		if err != nil {
+			glog.V(5).Infof("unable to access digest %q for tag %q for repository %#v: %#v", desc.Digest, importTag.Name, repository, err)
+			importTag.Err = formatRepositoryError(repository, importTag.Name, "", err)
+			continue
+		}
+
+		if signedManifest, isSchema1 := manifest.(*schema1.SignedManifest); isSchema1 {
+			importTag.Image, err = schema1ToImage(signedManifest, "")
+		} else if deserializedManifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 {
+			imageConfig, err := b.Get(ctx, deserializedManifest.Config.Digest)
+			if err != nil {
+				glog.V(5).Infof("unable to access image config using digest %q for tag %q for repository %#v: %#v", desc.Digest, importTag.Name, repository, err)
+				importTag.Err = formatRepositoryError(repository, importTag.Name, "", err)
+				continue
 			}
-			importTag.Err = err
+			importTag.Image, err = schema2ToImage(deserializedManifest, imageConfig, "")
+		} else {
+			glog.V(5).Infof("unsupported manifest type: %T", manifest)
 			continue
 		}
-		importTag.Image, err = schema1ToImage(m, "")
+
 		if err != nil {
 			importTag.Err = err
 			continue
| ... | ... |
@@ -32,14 +32,14 @@ func expectStatusError(status unversioned.Status, message string) bool {
|
| 32 | 32 |
} |
| 33 | 33 |
|
| 34 | 34 |
func TestImport(t *testing.T) {
|
| 35 |
- m := &schema1.SignedManifest{Raw: []byte(etcdManifest)}
|
|
| 35 |
+ m := &schema1.SignedManifest{}
|
|
| 36 | 36 |
if err := json.Unmarshal([]byte(etcdManifest), m); err != nil {
|
| 37 | 37 |
t.Fatal(err) |
| 38 | 38 |
} |
| 39 | 39 |
insecureRetriever := &mockRetriever{
|
| 40 | 40 |
repo: &mockRepository{
|
| 41 |
- getByTagErr: fmt.Errorf("no such tag"),
|
|
| 42 |
- getErr: fmt.Errorf("no such digest"),
|
|
| 41 |
+ getTagErr: fmt.Errorf("no such tag"),
|
|
| 42 |
+ getErr: fmt.Errorf("no such digest"),
|
|
| 43 | 43 |
}, |
| 44 | 44 |
} |
| 45 | 45 |
testCases := []struct {
|
| ... | ... |
@@ -65,8 +65,8 @@ func TestImport(t *testing.T) {
|
| 65 | 65 |
{
|
| 66 | 66 |
retriever: &mockRetriever{
|
| 67 | 67 |
repo: &mockRepository{
|
| 68 |
- getByTagErr: fmt.Errorf("no such tag"),
|
|
| 69 |
- getErr: fmt.Errorf("no such digest"),
|
|
| 68 |
+ getTagErr: fmt.Errorf("no such tag"),
|
|
| 69 |
+ getErr: fmt.Errorf("no such digest"),
|
|
| 70 | 70 |
}, |
| 71 | 71 |
}, |
| 72 | 72 |
isi: api.ImageStreamImport{
|
| ... | ... |
@@ -158,8 +158,16 @@ func TestImport(t *testing.T) {
|
| 158 | 158 |
{
|
| 159 | 159 |
retriever: &mockRetriever{
|
| 160 | 160 |
repo: &mockRepository{
|
| 161 |
- tags: []string{"v1", "other", "v2", "3", "3.1", "abc"},
|
|
| 162 |
- getByTagErr: fmt.Errorf("no such tag"),
|
|
| 161 |
+ manifest: m, |
|
| 162 |
+ tags: map[string]string{
|
|
| 163 |
+ "v1": "sha256:958608f8ecc1dc62c93b6c610f3a834dae4220c9642e6e8b4e0f2b3ad7cbd238", |
|
| 164 |
+ "other": "sha256:958608f8ecc1dc62c93b6c610f3a834dae4220c9642e6e8b4e0f2b3ad7cbd238", |
|
| 165 |
+ "v2": "sha256:958608f8ecc1dc62c93b6c610f3a834dae4220c9642e6e8b4e0f2b3ad7cbd238", |
|
| 166 |
+ "3": "sha256:958608f8ecc1dc62c93b6c610f3a834dae4220c9642e6e8b4e0f2b3ad7cbd238", |
|
| 167 |
+ "3.1": "sha256:958608f8ecc1dc62c93b6c610f3a834dae4220c9642e6e8b4e0f2b3ad7cbd238", |
|
| 168 |
+ "abc": "sha256:958608f8ecc1dc62c93b6c610f3a834dae4220c9642e6e8b4e0f2b3ad7cbd238", |
|
| 169 |
+ }, |
|
| 170 |
+ getTagErr: fmt.Errorf("no such tag"),
|
|
| 163 | 171 |
}, |
| 164 | 172 |
}, |
| 165 | 173 |
isi: api.ImageStreamImport{
|
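With tags resolved through the TagService, the mock repository's `tags` field now maps tag names to manifest digests and the error hook is renamed to `getTagErr`. A hypothetical sketch of how such a stub can satisfy the lookup (only `Get` is shown; the real `distribution.TagService` interface also defines `Tag`, `Untag`, `All`, and `Lookup`):

```go
// Hypothetical stub backing repo.Tags(ctx) in tests: resolves a tag to a
// descriptor from a fixed map, or returns the configured error.
type mockTagService struct {
	tags   map[string]string // tag name -> manifest digest
	getErr error
}

func (m *mockTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
	if m.getErr != nil {
		return distribution.Descriptor{}, m.getErr
	}
	dgst, ok := m.tags[tag]
	if !ok {
		return distribution.Descriptor{}, fmt.Errorf("tag %q not found", tag)
	}
	return distribution.Descriptor{Digest: digest.Digest(dgst)}, nil
}
```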
| ... | ... |
@@ -111,8 +111,7 @@ func TestWatch(t *testing.T) {
|
| 111 | 111 |
) |
| 112 | 112 |
} |
| 113 | 113 |
|
| 114 |
-const etcdManifest = ` |
|
| 115 |
-{
|
|
| 114 |
+const etcdManifest = `{
|
|
| 116 | 115 |
"schemaVersion": 1, |
| 117 | 116 |
"tag": "latest", |
| 118 | 117 |
"name": "coreos/etcd", |
| ... | ... |
@@ -161,8 +160,7 @@ const etcdManifest = ` |
| 161 | 161 |
] |
| 162 | 162 |
}` |
| 163 | 163 |
|
| 164 |
-const etcdManifestNoSignature = ` |
|
| 165 |
-{
|
|
| 164 |
+const etcdManifestNoSignature = `{
|
|
| 166 | 165 |
"schemaVersion": 1, |
| 167 | 166 |
"tag": "latest", |
| 168 | 167 |
"name": "coreos/etcd", |
| ... | ... |
@@ -362,13 +360,13 @@ func TestUpdateResetsMetadata(t *testing.T) {
|
| 362 | 362 |
return true |
| 363 | 363 |
}, |
| 364 | 364 |
existing: &api.Image{
|
| 365 |
- ObjectMeta: kapi.ObjectMeta{Name: "sha256:54820434e2ccd1596892668504fef12ed980f0cc312f60eac93d6864445ba123", ResourceVersion: "1"},
|
|
| 365 |
+ ObjectMeta: kapi.ObjectMeta{Name: "sha256:958608f8ecc1dc62c93b6c610f3a834dae4220c9642e6e8b4e0f2b3ad7cbd238", ResourceVersion: "1"},
|
|
| 366 | 366 |
DockerImageReference: "openshift/ruby-19-centos-2", |
| 367 | 367 |
DockerImageLayers: []api.ImageLayer{},
|
| 368 | 368 |
DockerImageManifest: etcdManifestNoSignature, |
| 369 | 369 |
}, |
| 370 | 370 |
image: &api.Image{
|
| 371 |
- ObjectMeta: kapi.ObjectMeta{Name: "sha256:54820434e2ccd1596892668504fef12ed980f0cc312f60eac93d6864445ba123", ResourceVersion: "1"},
|
|
| 371 |
+ ObjectMeta: kapi.ObjectMeta{Name: "sha256:958608f8ecc1dc62c93b6c610f3a834dae4220c9642e6e8b4e0f2b3ad7cbd238", ResourceVersion: "1"},
|
|
| 372 | 372 |
DockerImageReference: "openshift/ruby-19-centos", |
| 373 | 373 |
DockerImageMetadata: api.DockerImage{ID: "foo"},
|
| 374 | 374 |
DockerImageManifest: etcdManifest, |
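The expected image names in this test track the fixtures: the `etcdManifest` and `etcdManifestNoSignature` constants above no longer start with a newline, and a content digest is computed over exact bytes, so the previously expected `sha256:548204…` value is presumably stale for that reason. A small illustration of that byte-sensitivity with the updated digest package:

```go
// A content digest changes with any byte of input, including leading whitespace.
withNewline := digest.FromBytes([]byte("\n" + etcdManifest))
trimmed := digest.FromBytes([]byte(etcdManifest))
fmt.Println(withNewline == trimmed) // false: the reformatted fixture digests differently
```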
| ... | ... |
@@ -75,6 +75,13 @@ func (imageStrategy) PrepareForUpdate(obj, old runtime.Object) {
|
| 75 | 75 |
newImage.DockerImageLayers = oldImage.DockerImageLayers |
| 76 | 76 |
newImage.Signatures = oldImage.Signatures |
| 77 | 77 |
|
| 78 |
+ if oldImage.DockerImageSignatures != nil {
|
|
| 79 |
+ newImage.DockerImageSignatures = nil |
|
| 80 |
+ for _, v := range oldImage.DockerImageSignatures {
|
|
| 81 |
+ newImage.DockerImageSignatures = append(newImage.DockerImageSignatures, v) |
|
| 82 |
+ } |
|
| 83 |
+ } |
|
| 84 |
+ |
|
| 78 | 85 |
// allow an image update that results in the manifest matching the digest (the name) |
| 79 | 86 |
newManifest := newImage.DockerImageManifest |
| 80 | 87 |
newImage.DockerImageManifest = oldImage.DockerImageManifest |
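PrepareForUpdate now carries the schema 1 signature blobs over from the stored image, mirroring how `Signatures` is preserved just above. Assuming the field is a `[][]byte` (the API describes it as opaque blobs), the element-wise append is equivalent to copying into a fresh slice, which keeps the new object from aliasing the old image's backing array:

```go
// Equivalent copy sketch, assuming DockerImageSignatures is [][]byte:
// allocate a new slice so newImage does not share the old image's backing array
// (the individual blob slices are still shared, as with the append loop).
if oldImage.DockerImageSignatures != nil {
	newImage.DockerImageSignatures = make([][]byte, len(oldImage.DockerImageSignatures))
	copy(newImage.DockerImageSignatures, oldImage.DockerImageSignatures)
}
```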
| ... | ... |
@@ -87,6 +94,17 @@ func (imageStrategy) PrepareForUpdate(obj, old runtime.Object) {
|
| 87 | 87 |
} |
| 88 | 88 |
} |
| 89 | 89 |
|
| 90 |
+ newImageConfig := newImage.DockerImageConfig |
|
| 91 |
+ newImage.DockerImageConfig = oldImage.DockerImageConfig |
|
| 92 |
+ if newImageConfig != oldImage.DockerImageConfig && len(newImageConfig) > 0 {
|
|
| 93 |
+ ok, err := api.ImageConfigMatchesImage(newImage, []byte(newImageConfig)) |
|
| 94 |
+ if err != nil {
|
|
| 95 |
+ utilruntime.HandleError(fmt.Errorf("attempted to validate that a new config for %q mentioned in the manifest, but failed: %v", oldImage.Name, err))
|
|
| 96 |
+ } else if ok {
|
|
| 97 |
+ newImage.DockerImageConfig = newImageConfig |
|
| 98 |
+ } |
|
| 99 |
+ } |
|
| 100 |
+ |
|
| 90 | 101 |
if err := api.ImageWithMetadata(newImage); err != nil {
|
| 91 | 102 |
utilruntime.HandleError(fmt.Errorf("Unable to update image metadata for %q: %v", newImage.Name, err))
|
| 92 | 103 |
} |
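A new DockerImageConfig value is only accepted when `api.ImageConfigMatchesImage` reports that the submitted blob really is the config referenced by the image's manifest; otherwise the stored config is kept. The helper itself is not part of this hunk, but the idea is to hash the candidate blob and compare it with the config digest recorded in the schema 2 manifest. An illustrative sketch (details of the real helper may differ):

```go
// Illustrative only: verify that a candidate config blob hashes to the config
// digest named by the image's schema 2 manifest.
func configMatchesManifest(rawManifest string, imageConfig []byte) (bool, error) {
	m := &schema2.DeserializedManifest{}
	if err := json.Unmarshal([]byte(rawManifest), m); err != nil {
		return false, err
	}
	verifier, err := digest.NewDigestVerifier(m.Config.Digest)
	if err != nil {
		return false, err
	}
	if _, err := verifier.Write(imageConfig); err != nil {
		return false, err
	}
	return verifier.Verified(), nil
}
```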
| ... | ... |
@@ -179,7 +179,13 @@ func mockRegistryHandler(t *testing.T, requireAuth bool, count *int) http.Handle |
| 179 | 179 |
case "/v2/test/image3/manifests/latest", "/v2/test/image3/manifests/v2", "/v2/test/image3/manifests/" + danglingDigest: |
| 180 | 180 |
errcode.ServeJSON(w, errcode.ErrorCodeUnknown) |
| 181 | 181 |
case "/v2/test/image3/manifests/v1", "/v2/test/image3/manifests/" + etcdDigest: |
| 182 |
- w.Write([]byte(etcdManifest)) |
|
| 182 |
+ if r.Method == "HEAD" {
|
|
| 183 |
+ w.Header().Set("Content-Length", fmt.Sprintf("%d", len(etcdManifest)))
|
|
| 184 |
+ w.Header().Set("Docker-Content-Digest", etcdDigest)
|
|
| 185 |
+ w.WriteHeader(http.StatusOK) |
|
| 186 |
+ } else {
|
|
| 187 |
+ w.Write([]byte(etcdManifest)) |
|
| 188 |
+ } |
|
| 183 | 189 |
default: |
| 184 | 190 |
t.Fatalf("unexpected request %s: %#v", r.URL.Path, r)
|
| 185 | 191 |
} |
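The updated registry client resolves a tag with a HEAD request on the manifest endpoint, reading the digest from the `Docker-Content-Digest` header, before it GETs the manifest by that digest, which is why the mock registry now has to answer HEAD as well as GET. A minimal sketch of that contract as a standalone handler (fixture names are illustrative):

```go
// Minimal sketch of a manifest endpoint that satisfies both HEAD-based tag
// resolution and the subsequent GET of the manifest body.
func manifestHandler(body []byte, dgst string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", schema1.MediaTypeSignedManifest)
		w.Header().Set("Docker-Content-Digest", dgst)
		w.Header().Set("Content-Length", fmt.Sprintf("%d", len(body)))
		if r.Method == "HEAD" {
			// Tag resolution only needs the headers.
			w.WriteHeader(http.StatusOK)
			return
		}
		w.Write(body)
	}
}
```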
| ... | ... |
@@ -462,21 +468,29 @@ func TestImageStreamImportScheduled(t *testing.T) {
|
| 462 | 462 |
case "/v2/": |
| 463 | 463 |
w.Header().Set("Docker-Distribution-API-Version", "registry/2.0")
|
| 464 | 464 |
w.Write([]byte(`{}`))
|
| 465 |
- case "/v2/test/image/manifests/latest": |
|
| 465 |
+ case "/v2/test/image/manifests/latest", "/v2/test/image/manifests/" + etcdDigest, "/v2/test/image/manifests/" + phpDigest: |
|
| 466 | 466 |
count++ |
| 467 | 467 |
t.Logf("serving %d", count)
|
| 468 |
- var manifest string |
|
| 468 |
+ var manifest, digest string |
|
| 469 | 469 |
switch count {
|
| 470 |
- case 1: |
|
| 470 |
+ case 1, 2: |
|
| 471 |
+ digest = etcdDigest |
|
| 471 | 472 |
manifest = etcdManifest |
| 472 |
- case 2, 3: |
|
| 473 |
+ case 3, 4, 5, 6: |
|
| 474 |
+ digest = phpDigest |
|
| 473 | 475 |
manifest = phpManifest |
| 474 | 476 |
default: |
| 475 | 477 |
w.WriteHeader(500) |
| 476 | 478 |
return |
| 477 | 479 |
} |
| 478 |
- written <- struct{}{}
|
|
| 480 |
+ if r.Method == "HEAD" {
|
|
| 481 |
+ w.Header().Set("Content-Length", fmt.Sprintf("%d", len(manifest)))
|
|
| 482 |
+ w.Header().Set("Docker-Content-Digest", digest)
|
|
| 483 |
+ w.WriteHeader(http.StatusOK) |
|
| 484 |
+ return |
|
| 485 |
+ } |
|
| 479 | 486 |
w.Write([]byte(manifest)) |
| 487 |
+ written <- struct{}{}
|
|
| 480 | 488 |
default: |
| 481 | 489 |
t.Fatalf("unexpected request %s: %#v", r.URL.Path, r)
|
| 482 | 490 |
} |
| ... | ... |
@@ -51,10 +51,7 @@ func signedManifest(name string) ([]byte, digest.Digest, error) {
|
| 51 | 51 |
if err != nil {
|
| 52 | 52 |
return []byte{}, "", fmt.Errorf("error marshaling manifest: %s", err)
|
| 53 | 53 |
} |
| 54 |
- dgst, err := digest.FromBytes(manifestBytes) |
|
| 55 |
- if err != nil {
|
|
| 56 |
- return []byte{}, "", fmt.Errorf("error calculating manifest digest: %s", err)
|
|
| 57 |
- } |
|
| 54 |
+ dgst := digest.FromBytes(manifestBytes) |
|
| 58 | 55 |
|
| 59 | 56 |
jsonSignature, err := libtrust.NewJSONSignature(manifestBytes) |
| 60 | 57 |
if err != nil {
|
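The helper above also adapts to a small API change: `digest.FromBytes` in the updated docker/distribution no longer returns an error, it always hashes with the canonical algorithm, so the error branch disappears:

```go
// Old API:  dgst, err := digest.FromBytes(manifestBytes)
// New API:  hashing cannot fail, so the call returns only the digest.
dgst := digest.FromBytes(manifestBytes)
fmt.Println(dgst.Algorithm(), dgst.Hex()) // e.g. "sha256 <hex>"
```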
| ... | ... |
@@ -98,7 +95,8 @@ func TestV2RegistryGetTags(t *testing.T) {
|
| 98 | 98 |
} |
| 99 | 99 |
|
| 100 | 100 |
config := `version: 0.1 |
| 101 |
-loglevel: debug |
|
| 101 |
+log: |
|
| 102 |
+ level: debug |
|
| 102 | 103 |
http: |
| 103 | 104 |
addr: 127.0.0.1:5000 |
| 104 | 105 |
storage: |
| ... | ... |
@@ -106,8 +104,12 @@ storage: |
| 106 | 106 |
auth: |
| 107 | 107 |
openshift: |
| 108 | 108 |
middleware: |
| 109 |
+ registry: |
|
| 110 |
+ - name: openshift |
|
| 109 | 111 |
repository: |
| 110 | 112 |
- name: openshift |
| 113 |
+ storage: |
|
| 114 |
+ - name: openshift |
|
| 111 | 115 |
` |
| 112 | 116 |
|
| 113 | 117 |
os.Setenv("OPENSHIFT_CA_DATA", string(clusterAdminClientConfig.CAData))
|
| ... | ... |
@@ -1405,6 +1405,7 @@ items: |
| 1405 | 1405 |
attributeRestrictions: null |
| 1406 | 1406 |
resources: |
| 1407 | 1407 |
- images |
| 1408 |
+ - imagestreamtags |
|
| 1408 | 1409 |
verbs: |
| 1409 | 1410 |
- delete |
| 1410 | 1411 |
- get |
| ... | ... |
@@ -1414,7 +1415,6 @@ items: |
| 1414 | 1414 |
resources: |
| 1415 | 1415 |
- imagestreamimages |
| 1416 | 1416 |
- imagestreams/secrets |
| 1417 |
- - imagestreamtags |
|
| 1418 | 1417 |
verbs: |
| 1419 | 1418 |
- get |
| 1420 | 1419 |
- apiGroups: |