
Mirror blobs to the local registry on pullthrough

Add a new option for registry pullthrough (enabled by default) that mirrors any
blob fetched via pullthrough into the local blob store. If mirroring fails, the
pull operation still succeeds. Set
`REGISTRY_MIDDLEWARE_REPOSITORY_OPENSHIFT_MIRRORPULLTHROUGH=false` to disable
mirroring.
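
For example, mirroring can be turned off on a running registry by overriding the option through the deployment config's environment. This is a sketch; it assumes the integrated registry runs from the default `docker-registry` deployment config:

```bash
# Disable blob mirroring on pullthrough for the integrated registry
# (assumes the default "docker-registry" deployment config).
oc env dc/docker-registry REGISTRY_MIDDLEWARE_REPOSITORY_OPENSHIFT_MIRRORPULLTHROUGH=false
```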

Update `oadm prune images` to accept `--all`, which considers all images as
deletion candidates, not just those pushed directly to the registry. This flag
requires `--registry-url`, since the integrated registry's URL cannot be derived
from external images' references.
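
A typical invocation, adapted from the e2e test added in this commit (the registry URL is a placeholder for your integrated registry address):

```bash
# Consider all images, including mirrored external ones, as pruning candidates.
# --registry-url must be supplied because it cannot be derived from external images.
oadm prune images --all --registry-url="${DOCKER_REGISTRY}" --keep-younger-than=0 --confirm
```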

Clayton Coleman authored on 2016/11/21 13:47:35
Showing 6 changed files
... ...
@@ -13,7 +13,7 @@ storage:
 auth:
   openshift:
     realm: openshift
-    
+
     # tokenrealm is a base URL to use for the token-granting registry endpoint.
     # If unspecified, the scheme and host for the token redirect are determined from the incoming request.
    # If specified, a scheme and host must be chosen that all registry clients can resolve and access:
... ...
@@ -27,6 +27,7 @@ middleware:
       options:
         acceptschema2: false
         pullthrough: true
+        mirrorpullthrough: true
         enforcequota: false
         projectcachettl: 1m
         blobrepositorycachettl: 10m
... ...
@@ -34,7 +34,12 @@ const PruneImagesRecommendedName = "images"
 
 var (
 	imagesLongDesc = templates.LongDesc(`
-		Prune images no longer needed due to age and/or status
+		Remove image stream tags, images, and image layers by age or usage
+
+		This command removes historical image stream tags, unused images, and unreferenced image
+		layers from the integrated registry. It prefers images that have been directly pushed to
+		the registry, but you may specify --all to include images that were imported (if registry
+		mirroring is enabled).
 
 		By default, the prune operation performs a dry run making no changes to internal registry. A
 		--confirm flag is needed for changes to be effective.
... ...
@@ -51,7 +56,7 @@ var (
 	  %[1]s %[2]s --keep-tag-revisions=3 --keep-younger-than=60m --confirm
 
 	  # See, what the prune command would delete if we're interested in removing images
-	  # exceeding currently set LimitRanges ('openshift.io/Image')
+	  # exceeding currently set limit ranges ('openshift.io/Image')
 	  %[1]s %[2]s --prune-over-size-limit
 
 	  # To actually perform the prune operation, the confirm flag must be appended
... ...
@@ -70,6 +75,7 @@ type PruneImagesOptions struct {
 	KeepYoungerThan     *time.Duration
 	KeepTagRevisions    *int
 	PruneOverSizeLimit  *bool
+	AllImages           *bool
 	CABundle            string
 	RegistryUrlOverride string
 	Namespace           string
... ...
@@ -82,11 +88,13 @@ type PruneImagesOptions struct {
 
 // NewCmdPruneImages implements the OpenShift cli prune images command.
 func NewCmdPruneImages(f *clientcmd.Factory, parentName, name string, out io.Writer) *cobra.Command {
+	allImages := false
 	opts := &PruneImagesOptions{
 		Confirm:            false,
 		KeepYoungerThan:    &defaultKeepYoungerThan,
 		KeepTagRevisions:   &defaultKeepTagRevisions,
 		PruneOverSizeLimit: &defaultPruneImageOverSizeLimit,
+		AllImages:          &allImages,
 	}
 
 	cmd := &cobra.Command{
... ...
@@ -104,6 +112,7 @@ func NewCmdPruneImages(f *clientcmd.Factory, parentName, name string, out io.Wri
 	}
 
 	cmd.Flags().BoolVar(&opts.Confirm, "confirm", opts.Confirm, "Specify that image pruning should proceed. Defaults to false, displaying what would be deleted but not actually deleting anything.")
+	cmd.Flags().BoolVar(opts.AllImages, "all", *opts.AllImages, "Include images that were not pushed to the registry but have been mirrored by pullthrough. Requires --registry-url")
 	cmd.Flags().DurationVar(opts.KeepYoungerThan, "keep-younger-than", *opts.KeepYoungerThan, "Specify the minimum age of an image for it to be considered a candidate for pruning.")
 	cmd.Flags().IntVar(opts.KeepTagRevisions, "keep-tag-revisions", *opts.KeepTagRevisions, "Specify the number of image revisions for a tag in an image stream that will be preserved.")
 	cmd.Flags().BoolVar(opts.PruneOverSizeLimit, "prune-over-size-limit", *opts.PruneOverSizeLimit, "Specify if images which are exceeding LimitRanges (see 'openshift.io/Image'), specified in the same namespace, should be considered for pruning. This flag cannot be combined with --keep-younger-than nor --keep-tag-revisions.")
... ...
@@ -130,6 +139,11 @@ func (o *PruneImagesOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command,
 			o.KeepTagRevisions = nil
 		}
 	}
+	if *o.AllImages {
+		if len(o.RegistryUrlOverride) == 0 {
+			return kcmdutil.UsageError(cmd, "--registry-url must be specified when --all is true")
+		}
+	}
 	o.Namespace = kapi.NamespaceAll
 	if cmd.Flags().Lookup("namespace").Changed {
 		var err error
... ...
@@ -228,6 +242,7 @@ func (o PruneImagesOptions) Run() error {
 		KeepYoungerThan:    o.KeepYoungerThan,
 		KeepTagRevisions:   o.KeepTagRevisions,
 		PruneOverSizeLimit: o.PruneOverSizeLimit,
+		AllImages:          o.AllImages,
 		Images:             allImages,
 		Streams:            allStreams,
 		Pods:               allPods,
... ...
@@ -1,8 +1,9 @@
 package server
 
 import (
+	"io"
 	"net/http"
-	"strconv"
+	"sync"
 	"time"
 
 	"github.com/docker/distribution"
... ...
@@ -23,6 +24,7 @@ type pullthroughBlobStore struct {
 	repo                       *repository
 	digestToStore              map[string]distribution.BlobStore
 	pullFromInsecureRegistries bool
+	mirror                     bool
 }
 
 var _ distribution.BlobStore = &pullthroughBlobStore{}
... ...
@@ -116,30 +118,36 @@ func (r *pullthroughBlobStore) proxyStat(ctx context.Context, retriever importer
 }
 
 // ServeBlob attempts to serve the requested digest onto w, using a remote proxy store if necessary.
-func (r *pullthroughBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, req *http.Request, dgst digest.Digest) error {
-	store, ok := r.digestToStore[dgst.String()]
+func (pbs *pullthroughBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, req *http.Request, dgst digest.Digest) error {
+	store, ok := pbs.digestToStore[dgst.String()]
 	if !ok {
-		return r.BlobStore.ServeBlob(ctx, w, req, dgst)
+		return pbs.BlobStore.ServeBlob(ctx, w, req, dgst)
 	}
 
-	desc, err := store.Stat(ctx, dgst)
-	if err != nil {
-		context.GetLogger(ctx).Errorf("failed to stat digest %q: %v", dgst.String(), err)
-		return err
-	}
+	// store the content locally if requested, but ensure only one instance at a time
+	// is storing to avoid excessive local writes
+	if pbs.mirror {
+		mu.Lock()
+		if _, ok = inflight[dgst]; ok {
+			mu.Unlock()
+			context.GetLogger(ctx).Infof("Serving %q while mirroring in background", dgst)
+			_, err := pbs.copyContent(store, ctx, dgst, w, req)
+			return err
+		}
+		inflight[dgst] = struct{}{}
+		mu.Unlock()
 
-	remoteReader, err := store.Open(ctx, dgst)
-	if err != nil {
-		context.GetLogger(ctx).Errorf("failure to open remote store for digest %q: %v", dgst.String(), err)
-		return err
+		go func(dgst digest.Digest) {
+			context.GetLogger(ctx).Infof("Start background mirroring of %q", dgst)
+			if err := pbs.storeLocal(store, ctx, dgst); err != nil {
+				context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error())
+			}
+			context.GetLogger(ctx).Infof("Completed mirroring of %q", dgst)
+		}(dgst)
 	}
-	defer remoteReader.Close()
 
-	setResponseHeaders(w, desc.Size, desc.MediaType, dgst)
-
-	context.GetLogger(ctx).Infof("serving blob %s of type %s %d bytes long", dgst.String(), desc.MediaType, desc.Size)
-	http.ServeContent(w, req, desc.Digest.String(), time.Time{}, remoteReader)
-	return nil
+	_, err := pbs.copyContent(store, ctx, dgst, w, req)
+	return err
 }
 
 // Get attempts to fetch the requested blob by digest using a remote proxy store if necessary.
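
The mirroring path above can be exercised from outside the registry; this sketch adapts the e2e checks added later in this commit (the blob digest and registry pod variables are placeholders you must fill in):

```bash
# Fetch a pullthrough blob directly, which kicks off background mirroring,
# then confirm the blob appears in the registry pod's local storage.
curl -s -H "Authorization: bearer $(oc whoami -t)" \
  "http://${DOCKER_REGISTRY}/v2/cache/mysql/blobs/${blob_digest}" >/dev/null
oc exec -n default -p "${registry_pod}" du /registry | grep "${blob_digest#sha256:}"
```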
... ...
@@ -239,8 +247,72 @@ func identifyCandidateRepositories(is *imageapi.ImageStream, localRegistry strin
 
 // setResponseHeaders sets the appropriate content serving headers
 func setResponseHeaders(w http.ResponseWriter, length int64, mediaType string, digest digest.Digest) {
-	w.Header().Set("Content-Length", strconv.FormatInt(length, 10))
 	w.Header().Set("Content-Type", mediaType)
 	w.Header().Set("Docker-Content-Digest", digest.String())
 	w.Header().Set("Etag", digest.String())
 }
+
+// inflight tracks currently downloading blobs
+var inflight = make(map[digest.Digest]struct{})
+
+// mu protects inflight
+var mu sync.Mutex
+
+// copyContent attempts to load and serve the provided blob. If req != nil and writer is an instance of http.ResponseWriter,
+// response headers will be set and range requests honored.
+func (pbs *pullthroughBlobStore) copyContent(store distribution.BlobStore, ctx context.Context, dgst digest.Digest, writer io.Writer, req *http.Request) (distribution.Descriptor, error) {
+	desc, err := store.Stat(ctx, dgst)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	remoteReader, err := store.Open(ctx, dgst)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	rw, ok := writer.(http.ResponseWriter)
+	if ok {
+		setResponseHeaders(rw, desc.Size, desc.MediaType, dgst)
+		// serve range requests
+		if req != nil {
+			http.ServeContent(rw, req, desc.Digest.String(), time.Time{}, remoteReader)
+			return desc, nil
+		}
+	}
+
+	if _, err = io.CopyN(writer, remoteReader, desc.Size); err != nil {
+		return distribution.Descriptor{}, err
+	}
+	return desc, nil
+}
+
+// storeLocal retrieves the named blob from the provided store and writes it into the local store.
+func (pbs *pullthroughBlobStore) storeLocal(store distribution.BlobStore, ctx context.Context, dgst digest.Digest) error {
+	defer func() {
+		mu.Lock()
+		delete(inflight, dgst)
+		mu.Unlock()
+	}()
+
+	var desc distribution.Descriptor
+	var err error
+	var bw distribution.BlobWriter
+
+	bw, err = pbs.BlobStore.Create(ctx)
+	if err != nil {
+		return err
+	}
+
+	desc, err = pbs.copyContent(store, ctx, dgst, bw, nil)
+	if err != nil {
+		return err
+	}
+
+	_, err = bw.Commit(ctx, desc)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
... ...
@@ -55,6 +55,12 @@ const (
 	// leaking a blob that is no longer tagged in given repository.
 	BlobRepositoryCacheTTLEnvVar = "REGISTRY_MIDDLEWARE_REPOSITORY_OPENSHIFT_BLOBREPOSITORYCACHETTL"
 
+	// Pullthrough is a boolean environment variable that controls whether pullthrough is enabled.
+	PullthroughEnvVar = "REGISTRY_MIDDLEWARE_REPOSITORY_OPENSHIFT_PULLTHROUGH"
+
+	// MirrorPullthrough is a boolean environment variable that controls mirroring of blobs on pullthrough.
+	MirrorPullthroughEnvVar = "REGISTRY_MIDDLEWARE_REPOSITORY_OPENSHIFT_MIRRORPULLTHROUGH"
+
 	// Default values
 
 	defaultDigestToRepositoryCacheSize = 2048
... ...
@@ -136,6 +142,8 @@ type repository struct {
 	// if true, the repository will check remote references in the image stream to support pulling "through"
 	// from a remote repository
 	pullthrough bool
+	// mirrorPullthrough will mirror remote blobs into the local repository if set
+	mirrorPullthrough bool
 	// acceptschema2 allows to refuse the manifest schema version 2
 	acceptschema2 bool
 	// blobrepositorycachettl is an eviction timeout for <blob belongs to repository> entries of cachedLayers
... ...
@@ -170,7 +178,11 @@ func newRepositoryWithClient(
 	if err != nil {
 		context.GetLogger(ctx).Error(err)
 	}
-	pullthrough, err := getBoolOption("", "pullthrough", false, options)
+	pullthrough, err := getBoolOption(PullthroughEnvVar, "pullthrough", false, options)
+	if err != nil {
+		context.GetLogger(ctx).Error(err)
+	}
+	mirrorPullthrough, err := getBoolOption(MirrorPullthroughEnvVar, "mirrorpullthrough", true, options)
 	if err != nil {
 		context.GetLogger(ctx).Error(err)
 	}
... ...
@@ -193,6 +205,7 @@ func newRepositoryWithClient(
 		acceptschema2:          acceptschema2,
 		blobrepositorycachettl: blobrepositorycachettl,
 		pullthrough:            pullthrough,
+		mirrorPullthrough:      mirrorPullthrough,
 		cachedLayers:           cachedLayers,
 	}, nil
 }
... ...
@@ -228,6 +241,7 @@ func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
 
 			repo:          &repo,
 			digestToStore: make(map[string]distribution.BlobStore),
+			mirror:        r.mirrorPullthrough,
 		}
 	}
 
... ...
@@ -55,6 +55,7 @@ type pruneAlgorithm struct {
 	keepTagRevisions   int
 	pruneOverSizeLimit bool
 	namespace          string
+	allImages          bool
 }
 
 // ImageDeleter knows how to remove images from OpenShift.
... ...
@@ -103,6 +104,9 @@ type PrunerOptions struct {
 	// PruneOverSizeLimit indicates that images exceeding defined limits (openshift.io/Image)
 	// will be considered as candidates for pruning.
 	PruneOverSizeLimit *bool
+	// AllImages considers all images for pruning, not just those pushed directly to the registry.
+	// Requires RegistryURL be set.
+	AllImages *bool
 	// Namespace to be pruned, if specified it should never remove Images.
 	Namespace string
 	// Images is the entire list of images in OpenShift. An image must be in this
... ...
@@ -224,9 +228,10 @@ func (*dryRunRegistryPinger) ping(registry string) error {
 // cluster; otherwise, the pruning algorithm might result in incorrect
 // calculations and premature pruning.
 //
-// The ImageDeleter performs the following logic: remove any image containing the
-// annotation openshift.io/image.managed=true that was created at least *n*
-// minutes ago and is *not* currently referenced by:
+// The ImageDeleter performs the following logic:
+//
+// remove any image that was created at least *n* minutes ago and is *not* currently
+// referenced by:
 //
 // - any pod created less than *n* minutes ago
 // - any image stream created less than *n* minutes ago
... ...
@@ -238,6 +243,9 @@ func (*dryRunRegistryPinger) ping(registry string) error {
 // - any builds
 // - the n most recent tag revisions in an image stream's status.tags
 //
+// including only images with the annotation openshift.io/image.managed=true
+// unless allImages is true.
+//
 // When removing an image, remove all references to the image from all
 // ImageStreams having a reference to the image in `status.tags`.
 //
... ...
@@ -252,8 +260,8 @@ func NewPruner(options PrunerOptions) Pruner {
 	if options.PruneOverSizeLimit != nil {
 		pruneOverSizeLimit = fmt.Sprintf("%v", *options.PruneOverSizeLimit)
 	}
-	glog.V(1).Infof("Creating image pruner with keepYoungerThan=%v, keepTagRevisions=%s, pruneOverSizeLimit=%s",
-		options.KeepYoungerThan, keepTagRevisions, pruneOverSizeLimit)
+	glog.V(1).Infof("Creating image pruner with keepYoungerThan=%v, keepTagRevisions=%s, pruneOverSizeLimit=%s allImages=%t",
+		options.KeepYoungerThan, keepTagRevisions, pruneOverSizeLimit, options.AllImages)
 
 	algorithm := pruneAlgorithm{}
 	if options.KeepYoungerThan != nil {
... ...
@@ -265,6 +273,9 @@ func NewPruner(options PrunerOptions) Pruner {
 	if options.PruneOverSizeLimit != nil {
 		algorithm.pruneOverSizeLimit = *options.PruneOverSizeLimit
 	}
+	if options.AllImages != nil {
+		algorithm.allImages = *options.AllImages
+	}
 	algorithm.namespace = options.Namespace
 
 	g := graph.New()
... ...
@@ -302,13 +313,15 @@ func addImagesToGraph(g graph.Graph, images *imageapi.ImageList, algorithm prune
 
 		glog.V(4).Infof("Examining image %q", image.Name)
 
-		if image.Annotations == nil {
-			glog.V(4).Infof("Image %q with DockerImageReference %q belongs to an external registry - skipping", image.Name, image.DockerImageReference)
-			continue
-		}
-		if value, ok := image.Annotations[imageapi.ManagedByOpenShiftAnnotation]; !ok || value != "true" {
-			glog.V(4).Infof("Image %q with DockerImageReference %q belongs to an external registry - skipping", image.Name, image.DockerImageReference)
-			continue
+		if !algorithm.allImages {
+			if image.Annotations == nil {
+				glog.V(4).Infof("Image %q with DockerImageReference %q belongs to an external registry - skipping", image.Name, image.DockerImageReference)
+				continue
+			}
+			if value, ok := image.Annotations[imageapi.ManagedByOpenShiftAnnotation]; !ok || value != "true" {
+				glog.V(4).Infof("Image %q with DockerImageReference %q belongs to an external registry - skipping", image.Name, image.DockerImageReference)
+				continue
+			}
 		}
 
 		age := unversioned.Now().Sub(image.CreationTimestamp.Time)
... ...
@@ -105,6 +105,7 @@ os::cmd::try_until_success "curl --max-time 2 --fail --silent 'http://${DOCKER_R
 os::cmd::expect_success "curl -f http://${DOCKER_REGISTRY}/healthz"
 
 os::cmd::expect_success "dig @${API_HOST} docker-registry.default.local. A"
+registry_pod=$(oc get pod -n default -l deploymentconfig=docker-registry --template='{{(index .items 0).metadata.name}}')
 
 # Client setup (log in as e2e-user and set 'test' as the default project)
 # This is required to be able to push to the registry!
... ...
@@ -147,6 +148,11 @@ os::log::info "Ruby's testing blob digest: $rubyimageblob"
 os::log::info "Docker pullthrough"
 os::cmd::expect_success "oc import-image --confirm --from=mysql:latest mysql:pullthrough"
 os::cmd::expect_success "docker pull ${DOCKER_REGISTRY}/cache/mysql:pullthrough"
+mysqlblob="$(oc get istag -o go-template='{{range .image.dockerImageLayers}}{{if gt .size 1024.}}{{.name}},{{end}}{{end}}' "mysql:pullthrough" | cut -d , -f 1)"
+# directly hit the image to trigger mirroring in case the layer already exists on disk
+os::cmd::expect_success "curl -H 'Authorization: bearer $(oc whoami -t)' 'http://${DOCKER_REGISTRY}/v2/cache/mysql/blobs/${mysqlblob}' 1>/dev/null"
+# verify the blob exists on disk in the registry due to mirroring under .../blobs/sha256/<2 char prefix>/<sha value>
+os::cmd::try_until_success "oc exec --context='${CLUSTER_ADMIN_CONTEXT}' -n default -p ${registry_pod} du /registry | tee '${LOG_DIR}/registry-images.txt' | grep '${mysqlblob:7:100}' | grep blobs"
 
 os::log::info "Docker registry start with GCS"
 os::cmd::expect_failure_and_text "docker run -e REGISTRY_STORAGE=\"gcs: {}\" openshift/origin-docker-registry:${TAG}" "No bucket parameter provided"
... ...
@@ -519,23 +525,34 @@ os::cmd::expect_success "docker tag gcr.io/google_containers/pause ${DOCKER_REGI
 os::cmd::expect_success "docker push ${DOCKER_REGISTRY}/cache/prune"
 
 # record the storage before pruning
-registry_pod=$(oc get pod -l deploymentconfig=docker-registry --template='{{(index .items 0).metadata.name}}')
+registry_pod=$(oc get pod -n default -l deploymentconfig=docker-registry --template='{{(index .items 0).metadata.name}}')
 os::cmd::expect_success "oc exec -p ${registry_pod} du /registry > '${LOG_DIR}/prune-images.before.txt'"
 
 # set up pruner user
-os::cmd::expect_success 'oadm policy add-cluster-role-to-user system:image-pruner e2e-pruner'
-os::cmd::try_until_text 'oadm policy who-can list images' 'e2e-pruner'
-os::cmd::expect_success 'oc login -u e2e-pruner -p pass'
+os::cmd::expect_success 'oadm policy add-cluster-role-to-user system:image-pruner system:serviceaccount:cache:builder'
+os::cmd::try_until_text 'oadm policy who-can list images' 'system:serviceaccount:cache:builder'
 
 # run image pruning
-os::cmd::expect_success_and_not_text "oadm prune images --keep-younger-than=0 --keep-tag-revisions=1 --confirm" 'error'
+os::cmd::expect_success_and_not_text "oadm prune images --token=$(oc sa get-token builder -n cache) --keep-younger-than=0 --keep-tag-revisions=1 --confirm" 'error'
 
-os::cmd::expect_success "oc project ${CLUSTER_ADMIN_CONTEXT}"
 # record the storage after pruning
 os::cmd::expect_success "oc exec -p ${registry_pod} du /registry > '${LOG_DIR}/prune-images.after.txt'"
 
 # make sure there were changes to the registry's storage
 os::cmd::expect_code "diff ${LOG_DIR}/prune-images.before.txt ${LOG_DIR}/prune-images.after.txt" 1
+
+# prune a mirrored, external image that is no longer referenced
+os::cmd::expect_success "oc import-image nginx --confirm -n cache"
+nginxblob="$(oc get istag -o go-template='{{range .image.dockerImageLayers}}{{if gt .size 1024.}}{{.name}},{{end}}{{end}}' "nginx:latest" -n cache | cut -d , -f 1)"
+# directly hit the image to trigger mirroring in case the layer already exists on disk
+os::cmd::expect_success "curl -H 'Authorization: bearer $(oc sa get-token builder -n cache)' 'http://${DOCKER_REGISTRY}/v2/cache/nginx/blobs/${nginxblob}' 1>/dev/null"
+# verify the blob exists on disk in the registry due to mirroring under .../blobs/sha256/<2 char prefix>/<sha value>
+os::cmd::try_until_success "oc exec --context='${CLUSTER_ADMIN_CONTEXT}' -n default -p ${registry_pod} du /registry | tee '${LOG_DIR}/registry-images.txt' | grep '${nginxblob:7:100}' | grep blobs"
+os::cmd::expect_success "oc delete is nginx -n cache"
+os::cmd::expect_success "oc exec -p ${registry_pod} du /registry > '${LOG_DIR}/prune-images.before.txt'"
+os::cmd::expect_success_and_not_text "oadm prune images --token=$(oc sa get-token builder -n cache) --keep-younger-than=0 --confirm --all --registry-url=${DOCKER_REGISTRY}" 'error'
+os::cmd::expect_success "oc exec -p ${registry_pod} du /registry > '${LOG_DIR}/prune-images.after.txt'"
+os::cmd::expect_code "diff ${LOG_DIR}/prune-images.before.txt ${LOG_DIR}/prune-images.after.txt" 1
 os::log::info "Validated image pruning"
 
 # with registry's re-deployment we loose all the blobs stored in its storage until now