Change push to use manifest builder

Currently this always uses the schema1 manifest builder. Later, it will
be changed to attempt schema2 first, and fall back when necessary.

Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
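
For context, the follow-up described above would try the schema2 builder first and only drop back to schema1 when the registry cannot accept it. A minimal sketch of that ordering (illustrative only; pushSchema2Manifest, pushSchema1Manifest, and errSchema2Unsupported are hypothetical names, not part of this commit):

package main

import (
	"errors"
	"fmt"
)

// errSchema2Unsupported stands in for whatever signal indicates the registry
// cannot accept a schema2 manifest (hypothetical; the real check will depend
// on the registry's response).
var errSchema2Unsupported = errors.New("registry does not accept schema2 manifests")

// pushSchema2Manifest and pushSchema1Manifest are placeholders for building
// and uploading a manifest with the schema2 and schema1 builders.
func pushSchema2Manifest() error { return errSchema2Unsupported }
func pushSchema1Manifest() error { return nil }

// pushManifest sketches the intended ordering: attempt schema2 first, then
// fall back to the schema1 builder only when schema2 is not supported.
func pushManifest() error {
	err := pushSchema2Manifest()
	if err == nil || err != errSchema2Unsupported {
		return err
	}
	fmt.Println("schema2 not supported, falling back to schema1")
	return pushSchema1Manifest()
}

func main() {
	if err := pushManifest(); err != nil {
		fmt.Println("push failed:", err)
	}
}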

Aaron Lehmann authored on 2015/12/11 09:27:58
Showing 7 changed files
... ...
@@ -7,7 +7,6 @@ import (
 	"io"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/distribution/digest"
 	"github.com/docker/docker/distribution/metadata"
 	"github.com/docker/docker/distribution/xfer"
 	"github.com/docker/docker/image"
... ...
@@ -77,7 +76,6 @@ func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *reg
 			endpoint:       endpoint,
 			repoInfo:       repoInfo,
 			config:         imagePushConfig,
-			layersPushed:   pushMap{layersPushed: make(map[digest.Digest]bool)},
 		}, nil
 	case registry.APIVersion1:
 		return &v1Pusher{
... ...
@@ -1,22 +1,18 @@
 package distribution
 
 import (
-	"encoding/json"
-	"errors"
 	"fmt"
 	"io"
 	"sync"
-	"time"
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution"
 	"github.com/docker/distribution/digest"
-	"github.com/docker/distribution/manifest"
 	"github.com/docker/distribution/manifest/schema1"
+	"github.com/docker/distribution/manifest/schema2"
+	"github.com/docker/distribution/registry/client"
 	"github.com/docker/docker/distribution/metadata"
 	"github.com/docker/docker/distribution/xfer"
-	"github.com/docker/docker/image"
-	"github.com/docker/docker/image/v1"
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/progress"
... ...
@@ -43,31 +39,34 @@ type v2Pusher struct {
 	config         *ImagePushConfig
 	repo           distribution.Repository
 
-	// confirmedV2 is set to true if we confirm we're talking to a v2
-	// registry. This is used to limit fallbacks to the v1 protocol.
-	confirmedV2 bool
-
-	// layersPushed is the set of layers known to exist on the remote side.
-	// This avoids redundant queries when pushing multiple tags that
-	// involve the same layers.
-	layersPushed pushMap
+	// pushState is state built by the Download functions.
+	pushState pushState
 }
 
-type pushMap struct {
+type pushState struct {
 	sync.Mutex
-	layersPushed map[digest.Digest]bool
+	// remoteLayers is the set of layers known to exist on the remote side.
+	// This avoids redundant queries when pushing multiple tags that
+	// involve the same layers. It is also used to fill in digest and size
+	// information when building the manifest.
+	remoteLayers map[layer.DiffID]distribution.Descriptor
+	// confirmedV2 is set to true if we confirm we're talking to a v2
+	// registry. This is used to limit fallbacks to the v1 protocol.
+	confirmedV2 bool
 }
 
 func (p *v2Pusher) Push(ctx context.Context) (err error) {
-	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
+	p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor)
+
+	p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
 	if err != nil {
 		logrus.Debugf("Error getting v2 registry: %v", err)
-		return fallbackError{err: err, confirmedV2: p.confirmedV2}
+		return fallbackError{err: err, confirmedV2: p.pushState.confirmedV2}
 	}
 
 	if err = p.pushV2Repository(ctx); err != nil {
 		if registry.ContinueOnError(err) {
-			return fallbackError{err: err, confirmedV2: p.confirmedV2}
+			return fallbackError{err: err, confirmedV2: p.pushState.confirmedV2}
 		}
 	}
 	return err
... ...
@@ -134,18 +133,7 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, association reference.Associat
 	descriptorTemplate := v2PushDescriptor{
 		blobSumService: p.blobSumService,
 		repo:           p.repo,
-		layersPushed:   &p.layersPushed,
-		confirmedV2:    &p.confirmedV2,
-	}
-
-	// Push empty layer if necessary
-	for _, h := range img.History {
-		if h.EmptyLayer {
-			descriptor := descriptorTemplate
-			descriptor.layer = layer.EmptyLayer
-			descriptors = []xfer.UploadDescriptor{&descriptor}
-			break
-		}
+		pushState:      &p.pushState,
 	}
 
 	// Loop bounds condition is to avoid pushing the base layer on Windows.
... ...
@@ -157,8 +145,7 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, association reference.Associat
 		l = l.Parent()
 	}
 
-	fsLayers, err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput)
-	if err != nil {
+	if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil {
 		return err
 	}
 
... ...
@@ -166,18 +153,22 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, association reference.Associat
 	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
 		tag = tagged.Tag()
 	}
-	m, err := CreateV2Manifest(p.repo.Name(), tag, img, fsLayers)
-	if err != nil {
-		return err
+	builder := schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, p.repo.Name(), tag, img.RawJSON())
+
+	// descriptors is in reverse order; iterate backwards to get references
+	// appended in the right order.
+	for i := len(descriptors) - 1; i >= 0; i-- {
+		if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil {
+			return err
+		}
 	}
 
-	logrus.Infof("Signed manifest for %s using daemon's key: %s", ref.String(), p.config.TrustKey.KeyID())
-	signed, err := schema1.Sign(m, p.config.TrustKey)
+	manifest, err := builder.Build(ctx)
 	if err != nil {
 		return err
 	}
 
-	manifestDigest, manifestSize, err := digestFromManifest(signed, ref)
+	manifestDigest, manifestSize, err := digestFromManifest(manifest.(*schema1.SignedManifest), ref)
 	if err != nil {
 		return err
 	}
... ...
@@ -194,7 +185,12 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, association reference.Associat
 	if err != nil {
 		return err
 	}
-	_, err = manSvc.Put(ctx, signed)
+
+	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
+		_, err = manSvc.Put(ctx, manifest, client.WithTag(tagged.Tag()))
+	} else {
+		_, err = manSvc.Put(ctx, manifest)
+	}
 	// FIXME create a tag
 	return err
 }
... ...
@@ -203,8 +199,7 @@ type v2PushDescriptor struct {
 	layer          layer.Layer
 	blobSumService *metadata.BlobSumService
 	repo           distribution.Repository
-	layersPushed   *pushMap
-	confirmedV2    *bool
+	pushState      *pushState
 }
 
 func (pd *v2PushDescriptor) Key() string {
... ...
@@ -219,25 +214,38 @@ func (pd *v2PushDescriptor) DiffID() layer.DiffID {
 	return pd.layer.DiffID()
 }
 
-func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error) {
+func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) error {
 	diffID := pd.DiffID()
 
-	logrus.Debugf("Pushing layer: %s", diffID)
+	pd.pushState.Lock()
+	if _, ok := pd.pushState.remoteLayers[diffID]; ok {
+		// it is already known that the push is not needed and
+		// therefore doing a stat is unnecessary
+		pd.pushState.Unlock()
+		progress.Update(progressOutput, pd.ID(), "Layer already exists")
+		return nil
+	}
+	pd.pushState.Unlock()
 
 	// Do we have any blobsums associated with this layer's DiffID?
 	possibleBlobsums, err := pd.blobSumService.GetBlobSums(diffID)
 	if err == nil {
-		dgst, exists, err := blobSumAlreadyExists(ctx, possibleBlobsums, pd.repo, pd.layersPushed)
+		descriptor, exists, err := blobSumAlreadyExists(ctx, possibleBlobsums, pd.repo, pd.pushState)
 		if err != nil {
 			progress.Update(progressOutput, pd.ID(), "Image push failed")
-			return "", retryOnError(err)
+			return retryOnError(err)
 		}
 		if exists {
 			progress.Update(progressOutput, pd.ID(), "Layer already exists")
-			return dgst, nil
+			pd.pushState.Lock()
+			pd.pushState.remoteLayers[diffID] = descriptor
+			pd.pushState.Unlock()
+			return nil
 		}
 	}
 
+	logrus.Debugf("Pushing layer: %s", diffID)
+
 	// if digest was empty or not saved, or if blob does not exist on the remote repository,
 	// then push the blob.
 	bs := pd.repo.Blobs(ctx)
... ...
@@ -245,13 +253,13 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
 	// Send the layer
 	layerUpload, err := bs.Create(ctx)
 	if err != nil {
-		return "", retryOnError(err)
+		return retryOnError(err)
 	}
 	defer layerUpload.Close()
 
 	arch, err := pd.layer.TarStream()
 	if err != nil {
-		return "", xfer.DoNotRetry{Err: err}
+		return xfer.DoNotRetry{Err: err}
 	}
 
 	// don't care if this fails; best effort
... ...
@@ -267,177 +275,62 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
 	nn, err := layerUpload.ReadFrom(tee)
 	compressedReader.Close()
 	if err != nil {
-		return "", retryOnError(err)
+		return retryOnError(err)
 	}
 
 	pushDigest := digester.Digest()
 	if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
-		return "", retryOnError(err)
+		return retryOnError(err)
 	}
 
-	// If Commit succeded, that's an indication that the remote registry
-	// speaks the v2 protocol.
-	*pd.confirmedV2 = true
-
 	logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
 	progress.Update(progressOutput, pd.ID(), "Pushed")
 
 	// Cache mapping from this layer's DiffID to the blobsum
 	if err := pd.blobSumService.Add(diffID, pushDigest); err != nil {
-		return "", xfer.DoNotRetry{Err: err}
+		return xfer.DoNotRetry{Err: err}
 	}
 
-	pd.layersPushed.Lock()
-	pd.layersPushed.layersPushed[pushDigest] = true
-	pd.layersPushed.Unlock()
+	pd.pushState.Lock()
+
+	// If Commit succeded, that's an indication that the remote registry
+	// speaks the v2 protocol.
+	pd.pushState.confirmedV2 = true
+
+	pd.pushState.remoteLayers[diffID] = distribution.Descriptor{
+		Digest:    pushDigest,
+		MediaType: schema2.MediaTypeLayer,
+		Size:      nn,
+	}
+
+	pd.pushState.Unlock()
+
+	return nil
+}
 
-	return pushDigest, nil
+func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor {
+	// Not necessary to lock pushStatus because this is always
+	// called after all the mutation in pushStatus.
+	// By the time this function is called, every layer will have
+	// an entry in remoteLayers.
+	return pd.pushState.remoteLayers[pd.DiffID()]
 }
 
 // blobSumAlreadyExists checks if the registry already know about any of the
 // blobsums passed in the "blobsums" slice. If it finds one that the registry
 // knows about, it returns the known digest and "true".
-func blobSumAlreadyExists(ctx context.Context, blobsums []digest.Digest, repo distribution.Repository, layersPushed *pushMap) (digest.Digest, bool, error) {
-	layersPushed.Lock()
-	for _, dgst := range blobsums {
-		if layersPushed.layersPushed[dgst] {
-			// it is already known that the push is not needed and
-			// therefore doing a stat is unnecessary
-			layersPushed.Unlock()
-			return dgst, true, nil
-		}
-	}
-	layersPushed.Unlock()
-
+func blobSumAlreadyExists(ctx context.Context, blobsums []digest.Digest, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) {
 	for _, dgst := range blobsums {
-		_, err := repo.Blobs(ctx).Stat(ctx, dgst)
+		descriptor, err := repo.Blobs(ctx).Stat(ctx, dgst)
 		switch err {
 		case nil:
-			return dgst, true, nil
+			descriptor.MediaType = schema2.MediaTypeLayer
+			return descriptor, true, nil
 		case distribution.ErrBlobUnknown:
 			// nop
 		default:
-			return "", false, err
-		}
-	}
-	return "", false, nil
-}
-
-// CreateV2Manifest creates a V2 manifest from an image config and set of
-// FSLayer digests.
-// FIXME: This should be moved to the distribution repo, since it will also
-// be useful for converting new manifests to the old format.
-func CreateV2Manifest(name, tag string, img *image.Image, fsLayers map[layer.DiffID]digest.Digest) (*schema1.Manifest, error) {
-	if len(img.History) == 0 {
-		return nil, errors.New("empty history when trying to create V2 manifest")
-	}
-
-	// Generate IDs for each layer
-	// For non-top-level layers, create fake V1Compatibility strings that
-	// fit the format and don't collide with anything else, but don't
-	// result in runnable images on their own.
-	type v1Compatibility struct {
-		ID              string    `json:"id"`
-		Parent          string    `json:"parent,omitempty"`
-		Comment         string    `json:"comment,omitempty"`
-		Created         time.Time `json:"created"`
-		ContainerConfig struct {
-			Cmd []string
-		} `json:"container_config,omitempty"`
-		ThrowAway bool `json:"throwaway,omitempty"`
-	}
-
-	fsLayerList := make([]schema1.FSLayer, len(img.History))
-	history := make([]schema1.History, len(img.History))
-
-	parent := ""
-	layerCounter := 0
-	for i, h := range img.History {
-		if i == len(img.History)-1 {
-			break
-		}
-
-		var diffID layer.DiffID
-		if h.EmptyLayer {
-			diffID = layer.EmptyLayer.DiffID()
-		} else {
-			if len(img.RootFS.DiffIDs) <= layerCounter {
-				return nil, errors.New("too many non-empty layers in History section")
-			}
-			diffID = img.RootFS.DiffIDs[layerCounter]
-			layerCounter++
-		}
-
-		fsLayer, present := fsLayers[diffID]
-		if !present {
-			return nil, fmt.Errorf("missing layer in CreateV2Manifest: %s", diffID.String())
-		}
-		dgst := digest.FromBytes([]byte(fsLayer.Hex() + " " + parent))
-		v1ID := dgst.Hex()
-
-		v1Compatibility := v1Compatibility{
-			ID:      v1ID,
-			Parent:  parent,
-			Comment: h.Comment,
-			Created: h.Created,
+			return distribution.Descriptor{}, false, err
 		}
-		v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy}
-		if h.EmptyLayer {
-			v1Compatibility.ThrowAway = true
-		}
-		jsonBytes, err := json.Marshal(&v1Compatibility)
-		if err != nil {
-			return nil, err
-		}
-
-		reversedIndex := len(img.History) - i - 1
-		history[reversedIndex].V1Compatibility = string(jsonBytes)
-		fsLayerList[reversedIndex] = schema1.FSLayer{BlobSum: fsLayer}
-
-		parent = v1ID
-	}
-
-	latestHistory := img.History[len(img.History)-1]
-
-	var diffID layer.DiffID
-	if latestHistory.EmptyLayer {
-		diffID = layer.EmptyLayer.DiffID()
-	} else {
-		if len(img.RootFS.DiffIDs) <= layerCounter {
-			return nil, errors.New("too many non-empty layers in History section")
-		}
-		diffID = img.RootFS.DiffIDs[layerCounter]
 	}
-	fsLayer, present := fsLayers[diffID]
-	if !present {
-		return nil, fmt.Errorf("missing layer in CreateV2Manifest: %s", diffID.String())
-	}
-
-	fsLayerList[0] = schema1.FSLayer{BlobSum: fsLayer}
-	dgst := digest.FromBytes([]byte(fsLayer.Hex() + " " + parent + " " + string(img.RawJSON())))
-
-	// Top-level v1compatibility string should be a modified version of the
-	// image config.
-	transformedConfig, err := v1.MakeV1ConfigFromConfig(img, dgst.Hex(), parent, latestHistory.EmptyLayer)
-	if err != nil {
-		return nil, err
-	}
-
-	history[0].V1Compatibility = string(transformedConfig)
-
-	// windows-only baselayer setup
-	if err := setupBaseLayer(history, *img.RootFS); err != nil {
-		return nil, err
-	}
-
-	return &schema1.Manifest{
-		Versioned: manifest.Versioned{
-			SchemaVersion: 1,
-		},
-		Name:         name,
-		Tag:          tag,
-		Architecture: img.Architecture,
-		FSLayers:     fsLayerList,
-		History:      history,
-	}, nil
+	return distribution.Descriptor{}, false, nil
 }
deleted file mode 100644
... ...
@@ -1,176 +0,0 @@
-package distribution
-
-import (
-	"reflect"
-	"testing"
-
-	"github.com/docker/distribution/digest"
-	"github.com/docker/distribution/manifest/schema1"
-	"github.com/docker/docker/image"
-	"github.com/docker/docker/layer"
-)
-
-func TestCreateV2Manifest(t *testing.T) {
-	imgJSON := `{
-    "architecture": "amd64",
-    "config": {
-        "AttachStderr": false,
-        "AttachStdin": false,
-        "AttachStdout": false,
-        "Cmd": [
-            "/bin/sh",
-            "-c",
-            "echo hi"
-        ],
-        "Domainname": "",
-        "Entrypoint": null,
-        "Env": [
-            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-            "derived=true",
-            "asdf=true"
-        ],
-        "Hostname": "23304fc829f9",
-        "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246",
-        "Labels": {},
-        "OnBuild": [],
-        "OpenStdin": false,
-        "StdinOnce": false,
-        "Tty": false,
-        "User": "",
-        "Volumes": null,
-        "WorkingDir": ""
-    },
-    "container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001",
-    "container_config": {
-        "AttachStderr": false,
-        "AttachStdin": false,
-        "AttachStdout": false,
-        "Cmd": [
-            "/bin/sh",
-            "-c",
-            "#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"
-        ],
-        "Domainname": "",
-        "Entrypoint": null,
-        "Env": [
-            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-            "derived=true",
-            "asdf=true"
-        ],
-        "Hostname": "23304fc829f9",
-        "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246",
-        "Labels": {},
-        "OnBuild": [],
-        "OpenStdin": false,
-        "StdinOnce": false,
-        "Tty": false,
-        "User": "",
-        "Volumes": null,
-        "WorkingDir": ""
-    },
-    "created": "2015-11-04T23:06:32.365666163Z",
-    "docker_version": "1.9.0-dev",
-    "history": [
-        {
-            "created": "2015-10-31T22:22:54.690851953Z",
-            "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
-        },
-        {
-            "created": "2015-10-31T22:22:55.613815829Z",
-            "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]"
-        },
-        {
-            "created": "2015-11-04T23:06:30.934316144Z",
-            "created_by": "/bin/sh -c #(nop) ENV derived=true",
-            "empty_layer": true
-        },
-        {
-            "created": "2015-11-04T23:06:31.192097572Z",
-            "created_by": "/bin/sh -c #(nop) ENV asdf=true",
-            "empty_layer": true
-        },
-        {
-            "created": "2015-11-04T23:06:32.083868454Z",
-            "created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"
-        },
-        {
-            "created": "2015-11-04T23:06:32.365666163Z",
-            "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]",
-            "empty_layer": true
-        }
-    ],
-    "os": "linux",
-    "rootfs": {
-        "diff_ids": [
-            "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
-            "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
-            "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
-        ],
-        "type": "layers"
-    }
-}`
-
-	// To fill in rawJSON
-	img, err := image.NewFromJSON([]byte(imgJSON))
-	if err != nil {
-		t.Fatalf("json decoding failed: %v", err)
-	}
-
-	fsLayers := map[layer.DiffID]digest.Digest{
-		layer.DiffID("sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1"): digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
-		layer.DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"): digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
-		layer.DiffID("sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"): digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
-	}
-
-	manifest, err := CreateV2Manifest("testrepo", "testtag", img, fsLayers)
-	if err != nil {
-		t.Fatalf("CreateV2Manifest returned error: %v", err)
-	}
-
-	if manifest.Versioned.SchemaVersion != 1 {
-		t.Fatal("SchemaVersion != 1")
-	}
-	if manifest.Name != "testrepo" {
-		t.Fatal("incorrect name in manifest")
-	}
-	if manifest.Tag != "testtag" {
-		t.Fatal("incorrect tag in manifest")
-	}
-	if manifest.Architecture != "amd64" {
-		t.Fatal("incorrect arch in manifest")
-	}
-
-	expectedFSLayers := []schema1.FSLayer{
-		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
-		{BlobSum: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
-		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
-		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
-		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
-		{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
-	}
-
-	if len(manifest.FSLayers) != len(expectedFSLayers) {
-		t.Fatalf("wrong number of FSLayers: %d", len(manifest.FSLayers))
-	}
-	if !reflect.DeepEqual(manifest.FSLayers, expectedFSLayers) {
-		t.Fatal("wrong FSLayers list")
-	}
-
-	expectedV1Compatibility := []string{
-		`{"architecture":"amd64","config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","echo hi"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"container":"e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001","container_config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"created":"2015-11-04T23:06:32.365666163Z","docker_version":"1.9.0-dev","id":"d728140d3fd23dfcac505954af0b2224b3579b177029eded62916579eb19ac64","os":"linux","parent":"0594e66a9830fa5ba73b66349eb221ea4beb6bac8d2148b90a0f371f8d67bcd5","throwaway":true}`,
-		`{"id":"0594e66a9830fa5ba73b66349eb221ea4beb6bac8d2148b90a0f371f8d67bcd5","parent":"39bc0dbed47060dd8952b048e73744ae471fe50354d2c267d308292c53b83ce1","created":"2015-11-04T23:06:32.083868454Z","container_config":{"Cmd":["/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"]}}`,
-		`{"id":"39bc0dbed47060dd8952b048e73744ae471fe50354d2c267d308292c53b83ce1","parent":"875d7f206c023dc979e1677567a01364074f82b61e220c9b83a4610170490381","created":"2015-11-04T23:06:31.192097572Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV asdf=true"]},"throwaway":true}`,
-		`{"id":"875d7f206c023dc979e1677567a01364074f82b61e220c9b83a4610170490381","parent":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","created":"2015-11-04T23:06:30.934316144Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV derived=true"]},"throwaway":true}`,
-		`{"id":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","parent":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:55.613815829Z","container_config":{"Cmd":["/bin/sh -c #(nop) CMD [\"sh\"]"]}}`,
-		`{"id":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:54.690851953Z","container_config":{"Cmd":["/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"]}}`,
-	}
-
-	if len(manifest.History) != len(expectedV1Compatibility) {
-		t.Fatalf("wrong number of history entries: %d", len(manifest.History))
-	}
-	for i := range expectedV1Compatibility {
-		if manifest.History[i].V1Compatibility != expectedV1Compatibility[i] {
-			t.Fatalf("wrong V1Compatibility %d. expected:\n%s\ngot:\n%s", i, expectedV1Compatibility[i], manifest.History[i].V1Compatibility)
-		}
-	}
-}
deleted file mode 100644
... ...
@@ -1,12 +0,0 @@
-// +build !windows
-
-package distribution
-
-import (
-	"github.com/docker/distribution/manifest/schema1"
-	"github.com/docker/docker/image"
-)
-
-func setupBaseLayer(history []schema1.History, rootFS image.RootFS) error {
-	return nil
-}
deleted file mode 100644
... ...
@@ -1,28 +0,0 @@
-// +build windows
-
-package distribution
-
-import (
-	"encoding/json"
-
-	"github.com/docker/distribution/manifest/schema1"
-	"github.com/docker/docker/image"
-)
-
-func setupBaseLayer(history []schema1.History, rootFS image.RootFS) error {
-	var v1Config map[string]*json.RawMessage
-	if err := json.Unmarshal([]byte(history[len(history)-1].V1Compatibility), &v1Config); err != nil {
-		return err
-	}
-	baseID, err := json.Marshal(rootFS.BaseLayerID())
-	if err != nil {
-		return err
-	}
-	v1Config["parent"] = (*json.RawMessage)(&baseID)
-	configJSON, err := json.Marshal(v1Config)
-	if err != nil {
-		return err
-	}
-	history[len(history)-1].V1Compatibility = string(configJSON)
-	return nil
-}
... ...
@@ -5,7 +5,6 @@ import (
 	"time"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/distribution/digest"
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/pkg/progress"
 	"golang.org/x/net/context"
... ...
@@ -30,7 +29,6 @@ type uploadTransfer struct {
 	Transfer
 
 	diffID layer.DiffID
-	digest digest.Digest
 	err    error
 }
 
... ...
@@ -43,16 +41,15 @@ type UploadDescriptor interface {
 	// DiffID should return the DiffID for this layer.
 	DiffID() layer.DiffID
 	// Upload is called to perform the Upload.
-	Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error)
+	Upload(ctx context.Context, progressOutput progress.Output) error
 }
 
 // Upload is a blocking function which ensures the listed layers are present on
 // the remote registry. It uses the string returned by the Key method to
 // deduplicate uploads.
-func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) (map[layer.DiffID]digest.Digest, error) {
+func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error {
 	var (
 		uploads          []*uploadTransfer
-		digests          = make(map[layer.DiffID]digest.Digest)
 		dedupDescriptors = make(map[string]struct{})
 	)
 
... ...
@@ -74,16 +71,15 @@ func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescri
 	for _, upload := range uploads {
 		select {
 		case <-ctx.Done():
-			return nil, ctx.Err()
+			return ctx.Err()
 		case <-upload.Transfer.Done():
 			if upload.err != nil {
-				return nil, upload.err
+				return upload.err
 			}
-			digests[upload.diffID] = upload.digest
 		}
 	}
 
-	return digests, nil
+	return nil
 }
 
 func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc {
... ...
@@ -109,9 +105,8 @@ func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFun
 
 			retries := 0
 			for {
-				digest, err := descriptor.Upload(u.Transfer.Context(), progressOutput)
+				err := descriptor.Upload(u.Transfer.Context(), progressOutput)
 				if err == nil {
-					u.digest = digest
 					break
 				}
 
... ...
@@ -36,12 +36,12 @@ func (u *mockUploadDescriptor) DiffID() layer.DiffID {
 }
 
 // Upload is called to perform the upload.
-func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error) {
+func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) error {
 	if u.currentUploads != nil {
 		defer atomic.AddInt32(u.currentUploads, -1)
 
 		if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency {
-			return "", errors.New("concurrency limit exceeded")
+			return errors.New("concurrency limit exceeded")
 		}
 	}
 
... ...
@@ -49,7 +49,7 @@ func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progre
 	for i := int64(0); i <= 10; i++ {
 		select {
 		case <-ctx.Done():
-			return "", ctx.Err()
+			return ctx.Err()
 		case <-time.After(10 * time.Millisecond):
 			progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10})
 		}
... ...
@@ -57,12 +57,10 @@ func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progre
 
 	if u.simulateRetries != 0 {
 		u.simulateRetries--
-		return "", errors.New("simulating retry")
+		return errors.New("simulating retry")
 	}
 
-	// For the mock implementation, use SHA256(DiffID) as the returned
-	// digest.
-	return digest.FromBytes([]byte(u.diffID.String())), nil
+	return nil
 }
 
 func uploadDescriptors(currentUploads *int32) []UploadDescriptor {
... ...
@@ -101,26 +99,13 @@ func TestSuccessfulUpload(t *testing.T) {
 	var currentUploads int32
 	descriptors := uploadDescriptors(&currentUploads)
 
-	digests, err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan))
+	err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan))
 	if err != nil {
 		t.Fatalf("upload error: %v", err)
 	}
 
 	close(progressChan)
 	<-progressDone
-
-	if len(digests) != len(expectedDigests) {
-		t.Fatal("wrong number of keys in digests map")
-	}
-
-	for key, val := range expectedDigests {
-		if digests[key] != val {
-			t.Fatalf("mismatch in digest array for key %v (expected %v, got %v)", key, val, digests[key])
-		}
-		if receivedProgress[key.String()] != 10 {
-			t.Fatalf("missing or wrong progress output for %v", key)
-		}
-	}
 }
 
 func TestCancelledUpload(t *testing.T) {
... ...
@@ -143,7 +128,7 @@ func TestCancelledUpload(t *testing.T) {
 	}()
 
 	descriptors := uploadDescriptors(nil)
-	_, err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan))
+	err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan))
 	if err != context.Canceled {
 		t.Fatal("expected upload to be cancelled")
 	}