distribution/pull_v2.go
 package distribution
 
 import (
 	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"net/url"
 	"os"
 	"runtime"
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/manifest/manifestlist"
 	"github.com/docker/distribution/manifest/schema1"
 	"github.com/docker/distribution/manifest/schema2"
 	"github.com/docker/distribution/registry/api/errcode"
 	"github.com/docker/distribution/registry/client/auth"
 	"github.com/docker/distribution/registry/client/transport"
 	"github.com/docker/docker/distribution/metadata"
 	"github.com/docker/docker/distribution/xfer"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/image/v1"
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/progress"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/reference"
 	"github.com/docker/docker/registry"
 	"golang.org/x/net/context"
 )
 
 var (
 	errRootFSMismatch  = errors.New("layers from manifest don't match image configuration")
 	errMediaTypePlugin = errors.New("target is a plugin")
 	errRootFSInvalid   = errors.New("invalid rootfs in image configuration")
 )
 
 // ImageConfigPullError is an error pulling the image config blob
 // (only applies to schema2).
 type ImageConfigPullError struct {
 	Err error
 }
 
 // Error returns the error string for ImageConfigPullError.
 func (e ImageConfigPullError) Error() string {
 	return "error pulling image configuration: " + e.Err.Error()
 }
 
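 // v2Puller pulls images for a single repository from a registry that speaks
 // the v2 protocol.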
 type v2Puller struct {
 	V2MetadataService metadata.V2MetadataService
 	endpoint          registry.APIEndpoint
 	config            *ImagePullConfig
 	repoInfo          *registry.RepositoryInfo
 	repo              distribution.Repository
 	// confirmedV2 is set to true if we confirm we're talking to a v2
 	// registry. This is used to limit fallbacks to the v1 protocol.
 	confirmedV2 bool
 }
 
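 // Pull pulls the image referenced by ref from the configured v2 endpoint.
 // Errors that should allow a fallback to the v1 protocol are wrapped in
 // fallbackError.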
 func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) {
 	// TODO(tiborvass): was ReceiveTimeout
 	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
 	if err != nil {
 		logrus.Warnf("Error getting v2 registry: %v", err)
 		return err
 	}
 
 	if err = p.pullV2Repository(ctx, ref); err != nil {
 		if _, ok := err.(fallbackError); ok {
 			return err
 		}
 		if continueOnError(err) {
 			logrus.Errorf("Error trying v2 registry: %v", err)
 			return fallbackError{
 				err:         err,
 				confirmedV2: p.confirmedV2,
 				transportOK: true,
 			}
 		}
 	}
 	return err
 }
 
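 // pullV2Repository pulls the tag or digest named by ref, or every tag in the
 // repository when ref carries neither, and then writes the final pull status.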
 func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) {
 	var layersDownloaded bool
 	if !reference.IsNameOnly(ref) {
 		layersDownloaded, err = p.pullV2Tag(ctx, ref)
 		if err != nil {
 			return err
 		}
 	} else {
 		tags, err := p.repo.Tags(ctx).All(ctx)
 		if err != nil {
 			// If this repository doesn't exist on V2, we should
 			// permit a fallback to V1.
 			return allowV1Fallback(err)
 		}
 
 		// The v2 registry knows about this repository, so we will not
 		// allow fallback to the v1 protocol even if we encounter an
 		// error later on.
 		p.confirmedV2 = true
 
 		for _, tag := range tags {
 			tagRef, err := reference.WithTag(ref, tag)
 			if err != nil {
 				return err
 			}
 			pulledNew, err := p.pullV2Tag(ctx, tagRef)
 			if err != nil {
 				// Since this is the pull-all-tags case, don't
 				// allow an error pulling a particular tag to
 				// make the whole pull fall back to v1.
 				if fallbackErr, ok := err.(fallbackError); ok {
 					return fallbackErr.err
 				}
 				return err
 			}
 			// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
 			// TODO(tiborvass): should we change the name of `layersDownloaded`? What about the message in writeStatus?
 			layersDownloaded = layersDownloaded || pulledNew
 		}
 	}
 
 	writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded)
 
 	return nil
 }
 
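 // v2LayerDescriptor describes a layer blob to be downloaded from a v2
 // registry. It is handed to the download manager as an
 // xfer.DownloadDescriptor.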
 type v2LayerDescriptor struct {
 	digest            digest.Digest
 	repoInfo          *registry.RepositoryInfo
 	repo              distribution.Repository
 	V2MetadataService metadata.V2MetadataService
 	tmpFile           *os.File
 	verifier          digest.Verifier
 	src               distribution.Descriptor
 }
 
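 // Key returns the key used by the download manager to deduplicate downloads
 // of this blob.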
 func (ld *v2LayerDescriptor) Key() string {
 	return "v2:" + ld.digest.String()
 }
 
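 // ID returns the truncated blob digest used to identify this layer in
 // progress output.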
 func (ld *v2LayerDescriptor) ID() string {
 	return stringid.TruncateID(ld.digest.String())
 }
 
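 // DiffID returns the layer's DiffID, if it is already known to the V2
 // metadata service.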
 func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
 	return ld.V2MetadataService.GetDiffID(ld.digest)
 }
 
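 // Download fetches the layer blob from the registry, resuming from a partial
 // temporary file when possible, and verifies it against the expected digest.
 // It returns a ReadCloser over the verified data along with the blob size
 // reported by the registry (0 if unknown).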
 func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
 	logrus.Debugf("pulling blob %q", ld.digest)
 
 	var (
 		err    error
 		offset int64
 	)
 
 	if ld.tmpFile == nil {
 		ld.tmpFile, err = createDownloadFile()
 		if err != nil {
 			return nil, 0, xfer.DoNotRetry{Err: err}
 		}
 	} else {
 		offset, err = ld.tmpFile.Seek(0, os.SEEK_END)
 		if err != nil {
 			logrus.Debugf("error seeking to end of download file: %v", err)
 			offset = 0
 
 			ld.tmpFile.Close()
 			if err := os.Remove(ld.tmpFile.Name()); err != nil {
 				logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
 			}
 			ld.tmpFile, err = createDownloadFile()
 			if err != nil {
 				return nil, 0, xfer.DoNotRetry{Err: err}
 			}
 		} else if offset != 0 {
 			logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
 		}
 	}
 
 	tmpFile := ld.tmpFile
 
 	layerDownload, err := ld.open(ctx)
 	if err != nil {
 		logrus.Errorf("Error initiating layer download: %v", err)
 		return nil, 0, retryOnError(err)
 	}
 
 	if offset != 0 {
 		_, err := layerDownload.Seek(offset, os.SEEK_SET)
 		if err != nil {
 			if err := ld.truncateDownloadFile(); err != nil {
 				return nil, 0, xfer.DoNotRetry{Err: err}
 			}
 			return nil, 0, err
 		}
 	}
 	size, err := layerDownload.Seek(0, os.SEEK_END)
 	if err != nil {
 		// Seek failed, perhaps because there was no Content-Length
 		// header. This shouldn't fail the download, because we can
 		// still continue without a progress bar.
 		size = 0
 	} else {
 		if size != 0 && offset > size {
 			logrus.Debug("Partial download is larger than full blob. Starting over")
 			offset = 0
 			if err := ld.truncateDownloadFile(); err != nil {
 				return nil, 0, xfer.DoNotRetry{Err: err}
 			}
 		}
 
 		// Restore the seek offset either at the beginning of the
 		// stream, or just after the last byte we have from previous
 		// attempts.
 		_, err = layerDownload.Seek(offset, os.SEEK_SET)
 		if err != nil {
 			return nil, 0, err
 		}
 	}
 
 	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
 	defer reader.Close()
 
 	if ld.verifier == nil {
 		ld.verifier, err = digest.NewDigestVerifier(ld.digest)
 		if err != nil {
 			return nil, 0, xfer.DoNotRetry{Err: err}
 		}
 	}
 
 	_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
 	if err != nil {
 		if err == transport.ErrWrongCodeForByteRange {
 			if err := ld.truncateDownloadFile(); err != nil {
 				return nil, 0, xfer.DoNotRetry{Err: err}
 			}
 			return nil, 0, err
 		}
 		return nil, 0, retryOnError(err)
 	}
 
 	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")
 
 	if !ld.verifier.Verified() {
 		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
 		logrus.Error(err)
 
 		// Allow a retry if this digest verification error happened
 		// after a resumed download.
 		if offset != 0 {
 			if err := ld.truncateDownloadFile(); err != nil {
 				return nil, 0, xfer.DoNotRetry{Err: err}
 			}
 
 			return nil, 0, err
 		}
 		return nil, 0, xfer.DoNotRetry{Err: err}
 	}
 
 	progress.Update(progressOutput, ld.ID(), "Download complete")
 
 	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())
 
 	_, err = tmpFile.Seek(0, os.SEEK_SET)
 	if err != nil {
 		tmpFile.Close()
 		if err := os.Remove(tmpFile.Name()); err != nil {
 			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
 		}
 		ld.tmpFile = nil
 		ld.verifier = nil
 		return nil, 0, xfer.DoNotRetry{Err: err}
 	}
 
 	// hand off the temporary file to the download manager, so it will only
 	// be closed once
 	ld.tmpFile = nil
 
 	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
 		tmpFile.Close()
 		err := os.RemoveAll(tmpFile.Name())
 		if err != nil {
 			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
 		}
 		return err
 	}), size, nil
 }
 
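 // Close deletes the temporary file used for the download, if one is still
 // present.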
 func (ld *v2LayerDescriptor) Close() {
 	if ld.tmpFile != nil {
 		ld.tmpFile.Close()
 		if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
 			logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
 		}
 	}
 }
 
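 // truncateDownloadFile empties the temporary download file and discards the
 // digest verifier so that the download can restart from the beginning.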
 func (ld *v2LayerDescriptor) truncateDownloadFile() error {
 	// Need a new hash context since we will be redoing the download
 	ld.verifier = nil
 
 	if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil {
 		logrus.Errorf("error seeking to beginning of download file: %v", err)
 		return err
 	}
 
 	if err := ld.tmpFile.Truncate(0); err != nil {
 		logrus.Errorf("error truncating download file: %v", err)
 		return err
 	}
 
 	return nil
 }
 
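 // Registered is called once the layer has been registered with the layer
 // store. It records the blob digest for the layer's DiffID in the V2
 // metadata service.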
 func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
 	// Cache mapping from this layer's DiffID to the blobsum
 	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()})
 }
 
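 // pullV2Tag pulls a single tag or digest reference: it fetches and verifies
 // the manifest, pulls the image content it describes, and records the result
 // in the reference store. tagUpdated reports whether the reference was
 // created or changed.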
 func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
 	manSvc, err := p.repo.Manifests(ctx)
 	if err != nil {
 		return false, err
 	}
 
 	var (
 		manifest    distribution.Manifest
 		tagOrDigest string // Used for logging/progress only
 	)
 	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
 		manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag()))
 		if err != nil {
 			return false, allowV1Fallback(err)
 		}
 		tagOrDigest = tagged.Tag()
 	} else if digested, isDigested := ref.(reference.Canonical); isDigested {
 		manifest, err = manSvc.Get(ctx, digested.Digest())
 		if err != nil {
 			return false, err
 		}
 		tagOrDigest = digested.Digest().String()
 	} else {
 		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
 	}
 
 	if manifest == nil {
 		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
 	}
 
 	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
 		if m.Manifest.Config.MediaType == schema2.MediaTypePluginConfig ||
 			m.Manifest.Config.MediaType == "application/vnd.docker.plugin.image.v0+json" { //TODO: remove this v0 before 1.13 GA
 			return false, errMediaTypePlugin
 		}
 	}
 
 	// If manSvc.Get succeeded, we can be confident that the registry on
 	// the other side speaks the v2 protocol.
 	p.confirmedV2 = true
 
 	logrus.Debugf("Pulling ref from V2 registry: %s", ref.String())
 	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Named().Name())
 
 	var (
 		id             digest.Digest
 		manifestDigest digest.Digest
 	)
 
 	switch v := manifest.(type) {
 	case *schema1.SignedManifest:
 		id, manifestDigest, err = p.pullSchema1(ctx, ref, v)
 		if err != nil {
 			return false, err
 		}
 	case *schema2.DeserializedManifest:
 		id, manifestDigest, err = p.pullSchema2(ctx, ref, v)
 		if err != nil {
 			return false, err
 		}
 	case *manifestlist.DeserializedManifestList:
 		id, manifestDigest, err = p.pullManifestList(ctx, ref, v)
 		if err != nil {
 			return false, err
 		}
 	default:
 		return false, errors.New("unsupported manifest format")
 	}
 
 	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())
 
 	oldTagID, err := p.config.ReferenceStore.Get(ref)
 	if err == nil {
 		if oldTagID == id {
 			return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
 		}
 	} else if err != reference.ErrDoesNotExist {
 		return false, err
 	}
 
 	if canonical, ok := ref.(reference.Canonical); ok {
 		if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
 			return false, err
 		}
 	} else {
 		if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
 			return false, err
 		}
 		if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
 			return false, err
 		}
 	}
 	return true, nil
 }
 
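 // pullSchema1 pulls an image described by a schema1 signed manifest and
 // returns the ID of the created image along with the manifest digest.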
 func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) {
 	var verifiedManifest *schema1.Manifest
 	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
 	if err != nil {
 		return "", "", err
 	}
 
 	rootFS := image.NewRootFS()
 
 	// remove duplicate layers and check parent chain validity
 	err = fixManifestLayers(verifiedManifest)
 	if err != nil {
 		return "", "", err
 	}
 
 	var descriptors []xfer.DownloadDescriptor
 
 	// Image history converted to the new format
 	var history []image.History
 
 	// Note that the order of this loop is in the direction of bottom-most
 	// to top-most, so that the downloads slice gets ordered correctly.
 	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
 		blobSum := verifiedManifest.FSLayers[i].BlobSum
 
 		var throwAway struct {
 			ThrowAway bool `json:"throwaway,omitempty"`
 		}
 		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
 			return "", "", err
 		}
 
 		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
 		if err != nil {
 			return "", "", err
 		}
 		history = append(history, h)
 
 		if throwAway.ThrowAway {
 			continue
 		}
 
 		layerDescriptor := &v2LayerDescriptor{
 			digest:            blobSum,
 			repoInfo:          p.repoInfo,
 			repo:              p.repo,
 			V2MetadataService: p.V2MetadataService,
 		}
 
 		descriptors = append(descriptors, layerDescriptor)
 	}
 
 	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
 	if err != nil {
 		return "", "", err
 	}
 	defer release()
 
 	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
 	if err != nil {
 		return "", "", err
 	}
 
 	imageID, err := p.config.ImageStore.Create(config)
 	if err != nil {
 		return "", "", err
 	}
 
 	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)
 
 	return imageID.Digest(), manifestDigest, nil
 }
 
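 // pullSchema2 pulls an image described by a schema2 manifest: it fetches the
 // image configuration and the referenced layers, and verifies that the
 // configuration's rootfs DiffIDs match the downloaded layers. It returns the
 // image ID (the configuration digest) and the manifest digest.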
 func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) {
 	manifestDigest, err = schema2ManifestDigest(ref, mfst)
 	if err != nil {
 		return "", "", err
 	}
 
 	target := mfst.Target()
 	if _, err := p.config.ImageStore.Get(image.IDFromDigest(target.Digest)); err == nil {
 		// If the image already exists locally, no need to pull
 		// anything.
 		return target.Digest, manifestDigest, nil
 	}
 
 	var descriptors []xfer.DownloadDescriptor
 
 	// Note that the order of this loop is in the direction of bottom-most
 	// to top-most, so that the downloads slice gets ordered correctly.
 	for _, d := range mfst.Layers {
 		layerDescriptor := &v2LayerDescriptor{
 			digest:            d.Digest,
 			repo:              p.repo,
 			repoInfo:          p.repoInfo,
 			V2MetadataService: p.V2MetadataService,
 			src:               d,
 		}
 
 		descriptors = append(descriptors, layerDescriptor)
 	}
 
 	configChan := make(chan []byte, 1)
 	errChan := make(chan error, 1)
 	var cancel func()
 	ctx, cancel = context.WithCancel(ctx)
 
 	// Pull the image config
 	go func() {
 		configJSON, err := p.pullSchema2Config(ctx, target.Digest)
 		if err != nil {
 			errChan <- ImageConfigPullError{Err: err}
 			cancel()
 			return
 		}
 		configChan <- configJSON
 	}()
 
 	var (
 		configJSON         []byte       // raw serialized image config
 		unmarshalledConfig image.Image  // deserialized image config
 		downloadRootFS     image.RootFS // rootFS to use for registering layers.
 	)
 
 	// https://github.com/docker/docker/issues/24766 - Err on the side of caution,
 	// explicitly blocking images intended for linux from the Windows daemon. On
 	// Windows, we do this before the attempt to download, effectively serialising
 	// the download and slightly slowing it down. We have to do it this way, as
 	// chances are the download of layers itself would fail due to file names
 	// which aren't suitable for NTFS. At some point in the future, if a similar
 	// check to block Windows images being pulled on Linux is implemented, it
 	// may be necessary to perform the same type of serialisation.
 	if runtime.GOOS == "windows" {
 		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
 		if err != nil {
 			return "", "", err
 		}
 
 		if unmarshalledConfig.RootFS == nil {
 			return "", "", errRootFSInvalid
 		}
 
 		if unmarshalledConfig.OS == "linux" {
 			return "", "", fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS)
 		}
 	}
 
 	downloadRootFS = *image.NewRootFS()
 
 	rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
 	if err != nil {
 		if configJSON != nil {
 			// Already received the config
 			return "", "", err
 		}
 		select {
 		case err = <-errChan:
 			return "", "", err
 		default:
 			cancel()
 			select {
 			case <-configChan:
 			case <-errChan:
 			}
 			return "", "", err
 		}
 	}
 	defer release()
 
 	if configJSON == nil {
 		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
 		if err != nil {
 			return "", "", err
 		}
 
 		if unmarshalledConfig.RootFS == nil {
 			return "", "", errRootFSInvalid
 		}
 	}
 
 	// The DiffIDs returned in rootFS MUST match those in the config.
 	// Otherwise the image config could be referencing layers that aren't
 	// included in the manifest.
 	if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) {
 		return "", "", errRootFSMismatch
 	}
 
 	for i := range rootFS.DiffIDs {
 		if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] {
 			return "", "", errRootFSMismatch
 		}
 	}
 
 	imageID, err := p.config.ImageStore.Create(configJSON)
 	if err != nil {
 		return "", "", err
 	}
 
 	return imageID.Digest(), manifestDigest, nil
 }
 
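 // receiveConfig waits for the raw image configuration (or an error) from the
 // goroutine started in pullSchema2 and unmarshals it into an image.Image.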
 func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) {
 	select {
 	case configJSON := <-configChan:
 		var unmarshalledConfig image.Image
 		if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil {
 			return nil, image.Image{}, err
 		}
 		return configJSON, unmarshalledConfig, nil
 	case err := <-errChan:
 		return nil, image.Image{}, err
 		// Don't need a case for ctx.Done in the select because cancellation
 		// will trigger an error in p.pullSchema2Config.
 	}
 }
 
 // pullManifestList handles "manifest lists" which point to various
 // platform-specific manifests.
 func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (id digest.Digest, manifestListDigest digest.Digest, err error) {
 	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
 	if err != nil {
 		return "", "", err
 	}
 
 	var manifestDigest digest.Digest
 	for _, manifestDescriptor := range mfstList.Manifests {
 		// TODO(aaronl): The manifest list spec supports optional
 		// "features" and "variant" fields. These are not yet used.
 		// Once they are, their values should be interpreted here.
 		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS {
 			manifestDigest = manifestDescriptor.Digest
 			break
 		}
 	}
 
 	if manifestDigest == "" {
 		return "", "", errors.New("no supported platform found in manifest list")
 	}
 
 	manSvc, err := p.repo.Manifests(ctx)
 	if err != nil {
 		return "", "", err
 	}
 
 	manifest, err := manSvc.Get(ctx, manifestDigest)
 	if err != nil {
 		return "", "", err
 	}
 
 	manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), manifestDigest)
 	if err != nil {
 		return "", "", err
 	}
 
 	switch v := manifest.(type) {
 	case *schema1.SignedManifest:
 		id, _, err = p.pullSchema1(ctx, manifestRef, v)
 		if err != nil {
 			return "", "", err
 		}
 	case *schema2.DeserializedManifest:
 		id, _, err = p.pullSchema2(ctx, manifestRef, v)
 		if err != nil {
 			return "", "", err
 		}
 	default:
 		return "", "", errors.New("unsupported manifest format")
 	}
 
 	return id, manifestListDigest, err
 }
 
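 // pullSchema2Config fetches the image configuration blob with the given
 // digest and verifies that its content matches that digest.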
 func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
 	blobs := p.repo.Blobs(ctx)
 	configJSON, err = blobs.Get(ctx, dgst)
 	if err != nil {
 		return nil, err
 	}
 
 	// Verify image config digest
 	verifier, err := digest.NewDigestVerifier(dgst)
 	if err != nil {
 		return nil, err
 	}
 	if _, err := verifier.Write(configJSON); err != nil {
 		return nil, err
 	}
 	if !verifier.Verified() {
 		err := fmt.Errorf("image config verification failed for digest %s", dgst)
 		logrus.Error(err)
 		return nil, err
 	}
 
 	return configJSON, nil
 }
 
 // schema2ManifestDigest computes the manifest digest, and, if pulling by
 // digest, ensures that it matches the requested digest.
 func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
 	_, canonical, err := mfst.Payload()
 	if err != nil {
 		return "", err
 	}
 
 	// If pull by digest, then verify the manifest digest.
 	if digested, isDigested := ref.(reference.Canonical); isDigested {
 		verifier, err := digest.NewDigestVerifier(digested.Digest())
 		if err != nil {
 			return "", err
 		}
 		if _, err := verifier.Write(canonical); err != nil {
 			return "", err
 		}
 		if !verifier.Verified() {
 			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
 			logrus.Error(err)
 			return "", err
 		}
 		return digested.Digest(), nil
 	}
 
 	return digest.FromBytes(canonical), nil
 }
 
 // allowV1Fallback checks if the error is a possible reason to fallback to v1
 // (even if confirmedV2 has been set already), and if so, wraps the error in
 // a fallbackError with confirmedV2 set to false. Otherwise, it returns the
 // error unmodified.
 func allowV1Fallback(err error) error {
 	switch v := err.(type) {
 	case errcode.Errors:
 		if len(v) != 0 {
 			if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) {
 				return fallbackError{
 					err:         err,
 					confirmedV2: false,
 					transportOK: true,
 				}
 			}
 		}
 	case errcode.Error:
 		if shouldV2Fallback(v) {
 			return fallbackError{
 				err:         err,
 				confirmedV2: false,
 				transportOK: true,
 			}
 		}
 	case *url.Error:
 		if v.Err == auth.ErrNoBasicAuthCredentials {
 			return fallbackError{err: err, confirmedV2: false}
 		}
 	}
 
 	return err
 }
 
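 // verifySchema1Manifest verifies a schema1 signed manifest: when pulling by
 // digest it checks the manifest payload against that digest, and it validates
 // the schema version and the layer/history structure. It returns the embedded
 // manifest on success.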
 func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
 	// If pull by digest, then verify the manifest digest. NOTE: It is
 	// important to do this first, before any other content validation. If the
 	// digest cannot be verified, don't even bother with those other things.
 	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
 		verifier, err := digest.NewDigestVerifier(digested.Digest())
 		if err != nil {
 			return nil, err
 		}
 		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
 			return nil, err
 		}
 		if !verifier.Verified() {
 			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
 			logrus.Error(err)
 			return nil, err
 		}
 	}
 	m = &signedManifest.Manifest
 
 	if m.SchemaVersion != 1 {
 		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String())
 	}
 	if len(m.FSLayers) != len(m.History) {
 		return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String())
 	}
 	if len(m.FSLayers) == 0 {
 		return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
 	}
 	return m, nil
 }
 
 // fixManifestLayers removes repeated layers from the manifest and checks the
 // correctness of the parent chain.
 func fixManifestLayers(m *schema1.Manifest) error {
 	imgs := make([]*image.V1Image, len(m.FSLayers))
 	for i := range m.FSLayers {
 		img := &image.V1Image{}
 
 		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
 			return err
 		}
 
 		imgs[i] = img
 		if err := v1.ValidateID(img.ID); err != nil {
 			return err
 		}
 	}
 
 	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
 		// Windows base layer can point to a base layer parent that is not in manifest.
 		return errors.New("invalid parent ID in the base layer of the image")
 	}
 
 	// check general duplicates to error instead of a deadlock
 	idmap := make(map[string]struct{})
 
 	var lastID string
 	for _, img := range imgs {
 		// skip IDs that appear after each other, we handle those later
 		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
 			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
 		}
 		lastID = img.ID
 		idmap[lastID] = struct{}{}
 	}
 
 	// backwards loop so that we keep the remaining indexes after removing items
 	for i := len(imgs) - 2; i >= 0; i-- {
 		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
 			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
 			m.History = append(m.History[:i], m.History[i+1:]...)
 		} else if imgs[i].Parent != imgs[i+1].ID {
 			return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent)
 		}
 	}
 
 	return nil
 }
 
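 // createDownloadFile creates the temporary file that a layer download is
 // staged in.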
 func createDownloadFile() (*os.File, error) {
 	return ioutil.TempFile("", "GetImageBlob")
 }