Browse code

Update graph to use vendored distribution client for the v2 codepath

Signed-off-by: Derek McGowan <derek@mcgstyle.net> (github: dmcgowan)
Signed-off-by: Tibor Vass <tibor@docker.com>

Derek McGowan authored on 2015/02/13 03:23:22
Showing 25 changed files
... ...
@@ -21,7 +21,7 @@ import (
21 21
 //
22 22
 // Usage: docker login SERVER
23 23
 func (cli *DockerCli) CmdLogin(args ...string) error {
24
-	cmd := cli.Subcmd("login", []string{"[SERVER]"}, "Register or log in to a Docker registry server, if no server is\nspecified \""+registry.IndexServerAddress()+"\" is the default.", true)
24
+	cmd := cli.Subcmd("login", []string{"[SERVER]"}, "Register or log in to a Docker registry server, if no server is\nspecified \""+registry.INDEXSERVER+"\" is the default.", true)
25 25
 	cmd.Require(flag.Max, 1)
26 26
 
27 27
 	var username, password, email string
... ...
@@ -32,7 +32,7 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
32 32
 
33 33
 	cmd.ParseFlags(args, true)
34 34
 
35
-	serverAddress := registry.IndexServerAddress()
35
+	serverAddress := registry.INDEXSERVER
36 36
 	if len(cmd.Args()) > 0 {
37 37
 		serverAddress = cmd.Arg(0)
38 38
 	}
... ...
@@ -13,12 +13,12 @@ import (
13 13
 //
14 14
 // Usage: docker logout [SERVER]
15 15
 func (cli *DockerCli) CmdLogout(args ...string) error {
16
-	cmd := cli.Subcmd("logout", []string{"[SERVER]"}, "Log out from a Docker registry, if no server is\nspecified \""+registry.IndexServerAddress()+"\" is the default.", true)
16
+	cmd := cli.Subcmd("logout", []string{"[SERVER]"}, "Log out from a Docker registry, if no server is\nspecified \""+registry.INDEXSERVER+"\" is the default.", true)
17 17
 	cmd.Require(flag.Max, 1)
18 18
 
19 19
 	cmd.ParseFlags(args, true)
20 20
 
21
-	serverAddress := registry.IndexServerAddress()
21
+	serverAddress := registry.INDEXSERVER
22 22
 	if len(cmd.Args()) > 0 {
23 23
 		serverAddress = cmd.Arg(0)
24 24
 	}
... ...
@@ -74,7 +74,7 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
74 74
 		NEventsListener:    daemon.EventsService.SubscribersCount(),
75 75
 		KernelVersion:      kernelVersion,
76 76
 		OperatingSystem:    operatingSystem,
77
-		IndexServerAddress: registry.IndexServerAddress(),
77
+		IndexServerAddress: registry.INDEXSERVER,
78 78
 		RegistryConfig:     daemon.RegistryService.Config,
79 79
 		InitSha1:           dockerversion.INITSHA1,
80 80
 		InitPath:           initPath,
81 81
deleted file mode 100644
... ...
@@ -1,178 +0,0 @@
1
-package graph
2
-
3
-import (
4
-	"encoding/json"
5
-	"fmt"
6
-
7
-	"github.com/Sirupsen/logrus"
8
-	"github.com/docker/distribution/digest"
9
-	"github.com/docker/docker/registry"
10
-	"github.com/docker/docker/trust"
11
-	"github.com/docker/libtrust"
12
-)
13
-
14
-// loadManifest loads a manifest from a byte array and verifies its content,
15
-// returning the local digest, the manifest itself, whether or not it was
16
-// verified. If ref is a digest, rather than a tag, this will be treated as
17
-// the local digest. An error will be returned if the signature verification
18
-// fails, local digest verification fails and, if provided, the remote digest
19
-// verification fails. The boolean return will only be false without error on
20
-// the failure of signatures trust check.
21
-func (s *TagStore) loadManifest(manifestBytes []byte, ref string, remoteDigest digest.Digest) (digest.Digest, *registry.ManifestData, bool, error) {
22
-	payload, keys, err := unpackSignedManifest(manifestBytes)
23
-	if err != nil {
24
-		return "", nil, false, fmt.Errorf("error unpacking manifest: %v", err)
25
-	}
26
-
27
-	// TODO(stevvooe): It would be a lot better here to build up a stack of
28
-	// verifiers, then push the bytes one time for signatures and digests, but
29
-	// the manifests are typically small, so this optimization is not worth
30
-	// hacking this code without further refactoring.
31
-
32
-	var localDigest digest.Digest
33
-
34
-	// Verify the local digest, if present in ref. ParseDigest will validate
35
-	// that the ref is a digest and verify against that if present. Otherwize
36
-	// (on error), we simply compute the localDigest and proceed.
37
-	if dgst, err := digest.ParseDigest(ref); err == nil {
38
-		// verify the manifest against local ref
39
-		if err := verifyDigest(dgst, payload); err != nil {
40
-			return "", nil, false, fmt.Errorf("verifying local digest: %v", err)
41
-		}
42
-
43
-		localDigest = dgst
44
-	} else {
45
-		// We don't have a local digest, since we are working from a tag.
46
-		// Compute the digest of the payload and return that.
47
-		logrus.Debugf("provided manifest reference %q is not a digest: %v", ref, err)
48
-		localDigest, err = digest.FromBytes(payload)
49
-		if err != nil {
50
-			// near impossible
51
-			logrus.Errorf("error calculating local digest during tag pull: %v", err)
52
-			return "", nil, false, err
53
-		}
54
-	}
55
-
56
-	// verify against the remote digest, if available
57
-	if remoteDigest != "" {
58
-		if err := verifyDigest(remoteDigest, payload); err != nil {
59
-			return "", nil, false, fmt.Errorf("verifying remote digest: %v", err)
60
-		}
61
-	}
62
-
63
-	var manifest registry.ManifestData
64
-	if err := json.Unmarshal(payload, &manifest); err != nil {
65
-		return "", nil, false, fmt.Errorf("error unmarshalling manifest: %s", err)
66
-	}
67
-
68
-	// validate the contents of the manifest
69
-	if err := validateManifest(&manifest); err != nil {
70
-		return "", nil, false, err
71
-	}
72
-
73
-	var verified bool
74
-	verified, err = s.verifyTrustedKeys(manifest.Name, keys)
75
-	if err != nil {
76
-		return "", nil, false, fmt.Errorf("error verifying trusted keys: %v", err)
77
-	}
78
-
79
-	return localDigest, &manifest, verified, nil
80
-}
81
-
82
-// unpackSignedManifest takes the raw, signed manifest bytes, unpacks the jws
83
-// and returns the payload and public keys used to signed the manifest.
84
-// Signatures are verified for authenticity but not against the trust store.
85
-func unpackSignedManifest(p []byte) ([]byte, []libtrust.PublicKey, error) {
86
-	sig, err := libtrust.ParsePrettySignature(p, "signatures")
87
-	if err != nil {
88
-		return nil, nil, fmt.Errorf("error parsing payload: %s", err)
89
-	}
90
-
91
-	keys, err := sig.Verify()
92
-	if err != nil {
93
-		return nil, nil, fmt.Errorf("error verifying payload: %s", err)
94
-	}
95
-
96
-	payload, err := sig.Payload()
97
-	if err != nil {
98
-		return nil, nil, fmt.Errorf("error retrieving payload: %s", err)
99
-	}
100
-
101
-	return payload, keys, nil
102
-}
103
-
104
-// verifyTrustedKeys checks the keys provided against the trust store,
105
-// ensuring that the provided keys are trusted for the namespace. The keys
106
-// provided from this method must come from the signatures provided as part of
107
-// the manifest JWS package, obtained from unpackSignedManifest or libtrust.
108
-func (s *TagStore) verifyTrustedKeys(namespace string, keys []libtrust.PublicKey) (verified bool, err error) {
109
-	if namespace[0] != '/' {
110
-		namespace = "/" + namespace
111
-	}
112
-
113
-	for _, key := range keys {
114
-		b, err := key.MarshalJSON()
115
-		if err != nil {
116
-			return false, fmt.Errorf("error marshalling public key: %s", err)
117
-		}
118
-		// Check key has read/write permission (0x03)
119
-		v, err := s.trustService.CheckKey(namespace, b, 0x03)
120
-		if err != nil {
121
-			vErr, ok := err.(trust.NotVerifiedError)
122
-			if !ok {
123
-				return false, fmt.Errorf("error running key check: %s", err)
124
-			}
125
-			logrus.Debugf("Key check result: %v", vErr)
126
-		}
127
-		verified = v
128
-	}
129
-
130
-	if verified {
131
-		logrus.Debug("Key check result: verified")
132
-	}
133
-
134
-	return
135
-}
136
-
137
-// verifyDigest checks the contents of p against the provided digest. Note
138
-// that for manifests, this is the signed payload and not the raw bytes with
139
-// signatures.
140
-func verifyDigest(dgst digest.Digest, p []byte) error {
141
-	if err := dgst.Validate(); err != nil {
142
-		return fmt.Errorf("error validating  digest %q: %v", dgst, err)
143
-	}
144
-
145
-	verifier, err := digest.NewDigestVerifier(dgst)
146
-	if err != nil {
147
-		// There are not many ways this can go wrong: if it does, its
148
-		// fatal. Likley, the cause would be poor validation of the
149
-		// incoming reference.
150
-		return fmt.Errorf("error creating verifier for digest %q: %v", dgst, err)
151
-	}
152
-
153
-	if _, err := verifier.Write(p); err != nil {
154
-		return fmt.Errorf("error writing payload to digest verifier (verifier target %q): %v", dgst, err)
155
-	}
156
-
157
-	if !verifier.Verified() {
158
-		return fmt.Errorf("verification against digest %q failed", dgst)
159
-	}
160
-
161
-	return nil
162
-}
163
-
164
-func validateManifest(manifest *registry.ManifestData) error {
165
-	if manifest.SchemaVersion != 1 {
166
-		return fmt.Errorf("unsupported schema version: %d", manifest.SchemaVersion)
167
-	}
168
-
169
-	if len(manifest.FSLayers) != len(manifest.History) {
170
-		return fmt.Errorf("length of history not equal to number of layers")
171
-	}
172
-
173
-	if len(manifest.FSLayers) == 0 {
174
-		return fmt.Errorf("no FSLayers in manifest")
175
-	}
176
-
177
-	return nil
178
-}
179 1
deleted file mode 100644
... ...
@@ -1,293 +0,0 @@
1
-package graph
2
-
3
-import (
4
-	"encoding/json"
5
-	"fmt"
6
-	"os"
7
-	"testing"
8
-
9
-	"github.com/docker/distribution/digest"
10
-	"github.com/docker/docker/registry"
11
-	"github.com/docker/docker/runconfig"
12
-	"github.com/docker/docker/utils"
13
-	"github.com/docker/libtrust"
14
-)
15
-
16
-const (
17
-	testManifestImageName    = "testapp"
18
-	testManifestImageID      = "d821b739e8834ec89ac4469266c3d11515da88fdcbcbdddcbcddb636f54fdde9"
19
-	testManifestImageIDShort = "d821b739e883"
20
-	testManifestTag          = "manifesttest"
21
-)
22
-
23
-func (s *TagStore) newManifest(localName, remoteName, tag string) ([]byte, error) {
24
-	manifest := &registry.ManifestData{
25
-		Name:          remoteName,
26
-		Tag:           tag,
27
-		SchemaVersion: 1,
28
-	}
29
-	localRepo, err := s.Get(localName)
30
-	if err != nil {
31
-		return nil, err
32
-	}
33
-	if localRepo == nil {
34
-		return nil, fmt.Errorf("Repo does not exist: %s", localName)
35
-	}
36
-
37
-	// Get the top-most layer id which the tag points to
38
-	layerId, exists := localRepo[tag]
39
-	if !exists {
40
-		return nil, fmt.Errorf("Tag does not exist for %s: %s", localName, tag)
41
-	}
42
-	layersSeen := make(map[string]bool)
43
-
44
-	layer, err := s.graph.Get(layerId)
45
-	if err != nil {
46
-		return nil, err
47
-	}
48
-	manifest.Architecture = layer.Architecture
49
-	manifest.FSLayers = make([]*registry.FSLayer, 0, 4)
50
-	manifest.History = make([]*registry.ManifestHistory, 0, 4)
51
-	var metadata runconfig.Config
52
-	if layer.Config != nil {
53
-		metadata = *layer.Config
54
-	}
55
-
56
-	for ; layer != nil; layer, err = s.graph.GetParent(layer) {
57
-		if err != nil {
58
-			return nil, err
59
-		}
60
-
61
-		if layersSeen[layer.ID] {
62
-			break
63
-		}
64
-		if layer.Config != nil && metadata.Image != layer.ID {
65
-			err = runconfig.Merge(&metadata, layer.Config)
66
-			if err != nil {
67
-				return nil, err
68
-			}
69
-		}
70
-
71
-		dgst, err := s.graph.GetDigest(layer.ID)
72
-		if err == ErrDigestNotSet {
73
-			archive, err := s.graph.TarLayer(layer)
74
-			if err != nil {
75
-				return nil, err
76
-			}
77
-
78
-			defer archive.Close()
79
-
80
-			dgst, err = digest.FromReader(archive)
81
-			if err != nil {
82
-				return nil, err
83
-			}
84
-
85
-			// Save checksum value
86
-			if err := s.graph.SetDigest(layer.ID, dgst); err != nil {
87
-				return nil, err
88
-			}
89
-		} else if err != nil {
90
-			return nil, fmt.Errorf("Error getting image checksum: %s", err)
91
-		}
92
-
93
-		jsonData, err := s.graph.RawJSON(layer.ID)
94
-		if err != nil {
95
-			return nil, fmt.Errorf("Cannot retrieve the path for {%s}: %s", layer.ID, err)
96
-		}
97
-
98
-		manifest.FSLayers = append(manifest.FSLayers, &registry.FSLayer{BlobSum: dgst.String()})
99
-
100
-		layersSeen[layer.ID] = true
101
-
102
-		manifest.History = append(manifest.History, &registry.ManifestHistory{V1Compatibility: string(jsonData)})
103
-	}
104
-
105
-	manifestBytes, err := json.MarshalIndent(manifest, "", "   ")
106
-	if err != nil {
107
-		return nil, err
108
-	}
109
-
110
-	return manifestBytes, nil
111
-}
112
-
113
-func TestManifestTarsumCache(t *testing.T) {
114
-	tmp, err := utils.TestDirectory("")
115
-	if err != nil {
116
-		t.Fatal(err)
117
-	}
118
-	defer os.RemoveAll(tmp)
119
-	store := mkTestTagStore(tmp, t)
120
-	defer store.graph.driver.Cleanup()
121
-
122
-	archive, err := fakeTar()
123
-	if err != nil {
124
-		t.Fatal(err)
125
-	}
126
-	img := &Image{ID: testManifestImageID}
127
-	if err := store.graph.Register(img, archive); err != nil {
128
-		t.Fatal(err)
129
-	}
130
-	if err := store.Tag(testManifestImageName, testManifestTag, testManifestImageID, false); err != nil {
131
-		t.Fatal(err)
132
-	}
133
-
134
-	if _, err := store.graph.GetDigest(testManifestImageID); err == nil {
135
-		t.Fatalf("Non-empty checksum file after register")
136
-	} else if err != ErrDigestNotSet {
137
-		t.Fatal(err)
138
-	}
139
-
140
-	// Generate manifest
141
-	payload, err := store.newManifest(testManifestImageName, testManifestImageName, testManifestTag)
142
-	if err != nil {
143
-		t.Fatal(err)
144
-	}
145
-
146
-	manifestChecksum, err := store.graph.GetDigest(testManifestImageID)
147
-	if err != nil {
148
-		t.Fatal(err)
149
-	}
150
-
151
-	var manifest registry.ManifestData
152
-	if err := json.Unmarshal(payload, &manifest); err != nil {
153
-		t.Fatalf("error unmarshalling manifest: %s", err)
154
-	}
155
-
156
-	if len(manifest.FSLayers) != 1 {
157
-		t.Fatalf("Unexpected number of layers, expecting 1: %d", len(manifest.FSLayers))
158
-	}
159
-
160
-	if manifest.FSLayers[0].BlobSum != manifestChecksum.String() {
161
-		t.Fatalf("Unexpected blob sum, expecting %q, got %q", manifestChecksum, manifest.FSLayers[0].BlobSum)
162
-	}
163
-
164
-	if len(manifest.History) != 1 {
165
-		t.Fatalf("Unexpected number of layer history, expecting 1: %d", len(manifest.History))
166
-	}
167
-
168
-	v1compat, err := store.graph.RawJSON(img.ID)
169
-	if err != nil {
170
-		t.Fatal(err)
171
-	}
172
-	if manifest.History[0].V1Compatibility != string(v1compat) {
173
-		t.Fatalf("Unexpected json value\nExpected:\n%s\nActual:\n%s", v1compat, manifest.History[0].V1Compatibility)
174
-	}
175
-}
176
-
177
-// TestManifestDigestCheck ensures that loadManifest properly verifies the
178
-// remote and local digest.
179
-func TestManifestDigestCheck(t *testing.T) {
180
-	tmp, err := utils.TestDirectory("")
181
-	if err != nil {
182
-		t.Fatal(err)
183
-	}
184
-	defer os.RemoveAll(tmp)
185
-	store := mkTestTagStore(tmp, t)
186
-	defer store.graph.driver.Cleanup()
187
-
188
-	archive, err := fakeTar()
189
-	if err != nil {
190
-		t.Fatal(err)
191
-	}
192
-	img := &Image{ID: testManifestImageID}
193
-	if err := store.graph.Register(img, archive); err != nil {
194
-		t.Fatal(err)
195
-	}
196
-	if err := store.Tag(testManifestImageName, testManifestTag, testManifestImageID, false); err != nil {
197
-		t.Fatal(err)
198
-	}
199
-
200
-	if _, err := store.graph.GetDigest(testManifestImageID); err == nil {
201
-		t.Fatalf("Non-empty checksum file after register")
202
-	} else if err != ErrDigestNotSet {
203
-		t.Fatal(err)
204
-	}
205
-
206
-	// Generate manifest
207
-	payload, err := store.newManifest(testManifestImageName, testManifestImageName, testManifestTag)
208
-	if err != nil {
209
-		t.Fatalf("unexpected error generating test manifest: %v", err)
210
-	}
211
-
212
-	pk, err := libtrust.GenerateECP256PrivateKey()
213
-	if err != nil {
214
-		t.Fatalf("unexpected error generating private key: %v", err)
215
-	}
216
-
217
-	sig, err := libtrust.NewJSONSignature(payload)
218
-	if err != nil {
219
-		t.Fatalf("error creating signature: %v", err)
220
-	}
221
-
222
-	if err := sig.Sign(pk); err != nil {
223
-		t.Fatalf("error signing manifest bytes: %v", err)
224
-	}
225
-
226
-	signedBytes, err := sig.PrettySignature("signatures")
227
-	if err != nil {
228
-		t.Fatalf("error getting signed bytes: %v", err)
229
-	}
230
-
231
-	dgst, err := digest.FromBytes(payload)
232
-	if err != nil {
233
-		t.Fatalf("error getting digest of manifest: %v", err)
234
-	}
235
-
236
-	// use this as the "bad" digest
237
-	zeroDigest, err := digest.FromBytes([]byte{})
238
-	if err != nil {
239
-		t.Fatalf("error making zero digest: %v", err)
240
-	}
241
-
242
-	// Remote and local match, everything should look good
243
-	local, _, _, err := store.loadManifest(signedBytes, dgst.String(), dgst)
244
-	if err != nil {
245
-		t.Fatalf("unexpected error verifying local and remote digest: %v", err)
246
-	}
247
-
248
-	if local != dgst {
249
-		t.Fatalf("local digest not correctly calculated: %v", err)
250
-	}
251
-
252
-	// remote and no local, since pulling by tag
253
-	local, _, _, err = store.loadManifest(signedBytes, "tag", dgst)
254
-	if err != nil {
255
-		t.Fatalf("unexpected error verifying tag pull and remote digest: %v", err)
256
-	}
257
-
258
-	if local != dgst {
259
-		t.Fatalf("local digest not correctly calculated: %v", err)
260
-	}
261
-
262
-	// remote and differing local, this is the most important to fail
263
-	local, _, _, err = store.loadManifest(signedBytes, zeroDigest.String(), dgst)
264
-	if err == nil {
265
-		t.Fatalf("error expected when verifying with differing local digest")
266
-	}
267
-
268
-	// no remote, no local (by tag)
269
-	local, _, _, err = store.loadManifest(signedBytes, "tag", "")
270
-	if err != nil {
271
-		t.Fatalf("unexpected error verifying manifest without remote digest: %v", err)
272
-	}
273
-
274
-	if local != dgst {
275
-		t.Fatalf("local digest not correctly calculated: %v", err)
276
-	}
277
-
278
-	// no remote, with local
279
-	local, _, _, err = store.loadManifest(signedBytes, dgst.String(), "")
280
-	if err != nil {
281
-		t.Fatalf("unexpected error verifying manifest without remote digest: %v", err)
282
-	}
283
-
284
-	if local != dgst {
285
-		t.Fatalf("local digest not correctly calculated: %v", err)
286
-	}
287
-
288
-	// bad remote, we fail the check.
289
-	local, _, _, err = store.loadManifest(signedBytes, dgst.String(), zeroDigest)
290
-	if err == nil {
291
-		t.Fatalf("error expected when verifying with differing remote digest")
292
-	}
293
-}
... ...
@@ -3,20 +3,10 @@ package graph
3 3
 import (
4 4
 	"fmt"
5 5
 	"io"
6
-	"io/ioutil"
7
-	"net"
8
-	"net/url"
9
-	"os"
10
-	"strings"
11
-	"time"
12 6
 
13 7
 	"github.com/Sirupsen/logrus"
14
-	"github.com/docker/distribution/digest"
15
-	"github.com/docker/distribution/registry/client/transport"
16 8
 	"github.com/docker/docker/cliconfig"
17
-	"github.com/docker/docker/pkg/progressreader"
18 9
 	"github.com/docker/docker/pkg/streamformatter"
19
-	"github.com/docker/docker/pkg/stringid"
20 10
 	"github.com/docker/docker/registry"
21 11
 	"github.com/docker/docker/utils"
22 12
 )
... ...
@@ -27,10 +17,38 @@ type ImagePullConfig struct {
27 27
 	OutStream   io.Writer
28 28
 }
29 29
 
30
+type Puller interface {
31
+	// Pull tries to pull the image referenced by `tag`
32
+	// Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint.
33
+	//
34
+	// TODO(tiborvass): have Pull() take a reference to repository + tag, so that the puller itself is repository-agnostic.
35
+	Pull(tag string) (fallback bool, err error)
36
+}
37
+
38
+func NewPuller(s *TagStore, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig, sf *streamformatter.StreamFormatter) (Puller, error) {
39
+	switch endpoint.Version {
40
+	case registry.APIVersion2:
41
+		return &v2Puller{
42
+			TagStore: s,
43
+			endpoint: endpoint,
44
+			config:   imagePullConfig,
45
+			sf:       sf,
46
+			repoInfo: repoInfo,
47
+		}, nil
48
+	case registry.APIVersion1:
49
+		return &v1Puller{
50
+			TagStore: s,
51
+			endpoint: endpoint,
52
+			config:   imagePullConfig,
53
+			sf:       sf,
54
+			repoInfo: repoInfo,
55
+		}, nil
56
+	}
57
+	return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL)
58
+}
59
+
30 60
 func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConfig) error {
31
-	var (
32
-		sf = streamformatter.NewJSONStreamFormatter()
33
-	)
61
+	var sf = streamformatter.NewJSONStreamFormatter()
34 62
 
35 63
 	// Resolve the Repository name from fqn to RepositoryInfo
36 64
 	repoInfo, err := s.registryService.ResolveRepository(image)
... ...
@@ -38,424 +56,74 @@ func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConf
38 38
 		return err
39 39
 	}
40 40
 
41
+	// makes sure name is not empty or `scratch`
41 42
 	if err := validateRepoName(repoInfo.LocalName); err != nil {
42 43
 		return err
43 44
 	}
44 45
 
45
-	c, err := s.poolAdd("pull", utils.ImageReference(repoInfo.LocalName, tag))
46
+	endpoints, err := s.registryService.LookupEndpoints(repoInfo.CanonicalName)
46 47
 	if err != nil {
47
-		if c != nil {
48
-			// Another pull of the same repository is already taking place; just wait for it to finish
49
-			imagePullConfig.OutStream.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", repoInfo.LocalName))
50
-			<-c
51
-			return nil
52
-		}
53 48
 		return err
54 49
 	}
55
-	defer s.poolRemove("pull", utils.ImageReference(repoInfo.LocalName, tag))
56 50
 
57 51
 	logName := repoInfo.LocalName
58 52
 	if tag != "" {
59 53
 		logName = utils.ImageReference(logName, tag)
60 54
 	}
61 55
 
62
-	// Attempt pulling official content from a provided v2 mirror
63
-	if repoInfo.Index.Official {
64
-		v2mirrorEndpoint, v2mirrorRepoInfo, err := configureV2Mirror(repoInfo, s.registryService)
65
-		if err != nil {
66
-			logrus.Errorf("Error configuring mirrors: %s", err)
67
-			return err
68
-		}
69
-
70
-		if v2mirrorEndpoint != nil {
71
-			logrus.Debugf("Attempting to pull from v2 mirror: %s", v2mirrorEndpoint.URL)
72
-			return s.pullFromV2Mirror(v2mirrorEndpoint, v2mirrorRepoInfo, imagePullConfig, tag, sf, logName)
73
-		}
74
-	}
75
-
76
-	logrus.Debugf("pulling image from host %q with remote name %q", repoInfo.Index.Name, repoInfo.RemoteName)
77
-
78
-	endpoint, err := repoInfo.GetEndpoint(imagePullConfig.MetaHeaders)
79
-	if err != nil {
80
-		return err
81
-	}
82
-	// TODO(tiborvass): reuse client from endpoint?
83
-	// Adds Docker-specific headers as well as user-specified headers (metaHeaders)
84
-	tr := transport.NewTransport(
85
-		registry.NewTransport(registry.ReceiveTimeout, endpoint.IsSecure),
86
-		registry.DockerHeaders(imagePullConfig.MetaHeaders)...,
56
+	var (
57
+		lastErr error
58
+
59
+		// discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport
60
+		// By default it is false, which means that if a ErrNoSupport error is encountered, it will be saved in lastErr.
61
+		// As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of
62
+		// any subsequent ErrNoSupport errors in lastErr.
63
+		// It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be
64
+		// returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant
65
+		// error is the ones from v2 endpoints not v1.
66
+		discardNoSupportErrors bool
87 67
 	)
88
-	client := registry.HTTPClient(tr)
89
-	r, err := registry.NewSession(client, imagePullConfig.AuthConfig, endpoint)
90
-	if err != nil {
91
-		return err
92
-	}
93
-
94
-	if len(repoInfo.Index.Mirrors) == 0 && (repoInfo.Index.Official || endpoint.Version == registry.APIVersion2) {
95
-		if repoInfo.Official {
96
-			s.trustService.UpdateBase()
97
-		}
68
+	for _, endpoint := range endpoints {
69
+		logrus.Debugf("Trying to pull %s from %s %s", repoInfo.LocalName, endpoint.URL, endpoint.Version)
98 70
 
99
-		logrus.Debugf("pulling v2 repository with local name %q", repoInfo.LocalName)
100
-		if err := s.pullV2Repository(r, imagePullConfig.OutStream, repoInfo, tag, sf); err == nil {
101
-			s.eventsService.Log("pull", logName, "")
102
-			return nil
103
-		} else if err != registry.ErrDoesNotExist && err != ErrV2RegistryUnavailable {
104
-			logrus.Errorf("Error from V2 registry: %s", err)
71
+		if !endpoint.Mirror && (endpoint.Official || endpoint.Version == registry.APIVersion2) {
72
+			if repoInfo.Official {
73
+				s.trustService.UpdateBase()
74
+			}
105 75
 		}
106 76
 
107
-		logrus.Debug("image does not exist on v2 registry, falling back to v1")
108
-	}
109
-
110
-	if utils.DigestReference(tag) {
111
-		return fmt.Errorf("pulling with digest reference failed from v2 registry")
112
-	}
113
-
114
-	logrus.Debugf("pulling v1 repository with local name %q", repoInfo.LocalName)
115
-	if err = s.pullRepository(r, imagePullConfig.OutStream, repoInfo, tag, sf); err != nil {
116
-		return err
117
-	}
118
-
119
-	s.eventsService.Log("pull", logName, "")
120
-
121
-	return nil
122
-
123
-}
124
-
125
-func makeMirrorRepoInfo(repoInfo *registry.RepositoryInfo, mirror string) *registry.RepositoryInfo {
126
-	mirrorRepo := &registry.RepositoryInfo{
127
-		RemoteName:    repoInfo.RemoteName,
128
-		LocalName:     repoInfo.LocalName,
129
-		CanonicalName: repoInfo.CanonicalName,
130
-		Official:      false,
131
-
132
-		Index: &registry.IndexInfo{
133
-			Official: false,
134
-			Secure:   repoInfo.Index.Secure,
135
-			Name:     mirror,
136
-			Mirrors:  []string{},
137
-		},
138
-	}
139
-	return mirrorRepo
140
-}
141
-
142
-func configureV2Mirror(repoInfo *registry.RepositoryInfo, s *registry.Service) (*registry.Endpoint, *registry.RepositoryInfo, error) {
143
-	mirrors := repoInfo.Index.Mirrors
144
-	if len(mirrors) == 0 {
145
-		// no mirrors configured
146
-		return nil, nil, nil
147
-	}
148
-
149
-	v1MirrorCount := 0
150
-	var v2MirrorEndpoint *registry.Endpoint
151
-	var v2MirrorRepoInfo *registry.RepositoryInfo
152
-	for _, mirror := range mirrors {
153
-		mirrorRepoInfo := makeMirrorRepoInfo(repoInfo, mirror)
154
-		endpoint, err := registry.NewEndpoint(mirrorRepoInfo.Index, nil)
77
+		puller, err := NewPuller(s, endpoint, repoInfo, imagePullConfig, sf)
155 78
 		if err != nil {
156
-			logrus.Errorf("Unable to create endpoint for %s: %s", mirror, err)
79
+			lastErr = err
157 80
 			continue
158 81
 		}
159
-		if endpoint.Version == 2 {
160
-			if v2MirrorEndpoint == nil {
161
-				v2MirrorEndpoint = endpoint
162
-				v2MirrorRepoInfo = mirrorRepoInfo
163
-			} else {
164
-				// > 1 v2 mirrors given
165
-				return nil, nil, fmt.Errorf("multiple v2 mirrors configured")
166
-			}
167
-		} else {
168
-			v1MirrorCount++
169
-		}
170
-	}
171
-
172
-	if v1MirrorCount == len(mirrors) {
173
-		// OK, but mirrors are v1
174
-		return nil, nil, nil
175
-	}
176
-	if v2MirrorEndpoint != nil && v1MirrorCount == 0 {
177
-		// OK, 1 v2 mirror specified
178
-		return v2MirrorEndpoint, v2MirrorRepoInfo, nil
179
-	}
180
-	if v2MirrorEndpoint != nil && v1MirrorCount > 0 {
181
-		return nil, nil, fmt.Errorf("v1 and v2 mirrors configured")
182
-	}
183
-	// No endpoint could be established with the given mirror configurations
184
-	// Fallback to pulling from the hub as per v1 behavior.
185
-	return nil, nil, nil
186
-}
187
-
188
-func (s *TagStore) pullFromV2Mirror(mirrorEndpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo,
189
-	imagePullConfig *ImagePullConfig, tag string, sf *streamformatter.StreamFormatter, logName string) error {
190
-
191
-	tr := transport.NewTransport(
192
-		registry.NewTransport(registry.ReceiveTimeout, mirrorEndpoint.IsSecure),
193
-		registry.DockerHeaders(imagePullConfig.MetaHeaders)...,
194
-	)
195
-	client := registry.HTTPClient(tr)
196
-	mirrorSession, err := registry.NewSession(client, &cliconfig.AuthConfig{}, mirrorEndpoint)
197
-	if err != nil {
198
-		return err
199
-	}
200
-	logrus.Debugf("Pulling v2 repository with local name %q from %s", repoInfo.LocalName, mirrorEndpoint.URL)
201
-	if err := s.pullV2Repository(mirrorSession, imagePullConfig.OutStream, repoInfo, tag, sf); err != nil {
202
-		return err
203
-	}
204
-	s.eventsService.Log("pull", logName, "")
205
-	return nil
206
-}
207
-
208
-func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, askedTag string, sf *streamformatter.StreamFormatter) error {
209
-	out.Write(sf.FormatStatus("", "Pulling repository %s", repoInfo.CanonicalName))
210
-
211
-	repoData, err := r.GetRepositoryData(repoInfo.RemoteName)
212
-	if err != nil {
213
-		if strings.Contains(err.Error(), "HTTP code: 404") {
214
-			return fmt.Errorf("Error: image %s not found", utils.ImageReference(repoInfo.RemoteName, askedTag))
215
-		}
216
-		// Unexpected HTTP error
217
-		return err
218
-	}
219
-
220
-	logrus.Debugf("Retrieving the tag list")
221
-	tagsList := make(map[string]string)
222
-	if askedTag == "" {
223
-		tagsList, err = r.GetRemoteTags(repoData.Endpoints, repoInfo.RemoteName)
224
-	} else {
225
-		var tagId string
226
-		tagId, err = r.GetRemoteTag(repoData.Endpoints, repoInfo.RemoteName, askedTag)
227
-		tagsList[askedTag] = tagId
228
-	}
229
-	if err != nil {
230
-		if err == registry.ErrRepoNotFound && askedTag != "" {
231
-			return fmt.Errorf("Tag %s not found in repository %s", askedTag, repoInfo.CanonicalName)
232
-		}
233
-		logrus.Errorf("unable to get remote tags: %s", err)
234
-		return err
235
-	}
236
-
237
-	for tag, id := range tagsList {
238
-		repoData.ImgList[id] = &registry.ImgData{
239
-			ID:       id,
240
-			Tag:      tag,
241
-			Checksum: "",
242
-		}
243
-	}
244
-
245
-	logrus.Debugf("Registering tags")
246
-	// If no tag has been specified, pull them all
247
-	if askedTag == "" {
248
-		for tag, id := range tagsList {
249
-			repoData.ImgList[id].Tag = tag
250
-		}
251
-	} else {
252
-		// Otherwise, check that the tag exists and use only that one
253
-		id, exists := tagsList[askedTag]
254
-		if !exists {
255
-			return fmt.Errorf("Tag %s not found in repository %s", askedTag, repoInfo.CanonicalName)
256
-		}
257
-		repoData.ImgList[id].Tag = askedTag
258
-	}
259
-
260
-	errors := make(chan error)
261
-
262
-	layersDownloaded := false
263
-	for _, image := range repoData.ImgList {
264
-		downloadImage := func(img *registry.ImgData) {
265
-			if askedTag != "" && img.Tag != askedTag {
266
-				errors <- nil
267
-				return
82
+		if fallback, err := puller.Pull(tag); err != nil {
83
+			if fallback {
84
+				if _, ok := err.(registry.ErrNoSupport); !ok {
85
+					// Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors.
86
+					discardNoSupportErrors = true
87
+					// save the current error
88
+					lastErr = err
89
+				} else if !discardNoSupportErrors {
90
+					// Save the ErrNoSupport error, because it's either the first error or all encountered errors
91
+					// were also ErrNoSupport errors.
92
+					lastErr = err
93
+				}
94
+				continue
268 95
 			}
269
-
270
-			if img.Tag == "" {
271
-				logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
272
-				errors <- nil
273
-				return
274
-			}
275
-
276
-			// ensure no two downloads of the same image happen at the same time
277
-			if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
278
-				if c != nil {
279
-					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
280
-					<-c
281
-					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
282
-				} else {
283
-					logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
284
-				}
285
-				errors <- nil
286
-				return
287
-			}
288
-			defer s.poolRemove("pull", "img:"+img.ID)
289
-
290
-			out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, repoInfo.CanonicalName), nil))
291
-			success := false
292
-			var lastErr, err error
293
-			var isDownloaded bool
294
-			for _, ep := range repoInfo.Index.Mirrors {
295
-				// Ensure endpoint is v1
296
-				ep = ep + "v1/"
297
-				out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, repoInfo.CanonicalName, ep), nil))
298
-				if isDownloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
299
-					// Don't report errors when pulling from mirrors.
300
-					logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err)
301
-					continue
302
-				}
303
-				layersDownloaded = layersDownloaded || isDownloaded
304
-				success = true
305
-				break
306
-			}
307
-			if !success {
308
-				for _, ep := range repoData.Endpoints {
309
-					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, repoInfo.CanonicalName, ep), nil))
310
-					if isDownloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
311
-						// It's not ideal that only the last error is returned, it would be better to concatenate the errors.
312
-						// As the error is also given to the output stream the user will see the error.
313
-						lastErr = err
314
-						out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err), nil))
315
-						continue
316
-					}
317
-					layersDownloaded = layersDownloaded || isDownloaded
318
-					success = true
319
-					break
320
-				}
321
-			}
322
-			if !success {
323
-				err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, repoInfo.CanonicalName, lastErr)
324
-				out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), err.Error(), nil))
325
-				errors <- err
326
-				return
327
-			}
328
-			out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
329
-
330
-			errors <- nil
331
-		}
332
-
333
-		go downloadImage(image)
334
-	}
335
-
336
-	var lastError error
337
-	for i := 0; i < len(repoData.ImgList); i++ {
338
-		if err := <-errors; err != nil {
339
-			lastError = err
340
-		}
341
-	}
342
-	if lastError != nil {
343
-		return lastError
344
-	}
345
-
346
-	for tag, id := range tagsList {
347
-		if askedTag != "" && tag != askedTag {
348
-			continue
349
-		}
350
-		if err := s.Tag(repoInfo.LocalName, tag, id, true); err != nil {
96
+			logrus.Debugf("Not continuing with error: %v", err)
351 97
 			return err
352
-		}
353
-	}
354
-
355
-	requestedTag := repoInfo.LocalName
356
-	if len(askedTag) > 0 {
357
-		requestedTag = utils.ImageReference(repoInfo.LocalName, askedTag)
358
-	}
359
-	WriteStatus(requestedTag, out, sf, layersDownloaded)
360
-	return nil
361
-}
362
-
363
-func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint string, token []string, sf *streamformatter.StreamFormatter) (bool, error) {
364
-	history, err := r.GetRemoteHistory(imgID, endpoint)
365
-	if err != nil {
366
-		return false, err
367
-	}
368
-	out.Write(sf.FormatProgress(stringid.TruncateID(imgID), "Pulling dependent layers", nil))
369
-	// FIXME: Try to stream the images?
370
-	// FIXME: Launch the getRemoteImage() in goroutines
371 98
 
372
-	layersDownloaded := false
373
-	for i := len(history) - 1; i >= 0; i-- {
374
-		id := history[i]
375
-
376
-		// ensure no two downloads of the same layer happen at the same time
377
-		if c, err := s.poolAdd("pull", "layer:"+id); err != nil {
378
-			logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
379
-			<-c
380 99
 		}
381
-		defer s.poolRemove("pull", "layer:"+id)
382
-
383
-		if !s.graph.Exists(id) {
384
-			out.Write(sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil))
385
-			var (
386
-				imgJSON []byte
387
-				imgSize int
388
-				err     error
389
-				img     *Image
390
-			)
391
-			retries := 5
392
-			for j := 1; j <= retries; j++ {
393
-				imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint)
394
-				if err != nil && j == retries {
395
-					out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
396
-					return layersDownloaded, err
397
-				} else if err != nil {
398
-					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
399
-					continue
400
-				}
401
-				img, err = NewImgJSON(imgJSON)
402
-				layersDownloaded = true
403
-				if err != nil && j == retries {
404
-					out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
405
-					return layersDownloaded, fmt.Errorf("Failed to parse json: %s", err)
406
-				} else if err != nil {
407
-					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
408
-					continue
409
-				} else {
410
-					break
411
-				}
412
-			}
413 100
 
414
-			for j := 1; j <= retries; j++ {
415
-				// Get the layer
416
-				status := "Pulling fs layer"
417
-				if j > 1 {
418
-					status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
419
-				}
420
-				out.Write(sf.FormatProgress(stringid.TruncateID(id), status, nil))
421
-				layer, err := r.GetRemoteImageLayer(img.ID, endpoint, int64(imgSize))
422
-				if uerr, ok := err.(*url.Error); ok {
423
-					err = uerr.Err
424
-				}
425
-				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
426
-					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
427
-					continue
428
-				} else if err != nil {
429
-					out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
430
-					return layersDownloaded, err
431
-				}
432
-				layersDownloaded = true
433
-				defer layer.Close()
101
+		s.eventsService.Log("pull", logName, "")
102
+		return nil
103
+	}
434 104
 
435
-				err = s.graph.Register(img,
436
-					progressreader.New(progressreader.Config{
437
-						In:        layer,
438
-						Out:       out,
439
-						Formatter: sf,
440
-						Size:      imgSize,
441
-						NewLines:  false,
442
-						ID:        stringid.TruncateID(id),
443
-						Action:    "Downloading",
444
-					}))
445
-				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
446
-					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
447
-					continue
448
-				} else if err != nil {
449
-					out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil))
450
-					return layersDownloaded, err
451
-				} else {
452
-					break
453
-				}
454
-			}
455
-		}
456
-		out.Write(sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil))
105
+	if lastErr == nil {
106
+		lastErr = fmt.Errorf("no endpoints found for %s", image)
457 107
 	}
458
-	return layersDownloaded, nil
108
+	return lastErr
459 109
 }
460 110
 
461 111
 func WriteStatus(requestedTag string, out io.Writer, sf *streamformatter.StreamFormatter, layersDownloaded bool) {
... ...
@@ -465,273 +133,3 @@ func WriteStatus(requestedTag string, out io.Writer, sf *streamformatter.StreamF
465 465
 		out.Write(sf.FormatStatus("", "Status: Image is up to date for %s", requestedTag))
466 466
 	}
467 467
 }
468
-
469
-func (s *TagStore) pullV2Repository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter) error {
470
-	endpoint, err := r.V2RegistryEndpoint(repoInfo.Index)
471
-	if err != nil {
472
-		if repoInfo.Index.Official {
473
-			logrus.Debugf("Unable to pull from V2 registry, falling back to v1: %s", err)
474
-			return ErrV2RegistryUnavailable
475
-		}
476
-		return fmt.Errorf("error getting registry endpoint: %s", err)
477
-	}
478
-	auth, err := r.GetV2Authorization(endpoint, repoInfo.RemoteName, true)
479
-	if err != nil {
480
-		return fmt.Errorf("error getting authorization: %s", err)
481
-	}
482
-	if !auth.CanAuthorizeV2() {
483
-		return ErrV2RegistryUnavailable
484
-	}
485
-
486
-	var layersDownloaded bool
487
-	if tag == "" {
488
-		logrus.Debugf("Pulling tag list from V2 registry for %s", repoInfo.CanonicalName)
489
-		tags, err := r.GetV2RemoteTags(endpoint, repoInfo.RemoteName, auth)
490
-		if err != nil {
491
-			return err
492
-		}
493
-		if len(tags) == 0 {
494
-			return registry.ErrDoesNotExist
495
-		}
496
-		for _, t := range tags {
497
-			if downloaded, err := s.pullV2Tag(r, out, endpoint, repoInfo, t, sf, auth); err != nil {
498
-				return err
499
-			} else if downloaded {
500
-				layersDownloaded = true
501
-			}
502
-		}
503
-	} else {
504
-		if downloaded, err := s.pullV2Tag(r, out, endpoint, repoInfo, tag, sf, auth); err != nil {
505
-			return err
506
-		} else if downloaded {
507
-			layersDownloaded = true
508
-		}
509
-	}
510
-
511
-	requestedTag := repoInfo.LocalName
512
-	if len(tag) > 0 {
513
-		requestedTag = utils.ImageReference(repoInfo.LocalName, tag)
514
-	}
515
-	WriteStatus(requestedTag, out, sf, layersDownloaded)
516
-	return nil
517
-}
518
-
519
-func (s *TagStore) pullV2Tag(r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter, auth *registry.RequestAuthorization) (bool, error) {
520
-	logrus.Debugf("Pulling tag from V2 registry: %q", tag)
521
-
522
-	remoteDigest, manifestBytes, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
523
-	if err != nil {
524
-		return false, err
525
-	}
526
-
527
-	// loadManifest ensures that the manifest payload has the expected digest
528
-	// if the tag is a digest reference.
529
-	localDigest, manifest, verified, err := s.loadManifest(manifestBytes, tag, remoteDigest)
530
-	if err != nil {
531
-		return false, fmt.Errorf("error verifying manifest: %s", err)
532
-	}
533
-
534
-	if verified {
535
-		logrus.Printf("Image manifest for %s has been verified", utils.ImageReference(repoInfo.CanonicalName, tag))
536
-	}
537
-	out.Write(sf.FormatStatus(tag, "Pulling from %s", repoInfo.CanonicalName))
538
-
539
-	// downloadInfo is used to pass information from download to extractor
540
-	type downloadInfo struct {
541
-		imgJSON    []byte
542
-		img        *Image
543
-		digest     digest.Digest
544
-		tmpFile    *os.File
545
-		length     int64
546
-		downloaded bool
547
-		err        chan error
548
-	}
549
-
550
-	downloads := make([]downloadInfo, len(manifest.FSLayers))
551
-
552
-	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
553
-		var (
554
-			sumStr  = manifest.FSLayers[i].BlobSum
555
-			imgJSON = []byte(manifest.History[i].V1Compatibility)
556
-		)
557
-
558
-		img, err := NewImgJSON(imgJSON)
559
-		if err != nil {
560
-			return false, fmt.Errorf("failed to parse json: %s", err)
561
-		}
562
-		downloads[i].img = img
563
-
564
-		// Check if exists
565
-		if s.graph.Exists(img.ID) {
566
-			logrus.Debugf("Image already exists: %s", img.ID)
567
-			continue
568
-		}
569
-
570
-		dgst, err := digest.ParseDigest(sumStr)
571
-		if err != nil {
572
-			return false, err
573
-		}
574
-		downloads[i].digest = dgst
575
-
576
-		out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))
577
-
578
-		downloadFunc := func(di *downloadInfo) error {
579
-			logrus.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID)
580
-
581
-			if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
582
-				if c != nil {
583
-					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
584
-					<-c
585
-					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
586
-				} else {
587
-					logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
588
-				}
589
-			} else {
590
-				defer s.poolRemove("pull", "img:"+img.ID)
591
-				tmpFile, err := ioutil.TempFile("", "GetV2ImageBlob")
592
-				if err != nil {
593
-					return err
594
-				}
595
-
596
-				r, l, err := r.GetV2ImageBlobReader(endpoint, repoInfo.RemoteName, di.digest, auth)
597
-				if err != nil {
598
-					return err
599
-				}
600
-				defer r.Close()
601
-
602
-				verifier, err := digest.NewDigestVerifier(di.digest)
603
-				if err != nil {
604
-					return err
605
-				}
606
-
607
-				if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
608
-					In:        ioutil.NopCloser(io.TeeReader(r, verifier)),
609
-					Out:       out,
610
-					Formatter: sf,
611
-					Size:      int(l),
612
-					NewLines:  false,
613
-					ID:        stringid.TruncateID(img.ID),
614
-					Action:    "Downloading",
615
-				})); err != nil {
616
-					return fmt.Errorf("unable to copy v2 image blob data: %s", err)
617
-				}
618
-
619
-				out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Verifying Checksum", nil))
620
-
621
-				if !verifier.Verified() {
622
-					return fmt.Errorf("image layer digest verification failed for %q", di.digest)
623
-				}
624
-
625
-				out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
626
-
627
-				logrus.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())
628
-				di.tmpFile = tmpFile
629
-				di.length = l
630
-				di.downloaded = true
631
-			}
632
-			di.imgJSON = imgJSON
633
-
634
-			return nil
635
-		}
636
-
637
-		downloads[i].err = make(chan error)
638
-		go func(di *downloadInfo) {
639
-			di.err <- downloadFunc(di)
640
-		}(&downloads[i])
641
-	}
642
-
643
-	var tagUpdated bool
644
-	for i := len(downloads) - 1; i >= 0; i-- {
645
-		d := &downloads[i]
646
-		if d.err != nil {
647
-			if err := <-d.err; err != nil {
648
-				return false, err
649
-			}
650
-		}
651
-		if d.downloaded {
652
-			// if tmpFile is empty assume download and extracted elsewhere
653
-			defer os.Remove(d.tmpFile.Name())
654
-			defer d.tmpFile.Close()
655
-			d.tmpFile.Seek(0, 0)
656
-			if d.tmpFile != nil {
657
-				err = s.graph.Register(d.img,
658
-					progressreader.New(progressreader.Config{
659
-						In:        d.tmpFile,
660
-						Out:       out,
661
-						Formatter: sf,
662
-						Size:      int(d.length),
663
-						ID:        stringid.TruncateID(d.img.ID),
664
-						Action:    "Extracting",
665
-					}))
666
-				if err != nil {
667
-					return false, err
668
-				}
669
-
670
-				if err := s.graph.SetDigest(d.img.ID, d.digest); err != nil {
671
-					return false, err
672
-				}
673
-
674
-				// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
675
-			}
676
-			out.Write(sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
677
-			tagUpdated = true
678
-		} else {
679
-			out.Write(sf.FormatProgress(stringid.TruncateID(d.img.ID), "Already exists", nil))
680
-		}
681
-
682
-	}
683
-
684
-	// Check for new tag if no layers downloaded
685
-	if !tagUpdated {
686
-		repo, err := s.Get(repoInfo.LocalName)
687
-		if err != nil {
688
-			return false, err
689
-		}
690
-		if repo != nil {
691
-			if _, exists := repo[tag]; !exists {
692
-				tagUpdated = true
693
-			}
694
-		} else {
695
-			tagUpdated = true
696
-		}
697
-	}
698
-
699
-	if verified && tagUpdated {
700
-		out.Write(sf.FormatStatus(utils.ImageReference(repoInfo.CanonicalName, tag), "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
701
-	}
702
-
703
-	if localDigest != remoteDigest { // this is not a verification check.
704
-		// NOTE(stevvooe): This is a very defensive branch and should never
705
-		// happen, since all manifest digest implementations use the same
706
-		// algorithm.
707
-		logrus.WithFields(
708
-			logrus.Fields{
709
-				"local":  localDigest,
710
-				"remote": remoteDigest,
711
-			}).Debugf("local digest does not match remote")
712
-
713
-		out.Write(sf.FormatStatus("", "Remote Digest: %s", remoteDigest))
714
-	}
715
-
716
-	out.Write(sf.FormatStatus("", "Digest: %s", localDigest))
717
-
718
-	if tag == localDigest.String() {
719
-		// TODO(stevvooe): Ideally, we should always set the digest so we can
720
-		// use the digest whether we pull by it or not. Unfortunately, the tag
721
-		// store treats the digest as a separate tag, meaning there may be an
722
-		// untagged digest image that would seem to be dangling by a user.
723
-
724
-		if err = s.SetDigest(repoInfo.LocalName, localDigest.String(), downloads[0].img.ID); err != nil {
725
-			return false, err
726
-		}
727
-	}
728
-
729
-	if !utils.DigestReference(tag) {
730
-		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
731
-		if err = s.Tag(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
732
-			return false, err
733
-		}
734
-	}
735
-
736
-	return tagUpdated, nil
737
-}
738 468
new file mode 100644
... ...
@@ -0,0 +1,316 @@
0
+package graph
1
+
2
+import (
3
+	"errors"
4
+	"fmt"
5
+	"net"
6
+	"net/url"
7
+	"strings"
8
+	"time"
9
+
10
+	"github.com/Sirupsen/logrus"
11
+	"github.com/docker/distribution/registry/client/transport"
12
+	"github.com/docker/docker/pkg/progressreader"
13
+	"github.com/docker/docker/pkg/streamformatter"
14
+	"github.com/docker/docker/pkg/stringid"
15
+	"github.com/docker/docker/registry"
16
+	"github.com/docker/docker/utils"
17
+)
18
+
19
+type v1Puller struct {
20
+	*TagStore
21
+	endpoint registry.APIEndpoint
22
+	config   *ImagePullConfig
23
+	sf       *streamformatter.StreamFormatter
24
+	repoInfo *registry.RepositoryInfo
25
+	session  *registry.Session
26
+}
27
+
28
+func (p *v1Puller) Pull(tag string) (fallback bool, err error) {
29
+	if utils.DigestReference(tag) {
30
+		// Allowing fallback, because HTTPS v1 is before HTTP v2
31
+		return true, registry.ErrNoSupport{errors.New("Cannot pull by digest with v1 registry")}
32
+	}
33
+
34
+	tlsConfig, err := p.registryService.TlsConfig(p.repoInfo.Index.Name)
35
+	if err != nil {
36
+		return false, err
37
+	}
38
+	// Adds Docker-specific headers as well as user-specified headers (metaHeaders)
39
+	tr := transport.NewTransport(
40
+		// TODO(tiborvass): was ReceiveTimeout
41
+		registry.NewTransport(tlsConfig),
42
+		registry.DockerHeaders(p.config.MetaHeaders)...,
43
+	)
44
+	client := registry.HTTPClient(tr)
45
+	v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders)
46
+	if err != nil {
47
+		logrus.Debugf("Could not get v1 endpoint: %v", err)
48
+		return true, err
49
+	}
50
+	p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint)
51
+	if err != nil {
52
+		// TODO(dmcgowan): Check if should fallback
53
+		logrus.Debugf("Fallback from error: %s", err)
54
+		return true, err
55
+	}
56
+	if err := p.pullRepository(tag); err != nil {
57
+		// TODO(dmcgowan): Check if should fallback
58
+		return false, err
59
+	}
60
+	return false, nil
61
+}
62
+
63
+func (p *v1Puller) pullRepository(askedTag string) error {
64
+	out := p.config.OutStream
65
+	out.Write(p.sf.FormatStatus("", "Pulling repository %s", p.repoInfo.CanonicalName))
66
+
67
+	repoData, err := p.session.GetRepositoryData(p.repoInfo.RemoteName)
68
+	if err != nil {
69
+		if strings.Contains(err.Error(), "HTTP code: 404") {
70
+			return fmt.Errorf("Error: image %s not found", utils.ImageReference(p.repoInfo.RemoteName, askedTag))
71
+		}
72
+		// Unexpected HTTP error
73
+		return err
74
+	}
75
+
76
+	logrus.Debugf("Retrieving the tag list")
77
+	tagsList := make(map[string]string)
78
+	if askedTag == "" {
79
+		tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo.RemoteName)
80
+	} else {
81
+		var tagId string
82
+		tagId, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo.RemoteName, askedTag)
83
+		tagsList[askedTag] = tagId
84
+	}
85
+	if err != nil {
86
+		if err == registry.ErrRepoNotFound && askedTag != "" {
87
+			return fmt.Errorf("Tag %s not found in repository %s", askedTag, p.repoInfo.CanonicalName)
88
+		}
89
+		logrus.Errorf("unable to get remote tags: %s", err)
90
+		return err
91
+	}
92
+
93
+	for tag, id := range tagsList {
94
+		repoData.ImgList[id] = &registry.ImgData{
95
+			ID:       id,
96
+			Tag:      tag,
97
+			Checksum: "",
98
+		}
99
+	}
100
+
101
+	logrus.Debugf("Registering tags")
102
+	// If no tag has been specified, pull them all
103
+	if askedTag == "" {
104
+		for tag, id := range tagsList {
105
+			repoData.ImgList[id].Tag = tag
106
+		}
107
+	} else {
108
+		// Otherwise, check that the tag exists and use only that one
109
+		id, exists := tagsList[askedTag]
110
+		if !exists {
111
+			return fmt.Errorf("Tag %s not found in repository %s", askedTag, p.repoInfo.CanonicalName)
112
+		}
113
+		repoData.ImgList[id].Tag = askedTag
114
+	}
115
+
116
+	errors := make(chan error)
117
+
118
+	layersDownloaded := false
119
+	for _, image := range repoData.ImgList {
120
+		downloadImage := func(img *registry.ImgData) {
121
+			if askedTag != "" && img.Tag != askedTag {
122
+				errors <- nil
123
+				return
124
+			}
125
+
126
+			if img.Tag == "" {
127
+				logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
128
+				errors <- nil
129
+				return
130
+			}
131
+
132
+			// ensure no two downloads of the same image happen at the same time
133
+			if c, err := p.poolAdd("pull", "img:"+img.ID); err != nil {
134
+				if c != nil {
135
+					out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
136
+					<-c
137
+					out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
138
+				} else {
139
+					logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
140
+				}
141
+				errors <- nil
142
+				return
143
+			}
144
+			defer p.poolRemove("pull", "img:"+img.ID)
145
+
146
+			out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, p.repoInfo.CanonicalName), nil))
147
+			success := false
148
+			var lastErr, err error
149
+			var isDownloaded bool
150
+			for _, ep := range p.repoInfo.Index.Mirrors {
151
+				ep += "v1/"
152
+				out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil))
153
+				if isDownloaded, err = p.pullImage(img.ID, ep, repoData.Tokens); err != nil {
154
+					// Don't report errors when pulling from mirrors.
155
+					logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err)
156
+					continue
157
+				}
158
+				layersDownloaded = layersDownloaded || isDownloaded
159
+				success = true
160
+				break
161
+			}
162
+			if !success {
163
+				for _, ep := range repoData.Endpoints {
164
+					out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil))
165
+					if isDownloaded, err = p.pullImage(img.ID, ep, repoData.Tokens); err != nil {
166
+						// It's not ideal that only the last error is returned, it would be better to concatenate the errors.
167
+						// As the error is also given to the output stream the user will see the error.
168
+						lastErr = err
169
+						out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err), nil))
170
+						continue
171
+					}
172
+					layersDownloaded = layersDownloaded || isDownloaded
173
+					success = true
174
+					break
175
+				}
176
+			}
177
+			if !success {
178
+				err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.CanonicalName, lastErr)
179
+				out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), err.Error(), nil))
180
+				errors <- err
181
+				return
182
+			}
183
+			out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
184
+
185
+			errors <- nil
186
+		}
187
+
188
+		go downloadImage(image)
189
+	}
190
+
191
+	var lastError error
192
+	for i := 0; i < len(repoData.ImgList); i++ {
193
+		if err := <-errors; err != nil {
194
+			lastError = err
195
+		}
196
+	}
197
+	if lastError != nil {
198
+		return lastError
199
+	}
200
+
201
+	for tag, id := range tagsList {
202
+		if askedTag != "" && tag != askedTag {
203
+			continue
204
+		}
205
+		if err := p.Tag(p.repoInfo.LocalName, tag, id, true); err != nil {
206
+			return err
207
+		}
208
+	}
209
+
210
+	requestedTag := p.repoInfo.LocalName
211
+	if len(askedTag) > 0 {
212
+		requestedTag = utils.ImageReference(p.repoInfo.LocalName, askedTag)
213
+	}
214
+	WriteStatus(requestedTag, out, p.sf, layersDownloaded)
215
+	return nil
216
+}
217
+
218
+func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, error) {
219
+	history, err := p.session.GetRemoteHistory(imgID, endpoint)
220
+	if err != nil {
221
+		return false, err
222
+	}
223
+	out := p.config.OutStream
224
+	out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pulling dependent layers", nil))
225
+	// FIXME: Try to stream the images?
226
+	// FIXME: Launch the getRemoteImage() in goroutines
227
+
228
+	layersDownloaded := false
229
+	for i := len(history) - 1; i >= 0; i-- {
230
+		id := history[i]
231
+
232
+		// ensure no two downloads of the same layer happen at the same time
233
+		if c, err := p.poolAdd("pull", "layer:"+id); err != nil {
234
+			logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
235
+			<-c
236
+		}
237
+		defer p.poolRemove("pull", "layer:"+id)
238
+
239
+		if !p.graph.Exists(id) {
240
+			out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil))
241
+			var (
242
+				imgJSON []byte
243
+				imgSize int
244
+				err     error
245
+				img     *Image
246
+			)
247
+			retries := 5
248
+			for j := 1; j <= retries; j++ {
249
+				imgJSON, imgSize, err = p.session.GetRemoteImageJSON(id, endpoint)
250
+				if err != nil && j == retries {
251
+					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
252
+					return layersDownloaded, err
253
+				} else if err != nil {
254
+					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
255
+					continue
256
+				}
257
+				img, err = NewImgJSON(imgJSON)
258
+				layersDownloaded = true
259
+				if err != nil && j == retries {
260
+					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
261
+					return layersDownloaded, fmt.Errorf("Failed to parse json: %s", err)
262
+				} else if err != nil {
263
+					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
264
+					continue
265
+				} else {
266
+					break
267
+				}
268
+			}
269
+
270
+			for j := 1; j <= retries; j++ {
271
+				// Get the layer
272
+				status := "Pulling fs layer"
273
+				if j > 1 {
274
+					status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
275
+				}
276
+				out.Write(p.sf.FormatProgress(stringid.TruncateID(id), status, nil))
277
+				layer, err := p.session.GetRemoteImageLayer(img.ID, endpoint, int64(imgSize))
278
+				if uerr, ok := err.(*url.Error); ok {
279
+					err = uerr.Err
280
+				}
281
+				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
282
+					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
283
+					continue
284
+				} else if err != nil {
285
+					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
286
+					return layersDownloaded, err
287
+				}
288
+				layersDownloaded = true
289
+				defer layer.Close()
290
+
291
+				err = p.graph.Register(img,
292
+					progressreader.New(progressreader.Config{
293
+						In:        layer,
294
+						Out:       out,
295
+						Formatter: p.sf,
296
+						Size:      imgSize,
297
+						NewLines:  false,
298
+						ID:        stringid.TruncateID(id),
299
+						Action:    "Downloading",
300
+					}))
301
+				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
302
+					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
303
+					continue
304
+				} else if err != nil {
305
+					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil))
306
+					return layersDownloaded, err
307
+				} else {
308
+					break
309
+				}
310
+			}
311
+		}
312
+		out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil))
313
+	}
314
+	return layersDownloaded, nil
315
+}
0 316
new file mode 100644
... ...
@@ -0,0 +1,384 @@
0
+package graph
1
+
2
+import (
3
+	"fmt"
4
+	"io"
5
+	"io/ioutil"
6
+	"os"
7
+
8
+	"github.com/Sirupsen/logrus"
9
+	"github.com/docker/distribution"
10
+	"github.com/docker/distribution/digest"
11
+	"github.com/docker/distribution/manifest"
12
+	"github.com/docker/docker/pkg/progressreader"
13
+	"github.com/docker/docker/pkg/streamformatter"
14
+	"github.com/docker/docker/pkg/stringid"
15
+	"github.com/docker/docker/registry"
16
+	"github.com/docker/docker/trust"
17
+	"github.com/docker/docker/utils"
18
+	"github.com/docker/libtrust"
19
+)
20
+
21
// v2Puller pulls images from a v2 registry endpoint. It embeds the
// TagStore so pulled layers can be registered and tagged locally.
type v2Puller struct {
	*TagStore
	endpoint registry.APIEndpoint // registry endpoint to pull from
	config   *ImagePullConfig     // pull options: auth, meta headers, output stream
	sf       *streamformatter.StreamFormatter
	repoInfo *registry.RepositoryInfo // resolved repository naming info
	repo     distribution.Repository  // client-side repository handle, set in Pull
}
29
+
30
+func (p *v2Puller) Pull(tag string) (fallback bool, err error) {
31
+	// TODO(tiborvass): was ReceiveTimeout
32
+	p.repo, err = NewV2Repository(p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig)
33
+	if err != nil {
34
+		logrus.Debugf("Error getting v2 registry: %v", err)
35
+		return true, err
36
+	}
37
+
38
+	if err := p.pullV2Repository(tag); err != nil {
39
+		if registry.ContinueOnError(err) {
40
+			logrus.Debugf("Error trying v2 registry: %v", err)
41
+			return true, err
42
+		}
43
+		return false, err
44
+	}
45
+	return false, nil
46
+}
47
+
48
+func (p *v2Puller) pullV2Repository(tag string) (err error) {
49
+	var tags []string
50
+	taggedName := p.repoInfo.LocalName
51
+	if len(tag) > 0 {
52
+		tags = []string{tag}
53
+		taggedName = utils.ImageReference(p.repoInfo.LocalName, tag)
54
+	} else {
55
+		var err error
56
+		tags, err = p.repo.Manifests().Tags()
57
+		if err != nil {
58
+			return err
59
+		}
60
+
61
+	}
62
+
63
+	c, err := p.poolAdd("pull", taggedName)
64
+	if err != nil {
65
+		if c != nil {
66
+			// Another pull of the same repository is already taking place; just wait for it to finish
67
+			p.sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", p.repoInfo.CanonicalName)
68
+			<-c
69
+			return nil
70
+		}
71
+		return err
72
+	}
73
+	defer p.poolRemove("pull", taggedName)
74
+
75
+	var layersDownloaded bool
76
+	for _, tag := range tags {
77
+		// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
78
+		// TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus?
79
+		pulledNew, err := p.pullV2Tag(tag, taggedName)
80
+		if err != nil {
81
+			return err
82
+		}
83
+		layersDownloaded = layersDownloaded || pulledNew
84
+	}
85
+
86
+	WriteStatus(taggedName, p.config.OutStream, p.sf, layersDownloaded)
87
+
88
+	return nil
89
+}
90
+
91
+// downloadInfo is used to pass information from download to extractor
92
// downloadInfo is used to pass information from download to extractor
type downloadInfo struct {
	img      *Image                      // image metadata parsed from the manifest's V1Compatibility JSON
	tmpFile  *os.File                    // layer data buffered to disk; nil when not downloaded by this goroutine
	digest   digest.Digest               // expected blob digest for this layer
	layer    distribution.ReadSeekCloser // open blob stream (closed by download's defer)
	size     int64                       // blob length reported by the registry's Stat
	err      chan error                  // delivers the download goroutine's outcome
	verified bool                        // whether downloaded bytes matched digest
}

type errVerification struct{}

// Error implements the error interface for digest verification failures.
func (errVerification) Error() string { return "verification failed" }
105
+
106
+func (p *v2Puller) download(di *downloadInfo) {
107
+	logrus.Debugf("pulling blob %q to %s", di.digest, di.img.ID)
108
+
109
+	out := p.config.OutStream
110
+
111
+	if c, err := p.poolAdd("pull", "img:"+di.img.ID); err != nil {
112
+		if c != nil {
113
+			out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Layer already being pulled by another client. Waiting.", nil))
114
+			<-c
115
+			out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Download complete", nil))
116
+		} else {
117
+			logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", di.img.ID, err)
118
+		}
119
+		di.err <- nil
120
+		return
121
+	}
122
+
123
+	defer p.poolRemove("pull", "img:"+di.img.ID)
124
+	tmpFile, err := ioutil.TempFile("", "GetImageBlob")
125
+	if err != nil {
126
+		di.err <- err
127
+		return
128
+	}
129
+
130
+	blobs := p.repo.Blobs(nil)
131
+
132
+	desc, err := blobs.Stat(nil, di.digest)
133
+	if err != nil {
134
+		logrus.Debugf("Error statting layer: %v", err)
135
+		di.err <- err
136
+		return
137
+	}
138
+	di.size = desc.Length
139
+
140
+	layerDownload, err := blobs.Open(nil, di.digest)
141
+	if err != nil {
142
+		logrus.Debugf("Error fetching layer: %v", err)
143
+		di.err <- err
144
+		return
145
+	}
146
+	defer layerDownload.Close()
147
+
148
+	verifier, err := digest.NewDigestVerifier(di.digest)
149
+	if err != nil {
150
+		di.err <- err
151
+		return
152
+	}
153
+
154
+	reader := progressreader.New(progressreader.Config{
155
+		In:        ioutil.NopCloser(io.TeeReader(layerDownload, verifier)),
156
+		Out:       out,
157
+		Formatter: p.sf,
158
+		Size:      int(di.size),
159
+		NewLines:  false,
160
+		ID:        stringid.TruncateID(di.img.ID),
161
+		Action:    "Downloading",
162
+	})
163
+	io.Copy(tmpFile, reader)
164
+
165
+	out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Verifying Checksum", nil))
166
+
167
+	di.verified = verifier.Verified()
168
+	if !di.verified {
169
+		logrus.Infof("Image verification failed for layer %s", di.digest)
170
+	}
171
+
172
+	out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Download complete", nil))
173
+
174
+	logrus.Debugf("Downloaded %s to tempfile %s", di.img.ID, tmpFile.Name())
175
+	di.tmpFile = tmpFile
176
+	di.layer = layerDownload
177
+
178
+	di.err <- nil
179
+}
180
+
181
+func (p *v2Puller) pullV2Tag(tag, taggedName string) (bool, error) {
182
+	logrus.Debugf("Pulling tag from V2 registry: %q", tag)
183
+	out := p.config.OutStream
184
+
185
+	manifest, err := p.repo.Manifests().GetByTag(tag)
186
+	if err != nil {
187
+		return false, err
188
+	}
189
+	verified, err := p.validateManifest(manifest, tag)
190
+	if err != nil {
191
+		return false, err
192
+	}
193
+	if verified {
194
+		logrus.Printf("Image manifest for %s has been verified", taggedName)
195
+	}
196
+
197
+	out.Write(p.sf.FormatStatus(tag, "Pulling from %s", p.repo.Name()))
198
+
199
+	downloads := make([]downloadInfo, len(manifest.FSLayers))
200
+	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
201
+		img, err := NewImgJSON([]byte(manifest.History[i].V1Compatibility))
202
+		if err != nil {
203
+			logrus.Debugf("error getting image v1 json: %v", err)
204
+			return false, err
205
+		}
206
+		downloads[i].img = img
207
+		downloads[i].digest = manifest.FSLayers[i].BlobSum
208
+
209
+		// Check if exists
210
+		if p.graph.Exists(img.ID) {
211
+			logrus.Debugf("Image already exists: %s", img.ID)
212
+			continue
213
+		}
214
+
215
+		out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))
216
+
217
+		downloads[i].err = make(chan error)
218
+		go p.download(&downloads[i])
219
+	}
220
+
221
+	var tagUpdated bool
222
+	for i := len(downloads) - 1; i >= 0; i-- {
223
+		d := &downloads[i]
224
+		if d.err != nil {
225
+			if err := <-d.err; err != nil {
226
+				return false, err
227
+			}
228
+		}
229
+		verified = verified && d.verified
230
+		if d.layer != nil {
231
+			// if tmpFile is empty assume download and extracted elsewhere
232
+			defer os.Remove(d.tmpFile.Name())
233
+			defer d.tmpFile.Close()
234
+			d.tmpFile.Seek(0, 0)
235
+			if d.tmpFile != nil {
236
+
237
+				reader := progressreader.New(progressreader.Config{
238
+					In:        d.tmpFile,
239
+					Out:       out,
240
+					Formatter: p.sf,
241
+					Size:      int(d.size),
242
+					NewLines:  false,
243
+					ID:        stringid.TruncateID(d.img.ID),
244
+					Action:    "Extracting",
245
+				})
246
+
247
+				err = p.graph.Register(d.img, reader)
248
+				if err != nil {
249
+					return false, err
250
+				}
251
+
252
+				if err := p.graph.SetDigest(d.img.ID, d.digest); err != nil {
253
+					return false, err
254
+				}
255
+
256
+				// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
257
+			}
258
+			out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
259
+			tagUpdated = true
260
+		} else {
261
+			out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Already exists", nil))
262
+		}
263
+	}
264
+
265
+	manifestDigest, err := digestFromManifest(manifest, p.repoInfo.LocalName)
266
+	if err != nil {
267
+		return false, err
268
+	}
269
+
270
+	// Check for new tag if no layers downloaded
271
+	if !tagUpdated {
272
+		repo, err := p.Get(p.repoInfo.LocalName)
273
+		if err != nil {
274
+			return false, err
275
+		}
276
+		if repo != nil {
277
+			if _, exists := repo[tag]; !exists {
278
+				tagUpdated = true
279
+			}
280
+		} else {
281
+			tagUpdated = true
282
+		}
283
+	}
284
+
285
+	if verified && tagUpdated {
286
+		out.Write(p.sf.FormatStatus(p.repo.Name()+":"+tag, "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
287
+	}
288
+
289
+	if utils.DigestReference(tag) {
290
+		// TODO(stevvooe): Ideally, we should always set the digest so we can
291
+		// use the digest whether we pull by it or not. Unfortunately, the tag
292
+		// store treats the digest as a separate tag, meaning there may be an
293
+		// untagged digest image that would seem to be dangling by a user.
294
+		if err = p.SetDigest(p.repoInfo.LocalName, tag, downloads[0].img.ID); err != nil {
295
+			return false, err
296
+		}
297
+	} else {
298
+		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
299
+		if err = p.Tag(p.repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
300
+			return false, err
301
+		}
302
+	}
303
+
304
+	if manifestDigest != "" {
305
+		out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest))
306
+	}
307
+
308
+	return tagUpdated, nil
309
+}
310
+
311
+// verifyTrustedKeys checks the keys provided against the trust store,
312
+// ensuring that the provided keys are trusted for the namespace. The keys
313
+// provided from this method must come from the signatures provided as part of
314
+// the manifest JWS package, obtained from unpackSignedManifest or libtrust.
315
+func (p *v2Puller) verifyTrustedKeys(namespace string, keys []libtrust.PublicKey) (verified bool, err error) {
316
+	if namespace[0] != '/' {
317
+		namespace = "/" + namespace
318
+	}
319
+
320
+	for _, key := range keys {
321
+		b, err := key.MarshalJSON()
322
+		if err != nil {
323
+			return false, fmt.Errorf("error marshalling public key: %s", err)
324
+		}
325
+		// Check key has read/write permission (0x03)
326
+		v, err := p.trustService.CheckKey(namespace, b, 0x03)
327
+		if err != nil {
328
+			vErr, ok := err.(trust.NotVerifiedError)
329
+			if !ok {
330
+				return false, fmt.Errorf("error running key check: %s", err)
331
+			}
332
+			logrus.Debugf("Key check result: %v", vErr)
333
+		}
334
+		verified = v
335
+	}
336
+
337
+	if verified {
338
+		logrus.Debug("Key check result: verified")
339
+	}
340
+
341
+	return
342
+}
343
+
344
+func (p *v2Puller) validateManifest(m *manifest.SignedManifest, tag string) (verified bool, err error) {
345
+	// TODO(tiborvass): what's the usecase for having manifest == nil and err == nil ? Shouldn't be the error be "DoesNotExist" ?
346
+	if m == nil {
347
+		return false, fmt.Errorf("image manifest does not exist for tag %q", tag)
348
+	}
349
+	if m.SchemaVersion != 1 {
350
+		return false, fmt.Errorf("unsupported schema version %d for tag %q", m.SchemaVersion, tag)
351
+	}
352
+	if len(m.FSLayers) != len(m.History) {
353
+		return false, fmt.Errorf("length of history not equal to number of layers for tag %q", tag)
354
+	}
355
+	if len(m.FSLayers) == 0 {
356
+		return false, fmt.Errorf("no FSLayers in manifest for tag %q", tag)
357
+	}
358
+	keys, err := manifest.Verify(m)
359
+	if err != nil {
360
+		return false, fmt.Errorf("error verifying manifest for tag %q: %v", tag, err)
361
+	}
362
+	verified, err = p.verifyTrustedKeys(m.Name, keys)
363
+	if err != nil {
364
+		return false, fmt.Errorf("error verifying manifest keys: %v", err)
365
+	}
366
+	localDigest, err := digest.ParseDigest(tag)
367
+	// if pull by digest, then verify
368
+	if err == nil {
369
+		verifier, err := digest.NewDigestVerifier(localDigest)
370
+		if err != nil {
371
+			return false, err
372
+		}
373
+		payload, err := m.Payload()
374
+		if err != nil {
375
+			return false, err
376
+		}
377
+		if _, err := verifier.Write(payload); err != nil {
378
+			return false, err
379
+		}
380
+		verified = verified && verifier.Verified()
381
+	}
382
+	return verified, nil
383
+}
... ...
@@ -1,29 +1,15 @@
1 1
 package graph
2 2
 
3 3
 import (
4
-	"encoding/json"
5
-	"errors"
6 4
 	"fmt"
7 5
 	"io"
8
-	"os"
9
-	"sync"
10 6
 
11 7
 	"github.com/Sirupsen/logrus"
12
-	"github.com/docker/distribution/digest"
13
-	"github.com/docker/distribution/registry/client/transport"
14 8
 	"github.com/docker/docker/cliconfig"
15
-	"github.com/docker/docker/pkg/ioutils"
16
-	"github.com/docker/docker/pkg/progressreader"
17 9
 	"github.com/docker/docker/pkg/streamformatter"
18
-	"github.com/docker/docker/pkg/stringid"
19 10
 	"github.com/docker/docker/registry"
20
-	"github.com/docker/docker/runconfig"
21
-	"github.com/docker/docker/utils"
22
-	"github.com/docker/libtrust"
23 11
 )
24 12
 
25
-var ErrV2RegistryUnavailable = errors.New("error v2 registry unavailable")
26
-
27 13
 type ImagePushConfig struct {
28 14
 	MetaHeaders map[string][]string
29 15
 	AuthConfig  *cliconfig.AuthConfig
... ...
@@ -31,468 +17,41 @@ type ImagePushConfig struct {
31 31
 	OutStream   io.Writer
32 32
 }
33 33
 
34
-// Retrieve the all the images to be uploaded in the correct order
35
-func (s *TagStore) getImageList(localRepo map[string]string, requestedTag string) ([]string, map[string][]string, error) {
36
-	var (
37
-		imageList   []string
38
-		imagesSeen  = make(map[string]bool)
39
-		tagsByImage = make(map[string][]string)
40
-	)
41
-
42
-	for tag, id := range localRepo {
43
-		if requestedTag != "" && requestedTag != tag {
44
-			// Include only the requested tag.
45
-			continue
46
-		}
47
-
48
-		if utils.DigestReference(tag) {
49
-			// Ignore digest references.
50
-			continue
51
-		}
52
-
53
-		var imageListForThisTag []string
54
-
55
-		tagsByImage[id] = append(tagsByImage[id], tag)
56
-
57
-		for img, err := s.graph.Get(id); img != nil; img, err = s.graph.GetParent(img) {
58
-			if err != nil {
59
-				return nil, nil, err
60
-			}
61
-
62
-			if imagesSeen[img.ID] {
63
-				// This image is already on the list, we can ignore it and all its parents
64
-				break
65
-			}
66
-
67
-			imagesSeen[img.ID] = true
68
-			imageListForThisTag = append(imageListForThisTag, img.ID)
69
-		}
70
-
71
-		// reverse the image list for this tag (so the "most"-parent image is first)
72
-		for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 {
73
-			imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i]
74
-		}
75
-
76
-		// append to main image list
77
-		imageList = append(imageList, imageListForThisTag...)
78
-	}
79
-	if len(imageList) == 0 {
80
-		return nil, nil, fmt.Errorf("No images found for the requested repository / tag")
81
-	}
82
-	logrus.Debugf("Image list: %v", imageList)
83
-	logrus.Debugf("Tags by image: %v", tagsByImage)
84
-
85
-	return imageList, tagsByImage, nil
34
+type Pusher interface {
35
+	// Push tries to push the image configured at the creation of Pusher.
36
+	// Push returns an error if any, as well as a boolean that determines whether to retry Push on the next configured endpoint.
37
+	//
38
+	// TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic.
39
+	Push() (fallback bool, err error)
86 40
 }
87 41
 
88
-func (s *TagStore) getImageTags(localRepo map[string]string, askedTag string) ([]string, error) {
89
-	logrus.Debugf("Checking %s against %#v", askedTag, localRepo)
90
-	if len(askedTag) > 0 {
91
-		if _, ok := localRepo[askedTag]; !ok || utils.DigestReference(askedTag) {
92
-			return nil, fmt.Errorf("Tag does not exist: %s", askedTag)
93
-		}
94
-		return []string{askedTag}, nil
95
-	}
96
-	var tags []string
97
-	for tag := range localRepo {
98
-		if !utils.DigestReference(tag) {
99
-			tags = append(tags, tag)
100
-		}
101
-	}
102
-	return tags, nil
103
-}
104
-
105
-// createImageIndex returns an index of an image's layer IDs and tags.
106
-func (s *TagStore) createImageIndex(images []string, tags map[string][]string) []*registry.ImgData {
107
-	var imageIndex []*registry.ImgData
108
-	for _, id := range images {
109
-		if tags, hasTags := tags[id]; hasTags {
110
-			// If an image has tags you must add an entry in the image index
111
-			// for each tag
112
-			for _, tag := range tags {
113
-				imageIndex = append(imageIndex, &registry.ImgData{
114
-					ID:  id,
115
-					Tag: tag,
116
-				})
117
-			}
118
-			continue
119
-		}
120
-		// If the image does not have a tag it still needs to be sent to the
121
-		// registry with an empty tag so that it is accociated with the repository
122
-		imageIndex = append(imageIndex, &registry.ImgData{
123
-			ID:  id,
124
-			Tag: "",
125
-		})
126
-	}
127
-	return imageIndex
128
-}
129
-
130
-type imagePushData struct {
131
-	id       string
132
-	endpoint string
133
-	tokens   []string
134
-}
135
-
136
-// lookupImageOnEndpoint checks the specified endpoint to see if an image exists
137
-// and if it is absent then it sends the image id to the channel to be pushed.
138
-func lookupImageOnEndpoint(wg *sync.WaitGroup, r *registry.Session, out io.Writer, sf *streamformatter.StreamFormatter,
139
-	images chan imagePushData, imagesToPush chan string) {
140
-	defer wg.Done()
141
-	for image := range images {
142
-		if err := r.LookupRemoteImage(image.id, image.endpoint); err != nil {
143
-			logrus.Errorf("Error in LookupRemoteImage: %s", err)
144
-			imagesToPush <- image.id
145
-			continue
146
-		}
147
-		out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", stringid.TruncateID(image.id)))
148
-	}
149
-}
150
-
151
-func (s *TagStore) pushImageToEndpoint(endpoint string, out io.Writer, remoteName string, imageIDs []string,
152
-	tags map[string][]string, repo *registry.RepositoryData, sf *streamformatter.StreamFormatter, r *registry.Session) error {
153
-	workerCount := len(imageIDs)
154
-	// start a maximum of 5 workers to check if images exist on the specified endpoint.
155
-	if workerCount > 5 {
156
-		workerCount = 5
157
-	}
158
-	var (
159
-		wg           = &sync.WaitGroup{}
160
-		imageData    = make(chan imagePushData, workerCount*2)
161
-		imagesToPush = make(chan string, workerCount*2)
162
-		pushes       = make(chan map[string]struct{}, 1)
163
-	)
164
-	for i := 0; i < workerCount; i++ {
165
-		wg.Add(1)
166
-		go lookupImageOnEndpoint(wg, r, out, sf, imageData, imagesToPush)
167
-	}
168
-	// start a go routine that consumes the images to push
169
-	go func() {
170
-		shouldPush := make(map[string]struct{})
171
-		for id := range imagesToPush {
172
-			shouldPush[id] = struct{}{}
173
-		}
174
-		pushes <- shouldPush
175
-	}()
176
-	for _, id := range imageIDs {
177
-		imageData <- imagePushData{
178
-			id:       id,
179
-			endpoint: endpoint,
180
-			tokens:   repo.Tokens,
181
-		}
182
-	}
183
-	// close the channel to notify the workers that there will be no more images to check.
184
-	close(imageData)
185
-	wg.Wait()
186
-	close(imagesToPush)
187
-	// wait for all the images that require pushes to be collected into a consumable map.
188
-	shouldPush := <-pushes
189
-	// finish by pushing any images and tags to the endpoint.  The order that the images are pushed
190
-	// is very important that is why we are still iterating over the ordered list of imageIDs.
191
-	for _, id := range imageIDs {
192
-		if _, push := shouldPush[id]; push {
193
-			if _, err := s.pushImage(r, out, id, endpoint, repo.Tokens, sf); err != nil {
194
-				// FIXME: Continue on error?
195
-				return err
196
-			}
197
-		}
198
-		for _, tag := range tags[id] {
199
-			out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(id), endpoint+"repositories/"+remoteName+"/tags/"+tag))
200
-			if err := r.PushRegistryTag(remoteName, id, tag, endpoint); err != nil {
201
-				return err
202
-			}
203
-		}
204
-	}
205
-	return nil
206
-}
207
-
208
-// pushRepository pushes layers that do not already exist on the registry.
209
-func (s *TagStore) pushRepository(r *registry.Session, out io.Writer,
210
-	repoInfo *registry.RepositoryInfo, localRepo map[string]string,
211
-	tag string, sf *streamformatter.StreamFormatter) error {
212
-	logrus.Debugf("Local repo: %s", localRepo)
213
-	out = ioutils.NewWriteFlusher(out)
214
-	imgList, tags, err := s.getImageList(localRepo, tag)
215
-	if err != nil {
216
-		return err
217
-	}
218
-	out.Write(sf.FormatStatus("", "Sending image list"))
219
-
220
-	imageIndex := s.createImageIndex(imgList, tags)
221
-	logrus.Debugf("Preparing to push %s with the following images and tags", localRepo)
222
-	for _, data := range imageIndex {
223
-		logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
224
-	}
225
-	// Register all the images in a repository with the registry
226
-	// If an image is not in this list it will not be associated with the repository
227
-	repoData, err := r.PushImageJSONIndex(repoInfo.RemoteName, imageIndex, false, nil)
228
-	if err != nil {
229
-		return err
230
-	}
231
-	nTag := 1
232
-	if tag == "" {
233
-		nTag = len(localRepo)
234
-	}
235
-	out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", repoInfo.CanonicalName, nTag))
236
-	// push the repository to each of the endpoints only if it does not exist.
237
-	for _, endpoint := range repoData.Endpoints {
238
-		if err := s.pushImageToEndpoint(endpoint, out, repoInfo.RemoteName, imgList, tags, repoData, sf, r); err != nil {
239
-			return err
240
-		}
241
-	}
242
-	_, err = r.PushImageJSONIndex(repoInfo.RemoteName, imageIndex, true, repoData.Endpoints)
243
-	return err
244
-}
245
-
246
-func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep string, token []string, sf *streamformatter.StreamFormatter) (checksum string, err error) {
247
-	out = ioutils.NewWriteFlusher(out)
248
-	jsonRaw, err := s.graph.RawJSON(imgID)
249
-	if err != nil {
250
-		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
251
-	}
252
-	out.Write(sf.FormatProgress(stringid.TruncateID(imgID), "Pushing", nil))
253
-
254
-	imgData := &registry.ImgData{
255
-		ID: imgID,
256
-	}
257
-
258
-	// Send the json
259
-	if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil {
260
-		if err == registry.ErrAlreadyExists {
261
-			out.Write(sf.FormatProgress(stringid.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
262
-			return "", nil
263
-		}
264
-		return "", err
265
-	}
266
-
267
-	layerData, err := s.graph.TempLayerArchive(imgID, sf, out)
268
-	if err != nil {
269
-		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
270
-	}
271
-	defer os.RemoveAll(layerData.Name())
272
-
273
-	// Send the layer
274
-	logrus.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)
275
-
276
-	checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID,
277
-		progressreader.New(progressreader.Config{
278
-			In:        layerData,
279
-			Out:       out,
280
-			Formatter: sf,
281
-			Size:      int(layerData.Size),
282
-			NewLines:  false,
283
-			ID:        stringid.TruncateID(imgData.ID),
284
-			Action:    "Pushing",
285
-		}), ep, jsonRaw)
286
-	if err != nil {
287
-		return "", err
288
-	}
289
-	imgData.Checksum = checksum
290
-	imgData.ChecksumPayload = checksumPayload
291
-	// Send the checksum
292
-	if err := r.PushImageChecksumRegistry(imgData, ep); err != nil {
293
-		return "", err
294
-	}
295
-
296
-	out.Write(sf.FormatProgress(stringid.TruncateID(imgData.ID), "Image successfully pushed", nil))
297
-	return imgData.Checksum, nil
298
-}
299
-
300
-func (s *TagStore) pushV2Repository(r *registry.Session, localRepo Repository, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter) error {
301
-	endpoint, err := r.V2RegistryEndpoint(repoInfo.Index)
302
-	if err != nil {
303
-		if repoInfo.Index.Official {
304
-			logrus.Debugf("Unable to push to V2 registry, falling back to v1: %s", err)
305
-			return ErrV2RegistryUnavailable
306
-		}
307
-		return fmt.Errorf("error getting registry endpoint: %s", err)
308
-	}
309
-
310
-	tags, err := s.getImageTags(localRepo, tag)
311
-	if err != nil {
312
-		return err
313
-	}
314
-	if len(tags) == 0 {
315
-		return fmt.Errorf("No tags to push for %s", repoInfo.LocalName)
316
-	}
317
-
318
-	auth, err := r.GetV2Authorization(endpoint, repoInfo.RemoteName, false)
319
-	if err != nil {
320
-		return fmt.Errorf("error getting authorization: %s", err)
321
-	}
322
-	if !auth.CanAuthorizeV2() {
323
-		return ErrV2RegistryUnavailable
324
-	}
325
-
326
-	for _, tag := range tags {
327
-		logrus.Debugf("Pushing repository: %s:%s", repoInfo.CanonicalName, tag)
328
-
329
-		layerId, exists := localRepo[tag]
330
-		if !exists {
331
-			return fmt.Errorf("tag does not exist: %s", tag)
332
-		}
333
-
334
-		layer, err := s.graph.Get(layerId)
335
-		if err != nil {
336
-			return err
337
-		}
338
-
339
-		m := &registry.ManifestData{
340
-			SchemaVersion: 1,
341
-			Name:          repoInfo.RemoteName,
342
-			Tag:           tag,
343
-			Architecture:  layer.Architecture,
344
-		}
345
-		var metadata runconfig.Config
346
-		if layer.Config != nil {
347
-			metadata = *layer.Config
348
-		}
349
-
350
-		layersSeen := make(map[string]bool)
351
-		layers := []*Image{}
352
-		for ; layer != nil; layer, err = s.graph.GetParent(layer) {
353
-			if err != nil {
354
-				return err
355
-			}
356
-
357
-			if layersSeen[layer.ID] {
358
-				break
359
-			}
360
-			layers = append(layers, layer)
361
-			layersSeen[layer.ID] = true
362
-		}
363
-		m.FSLayers = make([]*registry.FSLayer, len(layers))
364
-		m.History = make([]*registry.ManifestHistory, len(layers))
365
-
366
-		// Schema version 1 requires layer ordering from top to root
367
-		for i, layer := range layers {
368
-			logrus.Debugf("Pushing layer: %s", layer.ID)
369
-
370
-			if layer.Config != nil && metadata.Image != layer.ID {
371
-				if err := runconfig.Merge(&metadata, layer.Config); err != nil {
372
-					return err
373
-				}
374
-			}
375
-			jsonData, err := s.graph.RawJSON(layer.ID)
376
-			if err != nil {
377
-				return fmt.Errorf("cannot retrieve the path for %s: %s", layer.ID, err)
378
-			}
379
-
380
-			var exists bool
381
-			dgst, err := s.graph.GetDigest(layer.ID)
382
-			if err != nil {
383
-				if err != ErrDigestNotSet {
384
-					return fmt.Errorf("error getting image checksum: %s", err)
385
-				}
386
-			} else {
387
-				// Call mount blob
388
-				exists, err = r.HeadV2ImageBlob(endpoint, repoInfo.RemoteName, dgst, auth)
389
-				if err != nil {
390
-					out.Write(sf.FormatProgress(stringid.TruncateID(layer.ID), "Image push failed", nil))
391
-					return err
392
-				}
393
-			}
394
-			if !exists {
395
-				if pushDigest, err := s.pushV2Image(r, layer, endpoint, repoInfo.RemoteName, sf, out, auth); err != nil {
396
-					return err
397
-				} else if pushDigest != dgst {
398
-					// Cache new checksum
399
-					if err := s.graph.SetDigest(layer.ID, pushDigest); err != nil {
400
-						return err
401
-					}
402
-					dgst = pushDigest
403
-				}
404
-			} else {
405
-				out.Write(sf.FormatProgress(stringid.TruncateID(layer.ID), "Image already exists", nil))
406
-			}
407
-			m.FSLayers[i] = &registry.FSLayer{BlobSum: dgst.String()}
408
-			m.History[i] = &registry.ManifestHistory{V1Compatibility: string(jsonData)}
409
-		}
410
-
411
-		if err := validateManifest(m); err != nil {
412
-			return fmt.Errorf("invalid manifest: %s", err)
413
-		}
414
-
415
-		logrus.Debugf("Pushing %s:%s to v2 repository", repoInfo.LocalName, tag)
416
-		mBytes, err := json.MarshalIndent(m, "", "   ")
417
-		if err != nil {
418
-			return err
419
-		}
420
-		js, err := libtrust.NewJSONSignature(mBytes)
421
-		if err != nil {
422
-			return err
423
-		}
424
-
425
-		if err = js.Sign(s.trustKey); err != nil {
426
-			return err
427
-		}
428
-
429
-		signedBody, err := js.PrettySignature("signatures")
430
-		if err != nil {
431
-			return err
432
-		}
433
-		logrus.Infof("Signed manifest for %s:%s using daemon's key: %s", repoInfo.LocalName, tag, s.trustKey.KeyID())
434
-
435
-		// push the manifest
436
-		digest, err := r.PutV2ImageManifest(endpoint, repoInfo.RemoteName, tag, signedBody, mBytes, auth)
437
-		if err != nil {
438
-			return err
439
-		}
440
-
441
-		out.Write(sf.FormatStatus("", "Digest: %s", digest))
442
-	}
443
-	return nil
444
-}
445
-
446
-// PushV2Image pushes the image content to the v2 registry, first buffering the contents to disk
447
-func (s *TagStore) pushV2Image(r *registry.Session, img *Image, endpoint *registry.Endpoint, imageName string, sf *streamformatter.StreamFormatter, out io.Writer, auth *registry.RequestAuthorization) (digest.Digest, error) {
448
-	out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Buffering to Disk", nil))
449
-
450
-	image, err := s.graph.Get(img.ID)
451
-	if err != nil {
452
-		return "", err
453
-	}
454
-	arch, err := s.graph.TarLayer(image)
455
-	if err != nil {
456
-		return "", err
457
-	}
458
-	defer arch.Close()
459
-
460
-	tf, err := s.graph.newTempFile()
461
-	if err != nil {
462
-		return "", err
463
-	}
464
-	defer func() {
465
-		tf.Close()
466
-		os.Remove(tf.Name())
467
-	}()
468
-
469
-	size, dgst, err := bufferToFile(tf, arch)
470
-
471
-	// Send the layer
472
-	logrus.Debugf("rendered layer for %s of [%d] size", img.ID, size)
473
-
474
-	if err := r.PutV2ImageBlob(endpoint, imageName, dgst,
475
-		progressreader.New(progressreader.Config{
476
-			In:        tf,
477
-			Out:       out,
478
-			Formatter: sf,
479
-			Size:      int(size),
480
-			NewLines:  false,
481
-			ID:        stringid.TruncateID(img.ID),
482
-			Action:    "Pushing",
483
-		}), auth); err != nil {
484
-		out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Image push failed", nil))
485
-		return "", err
486
-	}
487
-	out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Image successfully pushed", nil))
488
-	return dgst, nil
42
// NewPusher returns a Pusher implementation matching the API version of
// the given registry endpoint: v2Pusher for APIVersion2, v1Pusher for
// APIVersion1, or an error for any other version.
func (s *TagStore) NewPusher(endpoint registry.APIEndpoint, localRepo Repository, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig, sf *streamformatter.StreamFormatter) (Pusher, error) {
	switch endpoint.Version {
	case registry.APIVersion2:
		return &v2Pusher{
			TagStore:  s,
			endpoint:  endpoint,
			localRepo: localRepo,
			repoInfo:  repoInfo,
			config:    imagePushConfig,
			sf:        sf,
		}, nil
	case registry.APIVersion1:
		return &v1Pusher{
			TagStore:  s,
			endpoint:  endpoint,
			localRepo: localRepo,
			repoInfo:  repoInfo,
			config:    imagePushConfig,
			sf:        sf,
		}, nil
	}
	return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL)
}
490 65
 
491 66
 // FIXME: Allow to interrupt current push when new push of same image is done.
492 67
 func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) error {
493
-	var (
494
-		sf = streamformatter.NewJSONStreamFormatter()
495
-	)
68
+	var sf = streamformatter.NewJSONStreamFormatter()
496 69
 
497 70
 	// Resolve the Repository name from fqn to RepositoryInfo
498 71
 	repoInfo, err := s.registryService.ResolveRepository(localName)
... ...
@@ -500,23 +59,7 @@ func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) erro
500 500
 		return err
501 501
 	}
502 502
 
503
-	if _, err := s.poolAdd("push", repoInfo.LocalName); err != nil {
504
-		return err
505
-	}
506
-	defer s.poolRemove("push", repoInfo.LocalName)
507
-
508
-	endpoint, err := repoInfo.GetEndpoint(imagePushConfig.MetaHeaders)
509
-	if err != nil {
510
-		return err
511
-	}
512
-	// TODO(tiborvass): reuse client from endpoint?
513
-	// Adds Docker-specific headers as well as user-specified headers (metaHeaders)
514
-	tr := transport.NewTransport(
515
-		registry.NewTransport(registry.NoTimeout, endpoint.IsSecure),
516
-		registry.DockerHeaders(imagePushConfig.MetaHeaders)...,
517
-	)
518
-	client := registry.HTTPClient(tr)
519
-	r, err := registry.NewSession(client, imagePushConfig.AuthConfig, endpoint)
503
+	endpoints, err := s.registryService.LookupEndpoints(repoInfo.CanonicalName)
520 504
 	if err != nil {
521 505
 		return err
522 506
 	}
... ...
@@ -534,23 +77,31 @@ func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) erro
534 534
 		return fmt.Errorf("Repository does not exist: %s", repoInfo.LocalName)
535 535
 	}
536 536
 
537
-	if repoInfo.Index.Official || endpoint.Version == registry.APIVersion2 {
538
-		err := s.pushV2Repository(r, localRepo, imagePushConfig.OutStream, repoInfo, imagePushConfig.Tag, sf)
539
-		if err == nil {
540
-			s.eventsService.Log("push", repoInfo.LocalName, "")
541
-			return nil
537
+	var lastErr error
538
+	for _, endpoint := range endpoints {
539
+		logrus.Debugf("Trying to push %s to %s %s", repoInfo.CanonicalName, endpoint.URL, endpoint.Version)
540
+
541
+		pusher, err := s.NewPusher(endpoint, localRepo, repoInfo, imagePushConfig, sf)
542
+		if err != nil {
543
+			lastErr = err
544
+			continue
542 545
 		}
546
+		if fallback, err := pusher.Push(); err != nil {
547
+			if fallback {
548
+				lastErr = err
549
+				continue
550
+			}
551
+			logrus.Debugf("Not continuing with error: %v", err)
552
+			return err
543 553
 
544
-		if err != ErrV2RegistryUnavailable {
545
-			return fmt.Errorf("Error pushing to registry: %s", err)
546 554
 		}
547
-		logrus.Debug("V2 registry is unavailable, falling back on V1")
548
-	}
549 555
 
550
-	if err := s.pushRepository(r, imagePushConfig.OutStream, repoInfo, localRepo, imagePushConfig.Tag, sf); err != nil {
551
-		return err
556
+		s.eventsService.Log("push", repoInfo.LocalName, "")
557
+		return nil
552 558
 	}
553
-	s.eventsService.Log("push", repoInfo.LocalName, "")
554
-	return nil
555 559
 
560
+	if lastErr == nil {
561
+		lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.CanonicalName)
562
+	}
563
+	return lastErr
556 564
 }
557 565
new file mode 100644
... ...
@@ -0,0 +1,309 @@
0
+package graph
1
+
2
+import (
3
+	"fmt"
4
+	"io"
5
+	"os"
6
+	"sync"
7
+
8
+	"github.com/Sirupsen/logrus"
9
+	"github.com/docker/distribution/registry/client/transport"
10
+	"github.com/docker/docker/pkg/ioutils"
11
+	"github.com/docker/docker/pkg/progressreader"
12
+	"github.com/docker/docker/pkg/streamformatter"
13
+	"github.com/docker/docker/pkg/stringid"
14
+	"github.com/docker/docker/registry"
15
+	"github.com/docker/docker/utils"
16
+)
17
+
18
// v1Pusher pushes a repository to a registry over the legacy v1 protocol.
// It embeds *TagStore for access to the image graph, push/pull pools and
// the registry service.
type v1Pusher struct {
	*TagStore
	endpoint  registry.APIEndpoint // registry endpoint being pushed to
	localRepo Repository           // local tag -> image ID mapping for this repository
	repoInfo  *registry.RepositoryInfo
	config    *ImagePushConfig
	sf        *streamformatter.StreamFormatter
	session   *registry.Session // v1 registry session, established in Push

	out io.Writer // progress/status output, set in pushRepository
}
29
+
30
// Push pushes the repository to the configured v1 endpoint. The returned
// fallback flag tells the caller whether it may retry against another
// endpoint: true only for failures that occur before the actual upload
// starts (endpoint conversion, session setup).
func (p *v1Pusher) Push() (fallback bool, err error) {
	tlsConfig, err := p.registryService.TlsConfig(p.repoInfo.Index.Name)
	if err != nil {
		return false, err
	}
	// Adds Docker-specific headers as well as user-specified headers (metaHeaders)
	tr := transport.NewTransport(
		// TODO(tiborvass): was NoTimeout
		registry.NewTransport(tlsConfig),
		registry.DockerHeaders(p.config.MetaHeaders)...,
	)
	client := registry.HTTPClient(tr)
	// Convert the generic API endpoint to a v1 endpoint; a failure here is
	// considered retriable against other endpoints.
	v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders)
	if err != nil {
		logrus.Debugf("Could not get v1 endpoint: %v", err)
		return true, err
	}
	p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint)
	if err != nil {
		// TODO(dmcgowan): Check if should fallback
		return true, err
	}
	if err := p.pushRepository(p.config.Tag); err != nil {
		// TODO(dmcgowan): Check if should fallback
		return false, err
	}
	return false, nil
}
58
+
59
+// Retrieve the all the images to be uploaded in the correct order
60
+func (p *v1Pusher) getImageList(requestedTag string) ([]string, map[string][]string, error) {
61
+	var (
62
+		imageList   []string
63
+		imagesSeen  = make(map[string]bool)
64
+		tagsByImage = make(map[string][]string)
65
+	)
66
+
67
+	for tag, id := range p.localRepo {
68
+		if requestedTag != "" && requestedTag != tag {
69
+			// Include only the requested tag.
70
+			continue
71
+		}
72
+
73
+		if utils.DigestReference(tag) {
74
+			// Ignore digest references.
75
+			continue
76
+		}
77
+
78
+		var imageListForThisTag []string
79
+
80
+		tagsByImage[id] = append(tagsByImage[id], tag)
81
+
82
+		for img, err := p.graph.Get(id); img != nil; img, err = p.graph.GetParent(img) {
83
+			if err != nil {
84
+				return nil, nil, err
85
+			}
86
+
87
+			if imagesSeen[img.ID] {
88
+				// This image is already on the list, we can ignore it and all its parents
89
+				break
90
+			}
91
+
92
+			imagesSeen[img.ID] = true
93
+			imageListForThisTag = append(imageListForThisTag, img.ID)
94
+		}
95
+
96
+		// reverse the image list for this tag (so the "most"-parent image is first)
97
+		for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 {
98
+			imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i]
99
+		}
100
+
101
+		// append to main image list
102
+		imageList = append(imageList, imageListForThisTag...)
103
+	}
104
+	if len(imageList) == 0 {
105
+		return nil, nil, fmt.Errorf("No images found for the requested repository / tag")
106
+	}
107
+	logrus.Debugf("Image list: %v", imageList)
108
+	logrus.Debugf("Tags by image: %v", tagsByImage)
109
+
110
+	return imageList, tagsByImage, nil
111
+}
112
+
113
+// createImageIndex returns an index of an image's layer IDs and tags.
114
+func (s *TagStore) createImageIndex(images []string, tags map[string][]string) []*registry.ImgData {
115
+	var imageIndex []*registry.ImgData
116
+	for _, id := range images {
117
+		if tags, hasTags := tags[id]; hasTags {
118
+			// If an image has tags you must add an entry in the image index
119
+			// for each tag
120
+			for _, tag := range tags {
121
+				imageIndex = append(imageIndex, &registry.ImgData{
122
+					ID:  id,
123
+					Tag: tag,
124
+				})
125
+			}
126
+			continue
127
+		}
128
+		// If the image does not have a tag it still needs to be sent to the
129
+		// registry with an empty tag so that it is accociated with the repository
130
+		imageIndex = append(imageIndex, &registry.ImgData{
131
+			ID:  id,
132
+			Tag: "",
133
+		})
134
+	}
135
+	return imageIndex
136
+}
137
+
138
// imagePushData identifies a single image to be checked against (and
// possibly pushed to) a registry endpoint.
type imagePushData struct {
	id       string   // image ID
	endpoint string   // registry endpoint URL
	tokens   []string // auth tokens from the repository data
}
143
+
144
+// lookupImageOnEndpoint checks the specified endpoint to see if an image exists
145
+// and if it is absent then it sends the image id to the channel to be pushed.
146
+func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, images chan imagePushData, imagesToPush chan string) {
147
+	defer wg.Done()
148
+	for image := range images {
149
+		if err := p.session.LookupRemoteImage(image.id, image.endpoint); err != nil {
150
+			logrus.Errorf("Error in LookupRemoteImage: %s", err)
151
+			imagesToPush <- image.id
152
+			continue
153
+		}
154
+		p.out.Write(p.sf.FormatStatus("", "Image %s already pushed, skipping", stringid.TruncateID(image.id)))
155
+	}
156
+}
157
+
158
// pushImageToEndpoint pushes the given images to a single registry endpoint.
// It first checks, with up to 5 concurrent workers, which images already
// exist remotely, then pushes the missing ones in dependency order followed
// by their tags.
func (p *v1Pusher) pushImageToEndpoint(endpoint string, imageIDs []string, tags map[string][]string, repo *registry.RepositoryData) error {
	workerCount := len(imageIDs)
	// start a maximum of 5 workers to check if images exist on the specified endpoint.
	if workerCount > 5 {
		workerCount = 5
	}
	var (
		wg           = &sync.WaitGroup{}
		imageData    = make(chan imagePushData, workerCount*2)
		imagesToPush = make(chan string, workerCount*2)
		pushes       = make(chan map[string]struct{}, 1)
	)
	for i := 0; i < workerCount; i++ {
		wg.Add(1)
		go p.lookupImageOnEndpoint(wg, imageData, imagesToPush)
	}
	// start a go routine that consumes the images to push
	go func() {
		shouldPush := make(map[string]struct{})
		for id := range imagesToPush {
			shouldPush[id] = struct{}{}
		}
		pushes <- shouldPush
	}()
	// Feed every image to the lookup workers.
	for _, id := range imageIDs {
		imageData <- imagePushData{
			id:       id,
			endpoint: endpoint,
			tokens:   repo.Tokens,
		}
	}
	// close the channel to notify the workers that there will be no more images to check.
	close(imageData)
	wg.Wait()
	close(imagesToPush)
	// wait for all the images that require pushes to be collected into a consumable map.
	shouldPush := <-pushes
	// finish by pushing any images and tags to the endpoint.  The order that the images are pushed
	// is very important that is why we are still iterating over the ordered list of imageIDs.
	for _, id := range imageIDs {
		if _, push := shouldPush[id]; push {
			if _, err := p.pushImage(id, endpoint, repo.Tokens); err != nil {
				// FIXME: Continue on error?
				return err
			}
		}
		// Register each tag of this image with the endpoint.
		for _, tag := range tags[id] {
			p.out.Write(p.sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(id), endpoint+"repositories/"+p.repoInfo.RemoteName+"/tags/"+tag))
			if err := p.session.PushRegistryTag(p.repoInfo.RemoteName, id, tag, endpoint); err != nil {
				return err
			}
		}
	}
	return nil
}
213
+
214
// pushRepository pushes layers that do not already exist on the registry.
// Flow: build the ordered image list, register the image index with the
// registry, push missing images and tags to each endpoint the registry
// returned, then re-send the index (with the endpoint list) to finish.
func (p *v1Pusher) pushRepository(tag string) error {

	logrus.Debugf("Local repo: %s", p.localRepo)
	p.out = ioutils.NewWriteFlusher(p.config.OutStream)
	imgList, tags, err := p.getImageList(tag)
	if err != nil {
		return err
	}
	p.out.Write(p.sf.FormatStatus("", "Sending image list"))

	imageIndex := p.createImageIndex(imgList, tags)
	logrus.Debugf("Preparing to push %s with the following images and tags", p.localRepo)
	for _, data := range imageIndex {
		logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
	}

	// Only one push of this repository may run at a time.
	if _, err := p.poolAdd("push", p.repoInfo.LocalName); err != nil {
		return err
	}
	defer p.poolRemove("push", p.repoInfo.LocalName)

	// Register all the images in a repository with the registry
	// If an image is not in this list it will not be associated with the repository
	repoData, err := p.session.PushImageJSONIndex(p.repoInfo.RemoteName, imageIndex, false, nil)
	if err != nil {
		return err
	}
	// Tag count reported to the user: all local tags when no single tag
	// was requested.
	nTag := 1
	if tag == "" {
		nTag = len(p.localRepo)
	}
	p.out.Write(p.sf.FormatStatus("", "Pushing repository %s (%d tags)", p.repoInfo.CanonicalName, nTag))
	// push the repository to each of the endpoints only if it does not exist.
	for _, endpoint := range repoData.Endpoints {
		if err := p.pushImageToEndpoint(endpoint, imgList, tags, repoData); err != nil {
			return err
		}
	}
	// Re-send the index, this time with the endpoint list, to complete the push.
	_, err = p.session.PushImageJSONIndex(p.repoInfo.RemoteName, imageIndex, true, repoData.Endpoints)
	return err
}
256
+
257
// pushImage uploads a single image to the given endpoint in three steps:
// JSON metadata, layer tarball, then checksum. It returns the checksum
// reported for the layer. An image the registry already has is skipped
// without error (empty checksum).
func (p *v1Pusher) pushImage(imgID, ep string, token []string) (checksum string, err error) {
	jsonRaw, err := p.graph.RawJSON(imgID)
	if err != nil {
		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
	}
	p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pushing", nil))

	imgData := &registry.ImgData{
		ID: imgID,
	}

	// Send the json
	if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil {
		if err == registry.ErrAlreadyExists {
			// Registry already has this image: not an error.
			p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
			return "", nil
		}
		return "", err
	}

	// Materialize the layer as a temporary archive so its size is known.
	layerData, err := p.graph.TempLayerArchive(imgID, p.sf, p.out)
	if err != nil {
		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
	}
	defer os.RemoveAll(layerData.Name())

	// Send the layer
	logrus.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)

	checksum, checksumPayload, err := p.session.PushImageLayerRegistry(imgData.ID,
		progressreader.New(progressreader.Config{
			In:        layerData,
			Out:       p.out,
			Formatter: p.sf,
			Size:      int(layerData.Size),
			NewLines:  false,
			ID:        stringid.TruncateID(imgData.ID),
			Action:    "Pushing",
		}), ep, jsonRaw)
	if err != nil {
		return "", err
	}
	imgData.Checksum = checksum
	imgData.ChecksumPayload = checksumPayload
	// Send the checksum
	if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil {
		return "", err
	}

	p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgData.ID), "Image successfully pushed", nil))
	return imgData.Checksum, nil
}
0 309
new file mode 100644
... ...
@@ -0,0 +1,254 @@
0
+package graph
1
+
2
+import (
3
+	"fmt"
4
+	"io/ioutil"
5
+	"os"
6
+
7
+	"github.com/Sirupsen/logrus"
8
+	"github.com/docker/distribution"
9
+	"github.com/docker/distribution/digest"
10
+	"github.com/docker/distribution/manifest"
11
+	"github.com/docker/docker/pkg/progressreader"
12
+	"github.com/docker/docker/pkg/streamformatter"
13
+	"github.com/docker/docker/pkg/stringid"
14
+	"github.com/docker/docker/registry"
15
+	"github.com/docker/docker/runconfig"
16
+	"github.com/docker/docker/utils"
17
+)
18
+
19
// v2Pusher pushes a repository to a registry using the v2 (distribution)
// protocol. It embeds *TagStore for access to the image graph, pools and
// the daemon's trust key.
type v2Pusher struct {
	*TagStore
	endpoint  registry.APIEndpoint // registry endpoint being pushed to
	localRepo Repository           // local tag -> image ID mapping for this repository
	repoInfo  *registry.RepositoryInfo
	config    *ImagePushConfig
	sf        *streamformatter.StreamFormatter

	// repo is the vendored distribution client for the remote repository,
	// established in Push.
	repo distribution.Repository
}
28
+
29
+func (p *v2Pusher) Push() (fallback bool, err error) {
30
+	p.repo, err = NewV2Repository(p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig)
31
+	if err != nil {
32
+		logrus.Debugf("Error getting v2 registry: %v", err)
33
+		return true, err
34
+	}
35
+	return false, p.pushV2Repository(p.config.Tag)
36
+}
37
+
38
+func (p *v2Pusher) getImageTags(askedTag string) ([]string, error) {
39
+	logrus.Debugf("Checking %q against %#v", askedTag, p.localRepo)
40
+	if len(askedTag) > 0 {
41
+		if _, ok := p.localRepo[askedTag]; !ok || utils.DigestReference(askedTag) {
42
+			return nil, fmt.Errorf("Tag does not exist for %s", askedTag)
43
+		}
44
+		return []string{askedTag}, nil
45
+	}
46
+	var tags []string
47
+	for tag := range p.localRepo {
48
+		if !utils.DigestReference(tag) {
49
+			tags = append(tags, tag)
50
+		}
51
+	}
52
+	return tags, nil
53
+}
54
+
55
+func (p *v2Pusher) pushV2Repository(tag string) error {
56
+	localName := p.repoInfo.LocalName
57
+	if _, err := p.poolAdd("push", localName); err != nil {
58
+		return err
59
+	}
60
+	defer p.poolRemove("push", localName)
61
+
62
+	tags, err := p.getImageTags(tag)
63
+	if err != nil {
64
+		return fmt.Errorf("error getting tags for %s: %s", localName, err)
65
+	}
66
+	if len(tags) == 0 {
67
+		return fmt.Errorf("no tags to push for %s", localName)
68
+	}
69
+
70
+	for _, tag := range tags {
71
+		if err := p.pushV2Tag(tag); err != nil {
72
+			return err
73
+		}
74
+	}
75
+
76
+	return nil
77
+}
78
+
79
// pushV2Tag pushes a single tag: it walks the layer chain from the tagged
// image to the base image, uploads any layer blob the registry does not
// already have, assembles a schema-1 manifest, signs it with the daemon's
// trust key and uploads the signed manifest.
func (p *v2Pusher) pushV2Tag(tag string) error {
	logrus.Debugf("Pushing repository: %s:%s", p.repo.Name(), tag)

	layerId, exists := p.localRepo[tag]
	if !exists {
		return fmt.Errorf("tag does not exist: %s", tag)
	}

	// Guards against adding the same layer twice to the manifest.
	layersSeen := make(map[string]bool)

	layer, err := p.graph.Get(layerId)
	if err != nil {
		return err
	}

	m := &manifest.Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name:         p.repo.Name(),
		Tag:          tag,
		Architecture: layer.Architecture,
		FSLayers:     []manifest.FSLayer{},
		History:      []manifest.History{},
	}

	// metadata accumulates the merged image config down the parent chain.
	var metadata runconfig.Config
	if layer != nil && layer.Config != nil {
		metadata = *layer.Config
	}

	out := p.config.OutStream

	for ; layer != nil; layer, err = p.graph.GetParent(layer) {
		if err != nil {
			return err
		}

		if layersSeen[layer.ID] {
			break
		}

		logrus.Debugf("Pushing layer: %s", layer.ID)

		if layer.Config != nil && metadata.Image != layer.ID {
			if err := runconfig.Merge(&metadata, layer.Config); err != nil {
				return err
			}
		}

		jsonData, err := p.graph.RawJSON(layer.ID)
		if err != nil {
			return fmt.Errorf("cannot retrieve the path for %s: %s", layer.ID, err)
		}

		// Use the locally cached digest, if any, to ask the registry whether
		// it already has this blob.
		var exists bool
		dgst, err := p.graph.GetDigest(layer.ID)
		switch err {
		case nil:
			_, err := p.repo.Blobs(nil).Stat(nil, dgst)
			switch err {
			case nil:
				exists = true
				out.Write(p.sf.FormatProgress(stringid.TruncateID(layer.ID), "Image already exists", nil))
			case distribution.ErrBlobUnknown:
				// nop
			default:
				out.Write(p.sf.FormatProgress(stringid.TruncateID(layer.ID), "Image push failed", nil))
				return err
			}
		case ErrDigestNotSet:
			// nop
		case digest.ErrDigestInvalidFormat, digest.ErrDigestUnsupported:
			return fmt.Errorf("error getting image checksum: %v", err)
		}

		// if digest was empty or not saved, or if blob does not exist on the remote repository,
		// then push it and cache the digest the push produced.
		if !exists {
			if pushDigest, err := p.pushV2Image(p.repo.Blobs(nil), layer); err != nil {
				return err
			} else if pushDigest != dgst {
				// Cache new checksum
				if err := p.graph.SetDigest(layer.ID, pushDigest); err != nil {
					return err
				}
				dgst = pushDigest
			}
		}

		m.FSLayers = append(m.FSLayers, manifest.FSLayer{BlobSum: dgst})
		m.History = append(m.History, manifest.History{V1Compatibility: string(jsonData)})

		layersSeen[layer.ID] = true
	}

	logrus.Infof("Signed manifest for %s:%s using daemon's key: %s", p.repo.Name(), tag, p.trustKey.KeyID())
	signed, err := manifest.Sign(m, p.trustKey)
	if err != nil {
		return err
	}

	// An empty digest means it could not be computed; skip reporting it.
	manifestDigest, err := digestFromManifest(signed, p.repo.Name())
	if err != nil {
		return err
	}
	if manifestDigest != "" {
		out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest))
	}

	return p.repo.Manifests().Put(signed)
}
191
+
192
// pushV2Image buffers the layer tar for img to a temporary file — which
// yields its size and digest — then streams it to the blob service and
// commits the upload against the computed digest. Returns the layer digest.
func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *Image) (digest.Digest, error) {
	out := p.config.OutStream

	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Buffering to Disk", nil))

	image, err := p.graph.Get(img.ID)
	if err != nil {
		return "", err
	}
	arch, err := p.graph.TarLayer(image)
	if err != nil {
		return "", err
	}

	tf, err := p.graph.newTempFile()
	if err != nil {
		return "", err
	}
	defer func() {
		tf.Close()
		os.Remove(tf.Name())
	}()

	// Write the layer to disk, computing size and digest along the way.
	size, dgst, err := bufferToFile(tf, arch)
	if err != nil {
		return "", err
	}

	// Send the layer
	logrus.Debugf("rendered layer for %s of [%d] size", img.ID, size)
	layerUpload, err := bs.Create(nil)
	if err != nil {
		return "", err
	}
	defer layerUpload.Close()

	// Stream the buffered layer with progress reporting.
	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(tf),
		Out:       out,
		Formatter: p.sf,
		Size:      int(size),
		NewLines:  false,
		ID:        stringid.TruncateID(img.ID),
		Action:    "Pushing",
	})
	n, err := layerUpload.ReadFrom(reader)
	if err != nil {
		return "", err
	}
	if n != size {
		return "", fmt.Errorf("short upload: only wrote %d of %d", n, size)
	}

	// Commit checks the uploaded content against the expected digest.
	desc := distribution.Descriptor{Digest: dgst}
	if _, err := layerUpload.Commit(nil, desc); err != nil {
		return "", err
	}

	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Image successfully pushed", nil))

	return dgst, nil
}
0 254
new file mode 100644
... ...
@@ -0,0 +1,111 @@
0
+package graph
1
+
2
+import (
3
+	"errors"
4
+	"net"
5
+	"net/http"
6
+	"net/url"
7
+	"time"
8
+
9
+	"github.com/Sirupsen/logrus"
10
+	"github.com/docker/distribution"
11
+	"github.com/docker/distribution/digest"
12
+	"github.com/docker/distribution/manifest"
13
+	"github.com/docker/distribution/registry/client"
14
+	"github.com/docker/distribution/registry/client/auth"
15
+	"github.com/docker/distribution/registry/client/transport"
16
+	"github.com/docker/docker/cliconfig"
17
+	"github.com/docker/docker/registry"
18
+	"golang.org/x/net/context"
19
+)
20
+
21
// dumbCredentialStore returns the same static credentials regardless of the
// URL being authenticated against.
type dumbCredentialStore struct {
	auth *cliconfig.AuthConfig
}

// Basic returns the stored username and password for any URL.
func (dcs dumbCredentialStore) Basic(*url.URL) (string, string) {
	return dcs.auth.Username, dcs.auth.Password
}
28
+
29
+// v2 only
30
+func NewV2Repository(repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *cliconfig.AuthConfig) (distribution.Repository, error) {
31
+	ctx := context.Background()
32
+
33
+	repoName := repoInfo.CanonicalName
34
+	// If endpoint does not support CanonicalName, use the RemoteName instead
35
+	if endpoint.TrimHostname {
36
+		repoName = repoInfo.RemoteName
37
+	}
38
+
39
+	// TODO(dmcgowan): Call close idle connections when complete, use keep alive
40
+	base := &http.Transport{
41
+		Proxy: http.ProxyFromEnvironment,
42
+		Dial: (&net.Dialer{
43
+			Timeout:   30 * time.Second,
44
+			KeepAlive: 30 * time.Second,
45
+			DualStack: true,
46
+		}).Dial,
47
+		TLSHandshakeTimeout: 10 * time.Second,
48
+		TLSClientConfig:     endpoint.TLSConfig,
49
+		// TODO(dmcgowan): Call close idle connections when complete and use keep alive
50
+		DisableKeepAlives: true,
51
+	}
52
+
53
+	modifiers := registry.DockerHeaders(metaHeaders)
54
+	authTransport := transport.NewTransport(base, modifiers...)
55
+	pingClient := &http.Client{
56
+		Transport: authTransport,
57
+		Timeout:   5 * time.Second,
58
+	}
59
+	endpointStr := endpoint.URL + "/v2/"
60
+	req, err := http.NewRequest("GET", endpointStr, nil)
61
+	if err != nil {
62
+		return nil, err
63
+	}
64
+	resp, err := pingClient.Do(req)
65
+	if err != nil {
66
+		return nil, err
67
+	}
68
+	defer resp.Body.Close()
69
+
70
+	versions := auth.APIVersions(resp, endpoint.VersionHeader)
71
+	if endpoint.VersionHeader != "" && len(endpoint.Versions) > 0 {
72
+		var foundVersion bool
73
+		for _, version := range endpoint.Versions {
74
+			for _, pingVersion := range versions {
75
+				if version == pingVersion {
76
+					foundVersion = true
77
+				}
78
+			}
79
+		}
80
+		if !foundVersion {
81
+			return nil, errors.New("endpoint does not support v2 API")
82
+		}
83
+	}
84
+
85
+	challengeManager := auth.NewSimpleChallengeManager()
86
+	if err := challengeManager.AddResponse(resp); err != nil {
87
+		return nil, err
88
+	}
89
+
90
+	creds := dumbCredentialStore{auth: authConfig}
91
+	tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName, "push", "pull")
92
+	basicHandler := auth.NewBasicHandler(creds)
93
+	modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
94
+	tr := transport.NewTransport(base, modifiers...)
95
+
96
+	return client.NewRepository(ctx, repoName, endpoint.URL, tr)
97
+}
98
+
99
// digestFromManifest computes the digest of a signed manifest's payload.
// A payload-retrieval error is returned to the caller; a digest-computation
// error is only logged, and an empty digest is returned with a nil error —
// callers treat an empty digest as "unknown" and continue.
func digestFromManifest(m *manifest.SignedManifest, localName string) (digest.Digest, error) {
	payload, err := m.Payload()
	if err != nil {
		logrus.Debugf("could not retrieve manifest payload: %v", err)
		return "", err
	}
	manifestDigest, err := digest.FromBytes(payload)
	if err != nil {
		// Best effort: log and fall through with an empty digest.
		logrus.Infof("Could not compute manifest digest for %s:%s : %v", localName, m.Tag, err)
	}
	return manifestDigest, nil
}
... ...
@@ -104,7 +104,7 @@ func (s *DockerRegistrySuite) TestPullByDigestNoFallback(c *check.C) {
104 104
 	// pull from the registry using the <name>@<digest> reference
105 105
 	imageReference := fmt.Sprintf("%s@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", repoName)
106 106
 	out, _, err := dockerCmdWithError(c, "pull", imageReference)
107
-	if err == nil || !strings.Contains(out, "pulling with digest reference failed from v2 registry") {
107
+	if err == nil || !strings.Contains(out, "manifest unknown") {
108 108
 		c.Fatalf("expected non-zero exit status and correct error message when pulling non-existing image: %s", out)
109 109
 	}
110 110
 }
... ...
@@ -89,8 +89,6 @@ func (s *DockerSuite) TestPullImageOfficialNames(c *check.C) {
89 89
 	testRequires(c, Network)
90 90
 
91 91
 	names := []string{
92
-		"docker.io/hello-world",
93
-		"index.docker.io/hello-world",
94 92
 		"library/hello-world",
95 93
 		"docker.io/library/hello-world",
96 94
 		"index.docker.io/library/hello-world",
... ...
@@ -125,7 +125,7 @@ func loginV1(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint) (stri
125 125
 		return "", fmt.Errorf("Server Error: Server Address not set.")
126 126
 	}
127 127
 
128
-	loginAgainstOfficialIndex := serverAddress == IndexServerAddress()
128
+	loginAgainstOfficialIndex := serverAddress == INDEXSERVER
129 129
 
130 130
 	// to avoid sending the server address to the server it should be removed before being marshalled
131 131
 	authCopy := *authConfig
... ...
@@ -37,7 +37,7 @@ func setupTempConfigFile() (*cliconfig.ConfigFile, error) {
37 37
 	root = filepath.Join(root, cliconfig.CONFIGFILE)
38 38
 	configFile := cliconfig.NewConfigFile(root)
39 39
 
40
-	for _, registry := range []string{"testIndex", IndexServerAddress()} {
40
+	for _, registry := range []string{"testIndex", INDEXSERVER} {
41 41
 		configFile.AuthConfigs[registry] = cliconfig.AuthConfig{
42 42
 			Username: "docker-user",
43 43
 			Password: "docker-pass",
... ...
@@ -82,7 +82,7 @@ func TestResolveAuthConfigIndexServer(t *testing.T) {
82 82
 	}
83 83
 	defer os.RemoveAll(configFile.Filename())
84 84
 
85
-	indexConfig := configFile.AuthConfigs[IndexServerAddress()]
85
+	indexConfig := configFile.AuthConfigs[INDEXSERVER]
86 86
 
87 87
 	officialIndex := &IndexInfo{
88 88
 		Official: true,
... ...
@@ -92,10 +92,10 @@ func TestResolveAuthConfigIndexServer(t *testing.T) {
92 92
 	}
93 93
 
94 94
 	resolved := ResolveAuthConfig(configFile, officialIndex)
95
-	assertEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to return IndexServerAddress()")
95
+	assertEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to return INDEXSERVER")
96 96
 
97 97
 	resolved = ResolveAuthConfig(configFile, privateIndex)
98
-	assertNotEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to not return IndexServerAddress()")
98
+	assertNotEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to not return INDEXSERVER")
99 99
 }
100 100
 
101 101
 func TestResolveAuthConfigFullURL(t *testing.T) {
... ...
@@ -120,7 +120,7 @@ func TestResolveAuthConfigFullURL(t *testing.T) {
120 120
 		Password: "baz-pass",
121 121
 		Email:    "baz@example.com",
122 122
 	}
123
-	configFile.AuthConfigs[IndexServerAddress()] = officialAuth
123
+	configFile.AuthConfigs[INDEXSERVER] = officialAuth
124 124
 
125 125
 	expectedAuths := map[string]cliconfig.AuthConfig{
126 126
 		"registry.example.com": registryAuth,
... ...
@@ -21,9 +21,16 @@ type Options struct {
21 21
 }
22 22
 
23 23
 const (
24
+	DEFAULT_NAMESPACE               = "docker.io"
25
+	DEFAULT_V2_REGISTRY             = "https://registry-1.docker.io"
26
+	DEFAULT_REGISTRY_VERSION_HEADER = "Docker-Distribution-Api-Version"
27
+	DEFAULT_V1_REGISTRY             = "https://index.docker.io"
28
+
29
+	CERTS_DIR = "/etc/docker/certs.d"
30
+
24 31
 	// Only used for user auth + account creation
25
-	INDEXSERVER    = "https://index.docker.io/v1/"
26
-	REGISTRYSERVER = "https://registry-1.docker.io/v2/"
32
+	REGISTRYSERVER = DEFAULT_V2_REGISTRY
33
+	INDEXSERVER    = DEFAULT_V1_REGISTRY + "/v1/"
27 34
 	INDEXNAME      = "docker.io"
28 35
 
29 36
 	// INDEXSERVER = "https://registry-stage.hub.docker.com/v1/"
... ...
@@ -34,14 +41,6 @@ var (
34 34
 	emptyServiceConfig       = NewServiceConfig(nil)
35 35
 )
36 36
 
37
-func IndexServerAddress() string {
38
-	return INDEXSERVER
39
-}
40
-
41
-func IndexServerName() string {
42
-	return INDEXNAME
43
-}
44
-
45 37
 // InstallFlags adds command-line options to the top-level flag parser for
46 38
 // the current process.
47 39
 func (options *Options) InstallFlags() {
... ...
@@ -72,6 +71,7 @@ func (ipnet *netIPNet) UnmarshalJSON(b []byte) (err error) {
72 72
 type ServiceConfig struct {
73 73
 	InsecureRegistryCIDRs []*netIPNet           `json:"InsecureRegistryCIDRs"`
74 74
 	IndexConfigs          map[string]*IndexInfo `json:"IndexConfigs"`
75
+	Mirrors               []string
75 76
 }
76 77
 
77 78
 // NewServiceConfig returns a new instance of ServiceConfig
... ...
@@ -93,6 +93,9 @@ func NewServiceConfig(options *Options) *ServiceConfig {
93 93
 	config := &ServiceConfig{
94 94
 		InsecureRegistryCIDRs: make([]*netIPNet, 0),
95 95
 		IndexConfigs:          make(map[string]*IndexInfo, 0),
96
+		// Hack: Bypass setting the mirrors to IndexConfigs since they are going away
97
+		// and Mirrors are only for the official registry anyways.
98
+		Mirrors: options.Mirrors.GetAll(),
96 99
 	}
97 100
 	// Split --insecure-registry into CIDR and registry-specific settings.
98 101
 	for _, r := range options.InsecureRegistries.GetAll() {
... ...
@@ -113,9 +116,9 @@ func NewServiceConfig(options *Options) *ServiceConfig {
113 113
 	}
114 114
 
115 115
 	// Configure public registry.
116
-	config.IndexConfigs[IndexServerName()] = &IndexInfo{
117
-		Name:     IndexServerName(),
118
-		Mirrors:  options.Mirrors.GetAll(),
116
+	config.IndexConfigs[INDEXNAME] = &IndexInfo{
117
+		Name:     INDEXNAME,
118
+		Mirrors:  config.Mirrors,
119 119
 		Secure:   true,
120 120
 		Official: true,
121 121
 	}
... ...
@@ -193,8 +196,8 @@ func ValidateMirror(val string) (string, error) {
193 193
 // ValidateIndexName validates an index name.
194 194
 func ValidateIndexName(val string) (string, error) {
195 195
 	// 'index.docker.io' => 'docker.io'
196
-	if val == "index."+IndexServerName() {
197
-		val = IndexServerName()
196
+	if val == "index."+INDEXNAME {
197
+		val = INDEXNAME
198 198
 	}
199 199
 	if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") {
200 200
 		return "", fmt.Errorf("Invalid index name (%s). Cannot begin or end with a hyphen.", val)
... ...
@@ -264,7 +267,7 @@ func (config *ServiceConfig) NewIndexInfo(indexName string) (*IndexInfo, error)
264 264
 // index as the AuthConfig key, and uses the (host)name[:port] for private indexes.
265 265
 func (index *IndexInfo) GetAuthConfigKey() string {
266 266
 	if index.Official {
267
-		return IndexServerAddress()
267
+		return INDEXSERVER
268 268
 	}
269 269
 	return index.Name
270 270
 }
... ...
@@ -277,7 +280,7 @@ func splitReposName(reposName string) (string, string) {
277 277
 		!strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") {
278 278
 		// This is a Docker Index repos (ex: samalba/hipache or ubuntu)
279 279
 		// 'docker.io'
280
-		indexName = IndexServerName()
280
+		indexName = INDEXNAME
281 281
 		remoteName = reposName
282 282
 	} else {
283 283
 		indexName = nameParts[0]
... ...
@@ -1,6 +1,7 @@
1 1
 package registry
2 2
 
3 3
 import (
4
+	"crypto/tls"
4 5
 	"encoding/json"
5 6
 	"fmt"
6 7
 	"io/ioutil"
... ...
@@ -12,6 +13,7 @@ import (
12 12
 	"github.com/Sirupsen/logrus"
13 13
 	"github.com/docker/distribution/registry/api/v2"
14 14
 	"github.com/docker/distribution/registry/client/transport"
15
+	"github.com/docker/docker/pkg/tlsconfig"
15 16
 )
16 17
 
17 18
 // for mocking in unit tests
... ...
@@ -44,7 +46,9 @@ func scanForAPIVersion(address string) (string, APIVersion) {
44 44
 // NewEndpoint parses the given address to return a registry endpoint.
45 45
 func NewEndpoint(index *IndexInfo, metaHeaders http.Header) (*Endpoint, error) {
46 46
 	// *TODO: Allow per-registry configuration of endpoints.
47
-	endpoint, err := newEndpoint(index.GetAuthConfigKey(), index.Secure, metaHeaders)
47
+	tlsConfig := tlsconfig.ServerDefault
48
+	tlsConfig.InsecureSkipVerify = !index.Secure
49
+	endpoint, err := newEndpoint(index.GetAuthConfigKey(), &tlsConfig, metaHeaders)
48 50
 	if err != nil {
49 51
 		return nil, err
50 52
 	}
... ...
@@ -82,7 +86,7 @@ func validateEndpoint(endpoint *Endpoint) error {
82 82
 	return nil
83 83
 }
84 84
 
85
-func newEndpoint(address string, secure bool, metaHeaders http.Header) (*Endpoint, error) {
85
+func newEndpoint(address string, tlsConfig *tls.Config, metaHeaders http.Header) (*Endpoint, error) {
86 86
 	var (
87 87
 		endpoint       = new(Endpoint)
88 88
 		trimmedAddress string
... ...
@@ -93,13 +97,16 @@ func newEndpoint(address string, secure bool, metaHeaders http.Header) (*Endpoin
93 93
 		address = "https://" + address
94 94
 	}
95 95
 
96
+	endpoint.IsSecure = (tlsConfig == nil || !tlsConfig.InsecureSkipVerify)
97
+
96 98
 	trimmedAddress, endpoint.Version = scanForAPIVersion(address)
97 99
 
98 100
 	if endpoint.URL, err = url.Parse(trimmedAddress); err != nil {
99 101
 		return nil, err
100 102
 	}
101
-	endpoint.IsSecure = secure
102
-	tr := NewTransport(ConnectTimeout, endpoint.IsSecure)
103
+
104
+	// TODO(tiborvass): make sure a ConnectTimeout transport is used
105
+	tr := NewTransport(tlsConfig)
103 106
 	endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(metaHeaders)...))
104 107
 	return endpoint, nil
105 108
 }
... ...
@@ -166,7 +173,7 @@ func (e *Endpoint) Ping() (RegistryInfo, error) {
166 166
 func (e *Endpoint) pingV1() (RegistryInfo, error) {
167 167
 	logrus.Debugf("attempting v1 ping for registry endpoint %s", e)
168 168
 
169
-	if e.String() == IndexServerAddress() {
169
+	if e.String() == INDEXSERVER {
170 170
 		// Skip the check, we know this one is valid
171 171
 		// (and we never want to fallback to http in case of error)
172 172
 		return RegistryInfo{Standalone: false}, nil
... ...
@@ -12,14 +12,14 @@ func TestEndpointParse(t *testing.T) {
12 12
 		str      string
13 13
 		expected string
14 14
 	}{
15
-		{IndexServerAddress(), IndexServerAddress()},
15
+		{INDEXSERVER, INDEXSERVER},
16 16
 		{"http://0.0.0.0:5000/v1/", "http://0.0.0.0:5000/v1/"},
17 17
 		{"http://0.0.0.0:5000/v2/", "http://0.0.0.0:5000/v2/"},
18 18
 		{"http://0.0.0.0:5000", "http://0.0.0.0:5000/v0/"},
19 19
 		{"0.0.0.0:5000", "https://0.0.0.0:5000/v0/"},
20 20
 	}
21 21
 	for _, td := range testData {
22
-		e, err := newEndpoint(td.str, false, nil)
22
+		e, err := newEndpoint(td.str, nil, nil)
23 23
 		if err != nil {
24 24
 			t.Errorf("%q: %s", td.str, err)
25 25
 		}
... ...
@@ -60,7 +60,7 @@ func TestValidateEndpointAmbiguousAPIVersion(t *testing.T) {
60 60
 	testEndpoint := Endpoint{
61 61
 		URL:     testServerURL,
62 62
 		Version: APIVersionUnknown,
63
-		client:  HTTPClient(NewTransport(ConnectTimeout, false)),
63
+		client:  HTTPClient(NewTransport(nil)),
64 64
 	}
65 65
 
66 66
 	if err = validateEndpoint(&testEndpoint); err != nil {
... ...
@@ -2,25 +2,20 @@ package registry
2 2
 
3 3
 import (
4 4
 	"crypto/tls"
5
-	"crypto/x509"
6 5
 	"errors"
7
-	"fmt"
8
-	"io/ioutil"
9 6
 	"net"
10 7
 	"net/http"
11 8
 	"os"
12
-	"path"
13
-	"path/filepath"
14 9
 	"runtime"
15 10
 	"strings"
16
-	"sync"
17 11
 	"time"
18 12
 
19 13
 	"github.com/Sirupsen/logrus"
14
+	"github.com/docker/distribution/registry/api/errcode"
15
+	"github.com/docker/distribution/registry/api/v2"
20 16
 	"github.com/docker/distribution/registry/client/transport"
21 17
 	"github.com/docker/docker/autogen/dockerversion"
22 18
 	"github.com/docker/docker/pkg/parsers/kernel"
23
-	"github.com/docker/docker/pkg/timeoutconn"
24 19
 	"github.com/docker/docker/pkg/tlsconfig"
25 20
 	"github.com/docker/docker/pkg/useragent"
26 21
 )
... ...
@@ -57,135 +52,13 @@ func init() {
57 57
 	dockerUserAgent = useragent.AppendVersions("", httpVersion...)
58 58
 }
59 59
 
60
-type httpsRequestModifier struct {
61
-	mu        sync.Mutex
62
-	tlsConfig *tls.Config
63
-}
64
-
65
-// DRAGONS(tiborvass): If someone wonders why do we set tlsconfig in a roundtrip,
66
-// it's because it's so as to match the current behavior in master: we generate the
67
-// certpool on every-goddam-request. It's not great, but it allows people to just put
68
-// the certs in /etc/docker/certs.d/.../ and let docker "pick it up" immediately. Would
69
-// prefer an fsnotify implementation, but that was out of scope of my refactoring.
70
-func (m *httpsRequestModifier) ModifyRequest(req *http.Request) error {
71
-	var (
72
-		roots   *x509.CertPool
73
-		certs   []tls.Certificate
74
-		hostDir string
75
-	)
76
-
77
-	if req.URL.Scheme == "https" {
78
-		hasFile := func(files []os.FileInfo, name string) bool {
79
-			for _, f := range files {
80
-				if f.Name() == name {
81
-					return true
82
-				}
83
-			}
84
-			return false
85
-		}
86
-
87
-		if runtime.GOOS == "windows" {
88
-			hostDir = path.Join(os.TempDir(), "/docker/certs.d", req.URL.Host)
89
-		} else {
90
-			hostDir = path.Join("/etc/docker/certs.d", req.URL.Host)
91
-		}
92
-		logrus.Debugf("hostDir: %s", hostDir)
93
-		fs, err := ioutil.ReadDir(hostDir)
94
-		if err != nil && !os.IsNotExist(err) {
95
-			return err
96
-		}
97
-
98
-		for _, f := range fs {
99
-			if strings.HasSuffix(f.Name(), ".crt") {
100
-				if roots == nil {
101
-					roots = x509.NewCertPool()
102
-				}
103
-				logrus.Debugf("crt: %s", hostDir+"/"+f.Name())
104
-				data, err := ioutil.ReadFile(filepath.Join(hostDir, f.Name()))
105
-				if err != nil {
106
-					return err
107
-				}
108
-				roots.AppendCertsFromPEM(data)
109
-			}
110
-			if strings.HasSuffix(f.Name(), ".cert") {
111
-				certName := f.Name()
112
-				keyName := certName[:len(certName)-5] + ".key"
113
-				logrus.Debugf("cert: %s", hostDir+"/"+f.Name())
114
-				if !hasFile(fs, keyName) {
115
-					return fmt.Errorf("Missing key %s for certificate %s", keyName, certName)
116
-				}
117
-				cert, err := tls.LoadX509KeyPair(filepath.Join(hostDir, certName), path.Join(hostDir, keyName))
118
-				if err != nil {
119
-					return err
120
-				}
121
-				certs = append(certs, cert)
122
-			}
123
-			if strings.HasSuffix(f.Name(), ".key") {
124
-				keyName := f.Name()
125
-				certName := keyName[:len(keyName)-4] + ".cert"
126
-				logrus.Debugf("key: %s", hostDir+"/"+f.Name())
127
-				if !hasFile(fs, certName) {
128
-					return fmt.Errorf("Missing certificate %s for key %s", certName, keyName)
129
-				}
130
-			}
131
-		}
132
-		m.mu.Lock()
133
-		m.tlsConfig.RootCAs = roots
134
-		m.tlsConfig.Certificates = certs
135
-		m.mu.Unlock()
136
-	}
137
-	return nil
138
-}
139
-
140
-func NewTransport(timeout TimeoutType, secure bool) http.RoundTripper {
141
-	tlsConfig := &tls.Config{
142
-		// Avoid fallback to SSL protocols < TLS1.0
143
-		MinVersion:         tls.VersionTLS10,
144
-		InsecureSkipVerify: !secure,
145
-		CipherSuites:       tlsconfig.DefaultServerAcceptedCiphers,
146
-	}
147
-
148
-	tr := &http.Transport{
149
-		DisableKeepAlives: true,
150
-		Proxy:             http.ProxyFromEnvironment,
151
-		TLSClientConfig:   tlsConfig,
152
-	}
153
-
154
-	switch timeout {
155
-	case ConnectTimeout:
156
-		tr.Dial = func(proto string, addr string) (net.Conn, error) {
157
-			// Set the connect timeout to 30 seconds to allow for slower connection
158
-			// times...
159
-			d := net.Dialer{Timeout: 30 * time.Second, DualStack: true}
160
-
161
-			conn, err := d.Dial(proto, addr)
162
-			if err != nil {
163
-				return nil, err
164
-			}
165
-			// Set the recv timeout to 10 seconds
166
-			conn.SetDeadline(time.Now().Add(10 * time.Second))
167
-			return conn, nil
168
-		}
169
-	case ReceiveTimeout:
170
-		tr.Dial = func(proto string, addr string) (net.Conn, error) {
171
-			d := net.Dialer{DualStack: true}
172
-
173
-			conn, err := d.Dial(proto, addr)
174
-			if err != nil {
175
-				return nil, err
176
-			}
177
-			conn = timeoutconn.New(conn, 1*time.Minute)
178
-			return conn, nil
60
+func hasFile(files []os.FileInfo, name string) bool {
61
+	for _, f := range files {
62
+		if f.Name() == name {
63
+			return true
179 64
 		}
180 65
 	}
181
-
182
-	if secure {
183
-		// note: httpsTransport also handles http transport
184
-		// but for HTTPS, it sets up the certs
185
-		return transport.NewTransport(tr, &httpsRequestModifier{tlsConfig: tlsConfig})
186
-	}
187
-
188
-	return tr
66
+	return false
189 67
 }
190 68
 
191 69
 // DockerHeaders returns request modifiers that ensure requests have
... ...
@@ -202,10 +75,6 @@ func DockerHeaders(metaHeaders http.Header) []transport.RequestModifier {
202 202
 }
203 203
 
204 204
 func HTTPClient(transport http.RoundTripper) *http.Client {
205
-	if transport == nil {
206
-		transport = NewTransport(ConnectTimeout, true)
207
-	}
208
-
209 205
 	return &http.Client{
210 206
 		Transport:     transport,
211 207
 		CheckRedirect: AddRequiredHeadersToRedirectedRequests,
... ...
@@ -245,3 +114,52 @@ func AddRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Reque
245 245
 	}
246 246
 	return nil
247 247
 }
248
+
249
+func shouldV2Fallback(err errcode.Error) bool {
250
+	logrus.Debugf("v2 error: %T %v", err, err)
251
+	switch err.Code {
252
+	case v2.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown:
253
+		return true
254
+	}
255
+	return false
256
+}
257
+
258
+type ErrNoSupport struct{ Err error }
259
+
260
+func (e ErrNoSupport) Error() string {
261
+	if e.Err == nil {
262
+		return "not supported"
263
+	}
264
+	return e.Err.Error()
265
+}
266
+
267
+func ContinueOnError(err error) bool {
268
+	switch v := err.(type) {
269
+	case errcode.Errors:
270
+		return ContinueOnError(v[0])
271
+	case ErrNoSupport:
272
+		return ContinueOnError(v.Err)
273
+	case errcode.Error:
274
+		return shouldV2Fallback(v)
275
+	}
276
+	return false
277
+}
278
+
279
+func NewTransport(tlsConfig *tls.Config) *http.Transport {
280
+	if tlsConfig == nil {
281
+		var cfg = tlsconfig.ServerDefault
282
+		tlsConfig = &cfg
283
+	}
284
+	return &http.Transport{
285
+		Proxy: http.ProxyFromEnvironment,
286
+		Dial: (&net.Dialer{
287
+			Timeout:   30 * time.Second,
288
+			KeepAlive: 30 * time.Second,
289
+			DualStack: true,
290
+		}).Dial,
291
+		TLSHandshakeTimeout: 10 * time.Second,
292
+		TLSClientConfig:     tlsConfig,
293
+		// TODO(dmcgowan): Call close idle connections when complete and use keep alive
294
+		DisableKeepAlives: true,
295
+	}
296
+}
... ...
@@ -165,7 +165,7 @@ func makeHttpsIndex(req string) *IndexInfo {
165 165
 
166 166
 func makePublicIndex() *IndexInfo {
167 167
 	index := &IndexInfo{
168
-		Name:     IndexServerAddress(),
168
+		Name:     INDEXSERVER,
169 169
 		Secure:   true,
170 170
 		Official: true,
171 171
 	}
... ...
@@ -27,7 +27,7 @@ func spawnTestRegistrySession(t *testing.T) *Session {
27 27
 	if err != nil {
28 28
 		t.Fatal(err)
29 29
 	}
30
-	var tr http.RoundTripper = debugTransport{NewTransport(ReceiveTimeout, endpoint.IsSecure), t.Log}
30
+	var tr http.RoundTripper = debugTransport{NewTransport(nil), t.Log}
31 31
 	tr = transport.NewTransport(AuthTransport(tr, authConfig, false), DockerHeaders(nil)...)
32 32
 	client := HTTPClient(tr)
33 33
 	r, err := NewSession(client, authConfig, endpoint)
... ...
@@ -332,7 +332,7 @@ func TestParseRepositoryInfo(t *testing.T) {
332 332
 	expectedRepoInfos := map[string]RepositoryInfo{
333 333
 		"fooo/bar": {
334 334
 			Index: &IndexInfo{
335
-				Name:     IndexServerName(),
335
+				Name:     INDEXNAME,
336 336
 				Official: true,
337 337
 			},
338 338
 			RemoteName:    "fooo/bar",
... ...
@@ -342,7 +342,7 @@ func TestParseRepositoryInfo(t *testing.T) {
342 342
 		},
343 343
 		"library/ubuntu": {
344 344
 			Index: &IndexInfo{
345
-				Name:     IndexServerName(),
345
+				Name:     INDEXNAME,
346 346
 				Official: true,
347 347
 			},
348 348
 			RemoteName:    "library/ubuntu",
... ...
@@ -352,7 +352,7 @@ func TestParseRepositoryInfo(t *testing.T) {
352 352
 		},
353 353
 		"nonlibrary/ubuntu": {
354 354
 			Index: &IndexInfo{
355
-				Name:     IndexServerName(),
355
+				Name:     INDEXNAME,
356 356
 				Official: true,
357 357
 			},
358 358
 			RemoteName:    "nonlibrary/ubuntu",
... ...
@@ -362,7 +362,7 @@ func TestParseRepositoryInfo(t *testing.T) {
362 362
 		},
363 363
 		"ubuntu": {
364 364
 			Index: &IndexInfo{
365
-				Name:     IndexServerName(),
365
+				Name:     INDEXNAME,
366 366
 				Official: true,
367 367
 			},
368 368
 			RemoteName:    "library/ubuntu",
... ...
@@ -372,7 +372,7 @@ func TestParseRepositoryInfo(t *testing.T) {
372 372
 		},
373 373
 		"other/library": {
374 374
 			Index: &IndexInfo{
375
-				Name:     IndexServerName(),
375
+				Name:     INDEXNAME,
376 376
 				Official: true,
377 377
 			},
378 378
 			RemoteName:    "other/library",
... ...
@@ -480,9 +480,9 @@ func TestParseRepositoryInfo(t *testing.T) {
480 480
 			CanonicalName: "localhost/privatebase",
481 481
 			Official:      false,
482 482
 		},
483
-		IndexServerName() + "/public/moonbase": {
483
+		INDEXNAME + "/public/moonbase": {
484 484
 			Index: &IndexInfo{
485
-				Name:     IndexServerName(),
485
+				Name:     INDEXNAME,
486 486
 				Official: true,
487 487
 			},
488 488
 			RemoteName:    "public/moonbase",
... ...
@@ -490,19 +490,9 @@ func TestParseRepositoryInfo(t *testing.T) {
490 490
 			CanonicalName: "docker.io/public/moonbase",
491 491
 			Official:      false,
492 492
 		},
493
-		"index." + IndexServerName() + "/public/moonbase": {
493
+		"index." + INDEXNAME + "/public/moonbase": {
494 494
 			Index: &IndexInfo{
495
-				Name:     IndexServerName(),
496
-				Official: true,
497
-			},
498
-			RemoteName:    "public/moonbase",
499
-			LocalName:     "public/moonbase",
500
-			CanonicalName: "docker.io/public/moonbase",
501
-			Official:      false,
502
-		},
503
-		IndexServerName() + "/public/moonbase": {
504
-			Index: &IndexInfo{
505
-				Name:     IndexServerName(),
495
+				Name:     INDEXNAME,
506 496
 				Official: true,
507 497
 			},
508 498
 			RemoteName:    "public/moonbase",
... ...
@@ -512,17 +502,7 @@ func TestParseRepositoryInfo(t *testing.T) {
512 512
 		},
513 513
 		"ubuntu-12.04-base": {
514 514
 			Index: &IndexInfo{
515
-				Name:     IndexServerName(),
516
-				Official: true,
517
-			},
518
-			RemoteName:    "library/ubuntu-12.04-base",
519
-			LocalName:     "ubuntu-12.04-base",
520
-			CanonicalName: "docker.io/library/ubuntu-12.04-base",
521
-			Official:      true,
522
-		},
523
-		IndexServerName() + "/ubuntu-12.04-base": {
524
-			Index: &IndexInfo{
525
-				Name:     IndexServerName(),
515
+				Name:     INDEXNAME,
526 516
 				Official: true,
527 517
 			},
528 518
 			RemoteName:    "library/ubuntu-12.04-base",
... ...
@@ -530,9 +510,9 @@ func TestParseRepositoryInfo(t *testing.T) {
530 530
 			CanonicalName: "docker.io/library/ubuntu-12.04-base",
531 531
 			Official:      true,
532 532
 		},
533
-		IndexServerName() + "/ubuntu-12.04-base": {
533
+		INDEXNAME + "/ubuntu-12.04-base": {
534 534
 			Index: &IndexInfo{
535
-				Name:     IndexServerName(),
535
+				Name:     INDEXNAME,
536 536
 				Official: true,
537 537
 			},
538 538
 			RemoteName:    "library/ubuntu-12.04-base",
... ...
@@ -540,9 +520,9 @@ func TestParseRepositoryInfo(t *testing.T) {
540 540
 			CanonicalName: "docker.io/library/ubuntu-12.04-base",
541 541
 			Official:      true,
542 542
 		},
543
-		"index." + IndexServerName() + "/ubuntu-12.04-base": {
543
+		"index." + INDEXNAME + "/ubuntu-12.04-base": {
544 544
 			Index: &IndexInfo{
545
-				Name:     IndexServerName(),
545
+				Name:     INDEXNAME,
546 546
 				Official: true,
547 547
 			},
548 548
 			RemoteName:    "library/ubuntu-12.04-base",
... ...
@@ -585,14 +565,14 @@ func TestNewIndexInfo(t *testing.T) {
585 585
 	config := NewServiceConfig(nil)
586 586
 	noMirrors := make([]string, 0)
587 587
 	expectedIndexInfos := map[string]*IndexInfo{
588
-		IndexServerName(): {
589
-			Name:     IndexServerName(),
588
+		INDEXNAME: {
589
+			Name:     INDEXNAME,
590 590
 			Official: true,
591 591
 			Secure:   true,
592 592
 			Mirrors:  noMirrors,
593 593
 		},
594
-		"index." + IndexServerName(): {
595
-			Name:     IndexServerName(),
594
+		"index." + INDEXNAME: {
595
+			Name:     INDEXNAME,
596 596
 			Official: true,
597 597
 			Secure:   true,
598 598
 			Mirrors:  noMirrors,
... ...
@@ -616,14 +596,14 @@ func TestNewIndexInfo(t *testing.T) {
616 616
 	config = makeServiceConfig(publicMirrors, []string{"example.com"})
617 617
 
618 618
 	expectedIndexInfos = map[string]*IndexInfo{
619
-		IndexServerName(): {
620
-			Name:     IndexServerName(),
619
+		INDEXNAME: {
620
+			Name:     INDEXNAME,
621 621
 			Official: true,
622 622
 			Secure:   true,
623 623
 			Mirrors:  publicMirrors,
624 624
 		},
625
-		"index." + IndexServerName(): {
626
-			Name:     IndexServerName(),
625
+		"index." + INDEXNAME: {
626
+			Name:     INDEXNAME,
627 627
 			Official: true,
628 628
 			Secure:   true,
629 629
 			Mirrors:  publicMirrors,
... ...
@@ -880,7 +860,7 @@ func TestIsSecureIndex(t *testing.T) {
880 880
 		insecureRegistries []string
881 881
 		expected           bool
882 882
 	}{
883
-		{IndexServerName(), nil, true},
883
+		{INDEXNAME, nil, true},
884 884
 		{"example.com", []string{}, true},
885 885
 		{"example.com", []string{"example.com"}, false},
886 886
 		{"localhost", []string{"localhost:5000"}, false},
... ...
@@ -1,9 +1,19 @@
1 1
 package registry
2 2
 
3 3
 import (
4
+	"crypto/tls"
5
+	"crypto/x509"
6
+	"fmt"
7
+	"io/ioutil"
4 8
 	"net/http"
9
+	"os"
10
+	"path/filepath"
11
+	"strings"
5 12
 
13
+	"github.com/Sirupsen/logrus"
14
+	"github.com/docker/distribution/registry/client/auth"
6 15
 	"github.com/docker/docker/cliconfig"
16
+	"github.com/docker/docker/pkg/tlsconfig"
7 17
 )
8 18
 
9 19
 type Service struct {
... ...
@@ -25,7 +35,7 @@ func (s *Service) Auth(authConfig *cliconfig.AuthConfig) (string, error) {
25 25
 	addr := authConfig.ServerAddress
26 26
 	if addr == "" {
27 27
 		// Use the official registry address if not specified.
28
-		addr = IndexServerAddress()
28
+		addr = INDEXSERVER
29 29
 	}
30 30
 	index, err := s.ResolveIndex(addr)
31 31
 	if err != nil {
... ...
@@ -69,3 +79,186 @@ func (s *Service) ResolveRepository(name string) (*RepositoryInfo, error) {
69 69
 func (s *Service) ResolveIndex(name string) (*IndexInfo, error) {
70 70
 	return s.Config.NewIndexInfo(name)
71 71
 }
72
+
73
+type APIEndpoint struct {
74
+	Mirror        bool
75
+	URL           string
76
+	Version       APIVersion
77
+	Official      bool
78
+	TrimHostname  bool
79
+	TLSConfig     *tls.Config
80
+	VersionHeader string
81
+	Versions      []auth.APIVersion
82
+}
83
+
84
+func (e APIEndpoint) ToV1Endpoint(metaHeaders http.Header) (*Endpoint, error) {
85
+	return newEndpoint(e.URL, e.TLSConfig, metaHeaders)
86
+}
87
+
88
+func (s *Service) TlsConfig(hostname string) (*tls.Config, error) {
89
+	// we construct a client tls config from server defaults
90
+	// PreferredServerCipherSuites should have no effect
91
+	tlsConfig := tlsconfig.ServerDefault
92
+
93
+	isSecure := s.Config.isSecureIndex(hostname)
94
+
95
+	tlsConfig.InsecureSkipVerify = !isSecure
96
+
97
+	if isSecure {
98
+		hasFile := func(files []os.FileInfo, name string) bool {
99
+			for _, f := range files {
100
+				if f.Name() == name {
101
+					return true
102
+				}
103
+			}
104
+			return false
105
+		}
106
+
107
+		hostDir := filepath.Join(CERTS_DIR, hostname)
108
+		logrus.Debugf("hostDir: %s", hostDir)
109
+		fs, err := ioutil.ReadDir(hostDir)
110
+		if err != nil && !os.IsNotExist(err) {
111
+			return nil, err
112
+		}
113
+
114
+		for _, f := range fs {
115
+			if strings.HasSuffix(f.Name(), ".crt") {
116
+				if tlsConfig.RootCAs == nil {
117
+					// TODO(dmcgowan): Copy system pool
118
+					tlsConfig.RootCAs = x509.NewCertPool()
119
+				}
120
+				logrus.Debugf("crt: %s", filepath.Join(hostDir, f.Name()))
121
+				data, err := ioutil.ReadFile(filepath.Join(hostDir, f.Name()))
122
+				if err != nil {
123
+					return nil, err
124
+				}
125
+				tlsConfig.RootCAs.AppendCertsFromPEM(data)
126
+			}
127
+			if strings.HasSuffix(f.Name(), ".cert") {
128
+				certName := f.Name()
129
+				keyName := certName[:len(certName)-5] + ".key"
130
+				logrus.Debugf("cert: %s", filepath.Join(hostDir, f.Name()))
131
+				if !hasFile(fs, keyName) {
132
+					return nil, fmt.Errorf("Missing key %s for certificate %s", keyName, certName)
133
+				}
134
+				cert, err := tls.LoadX509KeyPair(filepath.Join(hostDir, certName), filepath.Join(hostDir, keyName))
135
+				if err != nil {
136
+					return nil, err
137
+				}
138
+				tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
139
+			}
140
+			if strings.HasSuffix(f.Name(), ".key") {
141
+				keyName := f.Name()
142
+				certName := keyName[:len(keyName)-4] + ".cert"
143
+				logrus.Debugf("key: %s", filepath.Join(hostDir, f.Name()))
144
+				if !hasFile(fs, certName) {
145
+					return nil, fmt.Errorf("Missing certificate %s for key %s", certName, keyName)
146
+				}
147
+			}
148
+		}
149
+	}
150
+
151
+	return &tlsConfig, nil
152
+}
153
+
154
+func (s *Service) LookupEndpoints(repoName string) (endpoints []APIEndpoint, err error) {
155
+	var cfg = tlsconfig.ServerDefault
156
+	tlsConfig := &cfg
157
+	if strings.HasPrefix(repoName, DEFAULT_NAMESPACE+"/") {
158
+		// v2 mirrors
159
+		for _, mirror := range s.Config.Mirrors {
160
+			endpoints = append(endpoints, APIEndpoint{
161
+				URL: mirror,
162
+				// guess mirrors are v2
163
+				Version:      APIVersion2,
164
+				Mirror:       true,
165
+				TrimHostname: true,
166
+				TLSConfig:    tlsConfig,
167
+			})
168
+		}
169
+		// v2 registry
170
+		endpoints = append(endpoints, APIEndpoint{
171
+			URL:          DEFAULT_V2_REGISTRY,
172
+			Version:      APIVersion2,
173
+			Official:     true,
174
+			TrimHostname: true,
175
+			TLSConfig:    tlsConfig,
176
+		})
177
+		// v1 mirrors
178
+		// TODO(tiborvass): shouldn't we remove v1 mirrors from here, since v1 mirrors are kinda special?
179
+		for _, mirror := range s.Config.Mirrors {
180
+			endpoints = append(endpoints, APIEndpoint{
181
+				URL: mirror,
182
+				// guess mirrors are v1
183
+				Version:      APIVersion1,
184
+				Mirror:       true,
185
+				TrimHostname: true,
186
+				TLSConfig:    tlsConfig,
187
+			})
188
+		}
189
+		// v1 registry
190
+		endpoints = append(endpoints, APIEndpoint{
191
+			URL:          DEFAULT_V1_REGISTRY,
192
+			Version:      APIVersion1,
193
+			Official:     true,
194
+			TrimHostname: true,
195
+			TLSConfig:    tlsConfig,
196
+		})
197
+		return endpoints, nil
198
+	}
199
+
200
+	slashIndex := strings.IndexRune(repoName, '/')
201
+	if slashIndex <= 0 {
202
+		return nil, fmt.Errorf("invalid repo name: missing '/':  %s", repoName)
203
+	}
204
+	hostname := repoName[:slashIndex]
205
+
206
+	tlsConfig, err = s.TlsConfig(hostname)
207
+	if err != nil {
208
+		return nil, err
209
+	}
210
+	isSecure := !tlsConfig.InsecureSkipVerify
211
+
212
+	v2Versions := []auth.APIVersion{
213
+		{
214
+			Type:    "registry",
215
+			Version: "2.0",
216
+		},
217
+	}
218
+	endpoints = []APIEndpoint{
219
+		{
220
+			URL:           "https://" + hostname,
221
+			Version:       APIVersion2,
222
+			TrimHostname:  true,
223
+			TLSConfig:     tlsConfig,
224
+			VersionHeader: DEFAULT_REGISTRY_VERSION_HEADER,
225
+			Versions:      v2Versions,
226
+		},
227
+		{
228
+			URL:          "https://" + hostname,
229
+			Version:      APIVersion1,
230
+			TrimHostname: true,
231
+			TLSConfig:    tlsConfig,
232
+		},
233
+	}
234
+
235
+	if !isSecure {
236
+		endpoints = append(endpoints, APIEndpoint{
237
+			URL:          "http://" + hostname,
238
+			Version:      APIVersion2,
239
+			TrimHostname: true,
240
+			// used to check if supposed to be secure via InsecureSkipVerify
241
+			TLSConfig:     tlsConfig,
242
+			VersionHeader: DEFAULT_REGISTRY_VERSION_HEADER,
243
+			Versions:      v2Versions,
244
+		}, APIEndpoint{
245
+			URL:          "http://" + hostname,
246
+			Version:      APIVersion1,
247
+			TrimHostname: true,
248
+			// used to check if supposed to be secure via InsecureSkipVerify
249
+			TLSConfig: tlsConfig,
250
+		})
251
+	}
252
+
253
+	return endpoints, nil
254
+}
... ...
@@ -98,7 +98,7 @@ func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) {
98 98
 		return tr.RoundTripper.RoundTrip(orig)
99 99
 	}
100 100
 
101
-	req := transport.CloneRequest(orig)
101
+	req := cloneRequest(orig)
102 102
 	tr.mu.Lock()
103 103
 	tr.modReq[orig] = req
104 104
 	tr.mu.Unlock()
... ...
@@ -164,12 +164,11 @@ func NewSession(client *http.Client, authConfig *cliconfig.AuthConfig, endpoint
164 164
 
165 165
 	// If we're working with a standalone private registry over HTTPS, send Basic Auth headers
166 166
 	// alongside all our requests.
167
-	if endpoint.VersionString(1) != IndexServerAddress() && endpoint.URL.Scheme == "https" {
167
+	if endpoint.VersionString(1) != INDEXSERVER && endpoint.URL.Scheme == "https" {
168 168
 		info, err := endpoint.Ping()
169 169
 		if err != nil {
170 170
 			return nil, err
171 171
 		}
172
-
173 172
 		if info.Standalone && authConfig != nil {
174 173
 			logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String())
175 174
 			alwaysSetBasicAuth = true
... ...
@@ -265,7 +264,7 @@ func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io
265 265
 	if err != nil {
266 266
 		return nil, fmt.Errorf("Error while getting from the server: %v", err)
267 267
 	}
268
-	// TODO: why are we doing retries at this level?
268
+	// TODO(tiborvass): why are we doing retries at this level?
269 269
 	// These retries should be generic to both v1 and v2
270 270
 	for i := 1; i <= retries; i++ {
271 271
 		statusCode = 0
... ...
@@ -432,7 +431,7 @@ func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) {
432 432
 	}
433 433
 
434 434
 	// Forge a better object from the retrieved data
435
-	imgsData := make(map[string]*ImgData)
435
+	imgsData := make(map[string]*ImgData, len(remoteChecksums))
436 436
 	for _, elem := range remoteChecksums {
437 437
 		imgsData[elem.ID] = elem
438 438
 	}
439 439
deleted file mode 100644
... ...
@@ -1,414 +0,0 @@
1
-package registry
2
-
3
-import (
4
-	"bytes"
5
-	"encoding/json"
6
-	"fmt"
7
-	"io"
8
-	"io/ioutil"
9
-	"net/http"
10
-	"strconv"
11
-
12
-	"github.com/Sirupsen/logrus"
13
-	"github.com/docker/distribution/digest"
14
-	"github.com/docker/distribution/registry/api/v2"
15
-	"github.com/docker/docker/pkg/httputils"
16
-)
17
-
18
-const DockerDigestHeader = "Docker-Content-Digest"
19
-
20
-func getV2Builder(e *Endpoint) *v2.URLBuilder {
21
-	if e.URLBuilder == nil {
22
-		e.URLBuilder = v2.NewURLBuilder(e.URL)
23
-	}
24
-	return e.URLBuilder
25
-}
26
-
27
-func (r *Session) V2RegistryEndpoint(index *IndexInfo) (ep *Endpoint, err error) {
28
-	// TODO check if should use Mirror
29
-	if index.Official {
30
-		ep, err = newEndpoint(REGISTRYSERVER, true, nil)
31
-		if err != nil {
32
-			return
33
-		}
34
-		err = validateEndpoint(ep)
35
-		if err != nil {
36
-			return
37
-		}
38
-	} else if r.indexEndpoint.String() == index.GetAuthConfigKey() {
39
-		ep = r.indexEndpoint
40
-	} else {
41
-		ep, err = NewEndpoint(index, nil)
42
-		if err != nil {
43
-			return
44
-		}
45
-	}
46
-
47
-	ep.URLBuilder = v2.NewURLBuilder(ep.URL)
48
-	return
49
-}
50
-
51
-// GetV2Authorization gets the authorization needed to the given image
52
-// If readonly access is requested, then the authorization may
53
-// only be used for Get operations.
54
-func (r *Session) GetV2Authorization(ep *Endpoint, imageName string, readOnly bool) (auth *RequestAuthorization, err error) {
55
-	scopes := []string{"pull"}
56
-	if !readOnly {
57
-		scopes = append(scopes, "push")
58
-	}
59
-
60
-	logrus.Debugf("Getting authorization for %s %s", imageName, scopes)
61
-	return NewRequestAuthorization(r.GetAuthConfig(true), ep, "repository", imageName, scopes), nil
62
-}
63
-
64
-//
65
-// 1) Check if TarSum of each layer exists /v2/
66
-//  1.a) if 200, continue
67
-//  1.b) if 300, then push the
68
-//  1.c) if anything else, err
69
-// 2) PUT the created/signed manifest
70
-//
71
-
72
-// GetV2ImageManifest simply fetches the bytes of a manifest and the remote
73
-// digest, if available in the request. Note that the application shouldn't
74
-// rely on the untrusted remoteDigest, and should also verify against a
75
-// locally provided digest, if applicable.
76
-func (r *Session) GetV2ImageManifest(ep *Endpoint, imageName, tagName string, auth *RequestAuthorization) (remoteDigest digest.Digest, p []byte, err error) {
77
-	routeURL, err := getV2Builder(ep).BuildManifestURL(imageName, tagName)
78
-	if err != nil {
79
-		return "", nil, err
80
-	}
81
-
82
-	method := "GET"
83
-	logrus.Debugf("[registry] Calling %q %s", method, routeURL)
84
-
85
-	req, err := http.NewRequest(method, routeURL, nil)
86
-	if err != nil {
87
-		return "", nil, err
88
-	}
89
-
90
-	if err := auth.Authorize(req); err != nil {
91
-		return "", nil, err
92
-	}
93
-
94
-	res, err := r.client.Do(req)
95
-	if err != nil {
96
-		return "", nil, err
97
-	}
98
-	defer res.Body.Close()
99
-
100
-	if res.StatusCode != 200 {
101
-		if res.StatusCode == 401 {
102
-			return "", nil, errLoginRequired
103
-		} else if res.StatusCode == 404 {
104
-			return "", nil, ErrDoesNotExist
105
-		}
106
-		return "", nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s:%s", res.StatusCode, imageName, tagName), res)
107
-	}
108
-
109
-	p, err = ioutil.ReadAll(res.Body)
110
-	if err != nil {
111
-		return "", nil, fmt.Errorf("Error while reading the http response: %s", err)
112
-	}
113
-
114
-	dgstHdr := res.Header.Get(DockerDigestHeader)
115
-	if dgstHdr != "" {
116
-		remoteDigest, err = digest.ParseDigest(dgstHdr)
117
-		if err != nil {
118
-			// NOTE(stevvooe): Including the remote digest is optional. We
119
-			// don't need to verify against it, but it is good practice.
120
-			remoteDigest = ""
121
-			logrus.Debugf("error parsing remote digest when fetching %v: %v", routeURL, err)
122
-		}
123
-	}
124
-
125
-	return
126
-}
127
-
128
-// - Succeeded to head image blob (already exists)
129
-// - Failed with no error (continue to Push the Blob)
130
-// - Failed with error
131
-func (r *Session) HeadV2ImageBlob(ep *Endpoint, imageName string, dgst digest.Digest, auth *RequestAuthorization) (bool, error) {
132
-	routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, dgst)
133
-	if err != nil {
134
-		return false, err
135
-	}
136
-
137
-	method := "HEAD"
138
-	logrus.Debugf("[registry] Calling %q %s", method, routeURL)
139
-
140
-	req, err := http.NewRequest(method, routeURL, nil)
141
-	if err != nil {
142
-		return false, err
143
-	}
144
-	if err := auth.Authorize(req); err != nil {
145
-		return false, err
146
-	}
147
-	res, err := r.client.Do(req)
148
-	if err != nil {
149
-		return false, err
150
-	}
151
-	res.Body.Close() // close early, since we're not needing a body on this call .. yet?
152
-	switch {
153
-	case res.StatusCode >= 200 && res.StatusCode < 400:
154
-		// return something indicating no push needed
155
-		return true, nil
156
-	case res.StatusCode == 401:
157
-		return false, errLoginRequired
158
-	case res.StatusCode == 404:
159
-		// return something indicating blob push needed
160
-		return false, nil
161
-	}
162
-
163
-	return false, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying head request for %s - %s", res.StatusCode, imageName, dgst), res)
164
-}
165
-
166
-func (r *Session) GetV2ImageBlob(ep *Endpoint, imageName string, dgst digest.Digest, blobWrtr io.Writer, auth *RequestAuthorization) error {
167
-	routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, dgst)
168
-	if err != nil {
169
-		return err
170
-	}
171
-
172
-	method := "GET"
173
-	logrus.Debugf("[registry] Calling %q %s", method, routeURL)
174
-	req, err := http.NewRequest(method, routeURL, nil)
175
-	if err != nil {
176
-		return err
177
-	}
178
-	if err := auth.Authorize(req); err != nil {
179
-		return err
180
-	}
181
-	res, err := r.client.Do(req)
182
-	if err != nil {
183
-		return err
184
-	}
185
-	defer res.Body.Close()
186
-	if res.StatusCode != 200 {
187
-		if res.StatusCode == 401 {
188
-			return errLoginRequired
189
-		}
190
-		return httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob", res.StatusCode, imageName), res)
191
-	}
192
-
193
-	_, err = io.Copy(blobWrtr, res.Body)
194
-	return err
195
-}
196
-
197
-func (r *Session) GetV2ImageBlobReader(ep *Endpoint, imageName string, dgst digest.Digest, auth *RequestAuthorization) (io.ReadCloser, int64, error) {
198
-	routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, dgst)
199
-	if err != nil {
200
-		return nil, 0, err
201
-	}
202
-
203
-	method := "GET"
204
-	logrus.Debugf("[registry] Calling %q %s", method, routeURL)
205
-	req, err := http.NewRequest(method, routeURL, nil)
206
-	if err != nil {
207
-		return nil, 0, err
208
-	}
209
-	if err := auth.Authorize(req); err != nil {
210
-		return nil, 0, err
211
-	}
212
-	res, err := r.client.Do(req)
213
-	if err != nil {
214
-		return nil, 0, err
215
-	}
216
-	if res.StatusCode != 200 {
217
-		if res.StatusCode == 401 {
218
-			return nil, 0, errLoginRequired
219
-		}
220
-		return nil, 0, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob - %s", res.StatusCode, imageName, dgst), res)
221
-	}
222
-	lenStr := res.Header.Get("Content-Length")
223
-	l, err := strconv.ParseInt(lenStr, 10, 64)
224
-	if err != nil {
225
-		return nil, 0, err
226
-	}
227
-
228
-	return res.Body, l, err
229
-}
230
-
231
-// Push the image to the server for storage.
232
-// 'layer' is an uncompressed reader of the blob to be pushed.
233
-// The server will generate it's own checksum calculation.
234
-func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName string, dgst digest.Digest, blobRdr io.Reader, auth *RequestAuthorization) error {
235
-	location, err := r.initiateBlobUpload(ep, imageName, auth)
236
-	if err != nil {
237
-		return err
238
-	}
239
-
240
-	method := "PUT"
241
-	logrus.Debugf("[registry] Calling %q %s", method, location)
242
-	req, err := http.NewRequest(method, location, ioutil.NopCloser(blobRdr))
243
-	if err != nil {
244
-		return err
245
-	}
246
-	queryParams := req.URL.Query()
247
-	queryParams.Add("digest", dgst.String())
248
-	req.URL.RawQuery = queryParams.Encode()
249
-	if err := auth.Authorize(req); err != nil {
250
-		return err
251
-	}
252
-	res, err := r.client.Do(req)
253
-	if err != nil {
254
-		return err
255
-	}
256
-	defer res.Body.Close()
257
-
258
-	if res.StatusCode != 201 {
259
-		if res.StatusCode == 401 {
260
-			return errLoginRequired
261
-		}
262
-		errBody, err := ioutil.ReadAll(res.Body)
263
-		if err != nil {
264
-			return err
265
-		}
266
-		logrus.Debugf("Unexpected response from server: %q %#v", errBody, res.Header)
267
-		return httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s blob - %s", res.StatusCode, imageName, dgst), res)
268
-	}
269
-
270
-	return nil
271
-}
272
-
273
-// initiateBlobUpload gets the blob upload location for the given image name.
274
-func (r *Session) initiateBlobUpload(ep *Endpoint, imageName string, auth *RequestAuthorization) (location string, err error) {
275
-	routeURL, err := getV2Builder(ep).BuildBlobUploadURL(imageName)
276
-	if err != nil {
277
-		return "", err
278
-	}
279
-
280
-	logrus.Debugf("[registry] Calling %q %s", "POST", routeURL)
281
-	req, err := http.NewRequest("POST", routeURL, nil)
282
-	if err != nil {
283
-		return "", err
284
-	}
285
-
286
-	if err := auth.Authorize(req); err != nil {
287
-		return "", err
288
-	}
289
-	res, err := r.client.Do(req)
290
-	if err != nil {
291
-		return "", err
292
-	}
293
-
294
-	if res.StatusCode != http.StatusAccepted {
295
-		if res.StatusCode == http.StatusUnauthorized {
296
-			return "", errLoginRequired
297
-		}
298
-		if res.StatusCode == http.StatusNotFound {
299
-			return "", ErrDoesNotExist
300
-		}
301
-
302
-		errBody, err := ioutil.ReadAll(res.Body)
303
-		if err != nil {
304
-			return "", err
305
-		}
306
-
307
-		logrus.Debugf("Unexpected response from server: %q %#v", errBody, res.Header)
308
-		return "", httputils.NewHTTPRequestError(fmt.Sprintf("Server error: unexpected %d response status trying to initiate upload of %s", res.StatusCode, imageName), res)
309
-	}
310
-
311
-	if location = res.Header.Get("Location"); location == "" {
312
-		return "", fmt.Errorf("registry did not return a Location header for resumable blob upload for image %s", imageName)
313
-	}
314
-
315
-	return
316
-}
317
-
318
-// Finally Push the (signed) manifest of the blobs we've just pushed
319
-func (r *Session) PutV2ImageManifest(ep *Endpoint, imageName, tagName string, signedManifest, rawManifest []byte, auth *RequestAuthorization) (digest.Digest, error) {
320
-	routeURL, err := getV2Builder(ep).BuildManifestURL(imageName, tagName)
321
-	if err != nil {
322
-		return "", err
323
-	}
324
-
325
-	method := "PUT"
326
-	logrus.Debugf("[registry] Calling %q %s", method, routeURL)
327
-	req, err := http.NewRequest(method, routeURL, bytes.NewReader(signedManifest))
328
-	if err != nil {
329
-		return "", err
330
-	}
331
-	if err := auth.Authorize(req); err != nil {
332
-		return "", err
333
-	}
334
-	res, err := r.client.Do(req)
335
-	if err != nil {
336
-		return "", err
337
-	}
338
-	defer res.Body.Close()
339
-
340
-	// All 2xx and 3xx responses can be accepted for a put.
341
-	if res.StatusCode >= 400 {
342
-		if res.StatusCode == 401 {
343
-			return "", errLoginRequired
344
-		}
345
-		errBody, err := ioutil.ReadAll(res.Body)
346
-		if err != nil {
347
-			return "", err
348
-		}
349
-		logrus.Debugf("Unexpected response from server: %q %#v", errBody, res.Header)
350
-		return "", httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s:%s manifest", res.StatusCode, imageName, tagName), res)
351
-	}
352
-
353
-	hdrDigest, err := digest.ParseDigest(res.Header.Get(DockerDigestHeader))
354
-	if err != nil {
355
-		return "", fmt.Errorf("invalid manifest digest from registry: %s", err)
356
-	}
357
-
358
-	dgstVerifier, err := digest.NewDigestVerifier(hdrDigest)
359
-	if err != nil {
360
-		return "", fmt.Errorf("invalid manifest digest from registry: %s", err)
361
-	}
362
-
363
-	dgstVerifier.Write(rawManifest)
364
-
365
-	if !dgstVerifier.Verified() {
366
-		computedDigest, _ := digest.FromBytes(rawManifest)
367
-		return "", fmt.Errorf("unable to verify manifest digest: registry has %q, computed %q", hdrDigest, computedDigest)
368
-	}
369
-
370
-	return hdrDigest, nil
371
-}
372
-
373
-type remoteTags struct {
374
-	Name string   `json:"name"`
375
-	Tags []string `json:"tags"`
376
-}
377
-
378
-// Given a repository name, returns a json array of string tags
379
-func (r *Session) GetV2RemoteTags(ep *Endpoint, imageName string, auth *RequestAuthorization) ([]string, error) {
380
-	routeURL, err := getV2Builder(ep).BuildTagsURL(imageName)
381
-	if err != nil {
382
-		return nil, err
383
-	}
384
-
385
-	method := "GET"
386
-	logrus.Debugf("[registry] Calling %q %s", method, routeURL)
387
-
388
-	req, err := http.NewRequest(method, routeURL, nil)
389
-	if err != nil {
390
-		return nil, err
391
-	}
392
-	if err := auth.Authorize(req); err != nil {
393
-		return nil, err
394
-	}
395
-	res, err := r.client.Do(req)
396
-	if err != nil {
397
-		return nil, err
398
-	}
399
-	defer res.Body.Close()
400
-	if res.StatusCode != 200 {
401
-		if res.StatusCode == 401 {
402
-			return nil, errLoginRequired
403
-		} else if res.StatusCode == 404 {
404
-			return nil, ErrDoesNotExist
405
-		}
406
-		return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s", res.StatusCode, imageName), res)
407
-	}
408
-
409
-	var remote remoteTags
410
-	if err := json.NewDecoder(res.Body).Decode(&remote); err != nil {
411
-		return nil, fmt.Errorf("Error while decoding the http response: %s", err)
412
-	}
413
-	return remote.Tags, nil
414
-}