Add distribution package

Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>

Aaron Lehmann authored on 2015/11/19 07:18:44
Showing 23 changed files
new file mode 100644
@@ -0,0 +1,38 @@
0
+{
1
+   "schemaVersion": 2,
2
+   "name": "library/hello-world",
3
+   "tag": "latest",
4
+   "architecture": "amd64",
5
+   "fsLayers": [
6
+      {
7
+         "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
8
+      },
9
+      {
10
+         "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"
11
+      }
12
+   ],
13
+   "history": [
14
+      {
15
+         "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
16
+      },
17
+      {
18
+         "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n"
19
+      }
20
+   ],
21
+   "signatures": [
22
+      {
23
+         "header": {
24
+            "jwk": {
25
+               "crv": "P-256",
26
+               "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4",
27
+               "kty": "EC",
28
+               "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ",
29
+               "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8"
30
+            },
31
+            "alg": "ES256"
32
+         },
33
+         "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A",
34
+         "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ"
35
+      }
36
+   ]
37
+}
new file mode 100644
@@ -0,0 +1,46 @@
0
+{
1
+   "schemaVersion": 1,
2
+   "name": "library/hello-world",
3
+   "tag": "latest",
4
+   "architecture": "amd64",
5
+   "fsLayers": [
6
+      {
7
+         "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
8
+      },
9
+      {
10
+         "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"
11
+      }
12
+   ],
13
+   "history": [
14
+      {
15
+         "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
16
+      },
17
+      {
18
+         "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n"
19
+      }
20
+   ],
21
+   "fsLayers": [
22
+      {
23
+         "blobSum": "sha256:ffff95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
24
+      },
25
+      {
26
+         "blobSum": "sha256:ffff658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"
27
+      }
28
+   ],
29
+   "signatures": [
30
+      {
31
+         "header": {
32
+            "jwk": {
33
+               "crv": "P-256",
34
+               "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4",
35
+               "kty": "EC",
36
+               "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ",
37
+               "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8"
38
+            },
39
+            "alg": "ES256"
40
+         },
41
+         "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A",
42
+         "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ"
43
+      }
44
+   ]
45
+}
new file mode 100644
@@ -0,0 +1,38 @@
0
+{
1
+   "schemaVersion": 1,
2
+   "name": "library/hello-world",
3
+   "tag": "latest",
4
+   "architecture": "amd64",
5
+   "fsLayers": [
6
+      {
7
+         "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
8
+      },
9
+      {
10
+         "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"
11
+      }
12
+   ],
13
+   "history": [
14
+      {
15
+         "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
16
+      },
17
+      {
18
+         "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n"
19
+      }
20
+   ],
21
+   "signatures": [
22
+      {
23
+         "header": {
24
+            "jwk": {
25
+               "crv": "P-256",
26
+               "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4",
27
+               "kty": "EC",
28
+               "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ",
29
+               "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8"
30
+            },
31
+            "alg": "ES256"
32
+         },
33
+         "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A",
34
+         "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ"
35
+      }
36
+   ]
37
+}
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,100 @@
0
+package metadata
1
+
2
+import (
3
+	"encoding/json"
4
+
5
+	"github.com/docker/distribution/digest"
6
+	"github.com/docker/docker/layer"
7
+)
8
+
9
+// BlobSumService maps layer IDs to a set of known blobsums for
10
+// the layer.
11
+type BlobSumService struct {
12
+	store Store
13
+}
14
+
15
+// maxBlobSums is the number of blobsums to keep per layer DiffID.
16
+const maxBlobSums = 5
17
+
18
+// NewBlobSumService creates a new blobsum mapping service.
19
+func NewBlobSumService(store Store) *BlobSumService {
20
+	return &BlobSumService{
21
+		store: store,
22
+	}
23
+}
24
+
25
+func (blobserv *BlobSumService) diffIDNamespace() string {
26
+	return "blobsum-storage"
27
+}
28
+
29
+func (blobserv *BlobSumService) blobSumNamespace() string {
30
+	return "blobsum-lookup"
31
+}
32
+
33
+func (blobserv *BlobSumService) diffIDKey(diffID layer.DiffID) string {
34
+	return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex()
35
+}
36
+
37
+func (blobserv *BlobSumService) blobSumKey(blobsum digest.Digest) string {
38
+	return string(blobsum.Algorithm()) + "/" + blobsum.Hex()
39
+}
40
+
41
+// GetBlobSums finds the blobsums associated with a layer DiffID.
42
+func (blobserv *BlobSumService) GetBlobSums(diffID layer.DiffID) ([]digest.Digest, error) {
43
+	jsonBytes, err := blobserv.store.Get(blobserv.diffIDNamespace(), blobserv.diffIDKey(diffID))
44
+	if err != nil {
45
+		return nil, err
46
+	}
47
+
48
+	var blobsums []digest.Digest
49
+	if err := json.Unmarshal(jsonBytes, &blobsums); err != nil {
50
+		return nil, err
51
+	}
52
+
53
+	return blobsums, nil
54
+}
55
+
56
+// GetDiffID finds a layer DiffID from a blobsum hash.
57
+func (blobserv *BlobSumService) GetDiffID(blobsum digest.Digest) (layer.DiffID, error) {
58
+	diffIDBytes, err := blobserv.store.Get(blobserv.blobSumNamespace(), blobserv.blobSumKey(blobsum))
59
+	if err != nil {
60
+		return layer.DiffID(""), err
61
+	}
62
+
63
+	return layer.DiffID(diffIDBytes), nil
64
+}
65
+
66
+// Add associates a blobsum with a layer DiffID. If too many blobsums are
67
+// present, the oldest one is dropped.
68
+func (blobserv *BlobSumService) Add(diffID layer.DiffID, blobsum digest.Digest) error {
69
+	oldBlobSums, err := blobserv.GetBlobSums(diffID)
70
+	if err != nil {
71
+		oldBlobSums = nil
72
+	}
73
+	newBlobSums := make([]digest.Digest, 0, len(oldBlobSums)+1)
74
+
75
+	// Copy all other blobsums to new slice
76
+	for _, oldSum := range oldBlobSums {
77
+		if oldSum != blobsum {
78
+			newBlobSums = append(newBlobSums, oldSum)
79
+		}
80
+	}
81
+
82
+	newBlobSums = append(newBlobSums, blobsum)
83
+
84
+	if len(newBlobSums) > maxBlobSums {
85
+		newBlobSums = newBlobSums[len(newBlobSums)-maxBlobSums:]
86
+	}
87
+
88
+	jsonBytes, err := json.Marshal(newBlobSums)
89
+	if err != nil {
90
+		return err
91
+	}
92
+
93
+	err = blobserv.store.Set(blobserv.diffIDNamespace(), blobserv.diffIDKey(diffID), jsonBytes)
94
+	if err != nil {
95
+		return err
96
+	}
97
+
98
+	return blobserv.store.Set(blobserv.blobSumNamespace(), blobserv.blobSumKey(blobsum), []byte(diffID))
99
+}
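
A minimal usage sketch of the blobsum mapping service above, backed by the filesystem-based metadata store defined later in this package. The directory path and the digests below are placeholders, not values from this commit:

package main

import (
	"fmt"
	"log"

	"github.com/docker/distribution/digest"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/layer"
)

func main() {
	// Back the service with the filesystem-based metadata store from this package.
	store, err := metadata.NewFSMetadataStore("/var/lib/docker/distribution") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	blobSums := metadata.NewBlobSumService(store)

	diffID := layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
	blobsum := digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")

	// Add records the association in both directions:
	// DiffID -> known blobsums, and blobsum -> DiffID.
	if err := blobSums.Add(diffID, blobsum); err != nil {
		log.Fatal(err)
	}

	sums, err := blobSums.GetBlobSums(diffID) // most recent blobsums, capped at maxBlobSums
	if err != nil {
		log.Fatal(err)
	}
	back, err := blobSums.GetDiffID(blobsum) // reverse lookup
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sums, back)
}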
new file mode 100644
@@ -0,0 +1,105 @@
0
+package metadata
1
+
2
+import (
3
+	"io/ioutil"
4
+	"os"
5
+	"reflect"
6
+	"testing"
7
+
8
+	"github.com/docker/distribution/digest"
9
+	"github.com/docker/docker/layer"
10
+)
11
+
12
+func TestBlobSumService(t *testing.T) {
13
+	tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test")
14
+	if err != nil {
15
+		t.Fatalf("could not create temp dir: %v", err)
16
+	}
17
+	defer os.RemoveAll(tmpDir)
18
+
19
+	metadataStore, err := NewFSMetadataStore(tmpDir)
20
+	if err != nil {
21
+		t.Fatalf("could not create metadata store: %v", err)
22
+	}
23
+	blobSumService := NewBlobSumService(metadataStore)
24
+
25
+	testVectors := []struct {
26
+		diffID   layer.DiffID
27
+		blobsums []digest.Digest
28
+	}{
29
+		{
30
+			diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
31
+			blobsums: []digest.Digest{
32
+				digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"),
33
+			},
34
+		},
35
+		{
36
+			diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
37
+			blobsums: []digest.Digest{
38
+				digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"),
39
+				digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e"),
40
+			},
41
+		},
42
+		{
43
+			diffID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"),
44
+			blobsums: []digest.Digest{
45
+				digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"),
46
+				digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e"),
47
+				digest.Digest("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"),
48
+				digest.Digest("sha256:8902a7ca89aabbb868835260912159026637634090dd8899eee969523252236e"),
49
+				digest.Digest("sha256:c84364306344ccc48532c52ff5209236273525231dddaaab53262322352883aa"),
50
+				digest.Digest("sha256:aa7583bbc87532a8352bbb72520a821b3623523523a8352523a52352aaa888fe"),
51
+			},
52
+		},
53
+	}
54
+
55
+	// Set some associations
56
+	for _, vec := range testVectors {
57
+		for _, blobsum := range vec.blobsums {
58
+			err := blobSumService.Add(vec.diffID, blobsum)
59
+			if err != nil {
60
+				t.Fatalf("error calling Set: %v", err)
61
+			}
62
+		}
63
+	}
64
+
65
+	// Check the correct values are read back
66
+	for _, vec := range testVectors {
67
+		blobsums, err := blobSumService.GetBlobSums(vec.diffID)
68
+		if err != nil {
69
+			t.Fatalf("error calling Get: %v", err)
70
+		}
71
+		expectedBlobsums := len(vec.blobsums)
72
+		if expectedBlobsums > 5 {
73
+			expectedBlobsums = 5
74
+		}
75
+		if !reflect.DeepEqual(blobsums, vec.blobsums[len(vec.blobsums)-expectedBlobsums:len(vec.blobsums)]) {
76
+			t.Fatal("Get returned incorrect layer ID")
77
+		}
78
+	}
79
+
80
+	// Test GetBlobSums on a nonexistent entry
81
+	_, err = blobSumService.GetBlobSums(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
82
+	if err == nil {
83
+		t.Fatal("expected error looking up nonexistent entry")
84
+	}
85
+
86
+	// Test GetDiffID on a nonexistent entry
87
+	_, err = blobSumService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
88
+	if err == nil {
89
+		t.Fatal("expected error looking up nonexistent entry")
90
+	}
91
+
92
+	// Overwrite one of the entries and read it back
93
+	err = blobSumService.Add(testVectors[1].diffID, testVectors[0].blobsums[0])
94
+	if err != nil {
95
+		t.Fatalf("error calling Add: %v", err)
96
+	}
97
+	diffID, err := blobSumService.GetDiffID(testVectors[0].blobsums[0])
98
+	if err != nil {
99
+		t.Fatalf("error calling GetDiffID: %v", err)
100
+	}
101
+	if diffID != testVectors[1].diffID {
102
+		t.Fatal("GetDiffID returned incorrect diffID")
103
+	}
104
+}
new file mode 100644
@@ -0,0 +1,65 @@
0
+package metadata
1
+
2
+import (
3
+	"io/ioutil"
4
+	"os"
5
+	"path/filepath"
6
+	"sync"
7
+)
8
+
9
+// Store implements a K/V store for mapping distribution-related IDs
10
+// to on-disk layer IDs and image IDs. The namespace identifies the type of
11
+// mapping (e.g. "v1id" or "blobsum-lookup"). Store implementations must be goroutine-safe.
12
+type Store interface {
13
+	// Get retrieves data by namespace and key.
14
+	Get(namespace string, key string) ([]byte, error)
15
+	// Set writes data indexed by namespace and key.
16
+	Set(namespace, key string, value []byte) error
17
+}
18
+
19
+// FSMetadataStore uses the filesystem to associate metadata with layer and
20
+// image IDs.
21
+type FSMetadataStore struct {
22
+	sync.RWMutex
23
+	basePath string
24
+}
25
+
26
+// NewFSMetadataStore creates a new filesystem-based metadata store.
27
+func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) {
28
+	if err := os.MkdirAll(basePath, 0700); err != nil {
29
+		return nil, err
30
+	}
31
+	return &FSMetadataStore{
32
+		basePath: basePath,
33
+	}, nil
34
+}
35
+
36
+func (store *FSMetadataStore) path(namespace, key string) string {
37
+	return filepath.Join(store.basePath, namespace, key)
38
+}
39
+
40
+// Get retrieves data by namespace and key. The data is read from a file named
41
+// after the key, stored in the namespace's directory.
42
+func (store *FSMetadataStore) Get(namespace string, key string) ([]byte, error) {
43
+	store.RLock()
44
+	defer store.RUnlock()
45
+
46
+	return ioutil.ReadFile(store.path(namespace, key))
47
+}
48
+
49
+// Set writes data indexed by namespace and key. The data is written to a file
50
+// named after the key, stored in the namespace's directory.
51
+func (store *FSMetadataStore) Set(namespace, key string, value []byte) error {
52
+	store.Lock()
53
+	defer store.Unlock()
54
+
55
+	path := store.path(namespace, key)
56
+	tempFilePath := path + ".tmp"
57
+	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
58
+		return err
59
+	}
60
+	if err := ioutil.WriteFile(tempFilePath, value, 0644); err != nil {
61
+		return err
62
+	}
63
+	return os.Rename(tempFilePath, path)
64
+}
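
Because Store is a two-method interface, tests or alternative backends can stand in for the filesystem implementation. A sketch of a hypothetical in-memory Store (not part of this commit); like the interface's documented contract, it must stay safe for concurrent use:

package metadata

import (
	"fmt"
	"sync"
)

// memStore is a hypothetical in-memory Store implementation, useful in tests
// where writing to disk is unnecessary.
type memStore struct {
	mu   sync.Mutex
	data map[string][]byte
}

func newMemStore() *memStore {
	return &memStore{data: make(map[string][]byte)}
}

// Get returns the value stored under namespace/key, or an error if absent.
func (s *memStore) Get(namespace string, key string) ([]byte, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	v, ok := s.data[namespace+"/"+key]
	if !ok {
		return nil, fmt.Errorf("no such key: %s/%s", namespace, key)
	}
	return v, nil
}

// Set stores value under namespace/key, overwriting any previous value.
func (s *memStore) Set(namespace, key string, value []byte) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.data[namespace+"/"+key] = value
	return nil
}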
new file mode 100644
@@ -0,0 +1,44 @@
0
+package metadata
1
+
2
+import (
3
+	"github.com/docker/docker/image/v1"
4
+	"github.com/docker/docker/layer"
5
+)
6
+
7
+// V1IDService maps v1 IDs to layers on disk.
8
+type V1IDService struct {
9
+	store Store
10
+}
11
+
12
+// NewV1IDService creates a new V1 ID mapping service.
13
+func NewV1IDService(store Store) *V1IDService {
14
+	return &V1IDService{
15
+		store: store,
16
+	}
17
+}
18
+
19
+// namespace returns the namespace used by this service.
20
+func (idserv *V1IDService) namespace() string {
21
+	return "v1id"
22
+}
23
+
24
+// Get finds a layer by its V1 ID.
25
+func (idserv *V1IDService) Get(v1ID, registry string) (layer.ChainID, error) {
26
+	if err := v1.ValidateID(v1ID); err != nil {
27
+		return layer.ChainID(""), err
28
+	}
29
+
30
+	idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID)
31
+	if err != nil {
32
+		return layer.ChainID(""), err
33
+	}
34
+	return layer.ChainID(idBytes), nil
35
+}
36
+
37
+// Set associates an image with a V1 ID.
38
+func (idserv *V1IDService) Set(v1ID, registry string, id layer.ChainID) error {
39
+	if err := v1.ValidateID(v1ID); err != nil {
40
+		return err
41
+	}
42
+	return idserv.store.Set(idserv.namespace(), registry+","+v1ID, []byte(id))
43
+}
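
A short usage sketch for the v1 ID mapping above. The registry name is part of the key because the same v1 ID may refer to different content on different registries; the path, IDs, and registry name here are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/layer"
)

func main() {
	store, err := metadata.NewFSMetadataStore("/var/lib/docker/distribution") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	v1IDs := metadata.NewV1IDService(store)

	v1ID := "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e"
	chainID := layer.ChainID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")

	// Record that this v1 ID, as seen on this registry, maps to the given layer chain.
	if err := v1IDs.Set(v1ID, "registry-1.docker.io", chainID); err != nil {
		log.Fatal(err)
	}

	got, err := v1IDs.Get(v1ID, "registry-1.docker.io")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(got == chainID) // true
}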
new file mode 100644
@@ -0,0 +1,83 @@
0
+package metadata
1
+
2
+import (
3
+	"io/ioutil"
4
+	"os"
5
+	"testing"
6
+
7
+	"github.com/docker/docker/layer"
8
+)
9
+
10
+func TestV1IDService(t *testing.T) {
11
+	tmpDir, err := ioutil.TempDir("", "v1-id-service-test")
12
+	if err != nil {
13
+		t.Fatalf("could not create temp dir: %v", err)
14
+	}
15
+	defer os.RemoveAll(tmpDir)
16
+
17
+	metadataStore, err := NewFSMetadataStore(tmpDir)
18
+	if err != nil {
19
+		t.Fatalf("could not create metadata store: %v", err)
20
+	}
21
+	v1IDService := NewV1IDService(metadataStore)
22
+
23
+	testVectors := []struct {
24
+		registry string
25
+		v1ID     string
26
+		layerID  layer.ChainID
27
+	}{
28
+		{
29
+			registry: "registry1",
30
+			v1ID:     "f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937",
31
+			layerID:  layer.ChainID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
32
+		},
33
+		{
34
+			registry: "registry2",
35
+			v1ID:     "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e",
36
+			layerID:  layer.ChainID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
37
+		},
38
+		{
39
+			registry: "registry1",
40
+			v1ID:     "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e",
41
+			layerID:  layer.ChainID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"),
42
+		},
43
+	}
44
+
45
+	// Set some associations
46
+	for _, vec := range testVectors {
47
+		err := v1IDService.Set(vec.v1ID, vec.registry, vec.layerID)
48
+		if err != nil {
49
+			t.Fatalf("error calling Set: %v", err)
50
+		}
51
+	}
52
+
53
+	// Check the correct values are read back
54
+	for _, vec := range testVectors {
55
+		layerID, err := v1IDService.Get(vec.v1ID, vec.registry)
56
+		if err != nil {
57
+			t.Fatalf("error calling Get: %v", err)
58
+		}
59
+		if layerID != vec.layerID {
60
+			t.Fatal("Get returned incorrect layer ID")
61
+		}
62
+	}
63
+
64
+	// Test Get on a nonexistent entry
65
+	_, err = v1IDService.Get("82379823067823853223359023576437723560923756b03560378f4497753917", "registry1")
66
+	if err == nil {
67
+		t.Fatal("expected error looking up nonexistent entry")
68
+	}
69
+
70
+	// Overwrite one of the entries and read it back
71
+	err = v1IDService.Set(testVectors[0].v1ID, testVectors[0].registry, testVectors[1].layerID)
72
+	if err != nil {
73
+		t.Fatalf("error calling Set: %v", err)
74
+	}
75
+	layerID, err := v1IDService.Get(testVectors[0].v1ID, testVectors[0].registry)
76
+	if err != nil {
77
+		t.Fatalf("error calling Get: %v", err)
78
+	}
79
+	if layerID != testVectors[1].layerID {
80
+		t.Fatal("Get returned incorrect layer ID")
81
+	}
82
+}
new file mode 100644
@@ -0,0 +1,51 @@
0
+package distribution
1
+
2
+import (
3
+	"sync"
4
+
5
+	"github.com/docker/docker/pkg/broadcaster"
6
+)
7
+
8
+// A Pool manages concurrent pulls. It deduplicates in-progress downloads.
9
+type Pool struct {
10
+	sync.Mutex
11
+	pullingPool map[string]*broadcaster.Buffered
12
+}
13
+
14
+// NewPool creates a new Pool.
15
+func NewPool() *Pool {
16
+	return &Pool{
17
+		pullingPool: make(map[string]*broadcaster.Buffered),
18
+	}
19
+}
20
+
21
+// add checks if a pull is already running, and returns (broadcaster, true)
22
+// if a running operation is found. Otherwise, it creates a new one and returns
23
+// (broadcaster, false).
24
+func (pool *Pool) add(key string) (*broadcaster.Buffered, bool) {
25
+	pool.Lock()
26
+	defer pool.Unlock()
27
+
28
+	if p, exists := pool.pullingPool[key]; exists {
29
+		return p, true
30
+	}
31
+
32
+	broadcaster := broadcaster.NewBuffered()
33
+	pool.pullingPool[key] = broadcaster
34
+
35
+	return broadcaster, false
36
+}
37
+
38
+func (pool *Pool) removeWithError(key string, broadcasterResult error) error {
39
+	pool.Lock()
40
+	defer pool.Unlock()
41
+	if broadcaster, exists := pool.pullingPool[key]; exists {
42
+		broadcaster.CloseWithError(broadcasterResult)
43
+		delete(pool.pullingPool, key)
44
+	}
45
+	return nil
46
+}
47
+
48
+func (pool *Pool) remove(key string) error {
49
+	return pool.removeWithError(key, nil)
50
+}
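
Since add and removeWithError are unexported, only code in this package uses the Pool directly. The sketch below shows the intended pattern, roughly as the pullers later in this commit apply it; downloadOnce and its do callback are illustrative, not part of the commit:

package distribution

import "io"

// downloadOnce illustrates the Pool pattern: the first caller for a key runs
// the download and broadcasts its progress; concurrent callers for the same
// key attach their writer and simply wait for the shared result.
func downloadOnce(pool *Pool, key string, out io.Writer, do func(progress io.Writer) error) (err error) {
	broadcaster, inProgress := pool.add(key)
	broadcaster.Add(out) // every caller sees the progress output

	if inProgress {
		// Another goroutine owns this download; block until it finishes.
		return broadcaster.Wait()
	}

	// This goroutine owns the download. Use a closure so the deferred call
	// sees the final value of err and releases waiters with it.
	defer func() {
		pool.removeWithError(key, err)
	}()

	return do(broadcaster)
}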
new file mode 100644
@@ -0,0 +1,28 @@
0
+package distribution
1
+
2
+import (
3
+	"testing"
4
+)
5
+
6
+func TestPools(t *testing.T) {
7
+	p := NewPool()
8
+
9
+	if _, found := p.add("test1"); found {
10
+		t.Fatal("Expected pull test1 not to be in progress")
11
+	}
12
+	if _, found := p.add("test2"); found {
13
+		t.Fatal("Expected pull test2 not to be in progress")
14
+	}
15
+	if _, found := p.add("test1"); !found {
16
+		t.Fatalf("Expected pull test1 to be in progress`")
17
+	}
18
+	if err := p.remove("test2"); err != nil {
19
+		t.Fatal(err)
20
+	}
21
+	if err := p.remove("test2"); err != nil {
22
+		t.Fatal(err)
23
+	}
24
+	if err := p.remove("test1"); err != nil {
25
+		t.Fatal(err)
26
+	}
27
+}
new file mode 100644
@@ -0,0 +1,185 @@
0
+package distribution
1
+
2
+import (
3
+	"fmt"
4
+	"io"
5
+	"strings"
6
+
7
+	"github.com/Sirupsen/logrus"
8
+	"github.com/docker/distribution/reference"
9
+	"github.com/docker/docker/cliconfig"
10
+	"github.com/docker/docker/daemon/events"
11
+	"github.com/docker/docker/distribution/metadata"
12
+	"github.com/docker/docker/image"
13
+	"github.com/docker/docker/layer"
14
+	"github.com/docker/docker/pkg/streamformatter"
15
+	"github.com/docker/docker/registry"
16
+	"github.com/docker/docker/tag"
17
+)
18
+
19
+// ImagePullConfig stores pull configuration.
20
+type ImagePullConfig struct {
21
+	// MetaHeaders stores HTTP headers with metadata about the image
22
+	// (DockerHeaders with prefix X-Meta- in the request).
23
+	MetaHeaders map[string][]string
24
+	// AuthConfig holds authentication credentials for authenticating with
25
+	// the registry.
26
+	AuthConfig *cliconfig.AuthConfig
27
+	// OutStream is the output writer for showing the status of the pull
28
+	// operation.
29
+	OutStream io.Writer
30
+	// RegistryService is the registry service to use for TLS configuration
31
+	// and endpoint lookup.
32
+	RegistryService *registry.Service
33
+	// EventsService is the events service to use for logging.
34
+	EventsService *events.Events
35
+	// MetadataStore is the storage backend for distribution-specific
36
+	// metadata.
37
+	MetadataStore metadata.Store
38
+	// LayerStore manages layers.
39
+	LayerStore layer.Store
40
+	// ImageStore manages images.
41
+	ImageStore image.Store
42
+	// TagStore manages tags.
43
+	TagStore tag.Store
44
+	// Pool manages concurrent pulls.
45
+	Pool *Pool
46
+}
47
+
48
+// Puller is an interface that abstracts pulling for different API versions.
49
+type Puller interface {
50
+	// Pull tries to pull the image referenced by `tag`
51
+	// Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint.
52
+	//
53
+	Pull(ref reference.Named) (fallback bool, err error)
54
+}
55
+
56
+// newPuller returns a Puller interface that will pull from either a v1 or v2
57
+// registry. The endpoint argument contains a Version field that determines
58
+// whether a v1 or v2 puller will be created. The other parameters are passed
59
+// through to the underlying puller implementation for use during the actual
60
+// pull operation.
61
+func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig, sf *streamformatter.StreamFormatter) (Puller, error) {
62
+	switch endpoint.Version {
63
+	case registry.APIVersion2:
64
+		return &v2Puller{
65
+			blobSumService: metadata.NewBlobSumService(imagePullConfig.MetadataStore),
66
+			endpoint:       endpoint,
67
+			config:         imagePullConfig,
68
+			sf:             sf,
69
+			repoInfo:       repoInfo,
70
+		}, nil
71
+	case registry.APIVersion1:
72
+		return &v1Puller{
73
+			v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore),
74
+			endpoint:    endpoint,
75
+			config:      imagePullConfig,
76
+			sf:          sf,
77
+			repoInfo:    repoInfo,
78
+		}, nil
79
+	}
80
+	return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL)
81
+}
82
+
83
+// Pull initiates a pull operation. image is the repository name to pull, and
84
+// tag may be either empty, or indicate a specific tag to pull.
85
+func Pull(ref reference.Named, imagePullConfig *ImagePullConfig) error {
86
+	var sf = streamformatter.NewJSONStreamFormatter()
87
+
88
+	// Resolve the Repository name from fqn to RepositoryInfo
89
+	repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref)
90
+	if err != nil {
91
+		return err
92
+	}
93
+
94
+	// makes sure name is not empty or `scratch`
95
+	if err := validateRepoName(repoInfo.LocalName.Name()); err != nil {
96
+		return err
97
+	}
98
+
99
+	endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(repoInfo.CanonicalName)
100
+	if err != nil {
101
+		return err
102
+	}
103
+
104
+	logName := registry.NormalizeLocalReference(ref)
105
+
106
+	var (
107
+		// use a slice to append the error strings and return a joined string to caller
108
+		errors []string
109
+
110
+		// discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport
111
+		// By default it is false, which means that if a ErrNoSupport error is encountered, it will be saved in errors.
112
+		// As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of
113
+		// any subsequent ErrNoSupport errors in errors.
114
+		// It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be
115
+		// returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant
116
+		// error is the ones from v2 endpoints not v1.
117
+		discardNoSupportErrors bool
118
+	)
119
+	for _, endpoint := range endpoints {
120
+		logrus.Debugf("Trying to pull %s from %s %s", repoInfo.LocalName, endpoint.URL, endpoint.Version)
121
+
122
+		puller, err := newPuller(endpoint, repoInfo, imagePullConfig, sf)
123
+		if err != nil {
124
+			errors = append(errors, err.Error())
125
+			continue
126
+		}
127
+		if fallback, err := puller.Pull(ref); err != nil {
128
+			if fallback {
129
+				if _, ok := err.(registry.ErrNoSupport); !ok {
130
+					// Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors.
131
+					discardNoSupportErrors = true
132
+					// append subsequent errors
133
+					errors = append(errors, err.Error())
134
+				} else if !discardNoSupportErrors {
135
+					// Save the ErrNoSupport error, because it's either the first error or all encountered errors
136
+					// were also ErrNoSupport errors.
137
+					// append subsequent errors
138
+					errors = append(errors, err.Error())
139
+				}
140
+				continue
141
+			}
142
+			errors = append(errors, err.Error())
143
+			logrus.Debugf("Not continuing with error: %v", fmt.Errorf(strings.Join(errors, "\n")))
144
+			if len(errors) > 0 {
145
+				return fmt.Errorf(strings.Join(errors, "\n"))
146
+			}
147
+		}
148
+
149
+		imagePullConfig.EventsService.Log("pull", logName.String(), "")
150
+		return nil
151
+	}
152
+
153
+	if len(errors) == 0 {
154
+		return fmt.Errorf("no endpoints found for %s", ref.String())
155
+	}
156
+
157
+	if len(errors) > 0 {
158
+		return fmt.Errorf(strings.Join(errors, "\n"))
159
+	}
160
+	return nil
161
+}
162
+
163
+// writeStatus writes a status message to out. If layersDownloaded is true, the
164
+// status message indicates that a newer image was downloaded. Otherwise, it
165
+// indicates that the image is up to date. requestedTag is the tag the message
166
+// will refer to.
167
+func writeStatus(requestedTag string, out io.Writer, sf *streamformatter.StreamFormatter, layersDownloaded bool) {
168
+	if layersDownloaded {
169
+		out.Write(sf.FormatStatus("", "Status: Downloaded newer image for %s", requestedTag))
170
+	} else {
171
+		out.Write(sf.FormatStatus("", "Status: Image is up to date for %s", requestedTag))
172
+	}
173
+}
174
+
175
+// validateRepoName validates the name of a repository.
176
+func validateRepoName(name string) error {
177
+	if name == "" {
178
+		return fmt.Errorf("Repository name can't be empty")
179
+	}
180
+	if name == "scratch" {
181
+		return fmt.Errorf("'scratch' is a reserved name")
182
+	}
183
+	return nil
184
+}
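
A sketch of how a caller such as the daemon might wire this package together. Everything is passed in as a parameter because the stores and services come from the daemon's own initialization; reference.ParseNamed and the import path of this distribution package are assumptions, not confirmed by this diff:

package example

import (
	"io"

	"github.com/docker/distribution/reference"
	"github.com/docker/docker/cliconfig"
	"github.com/docker/docker/daemon/events"
	"github.com/docker/docker/distribution"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/tag"
)

// pullImage assembles an ImagePullConfig and hands off to Pull. The stores and
// services are assumed to exist already; only the wiring is shown here.
func pullImage(
	name string,
	out io.Writer,
	auth *cliconfig.AuthConfig,
	registryService *registry.Service,
	eventsService *events.Events,
	metadataStore metadata.Store,
	layerStore layer.Store,
	imageStore image.Store,
	tagStore tag.Store,
	pool *distribution.Pool,
) error {
	ref, err := reference.ParseNamed(name) // assumed parser for "repo:tag" strings
	if err != nil {
		return err
	}
	return distribution.Pull(ref, &distribution.ImagePullConfig{
		AuthConfig:      auth,
		OutStream:       out,
		RegistryService: registryService,
		EventsService:   eventsService,
		MetadataStore:   metadataStore,
		LayerStore:      layerStore,
		ImageStore:      imageStore,
		TagStore:        tagStore,
		Pool:            pool,
	})
}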
new file mode 100644
@@ -0,0 +1,454 @@
0
+package distribution
1
+
2
+import (
3
+	"encoding/json"
4
+	"errors"
5
+	"fmt"
6
+	"io"
7
+	"net"
8
+	"net/url"
9
+	"strings"
10
+	"sync"
11
+	"time"
12
+
13
+	"github.com/Sirupsen/logrus"
14
+	"github.com/docker/distribution/reference"
15
+	"github.com/docker/distribution/registry/client/transport"
16
+	"github.com/docker/docker/distribution/metadata"
17
+	"github.com/docker/docker/image"
18
+	"github.com/docker/docker/image/v1"
19
+	"github.com/docker/docker/layer"
20
+	"github.com/docker/docker/pkg/archive"
21
+	"github.com/docker/docker/pkg/progressreader"
22
+	"github.com/docker/docker/pkg/streamformatter"
23
+	"github.com/docker/docker/pkg/stringid"
24
+	"github.com/docker/docker/registry"
25
+)
26
+
27
+type v1Puller struct {
28
+	v1IDService *metadata.V1IDService
29
+	endpoint    registry.APIEndpoint
30
+	config      *ImagePullConfig
31
+	sf          *streamformatter.StreamFormatter
32
+	repoInfo    *registry.RepositoryInfo
33
+	session     *registry.Session
34
+}
35
+
36
+func (p *v1Puller) Pull(ref reference.Named) (fallback bool, err error) {
37
+	if _, isDigested := ref.(reference.Digested); isDigested {
38
+		// Allowing fallback, because HTTPS v1 is before HTTP v2
39
+		return true, registry.ErrNoSupport{errors.New("Cannot pull by digest with v1 registry")}
40
+	}
41
+
42
+	tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name)
43
+	if err != nil {
44
+		return false, err
45
+	}
46
+	// Adds Docker-specific headers as well as user-specified headers (metaHeaders)
47
+	tr := transport.NewTransport(
48
+		// TODO(tiborvass): was ReceiveTimeout
49
+		registry.NewTransport(tlsConfig),
50
+		registry.DockerHeaders(p.config.MetaHeaders)...,
51
+	)
52
+	client := registry.HTTPClient(tr)
53
+	v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders)
54
+	if err != nil {
55
+		logrus.Debugf("Could not get v1 endpoint: %v", err)
56
+		return true, err
57
+	}
58
+	p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint)
59
+	if err != nil {
60
+		// TODO(dmcgowan): Check if should fallback
61
+		logrus.Debugf("Fallback from error: %s", err)
62
+		return true, err
63
+	}
64
+	if err := p.pullRepository(ref); err != nil {
65
+		// TODO(dmcgowan): Check if should fallback
66
+		return false, err
67
+	}
68
+	out := p.config.OutStream
69
+	out.Write(p.sf.FormatStatus("", "%s: this image was pulled from a legacy registry.  Important: This registry version will not be supported in future versions of docker.", p.repoInfo.CanonicalName.Name()))
70
+
71
+	return false, nil
72
+}
73
+
74
+func (p *v1Puller) pullRepository(ref reference.Named) error {
75
+	out := p.config.OutStream
76
+	out.Write(p.sf.FormatStatus("", "Pulling repository %s", p.repoInfo.CanonicalName.Name()))
77
+
78
+	repoData, err := p.session.GetRepositoryData(p.repoInfo.RemoteName)
79
+	if err != nil {
80
+		if strings.Contains(err.Error(), "HTTP code: 404") {
81
+			return fmt.Errorf("Error: image %s not found", p.repoInfo.RemoteName.Name())
82
+		}
83
+		// Unexpected HTTP error
84
+		return err
85
+	}
86
+
87
+	logrus.Debugf("Retrieving the tag list")
88
+	var tagsList map[string]string
89
+	tagged, isTagged := ref.(reference.Tagged)
90
+	if !isTagged {
91
+		tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo.RemoteName)
92
+	} else {
93
+		var tagID string
94
+		tagsList = make(map[string]string)
95
+		tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo.RemoteName, tagged.Tag())
96
+		if err == registry.ErrRepoNotFound {
97
+			return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.CanonicalName.Name())
98
+		}
99
+		tagsList[tagged.Tag()] = tagID
100
+	}
101
+	if err != nil {
102
+		logrus.Errorf("unable to get remote tags: %s", err)
103
+		return err
104
+	}
105
+
106
+	for tag, id := range tagsList {
107
+		repoData.ImgList[id] = &registry.ImgData{
108
+			ID:       id,
109
+			Tag:      tag,
110
+			Checksum: "",
111
+		}
112
+	}
113
+
114
+	errors := make(chan error)
115
+	layerDownloaded := make(chan struct{})
116
+
117
+	layersDownloaded := false
118
+	var wg sync.WaitGroup
119
+	for _, imgData := range repoData.ImgList {
120
+		if isTagged && imgData.Tag != tagged.Tag() {
121
+			continue
122
+		}
123
+
124
+		wg.Add(1)
125
+		go func(img *registry.ImgData) {
126
+			p.downloadImage(out, repoData, img, layerDownloaded, errors)
127
+			wg.Done()
128
+		}(imgData)
129
+	}
130
+
131
+	go func() {
132
+		wg.Wait()
133
+		close(errors)
134
+	}()
135
+
136
+	var lastError error
137
+selectLoop:
138
+	for {
139
+		select {
140
+		case err, ok := <-errors:
141
+			if !ok {
142
+				break selectLoop
143
+			}
144
+			lastError = err
145
+		case <-layerDownloaded:
146
+			layersDownloaded = true
147
+		}
148
+	}
149
+
150
+	if lastError != nil {
151
+		return lastError
152
+	}
153
+
154
+	localNameRef := p.repoInfo.LocalName
155
+	if isTagged {
156
+		localNameRef, err = reference.WithTag(localNameRef, tagged.Tag())
157
+		if err != nil {
158
+			localNameRef = p.repoInfo.LocalName
159
+		}
160
+	}
161
+	writeStatus(localNameRef.String(), out, p.sf, layersDownloaded)
162
+	return nil
163
+}
164
+
165
+func (p *v1Puller) downloadImage(out io.Writer, repoData *registry.RepositoryData, img *registry.ImgData, layerDownloaded chan struct{}, errors chan error) {
166
+	if img.Tag == "" {
167
+		logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
168
+		return
169
+	}
170
+
171
+	localNameRef, err := reference.WithTag(p.repoInfo.LocalName, img.Tag)
172
+	if err != nil {
173
+		retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag)
174
+		logrus.Debug(retErr.Error())
175
+		errors <- retErr
176
+	}
177
+
178
+	if err := v1.ValidateID(img.ID); err != nil {
179
+		errors <- err
180
+		return
181
+	}
182
+
183
+	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, p.repoInfo.CanonicalName.Name()), nil))
184
+	success := false
185
+	var lastErr error
186
+	var isDownloaded bool
187
+	for _, ep := range p.repoInfo.Index.Mirrors {
188
+		ep += "v1/"
189
+		out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.CanonicalName.Name(), ep), nil))
190
+		if isDownloaded, err = p.pullImage(out, img.ID, ep, localNameRef); err != nil {
191
+			// Don't report errors when pulling from mirrors.
192
+			logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.CanonicalName.Name(), ep, err)
193
+			continue
194
+		}
195
+		if isDownloaded {
196
+			layerDownloaded <- struct{}{}
197
+		}
198
+		success = true
199
+		break
200
+	}
201
+	if !success {
202
+		for _, ep := range repoData.Endpoints {
203
+			out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.CanonicalName.Name(), ep), nil))
204
+			if isDownloaded, err = p.pullImage(out, img.ID, ep, localNameRef); err != nil {
205
+				// It's not ideal that only the last error is returned, it would be better to concatenate the errors.
206
+				// As the error is also given to the output stream the user will see the error.
207
+				lastErr = err
208
+				out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.CanonicalName.Name(), ep, err), nil))
209
+				continue
210
+			}
211
+			if isDownloaded {
212
+				layerDownloaded <- struct{}{}
213
+			}
214
+			success = true
215
+			break
216
+		}
217
+	}
218
+	if !success {
219
+		err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.CanonicalName.Name(), lastErr)
220
+		out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), err.Error(), nil))
221
+		errors <- err
222
+		return
223
+	}
224
+	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
225
+}
226
+
227
+func (p *v1Puller) pullImage(out io.Writer, v1ID, endpoint string, localNameRef reference.Named) (layersDownloaded bool, err error) {
228
+	var history []string
229
+	history, err = p.session.GetRemoteHistory(v1ID, endpoint)
230
+	if err != nil {
231
+		return false, err
232
+	}
233
+	if len(history) < 1 {
234
+		return false, fmt.Errorf("empty history for image %s", v1ID)
235
+	}
236
+	out.Write(p.sf.FormatProgress(stringid.TruncateID(v1ID), "Pulling dependent layers", nil))
237
+	// FIXME: Try to stream the images?
238
+	// FIXME: Launch the getRemoteImage() in goroutines
239
+
240
+	var (
241
+		referencedLayers []layer.Layer
242
+		parentID         layer.ChainID
243
+		newHistory       []image.History
244
+		img              *image.V1Image
245
+		imgJSON          []byte
246
+		imgSize          int64
247
+	)
248
+
249
+	defer func() {
250
+		for _, l := range referencedLayers {
251
+			layer.ReleaseAndLog(p.config.LayerStore, l)
252
+		}
253
+	}()
254
+
255
+	layersDownloaded = false
256
+
257
+	// Iterate over layers from top-most to bottom-most, checking if any
258
+	// already exist on disk.
259
+	var i int
260
+	for i = 0; i != len(history); i++ {
261
+		v1LayerID := history[i]
262
+		// Do we have a mapping for this particular v1 ID on this
263
+		// registry?
264
+		if layerID, err := p.v1IDService.Get(v1LayerID, p.repoInfo.Index.Name); err == nil {
265
+			// Does the layer actually exist
266
+			if l, err := p.config.LayerStore.Get(layerID); err == nil {
267
+				for j := i; j >= 0; j-- {
268
+					logrus.Debugf("Layer already exists: %s", history[j])
269
+					out.Write(p.sf.FormatProgress(stringid.TruncateID(history[j]), "Already exists", nil))
270
+				}
271
+				referencedLayers = append(referencedLayers, l)
272
+				parentID = layerID
273
+				break
274
+			}
275
+		}
276
+	}
277
+
278
+	needsDownload := i
279
+
280
+	// Iterate over layers, in order from bottom-most to top-most. Download
281
+	// config for all layers, and download actual layer data if needed.
282
+	for i = len(history) - 1; i >= 0; i-- {
283
+		v1LayerID := history[i]
284
+		imgJSON, imgSize, err = p.downloadLayerConfig(out, v1LayerID, endpoint)
285
+		if err != nil {
286
+			return layersDownloaded, err
287
+		}
288
+
289
+		img = &image.V1Image{}
290
+		if err := json.Unmarshal(imgJSON, img); err != nil {
291
+			return layersDownloaded, err
292
+		}
293
+
294
+		if i < needsDownload {
295
+			l, err := p.downloadLayer(out, v1LayerID, endpoint, parentID, imgSize, &layersDownloaded)
296
+
297
+			// Note: This needs to be done even in the error case to avoid
298
+			// stale references to the layer.
299
+			if l != nil {
300
+				referencedLayers = append(referencedLayers, l)
301
+			}
302
+			if err != nil {
303
+				return layersDownloaded, err
304
+			}
305
+
306
+			parentID = l.ChainID()
307
+		}
308
+
309
+		// Create a new-style config from the legacy configs
310
+		h, err := v1.HistoryFromConfig(imgJSON, false)
311
+		if err != nil {
312
+			return layersDownloaded, err
313
+		}
314
+		newHistory = append(newHistory, h)
315
+	}
316
+
317
+	rootFS := image.NewRootFS()
318
+	l := referencedLayers[len(referencedLayers)-1]
319
+	for l != nil {
320
+		rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...)
321
+		l = l.Parent()
322
+	}
323
+
324
+	config, err := v1.MakeConfigFromV1Config(imgJSON, rootFS, newHistory)
325
+	if err != nil {
326
+		return layersDownloaded, err
327
+	}
328
+
329
+	imageID, err := p.config.ImageStore.Create(config)
330
+	if err != nil {
331
+		return layersDownloaded, err
332
+	}
333
+
334
+	if err := p.config.TagStore.Add(localNameRef, imageID, true); err != nil {
335
+		return layersDownloaded, err
336
+	}
337
+
338
+	return layersDownloaded, nil
339
+}
340
+
341
+func (p *v1Puller) downloadLayerConfig(out io.Writer, v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) {
342
+	out.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), "Pulling metadata", nil))
343
+
344
+	retries := 5
345
+	for j := 1; j <= retries; j++ {
346
+		imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint)
347
+		if err != nil && j == retries {
348
+			out.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), "Error pulling layer metadata", nil))
349
+			return nil, 0, err
350
+		} else if err != nil {
351
+			time.Sleep(time.Duration(j) * 500 * time.Millisecond)
352
+			continue
353
+		}
354
+
355
+		return imgJSON, imgSize, nil
356
+	}
357
+
358
+	// not reached
359
+	return nil, 0, nil
360
+}
361
+
362
+func (p *v1Puller) downloadLayer(out io.Writer, v1LayerID, endpoint string, parentID layer.ChainID, layerSize int64, layersDownloaded *bool) (l layer.Layer, err error) {
363
+	// ensure no two downloads of the same layer happen at the same time
364
+	poolKey := "layer:" + v1LayerID
365
+	broadcaster, found := p.config.Pool.add(poolKey)
366
+	broadcaster.Add(out)
367
+	if found {
368
+		logrus.Debugf("Image (id: %s) pull is already running, skipping", v1LayerID)
369
+		if err = broadcaster.Wait(); err != nil {
370
+			return nil, err
371
+		}
372
+		layerID, err := p.v1IDService.Get(v1LayerID, p.repoInfo.Index.Name)
373
+		if err != nil {
374
+			return nil, err
375
+		}
376
+		// Does the layer actually exist
377
+		l, err := p.config.LayerStore.Get(layerID)
378
+		if err != nil {
379
+			return nil, err
380
+		}
381
+		return l, nil
382
+	}
383
+
384
+	// This must use a closure so it captures the value of err when
385
+	// the function returns, not when the 'defer' is evaluated.
386
+	defer func() {
387
+		p.config.Pool.removeWithError(poolKey, err)
388
+	}()
389
+
390
+	retries := 5
391
+	for j := 1; j <= retries; j++ {
392
+		// Get the layer
393
+		status := "Pulling fs layer"
394
+		if j > 1 {
395
+			status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
396
+		}
397
+		broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), status, nil))
398
+		layerReader, err := p.session.GetRemoteImageLayer(v1LayerID, endpoint, layerSize)
399
+		if uerr, ok := err.(*url.Error); ok {
400
+			err = uerr.Err
401
+		}
402
+		if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
403
+			time.Sleep(time.Duration(j) * 500 * time.Millisecond)
404
+			continue
405
+		} else if err != nil {
406
+			broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), "Error pulling dependent layers", nil))
407
+			return nil, err
408
+		}
409
+		*layersDownloaded = true
410
+		defer layerReader.Close()
411
+
412
+		reader := progressreader.New(progressreader.Config{
413
+			In:        layerReader,
414
+			Out:       broadcaster,
415
+			Formatter: p.sf,
416
+			Size:      layerSize,
417
+			NewLines:  false,
418
+			ID:        stringid.TruncateID(v1LayerID),
419
+			Action:    "Downloading",
420
+		})
421
+
422
+		inflatedLayerData, err := archive.DecompressStream(reader)
423
+		if err != nil {
424
+			return nil, fmt.Errorf("could not get decompression stream: %v", err)
425
+		}
426
+
427
+		l, err := p.config.LayerStore.Register(inflatedLayerData, parentID)
428
+		if err != nil {
429
+			return nil, fmt.Errorf("failed to register layer: %v", err)
430
+		}
431
+		logrus.Debugf("layer %s registered successfully", l.DiffID())
432
+
433
+		if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
434
+			time.Sleep(time.Duration(j) * 500 * time.Millisecond)
435
+			continue
436
+		} else if err != nil {
437
+			broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), "Error downloading dependent layers", nil))
438
+			return nil, err
439
+		}
440
+
441
+		// Cache mapping from this v1 ID to content-addressable layer ID
442
+		if err := p.v1IDService.Set(v1LayerID, p.repoInfo.Index.Name, l.ChainID()); err != nil {
443
+			return nil, err
444
+		}
445
+
446
+		broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), "Download complete", nil))
447
+		broadcaster.Close()
448
+		return l, nil
449
+	}
450
+
451
+	// not reached
452
+	return nil, nil
453
+}
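
Both downloadLayerConfig and downloadLayer above share the same retry shape: up to five attempts with a linearly growing 500 ms pause between them. A condensed, standalone sketch of that pattern; the helper name and demo failure are illustrative only:

package main

import (
	"fmt"
	"time"
)

// withRetries runs op up to retries times, sleeping attempt*500ms between
// tries, mirroring the linear backoff used by the v1 puller above.
func withRetries(retries int, op func() error) error {
	var err error
	for attempt := 1; attempt <= retries; attempt++ {
		if err = op(); err == nil {
			return nil
		}
		if attempt < retries {
			time.Sleep(time.Duration(attempt) * 500 * time.Millisecond)
		}
	}
	return fmt.Errorf("giving up after %d attempts: %v", retries, err)
}

func main() {
	calls := 0
	err := withRetries(5, func() error {
		calls++
		if calls < 3 {
			return fmt.Errorf("transient failure %d", calls)
		}
		return nil
	})
	fmt.Println(err, "after", calls, "calls") // <nil> after 3 calls
}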
new file mode 100644
@@ -0,0 +1,512 @@
0
+package distribution
1
+
2
+import (
3
+	"encoding/json"
4
+	"errors"
5
+	"fmt"
6
+	"io"
7
+	"io/ioutil"
8
+	"os"
9
+	"runtime"
10
+
11
+	"github.com/Sirupsen/logrus"
12
+	"github.com/docker/distribution"
13
+	"github.com/docker/distribution/digest"
14
+	"github.com/docker/distribution/manifest/schema1"
15
+	"github.com/docker/distribution/reference"
16
+	"github.com/docker/docker/distribution/metadata"
17
+	"github.com/docker/docker/image"
18
+	"github.com/docker/docker/image/v1"
19
+	"github.com/docker/docker/layer"
20
+	"github.com/docker/docker/pkg/archive"
21
+	"github.com/docker/docker/pkg/broadcaster"
22
+	"github.com/docker/docker/pkg/progressreader"
23
+	"github.com/docker/docker/pkg/streamformatter"
24
+	"github.com/docker/docker/pkg/stringid"
25
+	"github.com/docker/docker/registry"
26
+	"golang.org/x/net/context"
27
+)
28
+
29
+type v2Puller struct {
30
+	blobSumService *metadata.BlobSumService
31
+	endpoint       registry.APIEndpoint
32
+	config         *ImagePullConfig
33
+	sf             *streamformatter.StreamFormatter
34
+	repoInfo       *registry.RepositoryInfo
35
+	repo           distribution.Repository
36
+	sessionID      string
37
+}
38
+
39
+func (p *v2Puller) Pull(ref reference.Named) (fallback bool, err error) {
40
+	// TODO(tiborvass): was ReceiveTimeout
41
+	p.repo, err = NewV2Repository(p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
42
+	if err != nil {
43
+		logrus.Debugf("Error getting v2 registry: %v", err)
44
+		return true, err
45
+	}
46
+
47
+	p.sessionID = stringid.GenerateRandomID()
48
+
49
+	if err := p.pullV2Repository(ref); err != nil {
50
+		if registry.ContinueOnError(err) {
51
+			logrus.Debugf("Error trying v2 registry: %v", err)
52
+			return true, err
53
+		}
54
+		return false, err
55
+	}
56
+	return false, nil
57
+}
58
+
59
+func (p *v2Puller) pullV2Repository(ref reference.Named) (err error) {
60
+	var refs []reference.Named
61
+	taggedName := p.repoInfo.LocalName
62
+	if tagged, isTagged := ref.(reference.Tagged); isTagged {
63
+		taggedName, err = reference.WithTag(p.repoInfo.LocalName, tagged.Tag())
64
+		if err != nil {
65
+			return err
66
+		}
67
+		refs = []reference.Named{taggedName}
68
+	} else if digested, isDigested := ref.(reference.Digested); isDigested {
69
+		taggedName, err = reference.WithDigest(p.repoInfo.LocalName, digested.Digest())
70
+		if err != nil {
71
+			return err
72
+		}
73
+		refs = []reference.Named{taggedName}
74
+	} else {
75
+		manSvc, err := p.repo.Manifests(context.Background())
76
+		if err != nil {
77
+			return err
78
+		}
79
+
80
+		tags, err := manSvc.Tags()
81
+		if err != nil {
82
+			return err
83
+		}
84
+
85
+		// This probably becomes a lot nicer after the manifest
86
+		// refactor...
87
+		for _, tag := range tags {
88
+			tagRef, err := reference.WithTag(p.repoInfo.LocalName, tag)
89
+			if err != nil {
90
+				return err
91
+			}
92
+			refs = append(refs, tagRef)
93
+		}
94
+	}
95
+
96
+	var layersDownloaded bool
97
+	for _, pullRef := range refs {
98
+		// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
99
+		// TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus?
100
+		pulledNew, err := p.pullV2Tag(p.config.OutStream, pullRef)
101
+		if err != nil {
102
+			return err
103
+		}
104
+		layersDownloaded = layersDownloaded || pulledNew
105
+	}
106
+
107
+	writeStatus(taggedName.String(), p.config.OutStream, p.sf, layersDownloaded)
108
+
109
+	return nil
110
+}
111
+
112
+// downloadInfo is used to pass information from download to extractor
113
+type downloadInfo struct {
114
+	tmpFile     *os.File
115
+	digest      digest.Digest
116
+	layer       distribution.ReadSeekCloser
117
+	size        int64
118
+	err         chan error
119
+	poolKey     string
120
+	broadcaster *broadcaster.Buffered
121
+}
122
+
123
+type errVerification struct{}
124
+
125
+func (errVerification) Error() string { return "verification failed" }
126
+
127
+func (p *v2Puller) download(di *downloadInfo) {
128
+	logrus.Debugf("pulling blob %q", di.digest)
129
+
130
+	blobs := p.repo.Blobs(context.Background())
131
+
132
+	desc, err := blobs.Stat(context.Background(), di.digest)
133
+	if err != nil {
134
+		logrus.Debugf("Error statting layer: %v", err)
135
+		di.err <- err
136
+		return
137
+	}
138
+	di.size = desc.Size
139
+
140
+	layerDownload, err := blobs.Open(context.Background(), di.digest)
141
+	if err != nil {
142
+		logrus.Debugf("Error fetching layer: %v", err)
143
+		di.err <- err
144
+		return
145
+	}
146
+	defer layerDownload.Close()
147
+
148
+	verifier, err := digest.NewDigestVerifier(di.digest)
149
+	if err != nil {
150
+		di.err <- err
151
+		return
152
+	}
153
+
154
+	digestStr := di.digest.String()
155
+
156
+	reader := progressreader.New(progressreader.Config{
157
+		In:        ioutil.NopCloser(io.TeeReader(layerDownload, verifier)),
158
+		Out:       di.broadcaster,
159
+		Formatter: p.sf,
160
+		Size:      di.size,
161
+		NewLines:  false,
162
+		ID:        stringid.TruncateID(digestStr),
163
+		Action:    "Downloading",
164
+	})
165
+	io.Copy(di.tmpFile, reader)
166
+
167
+	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(digestStr), "Verifying Checksum", nil))
168
+
169
+	if !verifier.Verified() {
170
+		err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest)
171
+		logrus.Error(err)
172
+		di.err <- err
173
+		return
174
+	}
175
+
176
+	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(digestStr), "Download complete", nil))
177
+
178
+	logrus.Debugf("Downloaded %s to tempfile %s", digestStr, di.tmpFile.Name())
179
+	di.layer = layerDownload
180
+
181
+	di.err <- nil
182
+}
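
download streams the blob through io.TeeReader so the digest verifier sees exactly the bytes written to the temp file, and the data is only trusted if the verifier agrees once the copy finishes. Here is a self-contained sketch of that verify-while-copying pattern using only the standard library; the helper name and sample data are illustrative, not part of this package.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// copyAndVerify writes src to dst while hashing the bytes in flight, then
// compares the result to the expected hex digest, much as download does with
// a digest verifier.
func copyAndVerify(dst io.Writer, src io.Reader, wantHex string) error {
	h := sha256.New()
	if _, err := io.Copy(dst, io.TeeReader(src, h)); err != nil {
		return err
	}
	if got := hex.EncodeToString(h.Sum(nil)); got != wantHex {
		return fmt.Errorf("filesystem layer verification failed: got sha256:%s, want sha256:%s", got, wantHex)
	}
	return nil
}

func main() {
	data := "example layer bytes"
	sum := sha256.Sum256([]byte(data))
	err := copyAndVerify(ioutil.Discard, strings.NewReader(data), hex.EncodeToString(sum[:]))
	fmt.Println("verified:", err == nil)
}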
183
+
184
+func (p *v2Puller) pullV2Tag(out io.Writer, ref reference.Named) (tagUpdated bool, err error) {
185
+	tagOrDigest := ""
186
+	if tagged, isTagged := ref.(reference.Tagged); isTagged {
187
+		tagOrDigest = tagged.Tag()
188
+	} else if digested, isDigested := ref.(reference.Digested); isDigested {
189
+		tagOrDigest = digested.Digest().String()
190
+	} else {
191
+		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
192
+	}
193
+
194
+	logrus.Debugf("Pulling ref from V2 registry: %q", tagOrDigest)
195
+
196
+	manSvc, err := p.repo.Manifests(context.Background())
197
+	if err != nil {
198
+		return false, err
199
+	}
200
+
201
+	unverifiedManifest, err := manSvc.GetByTag(tagOrDigest)
202
+	if err != nil {
203
+		return false, err
204
+	}
205
+	if unverifiedManifest == nil {
206
+		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
207
+	}
208
+	var verifiedManifest *schema1.Manifest
209
+	verifiedManifest, err = verifyManifest(unverifiedManifest, ref)
210
+	if err != nil {
211
+		return false, err
212
+	}
213
+
214
+	rootFS := image.NewRootFS()
215
+
216
+	if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil {
217
+		return false, err
218
+	}
219
+
220
+	// remove duplicate layers and check parent chain validity
221
+	err = fixManifestLayers(verifiedManifest)
222
+	if err != nil {
223
+		return false, err
224
+	}
225
+
226
+	out.Write(p.sf.FormatStatus(tagOrDigest, "Pulling from %s", p.repo.Name()))
227
+
228
+	var downloads []*downloadInfo
229
+
230
+	defer func() {
231
+		for _, d := range downloads {
232
+			p.config.Pool.removeWithError(d.poolKey, err)
233
+			if d.tmpFile != nil {
234
+				d.tmpFile.Close()
235
+				if err := os.RemoveAll(d.tmpFile.Name()); err != nil {
236
+					logrus.Errorf("Failed to remove temp file: %s", d.tmpFile.Name())
237
+				}
238
+			}
239
+		}
240
+	}()
241
+
242
+	// Image history converted to the new format
243
+	var history []image.History
244
+
245
+	poolKey := "v2layer:"
246
+	notFoundLocally := false
247
+
248
+	// Note that the order of this loop is in the direction of bottom-most
249
+	// to top-most, so that the downloads slice gets ordered correctly.
250
+	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
251
+		blobSum := verifiedManifest.FSLayers[i].BlobSum
252
+		poolKey += blobSum.String()
253
+
254
+		var throwAway struct {
255
+			ThrowAway bool `json:"throwaway,omitempty"`
256
+		}
257
+		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
258
+			return false, err
259
+		}
260
+
261
+		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
262
+		if err != nil {
263
+			return false, err
264
+		}
265
+		history = append(history, h)
266
+
267
+		if throwAway.ThrowAway {
268
+			continue
269
+		}
270
+
271
+		// Do we have a layer on disk corresponding to the set of
272
+		// blobsums up to this point?
273
+		if !notFoundLocally {
274
+			notFoundLocally = true
275
+			diffID, err := p.blobSumService.GetDiffID(blobSum)
276
+			if err == nil {
277
+				rootFS.Append(diffID)
278
+				if l, err := p.config.LayerStore.Get(rootFS.ChainID()); err == nil {
279
+					notFoundLocally = false
280
+					logrus.Debugf("Layer already exists: %s", blobSum.String())
281
+					out.Write(p.sf.FormatProgress(stringid.TruncateID(blobSum.String()), "Already exists", nil))
282
+					defer layer.ReleaseAndLog(p.config.LayerStore, l)
283
+					continue
284
+				} else {
285
+					rootFS.DiffIDs = rootFS.DiffIDs[:len(rootFS.DiffIDs)-1]
286
+				}
287
+			}
288
+		}
289
+
290
+		out.Write(p.sf.FormatProgress(stringid.TruncateID(blobSum.String()), "Pulling fs layer", nil))
291
+
292
+		tmpFile, err := ioutil.TempFile("", "GetImageBlob")
293
+		if err != nil {
294
+			return false, err
295
+		}
296
+
297
+		d := &downloadInfo{
298
+			poolKey: poolKey,
299
+			digest:  blobSum,
300
+			tmpFile: tmpFile,
301
+			// TODO: this channel buffer seems to have fixed a hang seen with go1.5;
302
+			// it may point to a deeper problem: somehow we never read the
303
+			// error from the channel in the loop below
304
+			err: make(chan error, 1),
305
+		}
306
+
307
+		downloads = append(downloads, d)
308
+
309
+		broadcaster, found := p.config.Pool.add(d.poolKey)
310
+		broadcaster.Add(out)
311
+		d.broadcaster = broadcaster
312
+		if found {
313
+			d.err <- nil
314
+		} else {
315
+			go p.download(d)
316
+		}
317
+	}
318
+
319
+	for _, d := range downloads {
320
+		if err := <-d.err; err != nil {
321
+			return false, err
322
+		}
323
+
324
+		if d.layer == nil {
325
+			// Wait for a different pull to download and extract
326
+			// this layer.
327
+			err = d.broadcaster.Wait()
328
+			if err != nil {
329
+				return false, err
330
+			}
331
+
332
+			diffID, err := p.blobSumService.GetDiffID(d.digest)
333
+			if err != nil {
334
+				return false, err
335
+			}
336
+			rootFS.Append(diffID)
337
+
338
+			l, err := p.config.LayerStore.Get(rootFS.ChainID())
339
+			if err != nil {
340
+				return false, err
341
+			}
342
+
343
+			defer layer.ReleaseAndLog(p.config.LayerStore, l)
344
+
345
+			continue
346
+		}
347
+
348
+		d.tmpFile.Seek(0, 0)
349
+		reader := progressreader.New(progressreader.Config{
350
+			In:        d.tmpFile,
351
+			Out:       d.broadcaster,
352
+			Formatter: p.sf,
353
+			Size:      d.size,
354
+			NewLines:  false,
355
+			ID:        stringid.TruncateID(d.digest.String()),
356
+			Action:    "Extracting",
357
+		})
358
+
359
+		inflatedLayerData, err := archive.DecompressStream(reader)
360
+		if err != nil {
361
+			return false, fmt.Errorf("could not get decompression stream: %v", err)
362
+		}
363
+
364
+		l, err := p.config.LayerStore.Register(inflatedLayerData, rootFS.ChainID())
365
+		if err != nil {
366
+			return false, fmt.Errorf("failed to register layer: %v", err)
367
+		}
368
+		logrus.Debugf("layer %s registered successfully", l.DiffID())
369
+		rootFS.Append(l.DiffID())
370
+
371
+		// Cache mapping from this layer's DiffID to the blobsum
372
+		if err := p.blobSumService.Add(l.DiffID(), d.digest); err != nil {
373
+			return false, err
374
+		}
375
+
376
+		defer layer.ReleaseAndLog(p.config.LayerStore, l)
377
+
378
+		d.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(d.digest.String()), "Pull complete", nil))
379
+		d.broadcaster.Close()
380
+		tagUpdated = true
381
+	}
382
+
383
+	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), rootFS, history)
384
+	if err != nil {
385
+		return false, err
386
+	}
387
+
388
+	imageID, err := p.config.ImageStore.Create(config)
389
+	if err != nil {
390
+		return false, err
391
+	}
392
+
393
+	manifestDigest, _, err := digestFromManifest(unverifiedManifest, p.repoInfo.LocalName.Name())
394
+	if err != nil {
395
+		return false, err
396
+	}
397
+
398
+	// Check for new tag if no layers downloaded
399
+	var oldTagImageID image.ID
400
+	if !tagUpdated {
401
+		oldTagImageID, err = p.config.TagStore.Get(ref)
402
+		if err != nil || oldTagImageID != imageID {
403
+			tagUpdated = true
404
+		}
405
+	}
406
+
407
+	if tagUpdated {
408
+		if err = p.config.TagStore.Add(ref, imageID, true); err != nil {
409
+			return false, err
410
+		}
411
+	}
412
+
413
+	if manifestDigest != "" {
414
+		out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest))
415
+	}
416
+
417
+	return tagUpdated, nil
418
+}
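
The "Already exists" check in pullV2Tag leans on rootFS.ChainID(), which collapses the ordered list of layer DiffIDs into a single content-addressed ID. Roughly, ChainID(0) = DiffID(0) and ChainID(n) = SHA256(ChainID(n-1) + " " + DiffID(n)). Below is a sketch of that recurrence with made-up DiffIDs; it is a simplification of the layer package, not its actual code.

package main

import (
	"crypto/sha256"
	"fmt"
)

// chainID collapses an ordered list of layer DiffIDs into one ID: each step
// hashes the previous chain ID and the next DiffID separated by a space.
func chainID(diffIDs []string) string {
	if len(diffIDs) == 0 {
		return ""
	}
	id := diffIDs[0]
	for _, d := range diffIDs[1:] {
		sum := sha256.Sum256([]byte(id + " " + d))
		id = fmt.Sprintf("sha256:%x", sum)
	}
	return id
}

func main() {
	diffIDs := []string{
		"sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
		"sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
	}
	fmt.Println(chainID(diffIDs))
}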
419
+
420
+func verifyManifest(signedManifest *schema1.SignedManifest, ref reference.Reference) (m *schema1.Manifest, err error) {
421
+	// If pull by digest, then verify the manifest digest. NOTE: It is
422
+	// important to do this first, before any other content validation. If the
423
+	// digest cannot be verified, don't even bother with those other things.
424
+	if digested, isDigested := ref.(reference.Digested); isDigested {
425
+		verifier, err := digest.NewDigestVerifier(digested.Digest())
426
+		if err != nil {
427
+			return nil, err
428
+		}
429
+		payload, err := signedManifest.Payload()
430
+		if err != nil {
431
+			// If this failed, the signatures section was corrupted
432
+			// or missing. Treat the entire manifest as the payload.
433
+			payload = signedManifest.Raw
434
+		}
435
+		if _, err := verifier.Write(payload); err != nil {
436
+			return nil, err
437
+		}
438
+		if !verifier.Verified() {
439
+			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
440
+			logrus.Error(err)
441
+			return nil, err
442
+		}
443
+
444
+		var verifiedManifest schema1.Manifest
445
+		if err = json.Unmarshal(payload, &verifiedManifest); err != nil {
446
+			return nil, err
447
+		}
448
+		m = &verifiedManifest
449
+	} else {
450
+		m = &signedManifest.Manifest
451
+	}
452
+
453
+	if m.SchemaVersion != 1 {
454
+		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String())
455
+	}
456
+	if len(m.FSLayers) != len(m.History) {
457
+		return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String())
458
+	}
459
+	if len(m.FSLayers) == 0 {
460
+		return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
461
+	}
462
+	return m, nil
463
+}
464
+
465
+// fixManifestLayers removes repeated layers from the manifest and checks the
466
+// correctness of the parent chain.
467
+func fixManifestLayers(m *schema1.Manifest) error {
468
+	imgs := make([]*image.V1Image, len(m.FSLayers))
469
+	for i := range m.FSLayers {
470
+		img := &image.V1Image{}
471
+
472
+		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
473
+			return err
474
+		}
475
+
476
+		imgs[i] = img
477
+		if err := v1.ValidateID(img.ID); err != nil {
478
+			return err
479
+		}
480
+	}
481
+
482
+	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
483
+		// Windows base layer can point to a base layer parent that is not in manifest.
484
+		return errors.New("Invalid parent ID in the base layer of the image.")
485
+	}
486
+
487
+	// check for duplicate IDs in general so we return an error instead of deadlocking
488
+	idmap := make(map[string]struct{})
489
+
490
+	var lastID string
491
+	for _, img := range imgs {
492
+		// skip IDs that appear consecutively; those are handled later
493
+		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
494
+			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
495
+		}
496
+		lastID = img.ID
497
+		idmap[lastID] = struct{}{}
498
+	}
499
+
500
+	// backwards loop so that we keep the remaining indexes after removing items
501
+	for i := len(imgs) - 2; i >= 0; i-- {
502
+		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
503
+			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
504
+			m.History = append(m.History[:i], m.History[i+1:]...)
505
+		} else if imgs[i].Parent != imgs[i+1].ID {
506
+			return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent)
507
+		}
508
+	}
509
+
510
+	return nil
511
+}
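
The backwards loop in fixManifestLayers removes runs of repeated IDs while keeping the indexes still to be visited valid. The same deduplication on a plain slice of IDs, stripped of the manifest bookkeeping, may make the index arithmetic easier to follow; the IDs are hypothetical.

package main

import "fmt"

// dedupAdjacent removes runs of repeated IDs, walking backwards so the
// indexes that remain to be visited are unaffected by the removals.
func dedupAdjacent(ids []string) []string {
	for i := len(ids) - 2; i >= 0; i-- {
		if ids[i] == ids[i+1] {
			ids = append(ids[:i], ids[i+1:]...)
		}
	}
	return ids
}

func main() {
	fmt.Println(dedupAdjacent([]string{"c", "c", "b", "a"})) // [c b a]
}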
0 512
new file mode 100644
... ...
@@ -0,0 +1,174 @@
0
+package distribution
1
+
2
+import (
3
+	"encoding/json"
4
+	"io/ioutil"
5
+	"reflect"
6
+	"strings"
7
+	"testing"
8
+
9
+	"github.com/docker/distribution/digest"
10
+	"github.com/docker/distribution/manifest/schema1"
11
+	"github.com/docker/distribution/reference"
12
+)
13
+
14
+// TestFixManifestLayers checks that fixManifestLayers removes a duplicate
15
+// layer, and that it makes no changes to the manifest when called a second
16
+// time, after the duplicate is removed.
17
+func TestFixManifestLayers(t *testing.T) {
18
+	duplicateLayerManifest := schema1.Manifest{
19
+		FSLayers: []schema1.FSLayer{
20
+			{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
21
+			{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
22
+			{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
23
+		},
24
+		History: []schema1.History{
25
+			{V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"},
26
+			{V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"},
27
+			{V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026     go get -v github.com/tools/godep \\u0026\\u0026     godep restore \\u0026\\u0026     go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"},
28
+		},
29
+	}
30
+
31
+	duplicateLayerManifestExpectedOutput := schema1.Manifest{
32
+		FSLayers: []schema1.FSLayer{
33
+			{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
34
+			{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
35
+		},
36
+		History: []schema1.History{
37
+			{V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"},
38
+			{V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026     go get -v github.com/tools/godep \\u0026\\u0026     godep restore \\u0026\\u0026     go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"},
39
+		},
40
+	}
41
+
42
+	if err := fixManifestLayers(&duplicateLayerManifest); err != nil {
43
+		t.Fatalf("unexpected error from fixManifestLayers: %v", err)
44
+	}
45
+
46
+	if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) {
47
+		t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest")
48
+	}
49
+
50
+	// Run fixManifestLayers again and confirm that it doesn't change the
51
+	// manifest (which no longer has duplicate layers).
52
+	if err := fixManifestLayers(&duplicateLayerManifest); err != nil {
53
+		t.Fatalf("unexpected error from fixManifestLayers: %v", err)
54
+	}
55
+
56
+	if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) {
57
+		t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest (second pass)")
58
+	}
59
+}
60
+
61
+// TestFixManifestLayersBaseLayerParent makes sure that fixManifestLayers fails
62
+// if the base layer configuration specifies a parent.
63
+func TestFixManifestLayersBaseLayerParent(t *testing.T) {
64
+	duplicateLayerManifest := schema1.Manifest{
65
+		FSLayers: []schema1.FSLayer{
66
+			{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
67
+			{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
68
+			{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
69
+		},
70
+		History: []schema1.History{
71
+			{V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"},
72
+			{V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"},
73
+			{V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"parent\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026     go get -v github.com/tools/godep \\u0026\\u0026     godep restore \\u0026\\u0026     go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"},
74
+		},
75
+	}
76
+
77
+	if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID in the base layer of the image.") {
78
+		t.Fatalf("expected an invalid parent ID error from fixManifestLayers")
79
+	}
80
+}
81
+
82
+// TestFixManifestLayersBadParent makes sure that fixManifestLayers fails
83
+// if an image configuration specifies a parent that doesn't directly follow
84
+// that (deduplicated) image in the image history.
85
+func TestFixManifestLayersBadParent(t *testing.T) {
86
+	duplicateLayerManifest := schema1.Manifest{
87
+		FSLayers: []schema1.FSLayer{
88
+			{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
89
+			{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
90
+			{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
91
+		},
92
+		History: []schema1.History{
93
+			{V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"},
94
+			{V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"},
95
+			{V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026     go get -v github.com/tools/godep \\u0026\\u0026     godep restore \\u0026\\u0026     go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"},
96
+		},
97
+	}
98
+
99
+	if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID.") {
100
+		t.Fatalf("expected an invalid parent ID error from fixManifestLayers")
101
+	}
102
+}
103
+
104
+// TestValidateManifest verifies the validateManifest function
105
+func TestValidateManifest(t *testing.T) {
106
+	expectedDigest, err := reference.Parse("repo@sha256:02fee8c3220ba806531f606525eceb83f4feb654f62b207191b1c9209188dedd")
107
+	if err != nil {
108
+		t.Fatal("could not parse reference")
109
+	}
110
+	expectedFSLayer0 := digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
111
+
112
+	// Good manifest
113
+
114
+	goodManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/good_manifest")
115
+	if err != nil {
116
+		t.Fatal("error reading fixture:", err)
117
+	}
118
+
119
+	var goodSignedManifest schema1.SignedManifest
120
+	err = json.Unmarshal(goodManifestBytes, &goodSignedManifest)
121
+	if err != nil {
122
+		t.Fatal("error unmarshaling manifest:", err)
123
+	}
124
+
125
+	verifiedManifest, err := verifyManifest(&goodSignedManifest, expectedDigest)
126
+	if err != nil {
127
+		t.Fatal("validateManifest failed:", err)
128
+	}
129
+
130
+	if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 {
131
+		t.Fatal("unexpected FSLayer in good manifest")
132
+	}
133
+
134
+	// "Extra data" manifest
135
+
136
+	extraDataManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/extra_data_manifest")
137
+	if err != nil {
138
+		t.Fatal("error reading fixture:", err)
139
+	}
140
+
141
+	var extraDataSignedManifest schema1.SignedManifest
142
+	err = json.Unmarshal(extraDataManifestBytes, &extraDataSignedManifest)
143
+	if err != nil {
144
+		t.Fatal("error unmarshaling manifest:", err)
145
+	}
146
+
147
+	verifiedManifest, err = verifyManifest(&extraDataSignedManifest, expectedDigest)
148
+	if err != nil {
149
+		t.Fatal("validateManifest failed:", err)
150
+	}
151
+
152
+	if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 {
153
+		t.Fatal("unexpected FSLayer in extra data manifest")
154
+	}
155
+
156
+	// Bad manifest
157
+
158
+	badManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/bad_manifest")
159
+	if err != nil {
160
+		t.Fatal("error reading fixture:", err)
161
+	}
162
+
163
+	var badSignedManifest schema1.SignedManifest
164
+	err = json.Unmarshal(badManifestBytes, &badSignedManifest)
165
+	if err != nil {
166
+		t.Fatal("error unmarshaling manifest:", err)
167
+	}
168
+
169
+	verifiedManifest, err = verifyManifest(&badSignedManifest, expectedDigest)
170
+	if err == nil || !strings.HasPrefix(err.Error(), "image verification failed for digest") {
171
+		t.Fatal("expected validateManifest to fail with digest error")
172
+	}
173
+}
0 174
new file mode 100644
... ...
@@ -0,0 +1,12 @@
0
+// +build !windows
1
+
2
+package distribution
3
+
4
+import (
5
+	"github.com/docker/distribution/manifest/schema1"
6
+	"github.com/docker/docker/image"
7
+)
8
+
9
+func detectBaseLayer(is image.Store, m *schema1.Manifest, rootFS *image.RootFS) error {
10
+	return nil
11
+}
0 12
new file mode 100644
... ...
@@ -0,0 +1,29 @@
0
+// +build windows
1
+
2
+package distribution
3
+
4
+import (
5
+	"encoding/json"
6
+	"fmt"
7
+
8
+	"github.com/docker/distribution/manifest/schema1"
9
+	"github.com/docker/docker/image"
10
+)
11
+
12
+func detectBaseLayer(is image.Store, m *schema1.Manifest, rootFS *image.RootFS) error {
13
+	v1img := &image.V1Image{}
14
+	if err := json.Unmarshal([]byte(m.History[len(m.History)-1].V1Compatibility), v1img); err != nil {
15
+		return err
16
+	}
17
+	if v1img.Parent == "" {
18
+		return fmt.Errorf("Last layer %q does not have a base layer reference", v1img.ID)
19
+	}
20
+	// There must be an image that already references the base layer.
21
+	for _, img := range is.Map() {
22
+		if img.RootFS.BaseLayerID() == v1img.Parent {
23
+			rootFS.BaseLayer = img.RootFS.BaseLayer
24
+			return nil
25
+		}
26
+	}
27
+	return fmt.Errorf("Invalid base layer %q", v1img.Parent)
28
+}
0 29
new file mode 100644
... ...
@@ -0,0 +1,179 @@
0
+package distribution
1
+
2
+import (
3
+	"bufio"
4
+	"compress/gzip"
5
+	"fmt"
6
+	"io"
7
+
8
+	"github.com/Sirupsen/logrus"
9
+	"github.com/docker/distribution/digest"
10
+	"github.com/docker/distribution/reference"
11
+	"github.com/docker/docker/cliconfig"
12
+	"github.com/docker/docker/daemon/events"
13
+	"github.com/docker/docker/distribution/metadata"
14
+	"github.com/docker/docker/image"
15
+	"github.com/docker/docker/layer"
16
+	"github.com/docker/docker/pkg/streamformatter"
17
+	"github.com/docker/docker/registry"
18
+	"github.com/docker/docker/tag"
19
+	"github.com/docker/libtrust"
20
+)
21
+
22
+// ImagePushConfig stores push configuration.
23
+type ImagePushConfig struct {
24
+	// MetaHeaders store HTTP headers with metadata about the image
25
+	// (DockerHeaders with prefix X-Meta- in the request).
26
+	MetaHeaders map[string][]string
27
+	// AuthConfig holds authentication credentials for authenticating with
28
+	// the registry.
29
+	AuthConfig *cliconfig.AuthConfig
30
+	// OutStream is the output writer for showing the status of the push
31
+	// operation.
32
+	OutStream io.Writer
33
+	// RegistryService is the registry service to use for TLS configuration
34
+	// and endpoint lookup.
35
+	RegistryService *registry.Service
36
+	// EventsService is the events service to use for logging.
37
+	EventsService *events.Events
38
+	// MetadataStore is the storage backend for distribution-specific
39
+	// metadata.
40
+	MetadataStore metadata.Store
41
+	// LayerStore manages layers.
42
+	LayerStore layer.Store
43
+	// ImageStore manages images.
44
+	ImageStore image.Store
45
+	// TagStore manages tags.
46
+	TagStore tag.Store
47
+	// TrustKey is the private key for legacy signatures. This is typically
48
+	// an ephemeral key, since these signatures are no longer verified.
49
+	TrustKey libtrust.PrivateKey
50
+}
51
+
52
+// Pusher is an interface that abstracts pushing for different API versions.
53
+type Pusher interface {
54
+	// Push tries to push the image configured at the creation of Pusher.
55
+	// Push returns an error if any, as well as a boolean that determines whether to retry Push on the next configured endpoint.
56
+	//
57
+	// TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic.
58
+	Push() (fallback bool, err error)
59
+}
60
+
61
+const compressionBufSize = 32768
62
+
63
+// NewPusher creates a new Pusher interface that will push to either a v1 or v2
64
+// registry. The endpoint argument contains a Version field that determines
65
+// whether a v1 or v2 pusher will be created. The other parameters are passed
66
+// through to the underlying pusher implementation for use during the actual
67
+// push operation.
68
+func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig, sf *streamformatter.StreamFormatter) (Pusher, error) {
69
+	switch endpoint.Version {
70
+	case registry.APIVersion2:
71
+		return &v2Pusher{
72
+			blobSumService: metadata.NewBlobSumService(imagePushConfig.MetadataStore),
73
+			ref:            ref,
74
+			endpoint:       endpoint,
75
+			repoInfo:       repoInfo,
76
+			config:         imagePushConfig,
77
+			sf:             sf,
78
+			layersPushed:   make(map[digest.Digest]bool),
79
+		}, nil
80
+	case registry.APIVersion1:
81
+		return &v1Pusher{
82
+			v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore),
83
+			ref:         ref,
84
+			endpoint:    endpoint,
85
+			repoInfo:    repoInfo,
86
+			config:      imagePushConfig,
87
+			sf:          sf,
88
+		}, nil
89
+	}
90
+	return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL)
91
+}
92
+
93
+// Push initiates a push operation on the repository named by ref.
94
+// ref is the specific variant of the image to be pushed.
95
+// If no tag is provided, all tags will be pushed.
96
+func Push(ref reference.Named, imagePushConfig *ImagePushConfig) error {
97
+	// FIXME: Allow interrupting the current push when a new push of the same image is made.
98
+
99
+	var sf = streamformatter.NewJSONStreamFormatter()
100
+
101
+	// Resolve the Repository name from fqn to RepositoryInfo
102
+	repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref)
103
+	if err != nil {
104
+		return err
105
+	}
106
+
107
+	endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(repoInfo.CanonicalName)
108
+	if err != nil {
109
+		return err
110
+	}
111
+
112
+	imagePushConfig.OutStream.Write(sf.FormatStatus("", "The push refers to a repository [%s]", repoInfo.CanonicalName))
113
+
114
+	associations := imagePushConfig.TagStore.ReferencesByName(repoInfo.LocalName)
115
+	if len(associations) == 0 {
116
+		return fmt.Errorf("Repository does not exist: %s", repoInfo.LocalName)
117
+	}
118
+
119
+	var lastErr error
120
+	for _, endpoint := range endpoints {
121
+		logrus.Debugf("Trying to push %s to %s %s", repoInfo.CanonicalName, endpoint.URL, endpoint.Version)
122
+
123
+		pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig, sf)
124
+		if err != nil {
125
+			lastErr = err
126
+			continue
127
+		}
128
+		if fallback, err := pusher.Push(); err != nil {
129
+			if fallback {
130
+				lastErr = err
131
+				continue
132
+			}
133
+			logrus.Debugf("Not continuing with error: %v", err)
134
+			return err
135
+
136
+		}
137
+
138
+		imagePushConfig.EventsService.Log("push", repoInfo.LocalName.Name(), "")
139
+		return nil
140
+	}
141
+
142
+	if lastErr == nil {
143
+		lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.CanonicalName)
144
+	}
145
+	return lastErr
146
+}
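
Push walks the configured endpoints in order and only moves on to the next one when the pusher reports that falling back is allowed; any other error aborts immediately, and the last fallback error is returned if nothing succeeds. A minimal sketch of that fallback loop in isolation follows; the endpoint names and push callback are hypothetical, not the package's types.

package main

import (
	"errors"
	"fmt"
)

// tryEndpoints attempts each endpoint in order. A push may return
// (fallback=true, err) to say "this endpoint failed but the next one may
// work"; any other error aborts immediately, mirroring Push above.
func tryEndpoints(endpoints []string, push func(ep string) (fallback bool, err error)) error {
	var lastErr error
	for _, ep := range endpoints {
		fallback, err := push(ep)
		if err == nil {
			return nil
		}
		if !fallback {
			return err
		}
		lastErr = err
	}
	if lastErr == nil {
		lastErr = errors.New("no endpoints found")
	}
	return lastErr
}

func main() {
	err := tryEndpoints([]string{"v2-endpoint", "v1-endpoint"}, func(ep string) (bool, error) {
		if ep == "v2-endpoint" {
			return true, errors.New("v2 not supported") // fall back to the next endpoint
		}
		return false, nil
	})
	fmt.Println("push error:", err)
}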
147
+
148
+// compress returns an io.ReadCloser which will supply a compressed version of
149
+// the provided Reader. The caller must close the ReadCloser after reading the
150
+// compressed data.
151
+//
152
+// Note that this function returns a reader instead of taking a writer as an
153
+// argument so that it can be used with httpBlobWriter's ReadFrom method.
154
+// Using httpBlobWriter's Write method would send a PATCH request for every
155
+// Write call.
156
+func compress(in io.Reader) io.ReadCloser {
157
+	pipeReader, pipeWriter := io.Pipe()
158
+	// Use a bufio.Writer to avoid excessive chunking in HTTP request.
159
+	bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize)
160
+	compressor := gzip.NewWriter(bufWriter)
161
+
162
+	go func() {
163
+		_, err := io.Copy(compressor, in)
164
+		if err == nil {
165
+			err = compressor.Close()
166
+		}
167
+		if err == nil {
168
+			err = bufWriter.Flush()
169
+		}
170
+		if err != nil {
171
+			pipeWriter.CloseWithError(err)
172
+		} else {
173
+			pipeWriter.Close()
174
+		}
175
+	}()
176
+
177
+	return pipeReader
178
+}
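
compress drives gzip from a goroutine and hands back the read side of an io.Pipe, closing the pipe with the error if compression fails, so the caller sees the failure on its next Read. Here is a self-contained round-trip sketch of the same pattern using only the standard library; the helper mirrors compress but is not the package function itself.

package main

import (
	"bufio"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// gzipReader returns a reader producing the gzip-compressed form of in,
// driving the compression from a goroutine through an io.Pipe, as compress does.
func gzipReader(in io.Reader) io.ReadCloser {
	pr, pw := io.Pipe()
	bw := bufio.NewWriterSize(pw, 32768)
	gz := gzip.NewWriter(bw)
	go func() {
		_, err := io.Copy(gz, in)
		if err == nil {
			err = gz.Close()
		}
		if err == nil {
			err = bw.Flush()
		}
		if err != nil {
			pw.CloseWithError(err)
			return
		}
		pw.Close()
	}()
	return pr
}

func main() {
	compressed := gzipReader(strings.NewReader("layer data"))
	defer compressed.Close()

	// Round-trip: decompress what came out of the pipe.
	gz, err := gzip.NewReader(compressed)
	if err != nil {
		panic(err)
	}
	defer gz.Close()
	out, _ := ioutil.ReadAll(gz)
	fmt.Println(string(out))
}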
0 179
new file mode 100644
... ...
@@ -0,0 +1,466 @@
0
+package distribution
1
+
2
+import (
3
+	"fmt"
4
+	"io"
5
+	"io/ioutil"
6
+	"sync"
7
+
8
+	"github.com/Sirupsen/logrus"
9
+	"github.com/docker/distribution/digest"
10
+	"github.com/docker/distribution/reference"
11
+	"github.com/docker/distribution/registry/client/transport"
12
+	"github.com/docker/docker/distribution/metadata"
13
+	"github.com/docker/docker/image"
14
+	"github.com/docker/docker/image/v1"
15
+	"github.com/docker/docker/layer"
16
+	"github.com/docker/docker/pkg/ioutils"
17
+	"github.com/docker/docker/pkg/progressreader"
18
+	"github.com/docker/docker/pkg/streamformatter"
19
+	"github.com/docker/docker/pkg/stringid"
20
+	"github.com/docker/docker/registry"
21
+)
22
+
23
+type v1Pusher struct {
24
+	v1IDService *metadata.V1IDService
25
+	endpoint    registry.APIEndpoint
26
+	ref         reference.Named
27
+	repoInfo    *registry.RepositoryInfo
28
+	config      *ImagePushConfig
29
+	sf          *streamformatter.StreamFormatter
30
+	session     *registry.Session
31
+
32
+	out io.Writer
33
+}
34
+
35
+func (p *v1Pusher) Push() (fallback bool, err error) {
36
+	tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name)
37
+	if err != nil {
38
+		return false, err
39
+	}
40
+	// Adds Docker-specific headers as well as user-specified headers (metaHeaders)
41
+	tr := transport.NewTransport(
42
+		// TODO(tiborvass): was NoTimeout
43
+		registry.NewTransport(tlsConfig),
44
+		registry.DockerHeaders(p.config.MetaHeaders)...,
45
+	)
46
+	client := registry.HTTPClient(tr)
47
+	v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders)
48
+	if err != nil {
49
+		logrus.Debugf("Could not get v1 endpoint: %v", err)
50
+		return true, err
51
+	}
52
+	p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint)
53
+	if err != nil {
54
+		// TODO(dmcgowan): Check if should fallback
55
+		return true, err
56
+	}
57
+	if err := p.pushRepository(); err != nil {
58
+		// TODO(dmcgowan): Check if should fallback
59
+		return false, err
60
+	}
61
+	return false, nil
62
+}
63
+
64
+// v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an
65
+// image being pushed to a v1 registry.
66
+type v1Image interface {
67
+	Config() []byte
68
+	Layer() layer.Layer
69
+	V1ID() string
70
+}
71
+
72
+type v1ImageCommon struct {
73
+	layer  layer.Layer
74
+	config []byte
75
+	v1ID   string
76
+}
77
+
78
+func (common *v1ImageCommon) Config() []byte {
79
+	return common.config
80
+}
81
+
82
+func (common *v1ImageCommon) V1ID() string {
83
+	return common.v1ID
84
+}
85
+
86
+func (common *v1ImageCommon) Layer() layer.Layer {
87
+	return common.layer
88
+}
89
+
90
+// v1TopImage defines a runnable (top layer) image being pushed to a v1
91
+// registry.
92
+type v1TopImage struct {
93
+	v1ImageCommon
94
+	imageID image.ID
95
+}
96
+
97
+func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) {
98
+	v1ID := digest.Digest(imageID).Hex()
99
+	parentV1ID := ""
100
+	if parent != nil {
101
+		parentV1ID = parent.V1ID()
102
+	}
103
+
104
+	config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false)
105
+	if err != nil {
106
+		return nil, err
107
+	}
108
+
109
+	return &v1TopImage{
110
+		v1ImageCommon: v1ImageCommon{
111
+			v1ID:   v1ID,
112
+			config: config,
113
+			layer:  l,
114
+		},
115
+		imageID: imageID,
116
+	}, nil
117
+}
118
+
119
+// v1DependencyImage defines a dependency layer being pushed to a v1 registry.
120
+type v1DependencyImage struct {
121
+	v1ImageCommon
122
+}
123
+
124
+func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) (*v1DependencyImage, error) {
125
+	v1ID := digest.Digest(l.ChainID()).Hex()
126
+
127
+	config := ""
128
+	if parent != nil {
129
+		config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID())
130
+	} else {
131
+		config = fmt.Sprintf(`{"id":"%s"}`, v1ID)
132
+	}
133
+	return &v1DependencyImage{
134
+		v1ImageCommon: v1ImageCommon{
135
+			v1ID:   v1ID,
136
+			config: []byte(config),
137
+			layer:  l,
138
+		},
139
+	}, nil
140
+}
141
+
142
+// Retrieve all the images to be uploaded in the correct order
143
+func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []layer.Layer, err error) {
144
+	tagsByImage = make(map[image.ID][]string)
145
+
146
+	// Ignore digest references
147
+	_, isDigested := p.ref.(reference.Digested)
148
+	if isDigested {
149
+		return
150
+	}
151
+
152
+	tagged, isTagged := p.ref.(reference.Tagged)
153
+	if isTagged {
154
+		// Push a specific tag
155
+		var imgID image.ID
156
+		imgID, err = p.config.TagStore.Get(p.ref)
157
+		if err != nil {
158
+			return
159
+		}
160
+
161
+		imageList, err = p.imageListForTag(imgID, nil, &referencedLayers)
162
+		if err != nil {
163
+			return
164
+		}
165
+
166
+		tagsByImage[imgID] = []string{tagged.Tag()}
167
+
168
+		return
169
+	}
170
+
171
+	imagesSeen := make(map[image.ID]struct{})
172
+	dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage)
173
+
174
+	associations := p.config.TagStore.ReferencesByName(p.ref)
175
+	for _, association := range associations {
176
+		if tagged, isTagged = association.Ref.(reference.Tagged); !isTagged {
177
+			// Ignore digest references.
178
+			continue
179
+		}
180
+
181
+		tagsByImage[association.ImageID] = append(tagsByImage[association.ImageID], tagged.Tag())
182
+
183
+		if _, present := imagesSeen[association.ImageID]; present {
184
+			// Skip generating image list for already-seen image
185
+			continue
186
+		}
187
+		imagesSeen[association.ImageID] = struct{}{}
188
+
189
+		imageListForThisTag, err := p.imageListForTag(association.ImageID, dependenciesSeen, &referencedLayers)
190
+		if err != nil {
191
+			return nil, nil, nil, err
192
+		}
193
+
194
+		// append to main image list
195
+		imageList = append(imageList, imageListForThisTag...)
196
+	}
197
+	if len(imageList) == 0 {
198
+		return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag")
199
+	}
200
+	logrus.Debugf("Image list: %v", imageList)
201
+	logrus.Debugf("Tags by image: %v", tagsByImage)
202
+
203
+	return
204
+}
205
+
206
+func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]layer.Layer) (imageListForThisTag []v1Image, err error) {
207
+	img, err := p.config.ImageStore.Get(imgID)
208
+	if err != nil {
209
+		return nil, err
210
+	}
211
+
212
+	topLayerID := img.RootFS.ChainID()
213
+
214
+	var l layer.Layer
215
+	if topLayerID == "" {
216
+		l = layer.EmptyLayer
217
+	} else {
218
+		l, err = p.config.LayerStore.Get(topLayerID)
219
+		*referencedLayers = append(*referencedLayers, l)
220
+		if err != nil {
221
+			return nil, fmt.Errorf("failed to get top layer from image: %v", err)
222
+		}
223
+	}
224
+
225
+	dependencyImages, parent, err := generateDependencyImages(l.Parent(), dependenciesSeen)
226
+	if err != nil {
227
+		return nil, err
228
+	}
229
+
230
+	topImage, err := newV1TopImage(imgID, img, l, parent)
231
+	if err != nil {
232
+		return nil, err
233
+	}
234
+
235
+	imageListForThisTag = append(dependencyImages, topImage)
236
+
237
+	return
238
+}
239
+
240
+func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage, err error) {
241
+	if l == nil {
242
+		return nil, nil, nil
243
+	}
244
+
245
+	imageListForThisTag, parent, err = generateDependencyImages(l.Parent(), dependenciesSeen)
246
+
247
+	if dependenciesSeen != nil {
248
+		if dependencyImage, present := dependenciesSeen[l.ChainID()]; present {
249
+			// This layer is already on the list, we can ignore it
250
+			// and all its parents.
251
+			return imageListForThisTag, dependencyImage, nil
252
+		}
253
+	}
254
+
255
+	dependencyImage, err := newV1DependencyImage(l, parent)
256
+	if err != nil {
257
+		return nil, nil, err
258
+	}
259
+	imageListForThisTag = append(imageListForThisTag, dependencyImage)
260
+
261
+	if dependenciesSeen != nil {
262
+		dependenciesSeen[l.ChainID()] = dependencyImage
263
+	}
264
+
265
+	return imageListForThisTag, dependencyImage, nil
266
+}
267
+
268
+// createImageIndex returns an index of an image's layer IDs and tags.
269
+func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData {
270
+	var imageIndex []*registry.ImgData
271
+	for _, img := range images {
272
+		v1ID := img.V1ID()
273
+
274
+		if topImage, isTopImage := img.(*v1TopImage); isTopImage {
275
+			if tags, hasTags := tags[topImage.imageID]; hasTags {
276
+				// If an image has tags you must add an entry in the image index
277
+				// for each tag
278
+				for _, tag := range tags {
279
+					imageIndex = append(imageIndex, &registry.ImgData{
280
+						ID:  v1ID,
281
+						Tag: tag,
282
+					})
283
+				}
284
+				continue
285
+			}
286
+		}
287
+
288
+		// If the image does not have a tag it still needs to be sent to the
289
+		// registry with an empty tag so that it is associated with the repository
290
+		imageIndex = append(imageIndex, &registry.ImgData{
291
+			ID:  v1ID,
292
+			Tag: "",
293
+		})
294
+	}
295
+	return imageIndex
296
+}
297
+
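createImageIndex fans a tagged top image out into one entry per tag, and gives every other image a single entry with an empty tag so the registry still associates it with the repository. A compact sketch of that fan-out, using a hypothetical entry type in place of registry.ImgData:

package main

import "fmt"

type entry struct{ ID, Tag string }

func indexFor(id string, tags []string) []entry {
	if len(tags) == 0 {
		// Untagged images still get an entry, with an empty tag.
		return []entry{{ID: id}}
	}
	var out []entry
	for _, t := range tags {
		out = append(out, entry{ID: id, Tag: t})
	}
	return out
}

func main() {
	fmt.Println(indexFor("abc", []string{"latest", "v1"})) // one entry per tag
	fmt.Println(indexFor("def", nil))                      // single empty-tag entry
}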
298
+// lookupImageOnEndpoint checks the specified endpoint to see if an image exists,
299
+// and, if it is absent, sends the image ID to the channel of images to be pushed.
300
+func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) {
301
+	defer wg.Done()
302
+	for image := range images {
303
+		v1ID := image.V1ID()
304
+		if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil {
305
+			logrus.Errorf("Error in LookupRemoteImage: %s", err)
306
+			imagesToPush <- v1ID
307
+		} else {
308
+			p.out.Write(p.sf.FormatStatus("", "Image %s already pushed, skipping", stringid.TruncateID(v1ID)))
309
+		}
310
+	}
311
+}
312
+
313
+func (p *v1Pusher) pushImageToEndpoint(endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error {
314
+	workerCount := len(imageList)
315
+	// start a maximum of 5 workers to check if images exist on the specified endpoint.
316
+	if workerCount > 5 {
317
+		workerCount = 5
318
+	}
319
+	var (
320
+		wg           = &sync.WaitGroup{}
321
+		imageData    = make(chan v1Image, workerCount*2)
322
+		imagesToPush = make(chan string, workerCount*2)
323
+		pushes       = make(chan map[string]struct{}, 1)
324
+	)
325
+	for i := 0; i < workerCount; i++ {
326
+		wg.Add(1)
327
+		go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush)
328
+	}
329
+	// start a goroutine that consumes the images to push
330
+	go func() {
331
+		shouldPush := make(map[string]struct{})
332
+		for id := range imagesToPush {
333
+			shouldPush[id] = struct{}{}
334
+		}
335
+		pushes <- shouldPush
336
+	}()
337
+	for _, v1Image := range imageList {
338
+		imageData <- v1Image
339
+	}
340
+	// close the channel to notify the workers that there will be no more images to check.
341
+	close(imageData)
342
+	wg.Wait()
343
+	close(imagesToPush)
344
+	// wait for all the images that require pushes to be collected into a consumable map.
345
+	shouldPush := <-pushes
346
+	// finish by pushing any images and tags to the endpoint. The order in which the images are
347
+	// pushed is important, which is why we still iterate over the ordered list of image IDs.
348
+	for _, img := range imageList {
349
+		v1ID := img.V1ID()
350
+		if _, push := shouldPush[v1ID]; push {
351
+			if _, err := p.pushImage(img, endpoint); err != nil {
352
+				// FIXME: Continue on error?
353
+				return err
354
+			}
355
+		}
356
+		if topImage, isTopImage := img.(*v1TopImage); isTopImage {
357
+			for _, tag := range tags[topImage.imageID] {
358
+				p.out.Write(p.sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+p.repoInfo.RemoteName.Name()+"/tags/"+tag))
359
+				if err := p.session.PushRegistryTag(p.repoInfo.RemoteName, v1ID, tag, endpoint); err != nil {
360
+					return err
361
+				}
362
+			}
363
+		}
364
+	}
365
+	return nil
366
+}
367
+
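pushImageToEndpoint caps remote existence checks at five concurrent workers, funnels the IDs that still need pushing into a channel, collects them into a set, and then pushes in the original, ordered list. A stdlib-only sketch of that fan-out/fan-in shape; the lookup here is a stand-in predicate rather than a registry call:

package main

import (
	"fmt"
	"sync"
)

func main() {
	ids := []string{"a", "b", "c", "d", "e", "f"}
	needsPush := func(id string) bool { return id != "c" } // stand-in for a remote lookup

	workerCount := 5
	if len(ids) < workerCount {
		workerCount = len(ids)
	}

	var wg sync.WaitGroup
	work := make(chan string, workerCount*2)
	missing := make(chan string, workerCount*2)
	result := make(chan map[string]struct{}, 1)

	for i := 0; i < workerCount; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for id := range work {
				if needsPush(id) {
					missing <- id
				}
			}
		}()
	}
	// Collector: drain the "missing" channel into a consumable set.
	go func() {
		set := make(map[string]struct{})
		for id := range missing {
			set[id] = struct{}{}
		}
		result <- set
	}()
	for _, id := range ids {
		work <- id
	}
	close(work) // no more images to check
	wg.Wait()
	close(missing)
	shouldPush := <-result

	// Push in the original order, consulting the set.
	for _, id := range ids {
		if _, ok := shouldPush[id]; ok {
			fmt.Println("push", id)
		}
	}
}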
368
+// pushRepository pushes layers that do not already exist on the registry.
369
+func (p *v1Pusher) pushRepository() error {
370
+	p.out = ioutils.NewWriteFlusher(p.config.OutStream)
371
+	imgList, tags, referencedLayers, err := p.getImageList()
372
+	defer func() {
373
+		for _, l := range referencedLayers {
374
+			p.config.LayerStore.Release(l)
375
+		}
376
+	}()
377
+	if err != nil {
378
+		return err
379
+	}
380
+	p.out.Write(p.sf.FormatStatus("", "Sending image list"))
381
+
382
+	imageIndex := createImageIndex(imgList, tags)
383
+	for _, data := range imageIndex {
384
+		logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
385
+	}
386
+
387
+	// Register all the images in a repository with the registry
388
+	// If an image is not in this list it will not be associated with the repository
389
+	repoData, err := p.session.PushImageJSONIndex(p.repoInfo.RemoteName, imageIndex, false, nil)
390
+	if err != nil {
391
+		return err
392
+	}
393
+	p.out.Write(p.sf.FormatStatus("", "Pushing repository %s", p.repoInfo.CanonicalName))
394
+	// push the repository to each of the endpoints only if it does not exist.
395
+	for _, endpoint := range repoData.Endpoints {
396
+		if err := p.pushImageToEndpoint(endpoint, imgList, tags, repoData); err != nil {
397
+			return err
398
+		}
399
+	}
400
+	_, err = p.session.PushImageJSONIndex(p.repoInfo.RemoteName, imageIndex, true, repoData.Endpoints)
401
+	return err
402
+}
403
+
404
+func (p *v1Pusher) pushImage(v1Image v1Image, ep string) (checksum string, err error) {
405
+	v1ID := v1Image.V1ID()
406
+
407
+	jsonRaw := v1Image.Config()
408
+	p.out.Write(p.sf.FormatProgress(stringid.TruncateID(v1ID), "Pushing", nil))
409
+
410
+	// The general rule is to use the ID for graph accesses and the compatibilityID for
411
+	// calls to session.registry()
412
+	imgData := &registry.ImgData{
413
+		ID: v1ID,
414
+	}
415
+
416
+	// Send the json
417
+	if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil {
418
+		if err == registry.ErrAlreadyExists {
419
+			p.out.Write(p.sf.FormatProgress(stringid.TruncateID(v1ID), "Image already pushed, skipping", nil))
420
+			return "", nil
421
+		}
422
+		return "", err
423
+	}
424
+
425
+	l := v1Image.Layer()
426
+
427
+	arch, err := l.TarStream()
428
+	if err != nil {
429
+		return "", err
430
+	}
431
+
432
+	// don't care if this fails; best effort
433
+	size, _ := l.Size()
434
+
435
+	// Send the layer
436
+	logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size)
437
+
438
+	reader := progressreader.New(progressreader.Config{
439
+		In:        ioutil.NopCloser(arch),
440
+		Out:       p.out,
441
+		Formatter: p.sf,
442
+		Size:      size,
443
+		NewLines:  false,
444
+		ID:        stringid.TruncateID(v1ID),
445
+		Action:    "Pushing",
446
+	})
447
+
448
+	checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw)
449
+	if err != nil {
450
+		return "", err
451
+	}
452
+	imgData.Checksum = checksum
453
+	imgData.ChecksumPayload = checksumPayload
454
+	// Send the checksum
455
+	if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil {
456
+		return "", err
457
+	}
458
+
459
+	if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.ChainID()); err != nil {
460
+		logrus.Warnf("Could not set v1 ID mapping: %v", err)
461
+	}
462
+
463
+	p.out.Write(p.sf.FormatProgress(stringid.TruncateID(v1ID), "Image successfully pushed", nil))
464
+	return imgData.Checksum, nil
465
+}
0 466
new file mode 100644
... ...
@@ -0,0 +1,410 @@
0
+package distribution
1
+
2
+import (
3
+	"encoding/json"
4
+	"errors"
5
+	"fmt"
6
+	"io"
7
+	"io/ioutil"
8
+	"time"
9
+
10
+	"github.com/Sirupsen/logrus"
11
+	"github.com/docker/distribution"
12
+	"github.com/docker/distribution/digest"
13
+	"github.com/docker/distribution/manifest"
14
+	"github.com/docker/distribution/manifest/schema1"
15
+	"github.com/docker/distribution/reference"
16
+	"github.com/docker/docker/distribution/metadata"
17
+	"github.com/docker/docker/image"
18
+	"github.com/docker/docker/image/v1"
19
+	"github.com/docker/docker/layer"
20
+	"github.com/docker/docker/pkg/progressreader"
21
+	"github.com/docker/docker/pkg/streamformatter"
22
+	"github.com/docker/docker/pkg/stringid"
23
+	"github.com/docker/docker/registry"
24
+	"github.com/docker/docker/tag"
25
+	"golang.org/x/net/context"
26
+)
27
+
28
+type v2Pusher struct {
29
+	blobSumService *metadata.BlobSumService
30
+	ref            reference.Named
31
+	endpoint       registry.APIEndpoint
32
+	repoInfo       *registry.RepositoryInfo
33
+	config         *ImagePushConfig
34
+	sf             *streamformatter.StreamFormatter
35
+	repo           distribution.Repository
36
+
37
+	// layersPushed is the set of layers known to exist on the remote side.
38
+	// This avoids redundant queries when pushing multiple tags that
39
+	// involve the same layers.
40
+	layersPushed map[digest.Digest]bool
41
+}
42
+
43
+func (p *v2Pusher) Push() (fallback bool, err error) {
44
+	p.repo, err = NewV2Repository(p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
45
+	if err != nil {
46
+		logrus.Debugf("Error getting v2 registry: %v", err)
47
+		return true, err
48
+	}
49
+
50
+	localName := p.repoInfo.LocalName.Name()
51
+
52
+	var associations []tag.Association
53
+	if _, isTagged := p.ref.(reference.Tagged); isTagged {
54
+		imageID, err := p.config.TagStore.Get(p.ref)
55
+		if err != nil {
56
+			return false, fmt.Errorf("tag does not exist: %s", p.ref.String())
57
+		}
58
+
59
+		associations = []tag.Association{
60
+			{
61
+				Ref:     p.ref,
62
+				ImageID: imageID,
63
+			},
64
+		}
65
+	} else {
66
+		// Push all tags
67
+		associations = p.config.TagStore.ReferencesByName(p.ref)
68
+	}
69
+	if err != nil {
70
+		return false, fmt.Errorf("error getting tags for %s: %s", localName, err)
71
+	}
72
+	if len(associations) == 0 {
73
+		return false, fmt.Errorf("no tags to push for %s", localName)
74
+	}
75
+
76
+	for _, association := range associations {
77
+		if err := p.pushV2Tag(association); err != nil {
78
+			return false, err
79
+		}
80
+	}
81
+
82
+	return false, nil
83
+}
84
+
85
+func (p *v2Pusher) pushV2Tag(association tag.Association) error {
86
+	ref := association.Ref
87
+	logrus.Debugf("Pushing repository: %s", ref.String())
88
+
89
+	img, err := p.config.ImageStore.Get(association.ImageID)
90
+	if err != nil {
91
+		return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err)
92
+	}
93
+
94
+	out := p.config.OutStream
95
+
96
+	var l layer.Layer
97
+
98
+	topLayerID := img.RootFS.ChainID()
99
+	if topLayerID == "" {
100
+		l = layer.EmptyLayer
101
+	} else {
102
+		l, err = p.config.LayerStore.Get(topLayerID)
103
+		if err != nil {
104
+			return fmt.Errorf("failed to get top layer from image: %v", err)
105
+		}
106
+		defer layer.ReleaseAndLog(p.config.LayerStore, l)
107
+	}
108
+
109
+	fsLayers := make(map[layer.DiffID]schema1.FSLayer)
110
+
111
+	// Push empty layer if necessary
112
+	for _, h := range img.History {
113
+		if h.EmptyLayer {
114
+			dgst, err := p.pushLayerIfNecessary(out, layer.EmptyLayer)
115
+			if err != nil {
116
+				return err
117
+			}
118
+			p.layersPushed[dgst] = true
119
+			fsLayers[layer.EmptyLayer.DiffID()] = schema1.FSLayer{BlobSum: dgst}
120
+			break
121
+		}
122
+	}
123
+
124
+	for i := 0; i < len(img.RootFS.DiffIDs); i++ {
125
+		dgst, err := p.pushLayerIfNecessary(out, l)
126
+		if err != nil {
127
+			return err
128
+		}
129
+
130
+		p.layersPushed[dgst] = true
131
+		fsLayers[l.DiffID()] = schema1.FSLayer{BlobSum: dgst}
132
+
133
+		l = l.Parent()
134
+	}
135
+
136
+	var tag string
137
+	if tagged, isTagged := ref.(reference.Tagged); isTagged {
138
+		tag = tagged.Tag()
139
+	}
140
+	m, err := CreateV2Manifest(p.repo.Name(), tag, img, fsLayers)
141
+	if err != nil {
142
+		return err
143
+	}
144
+
145
+	logrus.Infof("Signed manifest for %s using daemon's key: %s", ref.String(), p.config.TrustKey.KeyID())
146
+	signed, err := schema1.Sign(m, p.config.TrustKey)
147
+	if err != nil {
148
+		return err
149
+	}
150
+
151
+	manifestDigest, manifestSize, err := digestFromManifest(signed, p.repo.Name())
152
+	if err != nil {
153
+		return err
154
+	}
155
+	if manifestDigest != "" {
156
+		if tagged, isTagged := ref.(reference.Tagged); isTagged {
157
+			// NOTE: do not change this format without first changing the trust client
158
+			// code. This information is used to determine what was pushed and should be signed.
159
+			out.Write(p.sf.FormatStatus("", "%s: digest: %s size: %d", tagged.Tag(), manifestDigest, manifestSize))
160
+		}
161
+	}
162
+
163
+	manSvc, err := p.repo.Manifests(context.Background())
164
+	if err != nil {
165
+		return err
166
+	}
167
+	return manSvc.Put(signed)
168
+}
169
+
170
+func (p *v2Pusher) pushLayerIfNecessary(out io.Writer, l layer.Layer) (digest.Digest, error) {
171
+	logrus.Debugf("Pushing layer: %s", l.DiffID())
172
+
173
+	// Do we have any blobsums associated with this layer's DiffID?
174
+	possibleBlobsums, err := p.blobSumService.GetBlobSums(l.DiffID())
175
+	if err == nil {
176
+		dgst, exists, err := p.blobSumAlreadyExists(possibleBlobsums)
177
+		if err != nil {
178
+			out.Write(p.sf.FormatProgress(stringid.TruncateID(string(l.DiffID())), "Image push failed", nil))
179
+			return "", err
180
+		}
181
+		if exists {
182
+			out.Write(p.sf.FormatProgress(stringid.TruncateID(string(l.DiffID())), "Layer already exists", nil))
183
+			return dgst, nil
184
+		}
185
+	}
186
+
187
+	// if the digest was empty or not saved, or if the blob does not exist on the remote repository,
188
+	// then push the blob.
189
+	pushDigest, err := p.pushV2Layer(p.repo.Blobs(context.Background()), l)
190
+	if err != nil {
191
+		return "", err
192
+	}
193
+	// Cache mapping from this layer's DiffID to the blobsum
194
+	if err := p.blobSumService.Add(l.DiffID(), pushDigest); err != nil {
195
+		return "", err
196
+	}
197
+
198
+	return pushDigest, nil
199
+}
200
+
201
+// blobSumAlreadyExists checks if the registry already knows about any of the
202
+// blobsums passed in the "blobsums" slice. If it finds one that the registry
203
+// knows about, it returns the known digest and "true".
204
+func (p *v2Pusher) blobSumAlreadyExists(blobsums []digest.Digest) (digest.Digest, bool, error) {
205
+	for _, dgst := range blobsums {
206
+		if p.layersPushed[dgst] {
207
+			// it is already known that the push is not needed and
208
+			// therefore doing a stat is unnecessary
209
+			return dgst, true, nil
210
+		}
211
+		_, err := p.repo.Blobs(context.Background()).Stat(context.Background(), dgst)
212
+		switch err {
213
+		case nil:
214
+			return dgst, true, nil
215
+		case distribution.ErrBlobUnknown:
216
+			// nop
217
+		default:
218
+			return "", false, err
219
+		}
220
+	}
221
+	return "", false, nil
222
+}
223
+
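blobSumAlreadyExists consults the local layersPushed set first and only falls back to a remote stat for candidates it has not seen, returning the first digest the registry is known to have. A minimal sketch of that check-cache-then-stat pattern with a stand-in remote lookup:

package main

import "fmt"

// firstKnown returns the first candidate already present locally or confirmed
// by a (stand-in) remote existence check.
func firstKnown(candidates []string, local map[string]bool, remoteHas func(string) (bool, error)) (string, bool, error) {
	for _, c := range candidates {
		if local[c] {
			return c, true, nil // skip the remote stat entirely
		}
		ok, err := remoteHas(c)
		if err != nil {
			return "", false, err
		}
		if ok {
			return c, true, nil
		}
	}
	return "", false, nil
}

func main() {
	local := map[string]bool{"sha256:aaa": true}
	remote := func(d string) (bool, error) { return d == "sha256:bbb", nil }
	fmt.Println(firstKnown([]string{"sha256:ccc", "sha256:bbb"}, local, remote))
}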
224
+// CreateV2Manifest creates a V2 manifest from an image config and set of
225
+// FSLayer digests.
226
+// FIXME: This should be moved to the distribution repo, since it will also
227
+// be useful for converting new manifests to the old format.
228
+func CreateV2Manifest(name, tag string, img *image.Image, fsLayers map[layer.DiffID]schema1.FSLayer) (*schema1.Manifest, error) {
229
+	if len(img.History) == 0 {
230
+		return nil, errors.New("empty history when trying to create V2 manifest")
231
+	}
232
+
233
+	// Generate IDs for each layer
234
+	// For non-top-level layers, create fake V1Compatibility strings that
235
+	// fit the format and don't collide with anything else, but don't
236
+	// result in runnable images on their own.
237
+	type v1Compatibility struct {
238
+		ID              string    `json:"id"`
239
+		Parent          string    `json:"parent,omitempty"`
240
+		Comment         string    `json:"comment,omitempty"`
241
+		Created         time.Time `json:"created"`
242
+		ContainerConfig struct {
243
+			Cmd []string
244
+		} `json:"container_config,omitempty"`
245
+		ThrowAway bool `json:"throwaway,omitempty"`
246
+	}
247
+
248
+	fsLayerList := make([]schema1.FSLayer, len(img.History))
249
+	history := make([]schema1.History, len(img.History))
250
+
251
+	parent := ""
252
+	layerCounter := 0
253
+	for i, h := range img.History {
254
+		if i == len(img.History)-1 {
255
+			break
256
+		}
257
+
258
+		var diffID layer.DiffID
259
+		if h.EmptyLayer {
260
+			diffID = layer.EmptyLayer.DiffID()
261
+		} else {
262
+			if len(img.RootFS.DiffIDs) <= layerCounter {
263
+				return nil, errors.New("too many non-empty layers in History section")
264
+			}
265
+			diffID = img.RootFS.DiffIDs[layerCounter]
266
+			layerCounter++
267
+		}
268
+
269
+		fsLayer, present := fsLayers[diffID]
270
+		if !present {
271
+			return nil, fmt.Errorf("missing layer in CreateV2Manifest: %s", diffID.String())
272
+		}
273
+		dgst, err := digest.FromBytes([]byte(fsLayer.BlobSum.Hex() + " " + parent))
274
+		if err != nil {
275
+			return nil, err
276
+		}
277
+		v1ID := dgst.Hex()
278
+
279
+		v1Compatibility := v1Compatibility{
280
+			ID:      v1ID,
281
+			Parent:  parent,
282
+			Comment: h.Comment,
283
+			Created: h.Created,
284
+		}
285
+		v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy}
286
+		if h.EmptyLayer {
287
+			v1Compatibility.ThrowAway = true
288
+		}
289
+		jsonBytes, err := json.Marshal(&v1Compatibility)
290
+		if err != nil {
291
+			return nil, err
292
+		}
293
+
294
+		reversedIndex := len(img.History) - i - 1
295
+		history[reversedIndex].V1Compatibility = string(jsonBytes)
296
+		fsLayerList[reversedIndex] = fsLayer
297
+
298
+		parent = v1ID
299
+	}
300
+
301
+	latestHistory := img.History[len(img.History)-1]
302
+
303
+	var diffID layer.DiffID
304
+	if latestHistory.EmptyLayer {
305
+		diffID = layer.EmptyLayer.DiffID()
306
+	} else {
307
+		if len(img.RootFS.DiffIDs) <= layerCounter {
308
+			return nil, errors.New("too many non-empty layers in History section")
309
+		}
310
+		diffID = img.RootFS.DiffIDs[layerCounter]
311
+	}
312
+	fsLayer, present := fsLayers[diffID]
313
+	if !present {
314
+		return nil, fmt.Errorf("missing layer in CreateV2Manifest: %s", diffID.String())
315
+	}
316
+
317
+	dgst, err := digest.FromBytes([]byte(fsLayer.BlobSum.Hex() + " " + parent + " " + string(img.RawJSON())))
318
+	if err != nil {
319
+		return nil, err
320
+	}
321
+	fsLayerList[0] = fsLayer
322
+
323
+	// Top-level v1compatibility string should be a modified version of the
324
+	// image config.
325
+	transformedConfig, err := v1.MakeV1ConfigFromConfig(img, dgst.Hex(), parent, latestHistory.EmptyLayer)
326
+	if err != nil {
327
+		return nil, err
328
+	}
329
+
330
+	history[0].V1Compatibility = string(transformedConfig)
331
+
332
+	// Windows-only base layer setup
333
+	if err := setupBaseLayer(history, *img.RootFS); err != nil {
334
+		return nil, err
335
+	}
336
+
337
+	return &schema1.Manifest{
338
+		Versioned: manifest.Versioned{
339
+			SchemaVersion: 1,
340
+		},
341
+		Name:         name,
342
+		Tag:          tag,
343
+		Architecture: img.Architecture,
344
+		FSLayers:     fsLayerList,
345
+		History:      history,
346
+	}, nil
347
+}
348
+
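CreateV2Manifest synthesizes a v1 ID for each layer by hashing the layer's blobsum hex together with its parent's ID (the top entry also mixes in the raw image config), so the same layer stack always yields the same ID chain. A short sketch of that derivation, assuming the canonical digest algorithm is sha256 as it is here:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// v1ID chains a layer's blobsum hex with its parent's ID, as the manifest
// builder above does for the fake V1Compatibility entries.
func v1ID(blobSumHex, parent string) string {
	sum := sha256.Sum256([]byte(blobSumHex + " " + parent))
	return hex.EncodeToString(sum[:])
}

func main() {
	base := v1ID("86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa", "")
	child := v1ID("a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", base)
	fmt.Println(base)
	fmt.Println(child)
}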
349
+func rawJSON(value interface{}) *json.RawMessage {
350
+	jsonval, err := json.Marshal(value)
351
+	if err != nil {
352
+		return nil
353
+	}
354
+	return (*json.RawMessage)(&jsonval)
355
+}
356
+
357
+func (p *v2Pusher) pushV2Layer(bs distribution.BlobService, l layer.Layer) (digest.Digest, error) {
358
+	out := p.config.OutStream
359
+	displayID := stringid.TruncateID(string(l.DiffID()))
360
+
361
+	out.Write(p.sf.FormatProgress(displayID, "Preparing", nil))
362
+
363
+	arch, err := l.TarStream()
364
+	if err != nil {
365
+		return "", err
366
+	}
367
+
368
+	// Send the layer
369
+	layerUpload, err := bs.Create(context.Background())
370
+	if err != nil {
371
+		return "", err
372
+	}
373
+	defer layerUpload.Close()
374
+
375
+	// don't care if this fails; best effort
376
+	size, _ := l.DiffSize()
377
+
378
+	reader := progressreader.New(progressreader.Config{
379
+		In:        ioutil.NopCloser(arch), // we take care of closing it ourselves.
380
+		Out:       out,
381
+		Formatter: p.sf,
382
+		Size:      size,
383
+		NewLines:  false,
384
+		ID:        displayID,
385
+		Action:    "Pushing",
386
+	})
387
+
388
+	compressedReader := compress(reader)
389
+
390
+	digester := digest.Canonical.New()
391
+	tee := io.TeeReader(compressedReader, digester.Hash())
392
+
393
+	out.Write(p.sf.FormatProgress(displayID, "Pushing", nil))
394
+	nn, err := layerUpload.ReadFrom(tee)
395
+	compressedReader.Close()
396
+	if err != nil {
397
+		return "", err
398
+	}
399
+
400
+	dgst := digester.Digest()
401
+	if _, err := layerUpload.Commit(context.Background(), distribution.Descriptor{Digest: dgst}); err != nil {
402
+		return "", err
403
+	}
404
+
405
+	logrus.Debugf("uploaded layer %s (%s), %d bytes", l.DiffID(), dgst, nn)
406
+	out.Write(p.sf.FormatProgress(displayID, "Pushed", nil))
407
+
408
+	return dgst, nil
409
+}
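pushV2Layer streams the layer tar through compression, tees the compressed bytes through a digester while uploading, and finally commits the upload against the digest it computed on the fly. A stdlib-only sketch of that compress-tee-digest pipeline, writing into a buffer as a stand-in for the registry blob upload:

package main

import (
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("layer tar bytes would go here")

	// Compress through a pipe so the upload can stream.
	pr, pw := io.Pipe()
	go func() {
		gz := gzip.NewWriter(pw)
		_, err := io.Copy(gz, src)
		gz.Close()
		pw.CloseWithError(err)
	}()

	// Tee the compressed stream through a digester while "uploading".
	digester := sha256.New()
	tee := io.TeeReader(pr, digester)

	var upload bytes.Buffer // stand-in for the registry blob upload
	n, err := io.Copy(&upload, tee)
	if err != nil {
		panic(err)
	}
	fmt.Printf("uploaded %d bytes, digest sha256:%s\n", n, hex.EncodeToString(digester.Sum(nil)))
}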
0 410
new file mode 100644
... ...
@@ -0,0 +1,176 @@
0
+package distribution
1
+
2
+import (
3
+	"reflect"
4
+	"testing"
5
+
6
+	"github.com/docker/distribution/digest"
7
+	"github.com/docker/distribution/manifest/schema1"
8
+	"github.com/docker/docker/image"
9
+	"github.com/docker/docker/layer"
10
+)
11
+
12
+func TestCreateV2Manifest(t *testing.T) {
13
+	imgJSON := `{
14
+    "architecture": "amd64",
15
+    "config": {
16
+        "AttachStderr": false,
17
+        "AttachStdin": false,
18
+        "AttachStdout": false,
19
+        "Cmd": [
20
+            "/bin/sh",
21
+            "-c",
22
+            "echo hi"
23
+        ],
24
+        "Domainname": "",
25
+        "Entrypoint": null,
26
+        "Env": [
27
+            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
28
+            "derived=true",
29
+            "asdf=true"
30
+        ],
31
+        "Hostname": "23304fc829f9",
32
+        "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246",
33
+        "Labels": {},
34
+        "OnBuild": [],
35
+        "OpenStdin": false,
36
+        "StdinOnce": false,
37
+        "Tty": false,
38
+        "User": "",
39
+        "Volumes": null,
40
+        "WorkingDir": ""
41
+    },
42
+    "container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001",
43
+    "container_config": {
44
+        "AttachStderr": false,
45
+        "AttachStdin": false,
46
+        "AttachStdout": false,
47
+        "Cmd": [
48
+            "/bin/sh",
49
+            "-c",
50
+            "#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"
51
+        ],
52
+        "Domainname": "",
53
+        "Entrypoint": null,
54
+        "Env": [
55
+            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
56
+            "derived=true",
57
+            "asdf=true"
58
+        ],
59
+        "Hostname": "23304fc829f9",
60
+        "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246",
61
+        "Labels": {},
62
+        "OnBuild": [],
63
+        "OpenStdin": false,
64
+        "StdinOnce": false,
65
+        "Tty": false,
66
+        "User": "",
67
+        "Volumes": null,
68
+        "WorkingDir": ""
69
+    },
70
+    "created": "2015-11-04T23:06:32.365666163Z",
71
+    "docker_version": "1.9.0-dev",
72
+    "history": [
73
+        {
74
+            "created": "2015-10-31T22:22:54.690851953Z",
75
+            "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
76
+        },
77
+        {
78
+            "created": "2015-10-31T22:22:55.613815829Z",
79
+            "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]"
80
+        },
81
+        {
82
+            "created": "2015-11-04T23:06:30.934316144Z",
83
+            "created_by": "/bin/sh -c #(nop) ENV derived=true",
84
+            "empty_layer": true
85
+        },
86
+        {
87
+            "created": "2015-11-04T23:06:31.192097572Z",
88
+            "created_by": "/bin/sh -c #(nop) ENV asdf=true",
89
+            "empty_layer": true
90
+        },
91
+        {
92
+            "created": "2015-11-04T23:06:32.083868454Z",
93
+            "created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"
94
+        },
95
+        {
96
+            "created": "2015-11-04T23:06:32.365666163Z",
97
+            "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]",
98
+            "empty_layer": true
99
+        }
100
+    ],
101
+    "os": "linux",
102
+    "rootfs": {
103
+        "diff_ids": [
104
+            "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
105
+            "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
106
+            "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
107
+        ],
108
+        "type": "layers"
109
+    }
110
+}`
111
+
112
+	// To fill in rawJSON
113
+	img, err := image.NewFromJSON([]byte(imgJSON))
114
+	if err != nil {
115
+		t.Fatalf("json decoding failed: %v", err)
116
+	}
117
+
118
+	fsLayers := map[layer.DiffID]schema1.FSLayer{
119
+		layer.DiffID("sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1"): {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
120
+		layer.DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"): {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
121
+		layer.DiffID("sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"): {BlobSum: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
122
+	}
123
+
124
+	manifest, err := CreateV2Manifest("testrepo", "testtag", img, fsLayers)
125
+	if err != nil {
126
+		t.Fatalf("CreateV2Manifest returned error: %v", err)
127
+	}
128
+
129
+	if manifest.Versioned.SchemaVersion != 1 {
130
+		t.Fatal("SchemaVersion != 1")
131
+	}
132
+	if manifest.Name != "testrepo" {
133
+		t.Fatal("incorrect name in manifest")
134
+	}
135
+	if manifest.Tag != "testtag" {
136
+		t.Fatal("incorrect tag in manifest")
137
+	}
138
+	if manifest.Architecture != "amd64" {
139
+		t.Fatal("incorrect arch in manifest")
140
+	}
141
+
142
+	expectedFSLayers := []schema1.FSLayer{
143
+		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
144
+		{BlobSum: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
145
+		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
146
+		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
147
+		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
148
+		{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
149
+	}
150
+
151
+	if len(manifest.FSLayers) != len(expectedFSLayers) {
152
+		t.Fatalf("wrong number of FSLayers: %d", len(manifest.FSLayers))
153
+	}
154
+	if !reflect.DeepEqual(manifest.FSLayers, expectedFSLayers) {
155
+		t.Fatal("wrong FSLayers list")
156
+	}
157
+
158
+	expectedV1Compatibility := []string{
159
+		`{"architecture":"amd64","config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","echo hi"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"container":"e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001","container_config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"created":"2015-11-04T23:06:32.365666163Z","docker_version":"1.9.0-dev","id":"d728140d3fd23dfcac505954af0b2224b3579b177029eded62916579eb19ac64","os":"linux","parent":"0594e66a9830fa5ba73b66349eb221ea4beb6bac8d2148b90a0f371f8d67bcd5","throwaway":true}`,
160
+		`{"id":"0594e66a9830fa5ba73b66349eb221ea4beb6bac8d2148b90a0f371f8d67bcd5","parent":"39bc0dbed47060dd8952b048e73744ae471fe50354d2c267d308292c53b83ce1","created":"2015-11-04T23:06:32.083868454Z","container_config":{"Cmd":["/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"]}}`,
161
+		`{"id":"39bc0dbed47060dd8952b048e73744ae471fe50354d2c267d308292c53b83ce1","parent":"875d7f206c023dc979e1677567a01364074f82b61e220c9b83a4610170490381","created":"2015-11-04T23:06:31.192097572Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV asdf=true"]},"throwaway":true}`,
162
+		`{"id":"875d7f206c023dc979e1677567a01364074f82b61e220c9b83a4610170490381","parent":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","created":"2015-11-04T23:06:30.934316144Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV derived=true"]},"throwaway":true}`,
163
+		`{"id":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","parent":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:55.613815829Z","container_config":{"Cmd":["/bin/sh -c #(nop) CMD [\"sh\"]"]}}`,
164
+		`{"id":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:54.690851953Z","container_config":{"Cmd":["/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"]}}`,
165
+	}
166
+
167
+	if len(manifest.History) != len(expectedV1Compatibility) {
168
+		t.Fatalf("wrong number of history entries: %d", len(manifest.History))
169
+	}
170
+	for i := range expectedV1Compatibility {
171
+		if manifest.History[i].V1Compatibility != expectedV1Compatibility[i] {
172
+			t.Fatalf("wrong V1Compatibility %d. expected:\n%s\ngot:\n%s", i, expectedV1Compatibility[i], manifest.History[i].V1Compatibility)
173
+		}
174
+	}
175
+}
0 176
new file mode 100644
... ...
@@ -0,0 +1,12 @@
0
+// +build !windows
1
+
2
+package distribution
3
+
4
+import (
5
+	"github.com/docker/distribution/manifest/schema1"
6
+	"github.com/docker/docker/image"
7
+)
8
+
9
+func setupBaseLayer(history []schema1.History, rootFS image.RootFS) error {
10
+	return nil
11
+}
0 12
new file mode 100644
... ...
@@ -0,0 +1,28 @@
0
+// +build windows
1
+
2
+package distribution
3
+
4
+import (
5
+	"encoding/json"
6
+
7
+	"github.com/docker/distribution/manifest/schema1"
8
+	"github.com/docker/docker/image"
9
+)
10
+
11
+func setupBaseLayer(history []schema1.History, rootFS image.RootFS) error {
12
+	var v1Config map[string]*json.RawMessage
13
+	if err := json.Unmarshal([]byte(history[len(history)-1].V1Compatibility), &v1Config); err != nil {
14
+		return err
15
+	}
16
+	baseID, err := json.Marshal(rootFS.BaseLayerID())
17
+	if err != nil {
18
+		return err
19
+	}
20
+	v1Config["parent"] = (*json.RawMessage)(&baseID)
21
+	configJSON, err := json.Marshal(v1Config)
22
+	if err != nil {
23
+		return err
24
+	}
25
+	history[len(history)-1].V1Compatibility = string(configJSON)
26
+	return nil
27
+}
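The Windows variant of setupBaseLayer patches only the "parent" field of the topmost V1Compatibility string: it unmarshals into a map of json.RawMessage so every other field round-trips byte-for-byte, swaps in the base layer ID, and re-marshals. A small sketch of that patch-one-field technique on a toy document:

package main

import (
	"encoding/json"
	"fmt"
)

// setParent rewrites only the "parent" field, leaving other fields untouched.
func setParent(v1Compat, parent string) (string, error) {
	var cfg map[string]*json.RawMessage
	if err := json.Unmarshal([]byte(v1Compat), &cfg); err != nil {
		return "", err
	}
	raw, err := json.Marshal(parent)
	if err != nil {
		return "", err
	}
	cfg["parent"] = (*json.RawMessage)(&raw)
	out, err := json.Marshal(cfg)
	return string(out), err
}

func main() {
	out, err := setParent(`{"id":"abc","os":"windows"}`, "base-layer-id")
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}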
0 28
new file mode 100644
... ...
@@ -0,0 +1,115 @@
0
+package distribution
1
+
2
+import (
3
+	"errors"
4
+	"net"
5
+	"net/http"
6
+	"net/url"
7
+	"strings"
8
+	"time"
9
+
10
+	"github.com/Sirupsen/logrus"
11
+	"github.com/docker/distribution"
12
+	"github.com/docker/distribution/digest"
13
+	"github.com/docker/distribution/manifest/schema1"
14
+	"github.com/docker/distribution/registry/client"
15
+	"github.com/docker/distribution/registry/client/auth"
16
+	"github.com/docker/distribution/registry/client/transport"
17
+	"github.com/docker/docker/cliconfig"
18
+	"github.com/docker/docker/registry"
19
+	"golang.org/x/net/context"
20
+)
21
+
22
+type dumbCredentialStore struct {
23
+	auth *cliconfig.AuthConfig
24
+}
25
+
26
+func (dcs dumbCredentialStore) Basic(*url.URL) (string, string) {
27
+	return dcs.auth.Username, dcs.auth.Password
28
+}
29
+
30
+// NewV2Repository returns a repository (v2 only). It creates a HTTP transport
31
+// providing timeout settings and authentication support, and also verifies the
32
+// remote API version.
33
+func NewV2Repository(repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *cliconfig.AuthConfig, actions ...string) (distribution.Repository, error) {
34
+	ctx := context.Background()
35
+
36
+	repoName := repoInfo.CanonicalName
37
+	// If the endpoint does not support CanonicalName, use the RemoteName instead
38
+	if endpoint.TrimHostname {
39
+		repoName = repoInfo.RemoteName
40
+	}
41
+
42
+	// TODO(dmcgowan): Call close idle connections when complete, use keep alive
43
+	base := &http.Transport{
44
+		Proxy: http.ProxyFromEnvironment,
45
+		Dial: (&net.Dialer{
46
+			Timeout:   30 * time.Second,
47
+			KeepAlive: 30 * time.Second,
48
+			DualStack: true,
49
+		}).Dial,
50
+		TLSHandshakeTimeout: 10 * time.Second,
51
+		TLSClientConfig:     endpoint.TLSConfig,
52
+		// TODO(dmcgowan): Call close idle connections when complete and use keep alive
53
+		DisableKeepAlives: true,
54
+	}
55
+
56
+	modifiers := registry.DockerHeaders(metaHeaders)
57
+	authTransport := transport.NewTransport(base, modifiers...)
58
+	pingClient := &http.Client{
59
+		Transport: authTransport,
60
+		Timeout:   5 * time.Second,
61
+	}
62
+	endpointStr := strings.TrimRight(endpoint.URL, "/") + "/v2/"
63
+	req, err := http.NewRequest("GET", endpointStr, nil)
64
+	if err != nil {
65
+		return nil, err
66
+	}
67
+	resp, err := pingClient.Do(req)
68
+	if err != nil {
69
+		return nil, err
70
+	}
71
+	defer resp.Body.Close()
72
+
73
+	versions := auth.APIVersions(resp, endpoint.VersionHeader)
74
+	if endpoint.VersionHeader != "" && len(endpoint.Versions) > 0 {
75
+		var foundVersion bool
76
+		for _, version := range endpoint.Versions {
77
+			for _, pingVersion := range versions {
78
+				if version == pingVersion {
79
+					foundVersion = true
80
+				}
81
+			}
82
+		}
83
+		if !foundVersion {
84
+			return nil, errors.New("endpoint does not support v2 API")
85
+		}
86
+	}
87
+
88
+	challengeManager := auth.NewSimpleChallengeManager()
89
+	if err := challengeManager.AddResponse(resp); err != nil {
90
+		return nil, err
91
+	}
92
+
93
+	creds := dumbCredentialStore{auth: authConfig}
94
+	tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName.Name(), actions...)
95
+	basicHandler := auth.NewBasicHandler(creds)
96
+	modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
97
+	tr := transport.NewTransport(base, modifiers...)
98
+
99
+	return client.NewRepository(ctx, repoName.Name(), endpoint.URL, tr)
100
+}
101
+
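NewV2Repository pings the endpoint's /v2/ root and, when a version header is configured, requires one of the advertised API versions to match before it builds the authenticated transport. A stdlib-only sketch of that ping-and-check step against a hypothetical registry URL; the header parsing is simplified relative to auth.APIVersions:

package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"
)

// supportsV2 pings <endpoint>/v2/ and checks the named version header for "registry/2.0".
func supportsV2(endpoint, versionHeader string) (bool, error) {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get(strings.TrimRight(endpoint, "/") + "/v2/")
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()

	for _, v := range resp.Header[http.CanonicalHeaderKey(versionHeader)] {
		for _, field := range strings.Fields(v) {
			if field == "registry/2.0" {
				return true, nil
			}
		}
	}
	return false, nil
}

func main() {
	ok, err := supportsV2("https://registry.example.com", "Docker-Distribution-Api-Version")
	fmt.Println(ok, err)
}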
102
+func digestFromManifest(m *schema1.SignedManifest, localName string) (digest.Digest, int, error) {
103
+	payload, err := m.Payload()
104
+	if err != nil {
105
+		// If this failed, the signatures section was corrupted
106
+		// or missing. Treat the entire manifest as the payload.
107
+		payload = m.Raw
108
+	}
109
+	manifestDigest, err := digest.FromBytes(payload)
110
+	if err != nil {
111
+		logrus.Infof("Could not compute manifest digest for %s:%s : %v", localName, m.Tag, err)
112
+	}
113
+	return manifestDigest, len(payload), nil
114
+}