
Add image store

The image store abstracts image handling. It keeps track of the
available images, and makes it possible to delete existing images or
register new ones. The image store holds references to the underlying
layers for each image.
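To make the abstraction concrete, here is a rough sketch (not part of this diff) of how the store could be wired up, based only on the interfaces added in this commit; the no-op layer getter stands in for a real layer store, mirroring the mock used in store_test.go.

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
)

// noopLS is a stand-in LayerGetReleaser; a real daemon would pass its
// layer store here so the image store can retain and release layers.
type noopLS struct{}

func (noopLS) Get(layer.ChainID) (layer.Layer, error)        { return nil, nil }
func (noopLS) Release(layer.Layer) ([]layer.Metadata, error) { return nil, nil }

func main() {
	root, err := ioutil.TempDir("", "image-store-example")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(root)

	// The filesystem backend persists image configs under <root>/content
	// and per-image metadata under <root>/metadata.
	backend, err := image.NewFSStoreBackend(root)
	if err != nil {
		panic(err)
	}

	store, err := image.NewImageStore(backend, noopLS{})
	if err != nil {
		panic(err)
	}

	// Register a new image from its config JSON; the returned ID is the
	// digest of the config bytes (content-addressable).
	id, err := store.Create([]byte(`{"comment": "example", "rootfs": {"type": "layers"}}`))
	if err != nil {
		panic(err)
	}
	fmt.Println("created image", id)

	// Delete returns the metadata of any layers that were released.
	if _, err := store.Delete(id); err != nil {
		panic(err)
	}
}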

The image/v1 package provides compatibility functions for interoperating
with older (non-content-addressable) image structures.
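As a sketch of that compatibility path (again not part of this diff, using only the helpers referenced from the tar exporter), a legacy v1 configuration plus the diff IDs of its layers can be turned into a content-addressable configuration whose digest becomes the new image ID:

package main

import (
	"fmt"

	"github.com/docker/docker/image"
	"github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
)

// convertV1 mirrors the conversion done in the legacy load path: build a
// RootFS from the known diff IDs, derive a history entry from the v1
// config, and emit a config suitable for image.Store.Create.
func convertV1(v1JSON []byte, diffIDs []layer.DiffID) ([]byte, error) {
	rootFS := image.NewRootFS()
	for _, id := range diffIDs {
		rootFS.Append(id)
	}

	h, err := v1.HistoryFromConfig(v1JSON, false)
	if err != nil {
		return nil, err
	}

	return v1.MakeConfigFromV1Config(v1JSON, rootFS, []image.History{h})
}

func main() {
	// A trimmed v1 config, similar to the fixtures removed by this change.
	v1JSON := []byte(`{"id":"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9","created":"2015-08-19T16:49:11.368300679Z","docker_version":"1.6.2","os":"linux"}`)

	config, err := convertV1(v1JSON, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(config))
}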

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>

Tonis Tiigi authored on 2015/11/19 07:18:07
Showing 23 changed files
1 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-sha256:f2722a8ec6926e02fa9f2674072cbc2a25cf0f449f27350f613cd843b02c9105
2 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-{"architecture":"amd64","config":{"Hostname":"fb1f7270da95","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["foo=bar"],"Cmd":null,"Image":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"fb1f7270da9519308361b99dc8e0d30f12c24dfd28537c2337ece995ac853a16","container_config":{"Hostname":"fb1f7270da95","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["foo=bar"],"Cmd":["/bin/sh","-c","#(nop) ADD file:11998b2a4d664a75cd0c3f4e4cb1837434e0f997ba157a0ac1d3c68a07aa2f4f in /"],"Image":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-09-08T21:30:30.807853054Z","docker_version":"1.9.0-dev","layer_id":"sha256:31176893850e05d308cdbfef88877e460d50c8063883fb13eb5753097da6422a","os":"linux","parent_id":"sha256:ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02"}
2 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-sha256:31176893850e05d308cdbfef88877e460d50c8063883fb13eb5753097da6422a
2 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-sha256:ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02
2 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-{"id":"8dfb96b5d09e6cf6f376d81f1e2770ee5ede309f9bd9e079688c9782649ab326","parent":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","created":"2015-09-08T21:30:30.807853054Z","container":"fb1f7270da9519308361b99dc8e0d30f12c24dfd28537c2337ece995ac853a16","container_config":{"Hostname":"fb1f7270da95","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["foo=bar"],"Cmd":["/bin/sh","-c","#(nop) ADD file:11998b2a4d664a75cd0c3f4e4cb1837434e0f997ba157a0ac1d3c68a07aa2f4f in /"],"Image":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"docker_version":"1.9.0-dev","config":{"Hostname":"fb1f7270da95","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["foo=bar"],"Cmd":null,"Image":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux"}
2 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-sha256:fd6ebfedda8ea140a9380767e15bd32c6e899303cfe34bc4580c931f2f816f89
2 1
deleted file mode 100644
... ...
@@ -1,2 +0,0 @@
1
-{"architecture":"amd64","config":{"Hostname":"03797203757d","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","GOLANG_VERSION=1.4.1","GOPATH=/go"],"Cmd":null,"Image":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","Volumes":null,"WorkingDir":"/go","Entrypoint":["/go/bin/dnsdock"],"OnBuild":[],"Labels":{}},"container":"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253","container_config":{"Hostname":"03797203757d","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","GOLANG_VERSION=1.4.1","GOPATH=/go"],"Cmd":["/bin/sh","-c","#(nop) ENTRYPOINT [\"/go/bin/dnsdock\"]"],"Image":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","Volumes":null,"WorkingDir":"/go","Entrypoint":["/go/bin/dnsdock"],"OnBuild":[],"Labels":{}},"created":"2015-08-19T16:49:11.368300679Z","docker_version":"1.6.2","layer_id":"sha256:31176893850e05d308cdbfef88877e460d50c8063883fb13eb5753097da6422a","os":"linux","parent_id":"sha256:ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02"}
2
-
3 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-sha256:31176893850e05d308cdbfef88877e460d50c8063883fb13eb5753097da6422a
2 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-sha256:ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02
2 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-{"id":"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9","parent":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","created":"2015-08-19T16:49:11.368300679Z","container":"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253","container_config":{"Hostname":"03797203757d","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","GOLANG_VERSION=1.4.1","GOPATH=/go"],"Cmd":["/bin/sh","-c","#(nop) ENTRYPOINT [\"/go/bin/dnsdock\"]"],"Image":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","Volumes":null,"WorkingDir":"/go","Entrypoint":["/go/bin/dnsdock"],"NetworkDisabled":false,"MacAddress":"","OnBuild":[],"Labels":{}},"docker_version":"1.6.2","config":{"Hostname":"03797203757d","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","GOLANG_VERSION=1.4.1","GOPATH=/go"],"Cmd":null,"Image":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","Volumes":null,"WorkingDir":"/go","Entrypoint":["/go/bin/dnsdock"],"NetworkDisabled":false,"MacAddress":"","OnBuild":[],"Labels":{}},"architecture":"amd64","os":"linux","Size":0}
2 1
new file mode 100644
... ...
@@ -0,0 +1,192 @@
0
+package image
1
+
2
+import (
3
+	"fmt"
4
+	"io/ioutil"
5
+	"os"
6
+	"path/filepath"
7
+	"sync"
8
+
9
+	"github.com/Sirupsen/logrus"
10
+	"github.com/docker/distribution/digest"
11
+)
12
+
13
+// IDWalkFunc is a function called by StoreBackend.Walk
14
+type IDWalkFunc func(id ID) error
15
+
16
+// StoreBackend provides an interface for image.Store persistence
17
+type StoreBackend interface {
18
+	Walk(f IDWalkFunc) error
19
+	Get(id ID) ([]byte, error)
20
+	Set(data []byte) (ID, error)
21
+	Delete(id ID) error
22
+	SetMetadata(id ID, key string, data []byte) error
23
+	GetMetadata(id ID, key string) ([]byte, error)
24
+	DeleteMetadata(id ID, key string) error
25
+}
26
+
27
+// fs implements StoreBackend using the filesystem.
28
+type fs struct {
29
+	sync.RWMutex
30
+	root string
31
+}
32
+
33
+const (
34
+	contentDirName  = "content"
35
+	metadataDirName = "metadata"
36
+)
37
+
38
+// NewFSStoreBackend returns a new filesystem-based backend for image.Store
39
+func NewFSStoreBackend(root string) (StoreBackend, error) {
40
+	return newFSStore(root)
41
+}
42
+
43
+func newFSStore(root string) (*fs, error) {
44
+	s := &fs{
45
+		root: root,
46
+	}
47
+	if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil {
48
+		return nil, err
49
+	}
50
+	if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil {
51
+		return nil, err
52
+	}
53
+	return s, nil
54
+}
55
+
56
+func (s *fs) contentFile(id ID) string {
57
+	dgst := digest.Digest(id)
58
+	return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex())
59
+}
60
+
61
+func (s *fs) metadataDir(id ID) string {
62
+	dgst := digest.Digest(id)
63
+	return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex())
64
+}
65
+
66
+// Walk calls the supplied callback for each image ID in the storage backend.
67
+func (s *fs) Walk(f IDWalkFunc) error {
68
+	// Only Canonical digest (sha256) is currently supported
69
+	s.RLock()
70
+	dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical)))
71
+	s.RUnlock()
72
+	if err != nil {
73
+		return err
74
+	}
75
+	for _, v := range dir {
76
+		dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name())
77
+		if err := dgst.Validate(); err != nil {
78
+			logrus.Debugf("Skipping invalid digest %s: %s", dgst, err)
79
+			continue
80
+		}
81
+		if err := f(ID(dgst)); err != nil {
82
+			return err
83
+		}
84
+	}
85
+	return nil
86
+}
87
+
88
+// Get returns the content stored under a given ID.
89
+func (s *fs) Get(id ID) ([]byte, error) {
90
+	s.RLock()
91
+	defer s.RUnlock()
92
+
93
+	return s.get(id)
94
+}
95
+
96
+func (s *fs) get(id ID) ([]byte, error) {
97
+	content, err := ioutil.ReadFile(s.contentFile(id))
98
+	if err != nil {
99
+		return nil, err
100
+	}
101
+
102
+	// todo: maybe optional
103
+	validated, err := digest.FromBytes(content)
104
+	if err != nil {
105
+		return nil, err
106
+	}
107
+	if ID(validated) != id {
108
+		return nil, fmt.Errorf("failed to verify image: %v", id)
109
+	}
110
+
111
+	return content, nil
112
+}
113
+
114
+// Set stores content under a given ID.
115
+func (s *fs) Set(data []byte) (ID, error) {
116
+	s.Lock()
117
+	defer s.Unlock()
118
+
119
+	if len(data) == 0 {
120
+		return "", fmt.Errorf("Invalid empty data")
121
+	}
122
+
123
+	dgst, err := digest.FromBytes(data)
124
+	if err != nil {
125
+		return "", err
126
+	}
127
+	id := ID(dgst)
128
+	filePath := s.contentFile(id)
129
+	tempFilePath := s.contentFile(id) + ".tmp"
130
+	if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil {
131
+		return "", err
132
+	}
133
+	if err := os.Rename(tempFilePath, filePath); err != nil {
134
+		return "", err
135
+	}
136
+
137
+	return id, nil
138
+}
139
+
140
+// Delete removes content and metadata files associated with the ID.
141
+func (s *fs) Delete(id ID) error {
142
+	s.Lock()
143
+	defer s.Unlock()
144
+
145
+	if err := os.RemoveAll(s.metadataDir(id)); err != nil {
146
+		return err
147
+	}
148
+	if err := os.Remove(s.contentFile(id)); err != nil {
149
+		return err
150
+	}
151
+	return nil
152
+}
153
+
154
+// SetMetadata sets metadata for a given ID. It fails if there's no base file.
155
+func (s *fs) SetMetadata(id ID, key string, data []byte) error {
156
+	s.Lock()
157
+	defer s.Unlock()
158
+	if _, err := s.get(id); err != nil {
159
+		return err
160
+	}
161
+
162
+	baseDir := filepath.Join(s.metadataDir(id))
163
+	if err := os.MkdirAll(baseDir, 0700); err != nil {
164
+		return err
165
+	}
166
+	filePath := filepath.Join(s.metadataDir(id), key)
167
+	tempFilePath := filePath + ".tmp"
168
+	if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil {
169
+		return err
170
+	}
171
+	return os.Rename(tempFilePath, filePath)
172
+}
173
+
174
+// GetMetadata returns metadata for a given ID.
175
+func (s *fs) GetMetadata(id ID, key string) ([]byte, error) {
176
+	s.RLock()
177
+	defer s.RUnlock()
178
+
179
+	if _, err := s.get(id); err != nil {
180
+		return nil, err
181
+	}
182
+	return ioutil.ReadFile(filepath.Join(s.metadataDir(id), key))
183
+}
184
+
185
+// DeleteMetadata removes the metadata associated with an ID.
186
+func (s *fs) DeleteMetadata(id ID, key string) error {
187
+	s.Lock()
188
+	defer s.Unlock()
189
+
190
+	return os.RemoveAll(filepath.Join(s.metadataDir(id), key))
191
+}
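For reference, the filesystem backend added above can also be exercised on its own; a minimal sketch (not part of this diff), assuming the exported constructor and StoreBackend methods shown here:

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/docker/docker/image"
)

func main() {
	root, err := ioutil.TempDir("", "fs-backend-example")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(root)

	backend, err := image.NewFSStoreBackend(root)
	if err != nil {
		panic(err)
	}

	// Content is stored under the digest of its bytes, so identical input
	// always yields the same ID.
	id, err := backend.Set([]byte(`{"example": true}`))
	if err != nil {
		panic(err)
	}

	// Metadata lives beside the content and can only be set for an ID
	// whose content already exists.
	if err := backend.SetMetadata(id, "parent", []byte("example-parent")); err != nil {
		panic(err)
	}

	content, err := backend.Get(id)
	if err != nil {
		panic(err)
	}
	parent, err := backend.GetMetadata(id, "parent")
	if err != nil {
		panic(err)
	}
	fmt.Printf("id=%s content=%s parent=%s\n", id, content, parent)
}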
0 192
new file mode 100644
... ...
@@ -0,0 +1,391 @@
0
+package image
1
+
2
+import (
3
+	"bytes"
4
+	"crypto/rand"
5
+	"crypto/sha256"
6
+	"encoding/hex"
7
+	"errors"
8
+	"io/ioutil"
9
+	"os"
10
+	"path/filepath"
11
+	"testing"
12
+
13
+	"github.com/docker/distribution/digest"
14
+)
15
+
16
+func TestFSGetSet(t *testing.T) {
17
+	tmpdir, err := ioutil.TempDir("", "images-fs-store")
18
+	if err != nil {
19
+		t.Fatal(err)
20
+	}
21
+	defer os.RemoveAll(tmpdir)
22
+	fs, err := NewFSStoreBackend(tmpdir)
23
+	if err != nil {
24
+		t.Fatal(err)
25
+	}
26
+
27
+	testGetSet(t, fs)
28
+}
29
+
30
+func TestFSGetInvalidData(t *testing.T) {
31
+	tmpdir, err := ioutil.TempDir("", "images-fs-store")
32
+	if err != nil {
33
+		t.Fatal(err)
34
+	}
35
+	defer os.RemoveAll(tmpdir)
36
+	fs, err := NewFSStoreBackend(tmpdir)
37
+	if err != nil {
38
+		t.Fatal(err)
39
+	}
40
+
41
+	id, err := fs.Set([]byte("foobar"))
42
+	if err != nil {
43
+		t.Fatal(err)
44
+	}
45
+
46
+	dgst := digest.Digest(id)
47
+
48
+	if err := ioutil.WriteFile(filepath.Join(tmpdir, contentDirName, string(dgst.Algorithm()), dgst.Hex()), []byte("foobar2"), 0600); err != nil {
49
+		t.Fatal(err)
50
+	}
51
+
52
+	_, err = fs.Get(id)
53
+	if err == nil {
54
+		t.Fatal("Expected get to fail after data modification.")
55
+	}
56
+}
57
+
58
+func TestFSInvalidSet(t *testing.T) {
59
+	tmpdir, err := ioutil.TempDir("", "images-fs-store")
60
+	if err != nil {
61
+		t.Fatal(err)
62
+	}
63
+	defer os.RemoveAll(tmpdir)
64
+	fs, err := NewFSStoreBackend(tmpdir)
65
+	if err != nil {
66
+		t.Fatal(err)
67
+	}
68
+
69
+	id, err := digest.FromBytes([]byte("foobar"))
70
+	if err != nil {
71
+		t.Fatal(err)
72
+	}
73
+	err = os.Mkdir(filepath.Join(tmpdir, contentDirName, string(id.Algorithm()), id.Hex()), 0700)
74
+	if err != nil {
75
+		t.Fatal(err)
76
+	}
77
+
78
+	_, err = fs.Set([]byte("foobar"))
79
+	if err == nil {
80
+		t.Fatal("Expecting error from invalid filesystem data.")
81
+	}
82
+}
83
+
84
+func TestFSInvalidRoot(t *testing.T) {
85
+	tmpdir, err := ioutil.TempDir("", "images-fs-store")
86
+	if err != nil {
87
+		t.Fatal(err)
88
+	}
89
+	defer os.RemoveAll(tmpdir)
90
+
91
+	tcases := []struct {
92
+		root, invalidFile string
93
+	}{
94
+		{"root", "root"},
95
+		{"root", "root/content"},
96
+		{"root", "root/metadata"},
97
+	}
98
+
99
+	for _, tc := range tcases {
100
+		root := filepath.Join(tmpdir, tc.root)
101
+		filePath := filepath.Join(tmpdir, tc.invalidFile)
102
+		err := os.MkdirAll(filepath.Dir(filePath), 0700)
103
+		if err != nil {
104
+			t.Fatal(err)
105
+		}
106
+		f, err := os.Create(filePath)
107
+		if err != nil {
108
+			t.Fatal(err)
109
+		}
110
+		f.Close()
111
+
112
+		_, err = NewFSStoreBackend(root)
113
+		if err == nil {
114
+			t.Fatalf("Expected error from root %q and invalid file %q", tc.root, tc.invalidFile)
115
+		}
116
+
117
+		os.RemoveAll(root)
118
+	}
119
+
120
+}
121
+
122
+func testMetadataGetSet(t *testing.T, store StoreBackend) {
123
+	id, err := store.Set([]byte("foo"))
124
+	if err != nil {
125
+		t.Fatal(err)
126
+	}
127
+	id2, err := store.Set([]byte("bar"))
128
+	if err != nil {
129
+		t.Fatal(err)
130
+	}
131
+
132
+	tcases := []struct {
133
+		id    ID
134
+		key   string
135
+		value []byte
136
+	}{
137
+		{id, "tkey", []byte("tval1")},
138
+		{id, "tkey2", []byte("tval2")},
139
+		{id2, "tkey", []byte("tval3")},
140
+	}
141
+
142
+	for _, tc := range tcases {
143
+		err = store.SetMetadata(tc.id, tc.key, tc.value)
144
+		if err != nil {
145
+			t.Fatal(err)
146
+		}
147
+
148
+		actual, err := store.GetMetadata(tc.id, tc.key)
149
+		if err != nil {
150
+			t.Fatal(err)
151
+		}
152
+		if bytes.Compare(actual, tc.value) != 0 {
153
+			t.Fatalf("Metadata expected %q, got %q", tc.value, actual)
154
+		}
155
+	}
156
+
157
+	_, err = store.GetMetadata(id2, "tkey2")
158
+	if err == nil {
159
+		t.Fatal("Expected error for getting metadata for unknown key")
160
+	}
161
+
162
+	id3, err := digest.FromBytes([]byte("baz"))
163
+	if err != nil {
164
+		t.Fatal(err)
165
+	}
166
+
167
+	err = store.SetMetadata(ID(id3), "tkey", []byte("tval"))
168
+	if err == nil {
169
+		t.Fatal("Expected error for setting metadata for unknown ID.")
170
+	}
171
+
172
+	_, err = store.GetMetadata(ID(id3), "tkey")
173
+	if err == nil {
174
+		t.Fatal("Expected error for getting metadata for unknown ID.")
175
+	}
176
+}
177
+
178
+func TestFSMetadataGetSet(t *testing.T) {
179
+	tmpdir, err := ioutil.TempDir("", "images-fs-store")
180
+	if err != nil {
181
+		t.Fatal(err)
182
+	}
183
+	defer os.RemoveAll(tmpdir)
184
+	fs, err := NewFSStoreBackend(tmpdir)
185
+	if err != nil {
186
+		t.Fatal(err)
187
+	}
188
+
189
+	testMetadataGetSet(t, fs)
190
+}
191
+
192
+func TestFSDelete(t *testing.T) {
193
+	tmpdir, err := ioutil.TempDir("", "images-fs-store")
194
+	if err != nil {
195
+		t.Fatal(err)
196
+	}
197
+	defer os.RemoveAll(tmpdir)
198
+	fs, err := NewFSStoreBackend(tmpdir)
199
+	if err != nil {
200
+		t.Fatal(err)
201
+	}
202
+
203
+	testDelete(t, fs)
204
+}
205
+
206
+func TestFSWalker(t *testing.T) {
207
+	tmpdir, err := ioutil.TempDir("", "images-fs-store")
208
+	if err != nil {
209
+		t.Fatal(err)
210
+	}
211
+	defer os.RemoveAll(tmpdir)
212
+	fs, err := NewFSStoreBackend(tmpdir)
213
+	if err != nil {
214
+		t.Fatal(err)
215
+	}
216
+
217
+	testWalker(t, fs)
218
+}
219
+
220
+func TestFSInvalidWalker(t *testing.T) {
221
+	tmpdir, err := ioutil.TempDir("", "images-fs-store")
222
+	if err != nil {
223
+		t.Fatal(err)
224
+	}
225
+	defer os.RemoveAll(tmpdir)
226
+	fs, err := NewFSStoreBackend(tmpdir)
227
+	if err != nil {
228
+		t.Fatal(err)
229
+	}
230
+
231
+	fooID, err := fs.Set([]byte("foo"))
232
+	if err != nil {
233
+		t.Fatal(err)
234
+	}
235
+
236
+	if err := ioutil.WriteFile(filepath.Join(tmpdir, contentDirName, "sha256/foobar"), []byte("foobar"), 0600); err != nil {
237
+		t.Fatal(err)
238
+	}
239
+
240
+	n := 0
241
+	err = fs.Walk(func(id ID) error {
242
+		if id != fooID {
243
+			t.Fatalf("Invalid walker ID %q, expected %q", id, fooID)
244
+		}
245
+		n++
246
+		return nil
247
+	})
248
+	if err != nil {
249
+		t.Fatalf("Invalid data should not have caused walker error, got %v", err)
250
+	}
251
+	if n != 1 {
252
+		t.Fatalf("Expected 1 walk initialization, got %d", n)
253
+	}
254
+}
255
+
256
+func testGetSet(t *testing.T, store StoreBackend) {
257
+	type tcase struct {
258
+		input    []byte
259
+		expected ID
260
+	}
261
+	tcases := []tcase{
262
+		{[]byte("foobar"), ID("sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2")},
263
+	}
264
+
265
+	randomInput := make([]byte, 8*1024)
266
+	_, err := rand.Read(randomInput)
267
+	if err != nil {
268
+		t.Fatal(err)
269
+	}
270
+	// skipping use of digest pkg because it's used by the implementation
271
+	h := sha256.New()
272
+	_, err = h.Write(randomInput)
273
+	if err != nil {
274
+		t.Fatal(err)
275
+	}
276
+	tcases = append(tcases, tcase{
277
+		input:    randomInput,
278
+		expected: ID("sha256:" + hex.EncodeToString(h.Sum(nil))),
279
+	})
280
+
281
+	for _, tc := range tcases {
282
+		id, err := store.Set([]byte(tc.input))
283
+		if err != nil {
284
+			t.Fatal(err)
285
+		}
286
+		if id != tc.expected {
287
+			t.Fatalf("Expected ID %q, got %q", tc.expected, id)
288
+		}
289
+	}
290
+
291
+	for _, emptyData := range [][]byte{nil, {}} {
292
+		_, err := store.Set(emptyData)
293
+		if err == nil {
294
+			t.Fatal("Expected error for nil input.")
295
+		}
296
+	}
297
+
298
+	for _, tc := range tcases {
299
+		data, err := store.Get(tc.expected)
300
+		if err != nil {
301
+			t.Fatal(err)
302
+		}
303
+		if bytes.Compare(data, tc.input) != 0 {
304
+			t.Fatalf("Expected data %q, got %q", tc.input, data)
305
+		}
306
+	}
307
+
308
+	for _, key := range []ID{"foobar:abc", "sha256:abc", "sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2a"} {
309
+		_, err := store.Get(key)
310
+		if err == nil {
311
+			t.Fatalf("Expected error for ID %q.", key)
312
+		}
313
+	}
314
+
315
+}
316
+
317
+func testDelete(t *testing.T, store StoreBackend) {
318
+	id, err := store.Set([]byte("foo"))
319
+	if err != nil {
320
+		t.Fatal(err)
321
+	}
322
+	id2, err := store.Set([]byte("bar"))
323
+	if err != nil {
324
+		t.Fatal(err)
325
+	}
326
+
327
+	err = store.Delete(id)
328
+	if err != nil {
329
+		t.Fatal(err)
330
+	}
331
+
332
+	_, err = store.Get(id)
333
+	if err == nil {
334
+		t.Fatalf("Expected getting deleted item %q to fail", id)
335
+	}
336
+	_, err = store.Get(id2)
337
+	if err != nil {
338
+		t.Fatal(err)
339
+	}
340
+
341
+	err = store.Delete(id2)
342
+	if err != nil {
343
+		t.Fatal(err)
344
+	}
345
+	_, err = store.Get(id2)
346
+	if err == nil {
347
+		t.Fatalf("Expected getting deleted item %q to fail", id2)
348
+	}
349
+}
350
+
351
+func testWalker(t *testing.T, store StoreBackend) {
352
+	id, err := store.Set([]byte("foo"))
353
+	if err != nil {
354
+		t.Fatal(err)
355
+	}
356
+	id2, err := store.Set([]byte("bar"))
357
+	if err != nil {
358
+		t.Fatal(err)
359
+	}
360
+
361
+	tcases := make(map[ID]struct{})
362
+	tcases[id] = struct{}{}
363
+	tcases[id2] = struct{}{}
364
+	n := 0
365
+	err = store.Walk(func(id ID) error {
366
+		delete(tcases, id)
367
+		n++
368
+		return nil
369
+	})
370
+	if err != nil {
371
+		t.Fatal(err)
372
+	}
373
+
374
+	if n != 2 {
375
+		t.Fatalf("Expected 2 walk initializations, got %d", n)
376
+	}
377
+	if len(tcases) != 0 {
378
+		t.Fatalf("Expected empty unwalked set, got %+v", tcases)
379
+	}
380
+
381
+	// stop on error
382
+	tcases = make(map[ID]struct{})
383
+	tcases[id] = struct{}{}
384
+	err = store.Walk(func(id ID) error {
385
+		return errors.New("")
386
+	})
387
+	if err == nil {
388
+		t.Fatalf("Expected error from walker.")
389
+	}
390
+}
... ...
@@ -2,36 +2,23 @@ package image
2 2
 
3 3
 import (
4 4
 	"encoding/json"
5
-	"fmt"
6
-	"regexp"
5
+	"errors"
6
+	"io"
7 7
 	"time"
8 8
 
9
-	"github.com/Sirupsen/logrus"
10 9
 	"github.com/docker/distribution/digest"
11
-	derr "github.com/docker/docker/errors"
12
-	"github.com/docker/docker/pkg/version"
13 10
 	"github.com/docker/docker/runconfig"
14 11
 )
15 12
 
16
-var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
13
+// ID is the content-addressable ID of an image.
14
+type ID digest.Digest
17 15
 
18
-// noFallbackMinVersion is the minimum version for which v1compatibility
19
-// information will not be marshaled through the Image struct to remove
20
-// blank fields.
21
-var noFallbackMinVersion = version.Version("1.8.3")
22
-
23
-// Descriptor provides the information necessary to register an image in
24
-// the graph.
25
-type Descriptor interface {
26
-	ID() string
27
-	Parent() string
28
-	MarshalConfig() ([]byte, error)
16
+func (id ID) String() string {
17
+	return digest.Digest(id).String()
29 18
 }
30 19
 
31
-// Image stores the image configuration.
32
-// All fields in this struct must be marked `omitempty` to keep getting
33
-// predictable hashes from the old `v1Compatibility` configuration.
34
-type Image struct {
20
+// V1Image stores the V1 image configuration.
21
+type V1Image struct {
35 22
 	// ID a unique 64 character identifier of the image
36 23
 	ID string `json:"id,omitempty"`
37 24
 	// Parent id of the image
... ...
@@ -55,95 +42,87 @@ type Image struct {
55 55
 	// OS is the operating system used to build and run the image
56 56
 	OS string `json:"os,omitempty"`
57 57
 	// Size is the total size of the image including all layers it is composed of
58
-	Size int64 `json:",omitempty"` // capitalized for backwards compatibility
59
-	// ParentID specifies the strong, content address of the parent configuration.
60
-	ParentID digest.Digest `json:"parent_id,omitempty"`
61
-	// LayerID provides the content address of the associated layer.
62
-	LayerID digest.Digest `json:"layer_id,omitempty"`
58
+	Size int64 `json:",omitempty"`
63 59
 }
64 60
 
65
-// NewImgJSON creates an Image configuration from json.
66
-func NewImgJSON(src []byte) (*Image, error) {
67
-	ret := &Image{}
61
+// Image stores the image configuration
62
+type Image struct {
63
+	V1Image
64
+	Parent  ID        `json:"parent,omitempty"`
65
+	RootFS  *RootFS   `json:"rootfs,omitempty"`
66
+	History []History `json:"history,omitempty"`
68 67
 
69
-	// FIXME: Is there a cleaner way to "purify" the input json?
70
-	if err := json.Unmarshal(src, ret); err != nil {
71
-		return nil, err
72
-	}
73
-	return ret, nil
68
+	// rawJSON caches the immutable JSON associated with this image.
69
+	rawJSON []byte
70
+
71
+	// computedID is the ID computed from the hash of the image config.
72
+	// Not to be confused with the legacy V1 ID in V1Image.
73
+	computedID ID
74 74
 }
75 75
 
76
-// ValidateID checks whether an ID string is a valid image ID.
77
-func ValidateID(id string) error {
78
-	if ok := validHex.MatchString(id); !ok {
79
-		return derr.ErrorCodeInvalidImageID.WithArgs(id)
80
-	}
81
-	return nil
76
+// RawJSON returns the immutable JSON associated with the image.
77
+func (img *Image) RawJSON() []byte {
78
+	return img.rawJSON
79
+}
80
+
81
+// ID returns the image's content-addressable ID.
82
+func (img *Image) ID() ID {
83
+	return img.computedID
82 84
 }
83 85
 
84
-// MakeImageConfig returns immutable configuration JSON for image based on the
85
-// v1Compatibility object, layer digest and parent StrongID. SHA256() of this
86
-// config is the new image ID (strongID).
87
-func MakeImageConfig(v1Compatibility []byte, layerID, parentID digest.Digest) ([]byte, error) {
86
+// MarshalJSON serializes the image to JSON. It sorts the top-level keys so
87
+// that JSON that's been manipulated by a push/pull cycle with a legacy
88
+// registry won't end up with a different key order.
89
+func (img *Image) MarshalJSON() ([]byte, error) {
90
+	type MarshalImage Image
88 91
 
89
-	// Detect images created after 1.8.3
90
-	img, err := NewImgJSON(v1Compatibility)
92
+	pass1, err := json.Marshal(MarshalImage(*img))
91 93
 	if err != nil {
92 94
 		return nil, err
93 95
 	}
94
-	useFallback := version.Version(img.DockerVersion).LessThan(noFallbackMinVersion)
95
-
96
-	if useFallback {
97
-		// Fallback for pre-1.8.3. Calculate base config based on Image struct
98
-		// so that fields with default values added by Docker will use same ID
99
-		logrus.Debugf("Using fallback hash for %v", layerID)
100
-
101
-		v1Compatibility, err = json.Marshal(img)
102
-		if err != nil {
103
-			return nil, err
104
-		}
105
-	}
106 96
 
107 97
 	var c map[string]*json.RawMessage
108
-	if err := json.Unmarshal(v1Compatibility, &c); err != nil {
98
+	if err := json.Unmarshal(pass1, &c); err != nil {
109 99
 		return nil, err
110 100
 	}
111
-
112
-	if err := layerID.Validate(); err != nil {
113
-		return nil, fmt.Errorf("invalid layerID: %v", err)
114
-	}
115
-
116
-	c["layer_id"] = rawJSON(layerID)
117
-
118
-	if parentID != "" {
119
-		if err := parentID.Validate(); err != nil {
120
-			return nil, fmt.Errorf("invalid parentID %v", err)
121
-		}
122
-		c["parent_id"] = rawJSON(parentID)
123
-	}
124
-
125
-	delete(c, "id")
126
-	delete(c, "parent")
127
-	delete(c, "Size") // Size is calculated from data on disk and is inconsitent
128
-
129 101
 	return json.Marshal(c)
130 102
 }
131 103
 
132
-// StrongID returns image ID for the config JSON.
133
-func StrongID(configJSON []byte) (digest.Digest, error) {
134
-	digester := digest.Canonical.New()
135
-	if _, err := digester.Hash().Write(configJSON); err != nil {
136
-		return "", err
137
-	}
138
-	dgst := digester.Digest()
139
-	logrus.Debugf("H(%v) = %v", string(configJSON), dgst)
140
-	return dgst, nil
104
+// History stores build commands that were used to create an image
105
+type History struct {
106
+	// Created timestamp for build point
107
+	Created time.Time `json:"created"`
108
+	// Author of the build point
109
+	Author string `json:"author,omitempty"`
110
+	// CreatedBy keeps the Dockerfile command used while building the image.
111
+	CreatedBy string `json:"created_by,omitempty"`
112
+	// Comment is a custom message set by the user when creating the image.
113
+	Comment string `json:"comment,omitempty"`
114
+	// EmptyLayer is set to true if this history item did not generate a
115
+	// layer. Otherwise, the history item is associated with the next
116
+	// layer in the RootFS section.
117
+	EmptyLayer bool `json:"empty_layer,omitempty"`
141 118
 }
142 119
 
143
-func rawJSON(value interface{}) *json.RawMessage {
144
-	jsonval, err := json.Marshal(value)
145
-	if err != nil {
146
-		return nil
120
+// Exporter provides an interface for exporting and importing images
121
+type Exporter interface {
122
+	Load(io.ReadCloser, io.Writer) error
123
+	// TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error
124
+	Save([]string, io.Writer) error
125
+}
126
+
127
+// NewFromJSON creates an Image configuration from json.
128
+func NewFromJSON(src []byte) (*Image, error) {
129
+	img := &Image{}
130
+
131
+	if err := json.Unmarshal(src, img); err != nil {
132
+		return nil, err
147 133
 	}
148
-	return (*json.RawMessage)(&jsonval)
134
+	if img.RootFS == nil {
135
+		return nil, errors.New("Invalid image JSON, no RootFS key.")
136
+	}
137
+
138
+	img.rawJSON = src
139
+
140
+	return img, nil
149 141
 }
... ...
@@ -1,55 +1,59 @@
1 1
 package image
2 2
 
3 3
 import (
4
-	"bytes"
5
-	"io/ioutil"
4
+	"encoding/json"
5
+	"sort"
6
+	"strings"
6 7
 	"testing"
7
-
8
-	"github.com/docker/distribution/digest"
9 8
 )
10 9
 
11
-var fixtures = []string{
12
-	"fixtures/pre1.9",
13
-	"fixtures/post1.9",
14
-}
10
+const sampleImageJSON = `{
11
+	"architecture": "amd64",
12
+	"os": "linux",
13
+	"config": {},
14
+	"rootfs": {
15
+		"type": "layers",
16
+		"diff_ids": []
17
+	}
18
+}`
15 19
 
16
-func loadFixtureFile(t *testing.T, path string) []byte {
17
-	fileData, err := ioutil.ReadFile(path)
20
+func TestJSON(t *testing.T) {
21
+	img, err := NewFromJSON([]byte(sampleImageJSON))
18 22
 	if err != nil {
19
-		t.Fatalf("error opening %s: %v", path, err)
23
+		t.Fatal(err)
24
+	}
25
+	rawJSON := img.RawJSON()
26
+	if string(rawJSON) != sampleImageJSON {
27
+		t.Fatalf("Raw JSON of config didn't match: expected %+v, got %s", sampleImageJSON, rawJSON)
20 28
 	}
21
-
22
-	return bytes.TrimSpace(fileData)
23 29
 }
24 30
 
25
-// TestMakeImageConfig makes sure that MakeImageConfig returns the expected
26
-// canonical JSON for a reference Image.
27
-func TestMakeImageConfig(t *testing.T) {
28
-	for _, fixture := range fixtures {
29
-		v1Compatibility := loadFixtureFile(t, fixture+"/v1compatibility")
30
-		expectedConfig := loadFixtureFile(t, fixture+"/expected_config")
31
-		layerID := digest.Digest(loadFixtureFile(t, fixture+"/layer_id"))
32
-		parentID := digest.Digest(loadFixtureFile(t, fixture+"/parent_id"))
33
-
34
-		json, err := MakeImageConfig(v1Compatibility, layerID, parentID)
35
-		if err != nil {
36
-			t.Fatalf("MakeImageConfig on %s returned error: %v", fixture, err)
37
-		}
38
-		if !bytes.Equal(json, expectedConfig) {
39
-			t.Fatalf("did not get expected JSON for %s\nexpected: %s\ngot: %s", fixture, expectedConfig, json)
40
-		}
31
+func TestInvalidJSON(t *testing.T) {
32
+	_, err := NewFromJSON([]byte("{}"))
33
+	if err == nil {
34
+		t.Fatal("Expected JSON parse error")
41 35
 	}
42 36
 }
43 37
 
44
-// TestGetStrongID makes sure that GetConfigJSON returns the expected
45
-// hash for a reference Image.
46
-func TestGetStrongID(t *testing.T) {
47
-	for _, fixture := range fixtures {
48
-		expectedConfig := loadFixtureFile(t, fixture+"/expected_config")
49
-		expectedComputedID := digest.Digest(loadFixtureFile(t, fixture+"/expected_computed_id"))
38
+func TestMarshalKeyOrder(t *testing.T) {
39
+	b, err := json.Marshal(&Image{
40
+		V1Image: V1Image{
41
+			Comment:      "a",
42
+			Author:       "b",
43
+			Architecture: "c",
44
+		},
45
+	})
46
+	if err != nil {
47
+		t.Fatal(err)
48
+	}
49
+
50
+	expectedOrder := []string{"architecture", "author", "comment"}
51
+	var indexes []int
52
+	for _, k := range expectedOrder {
53
+		indexes = append(indexes, strings.Index(string(b), k))
54
+	}
50 55
 
51
-		if id, err := StrongID(expectedConfig); err != nil || id != expectedComputedID {
52
-			t.Fatalf("did not get expected ID for %s\nexpected: %s\ngot: %s\nerror: %v", fixture, expectedComputedID, id, err)
53
-		}
56
+	if !sort.IntsAreSorted(indexes) {
57
+		t.Fatal("invalid key order in JSON: ", string(b))
54 58
 	}
55 59
 }
56 60
new file mode 100644
... ...
@@ -0,0 +1,8 @@
0
+package image
1
+
2
+import "github.com/docker/docker/layer"
3
+
4
+// Append appends a new diffID to rootfs
5
+func (r *RootFS) Append(id layer.DiffID) {
6
+	r.DiffIDs = append(r.DiffIDs, id)
7
+}
0 8
new file mode 100644
... ...
@@ -0,0 +1,23 @@
0
+// +build !windows
1
+
2
+package image
3
+
4
+import "github.com/docker/docker/layer"
5
+
6
+// RootFS describes an image's root filesystem
8
+// This is currently a placeholder that only supports layers. In the future
9
+// this can be made into an interface that supports different implementations.
9
+type RootFS struct {
10
+	Type    string         `json:"type"`
11
+	DiffIDs []layer.DiffID `json:"diff_ids,omitempty"`
12
+}
13
+
14
+// ChainID returns the ChainID for the top layer in RootFS.
15
+func (r *RootFS) ChainID() layer.ChainID {
16
+	return layer.CreateChainID(r.DiffIDs)
17
+}
18
+
19
+// NewRootFS returns an empty RootFS struct
20
+func NewRootFS() *RootFS {
21
+	return &RootFS{Type: "layers"}
22
+}
0 23
new file mode 100644
... ...
@@ -0,0 +1,37 @@
0
+// +build windows
1
+
2
+package image
3
+
4
+import (
5
+	"crypto/sha512"
6
+	"fmt"
7
+
8
+	"github.com/docker/distribution/digest"
9
+	"github.com/docker/docker/layer"
10
+)
11
+
12
+// RootFS describes an image's root filesystem
13
+// This is currently a placeholder that only supports layers. In the future
14
+// this can be made into an interface that supports different implementations.
15
+type RootFS struct {
16
+	Type      string         `json:"type"`
17
+	DiffIDs   []layer.DiffID `json:"diff_ids,omitempty"`
18
+	BaseLayer string         `json:"base_layer,omitempty"`
19
+}
20
+
21
+// BaseLayerID returns the 64 byte hex ID for the baselayer name.
22
+func (r *RootFS) BaseLayerID() string {
23
+	baseID := sha512.Sum384([]byte(r.BaseLayer))
24
+	return fmt.Sprintf("%x", baseID[:32])
25
+}
26
+
27
+// ChainID returns the ChainID for the top layer in RootFS.
28
+func (r *RootFS) ChainID() layer.ChainID {
29
+	baseDiffID, _ := digest.FromBytes([]byte(r.BaseLayerID())) // can never error
30
+	return layer.CreateChainID(append([]layer.DiffID{layer.DiffID(baseDiffID)}, r.DiffIDs...))
31
+}
32
+
33
+// NewRootFS returns an empty RootFS struct
34
+func NewRootFS() *RootFS {
35
+	return &RootFS{Type: "layers+base"}
36
+}
0 37
new file mode 100644
... ...
@@ -0,0 +1,286 @@
0
+package image
1
+
2
+import (
3
+	"encoding/json"
4
+	"errors"
5
+	"fmt"
6
+	"sync"
7
+
8
+	"github.com/Sirupsen/logrus"
9
+	"github.com/docker/distribution/digest"
10
+	"github.com/docker/docker/layer"
11
+)
12
+
13
+// Store is an interface for creating and accessing images
14
+type Store interface {
15
+	Create(config []byte) (ID, error)
16
+	Get(id ID) (*Image, error)
17
+	Delete(id ID) ([]layer.Metadata, error)
18
+	Search(partialID string) (ID, error)
19
+	SetParent(id ID, parent ID) error
20
+	GetParent(id ID) (ID, error)
21
+	Children(id ID) []ID
22
+	Map() map[ID]*Image
23
+	Heads() map[ID]*Image
24
+}
25
+
26
+// LayerGetReleaser is a minimal interface for getting and releasing layers.
27
+type LayerGetReleaser interface {
28
+	Get(layer.ChainID) (layer.Layer, error)
29
+	Release(layer.Layer) ([]layer.Metadata, error)
30
+}
31
+
32
+type imageMeta struct {
33
+	layer    layer.Layer
34
+	children map[ID]struct{}
35
+}
36
+
37
+type store struct {
38
+	sync.Mutex
39
+	ls        LayerGetReleaser
40
+	images    map[ID]*imageMeta
41
+	fs        StoreBackend
42
+	digestSet *digest.Set
43
+}
44
+
45
+// NewImageStore returns a new store object for the given layer store
46
+func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) {
47
+	is := &store{
48
+		ls:        ls,
49
+		images:    make(map[ID]*imageMeta),
50
+		fs:        fs,
51
+		digestSet: digest.NewSet(),
52
+	}
53
+
54
+	// load all current images and retain layers
55
+	if err := is.restore(); err != nil {
56
+		return nil, err
57
+	}
58
+
59
+	return is, nil
60
+}
61
+
62
+func (is *store) restore() error {
63
+	err := is.fs.Walk(func(id ID) error {
64
+		img, err := is.Get(id)
65
+		if err != nil {
66
+			logrus.Errorf("invalid image %v, %v", id, err)
67
+			return nil
68
+		}
69
+		var l layer.Layer
70
+		if chainID := img.RootFS.ChainID(); chainID != "" {
71
+			l, err = is.ls.Get(chainID)
72
+			if err != nil {
73
+				return err
74
+			}
75
+		}
76
+		if err := is.digestSet.Add(digest.Digest(id)); err != nil {
77
+			return err
78
+		}
79
+
80
+		imageMeta := &imageMeta{
81
+			layer:    l,
82
+			children: make(map[ID]struct{}),
83
+		}
84
+
85
+		is.images[ID(id)] = imageMeta
86
+
87
+		return nil
88
+	})
89
+	if err != nil {
90
+		return err
91
+	}
92
+
93
+	// Second pass to fill in children maps
94
+	for id := range is.images {
95
+		if parent, err := is.GetParent(id); err == nil {
96
+			if parentMeta := is.images[parent]; parentMeta != nil {
97
+				parentMeta.children[id] = struct{}{}
98
+			}
99
+		}
100
+	}
101
+
102
+	return nil
103
+}
104
+
105
+func (is *store) Create(config []byte) (ID, error) {
106
+	var img Image
107
+	err := json.Unmarshal(config, &img)
108
+	if err != nil {
109
+		return "", err
110
+	}
111
+
112
+	// Must reject any config that references diffIDs from the history
113
+	// which aren't among the rootfs layers.
114
+	rootFSLayers := make(map[layer.DiffID]struct{})
115
+	for _, diffID := range img.RootFS.DiffIDs {
116
+		rootFSLayers[diffID] = struct{}{}
117
+	}
118
+
119
+	layerCounter := 0
120
+	for _, h := range img.History {
121
+		if !h.EmptyLayer {
122
+			layerCounter++
123
+		}
124
+	}
125
+	if layerCounter > len(img.RootFS.DiffIDs) {
126
+		return "", errors.New("too many non-empty layers in History section")
127
+	}
128
+
129
+	dgst, err := is.fs.Set(config)
130
+	if err != nil {
131
+		return "", err
132
+	}
133
+	imageID := ID(dgst)
134
+
135
+	is.Lock()
136
+	defer is.Unlock()
137
+
138
+	if _, exists := is.images[imageID]; exists {
139
+		return imageID, nil
140
+	}
141
+
142
+	layerID := img.RootFS.ChainID()
143
+
144
+	var l layer.Layer
145
+	if layerID != "" {
146
+		l, err = is.ls.Get(layerID)
147
+		if err != nil {
148
+			return "", err
149
+		}
150
+	}
151
+
152
+	imageMeta := &imageMeta{
153
+		layer:    l,
154
+		children: make(map[ID]struct{}),
155
+	}
156
+
157
+	is.images[imageID] = imageMeta
158
+	if err := is.digestSet.Add(digest.Digest(imageID)); err != nil {
159
+		delete(is.images, imageID)
160
+		return "", err
161
+	}
162
+
163
+	return imageID, nil
164
+}
165
+
166
+func (is *store) Search(term string) (ID, error) {
167
+	is.Lock()
168
+	defer is.Unlock()
169
+
170
+	dgst, err := is.digestSet.Lookup(term)
171
+	if err != nil {
172
+		return "", err
173
+	}
174
+	return ID(dgst), nil
175
+}
176
+
177
+func (is *store) Get(id ID) (*Image, error) {
178
+	// todo: Check if image is in images
179
+	// todo: Detect manual insertions and start using them
180
+	config, err := is.fs.Get(id)
181
+	if err != nil {
182
+		return nil, err
183
+	}
184
+
185
+	img, err := NewFromJSON(config)
186
+	if err != nil {
187
+		return nil, err
188
+	}
189
+	img.computedID = id
190
+
191
+	img.Parent, err = is.GetParent(id)
192
+	if err != nil {
193
+		img.Parent = ""
194
+	}
195
+
196
+	return img, nil
197
+}
198
+
199
+func (is *store) Delete(id ID) ([]layer.Metadata, error) {
200
+	is.Lock()
201
+	defer is.Unlock()
202
+
203
+	imageMeta := is.images[id]
204
+	if imageMeta == nil {
205
+		return nil, fmt.Errorf("unrecognized image ID %s", id.String())
206
+	}
207
+	for id := range imageMeta.children {
208
+		is.fs.DeleteMetadata(id, "parent")
209
+	}
210
+	if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil {
211
+		delete(is.images[parent].children, id)
212
+	}
213
+
214
+	delete(is.images, id)
215
+	is.fs.Delete(id)
216
+
217
+	if imageMeta.layer != nil {
218
+		return is.ls.Release(imageMeta.layer)
219
+	}
220
+	return nil, nil
221
+}
222
+
223
+func (is *store) SetParent(id, parent ID) error {
224
+	is.Lock()
225
+	defer is.Unlock()
226
+	parentMeta := is.images[parent]
227
+	if parentMeta == nil {
228
+		return fmt.Errorf("unknown parent image ID %s", parent.String())
229
+	}
230
+	parentMeta.children[id] = struct{}{}
231
+	return is.fs.SetMetadata(id, "parent", []byte(parent))
232
+}
233
+
234
+func (is *store) GetParent(id ID) (ID, error) {
235
+	d, err := is.fs.GetMetadata(id, "parent")
236
+	if err != nil {
237
+		return "", err
238
+	}
239
+	return ID(d), nil // todo: validate?
240
+}
241
+
242
+func (is *store) Children(id ID) []ID {
243
+	is.Lock()
244
+	defer is.Unlock()
245
+
246
+	return is.children(id)
247
+}
248
+
249
+func (is *store) children(id ID) []ID {
250
+	var ids []ID
251
+	if is.images[id] != nil {
252
+		for id := range is.images[id].children {
253
+			ids = append(ids, id)
254
+		}
255
+	}
256
+	return ids
257
+}
258
+
259
+func (is *store) Heads() map[ID]*Image {
260
+	return is.imagesMap(false)
261
+}
262
+
263
+func (is *store) Map() map[ID]*Image {
264
+	return is.imagesMap(true)
265
+}
266
+
267
+func (is *store) imagesMap(all bool) map[ID]*Image {
268
+	is.Lock()
269
+	defer is.Unlock()
270
+
271
+	images := make(map[ID]*Image)
272
+
273
+	for id := range is.images {
274
+		if !all && len(is.children(id)) > 0 {
275
+			continue
276
+		}
277
+		img, err := is.Get(id)
278
+		if err != nil {
279
+			logrus.Errorf("invalid image access: %q, error: %q", id, err)
280
+			continue
281
+		}
282
+		images[id] = img
283
+	}
284
+	return images
285
+}
0 286
new file mode 100644
... ...
@@ -0,0 +1,205 @@
0
+package image
1
+
2
+import (
3
+	"io/ioutil"
4
+	"os"
5
+	"testing"
6
+
7
+	"github.com/docker/distribution/digest"
8
+	"github.com/docker/docker/layer"
9
+)
10
+
11
+func TestRestore(t *testing.T) {
12
+	tmpdir, err := ioutil.TempDir("", "images-fs-store")
13
+	if err != nil {
14
+		t.Fatal(err)
15
+	}
16
+	defer os.RemoveAll(tmpdir)
17
+	fs, err := NewFSStoreBackend(tmpdir)
18
+	if err != nil {
19
+		t.Fatal(err)
20
+	}
21
+
22
+	id1, err := fs.Set([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`))
23
+	if err != nil {
24
+		t.Fatal(err)
25
+	}
26
+	_, err = fs.Set([]byte(`invalid`))
27
+	if err != nil {
28
+		t.Fatal(err)
29
+	}
30
+	id2, err := fs.Set([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`))
31
+	if err != nil {
32
+		t.Fatal(err)
33
+	}
34
+	err = fs.SetMetadata(id2, "parent", []byte(id1))
35
+	if err != nil {
36
+		t.Fatal(err)
37
+	}
38
+
39
+	is, err := NewImageStore(fs, &mockLayerGetReleaser{})
40
+	if err != nil {
41
+		t.Fatal(err)
42
+	}
43
+
44
+	imgs := is.Map()
45
+	if actual, expected := len(imgs), 2; actual != expected {
46
+		t.Fatalf("invalid images length, expected %d, got %d", expected, actual)
47
+	}
48
+
49
+	img1, err := is.Get(ID(id1))
50
+	if err != nil {
51
+		t.Fatal(err)
52
+	}
53
+
54
+	if actual, expected := img1.computedID, ID(id1); actual != expected {
55
+		t.Fatalf("invalid image ID: expected %q, got %q", expected, actual)
56
+	}
57
+
58
+	if actual, expected := img1.computedID.String(), string(id1); actual != expected {
59
+		t.Fatalf("invalid image ID string: expected %q, got %q", expected, actual)
60
+	}
61
+
62
+	img2, err := is.Get(ID(id2))
63
+	if err != nil {
64
+		t.Fatal(err)
65
+	}
66
+
67
+	if actual, expected := img1.Comment, "abc"; actual != expected {
68
+		t.Fatalf("invalid comment for image1: expected %q, got %q", expected, actual)
69
+	}
70
+
71
+	if actual, expected := img2.Comment, "def"; actual != expected {
72
+		t.Fatalf("invalid comment for image2: expected %q, got %q", expected, actual)
73
+	}
74
+
75
+	p, err := is.GetParent(ID(id1))
76
+	if err == nil {
77
+		t.Fatal("expected error for getting parent")
78
+	}
79
+
80
+	p, err = is.GetParent(ID(id2))
81
+	if err != nil {
82
+		t.Fatal(err)
83
+	}
84
+	if actual, expected := p, ID(id1); actual != expected {
85
+		t.Fatalf("invalid parent: expected %q, got %q", expected, actual)
86
+	}
87
+
88
+	children := is.Children(ID(id1))
89
+	if len(children) != 1 {
90
+		t.Fatalf("invalid children length: %q", len(children))
91
+	}
92
+	if actual, expected := children[0], ID(id2); actual != expected {
93
+		t.Fatalf("invalid child for id1: expected %q, got %q", expected, actual)
94
+	}
95
+
96
+	heads := is.Heads()
97
+	if actual, expected := len(heads), 1; actual != expected {
98
+		t.Fatalf("invalid images length: expected %q, got %q", expected, actual)
99
+	}
100
+
101
+	sid1, err := is.Search(string(id1)[:10])
102
+	if err != nil {
103
+		t.Fatal(err)
104
+	}
105
+	if actual, expected := sid1, ID(id1); actual != expected {
106
+		t.Fatalf("searched ID mismatch: expected %q, got %q", expected, actual)
107
+	}
108
+
109
+	sid1, err = is.Search(digest.Digest(id1).Hex()[:6])
110
+	if err != nil {
111
+		t.Fatal(err)
112
+	}
113
+	if actual, expected := sid1, ID(id1); actual != expected {
114
+		t.Fatalf("searched ID mismatch: expected %q, got %q", expected, actual)
115
+	}
116
+
117
+	invalidPattern := digest.Digest(id1).Hex()[1:6]
118
+	_, err = is.Search(invalidPattern)
119
+	if err == nil {
120
+		t.Fatalf("expected search for %q to fail", invalidPattern)
121
+	}
122
+
123
+}
124
+
125
+func TestAddDelete(t *testing.T) {
126
+	tmpdir, err := ioutil.TempDir("", "images-fs-store")
127
+	if err != nil {
128
+		t.Fatal(err)
129
+	}
130
+	defer os.RemoveAll(tmpdir)
131
+	fs, err := NewFSStoreBackend(tmpdir)
132
+	if err != nil {
133
+		t.Fatal(err)
134
+	}
135
+
136
+	is, err := NewImageStore(fs, &mockLayerGetReleaser{})
137
+	if err != nil {
138
+		t.Fatal(err)
139
+	}
140
+
141
+	id1, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`))
142
+	if err != nil {
143
+		t.Fatal(err)
144
+	}
145
+
146
+	if actual, expected := id1, ID("sha256:8d25a9c45df515f9d0fe8e4a6b1c64dd3b965a84790ddbcc7954bb9bc89eb993"); actual != expected {
147
+		t.Fatalf("create ID mismatch: expected %q, got %q", expected, actual)
148
+	}
149
+
150
+	img, err := is.Get(id1)
151
+	if err != nil {
152
+		t.Fatal(err)
153
+	}
154
+
155
+	if actual, expected := img.Comment, "abc"; actual != expected {
156
+		t.Fatalf("invalid comment in image: expected %q, got %q", expected, actual)
157
+	}
158
+
159
+	id2, err := is.Create([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`))
160
+	if err != nil {
161
+		t.Fatal(err)
162
+	}
163
+
164
+	err = is.SetParent(id2, id1)
165
+	if err != nil {
166
+		t.Fatal(err)
167
+	}
168
+
169
+	pid1, err := is.GetParent(id2)
170
+	if err != nil {
171
+		t.Fatal(err)
172
+	}
173
+	if actual, expected := pid1, id1; actual != expected {
174
+		t.Fatalf("invalid parent for image: expected %q, got %q", expected, actual)
175
+	}
176
+
177
+	_, err = is.Delete(id1)
178
+	if err != nil {
179
+		t.Fatal(err)
180
+	}
181
+	_, err = is.Get(id1)
182
+	if err == nil {
183
+		t.Fatalf("expected get for deleted image %q to fail", id1)
184
+	}
185
+	_, err = is.Get(id2)
186
+	if err != nil {
187
+		t.Fatal(err)
188
+	}
189
+	pid1, err = is.GetParent(id2)
190
+	if err == nil {
191
+		t.Fatalf("expected parent check for image %q to fail, got %q", id2, pid1)
192
+	}
193
+
194
+}
195
+
196
+type mockLayerGetReleaser struct{}
197
+
198
+func (ls *mockLayerGetReleaser) Get(layer.ChainID) (layer.Layer, error) {
199
+	return nil, nil
200
+}
201
+
202
+func (ls *mockLayerGetReleaser) Release(layer.Layer) ([]layer.Metadata, error) {
203
+	return nil, nil
204
+}
0 205
new file mode 100644
... ...
@@ -0,0 +1,284 @@
0
+package tarexport
1
+
2
+import (
3
+	"encoding/json"
4
+	"fmt"
5
+	"io"
6
+	"io/ioutil"
7
+	"os"
8
+	"path/filepath"
9
+
10
+	"github.com/Sirupsen/logrus"
11
+	"github.com/docker/distribution/reference"
12
+	"github.com/docker/docker/image"
13
+	"github.com/docker/docker/image/v1"
14
+	"github.com/docker/docker/layer"
15
+	"github.com/docker/docker/pkg/archive"
16
+	"github.com/docker/docker/pkg/chrootarchive"
17
+	"github.com/docker/docker/pkg/symlink"
18
+)
19
+
20
+func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer) error {
21
+	tmpDir, err := ioutil.TempDir("", "docker-import-")
22
+	if err != nil {
23
+		return err
24
+	}
25
+	defer os.RemoveAll(tmpDir)
26
+
27
+	if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil {
28
+		return err
29
+	}
30
+	// read manifest, if no file then load in legacy mode
31
+	manifestPath, err := safePath(tmpDir, manifestFileName)
32
+	if err != nil {
33
+		return err
34
+	}
35
+	manifestFile, err := os.Open(manifestPath)
36
+	if err != nil {
37
+		if os.IsNotExist(err) {
38
+			return l.legacyLoad(tmpDir, outStream)
39
+		}
40
+		return manifestFile.Close()
41
+	}
42
+	defer manifestFile.Close()
43
+
44
+	var manifest []manifestItem
45
+	if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil {
46
+		return err
47
+	}
48
+
49
+	for _, m := range manifest {
50
+		configPath, err := safePath(tmpDir, m.Config)
51
+		if err != nil {
52
+			return err
53
+		}
54
+		config, err := ioutil.ReadFile(configPath)
55
+		if err != nil {
56
+			return err
57
+		}
58
+		img, err := image.NewFromJSON(config)
59
+		if err != nil {
60
+			return err
61
+		}
62
+		var rootFS image.RootFS
63
+		rootFS = *img.RootFS
64
+		rootFS.DiffIDs = nil
65
+
66
+		if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual {
67
+			return fmt.Errorf("invalid manifest, layers length mismatch: expected %d, got %d", expected, actual)
68
+		}
69
+
70
+		for i, diffID := range img.RootFS.DiffIDs {
71
+			layerPath, err := safePath(tmpDir, m.Layers[i])
72
+			if err != nil {
73
+				return err
74
+			}
75
+			newLayer, err := l.loadLayer(layerPath, rootFS)
76
+			if err != nil {
77
+				return err
78
+			}
79
+			defer layer.ReleaseAndLog(l.ls, newLayer)
80
+			if expected, actual := diffID, newLayer.DiffID(); expected != actual {
81
+				return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual)
82
+			}
83
+			rootFS.Append(diffID)
84
+		}
85
+
86
+		imgID, err := l.is.Create(config)
87
+		if err != nil {
88
+			return err
89
+		}
90
+
91
+		for _, repoTag := range m.RepoTags {
92
+			named, err := reference.ParseNamed(repoTag)
93
+			if err != nil {
94
+				return err
95
+			}
96
+			ref, ok := named.(reference.NamedTagged)
97
+			if !ok {
98
+				return fmt.Errorf("invalid tag %q", repoTag)
99
+			}
100
+			l.setLoadedTag(ref, imgID, outStream)
101
+		}
102
+
103
+	}
104
+
105
+	return nil
106
+}
107
+
108
+func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS) (layer.Layer, error) {
109
+	rawTar, err := os.Open(filename)
110
+	if err != nil {
111
+		logrus.Debugf("Error reading embedded tar: %v", err)
112
+		return nil, err
113
+	}
114
+	inflatedLayerData, err := archive.DecompressStream(rawTar)
115
+	if err != nil {
116
+		return nil, err
117
+	}
118
+
119
+	defer rawTar.Close()
120
+	defer inflatedLayerData.Close()
121
+
122
+	return l.ls.Register(inflatedLayerData, rootFS.ChainID())
123
+}
124
+
125
+func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID image.ID, outStream io.Writer) error {
126
+	if prevID, err := l.ts.Get(ref); err == nil && prevID != imgID {
127
+		fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", ref.String(), string(prevID)) // todo: this message is wrong in case of multiple tags
128
+	}
129
+
130
+	if err := l.ts.Add(ref, imgID, true); err != nil {
131
+		return err
132
+	}
133
+	return nil
134
+}
135
+
136
+func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer) error {
137
+	legacyLoadedMap := make(map[string]image.ID)
138
+
139
+	dirs, err := ioutil.ReadDir(tmpDir)
140
+	if err != nil {
141
+		return err
142
+	}
143
+
144
+	// every dir represents an image
145
+	for _, d := range dirs {
146
+		if d.IsDir() {
147
+			if err := l.legacyLoadImage(d.Name(), tmpDir, legacyLoadedMap); err != nil {
148
+				return err
149
+			}
150
+		}
151
+	}
152
+
153
+	// load tags from repositories file
154
+	repositoriesPath, err := safePath(tmpDir, legacyRepositoriesFileName)
155
+	if err != nil {
156
+		return err
157
+	}
158
+	repositoriesFile, err := os.Open(repositoriesPath)
159
+	if err != nil {
160
+		if !os.IsNotExist(err) {
161
+			return err
162
+		}
163
+		return repositoriesFile.Close()
164
+	}
165
+	defer repositoriesFile.Close()
166
+
167
+	repositories := make(map[string]map[string]string)
168
+	if err := json.NewDecoder(repositoriesFile).Decode(&repositories); err != nil {
169
+		return err
170
+	}
171
+
172
+	for name, tagMap := range repositories {
173
+		for tag, oldID := range tagMap {
174
+			imgID, ok := legacyLoadedMap[oldID]
175
+			if !ok {
176
+				return fmt.Errorf("invalid target ID: %v", oldID)
177
+			}
178
+			named, err := reference.WithName(name)
179
+			if err != nil {
180
+				return err
181
+			}
182
+			ref, err := reference.WithTag(named, tag)
183
+			if err != nil {
184
+				return err
185
+			}
186
+			l.setLoadedTag(ref, imgID, outStream)
187
+		}
188
+	}
189
+
190
+	return nil
191
+}
192
+
193
+func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[string]image.ID) error {
194
+	if _, loaded := loadedMap[oldID]; loaded {
195
+		return nil
196
+	}
197
+	configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName))
198
+	if err != nil {
199
+		return err
200
+	}
201
+	imageJSON, err := ioutil.ReadFile(configPath)
202
+	if err != nil {
203
+		logrus.Debugf("Error reading json: %v", err)
204
+		return err
205
+	}
206
+
207
+	var img struct{ Parent string }
208
+	if err := json.Unmarshal(imageJSON, &img); err != nil {
209
+		return err
210
+	}
211
+
212
+	var parentID image.ID
213
+	if img.Parent != "" {
214
+		for {
215
+			var loaded bool
216
+			if parentID, loaded = loadedMap[img.Parent]; !loaded {
217
+				if err := l.legacyLoadImage(img.Parent, sourceDir, loadedMap); err != nil {
218
+					return err
219
+				}
220
+			} else {
221
+				break
222
+			}
223
+		}
224
+	}
225
+
226
+	// todo: try to connect with migrate code
227
+	rootFS := image.NewRootFS()
228
+	var history []image.History
229
+
230
+	if parentID != "" {
231
+		parentImg, err := l.is.Get(parentID)
232
+		if err != nil {
233
+			return err
234
+		}
235
+
236
+		rootFS = parentImg.RootFS
237
+		history = parentImg.History
238
+	}
239
+
240
+	layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName))
241
+	if err != nil {
242
+		return err
243
+	}
244
+	newLayer, err := l.loadLayer(layerPath, *rootFS)
245
+	if err != nil {
246
+		return err
247
+	}
248
+	rootFS.Append(newLayer.DiffID())
249
+
250
+	h, err := v1.HistoryFromConfig(imageJSON, false)
251
+	if err != nil {
252
+		return err
253
+	}
254
+	history = append(history, h)
255
+
256
+	config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history)
257
+	if err != nil {
258
+		return err
259
+	}
260
+	imgID, err := l.is.Create(config)
261
+	if err != nil {
262
+		return err
263
+	}
264
+
265
+	metadata, err := l.ls.Release(newLayer)
266
+	layer.LogReleaseMetadata(metadata)
267
+	if err != nil {
268
+		return err
269
+	}
270
+
271
+	if parentID != "" {
272
+		if err := l.is.SetParent(imgID, parentID); err != nil {
273
+			return err
274
+		}
275
+	}
276
+
277
+	loadedMap[oldID] = imgID
278
+	return nil
279
+}
280
+
281
+func safePath(base, path string) (string, error) {
282
+	return symlink.FollowSymlinkInScope(filepath.Join(base, path), base)
283
+}
0 284
new file mode 100644
... ...
@@ -0,0 +1,303 @@
+package tarexport
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/image/v1"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/tag"
+)
+
+type imageDescriptor struct {
+	refs   []reference.NamedTagged
+	layers []string
+}
+
+type saveSession struct {
+	*tarexporter
+	outDir      string
+	images      map[image.ID]*imageDescriptor
+	savedLayers map[string]struct{}
+}
+
+func (l *tarexporter) Save(names []string, outStream io.Writer) error {
+	images, err := l.parseNames(names)
+	if err != nil {
+		return err
+	}
+
+	return (&saveSession{tarexporter: l, images: images}).save(outStream)
+}
+
+func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor, error) {
+	imgDescr := make(map[image.ID]*imageDescriptor)
+
+	addAssoc := func(id image.ID, ref reference.Named) {
+		if _, ok := imgDescr[id]; !ok {
+			imgDescr[id] = &imageDescriptor{}
+		}
+
+		if ref != nil {
+			var tagged reference.NamedTagged
+			if _, ok := ref.(reference.Digested); ok {
+				return
+			}
+			var ok bool
+			if tagged, ok = ref.(reference.NamedTagged); !ok {
+				var err error
+				if tagged, err = reference.WithTag(ref, tag.DefaultTag); err != nil {
+					return
+				}
+			}
+
+			for _, t := range imgDescr[id].refs {
+				if tagged.String() == t.String() {
+					return
+				}
+			}
+			imgDescr[id].refs = append(imgDescr[id].refs, tagged)
+		}
+	}
+
+	for _, name := range names {
+		ref, err := reference.ParseNamed(name)
+		if err != nil {
+			return nil, err
+		}
+		ref = registry.NormalizeLocalReference(ref)
+		if ref.Name() == string(digest.Canonical) {
+			imgID, err := l.is.Search(name)
+			if err != nil {
+				return nil, err
+			}
+			addAssoc(imgID, nil)
+			continue
+		}
+		if _, ok := ref.(reference.Digested); !ok {
+			if _, ok := ref.(reference.NamedTagged); !ok {
+				assocs := l.ts.ReferencesByName(ref)
+				for _, assoc := range assocs {
+					addAssoc(assoc.ImageID, assoc.Ref)
+				}
+				if len(assocs) == 0 {
+					imgID, err := l.is.Search(name)
+					if err != nil {
+						return nil, err
+					}
+					addAssoc(imgID, nil)
+				}
+				continue
+			}
+		}
+		var imgID image.ID
+		if imgID, err = l.ts.Get(ref); err != nil {
+			return nil, err
+		}
+		addAssoc(imgID, ref)
+
+	}
+	return imgDescr, nil
+}
+
+func (s *saveSession) save(outStream io.Writer) error {
+	s.savedLayers = make(map[string]struct{})
+
+	// get image json
+	tempDir, err := ioutil.TempDir("", "docker-export-")
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(tempDir)
+
+	s.outDir = tempDir
+	reposLegacy := make(map[string]map[string]string)
+
+	var manifest []manifestItem
+
+	for id, imageDescr := range s.images {
+		if err = s.saveImage(id); err != nil {
+			return err
+		}
+
+		var repoTags []string
+		var layers []string
+
+		for _, ref := range imageDescr.refs {
+			if _, ok := reposLegacy[ref.Name()]; !ok {
+				reposLegacy[ref.Name()] = make(map[string]string)
+			}
+			reposLegacy[ref.Name()][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1]
+			repoTags = append(repoTags, ref.String())
+		}
+
+		for _, l := range imageDescr.layers {
+			layers = append(layers, filepath.Join(l, legacyLayerFileName))
+		}
+
+		manifest = append(manifest, manifestItem{
+			Config:   digest.Digest(id).Hex() + ".json",
+			RepoTags: repoTags,
+			Layers:   layers,
+		})
+	}
+
+	if len(reposLegacy) > 0 {
+		reposFile := filepath.Join(tempDir, legacyRepositoriesFileName)
+		f, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+		if err != nil {
+			f.Close()
+			return err
+		}
+		if err := json.NewEncoder(f).Encode(reposLegacy); err != nil {
+			return err
+		}
+		if err := f.Close(); err != nil {
+			return err
+		}
+		if err := os.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
+			return err
+		}
+	}
+
+	manifestFileName := filepath.Join(tempDir, manifestFileName)
+	f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+	if err != nil {
+		f.Close()
+		return err
+	}
+	if err := json.NewEncoder(f).Encode(manifest); err != nil {
+		return err
+	}
+	if err := f.Close(); err != nil {
+		return err
+	}
+	if err := os.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
+		return err
+	}
+
+	fs, err := archive.Tar(tempDir, archive.Uncompressed)
+	if err != nil {
+		return err
+	}
+	defer fs.Close()
+
+	if _, err := io.Copy(outStream, fs); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (s *saveSession) saveImage(id image.ID) error {
+	img, err := s.is.Get(id)
+	if err != nil {
+		return err
+	}
+
+	if len(img.RootFS.DiffIDs) == 0 {
+		return fmt.Errorf("empty export - not implemented")
+	}
+
+	var parent digest.Digest
+	var layers []string
+	for i := range img.RootFS.DiffIDs {
+		v1Img := image.V1Image{}
+		if i == len(img.RootFS.DiffIDs)-1 {
+			v1Img = img.V1Image
+		}
+		rootFS := *img.RootFS
+		rootFS.DiffIDs = rootFS.DiffIDs[:i+1]
+		v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent)
+		if err != nil {
+			return err
+		}
+
+		v1Img.ID = v1ID.Hex()
+		if parent != "" {
+			v1Img.Parent = parent.Hex()
+		}
+
+		if err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created); err != nil {
+			return err
+		}
+		layers = append(layers, v1Img.ID)
+		parent = v1ID
+	}
+
+	configFile := filepath.Join(s.outDir, digest.Digest(id).Hex()+".json")
+	if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil {
+		return err
+	}
+	if err := os.Chtimes(configFile, img.Created, img.Created); err != nil {
+		return err
+	}
+
+	s.images[id].layers = layers
+	return nil
+}
+
+func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, createdTime time.Time) error {
+	if _, exists := s.savedLayers[legacyImg.ID]; exists {
+		return nil
+	}
+
+	outDir := filepath.Join(s.outDir, legacyImg.ID)
+	if err := os.Mkdir(outDir, 0755); err != nil {
+		return err
+	}
+
+	// todo: why is this version file here?
+	if err := ioutil.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil {
+		return err
+	}
+
+	imageConfig, err := json.Marshal(legacyImg)
+	if err != nil {
+		return err
+	}
+
+	if err := ioutil.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil {
+		return err
+	}
+
+	// serialize filesystem
+	tarFile, err := os.Create(filepath.Join(outDir, legacyLayerFileName))
+	if err != nil {
+		return err
+	}
+	defer tarFile.Close()
+
+	l, err := s.ls.Get(id)
+	if err != nil {
+		return err
+	}
+	defer layer.ReleaseAndLog(s.ls, l)
+
+	arch, err := l.TarStream()
+	if err != nil {
+		return err
+	}
+	if _, err := io.Copy(tarFile, arch); err != nil {
+		return err
+	}
+
+	for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} {
+		// todo: maybe save layer created timestamp?
+		if err := os.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil {
+			return err
+		}
+	}
+
+	s.savedLayers[legacyImg.ID] = struct{}{}
+	return nil
+}
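save stages everything in a temporary directory first: one <digest-hex>.json config per image, one directory per synthesized v1 layer ID holding VERSION, json and layer.tar, a manifest.json describing the whole set, and a legacy repositories file when tags are present; only then is an uncompressed tar of that directory streamed to outStream. The sketch below shows how a consumer could read the manifest back out of an unpacked archive; the directory path is a placeholder and manifestItem is copied locally only for illustration.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// manifestItem mirrors the entries the exporter writes to manifest.json.
type manifestItem struct {
	Config   string
	RepoTags []string
	Layers   []string
}

func main() {
	// Assumes the tar produced by Save has already been unpacked here.
	dir := "/tmp/unpacked-save"

	f, err := os.Open(filepath.Join(dir, "manifest.json"))
	if err != nil {
		panic(err)
	}
	defer f.Close()

	var items []manifestItem
	if err := json.NewDecoder(f).Decode(&items); err != nil {
		panic(err)
	}
	for _, it := range items {
		fmt.Printf("config=%s tags=%v layers=%d\n", it.Config, it.RepoTags, len(it.Layers))
	}
}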
new file mode 100644
@@ -0,0 +1,36 @@
+package tarexport
+
+import (
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/tag"
+)
+
+const (
+	manifestFileName           = "manifest.json"
+	legacyLayerFileName        = "layer.tar"
+	legacyConfigFileName       = "json"
+	legacyVersionFileName      = "VERSION"
+	legacyRepositoriesFileName = "repositories"
+)
+
+type manifestItem struct {
+	Config   string
+	RepoTags []string
+	Layers   []string
+}
+
+type tarexporter struct {
+	is image.Store
+	ls layer.Store
+	ts tag.Store
+}
+
+// NewTarExporter returns a new ImageExporter for tar packages
+func NewTarExporter(is image.Store, ls layer.Store, ts tag.Store) image.Exporter {
+	return &tarexporter{
+		is: is,
+		ls: ls,
+		ts: ts,
+	}
+}
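NewTarExporter only wires together stores the daemon already owns; the file-name constants above are the contract that load and save share for the legacy per-image directories. As a hedged illustration of the layout those constants describe, the sketch below writes one such directory with a dummy ID and dummy contents into a throwaway temp dir; nothing here calls the exporter itself.

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func main() {
	dir, err := ioutil.TempDir("", "legacy-image-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// One directory per legacy image ID, holding VERSION, json and layer.tar.
	imgDir := filepath.Join(dir, "0123456789abcdef") // dummy legacy ID
	if err := os.Mkdir(imgDir, 0755); err != nil {
		panic(err)
	}
	files := map[string][]byte{
		"VERSION":   []byte("1.0"),
		"json":      []byte(`{"id":"0123456789abcdef"}`), // dummy v1 config
		"layer.tar": {},                                  // layer contents would go here
	}
	for name, data := range files {
		if err := ioutil.WriteFile(filepath.Join(imgDir, name), data, 0644); err != nil {
			panic(err)
		}
	}
	fmt.Println("wrote legacy layout under", imgDir)
}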
new file mode 100644
@@ -0,0 +1,148 @@
+package v1
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/version"
+)
+
+var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
+
+// noFallbackMinVersion is the minimum version for which v1compatibility
+// information will not be marshaled through the Image struct to remove
+// blank fields.
+var noFallbackMinVersion = version.Version("1.8.3")
+
+// HistoryFromConfig creates a History struct from v1 configuration JSON
+func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) {
+	h := image.History{}
+	var v1Image image.V1Image
+	if err := json.Unmarshal(imageJSON, &v1Image); err != nil {
+		return h, err
+	}
+
+	return image.History{
+		Author:     v1Image.Author,
+		Created:    v1Image.Created,
+		CreatedBy:  strings.Join(v1Image.ContainerConfig.Cmd.Slice(), " "),
+		Comment:    v1Image.Comment,
+		EmptyLayer: emptyLayer,
+	}, nil
+}
+
+// CreateID creates an ID from v1 image, layerID and parent ID.
+// Used for backwards compatibility with old clients.
+func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) {
+	v1Image.ID = ""
+	v1JSON, err := json.Marshal(v1Image)
+	if err != nil {
+		return "", err
+	}
+
+	var config map[string]*json.RawMessage
+	if err := json.Unmarshal(v1JSON, &config); err != nil {
+		return "", err
+	}
+
+	// FIXME: note that this is slightly incompatible with RootFS logic
+	config["layer_id"] = rawJSON(layerID)
+	if parent != "" {
+		config["parent"] = rawJSON(parent)
+	}
+
+	configJSON, err := json.Marshal(config)
+	if err != nil {
+		return "", err
+	}
+	logrus.Debugf("CreateV1ID %s", configJSON)
+
+	return digest.FromBytes(configJSON)
+}
+
+// MakeConfigFromV1Config creates an image config from the legacy V1 config format.
+func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) {
+	var dver struct {
+		DockerVersion string `json:"docker_version"`
+	}
+
+	if err := json.Unmarshal(imageJSON, &dver); err != nil {
+		return nil, err
+	}
+
+	useFallback := version.Version(dver.DockerVersion).LessThan(noFallbackMinVersion)
+
+	if useFallback {
+		var v1Image image.V1Image
+		err := json.Unmarshal(imageJSON, &v1Image)
+		if err != nil {
+			return nil, err
+		}
+		imageJSON, err = json.Marshal(v1Image)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	var c map[string]*json.RawMessage
+	if err := json.Unmarshal(imageJSON, &c); err != nil {
+		return nil, err
+	}
+
+	delete(c, "id")
+	delete(c, "parent")
+	delete(c, "Size") // Size is calculated from data on disk and is inconsistent
+	delete(c, "parent_id")
+	delete(c, "layer_id")
+	delete(c, "throwaway")
+
+	c["rootfs"] = rawJSON(rootfs)
+	c["history"] = rawJSON(history)
+
+	return json.Marshal(c)
+}
+
+// MakeV1ConfigFromConfig creates a legacy V1 image config from an Image struct
+func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
+	// Top-level v1compatibility string should be a modified version of the
+	// image config.
+	var configAsMap map[string]*json.RawMessage
+	if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil {
+		return nil, err
+	}
+
+	// Delete fields that didn't exist in old manifest
+	delete(configAsMap, "rootfs")
+	delete(configAsMap, "history")
+	configAsMap["id"] = rawJSON(v1ID)
+	if parentV1ID != "" {
+		configAsMap["parent"] = rawJSON(parentV1ID)
+	}
+	if throwaway {
+		configAsMap["throwaway"] = rawJSON(true)
+	}
+
+	return json.Marshal(configAsMap)
+}
+
+func rawJSON(value interface{}) *json.RawMessage {
+	jsonval, err := json.Marshal(value)
+	if err != nil {
+		return nil
+	}
+	return (*json.RawMessage)(&jsonval)
+}
+
+// ValidateID checks whether an ID string is a valid image ID.
+func ValidateID(id string) error {
+	if ok := validHex.MatchString(id); !ok {
+		return fmt.Errorf("image ID '%s' is invalid", id)
+	}
+	return nil
+}
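CreateID is what makes exported v1 IDs deterministic: the v1 config is re-marshaled with its own id cleared, layer_id and (optionally) parent are folded in, and the canonical JSON is digested, so the same config and layer chain always produce the same ID. Below is a rough, standalone sketch of that derivation; deriveV1ID is a hypothetical helper, and crypto/sha256 stands in for the distribution digest package used above.

package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

// deriveV1ID mimics the shape of CreateID: drop the legacy id, fold in the
// layer chain ID and the parent ID, then hash the canonical JSON.
func deriveV1ID(config map[string]interface{}, layerID, parent string) (string, error) {
	delete(config, "id") // the old id must not influence the digest
	config["layer_id"] = layerID
	if parent != "" {
		config["parent"] = parent
	}
	j, err := json.Marshal(config) // map keys are marshaled in sorted order
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", sha256.Sum256(j)), nil
}

func main() {
	cfg := map[string]interface{}{"architecture": "amd64", "os": "linux"}
	id, err := deriveV1ID(cfg, "sha256:dummy-chain-id", "") // placeholder chain ID
	if err != nil {
		panic(err)
	}
	fmt.Println("v1 id:", id)
}

Because JSON object keys are emitted in sorted order, the digest is stable across runs for identical inputs.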