Layer store manages read-only and read-write layers on a union file system.
Read only layers are always referenced by content addresses.
Read-write layer identifiers are handled by the caller but upon registering
its difference, the committed read-only layer will be referenced by content
hash.
Signed-off-by: Derek McGowan <derek@mcgstyle.net> (github: dmcgowan)
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
| 1 | 1 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,47 @@ |
| 0 |
+package layer |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "archive/tar" |
|
| 4 |
+ "bytes" |
|
| 5 |
+ "io" |
|
| 6 |
+) |
|
| 7 |
+ |
|
| 8 |
+// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - |
|
| 9 |
+// (1024 NULL bytes) |
|
| 10 |
+const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef")
|
|
| 11 |
+ |
|
| 12 |
+type emptyLayer struct{}
|
|
| 13 |
+ |
|
| 14 |
+// EmptyLayer is a layer that corresponds to empty tar. |
|
| 15 |
+var EmptyLayer = &emptyLayer{}
|
|
| 16 |
+ |
|
| 17 |
+func (el *emptyLayer) TarStream() (io.Reader, error) {
|
|
| 18 |
+ buf := new(bytes.Buffer) |
|
| 19 |
+ tarWriter := tar.NewWriter(buf) |
|
| 20 |
+ tarWriter.Close() |
|
| 21 |
+ return buf, nil |
|
| 22 |
+} |
|
| 23 |
+ |
|
| 24 |
+func (el *emptyLayer) ChainID() ChainID {
|
|
| 25 |
+ return ChainID(DigestSHA256EmptyTar) |
|
| 26 |
+} |
|
| 27 |
+ |
|
| 28 |
+func (el *emptyLayer) DiffID() DiffID {
|
|
| 29 |
+ return DigestSHA256EmptyTar |
|
| 30 |
+} |
|
| 31 |
+ |
|
| 32 |
+func (el *emptyLayer) Parent() Layer {
|
|
| 33 |
+ return nil |
|
| 34 |
+} |
|
| 35 |
+ |
|
| 36 |
+func (el *emptyLayer) Size() (size int64, err error) {
|
|
| 37 |
+ return 0, nil |
|
| 38 |
+} |
|
| 39 |
+ |
|
| 40 |
+func (el *emptyLayer) DiffSize() (size int64, err error) {
|
|
| 41 |
+ return 0, nil |
|
| 42 |
+} |
|
| 43 |
+ |
|
| 44 |
+func (el *emptyLayer) Metadata() (map[string]string, error) {
|
|
| 45 |
+ return make(map[string]string), nil |
|
| 46 |
+} |
| 0 | 47 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,46 @@ |
| 0 |
+package layer |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "io" |
|
| 4 |
+ "testing" |
|
| 5 |
+ |
|
| 6 |
+ "github.com/docker/distribution/digest" |
|
| 7 |
+) |
|
| 8 |
+ |
|
| 9 |
+func TestEmptyLayer(t *testing.T) {
|
|
| 10 |
+ if EmptyLayer.ChainID() != ChainID(DigestSHA256EmptyTar) {
|
|
| 11 |
+ t.Fatal("wrong ID for empty layer")
|
|
| 12 |
+ } |
|
| 13 |
+ |
|
| 14 |
+ if EmptyLayer.DiffID() != DigestSHA256EmptyTar {
|
|
| 15 |
+ t.Fatal("wrong DiffID for empty layer")
|
|
| 16 |
+ } |
|
| 17 |
+ |
|
| 18 |
+ if EmptyLayer.Parent() != nil {
|
|
| 19 |
+ t.Fatal("expected no parent for empty layer")
|
|
| 20 |
+ } |
|
| 21 |
+ |
|
| 22 |
+ if size, err := EmptyLayer.Size(); err != nil || size != 0 {
|
|
| 23 |
+ t.Fatal("expected zero size for empty layer")
|
|
| 24 |
+ } |
|
| 25 |
+ |
|
| 26 |
+ if diffSize, err := EmptyLayer.DiffSize(); err != nil || diffSize != 0 {
|
|
| 27 |
+ t.Fatal("expected zero diffsize for empty layer")
|
|
| 28 |
+ } |
|
| 29 |
+ |
|
| 30 |
+ tarStream, err := EmptyLayer.TarStream() |
|
| 31 |
+ if err != nil {
|
|
| 32 |
+ t.Fatalf("error streaming tar for empty layer: %v", err)
|
|
| 33 |
+ } |
|
| 34 |
+ |
|
| 35 |
+ digester := digest.Canonical.New() |
|
| 36 |
+ _, err = io.Copy(digester.Hash(), tarStream) |
|
| 37 |
+ |
|
| 38 |
+ if err != nil {
|
|
| 39 |
+ t.Fatalf("error hashing empty tar layer: %v", err)
|
|
| 40 |
+ } |
|
| 41 |
+ |
|
| 42 |
+ if digester.Digest() != digest.Digest(DigestSHA256EmptyTar) {
|
|
| 43 |
+ t.Fatal("empty layer tar stream hashes to wrong value")
|
|
| 44 |
+ } |
|
| 45 |
+} |
| 0 | 46 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,318 @@ |
| 0 |
+package layer |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "compress/gzip" |
|
| 4 |
+ "errors" |
|
| 5 |
+ "fmt" |
|
| 6 |
+ "io" |
|
| 7 |
+ "io/ioutil" |
|
| 8 |
+ "os" |
|
| 9 |
+ "path/filepath" |
|
| 10 |
+ "regexp" |
|
| 11 |
+ "strconv" |
|
| 12 |
+ |
|
| 13 |
+ "github.com/Sirupsen/logrus" |
|
| 14 |
+ "github.com/docker/distribution/digest" |
|
| 15 |
+ "github.com/docker/docker/pkg/ioutils" |
|
| 16 |
+) |
|
| 17 |
+ |
|
| 18 |
+var ( |
|
| 19 |
+ stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`)
|
|
| 20 |
+ supportedAlgorithms = []digest.Algorithm{
|
|
| 21 |
+ digest.SHA256, |
|
| 22 |
+ // digest.SHA384, // Currently not used |
|
| 23 |
+ // digest.SHA512, // Currently not used |
|
| 24 |
+ } |
|
| 25 |
+) |
|
| 26 |
+ |
|
| 27 |
+type fileMetadataStore struct {
|
|
| 28 |
+ root string |
|
| 29 |
+} |
|
| 30 |
+ |
|
| 31 |
+type fileMetadataTransaction struct {
|
|
| 32 |
+ store *fileMetadataStore |
|
| 33 |
+ root string |
|
| 34 |
+} |
|
| 35 |
+ |
|
| 36 |
+// NewFSMetadataStore returns an instance of a metadata store |
|
| 37 |
+// which is backed by files on disk using the provided root |
|
| 38 |
+// as the root of metadata files. |
|
| 39 |
+func NewFSMetadataStore(root string) (MetadataStore, error) {
|
|
| 40 |
+ if err := os.MkdirAll(root, 0700); err != nil {
|
|
| 41 |
+ return nil, err |
|
| 42 |
+ } |
|
| 43 |
+ return &fileMetadataStore{
|
|
| 44 |
+ root: root, |
|
| 45 |
+ }, nil |
|
| 46 |
+} |
|
| 47 |
+ |
|
| 48 |
+func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string {
|
|
| 49 |
+ dgst := digest.Digest(layer) |
|
| 50 |
+ return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex()) |
|
| 51 |
+} |
|
| 52 |
+ |
|
| 53 |
+func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string {
|
|
| 54 |
+ return filepath.Join(fms.getLayerDirectory(layer), filename) |
|
| 55 |
+} |
|
| 56 |
+ |
|
| 57 |
+func (fms *fileMetadataStore) getMountDirectory(mount string) string {
|
|
| 58 |
+ return filepath.Join(fms.root, "mounts", mount) |
|
| 59 |
+} |
|
| 60 |
+ |
|
| 61 |
+func (fms *fileMetadataStore) getMountFilename(mount, filename string) string {
|
|
| 62 |
+ return filepath.Join(fms.getMountDirectory(mount), filename) |
|
| 63 |
+} |
|
| 64 |
+ |
|
| 65 |
+func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) {
|
|
| 66 |
+ tmpDir := filepath.Join(fms.root, "tmp") |
|
| 67 |
+ if err := os.MkdirAll(tmpDir, 0755); err != nil {
|
|
| 68 |
+ return nil, err |
|
| 69 |
+ } |
|
| 70 |
+ |
|
| 71 |
+ td, err := ioutil.TempDir(tmpDir, "layer-") |
|
| 72 |
+ if err != nil {
|
|
| 73 |
+ return nil, err |
|
| 74 |
+ } |
|
| 75 |
+ // Create a new tempdir |
|
| 76 |
+ return &fileMetadataTransaction{
|
|
| 77 |
+ store: fms, |
|
| 78 |
+ root: td, |
|
| 79 |
+ }, nil |
|
| 80 |
+} |
|
| 81 |
+ |
|
| 82 |
+func (fm *fileMetadataTransaction) SetSize(size int64) error {
|
|
| 83 |
+ content := fmt.Sprintf("%d", size)
|
|
| 84 |
+ return ioutil.WriteFile(filepath.Join(fm.root, "size"), []byte(content), 0644) |
|
| 85 |
+} |
|
| 86 |
+ |
|
| 87 |
+func (fm *fileMetadataTransaction) SetParent(parent ChainID) error {
|
|
| 88 |
+ return ioutil.WriteFile(filepath.Join(fm.root, "parent"), []byte(digest.Digest(parent).String()), 0644) |
|
| 89 |
+} |
|
| 90 |
+ |
|
| 91 |
+func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error {
|
|
| 92 |
+ return ioutil.WriteFile(filepath.Join(fm.root, "diff"), []byte(digest.Digest(diff).String()), 0644) |
|
| 93 |
+} |
|
| 94 |
+ |
|
| 95 |
+func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error {
|
|
| 96 |
+ return ioutil.WriteFile(filepath.Join(fm.root, "cache-id"), []byte(cacheID), 0644) |
|
| 97 |
+} |
|
| 98 |
+ |
|
| 99 |
+func (fm *fileMetadataTransaction) TarSplitWriter() (io.WriteCloser, error) {
|
|
| 100 |
+ f, err := os.OpenFile(filepath.Join(fm.root, "tar-split.json.gz"), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) |
|
| 101 |
+ if err != nil {
|
|
| 102 |
+ return nil, err |
|
| 103 |
+ } |
|
| 104 |
+ |
|
| 105 |
+ fz := gzip.NewWriter(f) |
|
| 106 |
+ |
|
| 107 |
+ return ioutils.NewWriteCloserWrapper(fz, func() error {
|
|
| 108 |
+ fz.Close() |
|
| 109 |
+ return f.Close() |
|
| 110 |
+ }), nil |
|
| 111 |
+} |
|
| 112 |
+ |
|
| 113 |
+func (fm *fileMetadataTransaction) Commit(layer ChainID) error {
|
|
| 114 |
+ finalDir := fm.store.getLayerDirectory(layer) |
|
| 115 |
+ if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil {
|
|
| 116 |
+ return err |
|
| 117 |
+ } |
|
| 118 |
+ return os.Rename(fm.root, finalDir) |
|
| 119 |
+} |
|
| 120 |
+ |
|
| 121 |
+func (fm *fileMetadataTransaction) Cancel() error {
|
|
| 122 |
+ return os.RemoveAll(fm.root) |
|
| 123 |
+} |
|
| 124 |
+ |
|
| 125 |
+func (fm *fileMetadataTransaction) String() string {
|
|
| 126 |
+ return fm.root |
|
| 127 |
+} |
|
| 128 |
+ |
|
| 129 |
+func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) {
|
|
| 130 |
+ content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size")) |
|
| 131 |
+ if err != nil {
|
|
| 132 |
+ return 0, err |
|
| 133 |
+ } |
|
| 134 |
+ |
|
| 135 |
+ size, err := strconv.ParseInt(string(content), 10, 64) |
|
| 136 |
+ if err != nil {
|
|
| 137 |
+ return 0, err |
|
| 138 |
+ } |
|
| 139 |
+ |
|
| 140 |
+ return size, nil |
|
| 141 |
+} |
|
| 142 |
+ |
|
| 143 |
+func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) {
|
|
| 144 |
+ content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent")) |
|
| 145 |
+ if err != nil {
|
|
| 146 |
+ if os.IsNotExist(err) {
|
|
| 147 |
+ return "", nil |
|
| 148 |
+ } |
|
| 149 |
+ return "", err |
|
| 150 |
+ } |
|
| 151 |
+ |
|
| 152 |
+ dgst, err := digest.ParseDigest(string(content)) |
|
| 153 |
+ if err != nil {
|
|
| 154 |
+ return "", err |
|
| 155 |
+ } |
|
| 156 |
+ |
|
| 157 |
+ return ChainID(dgst), nil |
|
| 158 |
+} |
|
| 159 |
+ |
|
| 160 |
+func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) {
|
|
| 161 |
+ content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff")) |
|
| 162 |
+ if err != nil {
|
|
| 163 |
+ return "", err |
|
| 164 |
+ } |
|
| 165 |
+ |
|
| 166 |
+ dgst, err := digest.ParseDigest(string(content)) |
|
| 167 |
+ if err != nil {
|
|
| 168 |
+ return "", err |
|
| 169 |
+ } |
|
| 170 |
+ |
|
| 171 |
+ return DiffID(dgst), nil |
|
| 172 |
+} |
|
| 173 |
+ |
|
| 174 |
+func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) {
|
|
| 175 |
+ content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) |
|
| 176 |
+ if err != nil {
|
|
| 177 |
+ return "", err |
|
| 178 |
+ } |
|
| 179 |
+ |
|
| 180 |
+ if !stringIDRegexp.MatchString(string(content)) {
|
|
| 181 |
+ return "", errors.New("invalid cache id value")
|
|
| 182 |
+ } |
|
| 183 |
+ |
|
| 184 |
+ return string(content), nil |
|
| 185 |
+} |
|
| 186 |
+ |
|
| 187 |
+func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) {
|
|
| 188 |
+ fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) |
|
| 189 |
+ if err != nil {
|
|
| 190 |
+ return nil, err |
|
| 191 |
+ } |
|
| 192 |
+ f, err := gzip.NewReader(fz) |
|
| 193 |
+ if err != nil {
|
|
| 194 |
+ return nil, err |
|
| 195 |
+ } |
|
| 196 |
+ |
|
| 197 |
+ return ioutils.NewReadCloserWrapper(f, func() error {
|
|
| 198 |
+ f.Close() |
|
| 199 |
+ return fz.Close() |
|
| 200 |
+ }), nil |
|
| 201 |
+} |
|
| 202 |
+ |
|
| 203 |
+func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error {
|
|
| 204 |
+ if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
|
|
| 205 |
+ return err |
|
| 206 |
+ } |
|
| 207 |
+ return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) |
|
| 208 |
+} |
|
| 209 |
+ |
|
| 210 |
+func (fms *fileMetadataStore) SetInitID(mount string, init string) error {
|
|
| 211 |
+ if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
|
|
| 212 |
+ return err |
|
| 213 |
+ } |
|
| 214 |
+ return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) |
|
| 215 |
+} |
|
| 216 |
+ |
|
| 217 |
+func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error {
|
|
| 218 |
+ if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
|
|
| 219 |
+ return err |
|
| 220 |
+ } |
|
| 221 |
+ return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) |
|
| 222 |
+} |
|
| 223 |
+ |
|
| 224 |
+func (fms *fileMetadataStore) GetMountID(mount string) (string, error) {
|
|
| 225 |
+ content, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) |
|
| 226 |
+ if err != nil {
|
|
| 227 |
+ return "", err |
|
| 228 |
+ } |
|
| 229 |
+ |
|
| 230 |
+ if !stringIDRegexp.MatchString(string(content)) {
|
|
| 231 |
+ return "", errors.New("invalid mount id value")
|
|
| 232 |
+ } |
|
| 233 |
+ |
|
| 234 |
+ return string(content), nil |
|
| 235 |
+} |
|
| 236 |
+ |
|
| 237 |
+func (fms *fileMetadataStore) GetInitID(mount string) (string, error) {
|
|
| 238 |
+ content, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) |
|
| 239 |
+ if err != nil {
|
|
| 240 |
+ if os.IsNotExist(err) {
|
|
| 241 |
+ return "", nil |
|
| 242 |
+ } |
|
| 243 |
+ return "", err |
|
| 244 |
+ } |
|
| 245 |
+ |
|
| 246 |
+ if !stringIDRegexp.MatchString(string(content)) {
|
|
| 247 |
+ return "", errors.New("invalid init id value")
|
|
| 248 |
+ } |
|
| 249 |
+ |
|
| 250 |
+ return string(content), nil |
|
| 251 |
+} |
|
| 252 |
+ |
|
| 253 |
+func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) {
|
|
| 254 |
+ content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent")) |
|
| 255 |
+ if err != nil {
|
|
| 256 |
+ if os.IsNotExist(err) {
|
|
| 257 |
+ return "", nil |
|
| 258 |
+ } |
|
| 259 |
+ return "", err |
|
| 260 |
+ } |
|
| 261 |
+ |
|
| 262 |
+ dgst, err := digest.ParseDigest(string(content)) |
|
| 263 |
+ if err != nil {
|
|
| 264 |
+ return "", err |
|
| 265 |
+ } |
|
| 266 |
+ |
|
| 267 |
+ return ChainID(dgst), nil |
|
| 268 |
+} |
|
| 269 |
+ |
|
| 270 |
+func (fms *fileMetadataStore) List() ([]ChainID, []string, error) {
|
|
| 271 |
+ var ids []ChainID |
|
| 272 |
+ for _, algorithm := range supportedAlgorithms {
|
|
| 273 |
+ fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm))) |
|
| 274 |
+ if err != nil {
|
|
| 275 |
+ if os.IsNotExist(err) {
|
|
| 276 |
+ continue |
|
| 277 |
+ } |
|
| 278 |
+ return nil, nil, err |
|
| 279 |
+ } |
|
| 280 |
+ |
|
| 281 |
+ for _, fi := range fileInfos {
|
|
| 282 |
+ if fi.IsDir() && fi.Name() != "mounts" {
|
|
| 283 |
+ dgst := digest.NewDigestFromHex(string(algorithm), fi.Name()) |
|
| 284 |
+ if err := dgst.Validate(); err != nil {
|
|
| 285 |
+ logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name())
|
|
| 286 |
+ } else {
|
|
| 287 |
+ ids = append(ids, ChainID(dgst)) |
|
| 288 |
+ } |
|
| 289 |
+ } |
|
| 290 |
+ } |
|
| 291 |
+ } |
|
| 292 |
+ |
|
| 293 |
+ fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts")) |
|
| 294 |
+ if err != nil {
|
|
| 295 |
+ if os.IsNotExist(err) {
|
|
| 296 |
+ return ids, []string{}, nil
|
|
| 297 |
+ } |
|
| 298 |
+ return nil, nil, err |
|
| 299 |
+ } |
|
| 300 |
+ |
|
| 301 |
+ var mounts []string |
|
| 302 |
+ for _, fi := range fileInfos {
|
|
| 303 |
+ if fi.IsDir() {
|
|
| 304 |
+ mounts = append(mounts, fi.Name()) |
|
| 305 |
+ } |
|
| 306 |
+ } |
|
| 307 |
+ |
|
| 308 |
+ return ids, mounts, nil |
|
| 309 |
+} |
|
| 310 |
+ |
|
| 311 |
+func (fms *fileMetadataStore) Remove(layer ChainID) error {
|
|
| 312 |
+ return os.RemoveAll(fms.getLayerDirectory(layer)) |
|
| 313 |
+} |
|
| 314 |
+ |
|
| 315 |
+func (fms *fileMetadataStore) RemoveMount(mount string) error {
|
|
| 316 |
+ return os.RemoveAll(fms.getMountDirectory(mount)) |
|
| 317 |
+} |
| 0 | 318 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,119 @@ |
| 0 |
+package layer |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "fmt" |
|
| 4 |
+ "io/ioutil" |
|
| 5 |
+ "math/rand" |
|
| 6 |
+ "os" |
|
| 7 |
+ "path/filepath" |
|
| 8 |
+ "strings" |
|
| 9 |
+ "syscall" |
|
| 10 |
+ "testing" |
|
| 11 |
+ |
|
| 12 |
+ "github.com/docker/distribution/digest" |
|
| 13 |
+) |
|
| 14 |
+ |
|
| 15 |
+func randomLayerID(seed int64) ChainID {
|
|
| 16 |
+ r := rand.New(rand.NewSource(seed)) |
|
| 17 |
+ dgst, err := digest.FromBytes([]byte(fmt.Sprintf("%d", r.Int63())))
|
|
| 18 |
+ if err != nil {
|
|
| 19 |
+ panic(err) |
|
| 20 |
+ } |
|
| 21 |
+ |
|
| 22 |
+ return ChainID(dgst) |
|
| 23 |
+} |
|
| 24 |
+ |
|
| 25 |
+func newFileMetadataStore(t *testing.T) (*fileMetadataStore, string, func()) {
|
|
| 26 |
+ td, err := ioutil.TempDir("", "layers-")
|
|
| 27 |
+ if err != nil {
|
|
| 28 |
+ t.Fatal(err) |
|
| 29 |
+ } |
|
| 30 |
+ fms, err := NewFSMetadataStore(td) |
|
| 31 |
+ if err != nil {
|
|
| 32 |
+ t.Fatal(err) |
|
| 33 |
+ } |
|
| 34 |
+ |
|
| 35 |
+ return fms.(*fileMetadataStore), td, func() {
|
|
| 36 |
+ if err := os.RemoveAll(td); err != nil {
|
|
| 37 |
+ t.Logf("Failed to cleanup %q: %s", td, err)
|
|
| 38 |
+ } |
|
| 39 |
+ } |
|
| 40 |
+} |
|
| 41 |
+ |
|
| 42 |
+func assertNotDirectoryError(t *testing.T, err error) {
|
|
| 43 |
+ perr, ok := err.(*os.PathError) |
|
| 44 |
+ if !ok {
|
|
| 45 |
+ t.Fatalf("Unexpected error %#v, expected path error", err)
|
|
| 46 |
+ } |
|
| 47 |
+ |
|
| 48 |
+ if perr.Err != syscall.ENOTDIR {
|
|
| 49 |
+ t.Fatalf("Unexpected error %s, expected %s", perr.Err, syscall.ENOTDIR)
|
|
| 50 |
+ } |
|
| 51 |
+} |
|
| 52 |
+ |
|
| 53 |
+func assertPermissionError(t *testing.T, err error) {
|
|
| 54 |
+ perr, ok := err.(*os.PathError) |
|
| 55 |
+ if !ok {
|
|
| 56 |
+ t.Fatalf("Unexpected error %#v, expected path error", err)
|
|
| 57 |
+ } |
|
| 58 |
+ |
|
| 59 |
+ if perr.Err != syscall.EACCES {
|
|
| 60 |
+ t.Fatalf("Unexpected error %s, expected %s", perr.Err, syscall.EACCES)
|
|
| 61 |
+ } |
|
| 62 |
+} |
|
| 63 |
+ |
|
| 64 |
+func TestCommitFailure(t *testing.T) {
|
|
| 65 |
+ fms, td, cleanup := newFileMetadataStore(t) |
|
| 66 |
+ defer cleanup() |
|
| 67 |
+ |
|
| 68 |
+ if err := ioutil.WriteFile(filepath.Join(td, "sha256"), []byte("was here first!"), 0644); err != nil {
|
|
| 69 |
+ t.Fatal(err) |
|
| 70 |
+ } |
|
| 71 |
+ |
|
| 72 |
+ tx, err := fms.StartTransaction() |
|
| 73 |
+ if err != nil {
|
|
| 74 |
+ t.Fatal(err) |
|
| 75 |
+ } |
|
| 76 |
+ |
|
| 77 |
+ if err := tx.SetSize(0); err != nil {
|
|
| 78 |
+ t.Fatal(err) |
|
| 79 |
+ } |
|
| 80 |
+ |
|
| 81 |
+ err = tx.Commit(randomLayerID(5)) |
|
| 82 |
+ if err == nil {
|
|
| 83 |
+ t.Fatalf("Expected error committing with invalid layer parent directory")
|
|
| 84 |
+ } |
|
| 85 |
+ assertNotDirectoryError(t, err) |
|
| 86 |
+} |
|
| 87 |
+ |
|
| 88 |
+func TestStartTransactionFailure(t *testing.T) {
|
|
| 89 |
+ fms, td, cleanup := newFileMetadataStore(t) |
|
| 90 |
+ defer cleanup() |
|
| 91 |
+ |
|
| 92 |
+ if err := ioutil.WriteFile(filepath.Join(td, "tmp"), []byte("was here first!"), 0644); err != nil {
|
|
| 93 |
+ t.Fatal(err) |
|
| 94 |
+ } |
|
| 95 |
+ |
|
| 96 |
+ _, err := fms.StartTransaction() |
|
| 97 |
+ if err == nil {
|
|
| 98 |
+ t.Fatalf("Expected error starting transaction with invalid layer parent directory")
|
|
| 99 |
+ } |
|
| 100 |
+ assertNotDirectoryError(t, err) |
|
| 101 |
+ |
|
| 102 |
+ if err := os.Remove(filepath.Join(td, "tmp")); err != nil {
|
|
| 103 |
+ t.Fatal(err) |
|
| 104 |
+ } |
|
| 105 |
+ |
|
| 106 |
+ tx, err := fms.StartTransaction() |
|
| 107 |
+ if err != nil {
|
|
| 108 |
+ t.Fatal(err) |
|
| 109 |
+ } |
|
| 110 |
+ |
|
| 111 |
+ if expected := filepath.Join(td, "tmp"); strings.HasPrefix(expected, tx.String()) {
|
|
| 112 |
+ t.Fatalf("Unexpected transaction string %q, expected prefix %q", tx.String(), expected)
|
|
| 113 |
+ } |
|
| 114 |
+ |
|
| 115 |
+ if err := tx.Cancel(); err != nil {
|
|
| 116 |
+ t.Fatal(err) |
|
| 117 |
+ } |
|
| 118 |
+} |
| 0 | 119 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,238 @@ |
| 0 |
+// Package layer is package for managing read only |
|
| 1 |
+// and read-write mounts on the union file system |
|
| 2 |
+// driver. Read-only mounts are referenced using a |
|
| 3 |
+// content hash and are protected from mutation in |
|
| 4 |
+// the exposed interface. The tar format is used |
|
| 5 |
+// to create read only layers and export both |
|
| 6 |
+// read only and writable layers. The exported |
|
| 7 |
+// tar data for a read only layer should match |
|
| 8 |
+// the tar used to create the layer. |
|
| 9 |
+package layer |
|
| 10 |
+ |
|
| 11 |
+import ( |
|
| 12 |
+ "errors" |
|
| 13 |
+ "io" |
|
| 14 |
+ |
|
| 15 |
+ "github.com/Sirupsen/logrus" |
|
| 16 |
+ "github.com/docker/distribution/digest" |
|
| 17 |
+ "github.com/docker/docker/pkg/archive" |
|
| 18 |
+) |
|
| 19 |
+ |
|
| 20 |
var (
	// ErrLayerDoesNotExist is returned when an operation targets a
	// layer that is not present in the store.
	ErrLayerDoesNotExist = errors.New("layer does not exist")

	// ErrLayerNotRetained is returned when releasing a layer that was
	// never retained.
	ErrLayerNotRetained = errors.New("layer not retained")

	// ErrMountDoesNotExist is returned when an operation targets a
	// mount layer that is not present.
	ErrMountDoesNotExist = errors.New("mount does not exist")

	// ErrActiveMount is returned when an operation requires the layer
	// to be unmounted but it is still mounted.
	ErrActiveMount = errors.New("mount still active")

	// ErrNotMounted is returned when an active mount is requested but
	// the layer is not mounted.
	ErrNotMounted = errors.New("not mounted")

	// ErrMaxDepthExceeded is returned when creating a layer would push
	// the chain depth past the 125-layer maximum.
	ErrMaxDepthExceeded = errors.New("max depth exceeded")
)
|
| 47 |
+ |
|
| 48 |
+// ChainID is the content-addressable ID of a layer. |
|
| 49 |
+type ChainID digest.Digest |
|
| 50 |
+ |
|
| 51 |
+// String returns a string rendition of a layer ID |
|
| 52 |
+func (id ChainID) String() string {
|
|
| 53 |
+ return string(id) |
|
| 54 |
+} |
|
| 55 |
+ |
|
| 56 |
+// DiffID is the hash of an individual layer tar. |
|
| 57 |
+type DiffID digest.Digest |
|
| 58 |
+ |
|
| 59 |
+// String returns a string rendition of a layer DiffID |
|
| 60 |
+func (diffID DiffID) String() string {
|
|
| 61 |
+ return string(diffID) |
|
| 62 |
+} |
|
| 63 |
+ |
|
| 64 |
+// TarStreamer represents an object which may |
|
| 65 |
+// have its contents exported as a tar stream. |
|
| 66 |
+type TarStreamer interface {
|
|
| 67 |
+ // TarStream returns a tar archive stream |
|
| 68 |
+ // for the contents of a layer. |
|
| 69 |
+ TarStream() (io.Reader, error) |
|
| 70 |
+} |
|
| 71 |
+ |
|
| 72 |
+// Layer represents a read only layer |
|
| 73 |
+type Layer interface {
|
|
| 74 |
+ TarStreamer |
|
| 75 |
+ |
|
| 76 |
+ // ChainID returns the content hash of the entire layer chain. The hash |
|
| 77 |
+ // chain is made up of DiffID of top layer and all of its parents. |
|
| 78 |
+ ChainID() ChainID |
|
| 79 |
+ |
|
| 80 |
+ // DiffID returns the content hash of the layer |
|
| 81 |
+ // tar stream used to create this layer. |
|
| 82 |
+ DiffID() DiffID |
|
| 83 |
+ |
|
| 84 |
+ // Parent returns the next layer in the layer chain. |
|
| 85 |
+ Parent() Layer |
|
| 86 |
+ |
|
| 87 |
+ // Size returns the size of the entire layer chain. The size |
|
| 88 |
+ // is calculated from the total size of all files in the layers. |
|
| 89 |
+ Size() (int64, error) |
|
| 90 |
+ |
|
| 91 |
+ // DiffSize returns the size difference of the top layer |
|
| 92 |
+ // from parent layer. |
|
| 93 |
+ DiffSize() (int64, error) |
|
| 94 |
+ |
|
| 95 |
+ // Metadata returns the low level storage metadata associated |
|
| 96 |
+ // with layer. |
|
| 97 |
+ Metadata() (map[string]string, error) |
|
| 98 |
+} |
|
| 99 |
+ |
|
| 100 |
+// RWLayer represents a layer which is |
|
| 101 |
+// read and writable |
|
| 102 |
+type RWLayer interface {
|
|
| 103 |
+ TarStreamer |
|
| 104 |
+ |
|
| 105 |
+ // Path returns the filesystem path to the writable |
|
| 106 |
+ // layer. |
|
| 107 |
+ Path() (string, error) |
|
| 108 |
+ |
|
| 109 |
+ // Parent returns the layer which the writable |
|
| 110 |
+ // layer was created from. |
|
| 111 |
+ Parent() Layer |
|
| 112 |
+ |
|
| 113 |
+ // Size represents the size of the writable layer |
|
| 114 |
+ // as calculated by the total size of the files |
|
| 115 |
+ // changed in the mutable layer. |
|
| 116 |
+ Size() (int64, error) |
|
| 117 |
+} |
|
| 118 |
+ |
|
| 119 |
+// Metadata holds information about a |
|
| 120 |
+// read only layer |
|
| 121 |
+type Metadata struct {
|
|
| 122 |
+ // ChainID is the content hash of the layer |
|
| 123 |
+ ChainID ChainID |
|
| 124 |
+ |
|
| 125 |
+ // DiffID is the hash of the tar data used to |
|
| 126 |
+ // create the layer |
|
| 127 |
+ DiffID DiffID |
|
| 128 |
+ |
|
| 129 |
+ // Size is the size of the layer and all parents |
|
| 130 |
+ Size int64 |
|
| 131 |
+ |
|
| 132 |
+ // DiffSize is the size of the top layer |
|
| 133 |
+ DiffSize int64 |
|
| 134 |
+} |
|
| 135 |
+ |
|
| 136 |
+// MountInit is a function to initialize a |
|
| 137 |
+// writable mount. Changes made here will |
|
| 138 |
+// not be included in the Tar stream of the |
|
| 139 |
+// RWLayer. |
|
| 140 |
+type MountInit func(root string) error |
|
| 141 |
+ |
|
| 142 |
+// Store represents a backend for managing both |
|
| 143 |
+// read-only and read-write layers. |
|
| 144 |
+type Store interface {
|
|
| 145 |
+ Register(io.Reader, ChainID) (Layer, error) |
|
| 146 |
+ Get(ChainID) (Layer, error) |
|
| 147 |
+ Release(Layer) ([]Metadata, error) |
|
| 148 |
+ |
|
| 149 |
+ Mount(id string, parent ChainID, label string, init MountInit) (RWLayer, error) |
|
| 150 |
+ Unmount(id string) error |
|
| 151 |
+ DeleteMount(id string) ([]Metadata, error) |
|
| 152 |
+ Changes(id string) ([]archive.Change, error) |
|
| 153 |
+} |
|
| 154 |
+ |
|
| 155 |
+// MetadataTransaction represents functions for setting layer metadata |
|
| 156 |
+// with a single transaction. |
|
| 157 |
+type MetadataTransaction interface {
|
|
| 158 |
+ SetSize(int64) error |
|
| 159 |
+ SetParent(parent ChainID) error |
|
| 160 |
+ SetDiffID(DiffID) error |
|
| 161 |
+ SetCacheID(string) error |
|
| 162 |
+ TarSplitWriter() (io.WriteCloser, error) |
|
| 163 |
+ |
|
| 164 |
+ Commit(ChainID) error |
|
| 165 |
+ Cancel() error |
|
| 166 |
+ String() string |
|
| 167 |
+} |
|
| 168 |
+ |
|
| 169 |
+// MetadataStore represents a backend for persisting |
|
| 170 |
+// metadata about layers and providing the metadata |
|
| 171 |
+// for restoring a Store. |
|
| 172 |
+type MetadataStore interface {
|
|
| 173 |
+ // StartTransaction starts an update for new metadata |
|
| 174 |
+ // which will be used to represent an ID on commit. |
|
| 175 |
+ StartTransaction() (MetadataTransaction, error) |
|
| 176 |
+ |
|
| 177 |
+ GetSize(ChainID) (int64, error) |
|
| 178 |
+ GetParent(ChainID) (ChainID, error) |
|
| 179 |
+ GetDiffID(ChainID) (DiffID, error) |
|
| 180 |
+ GetCacheID(ChainID) (string, error) |
|
| 181 |
+ TarSplitReader(ChainID) (io.ReadCloser, error) |
|
| 182 |
+ |
|
| 183 |
+ SetMountID(string, string) error |
|
| 184 |
+ SetInitID(string, string) error |
|
| 185 |
+ SetMountParent(string, ChainID) error |
|
| 186 |
+ |
|
| 187 |
+ GetMountID(string) (string, error) |
|
| 188 |
+ GetInitID(string) (string, error) |
|
| 189 |
+ GetMountParent(string) (ChainID, error) |
|
| 190 |
+ |
|
| 191 |
+ // List returns the full list of referened |
|
| 192 |
+ // read-only and read-write layers |
|
| 193 |
+ List() ([]ChainID, []string, error) |
|
| 194 |
+ |
|
| 195 |
+ Remove(ChainID) error |
|
| 196 |
+ RemoveMount(string) error |
|
| 197 |
+} |
|
| 198 |
+ |
|
| 199 |
+// CreateChainID returns ID for a layerDigest slice |
|
| 200 |
+func CreateChainID(dgsts []DiffID) ChainID {
|
|
| 201 |
+ return createChainIDFromParent("", dgsts...)
|
|
| 202 |
+} |
|
| 203 |
+ |
|
| 204 |
+func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID {
|
|
| 205 |
+ if len(dgsts) == 0 {
|
|
| 206 |
+ return parent |
|
| 207 |
+ } |
|
| 208 |
+ if parent == "" {
|
|
| 209 |
+ return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...) |
|
| 210 |
+ } |
|
| 211 |
+ // H = "H(n-1) SHA256(n)" |
|
| 212 |
+ dgst, err := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) |
|
| 213 |
+ if err != nil {
|
|
| 214 |
+ // Digest calculation is not expected to throw an error, |
|
| 215 |
+ // any error at this point is a program error |
|
| 216 |
+ panic(err) |
|
| 217 |
+ } |
|
| 218 |
+ return createChainIDFromParent(ChainID(dgst), dgsts[1:]...) |
|
| 219 |
+} |
|
| 220 |
+ |
|
| 221 |
+// ReleaseAndLog releases the provided layer from the given layer |
|
| 222 |
+// store, logging any error and release metadata |
|
| 223 |
+func ReleaseAndLog(ls Store, l Layer) {
|
|
| 224 |
+ metadata, err := ls.Release(l) |
|
| 225 |
+ if err != nil {
|
|
| 226 |
+ logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err)
|
|
| 227 |
+ } |
|
| 228 |
+ LogReleaseMetadata(metadata) |
|
| 229 |
+} |
|
| 230 |
+ |
|
| 231 |
+// LogReleaseMetadata logs a metadata array, use this to |
|
| 232 |
+// ensure consistent logging for release metadata |
|
| 233 |
+func LogReleaseMetadata(metadatas []Metadata) {
|
|
| 234 |
+ for _, metadata := range metadatas {
|
|
| 235 |
+ logrus.Infof("Layer %s cleaned up", metadata.ChainID)
|
|
| 236 |
+ } |
|
| 237 |
+} |
| 0 | 238 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,649 @@ |
| 0 |
+package layer |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "errors" |
|
| 4 |
+ "fmt" |
|
| 5 |
+ "io" |
|
| 6 |
+ "io/ioutil" |
|
| 7 |
+ "runtime" |
|
| 8 |
+ "sync" |
|
| 9 |
+ |
|
| 10 |
+ "github.com/Sirupsen/logrus" |
|
| 11 |
+ "github.com/docker/distribution/digest" |
|
| 12 |
+ "github.com/docker/docker/daemon/graphdriver" |
|
| 13 |
+ "github.com/docker/docker/pkg/archive" |
|
| 14 |
+ "github.com/docker/docker/pkg/stringid" |
|
| 15 |
+ "github.com/vbatts/tar-split/tar/asm" |
|
| 16 |
+ "github.com/vbatts/tar-split/tar/storage" |
|
| 17 |
+) |
|
| 18 |
+ |
|
| 19 |
// maxLayerDepth represents the maximum number of
// layers which can be chained together. 125 was
// chosen to account for the 127 max in some
// graphdrivers plus the 2 additional layers
// used to create a rwlayer.
const maxLayerDepth = 125

// layerStore coordinates read-only layers and read-write mounts on top
// of a graph driver, persisting layer metadata through a MetadataStore.
type layerStore struct {
	store  MetadataStore
	driver graphdriver.Driver

	// layerMap indexes loaded read-only layers by content-addressed chain ID.
	layerMap map[ChainID]*roLayer
	layerL   sync.Mutex // guards layerMap and layer reference counts

	// mounts indexes read-write layers by caller-chosen name.
	mounts map[string]*mountedLayer
	mountL sync.Mutex // guards mounts
}
|
| 36 |
+ |
|
| 37 |
+// NewStore creates a new Store instance using |
|
| 38 |
+// the provided metadata store and graph driver. |
|
| 39 |
+// The metadata store will be used to restore |
|
| 40 |
+// the Store. |
|
| 41 |
+func NewStore(store MetadataStore, driver graphdriver.Driver) (Store, error) {
|
|
| 42 |
+ ls := &layerStore{
|
|
| 43 |
+ store: store, |
|
| 44 |
+ driver: driver, |
|
| 45 |
+ layerMap: map[ChainID]*roLayer{},
|
|
| 46 |
+ mounts: map[string]*mountedLayer{},
|
|
| 47 |
+ } |
|
| 48 |
+ |
|
| 49 |
+ ids, mounts, err := store.List() |
|
| 50 |
+ if err != nil {
|
|
| 51 |
+ return nil, err |
|
| 52 |
+ } |
|
| 53 |
+ |
|
| 54 |
+ for _, id := range ids {
|
|
| 55 |
+ l, err := ls.loadLayer(id) |
|
| 56 |
+ if err != nil {
|
|
| 57 |
+ logrus.Debugf("Failed to load layer %s: %s", id, err)
|
|
| 58 |
+ } |
|
| 59 |
+ if l.parent != nil {
|
|
| 60 |
+ l.parent.referenceCount++ |
|
| 61 |
+ } |
|
| 62 |
+ } |
|
| 63 |
+ |
|
| 64 |
+ for _, mount := range mounts {
|
|
| 65 |
+ if err := ls.loadMount(mount); err != nil {
|
|
| 66 |
+ logrus.Debugf("Failed to load mount %s: %s", mount, err)
|
|
| 67 |
+ } |
|
| 68 |
+ } |
|
| 69 |
+ |
|
| 70 |
+ return ls, nil |
|
| 71 |
+} |
|
| 72 |
+ |
|
| 73 |
// loadLayer restores a single read-only layer (and, recursively, its
// parent chain) from the metadata store into ls.layerMap. Already-loaded
// layers are returned from the in-memory map without hitting the store.
func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) {
	cl, ok := ls.layerMap[layer]
	if ok {
		return cl, nil
	}

	diff, err := ls.store.GetDiffID(layer)
	if err != nil {
		return nil, err
	}

	size, err := ls.store.GetSize(layer)
	if err != nil {
		return nil, err
	}

	cacheID, err := ls.store.GetCacheID(layer)
	if err != nil {
		return nil, err
	}

	parent, err := ls.store.GetParent(layer)
	if err != nil {
		return nil, err
	}

	cl = &roLayer{
		chainID:    layer,
		diffID:     diff,
		size:       size,
		cacheID:    cacheID,
		layerStore: ls,
		references: map[Layer]struct{}{},
	}

	if parent != "" {
		// Recursively ensure the parent chain is loaded before linking it.
		p, err := ls.loadLayer(parent)
		if err != nil {
			return nil, err
		}
		cl.parent = p
	}

	ls.layerMap[cl.chainID] = cl

	return cl, nil
}
|
| 120 |
+ |
|
| 121 |
// loadMount restores a single read-write mount from the metadata store
// into ls.mounts, loading its parent chain if one is recorded.
// Already-loaded mounts are left untouched.
func (ls *layerStore) loadMount(mount string) error {
	if _, ok := ls.mounts[mount]; ok {
		return nil
	}

	mountID, err := ls.store.GetMountID(mount)
	if err != nil {
		return err
	}

	initID, err := ls.store.GetInitID(mount)
	if err != nil {
		return err
	}

	parent, err := ls.store.GetMountParent(mount)
	if err != nil {
		return err
	}

	ml := &mountedLayer{
		name:       mount,
		mountID:    mountID,
		initID:     initID,
		layerStore: ls,
	}

	if parent != "" {
		p, err := ls.loadLayer(parent)
		if err != nil {
			return err
		}
		ml.parent = p

		// The mount holds a reference on its parent layer.
		p.referenceCount++
	}

	ls.mounts[ml.name] = ml

	return nil
}
|
| 162 |
+ |
|
| 163 |
// applyTar streams the layer tar ts into the graph driver while
// simultaneously computing the content digest of the stream and recording
// the tar-split metadata needed to reassemble the exact byte stream later.
// On success it fills in layer.size and layer.diffID.
func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error {
	digester := digest.Canonical.New()
	// Tee the stream so the digest reflects exactly the bytes applied.
	tr := io.TeeReader(ts, digester.Hash())

	tsw, err := tx.TarSplitWriter()
	if err != nil {
		return err
	}
	metaPacker := storage.NewJSONPacker(tsw)
	defer tsw.Close()

	// we're passing nil here for the file putter, because the ApplyDiff will
	// handle the extraction of the archive
	rdr, err := asm.NewInputTarStream(tr, metaPacker, nil)
	if err != nil {
		return err
	}

	applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, archive.Reader(rdr))
	if err != nil {
		return err
	}

	// Discard trailing data but ensure metadata is picked up to reconstruct stream
	io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed

	layer.size = applySize
	layer.diffID = DiffID(digester.Digest())

	logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize)

	return nil
}
|
| 196 |
+ |
|
| 197 |
// Register applies the tar stream ts as a new read-only layer on top of
// parent and records it in the store. The returned layer carries a
// reference that the caller must eventually Release. If a layer with the
// resulting chain ID already exists, the freshly created data is cleaned
// up and a reference to the existing layer is returned instead.
func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) {
	// err is used to hold the error which will always trigger
	// cleanup of creates sources but may not be an error returned
	// to the caller (already exists).
	var err error
	var pid string
	var p *roLayer
	if string(parent) != "" {
		p = ls.get(parent) // also retains the parent chain
		if p == nil {
			return nil, ErrLayerDoesNotExist
		}
		pid = p.cacheID
		// Release parent chain if error
		defer func() {
			if err != nil {
				ls.layerL.Lock()
				ls.releaseLayer(p)
				ls.layerL.Unlock()
			}
		}()
		if p.depth() >= maxLayerDepth {
			err = ErrMaxDepthExceeded
			return nil, err
		}
	}

	// Create new roLayer
	layer := &roLayer{
		parent:         p,
		cacheID:        stringid.GenerateRandomID(),
		referenceCount: 1,
		layerStore:     ls,
		references:     map[Layer]struct{}{},
	}

	if err = ls.driver.Create(layer.cacheID, pid, ""); err != nil {
		return nil, err
	}

	tx, err := ls.store.StartTransaction()
	if err != nil {
		return nil, err
	}

	// Any non-nil err at return time triggers removal of the driver data
	// and cancellation of the metadata transaction.
	defer func() {
		if err != nil {
			logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err)
			if err := ls.driver.Remove(layer.cacheID); err != nil {
				logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err)
			}
			if err := tx.Cancel(); err != nil {
				logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err)
			}
		}
	}()

	if err = ls.applyTar(tx, ts, pid, layer); err != nil {
		return nil, err
	}

	// The chain ID is content-addressed: derived from the diff digest and,
	// when present, the parent's chain ID.
	if layer.parent == nil {
		layer.chainID = ChainID(layer.diffID)
	} else {
		layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID)
	}

	if err = storeLayer(tx, layer); err != nil {
		return nil, err
	}

	ls.layerL.Lock()
	defer ls.layerL.Unlock()

	if existingLayer := ls.getAndRetainLayer(layer.chainID); existingLayer != nil {
		// Set error for cleanup, but do not return the error
		err = errors.New("layer already exists")
		return existingLayer.getReference(), nil
	}

	if err = tx.Commit(layer.chainID); err != nil {
		return nil, err
	}

	ls.layerMap[layer.chainID] = layer

	return layer.getReference(), nil
}
|
| 285 |
+ |
|
| 286 |
+func (ls *layerStore) get(l ChainID) *roLayer {
|
|
| 287 |
+ ls.layerL.Lock() |
|
| 288 |
+ defer ls.layerL.Unlock() |
|
| 289 |
+ |
|
| 290 |
+ layer, ok := ls.layerMap[l] |
|
| 291 |
+ if !ok {
|
|
| 292 |
+ return nil |
|
| 293 |
+ } |
|
| 294 |
+ |
|
| 295 |
+ layer.referenceCount++ |
|
| 296 |
+ |
|
| 297 |
+ return layer |
|
| 298 |
+} |
|
| 299 |
+ |
|
| 300 |
+func (ls *layerStore) Get(l ChainID) (Layer, error) {
|
|
| 301 |
+ layer := ls.get(l) |
|
| 302 |
+ if layer == nil {
|
|
| 303 |
+ return nil, ErrLayerDoesNotExist |
|
| 304 |
+ } |
|
| 305 |
+ |
|
| 306 |
+ return layer.getReference(), nil |
|
| 307 |
+} |
|
| 308 |
+ |
|
| 309 |
// deleteLayer removes the layer's graph-driver data and its persisted
// metadata, filling metadata with details of what was removed.
func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error {
	err := ls.driver.Remove(layer.cacheID)
	if err != nil {
		return err
	}

	err = ls.store.Remove(layer.chainID)
	if err != nil {
		return err
	}
	metadata.DiffID = layer.diffID
	metadata.ChainID = layer.chainID
	metadata.Size, err = layer.Size()
	if err != nil {
		return err
	}
	metadata.DiffSize = layer.size

	return nil
}
|
| 329 |
+ |
|
| 330 |
// releaseLayers drops one reference from l and, when the count reaches
// zero, deletes the layer and recursively releases its parent chain,
// appending a Metadata record for every layer removed. The panics guard
// internal invariants (programmer errors): releasing an unretained layer,
// removing a parent before any child was removed in the same walk, or
// deleting a layer that still has outstanding references.
// Callers are expected to hold layerL; depth counts recursion steps.
func (ls *layerStore) releaseLayers(l *roLayer, removed *[]Metadata, depth int) error {
	if l.referenceCount == 0 {
		panic("layer not retained")
	}
	l.referenceCount--
	if l.referenceCount != 0 {
		return nil
	}

	if len(*removed) == 0 && depth > 0 {
		panic("cannot remove layer with child")
	}
	if l.hasReferences() {
		panic("cannot delete referenced layer")
	}
	var metadata Metadata
	if err := ls.deleteLayer(l, &metadata); err != nil {
		return err
	}

	delete(ls.layerMap, l.chainID)
	*removed = append(*removed, metadata)

	if l.parent != nil {
		if err := ls.releaseLayers(l.parent, removed, depth+1); err != nil {
			return err
		}
	}

	return nil
}
|
| 361 |
+ |
|
| 362 |
+func (ls *layerStore) releaseLayer(layer *roLayer) ([]Metadata, error) {
|
|
| 363 |
+ removed := []Metadata{}
|
|
| 364 |
+ err := ls.releaseLayers(layer, &removed, 0) |
|
| 365 |
+ return removed, err |
|
| 366 |
+} |
|
| 367 |
+ |
|
| 368 |
// Release drops the given reference l on its layer, returning metadata
// for any layers removed as a result. Releasing a layer that is no longer
// in the store is a no-op; releasing with a reference the layer does not
// hold returns ErrLayerNotRetained.
func (ls *layerStore) Release(l Layer) ([]Metadata, error) {
	ls.layerL.Lock()
	defer ls.layerL.Unlock()
	layer, ok := ls.layerMap[l.ChainID()]
	if !ok {
		return []Metadata{}, nil
	}
	if !layer.hasReference(l) {
		return nil, ErrLayerNotRetained
	}

	layer.deleteReference(l)

	return ls.releaseLayer(layer)
}
|
| 383 |
+ |
|
| 384 |
+func (ls *layerStore) mount(m *mountedLayer, mountLabel string) error {
|
|
| 385 |
+ dir, err := ls.driver.Get(m.mountID, mountLabel) |
|
| 386 |
+ if err != nil {
|
|
| 387 |
+ return err |
|
| 388 |
+ } |
|
| 389 |
+ m.path = dir |
|
| 390 |
+ m.activityCount++ |
|
| 391 |
+ |
|
| 392 |
+ return nil |
|
| 393 |
+} |
|
| 394 |
+ |
|
| 395 |
// saveMount persists the mount's IDs and (optional) init ID and parent to
// the metadata store, then records the mount in the in-memory map.
func (ls *layerStore) saveMount(mount *mountedLayer) error {
	if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil {
		return err
	}

	if mount.initID != "" {
		if err := ls.store.SetInitID(mount.name, mount.initID); err != nil {
			return err
		}
	}

	if mount.parent != nil {
		if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil {
			return err
		}
	}

	ls.mounts[mount.name] = mount

	return nil
}
|
| 416 |
+ |
|
| 417 |
+func (ls *layerStore) getAndRetainLayer(layer ChainID) *roLayer {
|
|
| 418 |
+ l, ok := ls.layerMap[layer] |
|
| 419 |
+ if !ok {
|
|
| 420 |
+ return nil |
|
| 421 |
+ } |
|
| 422 |
+ |
|
| 423 |
+ l.referenceCount++ |
|
| 424 |
+ |
|
| 425 |
+ return l |
|
| 426 |
+} |
|
| 427 |
+ |
|
| 428 |
+func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit) (string, error) {
|
|
| 429 |
+ // Use "<graph-id>-init" to maintain compatibility with graph drivers |
|
| 430 |
+ // which are expecting this layer with this special name. If all |
|
| 431 |
+ // graph drivers can be updated to not rely on knowin about this layer |
|
| 432 |
+ // then the initID should be randomly generated. |
|
| 433 |
+ initID := fmt.Sprintf("%s-init", graphID)
|
|
| 434 |
+ |
|
| 435 |
+ if err := ls.driver.Create(initID, parent, mountLabel); err != nil {
|
|
| 436 |
+ |
|
| 437 |
+ } |
|
| 438 |
+ p, err := ls.driver.Get(initID, "") |
|
| 439 |
+ if err != nil {
|
|
| 440 |
+ return "", err |
|
| 441 |
+ } |
|
| 442 |
+ |
|
| 443 |
+ if err := initFunc(p); err != nil {
|
|
| 444 |
+ ls.driver.Put(initID) |
|
| 445 |
+ return "", err |
|
| 446 |
+ } |
|
| 447 |
+ |
|
| 448 |
+ if err := ls.driver.Put(initID); err != nil {
|
|
| 449 |
+ return "", err |
|
| 450 |
+ } |
|
| 451 |
+ |
|
| 452 |
+ return initID, nil |
|
| 453 |
+} |
|
| 454 |
+ |
|
| 455 |
// Mount returns the read-write layer registered under name, creating it
// when it does not exist yet: optionally on top of parent, and optionally
// initialized by running initFunc in a "<id>-init" layer. Each successful
// call bumps the mount's activity count; pair with Unmount.
// The named result err is inspected by the deferred parent-release below.
func (ls *layerStore) Mount(name string, parent ChainID, mountLabel string, initFunc MountInit) (l RWLayer, err error) {
	ls.mountL.Lock()
	defer ls.mountL.Unlock()
	m, ok := ls.mounts[name]
	if ok {
		// Check if has path
		if err := ls.mount(m, mountLabel); err != nil {
			return nil, err
		}
		return m, nil
	}

	var pid string
	var p *roLayer
	if string(parent) != "" {
		ls.layerL.Lock()
		p = ls.getAndRetainLayer(parent)
		ls.layerL.Unlock()
		if p == nil {
			return nil, ErrLayerDoesNotExist
		}
		pid = p.cacheID

		// Release parent chain if error
		defer func() {
			if err != nil {
				ls.layerL.Lock()
				ls.releaseLayer(p)
				ls.layerL.Unlock()
			}
		}()
	}

	mountID := name
	if runtime.GOOS != "windows" {
		// windows has issues if container ID doesn't match mount ID
		mountID = stringid.GenerateRandomID()
	}

	m = &mountedLayer{
		name:       name,
		parent:     p,
		mountID:    mountID,
		layerStore: ls,
	}

	if initFunc != nil {
		// The init layer replaces pid as the driver-level parent.
		pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc)
		if err != nil {
			return nil, err
		}
		m.initID = pid
	}

	if err = ls.driver.Create(m.mountID, pid, ""); err != nil {
		return nil, err
	}

	if err = ls.saveMount(m); err != nil {
		return nil, err
	}

	if err = ls.mount(m, mountLabel); err != nil {
		return nil, err
	}

	return m, nil
}
|
| 523 |
+ |
|
| 524 |
// Unmount decrements the named mount's activity count and returns the
// mount ID to the graph driver via Put.
func (ls *layerStore) Unmount(name string) error {
	ls.mountL.Lock()
	defer ls.mountL.Unlock()

	m := ls.mounts[name]
	if m == nil {
		return ErrMountDoesNotExist
	}

	// NOTE(review): nothing prevents the count from going negative if
	// Unmount is called more often than Mount — confirm callers balance.
	m.activityCount--

	if err := ls.driver.Put(m.mountID); err != nil {
		return err
	}

	return nil
}
|
| 541 |
+ |
|
| 542 |
// DeleteMount removes the named read-write layer: its driver data, its
// optional init layer, and its stored metadata, then releases its parent
// chain, returning metadata for any read-only layers removed as a result.
// An actively mounted layer (activityCount > 0) cannot be deleted.
func (ls *layerStore) DeleteMount(name string) ([]Metadata, error) {
	ls.mountL.Lock()
	defer ls.mountL.Unlock()

	m := ls.mounts[name]
	if m == nil {
		return nil, ErrMountDoesNotExist
	}
	if m.activityCount > 0 {
		return nil, ErrActiveMount
	}

	delete(ls.mounts, name)

	if err := ls.driver.Remove(m.mountID); err != nil {
		logrus.Errorf("Error removing mounted layer %s: %s", m.name, err)
		return nil, err
	}

	if m.initID != "" {
		if err := ls.driver.Remove(m.initID); err != nil {
			logrus.Errorf("Error removing init layer %s: %s", m.name, err)
			return nil, err
		}
	}

	if err := ls.store.RemoveMount(m.name); err != nil {
		logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err)
		return nil, err
	}

	ls.layerL.Lock()
	defer ls.layerL.Unlock()
	if m.parent != nil {
		return ls.releaseLayer(m.parent)
	}

	return []Metadata{}, nil
}
|
| 581 |
+ |
|
| 582 |
+func (ls *layerStore) Changes(name string) ([]archive.Change, error) {
|
|
| 583 |
+ ls.mountL.Lock() |
|
| 584 |
+ m := ls.mounts[name] |
|
| 585 |
+ ls.mountL.Unlock() |
|
| 586 |
+ if m == nil {
|
|
| 587 |
+ return nil, ErrMountDoesNotExist |
|
| 588 |
+ } |
|
| 589 |
+ pid := m.initID |
|
| 590 |
+ if pid == "" && m.parent != nil {
|
|
| 591 |
+ pid = m.parent.cacheID |
|
| 592 |
+ } |
|
| 593 |
+ return ls.driver.Changes(m.mountID, pid) |
|
| 594 |
+} |
|
| 595 |
+ |
|
| 596 |
// assembleTar reconstructs the original layer tar stream for graphID from
// the stored tar-split metadata plus the on-disk layer files, streaming
// the result through a pipe. If size is non-nil the stream size is
// accumulated into it via unpackSizeCounter.
func (ls *layerStore) assembleTar(graphID string, metadata io.ReadCloser, size *int64) (io.Reader, error) {
	type diffPathDriver interface {
		DiffPath(string) (string, func() error, error)
	}

	// Fall back to a Get/Put-based path provider for drivers that do not
	// implement DiffPath natively.
	diffDriver, ok := ls.driver.(diffPathDriver)
	if !ok {
		diffDriver = &naiveDiffPathDriver{ls.driver}
	}

	// get our relative path to the container
	fsPath, releasePath, err := diffDriver.DiffPath(graphID)
	if err != nil {
		metadata.Close()
		return nil, err
	}

	pR, pW := io.Pipe()
	// this will need to be in a goroutine, as we are returning the stream of a
	// tar archive, but can not close the metadata reader early (when this
	// function returns)...
	go func() {
		defer releasePath()
		defer metadata.Close()

		metaUnpacker := storage.NewJSONUnpacker(metadata)
		upackerCounter := &unpackSizeCounter{metaUnpacker, size}
		fileGetter := storage.NewPathFileGetter(fsPath)
		logrus.Debugf("Assembling tar data for %s from %s", graphID, fsPath)
		ots := asm.NewOutputTarStream(fileGetter, upackerCounter)
		defer ots.Close()
		if _, err := io.Copy(pW, ots); err != nil {
			// Propagate the failure to the pipe's reader.
			pW.CloseWithError(err)
			return
		}
		pW.Close()
	}()
	return pR, nil
}
|
| 635 |
+ |
|
| 636 |
// naiveDiffPathDriver adapts any graphdriver.Driver to the diffPathDriver
// interface by mounting the layer with Get and releasing it with Put.
type naiveDiffPathDriver struct {
	graphdriver.Driver
}

// DiffPath returns the mounted filesystem path for id together with a
// release function that must be called once the path is no longer needed.
func (n *naiveDiffPathDriver) DiffPath(id string) (string, func() error, error) {
	p, err := n.Driver.Get(id, "")
	if err != nil {
		return "", nil, err
	}
	return p, func() error {
		return n.Driver.Put(id)
	}, nil
}
| 0 | 649 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,725 @@ |
| 0 |
+package layer |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "bytes" |
|
| 4 |
+ "io" |
|
| 5 |
+ "io/ioutil" |
|
| 6 |
+ "os" |
|
| 7 |
+ "path/filepath" |
|
| 8 |
+ "testing" |
|
| 9 |
+ |
|
| 10 |
+ "github.com/docker/distribution/digest" |
|
| 11 |
+ "github.com/docker/docker/daemon/graphdriver" |
|
| 12 |
+ "github.com/docker/docker/daemon/graphdriver/vfs" |
|
| 13 |
+ "github.com/docker/docker/pkg/archive" |
|
| 14 |
+ "github.com/docker/docker/pkg/idtools" |
|
| 15 |
+ "github.com/docker/docker/pkg/stringid" |
|
| 16 |
+) |
|
| 17 |
+ |
|
| 18 |
// init wires the archive package's implementations into the graphdriver
// and vfs packages so the vfs test driver can apply and copy layers.
func init() {
	graphdriver.ApplyUncompressedLayer = archive.UnpackLayer
	vfs.CopyWithTar = archive.CopyWithTar
}
|
| 22 |
+ |
|
| 23 |
// newVFSGraphDriver constructs a vfs graph driver rooted at td, mapping
// container uid/gid 0 onto the current user so tests can run unprivileged.
func newVFSGraphDriver(td string) (graphdriver.Driver, error) {
	uidMap := []idtools.IDMap{
		{
			ContainerID: 0,
			HostID:      os.Getuid(),
			Size:        1,
		},
	}
	gidMap := []idtools.IDMap{
		{
			ContainerID: 0,
			HostID:      os.Getgid(),
			Size:        1,
		},
	}

	return graphdriver.GetDriver("vfs", td, nil, uidMap, gidMap)
}
|
| 41 |
+ |
|
| 42 |
+func newTestGraphDriver(t *testing.T) (graphdriver.Driver, func()) {
|
|
| 43 |
+ td, err := ioutil.TempDir("", "graph-")
|
|
| 44 |
+ if err != nil {
|
|
| 45 |
+ t.Fatal(err) |
|
| 46 |
+ } |
|
| 47 |
+ |
|
| 48 |
+ driver, err := newVFSGraphDriver(td) |
|
| 49 |
+ if err != nil {
|
|
| 50 |
+ t.Fatal(err) |
|
| 51 |
+ } |
|
| 52 |
+ |
|
| 53 |
+ return driver, func() {
|
|
| 54 |
+ os.RemoveAll(td) |
|
| 55 |
+ } |
|
| 56 |
+} |
|
| 57 |
+ |
|
| 58 |
// newTestStore builds a layer Store backed by a temp-dir filesystem
// metadata store and a vfs graph driver, returning the store and a
// cleanup function that removes both temp trees.
func newTestStore(t *testing.T) (Store, func()) {
	td, err := ioutil.TempDir("", "layerstore-")
	if err != nil {
		t.Fatal(err)
	}

	graph, graphcleanup := newTestGraphDriver(t)
	fms, err := NewFSMetadataStore(td)
	if err != nil {
		t.Fatal(err)
	}
	ls, err := NewStore(fms, graph)
	if err != nil {
		t.Fatal(err)
	}

	return ls, func() {
		graphcleanup()
		os.RemoveAll(td)
	}
}
|
| 79 |
+ |
|
| 80 |
// layerInit mutates a freshly mounted layer root before it is committed.
type layerInit func(root string) error

// createLayer mounts a scratch RW layer on top of parent, runs layerFunc
// against the mounted path, registers the result as a read-only layer,
// and finally unmounts and deletes the temporary mount.
func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) {
	containerID := stringid.GenerateRandomID()
	mount, err := ls.Mount(containerID, parent, "", nil)
	if err != nil {
		return nil, err
	}

	path, err := mount.Path()
	if err != nil {
		return nil, err
	}

	if err := layerFunc(path); err != nil {
		return nil, err
	}

	ts, err := mount.TarStream()
	if err != nil {
		return nil, err
	}

	layer, err := ls.Register(ts, parent)
	if err != nil {
		return nil, err
	}

	if err := ls.Unmount(containerID); err != nil {
		return nil, err
	}

	if _, err := ls.DeleteMount(containerID); err != nil {
		return nil, err
	}

	return layer, nil
}
|
| 118 |
+ |
|
| 119 |
+type FileApplier interface {
|
|
| 120 |
+ ApplyFile(root string) error |
|
| 121 |
+} |
|
| 122 |
+ |
|
| 123 |
+type testFile struct {
|
|
| 124 |
+ name string |
|
| 125 |
+ content []byte |
|
| 126 |
+ permission os.FileMode |
|
| 127 |
+} |
|
| 128 |
+ |
|
| 129 |
+func newTestFile(name string, content []byte, perm os.FileMode) FileApplier {
|
|
| 130 |
+ return &testFile{
|
|
| 131 |
+ name: name, |
|
| 132 |
+ content: content, |
|
| 133 |
+ permission: perm, |
|
| 134 |
+ } |
|
| 135 |
+} |
|
| 136 |
+ |
|
| 137 |
+func (tf *testFile) ApplyFile(root string) error {
|
|
| 138 |
+ fullPath := filepath.Join(root, tf.name) |
|
| 139 |
+ if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil {
|
|
| 140 |
+ return err |
|
| 141 |
+ } |
|
| 142 |
+ // Check if already exists |
|
| 143 |
+ if stat, err := os.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission {
|
|
| 144 |
+ if err := os.Chmod(fullPath, tf.permission); err != nil {
|
|
| 145 |
+ return err |
|
| 146 |
+ } |
|
| 147 |
+ } |
|
| 148 |
+ if err := ioutil.WriteFile(fullPath, tf.content, tf.permission); err != nil {
|
|
| 149 |
+ return err |
|
| 150 |
+ } |
|
| 151 |
+ return nil |
|
| 152 |
+} |
|
| 153 |
+ |
|
| 154 |
+func initWithFiles(files ...FileApplier) layerInit {
|
|
| 155 |
+ return func(root string) error {
|
|
| 156 |
+ for _, f := range files {
|
|
| 157 |
+ if err := f.ApplyFile(root); err != nil {
|
|
| 158 |
+ return err |
|
| 159 |
+ } |
|
| 160 |
+ } |
|
| 161 |
+ return nil |
|
| 162 |
+ } |
|
| 163 |
+} |
|
| 164 |
+ |
|
| 165 |
// getCachedLayer unwraps a Layer reference back to the underlying
// *roLayer held by the store.
func getCachedLayer(l Layer) *roLayer {
	if rl, ok := l.(*referencedCacheLayer); ok {
		return rl.roLayer
	}
	return l.(*roLayer)
}
|
| 171 |
+ |
|
| 172 |
// createMetadata builds the Metadata records that deleting the given
// layers is expected to produce (used as test expectations).
func createMetadata(layers ...Layer) []Metadata {
	metadata := make([]Metadata, len(layers))
	for i := range layers {
		size, err := layers[i].Size()
		if err != nil {
			panic(err)
		}

		metadata[i].ChainID = layers[i].ChainID()
		metadata[i].DiffID = layers[i].DiffID()
		metadata[i].Size = size
		metadata[i].DiffSize = getCachedLayer(layers[i]).size
	}

	return metadata
}
|
| 188 |
+ |
|
| 189 |
+func assertMetadata(t *testing.T, metadata, expectedMetadata []Metadata) {
|
|
| 190 |
+ if len(metadata) != len(expectedMetadata) {
|
|
| 191 |
+ t.Fatalf("Unexpected number of deletes %d, expected %d", len(metadata), len(expectedMetadata))
|
|
| 192 |
+ } |
|
| 193 |
+ |
|
| 194 |
+ for i := range metadata {
|
|
| 195 |
+ if metadata[i] != expectedMetadata[i] {
|
|
| 196 |
+ t.Errorf("Unexpected metadata\n\tExpected: %#v\n\tActual: %#v", expectedMetadata[i], metadata[i])
|
|
| 197 |
+ } |
|
| 198 |
+ } |
|
| 199 |
+ if t.Failed() {
|
|
| 200 |
+ t.FailNow() |
|
| 201 |
+ } |
|
| 202 |
+} |
|
| 203 |
+ |
|
| 204 |
// releaseAndCheckDeleted releases layer and asserts that exactly the
// layers listed in removed were deleted from the store, both by metadata
// content and by layerMap size.
func releaseAndCheckDeleted(t *testing.T, ls Store, layer Layer, removed ...Layer) {
	layerCount := len(ls.(*layerStore).layerMap)
	expectedMetadata := createMetadata(removed...)
	metadata, err := ls.Release(layer)
	if err != nil {
		t.Fatal(err)
	}

	assertMetadata(t, metadata, expectedMetadata)

	if expected := layerCount - len(removed); len(ls.(*layerStore).layerMap) != expected {
		t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected)
	}
}
|
| 218 |
+ |
|
| 219 |
// cacheID returns the graph-driver cache ID for a layer reference.
func cacheID(l Layer) string {
	return getCachedLayer(l).cacheID
}
|
| 222 |
+ |
|
| 223 |
// assertLayerEqual fails the test unless l1 and l2 agree on chain ID,
// diff ID, size, and cache ID, recursively comparing their parent chains.
func assertLayerEqual(t *testing.T, l1, l2 Layer) {
	if l1.ChainID() != l2.ChainID() {
		t.Fatalf("Mismatched ID: %s vs %s", l1.ChainID(), l2.ChainID())
	}
	if l1.DiffID() != l2.DiffID() {
		t.Fatalf("Mismatched DiffID: %s vs %s", l1.DiffID(), l2.DiffID())
	}

	size1, err := l1.Size()
	if err != nil {
		t.Fatal(err)
	}

	size2, err := l2.Size()
	if err != nil {
		t.Fatal(err)
	}

	if size1 != size2 {
		t.Fatalf("Mismatched size: %d vs %d", size1, size2)
	}

	if cacheID(l1) != cacheID(l2) {
		t.Fatalf("Mismatched cache id: %s vs %s", cacheID(l1), cacheID(l2))
	}

	p1 := l1.Parent()
	p2 := l2.Parent()
	if p1 != nil && p2 != nil {
		assertLayerEqual(t, p1, p2)
	} else if p1 != nil || p2 != nil {
		// Exactly one has a parent: chains differ in depth.
		t.Fatalf("Mismatched parents: %v vs %v", p1, p2)
	}
}
|
| 257 |
+ |
|
| 258 |
// TestMountAndRegister exercises the basic round trip: register a layer
// containing one file, mount it read-write under a new name, and verify
// the file content is visible through the mount before tearing it down.
func TestMountAndRegister(t *testing.T) {
	ls, cleanup := newTestStore(t)
	defer cleanup()

	li := initWithFiles(newTestFile("testfile.txt", []byte("some test data"), 0644))
	layer, err := createLayer(ls, "", li)
	if err != nil {
		t.Fatal(err)
	}

	size, _ := layer.Size()
	t.Logf("Layer size: %d", size)

	mount2, err := ls.Mount("new-test-mount", layer.ChainID(), "", nil)
	if err != nil {
		t.Fatal(err)
	}

	path2, err := mount2.Path()
	if err != nil {
		t.Fatal(err)
	}

	b, err := ioutil.ReadFile(filepath.Join(path2, "testfile.txt"))
	if err != nil {
		t.Fatal(err)
	}

	if expected := "some test data"; string(b) != expected {
		t.Fatalf("Wrong file data, expected %q, got %q", expected, string(b))
	}

	if err := ls.Unmount("new-test-mount"); err != nil {
		t.Fatal(err)
	}

	if _, err := ls.DeleteMount("new-test-mount"); err != nil {
		t.Fatal(err)
	}
}
|
| 298 |
+ |
|
| 299 |
// TestLayerRelease builds a chain layer1 <- layer2 <- {layer3a, layer3b},
// releases the intermediate references, and verifies that layers are only
// deleted once the last child referencing them is released.
func TestLayerRelease(t *testing.T) {
	ls, cleanup := newTestStore(t)
	defer cleanup()

	layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644)))
	if err != nil {
		t.Fatal(err)
	}

	layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644)))
	if err != nil {
		t.Fatal(err)
	}

	// layer1 stays alive: layer2 still holds its parent chain.
	if _, err := ls.Release(layer1); err != nil {
		t.Fatal(err)
	}

	layer3a, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3a file"), 0644)))
	if err != nil {
		t.Fatal(err)
	}

	layer3b, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3b file"), 0644)))
	if err != nil {
		t.Fatal(err)
	}

	// layer2 stays alive: both layer3a and layer3b reference it.
	if _, err := ls.Release(layer2); err != nil {
		t.Fatal(err)
	}

	t.Logf("Layer1: %s", layer1.ChainID())
	t.Logf("Layer2: %s", layer2.ChainID())
	t.Logf("Layer3a: %s", layer3a.ChainID())
	t.Logf("Layer3b: %s", layer3b.ChainID())

	if expected := 4; len(ls.(*layerStore).layerMap) != expected {
		t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected)
	}

	// Dropping layer3b removes only itself; dropping layer3a then cascades
	// up through the now-unreferenced layer2 and layer1.
	releaseAndCheckDeleted(t, ls, layer3b, layer3b)
	releaseAndCheckDeleted(t, ls, layer3a, layer3a, layer2, layer1)
}
|
| 343 |
+ |
|
| 344 |
// TestStoreRestore persists layers and a mutated read-write mount, then
// creates a second store over the same metadata store and graph driver to
// verify that layers and mounts are fully restored from disk.
func TestStoreRestore(t *testing.T) {
	ls, cleanup := newTestStore(t)
	defer cleanup()

	layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644)))
	if err != nil {
		t.Fatal(err)
	}

	layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644)))
	if err != nil {
		t.Fatal(err)
	}

	if _, err := ls.Release(layer1); err != nil {
		t.Fatal(err)
	}

	layer3, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3 file"), 0644)))
	if err != nil {
		t.Fatal(err)
	}

	if _, err := ls.Release(layer2); err != nil {
		t.Fatal(err)
	}

	m, err := ls.Mount("some-mount_name", layer3.ChainID(), "", nil)
	if err != nil {
		t.Fatal(err)
	}

	path, err := m.Path()
	if err != nil {
		t.Fatal(err)
	}

	// Write into the read-write mount; this content must survive restore.
	if err := ioutil.WriteFile(filepath.Join(path, "testfile.txt"), []byte("nothing here"), 0644); err != nil {
		t.Fatal(err)
	}

	if err := ls.Unmount("some-mount_name"); err != nil {
		t.Fatal(err)
	}

	// Re-open the store over the same backing metadata store and driver.
	ls2, err := NewStore(ls.(*layerStore).store, ls.(*layerStore).driver)
	if err != nil {
		t.Fatal(err)
	}

	layer3b, err := ls2.Get(layer3.ChainID())
	if err != nil {
		t.Fatal(err)
	}

	assertLayerEqual(t, layer3b, layer3)

	// Mount again with same name, should already be loaded
	m2, err := ls2.Mount("some-mount_name", layer3b.ChainID(), "", nil)
	if err != nil {
		t.Fatal(err)
	}

	path2, err := m2.Path()
	if err != nil {
		t.Fatal(err)
	}

	b, err := ioutil.ReadFile(filepath.Join(path2, "testfile.txt"))
	if err != nil {
		t.Fatal(err)
	}
	if expected := "nothing here"; string(b) != expected {
		t.Fatalf("Unexpected content %q, expected %q", string(b), expected)
	}

	if err := ls2.Unmount("some-mount_name"); err != nil {
		t.Fatal(err)
	}

	// Deleting the mount must not delete any read-only layers.
	if metadata, err := ls2.DeleteMount("some-mount_name"); err != nil {
		t.Fatal(err)
	} else if len(metadata) != 0 {
		t.Fatalf("Unexpectedly deleted layers: %#v", metadata)
	}

	releaseAndCheckDeleted(t, ls2, layer3b, layer3, layer2, layer1)
}
|
| 432 |
+ |
|
| 433 |
// TestTarStreamStability verifies that TarStream reproduces the originally
// registered tar byte-for-byte, even after the underlying graph driver
// directory has been mutated out-of-band (an extra file is injected into
// layer1's cache directory between registrations).
func TestTarStreamStability(t *testing.T) {
	ls, cleanup := newTestStore(t)
	defer cleanup()

	files1 := []FileApplier{
		newTestFile("/etc/hosts", []byte("mydomain 10.0.0.1"), 0644),
		newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0644),
	}
	addedFile := newTestFile("/etc/shadow", []byte("root:::::::"), 0644)
	files2 := []FileApplier{
		newTestFile("/etc/hosts", []byte("mydomain 10.0.0.2"), 0644),
		newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0664),
		newTestFile("/root/.bashrc", []byte("PATH=/usr/sbin:/usr/bin"), 0644),
	}

	tar1, err := tarFromFiles(files1...)
	if err != nil {
		t.Fatal(err)
	}

	tar2, err := tarFromFiles(files2...)
	if err != nil {
		t.Fatal(err)
	}

	layer1, err := ls.Register(bytes.NewReader(tar1), "")
	if err != nil {
		t.Fatal(err)
	}

	// hack layer to add file
	p, err := ls.(*layerStore).driver.Get(layer1.(*referencedCacheLayer).cacheID, "")
	if err != nil {
		t.Fatal(err)
	}

	if err := addedFile.ApplyFile(p); err != nil {
		t.Fatal(err)
	}

	if err := ls.(*layerStore).driver.Put(layer1.(*referencedCacheLayer).cacheID); err != nil {
		t.Fatal(err)
	}

	layer2, err := ls.Register(bytes.NewReader(tar2), layer1.ChainID())
	if err != nil {
		t.Fatal(err)
	}

	id1 := layer1.ChainID()
	t.Logf("Layer 1: %s", layer1.ChainID())
	t.Logf("Layer 2: %s", layer2.ChainID())

	if _, err := ls.Release(layer1); err != nil {
		t.Fatal(err)
	}

	// layer2's stream must equal the registered tar2, not the mutated
	// driver state.
	assertLayerDiff(t, tar2, layer2)

	layer1b, err := ls.Get(id1)
	if err != nil {
		t.Logf("Content of layer map: %#v", ls.(*layerStore).layerMap)
		t.Fatal(err)
	}

	if _, err := ls.Release(layer2); err != nil {
		t.Fatal(err)
	}

	// layer1's stream must still equal tar1 despite the injected file.
	assertLayerDiff(t, tar1, layer1b)

	if _, err := ls.Release(layer1b); err != nil {
		t.Fatal(err)
	}
}
|
| 508 |
+ |
|
| 509 |
+func assertLayerDiff(t *testing.T, expected []byte, layer Layer) {
|
|
| 510 |
+ expectedDigest, err := digest.FromBytes(expected) |
|
| 511 |
+ if err != nil {
|
|
| 512 |
+ t.Fatal(err) |
|
| 513 |
+ } |
|
| 514 |
+ |
|
| 515 |
+ if digest.Digest(layer.DiffID()) != expectedDigest {
|
|
| 516 |
+ t.Fatalf("Mismatched diff id for %s, got %s, expected %s", layer.ChainID(), layer.DiffID(), expected)
|
|
| 517 |
+ } |
|
| 518 |
+ |
|
| 519 |
+ ts, err := layer.TarStream() |
|
| 520 |
+ if err != nil {
|
|
| 521 |
+ t.Fatal(err) |
|
| 522 |
+ } |
|
| 523 |
+ |
|
| 524 |
+ actual, err := ioutil.ReadAll(ts) |
|
| 525 |
+ if err != nil {
|
|
| 526 |
+ t.Fatal(err) |
|
| 527 |
+ } |
|
| 528 |
+ |
|
| 529 |
+ if len(actual) != len(expected) {
|
|
| 530 |
+ logByteDiff(t, actual, expected) |
|
| 531 |
+ t.Fatalf("Mismatched tar stream size for %s, got %d, expected %d", layer.ChainID(), len(actual), len(expected))
|
|
| 532 |
+ } |
|
| 533 |
+ |
|
| 534 |
+ actualDigest, err := digest.FromBytes(actual) |
|
| 535 |
+ if err != nil {
|
|
| 536 |
+ t.Fatal(err) |
|
| 537 |
+ } |
|
| 538 |
+ |
|
| 539 |
+ if actualDigest != expectedDigest {
|
|
| 540 |
+ logByteDiff(t, actual, expected) |
|
| 541 |
+ t.Fatalf("Wrong digest of tar stream, got %s, expected %s", actualDigest, expectedDigest)
|
|
| 542 |
+ } |
|
| 543 |
+} |
|
| 544 |
+ |
|
| 545 |
// maxByteLog caps how many differing bytes are dumped to the test log;
// larger diffs log only the length of the matching prefix.
const maxByteLog = 4 * 1024

// logByteDiff logs where actual and expected first diverge. When the
// differing suffixes are small enough, the raw bytes are included in hex.
func logByteDiff(t *testing.T, actual, expected []byte) {
	d1, d2 := byteDiff(actual, expected)
	if len(d1) == 0 && len(d2) == 0 {
		// Streams are identical; nothing to log.
		return
	}

	// Number of bytes that matched before the first difference.
	prefix := len(actual) - len(d1)
	if len(d1) > maxByteLog || len(d2) > maxByteLog {
		t.Logf("Byte diff after %d matching bytes", prefix)
	} else {
		t.Logf("Byte diff after %d matching bytes\nActual bytes after prefix:\n%x\nExpected bytes after prefix:\n%x", prefix, d1, d2)
	}
}
|
| 560 |
+ |
|
| 561 |
// byteDiff strips the longest common prefix of b1 and b2 and returns the
// remaining suffixes. Two empty slices mean the inputs were identical.
func byteDiff(b1, b2 []byte) ([]byte, []byte) {
	limit := len(b1)
	if len(b2) < limit {
		limit = len(b2)
	}
	for i := 0; i < limit; i++ {
		if b1[i] != b2[i] {
			return b1[i:], b2[i:]
		}
	}
	return b1[limit:], b2[limit:]
}
|
| 573 |
+ |
|
| 574 |
// tarFromFiles applies the given files to a temporary directory and
// returns the directory's contents as an uncompressed tar byte slice.
// The temporary directory is always removed before returning.
func tarFromFiles(files ...FileApplier) ([]byte, error) {
	td, err := ioutil.TempDir("", "tar-")
	if err != nil {
		return nil, err
	}
	defer os.RemoveAll(td)

	for _, f := range files {
		if err := f.ApplyFile(td); err != nil {
			return nil, err
		}
	}

	// Tar the populated directory without compression so the bytes are
	// directly comparable to layer tar streams.
	r, err := archive.Tar(td, archive.Uncompressed)
	if err != nil {
		return nil, err
	}

	buf := bytes.NewBuffer(nil)
	if _, err := io.Copy(buf, r); err != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}
|
| 599 |
+ |
|
| 600 |
+// assertReferences asserts that all the references are to the same |
|
| 601 |
+// image and represent the full set of references to that image. |
|
| 602 |
+func assertReferences(t *testing.T, references ...Layer) {
|
|
| 603 |
+ if len(references) == 0 {
|
|
| 604 |
+ return |
|
| 605 |
+ } |
|
| 606 |
+ base := references[0].(*referencedCacheLayer).roLayer |
|
| 607 |
+ seenReferences := map[Layer]struct{}{
|
|
| 608 |
+ references[0]: {},
|
|
| 609 |
+ } |
|
| 610 |
+ for i := 1; i < len(references); i++ {
|
|
| 611 |
+ other := references[i].(*referencedCacheLayer).roLayer |
|
| 612 |
+ if base != other {
|
|
| 613 |
+ t.Fatalf("Unexpected referenced cache layer %s, expecting %s", other.ChainID(), base.ChainID())
|
|
| 614 |
+ } |
|
| 615 |
+ if _, ok := base.references[references[i]]; !ok {
|
|
| 616 |
+ t.Fatalf("Reference not part of reference list: %v", references[i])
|
|
| 617 |
+ } |
|
| 618 |
+ if _, ok := seenReferences[references[i]]; ok {
|
|
| 619 |
+ t.Fatalf("Duplicated reference %v", references[i])
|
|
| 620 |
+ } |
|
| 621 |
+ } |
|
| 622 |
+ if rc := len(base.references); rc != len(references) {
|
|
| 623 |
+ t.Fatalf("Unexpected number of references %d, expecting %d", rc, len(references))
|
|
| 624 |
+ } |
|
| 625 |
+} |
|
| 626 |
+ |
|
| 627 |
// TestRegisterExistingLayer registers the identical tar twice on the same
// parent and verifies both registrations resolve to a single cached layer
// carrying two distinct references.
func TestRegisterExistingLayer(t *testing.T) {
	ls, cleanup := newTestStore(t)
	defer cleanup()

	baseFiles := []FileApplier{
		newTestFile("/etc/profile", []byte("# Base configuration"), 0644),
	}

	layerFiles := []FileApplier{
		newTestFile("/root/.bashrc", []byte("# Root configuration"), 0644),
	}

	li := initWithFiles(baseFiles...)
	layer1, err := createLayer(ls, "", li)
	if err != nil {
		t.Fatal(err)
	}

	tar1, err := tarFromFiles(layerFiles...)
	if err != nil {
		t.Fatal(err)
	}

	// Register the same content twice; both calls must succeed and yield
	// references to the same underlying layer.
	layer2a, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID())
	if err != nil {
		t.Fatal(err)
	}

	layer2b, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID())
	if err != nil {
		t.Fatal(err)
	}

	assertReferences(t, layer2a, layer2b)
}
|
| 662 |
+ |
|
| 663 |
+func graphDiffSize(ls Store, l Layer) (int64, error) {
|
|
| 664 |
+ cl := getCachedLayer(l) |
|
| 665 |
+ var parent string |
|
| 666 |
+ if cl.parent != nil {
|
|
| 667 |
+ parent = cl.parent.cacheID |
|
| 668 |
+ } |
|
| 669 |
+ return ls.(*layerStore).driver.DiffSize(cl.cacheID, parent) |
|
| 670 |
+} |
|
| 671 |
+ |
|
| 672 |
// TestLayerSize verifies that Layer.Size reports the cumulative size of
// the full layer chain while the driver's DiffSize reports only the
// per-layer delta.
func TestLayerSize(t *testing.T) {
	ls, cleanup := newTestStore(t)
	defer cleanup()

	content1 := []byte("Base contents")
	content2 := []byte("Added contents")

	layer1, err := createLayer(ls, "", initWithFiles(newTestFile("file1", content1, 0644)))
	if err != nil {
		t.Fatal(err)
	}

	layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("file2", content2, 0644)))
	if err != nil {
		t.Fatal(err)
	}

	layer1DiffSize, err := graphDiffSize(ls, layer1)
	if err != nil {
		t.Fatal(err)
	}

	if int(layer1DiffSize) != len(content1) {
		t.Fatalf("Unexpected diff size %d, expected %d", layer1DiffSize, len(content1))
	}

	layer1Size, err := layer1.Size()
	if err != nil {
		t.Fatal(err)
	}

	// Base layer: chain size equals its own diff size.
	if expected := len(content1); int(layer1Size) != expected {
		t.Fatalf("Unexpected size %d, expected %d", layer1Size, expected)
	}

	layer2DiffSize, err := graphDiffSize(ls, layer2)
	if err != nil {
		t.Fatal(err)
	}

	if int(layer2DiffSize) != len(content2) {
		t.Fatalf("Unexpected diff size %d, expected %d", layer2DiffSize, len(content2))
	}

	layer2Size, err := layer2.Size()
	if err != nil {
		t.Fatal(err)
	}

	// Child layer: Size includes the parent chain's content.
	if expected := len(content1) + len(content2); int(layer2Size) != expected {
		t.Fatalf("Unexpected size %d, expected %d", layer2Size, expected)
	}
}
| 0 | 725 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,109 @@ |
| 0 |
+package layer |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "errors" |
|
| 4 |
+ "fmt" |
|
| 5 |
+ |
|
| 6 |
+ "github.com/Sirupsen/logrus" |
|
| 7 |
+ "github.com/docker/distribution/digest" |
|
| 8 |
+) |
|
| 9 |
+ |
|
| 10 |
// GetLayerPath returns the path to a layer
// by looking up its cache ID in the store and resolving it through the
// graph driver. Returns ErrLayerDoesNotExist for unknown chain IDs.
//
// NOTE(review): the driver reference is released (Put) before the path is
// returned, so the returned path is not guaranteed to remain mounted —
// confirm callers only use this with drivers where Get/Put do not unmount.
func GetLayerPath(s Store, layer ChainID) (string, error) {
	ls, ok := s.(*layerStore)
	if !ok {
		// Only the concrete layerStore exposes the driver internals.
		return "", errors.New("unsupported layer store")
	}
	ls.layerL.Lock()
	defer ls.layerL.Unlock()

	rl, ok := ls.layerMap[layer]
	if !ok {
		return "", ErrLayerDoesNotExist
	}

	path, err := ls.driver.Get(rl.cacheID, "")
	if err != nil {
		return "", err
	}

	if err := ls.driver.Put(rl.cacheID); err != nil {
		return "", err
	}

	return path, nil
}
|
| 35 |
+ |
|
| 36 |
+// RWLayerMetadata returns the graph metadata for the provided |
|
| 37 |
+// mount name. |
|
| 38 |
+func RWLayerMetadata(s Store, name string) (map[string]string, error) {
|
|
| 39 |
+ ls, ok := s.(*layerStore) |
|
| 40 |
+ if !ok {
|
|
| 41 |
+ return nil, errors.New("unsupported layer store")
|
|
| 42 |
+ } |
|
| 43 |
+ ls.mountL.Lock() |
|
| 44 |
+ defer ls.mountL.Unlock() |
|
| 45 |
+ |
|
| 46 |
+ ml, ok := ls.mounts[name] |
|
| 47 |
+ if !ok {
|
|
| 48 |
+ return nil, errors.New("mount does not exist")
|
|
| 49 |
+ } |
|
| 50 |
+ |
|
| 51 |
+ return ls.driver.GetMetadata(ml.mountID) |
|
| 52 |
+} |
|
| 53 |
+ |
|
| 54 |
// RegisterDiffID registers a layer that already exists in the graph driver
// under graphID, recording it in the layer store with the given size.
// NOTE(review): the DiffID is computed over the graphID string itself, not
// over the layer's tar content — confirm this is intended (this looks like
// a migration-only path for pre-content-addressed graphs).
func (ls *layerStore) RegisterDiffID(graphID string, size int64) (Layer, error) {
	var err error // this is used for cleanup in existingLayer case
	diffID, err := digest.FromBytes([]byte(graphID))
	if err != nil {
		return nil, err
	}

	// Create new roLayer
	layer := &roLayer{
		cacheID:        graphID,
		diffID:         DiffID(diffID),
		referenceCount: 1,
		layerStore:     ls,
		references:     map[Layer]struct{}{},
		size:           size,
	}

	tx, err := ls.store.StartTransaction()
	if err != nil {
		return nil, err
	}
	// Cancel the metadata transaction whenever err is non-nil on exit —
	// including the deliberate "already exists" error set below.
	defer func() {
		if err != nil {
			if err := tx.Cancel(); err != nil {
				logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err)
			}
		}
	}()

	// Layers registered this way have no parent, so the chain ID is
	// derived from the diff ID alone.
	layer.chainID = createChainIDFromParent("", layer.diffID)

	if !ls.driver.Exists(layer.cacheID) {
		return nil, fmt.Errorf("layer %q is unknown to driver", layer.cacheID)
	}
	if err = storeLayer(tx, layer); err != nil {
		return nil, err
	}

	ls.layerL.Lock()
	defer ls.layerL.Unlock()

	if existingLayer := ls.getAndRetainLayer(layer.chainID); existingLayer != nil {
		// Set error for cleanup, but do not return
		err = errors.New("layer already exists")
		return existingLayer.getReference(), nil
	}

	if err = tx.Commit(layer.chainID); err != nil {
		return nil, err
	}

	ls.layerMap[layer.chainID] = layer

	return layer.getReference(), nil
}
| 0 | 109 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,251 @@ |
| 0 |
+package layer |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "compress/gzip" |
|
| 4 |
+ "errors" |
|
| 5 |
+ "fmt" |
|
| 6 |
+ "io" |
|
| 7 |
+ "os" |
|
| 8 |
+ |
|
| 9 |
+ "github.com/Sirupsen/logrus" |
|
| 10 |
+ "github.com/docker/distribution/digest" |
|
| 11 |
+ "github.com/docker/docker/pkg/ioutils" |
|
| 12 |
+ "github.com/vbatts/tar-split/tar/asm" |
|
| 13 |
+ "github.com/vbatts/tar-split/tar/storage" |
|
| 14 |
+) |
|
| 15 |
+ |
|
| 16 |
+func (ls *layerStore) MountByGraphID(name string, graphID string, parent ChainID) (l RWLayer, err error) {
|
|
| 17 |
+ ls.mountL.Lock() |
|
| 18 |
+ defer ls.mountL.Unlock() |
|
| 19 |
+ m, ok := ls.mounts[name] |
|
| 20 |
+ if ok {
|
|
| 21 |
+ if m.parent.chainID != parent {
|
|
| 22 |
+ return nil, errors.New("name conflict, mismatched parent")
|
|
| 23 |
+ } |
|
| 24 |
+ if m.mountID != graphID {
|
|
| 25 |
+ return nil, errors.New("mount already exists")
|
|
| 26 |
+ } |
|
| 27 |
+ |
|
| 28 |
+ return m, nil |
|
| 29 |
+ } |
|
| 30 |
+ |
|
| 31 |
+ if !ls.driver.Exists(graphID) {
|
|
| 32 |
+ return nil, errors.New("graph ID does not exist")
|
|
| 33 |
+ } |
|
| 34 |
+ |
|
| 35 |
+ var p *roLayer |
|
| 36 |
+ if string(parent) != "" {
|
|
| 37 |
+ ls.layerL.Lock() |
|
| 38 |
+ p = ls.getAndRetainLayer(parent) |
|
| 39 |
+ ls.layerL.Unlock() |
|
| 40 |
+ if p == nil {
|
|
| 41 |
+ return nil, ErrLayerDoesNotExist |
|
| 42 |
+ } |
|
| 43 |
+ |
|
| 44 |
+ // Release parent chain if error |
|
| 45 |
+ defer func() {
|
|
| 46 |
+ if err != nil {
|
|
| 47 |
+ ls.layerL.Lock() |
|
| 48 |
+ ls.releaseLayer(p) |
|
| 49 |
+ ls.layerL.Unlock() |
|
| 50 |
+ } |
|
| 51 |
+ }() |
|
| 52 |
+ } |
|
| 53 |
+ |
|
| 54 |
+ // TODO: Ensure graphID has correct parent |
|
| 55 |
+ |
|
| 56 |
+ m = &mountedLayer{
|
|
| 57 |
+ name: name, |
|
| 58 |
+ parent: p, |
|
| 59 |
+ mountID: graphID, |
|
| 60 |
+ layerStore: ls, |
|
| 61 |
+ } |
|
| 62 |
+ |
|
| 63 |
+ // Check for existing init layer |
|
| 64 |
+ initID := fmt.Sprintf("%s-init", graphID)
|
|
| 65 |
+ if ls.driver.Exists(initID) {
|
|
| 66 |
+ m.initID = initID |
|
| 67 |
+ } |
|
| 68 |
+ |
|
| 69 |
+ if err = ls.saveMount(m); err != nil {
|
|
| 70 |
+ return nil, err |
|
| 71 |
+ } |
|
| 72 |
+ |
|
| 73 |
+ // TODO: provide a mount label |
|
| 74 |
+ if err = ls.mount(m, ""); err != nil {
|
|
| 75 |
+ return nil, err |
|
| 76 |
+ } |
|
| 77 |
+ |
|
| 78 |
+ return m, nil |
|
| 79 |
+} |
|
| 80 |
+ |
|
| 81 |
// migrateLayer populates the layer's tar-split metadata, size, and diffID.
// When a (gzip-compressed) tar-data file exists it is replayed into the
// transaction's tar-split writer; otherwise the graph driver is diffed
// directly and the tar-split metadata is generated fresh.
func (ls *layerStore) migrateLayer(tx MetadataTransaction, tarDataFile string, layer *roLayer) error {
	var ar io.Reader
	var tdf *os.File
	var err error
	if tarDataFile != "" {
		tdf, err = os.Open(tarDataFile)
		if err != nil {
			if !os.IsNotExist(err) {
				return err
			}
			// A missing tar-data file is not fatal; fall through to the
			// driver-diff path below.
			tdf = nil
		}
		// Close on a nil *os.File returns ErrInvalid without panicking,
		// so this defer is safe even when the open failed.
		defer tdf.Close()
	}
	if tdf != nil {
		tsw, err := tx.TarSplitWriter()
		if err != nil {
			return err
		}

		defer tsw.Close()

		uncompressed, err := gzip.NewReader(tdf)
		if err != nil {
			return err
		}
		defer uncompressed.Close()

		// Tee the decompressed tar-split data into the transaction while
		// the assembled tar stream is read for digesting below.
		tr := io.TeeReader(uncompressed, tsw)
		trc := ioutils.NewReadCloserWrapper(tr, uncompressed.Close)

		ar, err = ls.assembleTar(layer.cacheID, trc, &layer.size)
		if err != nil {
			return err
		}

	} else {
		var graphParent string
		if layer.parent != nil {
			graphParent = layer.parent.cacheID
		}
		archiver, err := ls.driver.Diff(layer.cacheID, graphParent)
		if err != nil {
			return err
		}
		defer archiver.Close()

		tsw, err := tx.TarSplitWriter()
		if err != nil {
			return err
		}
		// packSizeCounter accumulates the layer size while the tar-split
		// metadata is being packed.
		metaPacker := storage.NewJSONPacker(tsw)
		packerCounter := &packSizeCounter{metaPacker, &layer.size}
		defer tsw.Close()

		ar, err = asm.NewInputTarStream(archiver, packerCounter, nil)
		if err != nil {
			return err
		}
	}

	// Digest the full tar stream to produce the layer's DiffID.
	digester := digest.Canonical.New()
	_, err = io.Copy(digester.Hash(), ar)
	if err != nil {
		return err
	}

	layer.diffID = DiffID(digester.Digest())

	return nil
}
|
| 152 |
+ |
|
| 153 |
// RegisterByGraphID registers a pre-existing graph driver layer with the
// layer store, migrating its tar-split metadata and computing its DiffID
// via migrateLayer. If the resulting chain ID is already registered, a
// reference to the existing layer is returned and the transaction is
// cancelled by the deferred cleanup.
func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, tarDataFile string) (Layer, error) {
	// err is used to hold the error which will always trigger
	// cleanup of creates sources but may not be an error returned
	// to the caller (already exists).
	var err error
	var p *roLayer
	if string(parent) != "" {
		p = ls.get(parent)
		if p == nil {
			return nil, ErrLayerDoesNotExist
		}

		// Release parent chain if error
		defer func() {
			if err != nil {
				ls.layerL.Lock()
				ls.releaseLayer(p)
				ls.layerL.Unlock()
			}
		}()
	}

	// Create new roLayer
	layer := &roLayer{
		parent:         p,
		cacheID:        graphID,
		referenceCount: 1,
		layerStore:     ls,
		references:     map[Layer]struct{}{},
	}

	tx, err := ls.store.StartTransaction()
	if err != nil {
		return nil, err
	}

	// Cancel the metadata transaction whenever err is non-nil on exit,
	// including the deliberate "already exists" error set below.
	defer func() {
		if err != nil {
			logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err)
			if err := tx.Cancel(); err != nil {
				logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err)
			}
		}
	}()

	if err = ls.migrateLayer(tx, tarDataFile, layer); err != nil {
		return nil, err
	}

	// The chain ID is only known after migration computes the diff ID.
	layer.chainID = createChainIDFromParent(parent, layer.diffID)

	if err = storeLayer(tx, layer); err != nil {
		return nil, err
	}

	ls.layerL.Lock()
	defer ls.layerL.Unlock()

	if existingLayer := ls.getAndRetainLayer(layer.chainID); existingLayer != nil {
		// Set error for cleanup, but do not return
		err = errors.New("layer already exists")
		return existingLayer.getReference(), nil
	}

	if err = tx.Commit(layer.chainID); err != nil {
		return nil, err
	}

	ls.layerMap[layer.chainID] = layer

	return layer.getReference(), nil
}
|
| 225 |
+ |
|
| 226 |
+type unpackSizeCounter struct {
|
|
| 227 |
+ unpacker storage.Unpacker |
|
| 228 |
+ size *int64 |
|
| 229 |
+} |
|
| 230 |
+ |
|
| 231 |
+func (u *unpackSizeCounter) Next() (*storage.Entry, error) {
|
|
| 232 |
+ e, err := u.unpacker.Next() |
|
| 233 |
+ if err == nil && u.size != nil {
|
|
| 234 |
+ *u.size += e.Size |
|
| 235 |
+ } |
|
| 236 |
+ return e, err |
|
| 237 |
+} |
|
| 238 |
+ |
|
| 239 |
+type packSizeCounter struct {
|
|
| 240 |
+ packer storage.Packer |
|
| 241 |
+ size *int64 |
|
| 242 |
+} |
|
| 243 |
+ |
|
| 244 |
+func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) {
|
|
| 245 |
+ n, err := p.packer.AddEntry(e) |
|
| 246 |
+ if err == nil && p.size != nil {
|
|
| 247 |
+ *p.size += e.Size |
|
| 248 |
+ } |
|
| 249 |
+ return n, err |
|
| 250 |
+} |
| 0 | 251 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,385 @@ |
| 0 |
+package layer |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "bytes" |
|
| 4 |
+ "compress/gzip" |
|
| 5 |
+ "fmt" |
|
| 6 |
+ "io" |
|
| 7 |
+ "io/ioutil" |
|
| 8 |
+ "os" |
|
| 9 |
+ "path/filepath" |
|
| 10 |
+ "testing" |
|
| 11 |
+ |
|
| 12 |
+ "github.com/docker/docker/daemon/graphdriver" |
|
| 13 |
+ "github.com/docker/docker/pkg/archive" |
|
| 14 |
+ "github.com/docker/docker/pkg/stringid" |
|
| 15 |
+ "github.com/vbatts/tar-split/tar/asm" |
|
| 16 |
+ "github.com/vbatts/tar-split/tar/storage" |
|
| 17 |
+) |
|
| 18 |
+ |
|
| 19 |
+func writeTarSplitFile(name string, tarContent []byte) error {
|
|
| 20 |
+ f, err := os.OpenFile(name, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) |
|
| 21 |
+ if err != nil {
|
|
| 22 |
+ return err |
|
| 23 |
+ } |
|
| 24 |
+ defer f.Close() |
|
| 25 |
+ |
|
| 26 |
+ fz := gzip.NewWriter(f) |
|
| 27 |
+ |
|
| 28 |
+ metaPacker := storage.NewJSONPacker(fz) |
|
| 29 |
+ defer fz.Close() |
|
| 30 |
+ |
|
| 31 |
+ rdr, err := asm.NewInputTarStream(bytes.NewReader(tarContent), metaPacker, nil) |
|
| 32 |
+ if err != nil {
|
|
| 33 |
+ return err |
|
| 34 |
+ } |
|
| 35 |
+ |
|
| 36 |
+ if _, err := io.Copy(ioutil.Discard, rdr); err != nil {
|
|
| 37 |
+ return err |
|
| 38 |
+ } |
|
| 39 |
+ |
|
| 40 |
+ return nil |
|
| 41 |
+} |
|
| 42 |
+ |
|
| 43 |
// TestLayerMigration verifies that migrating layers from an existing
// graph driver directory (RegisterByGraphID with a pre-written tar-split
// file) yields the same layers as registering the equivalent tar streams
// through the normal Register path, and that releasing one of the two
// equal references does not delete the layer.
func TestLayerMigration(t *testing.T) {
	td, err := ioutil.TempDir("", "migration-test-")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(td)

	layer1Files := []FileApplier{
		newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644),
		newTestFile("/etc/profile", []byte("# Base configuration"), 0644),
	}

	layer2Files := []FileApplier{
		newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644),
	}

	tar1, err := tarFromFiles(layer1Files...)
	if err != nil {
		t.Fatal(err)
	}

	tar2, err := tarFromFiles(layer2Files...)
	if err != nil {
		t.Fatal(err)
	}

	graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-"))
	if err != nil {
		t.Fatal(err)
	}

	// Simulate a pre-migration layer: content applied directly through
	// the graph driver, with tar-split metadata written on the side.
	graphID1 := stringid.GenerateRandomID()
	if err := graph.Create(graphID1, "", ""); err != nil {
		t.Fatal(err)
	}
	if _, err := graph.ApplyDiff(graphID1, "", archive.Reader(bytes.NewReader(tar1))); err != nil {
		t.Fatal(err)
	}

	tf1 := filepath.Join(td, "tar1.json.gz")
	if err := writeTarSplitFile(tf1, tar1); err != nil {
		t.Fatal(err)
	}

	fms, err := NewFSMetadataStore(filepath.Join(td, "layers"))
	if err != nil {
		t.Fatal(err)
	}
	ls, err := NewStore(fms, graph)
	if err != nil {
		t.Fatal(err)
	}

	layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", tf1)
	if err != nil {
		t.Fatal(err)
	}

	// Registering the same content via the normal path must resolve to
	// the same underlying layer.
	layer1b, err := ls.Register(bytes.NewReader(tar1), "")
	if err != nil {
		t.Fatal(err)
	}

	assertReferences(t, layer1a, layer1b)

	// Attempt register, should be same
	layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID())
	if err != nil {
		t.Fatal(err)
	}

	graphID2 := stringid.GenerateRandomID()
	if err := graph.Create(graphID2, graphID1, ""); err != nil {
		t.Fatal(err)
	}
	if _, err := graph.ApplyDiff(graphID2, graphID1, archive.Reader(bytes.NewReader(tar2))); err != nil {
		t.Fatal(err)
	}

	tf2 := filepath.Join(td, "tar2.json.gz")
	if err := writeTarSplitFile(tf2, tar2); err != nil {
		t.Fatal(err)
	}

	layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), tf2)
	if err != nil {
		t.Fatal(err)
	}

	assertReferences(t, layer2a, layer2b)

	// First release drops one of two references: nothing is deleted yet.
	if metadata, err := ls.Release(layer2a); err != nil {
		t.Fatal(err)
	} else if len(metadata) > 0 {
		t.Fatalf("Unexpected layer removal after first release: %#v", metadata)
	}

	// Second release drops the last reference and deletes the layer.
	metadata, err := ls.Release(layer2b)
	if err != nil {
		t.Fatal(err)
	}

	assertMetadata(t, metadata, createMetadata(layer2a))
}
|
| 147 |
+ |
|
| 148 |
// tarFromFilesInGraph builds a tar from files, applies it as layer
// graphID on top of parentID in the given graph driver, and returns the
// driver's own Diff output for that layer. Using the driver's diff
// (instead of the input tar) models a layer for which no tar-split
// metadata exists.
func tarFromFilesInGraph(graph graphdriver.Driver, graphID, parentID string, files ...FileApplier) ([]byte, error) {
	t, err := tarFromFiles(files...)
	if err != nil {
		return nil, err
	}

	if err := graph.Create(graphID, parentID, ""); err != nil {
		return nil, err
	}
	if _, err := graph.ApplyDiff(graphID, parentID, archive.Reader(bytes.NewReader(t))); err != nil {
		return nil, err
	}

	ar, err := graph.Diff(graphID, parentID)
	if err != nil {
		return nil, err
	}
	defer ar.Close()

	return ioutil.ReadAll(ar)
}
|
| 169 |
+ |
|
| 170 |
// TestLayerMigrationNoTarsplit mirrors TestLayerMigration for layers
// that have no tar-split metadata file: RegisterByGraphID is called with
// an empty tar-split path, so the diff must be regenerated from the
// graph driver itself, and must still match the normally registered
// layers.
func TestLayerMigrationNoTarsplit(t *testing.T) {
	td, err := ioutil.TempDir("", "migration-test-")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(td)

	layer1Files := []FileApplier{
		newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644),
		newTestFile("/etc/profile", []byte("# Base configuration"), 0644),
	}

	layer2Files := []FileApplier{
		newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644),
	}

	graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-"))
	if err != nil {
		t.Fatal(err)
	}
	graphID1 := stringid.GenerateRandomID()
	graphID2 := stringid.GenerateRandomID()

	// The tars come from the driver's Diff output, matching what the
	// migration would see for a pre-existing layer.
	tar1, err := tarFromFilesInGraph(graph, graphID1, "", layer1Files...)
	if err != nil {
		t.Fatal(err)
	}

	tar2, err := tarFromFilesInGraph(graph, graphID2, graphID1, layer2Files...)
	if err != nil {
		t.Fatal(err)
	}

	fms, err := NewFSMetadataStore(filepath.Join(td, "layers"))
	if err != nil {
		t.Fatal(err)
	}
	ls, err := NewStore(fms, graph)
	if err != nil {
		t.Fatal(err)
	}

	// Empty tar-split path: the store must reconstruct the diff itself.
	layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", "")
	if err != nil {
		t.Fatal(err)
	}

	layer1b, err := ls.Register(bytes.NewReader(tar1), "")
	if err != nil {
		t.Fatal(err)
	}

	assertReferences(t, layer1a, layer1b)

	// Attempt register, should be same
	layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID())
	if err != nil {
		t.Fatal(err)
	}

	layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), "")
	if err != nil {
		t.Fatal(err)
	}

	assertReferences(t, layer2a, layer2b)

	// First release drops one of two references: nothing is deleted yet.
	if metadata, err := ls.Release(layer2a); err != nil {
		t.Fatal(err)
	} else if len(metadata) > 0 {
		t.Fatalf("Unexpected layer removal after first release: %#v", metadata)
	}

	// Second release drops the last reference and deletes the layer.
	metadata, err := ls.Release(layer2b)
	if err != nil {
		t.Fatal(err)
	}

	assertMetadata(t, metadata, createMetadata(layer2a))
}
|
| 250 |
+ |
|
| 251 |
// TestMountMigration verifies migration of a pre-existing read-write
// (container) layer via MountByGraphID: the migrated mount reports the
// expected changes against its parent, is shared with a subsequent
// Mount call for the same name (with the activity count tracking both),
// and requires one Unmount per mount before DeleteMount succeeds.
func TestMountMigration(t *testing.T) {
	ls, cleanup := newTestStore(t)
	defer cleanup()

	baseFiles := []FileApplier{
		newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644),
		newTestFile("/etc/profile", []byte("# Base configuration"), 0644),
	}
	initFiles := []FileApplier{
		newTestFile("/etc/hosts", []byte{}, 0644),
		newTestFile("/etc/resolv.conf", []byte{}, 0644),
	}
	mountFiles := []FileApplier{
		newTestFile("/etc/hosts", []byte("localhost 127.0.0.1"), 0644),
		newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644),
		newTestFile("/root/testfile1.txt", []byte("nothing valuable"), 0644),
	}

	initTar, err := tarFromFiles(initFiles...)
	if err != nil {
		t.Fatal(err)
	}

	mountTar, err := tarFromFiles(mountFiles...)
	if err != nil {
		t.Fatal(err)
	}

	graph := ls.(*layerStore).driver

	layer1, err := createLayer(ls, "", initWithFiles(baseFiles...))
	if err != nil {
		t.Fatal(err)
	}

	graphID1 := layer1.(*referencedCacheLayer).cacheID

	// Build the pre-migration container layout directly in the graph
	// driver: an "<id>-init" layer on top of the base, then the
	// container's writable layer on top of that.
	containerID := stringid.GenerateRandomID()
	containerInit := fmt.Sprintf("%s-init", containerID)

	if err := graph.Create(containerInit, graphID1, ""); err != nil {
		t.Fatal(err)
	}
	if _, err := graph.ApplyDiff(containerInit, graphID1, archive.Reader(bytes.NewReader(initTar))); err != nil {
		t.Fatal(err)
	}

	if err := graph.Create(containerID, containerInit, ""); err != nil {
		t.Fatal(err)
	}
	if _, err := graph.ApplyDiff(containerID, containerInit, archive.Reader(bytes.NewReader(mountTar))); err != nil {
		t.Fatal(err)
	}

	rwLayer1, err := ls.(*layerStore).MountByGraphID("migration-mount", containerID, layer1.ChainID())
	if err != nil {
		t.Fatal(err)
	}

	changes, err := ls.Changes("migration-mount")
	if err != nil {
		t.Fatal(err)
	}

	if expected := 5; len(changes) != expected {
		t.Logf("Changes %#v", changes)
		t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected)
	}

	sortChanges(changes)

	assertChange(t, changes[0], archive.Change{
		Path: "/etc",
		Kind: archive.ChangeModify,
	})
	assertChange(t, changes[1], archive.Change{
		Path: "/etc/hosts",
		Kind: archive.ChangeModify,
	})
	assertChange(t, changes[2], archive.Change{
		Path: "/root",
		Kind: archive.ChangeModify,
	})
	assertChange(t, changes[3], archive.Change{
		Path: "/root/.bashrc",
		Kind: archive.ChangeModify,
	})
	assertChange(t, changes[4], archive.Change{
		Path: "/root/testfile1.txt",
		Kind: archive.ChangeAdd,
	})

	if expectedCount := 1; rwLayer1.(*mountedLayer).activityCount != expectedCount {
		t.Fatalf("Wrong activity count %d, expected %d", rwLayer1.(*mountedLayer).activityCount, expectedCount)
	}

	// Mounting the same name again must return the same mounted layer
	// and bump the activity count.
	rwLayer2, err := ls.Mount("migration-mount", layer1.ChainID(), "", nil)
	if err != nil {
		t.Fatal(err)
	}

	if rwLayer1 != rwLayer2 {
		t.Fatalf("Wrong rwlayer %v, expected %v", rwLayer2, rwLayer1)
	}

	if expectedCount := 2; rwLayer2.(*mountedLayer).activityCount != expectedCount {
		t.Fatalf("Wrong activity count %d, expected %d", rwLayer2.(*mountedLayer).activityCount, expectedCount)
	}

	if metadata, err := ls.Release(layer1); err != nil {
		t.Fatal(err)
	} else if len(metadata) > 0 {
		t.Fatalf("Expected no layers to be deleted, deleted %#v", metadata)
	}

	// Two mounts require two unmounts before the mount can be deleted.
	if err := ls.Unmount("migration-mount"); err != nil {
		t.Fatal(err)
	}
	if _, err := ls.DeleteMount("migration-mount"); err == nil {
		t.Fatal("Expected error deleting active mount")
	}
	if err := ls.Unmount("migration-mount"); err != nil {
		t.Fatal(err)
	}
	metadata, err := ls.DeleteMount("migration-mount")
	if err != nil {
		t.Fatal(err)
	}
	if len(metadata) == 0 {
		t.Fatal("Expected base layer to be deleted when deleting mount")
	}

	assertMetadata(t, metadata, createMetadata(layer1))
}
| 0 | 385 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,217 @@ |
| 0 |
+package layer |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "io/ioutil" |
|
| 4 |
+ "os" |
|
| 5 |
+ "path/filepath" |
|
| 6 |
+ "sort" |
|
| 7 |
+ "testing" |
|
| 8 |
+ |
|
| 9 |
+ "github.com/docker/docker/pkg/archive" |
|
| 10 |
+) |
|
| 11 |
+ |
|
| 12 |
// TestMountInit verifies that the init function passed to Mount is
// applied on top of the parent layer: a file written by the init
// function shadows the parent's version in both content and mode.
func TestMountInit(t *testing.T) {
	ls, cleanup := newTestStore(t)
	defer cleanup()

	// Same path in base layer and init layer; the init version must win.
	basefile := newTestFile("testfile.txt", []byte("base data!"), 0644)
	initfile := newTestFile("testfile.txt", []byte("init data!"), 0777)

	li := initWithFiles(basefile)
	layer, err := createLayer(ls, "", li)
	if err != nil {
		t.Fatal(err)
	}

	mountInit := func(root string) error {
		return initfile.ApplyFile(root)
	}

	m, err := ls.Mount("fun-mount", layer.ChainID(), "", mountInit)
	if err != nil {
		t.Fatal(err)
	}

	path, err := m.Path()
	if err != nil {
		t.Fatal(err)
	}

	f, err := os.Open(filepath.Join(path, "testfile.txt"))
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		t.Fatal(err)
	}

	b, err := ioutil.ReadAll(f)
	if err != nil {
		t.Fatal(err)
	}

	if expected := "init data!"; string(b) != expected {
		t.Fatalf("Unexpected test file contents %q, expected %q", string(b), expected)
	}

	if fi.Mode().Perm() != 0777 {
		t.Fatalf("Unexpected filemode %o, expecting %o", fi.Mode().Perm(), 0777)
	}
}
|
| 63 |
+ |
|
| 64 |
// TestMountSize verifies that a mount's Size reports only the writable
// layer's contents — excluding both the read-only parent layer and the
// init layer.
func TestMountSize(t *testing.T) {
	ls, cleanup := newTestStore(t)
	defer cleanup()

	content1 := []byte("Base contents")
	content2 := []byte("Mutable contents")
	contentInit := []byte("why am I excluded from the size ☹")

	li := initWithFiles(newTestFile("file1", content1, 0644))
	layer, err := createLayer(ls, "", li)
	if err != nil {
		t.Fatal(err)
	}

	mountInit := func(root string) error {
		return newTestFile("file-init", contentInit, 0777).ApplyFile(root)
	}

	m, err := ls.Mount("mount-size", layer.ChainID(), "", mountInit)
	if err != nil {
		t.Fatal(err)
	}

	path, err := m.Path()
	if err != nil {
		t.Fatal(err)
	}

	// Write into the mounted (writable) layer only.
	if err := ioutil.WriteFile(filepath.Join(path, "file2"), content2, 0755); err != nil {
		t.Fatal(err)
	}

	mountSize, err := m.Size()
	if err != nil {
		t.Fatal(err)
	}

	// Only file2 counts: base and init content are below the mount.
	if expected := len(content2); int(mountSize) != expected {
		t.Fatalf("Unexpected mount size %d, expected %d", int(mountSize), expected)
	}
}
|
| 105 |
+ |
|
| 106 |
// TestMountChanges verifies that Changes on a mount reports one entry
// per modification — covering content modify, chmod-only modify,
// delete, and add — relative to the layer plus its init changes.
func TestMountChanges(t *testing.T) {
	ls, cleanup := newTestStore(t)
	defer cleanup()

	basefiles := []FileApplier{
		newTestFile("testfile1.txt", []byte("base data!"), 0644),
		newTestFile("testfile2.txt", []byte("base data!"), 0644),
		newTestFile("testfile3.txt", []byte("base data!"), 0644),
	}
	initfile := newTestFile("testfile1.txt", []byte("init data!"), 0777)

	li := initWithFiles(basefiles...)
	layer, err := createLayer(ls, "", li)
	if err != nil {
		t.Fatal(err)
	}

	mountInit := func(root string) error {
		return initfile.ApplyFile(root)
	}

	m, err := ls.Mount("mount-changes", layer.ChainID(), "", mountInit)
	if err != nil {
		t.Fatal(err)
	}

	path, err := m.Path()
	if err != nil {
		t.Fatal(err)
	}

	// testfile1: content + mode change → modify.
	if err := os.Chmod(filepath.Join(path, "testfile1.txt"), 0755); err != nil {
		t.Fatal(err)
	}

	if err := ioutil.WriteFile(filepath.Join(path, "testfile1.txt"), []byte("mount data!"), 0755); err != nil {
		t.Fatal(err)
	}

	// testfile2: removed → delete.
	if err := os.Remove(filepath.Join(path, "testfile2.txt")); err != nil {
		t.Fatal(err)
	}

	// testfile3: mode change only → still a modify.
	if err := os.Chmod(filepath.Join(path, "testfile3.txt"), 0755); err != nil {
		t.Fatal(err)
	}

	// testfile4: new file → add.
	if err := ioutil.WriteFile(filepath.Join(path, "testfile4.txt"), []byte("mount data!"), 0644); err != nil {
		t.Fatal(err)
	}

	changes, err := ls.Changes("mount-changes")
	if err != nil {
		t.Fatal(err)
	}

	if expected := 4; len(changes) != expected {
		t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected)
	}

	sortChanges(changes)

	assertChange(t, changes[0], archive.Change{
		Path: "/testfile1.txt",
		Kind: archive.ChangeModify,
	})
	assertChange(t, changes[1], archive.Change{
		Path: "/testfile2.txt",
		Kind: archive.ChangeDelete,
	})
	assertChange(t, changes[2], archive.Change{
		Path: "/testfile3.txt",
		Kind: archive.ChangeModify,
	})
	assertChange(t, changes[3], archive.Change{
		Path: "/testfile4.txt",
		Kind: archive.ChangeAdd,
	})
}
|
| 185 |
+ |
|
| 186 |
// assertChange fails the test when actual does not match expected in
// path or change kind. Path is checked first, so a path mismatch is
// reported before a kind mismatch.
func assertChange(t *testing.T, actual, expected archive.Change) {
	if actual.Path != expected.Path {
		t.Fatalf("Unexpected change path %s, expected %s", actual.Path, expected.Path)
	}
	if actual.Kind != expected.Kind {
		t.Fatalf("Unexpected change type %s, expected %s", actual.Kind, expected.Kind)
	}
}
|
| 194 |
+ |
|
| 195 |
+func sortChanges(changes []archive.Change) {
|
|
| 196 |
+ cs := &changeSorter{
|
|
| 197 |
+ changes: changes, |
|
| 198 |
+ } |
|
| 199 |
+ sort.Sort(cs) |
|
| 200 |
+} |
|
| 201 |
+ |
|
| 202 |
+type changeSorter struct {
|
|
| 203 |
+ changes []archive.Change |
|
| 204 |
+} |
|
| 205 |
+ |
|
| 206 |
+func (cs *changeSorter) Len() int {
|
|
| 207 |
+ return len(cs.changes) |
|
| 208 |
+} |
|
| 209 |
+ |
|
| 210 |
+func (cs *changeSorter) Swap(i, j int) {
|
|
| 211 |
+ cs.changes[i], cs.changes[j] = cs.changes[j], cs.changes[i] |
|
| 212 |
+} |
|
| 213 |
+ |
|
| 214 |
+func (cs *changeSorter) Less(i, j int) bool {
|
|
| 215 |
+ return cs.changes[i].Path < cs.changes[j].Path |
|
| 216 |
+} |
| 0 | 217 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,64 @@ |
| 0 |
+package layer |
|
| 1 |
+ |
|
| 2 |
+import "io" |
|
| 3 |
+ |
|
| 4 |
// mountedLayer is the internal representation of a read-write layer
// (a container mount) on top of an optional read-only parent layer.
type mountedLayer struct {
	name          string   // caller-supplied mount identifier
	mountID       string   // graph driver ID of the writable layer
	initID        string   // graph driver ID of the init layer; "" when none
	parent        *roLayer // read-only layer below the mount; nil for none
	path          string   // filesystem path of the mount; "" when not mounted
	layerStore    *layerStore
	activityCount int // count of outstanding mounts on this layer
}
|
| 13 |
+ |
|
| 14 |
+func (ml *mountedLayer) cacheParent() string {
|
|
| 15 |
+ if ml.initID != "" {
|
|
| 16 |
+ return ml.initID |
|
| 17 |
+ } |
|
| 18 |
+ if ml.parent != nil {
|
|
| 19 |
+ return ml.parent.cacheID |
|
| 20 |
+ } |
|
| 21 |
+ return "" |
|
| 22 |
+} |
|
| 23 |
+ |
|
| 24 |
+func (ml *mountedLayer) TarStream() (io.Reader, error) {
|
|
| 25 |
+ archiver, err := ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) |
|
| 26 |
+ if err != nil {
|
|
| 27 |
+ return nil, err |
|
| 28 |
+ } |
|
| 29 |
+ return autoClosingReader{archiver}, nil
|
|
| 30 |
+} |
|
| 31 |
+ |
|
| 32 |
+func (ml *mountedLayer) Path() (string, error) {
|
|
| 33 |
+ if ml.path == "" {
|
|
| 34 |
+ return "", ErrNotMounted |
|
| 35 |
+ } |
|
| 36 |
+ return ml.path, nil |
|
| 37 |
+} |
|
| 38 |
+ |
|
| 39 |
+func (ml *mountedLayer) Parent() Layer {
|
|
| 40 |
+ if ml.parent != nil {
|
|
| 41 |
+ return ml.parent |
|
| 42 |
+ } |
|
| 43 |
+ |
|
| 44 |
+ // Return a nil interface instead of an interface wrapping a nil |
|
| 45 |
+ // pointer. |
|
| 46 |
+ return nil |
|
| 47 |
+} |
|
| 48 |
+ |
|
| 49 |
+func (ml *mountedLayer) Size() (int64, error) {
|
|
| 50 |
+ return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) |
|
| 51 |
+} |
|
| 52 |
+ |
|
| 53 |
+type autoClosingReader struct {
|
|
| 54 |
+ source io.ReadCloser |
|
| 55 |
+} |
|
| 56 |
+ |
|
| 57 |
+func (r autoClosingReader) Read(p []byte) (n int, err error) {
|
|
| 58 |
+ n, err = r.source.Read(p) |
|
| 59 |
+ if err != nil {
|
|
| 60 |
+ r.source.Close() |
|
| 61 |
+ } |
|
| 62 |
+ return |
|
| 63 |
+} |
| 0 | 64 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,110 @@ |
| 0 |
+package layer |
|
| 1 |
+ |
|
| 2 |
+import "io" |
|
| 3 |
+ |
|
| 4 |
// roLayer is the internal representation of a read-only, content
// addressed layer in the store.
type roLayer struct {
	chainID    ChainID  // content address of this layer plus its ancestry
	diffID     DiffID   // content address of this layer's diff alone
	parent     *roLayer // nil for a base layer
	cacheID    string   // graph driver ID holding the unpacked content
	size       int64    // size in bytes of this layer's diff
	layerStore *layerStore

	referenceCount int
	references     map[Layer]struct{} // references handed out via getReference
}
|
| 15 |
+ |
|
| 16 |
+func (rl *roLayer) TarStream() (io.Reader, error) {
|
|
| 17 |
+ r, err := rl.layerStore.store.TarSplitReader(rl.chainID) |
|
| 18 |
+ if err != nil {
|
|
| 19 |
+ return nil, err |
|
| 20 |
+ } |
|
| 21 |
+ |
|
| 22 |
+ return rl.layerStore.assembleTar(rl.cacheID, r, nil) |
|
| 23 |
+} |
|
| 24 |
+ |
|
| 25 |
// ChainID returns the content address of this layer combined with its
// ancestry.
func (rl *roLayer) ChainID() ChainID {
	return rl.chainID
}
|
| 28 |
+ |
|
| 29 |
// DiffID returns the content address of this layer's diff alone.
func (rl *roLayer) DiffID() DiffID {
	return rl.diffID
}
|
| 32 |
+ |
|
| 33 |
+func (rl *roLayer) Parent() Layer {
|
|
| 34 |
+ if rl.parent == nil {
|
|
| 35 |
+ return nil |
|
| 36 |
+ } |
|
| 37 |
+ return rl.parent |
|
| 38 |
+} |
|
| 39 |
+ |
|
| 40 |
+func (rl *roLayer) Size() (size int64, err error) {
|
|
| 41 |
+ if rl.parent != nil {
|
|
| 42 |
+ size, err = rl.parent.Size() |
|
| 43 |
+ if err != nil {
|
|
| 44 |
+ return |
|
| 45 |
+ } |
|
| 46 |
+ } |
|
| 47 |
+ |
|
| 48 |
+ return size + rl.size, nil |
|
| 49 |
+} |
|
| 50 |
+ |
|
| 51 |
// DiffSize returns the size in bytes of this layer's diff alone,
// excluding ancestors.
func (rl *roLayer) DiffSize() (size int64, err error) {
	return rl.size, nil
}
|
| 54 |
+ |
|
| 55 |
// Metadata returns the graph driver's metadata for this layer's cached
// content.
func (rl *roLayer) Metadata() (map[string]string, error) {
	return rl.layerStore.driver.GetMetadata(rl.cacheID)
}
|
| 58 |
+ |
|
| 59 |
+type referencedCacheLayer struct {
|
|
| 60 |
+ *roLayer |
|
| 61 |
+} |
|
| 62 |
+ |
|
| 63 |
+func (rl *roLayer) getReference() Layer {
|
|
| 64 |
+ ref := &referencedCacheLayer{
|
|
| 65 |
+ roLayer: rl, |
|
| 66 |
+ } |
|
| 67 |
+ rl.references[ref] = struct{}{}
|
|
| 68 |
+ |
|
| 69 |
+ return ref |
|
| 70 |
+} |
|
| 71 |
+ |
|
| 72 |
+func (rl *roLayer) hasReference(ref Layer) bool {
|
|
| 73 |
+ _, ok := rl.references[ref] |
|
| 74 |
+ return ok |
|
| 75 |
+} |
|
| 76 |
+ |
|
| 77 |
// hasReferences reports whether any references to this layer are still
// outstanding.
func (rl *roLayer) hasReferences() bool {
	return len(rl.references) > 0
}
|
| 80 |
+ |
|
| 81 |
// deleteReference removes a previously handed-out reference from the
// layer's reference set.
func (rl *roLayer) deleteReference(ref Layer) {
	delete(rl.references, ref)
}
|
| 84 |
+ |
|
| 85 |
+func (rl *roLayer) depth() int {
|
|
| 86 |
+ if rl.parent == nil {
|
|
| 87 |
+ return 1 |
|
| 88 |
+ } |
|
| 89 |
+ return rl.parent.depth() + 1 |
|
| 90 |
+} |
|
| 91 |
+ |
|
| 92 |
// storeLayer persists the layer's metadata (diff ID, size, cache ID and,
// when present, parent chain ID) into the given metadata transaction.
// The caller is responsible for committing or cancelling the
// transaction.
func storeLayer(tx MetadataTransaction, layer *roLayer) error {
	if err := tx.SetDiffID(layer.diffID); err != nil {
		return err
	}
	if err := tx.SetSize(layer.size); err != nil {
		return err
	}
	if err := tx.SetCacheID(layer.cacheID); err != nil {
		return err
	}
	// Base layers have no parent; only record one when it exists.
	if layer.parent != nil {
		if err := tx.SetParent(layer.parent.chainID); err != nil {
			return err
		}
	}

	return nil
}