Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
@@ -796,9 +796,11 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
 		return nil, fmt.Errorf("Couldn't restore custom images: %s", err)
 	}
 
+	migrationStart := time.Now()
 	if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil {
 		return nil, err
 	}
+	logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
 
 	// Discovery is only enabled when the daemon is launched with an address to advertise. When
 	// initialized, the daemon is registered and we can store the discovery backend as its read-only
@@ -97,16 +97,20 @@ func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error {
 	return ioutil.WriteFile(filepath.Join(fm.root, "cache-id"), []byte(cacheID), 0644)
 }
 
-func (fm *fileMetadataTransaction) TarSplitWriter() (io.WriteCloser, error) {
+func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) {
 	f, err := os.OpenFile(filepath.Join(fm.root, "tar-split.json.gz"), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
 	if err != nil {
 		return nil, err
 	}
+	var wc io.WriteCloser
+	if compressInput {
+		wc = gzip.NewWriter(f)
+	} else {
+		wc = f
+	}
 
-	fz := gzip.NewWriter(f)
-
-	return ioutils.NewWriteCloserWrapper(fz, func() error {
-		fz.Close()
+	return ioutils.NewWriteCloserWrapper(wc, func() error {
+		wc.Close()
 		return f.Close()
 	}), nil
 }
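
For illustration: the new compressInput parameter lets TarSplitWriter either gzip what it is given (the normal applyTar path) or accept bytes that are already in the on-disk gzip format, as the migration path later in this change does when it copies an existing tar-data file verbatim. A minimal, self-contained sketch of the same gzip-or-passthrough shape, using only the standard library — tarSplitWriter, gzipFileWriter, and the /tmp path are illustrative names, not code from this change:

package main

import (
	"compress/gzip"
	"io"
	"os"
)

// tarSplitWriter mirrors the pattern of the new TarSplitWriter(compressInput bool):
// the file on disk always ends up gzip-encoded, but the caller may hand over
// data that is already gzipped and have it written through untouched.
func tarSplitWriter(path string, compressInput bool) (io.WriteCloser, error) {
	f, err := os.OpenFile(path, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return nil, err
	}
	if !compressInput {
		return f, nil // input is already gzipped; copy it verbatim
	}
	return &gzipFileWriter{gz: gzip.NewWriter(f), f: f}, nil
}

type gzipFileWriter struct {
	gz *gzip.Writer
	f  *os.File
}

func (w *gzipFileWriter) Write(p []byte) (int, error) { return w.gz.Write(p) }

// Close flushes the gzip stream before closing the underlying file.
func (w *gzipFileWriter) Close() error {
	if err := w.gz.Close(); err != nil {
		w.f.Close()
		return err
	}
	return w.f.Close()
}

func main() {
	w, err := tarSplitWriter("/tmp/tar-split.json.gz", true)
	if err != nil {
		panic(err)
	}
	if _, err := io.WriteString(w, `{"type":1,"payload":null}`); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
}
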
@@ -183,7 +183,7 @@ type MetadataTransaction interface {
 	SetParent(parent ChainID) error
 	SetDiffID(DiffID) error
 	SetCacheID(string) error
-	TarSplitWriter() (io.WriteCloser, error)
+	TarSplitWriter(compressInput bool) (io.WriteCloser, error)
 
 	Commit(ChainID) error
 	Cancel() error
@@ -196,7 +196,7 @@ func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent stri
 	digester := digest.Canonical.New()
 	tr := io.TeeReader(ts, digester.Hash())
 
-	tsw, err := tx.TarSplitWriter()
+	tsw, err := tx.TarSplitWriter(true)
 	if err != nil {
 		return err
 	}
@@ -572,7 +572,7 @@ func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc Mou
 	return initID, nil
 }
 
-func (ls *layerStore) assembleTar(graphID string, metadata io.ReadCloser, size *int64) (io.ReadCloser, error) {
+func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error {
 	type diffPathDriver interface {
 		DiffPath(string) (string, func() error, error)
 	}
@@ -582,34 +582,20 @@ func (ls *layerStore) assembleTar(graphID string, metadata io.ReadCloser, size *
 		diffDriver = &naiveDiffPathDriver{ls.driver}
 	}
 
+	defer metadata.Close()
+
 	// get our relative path to the container
 	fsPath, releasePath, err := diffDriver.DiffPath(graphID)
 	if err != nil {
-		metadata.Close()
-		return nil, err
+		return err
 	}
+	defer releasePath()
 
-	pR, pW := io.Pipe()
-	// this will need to be in a goroutine, as we are returning the stream of a
-	// tar archive, but can not close the metadata reader early (when this
-	// function returns)...
-	go func() {
-		defer releasePath()
-		defer metadata.Close()
-
-		metaUnpacker := storage.NewJSONUnpacker(metadata)
-		upackerCounter := &unpackSizeCounter{metaUnpacker, size}
-		fileGetter := storage.NewPathFileGetter(fsPath)
-		logrus.Debugf("Assembling tar data for %s from %s", graphID, fsPath)
-		ots := asm.NewOutputTarStream(fileGetter, upackerCounter)
-		defer ots.Close()
-		if _, err := io.Copy(pW, ots); err != nil {
-			pW.CloseWithError(err)
-			return
-		}
-		pW.Close()
-	}()
-	return pR, nil
+	metaUnpacker := storage.NewJSONUnpacker(metadata)
+	upackerCounter := &unpackSizeCounter{metaUnpacker, size}
+	fileGetter := storage.NewPathFileGetter(fsPath)
+	logrus.Debugf("Assembling tar data for %s from %s", graphID, fsPath)
+	return asm.WriteOutputTarStream(fileGetter, upackerCounter, w)
 }
 
 func (ls *layerStore) Cleanup() error {
@@ -9,7 +9,6 @@ import (
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution/digest"
-	"github.com/docker/docker/pkg/ioutils"
 	"github.com/vbatts/tar-split/tar/asm"
 	"github.com/vbatts/tar-split/tar/storage"
 )
@@ -76,79 +75,75 @@ func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent
 	return nil
 }
 
-func (ls *layerStore) migrateLayer(tx MetadataTransaction, tarDataFile string, layer *roLayer) error {
-	var ar io.Reader
-	var tdf *os.File
-	var err error
-	if tarDataFile != "" {
-		tdf, err = os.Open(tarDataFile)
-		if err != nil {
-			if !os.IsNotExist(err) {
-				return err
-			}
-			tdf = nil
-		}
-		defer tdf.Close()
-	}
-	if tdf != nil {
-		tsw, err := tx.TarSplitWriter()
+func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) {
+	defer func() {
 		if err != nil {
-			return err
+			logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err)
+			diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath)
 		}
+	}()
 
-		defer tsw.Close()
-
-		uncompressed, err := gzip.NewReader(tdf)
-		if err != nil {
-			return err
-		}
-		defer uncompressed.Close()
+	if oldTarDataPath == "" {
+		err = errors.New("no tar-split file")
+		return
+	}
 
-		tr := io.TeeReader(uncompressed, tsw)
-		trc := ioutils.NewReadCloserWrapper(tr, uncompressed.Close)
+	tarDataFile, err := os.Open(oldTarDataPath)
+	if err != nil {
+		return
+	}
+	defer tarDataFile.Close()
+	uncompressed, err := gzip.NewReader(tarDataFile)
+	if err != nil {
+		return
+	}
 
-		ar, err = ls.assembleTar(layer.cacheID, trc, &layer.size)
-		if err != nil {
-			return err
-		}
+	dgst := digest.Canonical.New()
+	err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash())
+	if err != nil {
+		return
+	}
 
-	} else {
-		var graphParent string
-		if layer.parent != nil {
-			graphParent = layer.parent.cacheID
-		}
-		archiver, err := ls.driver.Diff(layer.cacheID, graphParent)
-		if err != nil {
-			return err
-		}
-		defer archiver.Close()
+	diffID = DiffID(dgst.Digest())
+	err = os.RemoveAll(newTarDataPath)
+	if err != nil {
+		return
+	}
+	err = os.Link(oldTarDataPath, newTarDataPath)
 
-		tsw, err := tx.TarSplitWriter()
-		if err != nil {
-			return err
-		}
-		metaPacker := storage.NewJSONPacker(tsw)
-		packerCounter := &packSizeCounter{metaPacker, &layer.size}
-		defer tsw.Close()
+	return
+}
 
-		ar, err = asm.NewInputTarStream(archiver, packerCounter, nil)
-		if err != nil {
-			return err
-		}
+func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) {
+	rawarchive, err := ls.driver.Diff(id, parent)
+	if err != nil {
+		return
 	}
+	defer rawarchive.Close()
 
-	digester := digest.Canonical.New()
-	_, err = io.Copy(digester.Hash(), ar)
+	f, err := os.Create(newTarDataPath)
 	if err != nil {
-		return err
+		return
 	}
+	defer f.Close()
+	mfz := gzip.NewWriter(f)
+	metaPacker := storage.NewJSONPacker(mfz)
 
-	layer.diffID = DiffID(digester.Digest())
+	packerCounter := &packSizeCounter{metaPacker, &size}
 
-	return nil
+	archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil)
+	if err != nil {
+		return
+	}
+	dgst, err := digest.FromReader(archive)
+	if err != nil {
+		return
+	}
+	diffID = DiffID(dgst)
+	return
 }
 
-func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, tarDataFile string) (Layer, error) {
+func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) {
 	// err is used to hold the error which will always trigger
 	// cleanup of creates sources but may not be an error returned
 	// to the caller (already exists).
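
For illustration: ChecksumForGraphID relies on named return values plus a deferred closure so that any failure on the fast tar-split path transparently falls back to recomputing the checksum from a full graphdriver diff (checksumForGraphIDNoTarsplit). A minimal, self-contained sketch of that fallback shape — fastChecksum and slowChecksum are hypothetical stand-ins, not functions from this change:

package main

import (
	"errors"
	"fmt"
)

// fastChecksum tries a cheap path first; the deferred closure inspects the
// named error result and, on any failure, replaces all results with the slow
// recomputation before the function returns to the caller.
func fastChecksum(id string) (sum string, size int64, err error) {
	defer func() {
		if err != nil {
			sum, size, err = slowChecksum(id) // hypothetical fallback
		}
	}()
	return "", 0, errors.New("no tar-split file") // force the fallback in this demo
}

// slowChecksum stands in for a full recomputation such as checksumForGraphIDNoTarsplit.
func slowChecksum(id string) (string, int64, error) {
	return "sha256:placeholder", 42, nil // placeholder values
}

func main() {
	fmt.Println(fastChecksum("layer1"))
}
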
@@ -177,6 +172,18 @@ func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, tarDataF
 		referenceCount: 1,
 		layerStore:     ls,
 		references:     map[Layer]struct{}{},
+		diffID:         diffID,
+		size:           size,
+		chainID:        createChainIDFromParent(parent, diffID),
+	}
+
+	ls.layerL.Lock()
+	defer ls.layerL.Unlock()
+
+	if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil {
+		// Set error for cleanup, but do not return
+		err = errors.New("layer already exists")
+		return existingLayer.getReference(), nil
 	}
 
 	tx, err := ls.store.StartTransaction()
@@ -193,23 +200,23 @@ func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, tarDataF
 		}
 	}()
 
-	if err = ls.migrateLayer(tx, tarDataFile, layer); err != nil {
+	tsw, err := tx.TarSplitWriter(false)
+	if err != nil {
 		return nil, err
 	}
-
-	layer.chainID = createChainIDFromParent(parent, layer.diffID)
-
-	if err = storeLayer(tx, layer); err != nil {
+	defer tsw.Close()
+	tdf, err := os.Open(tarDataFile)
+	if err != nil {
+		return nil, err
+	}
+	defer tdf.Close()
+	_, err = io.Copy(tsw, tdf)
+	if err != nil {
 		return nil, err
 	}
 
-	ls.layerL.Lock()
-	defer ls.layerL.Unlock()
-
-	if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil {
-		// Set error for cleanup, but do not return
-		err = errors.New("layer already exists")
-		return existingLayer.getReference(), nil
+	if err = storeLayer(tx, layer); err != nil {
+		return nil, err
 	}
 
 	if err = tx.Commit(layer.chainID); err != nil {
@@ -94,7 +94,13 @@ func TestLayerMigration(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", tf1)
+	newTarDataPath := filepath.Join(td, ".migration-tardata")
+	diffID, size, err := ls.(*layerStore).ChecksumForGraphID(graphID1, "", tf1, newTarDataPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", diffID, newTarDataPath, size)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -105,7 +111,6 @@ func TestLayerMigration(t *testing.T) {
 	}
 
 	assertReferences(t, layer1a, layer1b)
-
 	// Attempt register, should be same
 	layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID())
 	if err != nil {
@@ -124,12 +129,15 @@ func TestLayerMigration(t *testing.T) {
 	if err := writeTarSplitFile(tf2, tar2); err != nil {
 		t.Fatal(err)
 	}
-
-	layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), tf2)
+	diffID, size, err = ls.(*layerStore).ChecksumForGraphID(graphID2, graphID1, tf2, newTarDataPath)
 	if err != nil {
 		t.Fatal(err)
 	}
 
+	layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), diffID, tf2, size)
+	if err != nil {
+		t.Fatal(err)
+	}
 	assertReferences(t, layer2a, layer2b)
 
 	if metadata, err := ls.Release(layer2a); err != nil {
@@ -210,7 +218,13 @@ func TestLayerMigrationNoTarsplit(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", "")
+	newTarDataPath := filepath.Join(td, ".migration-tardata")
+	diffID, size, err := ls.(*layerStore).ChecksumForGraphID(graphID1, "", "", newTarDataPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", diffID, newTarDataPath, size)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -228,11 +242,15 @@ func TestLayerMigrationNoTarsplit(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), "")
+	diffID, size, err = ls.(*layerStore).ChecksumForGraphID(graphID2, graphID1, "", newTarDataPath)
 	if err != nil {
 		t.Fatal(err)
 	}
 
+	layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), diffID, newTarDataPath, size)
+	if err != nil {
+		t.Fatal(err)
+	}
 	assertReferences(t, layer2a, layer2b)
 
 	if metadata, err := ls.Release(layer2a); err != nil {
@@ -20,7 +20,16 @@ func (rl *roLayer) TarStream() (io.ReadCloser, error) {
 		return nil, err
 	}
 
-	return rl.layerStore.assembleTar(rl.cacheID, r, nil)
+	pr, pw := io.Pipe()
+	go func() {
+		err := rl.layerStore.assembleTarTo(rl.cacheID, r, nil, pw)
+		if err != nil {
+			pw.CloseWithError(err)
+		} else {
+			pw.Close()
+		}
+	}()
+	return pr, nil
 }
 
 func (rl *roLayer) ChainID() ChainID {
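
For illustration: TarStream still hands callers an io.ReadCloser even though assembleTarTo is now writer-oriented, by running the producer in a goroutine behind io.Pipe and forwarding any error through CloseWithError so the reader sees it. A self-contained sketch of that pattern (streamFrom is a hypothetical helper, not part of this change):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
)

// streamFrom turns a writer-oriented producer into a streaming reader: the
// producer runs in its own goroutine and its error, if any, surfaces to the
// reader via CloseWithError.
func streamFrom(produce func(w io.Writer) error) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		if err := produce(pw); err != nil {
			pw.CloseWithError(err)
			return
		}
		pw.Close()
	}()
	return pr
}

func main() {
	rc := streamFrom(func(w io.Writer) error {
		_, err := io.WriteString(w, "assembled tar bytes would go here")
		return err
	})
	defer rc.Close()
	b, _ := ioutil.ReadAll(rc)
	fmt.Println(string(b))
}
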
@@ -6,6 +6,10 @@ import (
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"runtime"
+	"strconv"
+	"sync"
+	"time"
 
 	"encoding/json"
 
@@ -19,7 +23,7 @@ import (
 )
 
 type graphIDRegistrar interface {
-	RegisterByGraphID(string, layer.ChainID, string) (layer.Layer, error)
+	RegisterByGraphID(string, layer.ChainID, layer.DiffID, string, int64) (layer.Layer, error)
 	Release(layer.Layer) ([]layer.Metadata, error)
 }
 
@@ -27,11 +31,18 @@ type graphIDMounter interface {
 	CreateRWLayerByGraphID(string, string, layer.ChainID) error
 }
 
+type checksumCalculator interface {
+	ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID layer.DiffID, size int64, err error)
+}
+
 const (
 	graphDirName          = "graph"
 	tarDataFileName       = "tar-data.json.gz"
 	migrationFileName     = ".migration-v1-images.json"
 	migrationTagsFileName = ".migration-v1-tags"
+	migrationDiffIDFileName  = ".migration-diffid"
+	migrationSizeFileName    = ".migration-size"
+	migrationTarDataFileName = ".migration-tardata"
 	containersDirName     = "containers"
 	configFileNameLegacy  = "config.json"
 	configFileName        = "config.v2.json"
@@ -45,7 +56,19 @@ var (
 // Migrate takes an old graph directory and transforms the metadata into the
 // new format.
 func Migrate(root, driverName string, ls layer.Store, is image.Store, rs reference.Store, ms metadata.Store) error {
-	mappings := make(map[string]image.ID)
+	graphDir := filepath.Join(root, graphDirName)
+	if _, err := os.Lstat(graphDir); os.IsNotExist(err) {
+		return nil
+	}
+
+	mappings, err := restoreMappings(root)
+	if err != nil {
+		return err
+	}
+
+	if cc, ok := ls.(checksumCalculator); ok {
+		CalculateLayerChecksums(root, cc, mappings)
+	}
 
 	if registrar, ok := ls.(graphIDRegistrar); !ok {
 		return errUnsupported
@@ -53,6 +76,11 @@ func Migrate(root, driverName string, ls layer.Store, is image.Store, rs referen |
| 53 | 53 |
return err |
| 54 | 54 |
} |
| 55 | 55 |
|
| 56 |
+ err = saveMappings(root, mappings) |
|
| 57 |
+ if err != nil {
|
|
| 58 |
+ return err |
|
| 59 |
+ } |
|
| 60 |
+ |
|
| 56 | 61 |
if mounter, ok := ls.(graphIDMounter); !ok {
|
| 57 | 62 |
return errUnsupported |
| 58 | 63 |
} else if err := migrateContainers(root, mounter, is, mappings); err != nil {
|
@@ -66,28 +94,115 @@ func Migrate(root, driverName string, ls layer.Store, is image.Store, rs referen
 	return nil
 }
 
-func migrateImages(root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) error {
+// CalculateLayerChecksums walks an old graph directory and calculates checksums
+// for each layer. These checksums are later used for migration.
+func CalculateLayerChecksums(root string, ls checksumCalculator, mappings map[string]image.ID) {
 	graphDir := filepath.Join(root, graphDirName)
-	if _, err := os.Lstat(graphDir); err != nil {
-		if os.IsNotExist(err) {
-			return nil
+	// spawn some extra workers also for maximum performance because the process is bounded by both cpu and io
+	workers := runtime.NumCPU() * 3
+	workQueue := make(chan string, workers)
+
+	wg := sync.WaitGroup{}
+
+	for i := 0; i < workers; i++ {
+		wg.Add(1)
+		go func() {
+			for id := range workQueue {
+				start := time.Now()
+				if err := calculateLayerChecksum(graphDir, id, ls); err != nil {
+					logrus.Errorf("could not calculate checksum for %q, %q", id, err)
+				}
+				elapsed := time.Since(start)
+				logrus.Debugf("layer %s took %.2f seconds", id, elapsed.Seconds())
+			}
+			wg.Done()
+		}()
+	}
+
+	dir, err := ioutil.ReadDir(graphDir)
+	if err != nil {
+		logrus.Errorf("could not read directory %q", graphDir)
+		return
+	}
+	for _, v := range dir {
+		v1ID := v.Name()
+		if err := imagev1.ValidateID(v1ID); err != nil {
+			continue
 		}
+		if _, ok := mappings[v1ID]; ok { // support old migrations without helper files
+			continue
+		}
+		workQueue <- v1ID
+	}
+	close(workQueue)
+	wg.Wait()
+}
+
+func calculateLayerChecksum(graphDir, id string, ls checksumCalculator) error {
+	diffIDFile := filepath.Join(graphDir, id, migrationDiffIDFileName)
+	if _, err := os.Lstat(diffIDFile); err == nil {
+		return nil
+	} else if !os.IsNotExist(err) {
+		return err
+	}
+
+	parent, err := getParent(filepath.Join(graphDir, id))
+	if err != nil {
+		return err
+	}
+
+	diffID, size, err := ls.ChecksumForGraphID(id, parent, filepath.Join(graphDir, id, tarDataFileName), filepath.Join(graphDir, id, migrationTarDataFileName))
+	if err != nil {
+		return err
+	}
+
+	if err := ioutil.WriteFile(filepath.Join(graphDir, id, migrationSizeFileName), []byte(strconv.Itoa(int(size))), 0600); err != nil {
+		return err
+	}
+
+	if err := ioutil.WriteFile(filepath.Join(graphDir, id, migrationDiffIDFileName), []byte(diffID), 0600); err != nil {
 		return err
 	}
 
+	logrus.Infof("calculated checksum for layer %s: %s", id, diffID)
+	return nil
+}
+
+func restoreMappings(root string) (map[string]image.ID, error) {
+	mappings := make(map[string]image.ID)
+
 	mfile := filepath.Join(root, migrationFileName)
 	f, err := os.Open(mfile)
 	if err != nil && !os.IsNotExist(err) {
-		return err
+		return nil, err
 	} else if err == nil {
 		err := json.NewDecoder(f).Decode(&mappings)
 		if err != nil {
 			f.Close()
-			return err
+			return nil, err
 		}
 		f.Close()
 	}
 
+	return mappings, nil
+}
+
+func saveMappings(root string, mappings map[string]image.ID) error {
+	mfile := filepath.Join(root, migrationFileName)
+	f, err := os.OpenFile(mfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	if err := json.NewEncoder(f).Encode(mappings); err != nil {
+		return err
+	}
+	return nil
+}
+
+func migrateImages(root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) error {
+	graphDir := filepath.Join(root, graphDirName)
+
 	dir, err := ioutil.ReadDir(graphDir)
 	if err != nil {
 		return err
@@ -105,15 +220,6 @@ func migrateImages(root string, ls graphIDRegistrar, is image.Store, ms metadata
 		}
 	}
 
-	f, err = os.OpenFile(mfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-	if err := json.NewEncoder(f).Encode(mappings); err != nil {
-		return err
-	}
-
 	return nil
 }
 
@@ -251,43 +357,51 @@ func migrateRefs(root, driverName string, rs refAdder, mappings map[string]image
 	return nil
 }
 
-func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) (err error) {
-	defer func() {
-		if err != nil {
-			logrus.Errorf("migration failed for %v, err: %v", id, err)
-		}
-	}()
-
-	jsonFile := filepath.Join(root, graphDirName, id, "json")
+func getParent(confDir string) (string, error) {
+	jsonFile := filepath.Join(confDir, "json")
 	imageJSON, err := ioutil.ReadFile(jsonFile)
 	if err != nil {
-		return err
+		return "", err
 	}
 	var parent struct {
 		Parent   string
 		ParentID digest.Digest `json:"parent_id"`
 	}
 	if err := json.Unmarshal(imageJSON, &parent); err != nil {
-		return err
+		return "", err
 	}
 	if parent.Parent == "" && parent.ParentID != "" { // v1.9
 		parent.Parent = parent.ParentID.Hex()
 	}
 	// compatibilityID for parent
-	parentCompatibilityID, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, "parent"))
+	parentCompatibilityID, err := ioutil.ReadFile(filepath.Join(confDir, "parent"))
 	if err == nil && len(parentCompatibilityID) > 0 {
 		parent.Parent = string(parentCompatibilityID)
 	}
+	return parent.Parent, nil
+}
+
+func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) (err error) {
+	defer func() {
+		if err != nil {
+			logrus.Errorf("migration failed for %v, err: %v", id, err)
+		}
+	}()
+
+	parent, err := getParent(filepath.Join(root, graphDirName, id))
+	if err != nil {
+		return err
+	}
 
 	var parentID image.ID
-	if parent.Parent != "" {
+	if parent != "" {
 		var exists bool
-		if parentID, exists = mappings[parent.Parent]; !exists {
-			if err := migrateImage(parent.Parent, root, ls, is, ms, mappings); err != nil {
+		if parentID, exists = mappings[parent]; !exists {
+			if err := migrateImage(parent, root, ls, is, ms, mappings); err != nil {
 				// todo: fail or allow broken chains?
 				return err
 			}
-			parentID = mappings[parent.Parent]
+			parentID = mappings[parent]
 		}
 	}
 
@@ -304,12 +418,32 @@ func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metad
 		history = parentImg.History
 	}
 
-	layer, err := ls.RegisterByGraphID(id, rootFS.ChainID(), filepath.Join(filepath.Join(root, graphDirName, id, tarDataFileName)))
+	diffID, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationDiffIDFileName))
+	if err != nil {
+		return err
+	}
+
+	sizeStr, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationSizeFileName))
+	if err != nil {
+		return err
+	}
+	size, err := strconv.ParseInt(string(sizeStr), 10, 64)
+	if err != nil {
+		return err
+	}
+
+	layer, err := ls.RegisterByGraphID(id, rootFS.ChainID(), layer.DiffID(diffID), filepath.Join(root, graphDirName, id, migrationTarDataFileName), size)
 	if err != nil {
 		return err
 	}
 	logrus.Infof("migrated layer %s to %s", id, layer.DiffID())
 
+	jsonFile := filepath.Join(root, graphDirName, id, "json")
+	imageJSON, err := ioutil.ReadFile(jsonFile)
+	if err != nil {
+		return err
+	}
+
 	h, err := imagev1.HistoryFromConfig(imageJSON, false)
 	if err != nil {
 		return err
@@ -234,12 +234,30 @@ func TestMigrateUnsupported(t *testing.T) {
 	}
 	defer os.RemoveAll(tmpdir)
 
+	err = os.MkdirAll(filepath.Join(tmpdir, "graph"), 0700)
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	err = Migrate(tmpdir, "generic", nil, nil, nil, nil)
 	if err != errUnsupported {
 		t.Fatalf("expected unsupported error, got %q", err)
 	}
 }
 
+func TestMigrateEmptyDir(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "migrate-empty")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+
+	err = Migrate(tmpdir, "generic", nil, nil, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
 func addImage(dest, jsonConfig, parent, checksum string) (string, error) {
 	var config struct{ ID string }
 	if err := json.Unmarshal([]byte(jsonConfig), &config); err != nil {
@@ -257,6 +275,17 @@ func addImage(dest, jsonConfig, parent, checksum string) (string, error) {
 	if err := ioutil.WriteFile(filepath.Join(contDir, "json"), []byte(jsonConfig), 0600); err != nil {
 		return "", err
 	}
+	if checksum != "" {
+		if err := ioutil.WriteFile(filepath.Join(contDir, "checksum"), []byte(checksum), 0600); err != nil {
+			return "", err
+		}
+	}
+	if err := ioutil.WriteFile(filepath.Join(contDir, ".migration-diffid"), []byte(layer.EmptyLayer.DiffID()), 0600); err != nil {
+		return "", err
+	}
+	if err := ioutil.WriteFile(filepath.Join(contDir, ".migration-size"), []byte("0"), 0600); err != nil {
+		return "", err
+	}
 	if parent != "" {
 		if err := ioutil.WriteFile(filepath.Join(contDir, "parent"), []byte(parent), 0600); err != nil {
 			return "", err
@@ -305,7 +334,7 @@ type mockRegistrar struct {
 	count  int
 }
 
-func (r *mockRegistrar) RegisterByGraphID(graphID string, parent layer.ChainID, tarDataFile string) (layer.Layer, error) {
+func (r *mockRegistrar) RegisterByGraphID(graphID string, parent layer.ChainID, diffID layer.DiffID, tarDataFile string, size int64) (layer.Layer, error) {
 	r.count++
 	l := &mockLayer{}
 	if parent != "" {
@@ -316,7 +345,7 @@ func (r *mockRegistrar) RegisterByGraphID(graphID string, parent layer.ChainID,
 		l.parent = p
 		l.diffIDs = append(l.diffIDs, p.diffIDs...)
 	}
-	l.diffIDs = append(l.diffIDs, layer.EmptyLayer.DiffID())
+	l.diffIDs = append(l.diffIDs, diffID)
 	if r.layers == nil {
 		r.layers = make(map[layer.ChainID]*mockLayer)
 	}
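
For illustration, the concurrency shape used by CalculateLayerChecksums above: a fixed pool of goroutines drains a channel of layer IDs while the producer enqueues the work, closes the channel, and waits on a sync.WaitGroup. A self-contained sketch of that worker-pool pattern (process and its arguments are illustrative, not code from this change):

package main

import (
	"fmt"
	"runtime"
	"sync"
)

// process runs work(id) for every id using a bounded pool of workers.
func process(ids []string, work func(id string)) {
	workers := runtime.NumCPU() * 3 // same oversubscription factor as the migration code
	queue := make(chan string, workers)

	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for id := range queue {
				work(id)
			}
		}()
	}

	for _, id := range ids {
		queue <- id
	}
	close(queue)
	wg.Wait()
}

func main() {
	process([]string{"layer-a", "layer-b", "layer-c"}, func(id string) {
		fmt.Println("checksumming", id)
	})
}
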