
Merge pull request #22641 from cpuguy83/build_finalization

Adds ability to flatten image after build

Michael Crosby authored on 2016/11/02 06:30:18
Showing 21 changed files
... ...
@@ -54,6 +54,7 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
 	options.NetworkMode = r.FormValue("networkmode")
 	options.Tags = r.Form["t"]
 	options.SecurityOpt = r.Form["securityopt"]
+	options.Squash = httputils.BoolValue(r, "squash")
 
 	if r.Form.Get("shmsize") != "" {
 		shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64)
... ...
@@ -135,9 +135,15 @@ type Backend interface {
 	// TODO: make an Extract method instead of passing `decompress`
 	// TODO: do not pass a FileInfo, instead refactor the archive package to export a Walk function that can be used
 	// with Context.Walk
-	//ContainerCopy(name string, res string) (io.ReadCloser, error)
+	// ContainerCopy(name string, res string) (io.ReadCloser, error)
 	// TODO: use copyBackend api
 	CopyOnBuild(containerID string, destPath string, src FileInfo, decompress bool) error
+
+	// HasExperimental checks if the backend supports experimental features
+	HasExperimental() bool
+
+	// SquashImage squashes the fs layers from the provided image down to the specified `to` image
+	SquashImage(from string, to string) (string, error)
 }
 
 // Image represents a Docker image used by the builder.
... ...
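The two methods added to `Backend` above are all a caller needs in order to gate and perform squashing. Below is a minimal caller-side sketch; the `squashBackend` interface and `finalizeBuild` helper are illustrative stand-ins, not part of this change (the real wiring is in `BuildManager.BuildFromContext` and `Builder.build` further down).

```go
package buildsketch

import (
	"errors"
	"fmt"
)

// squashBackend is the narrow slice of builder.Backend that squashing needs.
type squashBackend interface {
	HasExperimental() bool
	SquashImage(from string, to string) (string, error)
}

// finalizeBuild optionally squashes everything a build added on top of its
// FROM image and returns the image ID that should be tagged.
func finalizeBuild(d squashBackend, builtID, fromID string, squash bool) (string, error) {
	if !squash {
		return builtID, nil
	}
	// Squashing is gated behind the daemon's experimental mode.
	if !d.HasExperimental() {
		return "", errors.New("squash is only supported with experimental mode")
	}
	squashedID, err := d.SquashImage(builtID, fromID)
	if err != nil {
		return "", fmt.Errorf("error squashing image: %v", err)
	}
	return squashedID, nil
}
```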
@@ -10,6 +10,7 @@ import (
 	"strings"
 
 	"github.com/Sirupsen/logrus"
+	apierrors "github.com/docker/docker/api/errors"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/api/types/container"
... ...
@@ -18,6 +19,7 @@ import (
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/reference"
+	perrors "github.com/pkg/errors"
 	"golang.org/x/net/context"
 )
 
... ...
@@ -77,6 +79,7 @@ type Builder struct {
 	id string
 
 	imageCache builder.ImageCache
+	from       builder.Image
 }
 
 // BuildManager implements builder.Backend and is shared across all Builder objects.
... ...
@@ -91,6 +94,9 @@ func NewBuildManager(b builder.Backend) (bm *BuildManager) {
 
 // BuildFromContext builds a new image from a given context.
 func (bm *BuildManager) BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error) {
+	if buildOptions.Squash && !bm.backend.HasExperimental() {
+		return "", apierrors.NewBadRequestError(errors.New("squash is only supported with experimental mode"))
+	}
 	buildContext, dockerfileName, err := builder.DetectContextFromRemoteURL(src, remote, pg.ProgressReaderFunc)
 	if err != nil {
 		return "", err
... ...
@@ -100,6 +106,7 @@ func (bm *BuildManager) BuildFromContext(ctx context.Context, src io.ReadCloser,
 			logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err)
 		}
 	}()
+
 	if len(dockerfileName) > 0 {
 		buildOptions.Dockerfile = dockerfileName
 	}
... ...
@@ -287,6 +294,17 @@ func (b *Builder) build(stdout io.Writer, stderr io.Writer, out io.Writer) (stri
 		return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?")
 	}
 
+	if b.options.Squash {
+		var fromID string
+		if b.from != nil {
+			fromID = b.from.ImageID()
+		}
+		b.image, err = b.docker.SquashImage(b.image, fromID)
+		if err != nil {
+			return "", perrors.Wrap(err, "error squashing image")
+		}
+	}
+
 	imageID := image.ID(b.image)
 	for _, rt := range repoAndTags {
 		if err := b.docker.TagImageWithReference(imageID, rt); err != nil {
... ...
@@ -221,6 +221,7 @@ func from(b *Builder, args []string, attributes map[string]bool, original string
 			}
 		}
 	}
+	b.from = image
 
 	return b.processImageFrom(image)
 }
... ...
@@ -59,6 +59,7 @@ type buildOptions struct {
 	compress       bool
 	securityOpt    []string
 	networkMode    string
+	squash         bool
 }
 
 // NewBuildCommand creates a new `docker build` command
... ...
@@ -110,6 +111,10 @@ func NewBuildCommand(dockerCli *command.DockerCli) *cobra.Command {
 
 	command.AddTrustedFlags(flags, true)
 
+	if dockerCli.HasExperimental() {
+		flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer")
+	}
+
 	return cmd
 }
 
... ...
@@ -305,6 +310,7 @@ func runBuild(dockerCli *command.DockerCli, options buildOptions) error {
 		CacheFrom:      options.cacheFrom,
 		SecurityOpt:    options.securityOpt,
 		NetworkMode:    options.networkMode,
+		Squash:         options.squash,
 	}
 
 	response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions)
... ...
@@ -74,6 +74,7 @@ type Driver struct {
 	ctr           *graphdriver.RefCounter
 	pathCacheLock sync.Mutex
 	pathCache     map[string]string
+	naiveDiff     graphdriver.DiffDriver
 }
 
 // Init returns a new AUFS driver.
... ...
@@ -137,6 +138,8 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
 			return nil, err
 		}
 	}
+
+	a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, uidMaps, gidMaps)
 	return a, nil
 }
 
... ...
@@ -225,7 +228,7 @@ func (a *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str
 	defer f.Close()
 
 	if parent != "" {
-		ids, err := getParentIds(a.rootPath(), parent)
+		ids, err := getParentIDs(a.rootPath(), parent)
 		if err != nil {
 			return err
 		}
... ...
@@ -427,9 +430,22 @@ func (a *Driver) Put(id string) error {
 	return err
 }
 
+// isParent returns whether the passed-in parent is the direct parent of the passed-in layer
+func (a *Driver) isParent(id, parent string) bool {
+	parents, _ := getParentIDs(a.rootPath(), id)
+	if parent == "" && len(parents) > 0 {
+		return false
+	}
+	return !(len(parents) > 0 && parent != parents[0])
+}
+
 // Diff produces an archive of the changes between the specified
 // layer and its parent layer which may be "".
 func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) {
+	if !a.isParent(id, parent) {
+		return a.naiveDiff.Diff(id, parent)
+	}
+
 	// AUFS doesn't need the parent layer to produce a diff.
 	return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
 		Compression:     archive.Uncompressed,
... ...
@@ -465,6 +481,9 @@ func (a *Driver) applyDiff(id string, diff io.Reader) error {
 // and its parent and returns the size in bytes of the changes
 // relative to its base filesystem directory.
 func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
+	if !a.isParent(id, parent) {
+		return a.naiveDiff.DiffSize(id, parent)
+	}
 	// AUFS doesn't need the parent layer to calculate the diff size.
 	return directory.Size(path.Join(a.rootPath(), "diff", id))
 }
... ...
@@ -473,7 +492,11 @@ func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
 // layer with the specified id and parent, returning the size of the
 // new layer in bytes.
 func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
-	// AUFS doesn't need the parent id to apply the diff.
+	if !a.isParent(id, parent) {
+		return a.naiveDiff.ApplyDiff(id, parent, diff)
+	}
+
+	// AUFS doesn't need the parent id to apply the diff if it is the direct parent.
 	if err = a.applyDiff(id, diff); err != nil {
 		return
 	}
... ...
@@ -484,6 +507,10 @@ func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err e
 // Changes produces a list of changes between the specified layer
 // and its parent layer. If parent is "", then all changes will be ADD changes.
 func (a *Driver) Changes(id, parent string) ([]archive.Change, error) {
+	if !a.isParent(id, parent) {
+		return a.naiveDiff.Changes(id, parent)
+	}
+
 	// AUFS doesn't have snapshots, so we need to get changes from all parent
 	// layers.
 	layers, err := a.getParentLayerPaths(id)
... ...
@@ -494,7 +521,7 @@ func (a *Driver) Changes(id, parent string) ([]archive.Change, error) {
 }
 
 func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
-	parentIds, err := getParentIds(a.rootPath(), id)
+	parentIds, err := getParentIDs(a.rootPath(), id)
 	if err != nil {
 		return nil, err
 	}
... ...
@@ -424,7 +424,7 @@ func TestChanges(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	changes, err = d.Changes("3", "")
+	changes, err = d.Changes("3", "2")
 	if err != nil {
 		t.Fatal(err)
 	}
... ...
@@ -530,7 +530,7 @@ func TestChildDiffSize(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	diffSize, err = d.DiffSize("2", "")
+	diffSize, err = d.DiffSize("2", "1")
 	if err != nil {
 		t.Fatal(err)
 	}
... ...
@@ -29,7 +29,7 @@ func loadIds(root string) ([]string, error) {
 //
 // If there are no lines in the file then the id has no parent
 // and an empty slice is returned.
-func getParentIds(root, id string) ([]string, error) {
+func getParentIDs(root, id string) ([]string, error) {
 	f, err := os.Open(path.Join(root, "layers", id))
 	if err != nil {
 		return nil, err
... ...
@@ -78,9 +78,8 @@ type ProtoDriver interface {
 	Cleanup() error
 }
 
-// Driver is the interface for layered/snapshot file system drivers.
-type Driver interface {
-	ProtoDriver
+// DiffDriver is the interface to use to implement graph diffs
+type DiffDriver interface {
 	// Diff produces an archive of the changes between the specified
 	// layer and its parent layer which may be "".
 	Diff(id, parent string) (io.ReadCloser, error)
... ...
@@ -98,6 +97,12 @@ type Driver interface {
 	DiffSize(id, parent string) (size int64, err error)
 }
 
+// Driver is the interface for layered/snapshot file system drivers.
+type Driver interface {
+	ProtoDriver
+	DiffDriver
+}
+
 // DiffGetterDriver is the interface for layered file system drivers that
 // provide a specialized function for getting file contents for tar-split.
 type DiffGetterDriver interface {
... ...
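Splitting `DiffDriver` out of `Driver` is what lets aufs (above) and overlay2 (below) hold a `graphdriver.DiffDriver` built with `NewNaiveDiffDriver` and fall back to it whenever a diff is requested against something other than a layer's direct parent. A sketch of that dispatch pattern follows; `fastDriver`, `isParent`'s stub body, and `nativeDiff` are assumed stand-ins for a driver's own fast path, not real functions in this PR.

```go
package graphsketch

import (
	"io"

	"github.com/docker/docker/daemon/graphdriver"
)

type fastDriver struct {
	// naiveDiff is built once in Init, e.g.
	// graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps).
	naiveDiff graphdriver.DiffDriver
}

// isParent reports whether parent is the direct parent of id; a real driver
// answers this from its own metadata (aufs parent IDs, overlay2 lower dirs).
func (d *fastDriver) isParent(id, parent string) bool { return false } // illustrative stub

// nativeDiff stands in for the driver's cheap path, such as tarring the
// layer's own "diff" directory.
func (d *fastDriver) nativeDiff(id string) (io.ReadCloser, error) { return nil, nil }

func (d *fastDriver) Diff(id, parent string) (io.ReadCloser, error) {
	if !d.isParent(id, parent) {
		// Arbitrary-depth request: let the naive driver mount the two layers
		// and compute the changes file by file.
		return d.naiveDiff.Diff(id, parent)
	}
	return d.nativeDiff(id)
}
```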
@@ -11,6 +11,7 @@ import (
 	"os"
 	"os/exec"
 	"path"
+	"path/filepath"
 	"strconv"
 	"strings"
 	"syscall"
... ...
@@ -44,7 +45,7 @@ var (
 
 // Each container/image has at least a "diff" directory and "link" file.
 // If there is also a "lower" file when there are diff layers
-// below  as well as "merged" and "work" directories. The "diff" directory
+// below as well as "merged" and "work" directories. The "diff" directory
 // has the upper layer of the overlay and is used to capture any
 // changes to the layer. The "lower" file contains all the lower layer
 // mounts separated by ":" and ordered from uppermost to lowermost
... ...
@@ -86,12 +87,13 @@ type overlayOptions struct {
 
 // Driver contains information about the home directory and the list of active mounts that are created using this driver.
 type Driver struct {
-	home     string
-	uidMaps  []idtools.IDMap
-	gidMaps  []idtools.IDMap
-	ctr      *graphdriver.RefCounter
-	quotaCtl *quota.Control
-	options  overlayOptions
+	home      string
+	uidMaps   []idtools.IDMap
+	gidMaps   []idtools.IDMap
+	ctr       *graphdriver.RefCounter
+	quotaCtl  *quota.Control
+	options   overlayOptions
+	naiveDiff graphdriver.DiffDriver
 }
 
 var (
... ...
@@ -163,6 +165,8 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
 		ctr:     graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
 	}
 
+	d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps)
+
 	if backingFs == "xfs" {
 		// Try to enable project quota support over xfs.
 		if d.quotaCtl, err = quota.NewControl(home); err == nil {
... ...
@@ -525,7 +529,7 @@ func (d *Driver) Put(id string) error {
 		return nil
 	}
 	if err := syscall.Unmount(mountpoint, 0); err != nil {
-		logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
+		logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
 	}
 	return nil
 }
... ...
@@ -536,8 +540,33 @@ func (d *Driver) Exists(id string) bool {
 	return err == nil
 }
 
+// isParent returns whether the passed-in parent is the direct parent of the passed-in layer
+func (d *Driver) isParent(id, parent string) bool {
+	lowers, err := d.getLowerDirs(id)
+	if err != nil {
+		return false
+	}
+	if parent == "" && len(lowers) > 0 {
+		return false
+	}
+
+	parentDir := d.dir(parent)
+	var ld string
+	if len(lowers) > 0 {
+		ld = filepath.Dir(lowers[0])
+	}
+	if ld == "" && parent == "" {
+		return true
+	}
+	return ld == parentDir
+}
+
 // ApplyDiff applies the new layer into a root
 func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) {
+	if !d.isParent(id, parent) {
+		return d.naiveDiff.ApplyDiff(id, parent, diff)
+	}
+
 	applyDir := d.getDiffPath(id)
 
 	logrus.Debugf("Applying tar in %s", applyDir)
... ...
@@ -563,12 +592,19 @@ func (d *Driver) getDiffPath(id string) string {
 // and its parent and returns the size in bytes of the changes
 // relative to its base filesystem directory.
 func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
+	if !d.isParent(id, parent) {
+		return d.naiveDiff.DiffSize(id, parent)
+	}
 	return directory.Size(d.getDiffPath(id))
 }
 
 // Diff produces an archive of the changes between the specified
 // layer and its parent layer which may be "".
 func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
+	if !d.isParent(id, parent) {
+		return d.naiveDiff.Diff(id, parent)
+	}
+
 	diffPath := d.getDiffPath(id)
 	logrus.Debugf("Tar with options on %s", diffPath)
 	return archive.TarWithOptions(diffPath, &archive.TarOptions{
... ...
@@ -582,6 +618,9 @@ func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
 // Changes produces a list of changes between the specified layer
 // and its parent layer. If parent is "", then all changes will be ADD changes.
 func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
+	if !d.isParent(id, parent) {
+		return d.naiveDiff.Changes(id, parent)
+	}
 	// Overlay doesn't have snapshots, so we need to get changes from all parent
 	// layers.
 	diffPath := d.getDiffPath(id)
... ...
@@ -1,9 +1,13 @@
 package daemon
 
 import (
+	"encoding/json"
 	"fmt"
 	"path"
 	"sort"
+	"time"
+
+	"github.com/pkg/errors"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
... ...
@@ -241,6 +245,89 @@ func (daemon *Daemon) Images(filterArgs, filter string, all bool, withExtraAttrs
 	return images, nil
 }
 
+// SquashImage creates a new image with the diff of the specified image and the specified parent.
+// The new image contains only the layers from its parent plus one extra layer holding the diff of all the layers in between.
+// The existing image(s) are not destroyed.
+// If no parent is specified, a new image is created from the diff of all the specified image's layers merged into a single layer with no parents.
+func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
+	img, err := daemon.imageStore.Get(image.ID(id))
+	if err != nil {
+		return "", err
+	}
+
+	var parentImg *image.Image
+	var parentChainID layer.ChainID
+	if len(parent) != 0 {
+		parentImg, err = daemon.imageStore.Get(image.ID(parent))
+		if err != nil {
+			return "", errors.Wrap(err, "error getting specified parent layer")
+		}
+		parentChainID = parentImg.RootFS.ChainID()
+	} else {
+		rootFS := image.NewRootFS()
+		parentImg = &image.Image{RootFS: rootFS}
+	}
+
+	l, err := daemon.layerStore.Get(img.RootFS.ChainID())
+	if err != nil {
+		return "", errors.Wrap(err, "error getting image layer")
+	}
+	defer daemon.layerStore.Release(l)
+
+	ts, err := l.TarStreamFrom(parentChainID)
+	if err != nil {
+		return "", errors.Wrapf(err, "error getting tar stream to parent")
+	}
+	defer ts.Close()
+
+	newL, err := daemon.layerStore.Register(ts, parentChainID)
+	if err != nil {
+		return "", errors.Wrap(err, "error registering layer")
+	}
+	defer daemon.layerStore.Release(newL)
+
+	var newImage image.Image
+	newImage = *img
+	newImage.RootFS = nil
+
+	var rootFS image.RootFS
+	rootFS = *parentImg.RootFS
+	rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID())
+	newImage.RootFS = &rootFS
+
+	for i, hi := range newImage.History {
+		if i >= len(parentImg.History) {
+			hi.EmptyLayer = true
+		}
+		newImage.History[i] = hi
+	}
+
+	now := time.Now()
+	var historyComment string
+	if len(parent) > 0 {
+		historyComment = fmt.Sprintf("merge %s to %s", id, parent)
+	} else {
+		historyComment = fmt.Sprintf("create new from %s", id)
+	}
+
+	newImage.History = append(newImage.History, image.History{
+		Created: now,
+		Comment: historyComment,
+	})
+	newImage.Created = now
+
+	b, err := json.Marshal(&newImage)
+	if err != nil {
+		return "", errors.Wrap(err, "error marshalling image config")
+	}
+
+	newImgID, err := daemon.imageStore.Create(b)
+	if err != nil {
+		return "", errors.Wrap(err, "error creating new image after squash")
+	}
+	return string(newImgID), nil
+}
+
 func newImage(image *image.Image, virtualSize int64) *types.ImageSummary {
 	newImage := new(types.ImageSummary)
 	newImage.ParentID = image.Parent.String()
... ...
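To make the `RootFS`/`History` bookkeeping above concrete, here is a toy illustration that uses plain strings in place of the real `image` and `layer` types; the layer names are invented.

```go
package main

import "fmt"

func main() {
	parentLayers := []string{"base-1", "base-2"}                  // the FROM image
	builtLayers := []string{"base-1", "base-2", "run-a", "run-b"} // after the build

	// TarStreamFrom(parentChainID) yields one tar stream covering run-a and
	// run-b together; Register turns it into a single new layer on the parent chain.
	squashed := append(append([]string{}, parentLayers...), "diff(run-a+run-b)")

	fmt.Println(builtLayers) // [base-1 base-2 run-a run-b]
	fmt.Println(squashed)    // [base-1 base-2 diff(run-a+run-b)]

	// History entries for run-a and run-b are kept but marked EmptyLayer, and
	// one new "merge <id> to <parent>" entry is appended, so `docker history`
	// still shows every build step.
}
```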
@@ -3,6 +3,7 @@ package xfer
 import (
 	"bytes"
 	"errors"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"runtime"
... ...
@@ -31,6 +32,10 @@ func (ml *mockLayer) TarStream() (io.ReadCloser, error) {
 	return ioutil.NopCloser(bytes.NewBuffer(ml.layerData.Bytes())), nil
 }
 
+func (ml *mockLayer) TarStreamFrom(layer.ChainID) (io.ReadCloser, error) {
+	return nil, fmt.Errorf("not implemented")
+}
+
 func (ml *mockLayer) ChainID() layer.ChainID {
 	return ml.chainID
 }
... ...
@@ -1800,6 +1800,7 @@ or being killed.
         variable expansion in other Dockerfile instructions. This is not meant for
         passing secret values. [Read more about the buildargs instruction](../../reference/builder.md#arg)
 -   **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0.  If omitted the system uses 64MB.
+-   **squash** - Squash the resulting image's layers into a single layer (boolean). **Experimental Only**
 -   **labels** – JSON map of string pairs for labels to set on the image.
 -   **networkmode** - Sets the networking mode for the run commands during
         build. Supported standard values are: `bridge`, `host`, `none`, and
... ...
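From the Go API client the new parameter is just another field on `ImageBuildOptions`. A hedged sketch follows, assuming the client and archive packages that ship with the engine; only the `Squash` field itself comes from this change.

```go
package main

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/archive"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// Tar up the build context directory ("." here) to send to the daemon.
	buildCtx, err := archive.TarWithOptions(".", &archive.TarOptions{})
	if err != nil {
		panic(err)
	}
	defer buildCtx.Close()

	// Squash maps to the `squash` query parameter; the daemon rejects it with
	// a bad-request error unless experimental mode is enabled.
	resp, err := cli.ImageBuild(context.Background(), buildCtx, types.ImageBuildOptions{
		Tags:   []string{"myimage:squashed"},
		Squash: true,
	})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body) // stream the build output
}
```

Over raw HTTP the same request is a `POST /build?squash=1&t=myimage:squashed` with the tarred context as the request body.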
@@ -54,6 +54,7 @@ Options:
                                 The format is `<number><unit>`. `number` must be greater than `0`.
                                 Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes),
                                 or `g` (gigabytes). If you omit the unit, the system uses bytes.
+  --squash                      Squash newly built layers into a single new layer (**Experimental Only**)
   -t, --tag value               Name and optionally a tag in the 'name:tag' format (default [])
       --ulimit value            Ulimit options (default [])
 ```
... ...
@@ -432,3 +433,20 @@ Linux namespaces. On Microsoft Windows, you can specify these values:
 | `hyperv`  | Hyper-V hypervisor partition-based isolation.                                                                                                                 |
 
 Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`.
+
+
+### Squash an image's layers (--squash) **Experimental Only**
+
+Once the image is built, squash the new layers into a new image with a single
+new layer. Squashing does not destroy any existing image; rather, it creates a new
+image with the content of the squashed layers. This effectively makes it look
+like all `Dockerfile` commands were created with a single layer. The build
+cache is preserved with this method.
+
+**Note**: using this option means the new image will not be able to take
+advantage of layer sharing with other images and may use significantly more
+space.
+
+**Note**: using this option you may see significantly more space used due to
+storing two copies of the image, one for the build cache with all the cache
+layers intact, and one for the squashed version.
... ...
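The layer-sharing note can be checked directly from the API: the squashed image lists the base image's layers plus exactly one new digest in `RootFS.Layers`, while the unsquashed build kept for the cache still lists every intermediate layer (the `TestBuildSquashParent` integration test below asserts the squashed image's layer count). A sketch using the Go client; the import path and the two-argument `ImageInspectWithRaw` signature are assumptions about the vendored client, not something this change adds.

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// Hypothetical tags: one image built normally, one built with --squash.
	for _, ref := range []string{"myimage:cached", "myimage:squashed"} {
		inspect, _, err := cli.ImageInspectWithRaw(ctx, ref)
		if err != nil {
			panic(err)
		}
		// The squashed variant should report len(base layers) + 1.
		fmt.Printf("%s: %d layers\n", ref, len(inspect.RootFS.Layers))
	}
}
```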
@@ -7196,3 +7196,44 @@ RUN ["cat", "/foo/file"]
 		c.Fatal(err)
 	}
 }
+
+func (s *DockerSuite) TestBuildSquashParent(c *check.C) {
+	testRequires(c, ExperimentalDaemon)
+	dockerFile := `
+		FROM busybox
+		RUN echo hello > /hello
+		RUN echo world >> /hello
+		RUN echo hello > /remove_me
+		ENV HELLO world
+		RUN rm /remove_me
+		`
+	// build and get the ID that we can use later for history comparison
+	origID, err := buildImage("test", dockerFile, false)
+	c.Assert(err, checker.IsNil)
+
+	// build with squash
+	id, err := buildImage("test", dockerFile, true, "--squash")
+	c.Assert(err, checker.IsNil)
+
+	out, _ := dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "cat /hello")
+	c.Assert(strings.TrimSpace(out), checker.Equals, "hello\nworld")
+
+	dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "[ ! -f /remove_me ]")
+	dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", `[ "$(echo $HELLO)" == "world" ]`)
+
+	// make sure the ID produced is the ID of the tag we specified
+	inspectID, err := inspectImage("test", ".ID")
+	c.Assert(err, checker.IsNil)
+	c.Assert(inspectID, checker.Equals, id)
+
+	origHistory, _ := dockerCmd(c, "history", origID)
+	testHistory, _ := dockerCmd(c, "history", "test")
+
+	splitOrigHistory := strings.Split(strings.TrimSpace(origHistory), "\n")
+	splitTestHistory := strings.Split(strings.TrimSpace(testHistory), "\n")
+	c.Assert(len(splitTestHistory), checker.Equals, len(splitOrigHistory)+1)
+
+	out, err = inspectImage(id, "len .RootFS.Layers")
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Equals, "3")
+}
... ...
@@ -3,6 +3,7 @@ package layer
 import (
 	"archive/tar"
 	"bytes"
+	"fmt"
 	"io"
 	"io/ioutil"
 )
... ...
@@ -23,6 +24,10 @@ func (el *emptyLayer) TarStream() (io.ReadCloser, error) {
 	return ioutil.NopCloser(buf), nil
 }
 
+func (el *emptyLayer) TarStreamFrom(ChainID) (io.ReadCloser, error) {
+	return nil, fmt.Errorf("can't get parent tar stream of an empty layer")
+}
+
 func (el *emptyLayer) ChainID() ChainID {
 	return ChainID(DigestSHA256EmptyTar)
 }
... ...
@@ -78,6 +78,9 @@ type TarStreamer interface {
 	// TarStream returns a tar archive stream
 	// for the contents of a layer.
 	TarStream() (io.ReadCloser, error)
+	// TarStreamFrom returns a tar archive stream of the layer's contents
+	// relative to the given ancestor in its chain, at arbitrary depth.
+	TarStreamFrom(ChainID) (io.ReadCloser, error)
 }
 
 // Layer represents a read-only layer
... ...
@@ -1,6 +1,7 @@
 package layer
 
 import (
+	"fmt"
 	"io"
 
 	"github.com/docker/docker/pkg/archive"
... ...
@@ -28,11 +29,14 @@ func (ml *mountedLayer) cacheParent() string {
 }
 
 func (ml *mountedLayer) TarStream() (io.ReadCloser, error) {
-	archiver, err := ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent())
-	if err != nil {
-		return nil, err
-	}
-	return archiver, nil
+	return ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent())
+}
+
+func (ml *mountedLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) {
+	// Not supported since this would include the init layer as well.
+	// This can already be achieved with mount + tar.
+	// Should probably never reach this point, but error out here.
+	return nil, fmt.Errorf("getting a layer diff from an arbitrary parent is not supported on mounted layer")
 }
 
 func (ml *mountedLayer) Name() string {
... ...
@@ -21,6 +21,8 @@ type roLayer struct {
 	references     map[Layer]struct{}
 }
 
+// TarStream for roLayer guarantees that the data that is produced is the exact
+// data that the layer was registered with.
 func (rl *roLayer) TarStream() (io.ReadCloser, error) {
 	r, err := rl.layerStore.store.TarSplitReader(rl.chainID)
 	if err != nil {
... ...
@@ -43,6 +45,24 @@ func (rl *roLayer) TarStream() (io.ReadCloser, error) {
 	return rc, nil
 }
 
+// TarStreamFrom does not make any guarantees about the correctness of the produced
+// data. As such it should not be used when the layer content must be verified
+// to be an exact match to the registered layer.
+func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) {
+	var parentCacheID string
+	for pl := rl.parent; pl != nil; pl = pl.parent {
+		if pl.chainID == parent {
+			parentCacheID = pl.cacheID
+			break
+		}
+	}
+
+	if parent != ChainID("") && parentCacheID == "" {
+		return nil, fmt.Errorf("layer ID '%s' is not a parent of the specified layer: cannot provide diff to non-parent", parent)
+	}
+	return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID)
+}
+
 func (rl *roLayer) ChainID() ChainID {
 	return rl.chainID
 }
... ...
@@ -11,6 +11,7 @@ docker-build - Build a new image from the source code at PATH
 [**--cgroup-parent**[=*CGROUP-PARENT*]]
 [**--help**]
 [**-f**|**--file**[=*PATH/Dockerfile*]]
+[**--squash**] *Experimental*
 [**--force-rm**]
 [**--isolation**[=*default*]]
 [**--label**[=*[]*]]
... ...
@@ -57,6 +58,22 @@ set as the **URL**, the repository is cloned locally and then sent as the contex
    the remote context. In all cases, the file must be within the build context.
    The default is *Dockerfile*.
 
+**--squash**=*true*|*false*
+   **Experimental Only**
+   Once the image is built, squash the new layers into a new image with a single
+   new layer. Squashing does not destroy any existing image; rather, it creates a new
+   image with the content of the squashed layers. This effectively makes it look
+   like all `Dockerfile` commands were created with a single layer. The build
+   cache is preserved with this method.
+
+   **Note**: using this option means the new image will not be able to take
+   advantage of layer sharing with other images and may use significantly more
+   space.
+
+   **Note**: using this option you may see significantly more space used due to
+   storing two copies of the image, one for the build cache with all the cache
+   layers intact, and one for the squashed version.
+
 **--build-arg**=*variable*
    name and value of a **buildarg**.
 
... ...
@@ -406,6 +406,9 @@ type mockLayer struct {
 func (l *mockLayer) TarStream() (io.ReadCloser, error) {
 	return nil, nil
 }
+func (l *mockLayer) TarStreamFrom(layer.ChainID) (io.ReadCloser, error) {
+	return nil, nil
+}
 
 func (l *mockLayer) ChainID() layer.ChainID {
 	return layer.CreateChainID(l.diffIDs)