The `archive` package defines aliases for `io.ReadCloser` and
`io.Reader`. These don't seem to provide any benefit other than type
decoration. With this change, the aliases are removed, along with
several now-unnecessary type conversions.
Signed-off-by: Stephen J Day <stephen.day@docker.com>
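
For reference, a minimal sketch of the call-site pattern this change implies.
Only the archive.Generate signature and the ioutil.NopCloser wrapping are taken
from the diff below; the package and helper names are hypothetical:

    // Generate now returns a plain io.Reader, so a caller that must hand back
    // an io.ReadCloser wraps the result in a no-op closer instead of relying
    // on the removed archive.Archive alias.
    package build // hypothetical package, for illustration only

    import (
        "io"
        "io/ioutil"

        "github.com/docker/docker/pkg/archive"
    )

    // dockerfileContext is a made-up helper mirroring the updated call sites.
    func dockerfileContext(name string, dockerfile []byte) (io.ReadCloser, error) {
        r, err := archive.Generate(name, string(dockerfile))
        if err != nil {
            return nil, err
        }
        // Generate buffers its output in memory, so a no-op Close is sufficient.
        return ioutil.NopCloser(r), nil
    }
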
@@ -89,7 +89,12 @@ func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, createProgres
             dockerfileName = DefaultDockerfileName
 
             // TODO: return a context without tarsum
-            return archive.Generate(dockerfileName, string(dockerfile))
+            r, err := archive.Generate(dockerfileName, string(dockerfile))
+            if err != nil {
+                return nil, err
+            }
+
+            return ioutil.NopCloser(r), nil
         },
         // fallback handler (tar context)
         "": func(rc io.ReadCloser) (io.ReadCloser, error) {
@@ -172,7 +172,12 @@ func TestMakeRemoteContext(t *testing.T) {
             if err != nil {
                 return nil, err
             }
-            return archive.Generate(DefaultDockerfileName, string(dockerfile))
+
+            r, err := archive.Generate(DefaultDockerfileName, string(dockerfile))
+            if err != nil {
+                return nil, err
+            }
+            return ioutil.NopCloser(r), nil
         },
     })
 
@@ -3,6 +3,7 @@ package daemon
 import (
     "encoding/json"
     "fmt"
+    "io"
     "runtime"
     "strings"
     "time"
@@ -14,7 +15,6 @@ import (
     "github.com/docker/docker/dockerversion"
     "github.com/docker/docker/image"
     "github.com/docker/docker/layer"
-    "github.com/docker/docker/pkg/archive"
     "github.com/docker/docker/pkg/ioutils"
     "github.com/docker/docker/reference"
     "github.com/docker/go-connections/nat"
@@ -247,7 +247,7 @@ func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (str
     return id.String(), nil
 }
 
-func (daemon *Daemon) exportContainerRw(container *container.Container) (archive.Archive, error) {
+func (daemon *Daemon) exportContainerRw(container *container.Container) (io.ReadCloser, error) {
     if err := daemon.Mount(container); err != nil {
         return nil, err
     }
@@ -35,7 +35,7 @@ func (daemon *Daemon) ContainerExport(name string, out io.Writer) error {
     return nil
 }
 
-func (daemon *Daemon) containerExport(container *container.Container) (archive.Archive, error) {
+func (daemon *Daemon) containerExport(container *container.Container) (io.ReadCloser, error) {
     if err := daemon.Mount(container); err != nil {
         return nil, err
     }
@@ -25,6 +25,7 @@ package aufs
 import (
     "bufio"
     "fmt"
+    "io"
     "io/ioutil"
     "os"
     "os/exec"
@@ -428,7 +429,7 @@ func (a *Driver) Put(id string) error {
 
 // Diff produces an archive of the changes between the specified
 // layer and its parent layer which may be "".
-func (a *Driver) Diff(id, parent string) (archive.Archive, error) {
+func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) {
     // AUFS doesn't need the parent layer to produce a diff.
     return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
         Compression: archive.Uncompressed,
@@ -453,7 +454,7 @@ func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
     return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
 }
 
-func (a *Driver) applyDiff(id string, diff archive.Reader) error {
+func (a *Driver) applyDiff(id string, diff io.Reader) error {
     return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
         UIDMaps: a.uidMaps,
         GIDMaps: a.gidMaps,
@@ -471,7 +472,7 @@ func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
 // ApplyDiff extracts the changeset from the given diff into the
 // layer with the specified id and parent, returning the size of the
 // new layer in bytes.
-func (a *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) {
+func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
     // AUFS doesn't need the parent id to apply the diff.
     if err = a.applyDiff(id, diff); err != nil {
         return
@@ -3,6 +3,7 @@ package graphdriver
 import (
     "errors"
     "fmt"
+    "io"
     "os"
     "path/filepath"
     "strings"
@@ -82,7 +83,7 @@ type Driver interface {
     ProtoDriver
     // Diff produces an archive of the changes between the specified
     // layer and its parent layer which may be "".
-    Diff(id, parent string) (archive.Archive, error)
+    Diff(id, parent string) (io.ReadCloser, error)
     // Changes produces a list of changes between the specified layer
     // and its parent layer. If parent is "", then all changes will be ADD changes.
     Changes(id, parent string) ([]archive.Change, error)
@@ -90,7 +91,7 @@ type Driver interface {
     // layer with the specified id and parent, returning the size of the
     // new layer in bytes.
    // The archive.Reader must be an uncompressed stream.
-    ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
+    ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
     // DiffSize calculates the changes between the specified id
     // and its parent and returns the size in bytes of the changes
     // relative to its base filesystem directory.
@@ -1,10 +1,10 @@
 package graphdriver
 
 import (
+    "io"
     "time"
 
     "github.com/Sirupsen/logrus"
-
     "github.com/docker/docker/pkg/archive"
     "github.com/docker/docker/pkg/chrootarchive"
     "github.com/docker/docker/pkg/idtools"
@@ -43,7 +43,7 @@ func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Dr
 
 // Diff produces an archive of the changes between the specified
 // layer and its parent layer which may be "".
-func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) {
+func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) {
     startTime := time.Now()
     driver := gdw.ProtoDriver
 
@@ -126,7 +126,7 @@ func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error)
 // ApplyDiff extracts the changeset from the given diff into the
 // layer with the specified id and parent, returning the size of the
 // new layer in bytes.
-func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) {
+func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
     driver := gdw.ProtoDriver
 
     // Mount the root filesystem so we can apply the diff/layer.
@@ -5,6 +5,7 @@ package overlay
 import (
     "bufio"
     "fmt"
+    "io"
     "io/ioutil"
     "os"
     "os/exec"
@@ -12,11 +13,9 @@ import (
     "syscall"
 
     "github.com/Sirupsen/logrus"
-
     "github.com/docker/docker/daemon/graphdriver"
     "github.com/docker/docker/pkg/archive"
     "github.com/docker/docker/pkg/idtools"
-
     "github.com/docker/docker/pkg/mount"
     "github.com/opencontainers/runc/libcontainer/label"
 )
@@ -35,7 +34,7 @@ type ApplyDiffProtoDriver interface {
     graphdriver.ProtoDriver
     // ApplyDiff writes the diff to the archive for the given id and parent id.
     // It returns the size in bytes written if successful, an error ErrApplyDiffFallback is returned otherwise.
-    ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
+    ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
 }
 
 type naiveDiffDriverWithApply struct {
@@ -52,7 +51,7 @@ func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver, uidMaps, gidMaps []id
 }
 
 // ApplyDiff creates a diff layer with either the NaiveDiffDriver or with a fallback.
-func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) {
+func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
     b, err := d.applyDiff.ApplyDiff(id, parent, diff)
     if err == ErrApplyDiffFallback {
         return d.Driver.ApplyDiff(id, parent, diff)
@@ -386,7 +385,7 @@ func (d *Driver) Put(id string) error {
 }
 
 // ApplyDiff applies the new layer on top of the root, if parent does not exist with will return an ErrApplyDiffFallback error.
-func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) {
+func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) {
     dir := d.dir(id)
 
     if parent == "" {
@@ -6,6 +6,7 @@ import (
     "bufio"
     "errors"
     "fmt"
+    "io"
     "io/ioutil"
     "os"
     "os/exec"
@@ -536,7 +537,7 @@ func (d *Driver) Exists(id string) bool {
 }
 
 // ApplyDiff applies the new layer into a root
-func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) {
+func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) {
     applyDir := d.getDiffPath(id)
 
     logrus.Debugf("Applying tar in %s", applyDir)
@@ -567,7 +568,7 @@ func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
 
 // Diff produces an archive of the changes between the specified
 // layer and its parent layer which may be "".
-func (d *Driver) Diff(id, parent string) (archive.Archive, error) {
+func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
     diffPath := d.getDiffPath(id)
     logrus.Debugf("Tar with options on %s", diffPath)
     return archive.TarWithOptions(diffPath, &archive.TarOptions{
@@ -5,6 +5,7 @@ package graphdriver
 import (
     "errors"
     "fmt"
+    "io"
 
     "github.com/docker/docker/pkg/archive"
 )
@@ -170,7 +171,7 @@ func (d *graphDriverProxy) Cleanup() error {
     return nil
 }
 
-func (d *graphDriverProxy) Diff(id, parent string) (archive.Archive, error) {
+func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) {
     args := &graphDriverRequest{
         ID:     id,
         Parent: parent,
@@ -179,7 +180,7 @@ func (d *graphDriverProxy) Diff(id, parent string) (archive.Archive, error) {
     if err != nil {
         return nil, err
     }
-    return archive.Archive(body), nil
+    return body, nil
 }
 
 func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) {
@@ -198,7 +199,7 @@ func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error)
     return ret.Changes, nil
 }
 
-func (d *graphDriverProxy) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) {
+func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
     var ret graphDriverResponse
     if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil {
         return -1, err
@@ -331,7 +331,7 @@ func (d *Driver) Cleanup() error {
 // Diff produces an archive of the changes between the specified
 // layer and its parent layer which may be "".
 // The layer should be mounted when calling this function
-func (d *Driver) Diff(id, parent string) (_ archive.Archive, err error) {
+func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) {
     rID, err := d.resolveID(id)
     if err != nil {
         return
@@ -423,7 +423,7 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
 // layer with the specified id and parent, returning the size of the
 // new layer in bytes.
 // The layer should not be mounted when calling this function
-func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) {
+func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
     var layerChain []string
     if parent != "" {
         rPId, err := d.resolveID(parent)
@@ -514,7 +514,7 @@ func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error {
 }
 
 // exportLayer generates an archive from a layer based on the given ID.
-func (d *Driver) exportLayer(id string, parentLayerPaths []string) (archive.Archive, error) {
+func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) {
     archive, w := io.Pipe()
     go func() {
         err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
@@ -577,7 +577,7 @@ func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer,
     return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr)
 }
 
-func writeLayerFromTar(r archive.Reader, w hcsshim.LayerWriter, root string) (int64, error) {
+func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) {
     t := tar.NewReader(r)
     hdr, err := t.Next()
     totalSize := int64(0)
@@ -622,7 +622,7 @@ func writeLayerFromTar(r archive.Reader, w hcsshim.LayerWriter, root string) (in
 }
 
 // importLayer adds a new layer to the tag and graph store based on the given data.
-func (d *Driver) importLayer(id string, layerData archive.Reader, parentLayerPaths []string) (size int64, err error) {
+func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) {
     cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...)
     output := bytes.NewBuffer(nil)
     cmd.Stdin = layerData
@@ -287,7 +287,7 @@ func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ex
 
     mux.HandleFunc("/GraphDriver.ApplyDiff", func(w http.ResponseWriter, r *http.Request) {
         s.ec[ext].applydiff++
-        var diff archive.Reader = r.Body
+        var diff io.Reader = r.Body
         defer r.Body.Close()
 
         id := r.URL.Query().Get("id")
@@ -11,7 +11,6 @@ import (
     "github.com/docker/distribution"
     "github.com/docker/distribution/digest"
     "github.com/docker/docker/daemon/graphdriver"
-    "github.com/docker/docker/pkg/archive"
     "github.com/docker/docker/pkg/idtools"
     "github.com/docker/docker/pkg/plugingetter"
     "github.com/docker/docker/pkg/stringid"
@@ -221,7 +220,7 @@ func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent stri
         return err
     }
 
-    applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, archive.Reader(rdr))
+    applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, rdr)
     if err != nil {
         return err
     }
@@ -81,7 +81,7 @@ func TestLayerMigration(t *testing.T) {
     if err := graph.Create(graphID1, "", "", nil); err != nil {
         t.Fatal(err)
     }
-    if _, err := graph.ApplyDiff(graphID1, "", archive.Reader(bytes.NewReader(tar1))); err != nil {
+    if _, err := graph.ApplyDiff(graphID1, "", bytes.NewReader(tar1)); err != nil {
         t.Fatal(err)
     }
 
@@ -126,7 +126,7 @@ func TestLayerMigration(t *testing.T) {
     if err := graph.Create(graphID2, graphID1, "", nil); err != nil {
         t.Fatal(err)
     }
-    if _, err := graph.ApplyDiff(graphID2, graphID1, archive.Reader(bytes.NewReader(tar2))); err != nil {
+    if _, err := graph.ApplyDiff(graphID2, graphID1, bytes.NewReader(tar2)); err != nil {
         t.Fatal(err)
     }
 
@@ -168,7 +168,7 @@ func tarFromFilesInGraph(graph graphdriver.Driver, graphID, parentID string, fil
     if err := graph.Create(graphID, parentID, "", nil); err != nil {
         return nil, err
     }
-    if _, err := graph.ApplyDiff(graphID, parentID, archive.Reader(bytes.NewReader(t))); err != nil {
+    if _, err := graph.ApplyDiff(graphID, parentID, bytes.NewReader(t)); err != nil {
         return nil, err
     }
 
@@ -323,14 +323,14 @@ func TestMountMigration(t *testing.T) {
    if err := graph.Create(containerInit, graphID1, "", nil); err != nil {
         t.Fatal(err)
     }
-    if _, err := graph.ApplyDiff(containerInit, graphID1, archive.Reader(bytes.NewReader(initTar))); err != nil {
+    if _, err := graph.ApplyDiff(containerInit, graphID1, bytes.NewReader(initTar)); err != nil {
         t.Fatal(err)
     }
 
     if err := graph.Create(containerID, containerInit, "", nil); err != nil {
         t.Fatal(err)
     }
-    if _, err := graph.ApplyDiff(containerID, containerInit, archive.Reader(bytes.NewReader(mountTar))); err != nil {
+    if _, err := graph.ApplyDiff(containerID, containerInit, bytes.NewReader(mountTar)); err != nil {
         t.Fatal(err)
     }
 
@@ -27,10 +27,6 @@ import (
 )
 
 type (
-    // Archive is a type of io.ReadCloser which has two interfaces Read and Closer.
-    Archive io.ReadCloser
-    // Reader is a type of io.Reader.
-    Reader io.Reader
     // Compression is the state represents if compressed or not.
     Compression int
     // WhiteoutFormat is the format of whiteouts unpacked
@@ -39,6 +35,7 @@ type (
     TarChownOptions struct {
         UID, GID int
     }
+
     // TarOptions wraps the tar options.
     TarOptions struct {
         IncludeFiles []string
@@ -1106,7 +1103,7 @@ func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{},
 // NewTempArchive reads the content of src into a temporary file, and returns the contents
 // of that file as an archive. The archive can only be read once - as soon as reading completes,
 // the file will be deleted.
-func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
+func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
     f, err := ioutil.TempFile(dir, "")
     if err != nil {
         return nil, err
@@ -391,7 +391,7 @@ func ChangesSize(newDir string, changes []Change) int64 {
 }
 
 // ExportChanges produces an Archive from the provided changes, relative to dir.
-func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) {
+func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
     reader, writer := io.Pipe()
     go func() {
         ta := &tarAppender{
@@ -88,13 +88,13 @@ func SplitPathDirEntry(path string) (dir, base string) {
 // This function acts as a convenient wrapper around TarWithOptions, which
 // requires a directory as the source path. TarResource accepts either a
 // directory or a file path and correctly sets the Tar options.
-func TarResource(sourceInfo CopyInfo) (content Archive, err error) {
+func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
     return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
 }
 
 // TarResourceRebase is like TarResource but renames the first path element of
 // items in the resulting tar archive to match the given rebaseName if not "".
-func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) {
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
     sourcePath = normalizePath(sourcePath)
     if _, err = os.Lstat(sourcePath); err != nil {
         // Catches the case where the source does not exist or is not a
@@ -241,7 +241,7 @@ func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
 // contain the archived resource described by srcInfo, to the destination
 // described by dstInfo. Returns the possibly modified content archive along
 // with the path to the destination directory which it should be extracted to.
-func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
+func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
     // Ensure in platform semantics
     srcInfo.Path = normalizePath(srcInfo.Path)
     dstInfo.Path = normalizePath(dstInfo.Path)
@@ -304,7 +304,7 @@ func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir st
 
 // RebaseArchiveEntries rewrites the given srcContent archive replacing
 // an occurrence of oldBase with newBase at the beginning of entry names.
-func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
+func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
     if oldBase == string(os.PathSeparator) {
         // If oldBase specifies the root directory, use an empty string as
         // oldBase instead so that newBase doesn't replace the path separator
@@ -380,7 +380,7 @@ func CopyResource(srcPath, dstPath string, followLink bool) error {
 
 // CopyTo handles extracting the given content whose
 // entries should be sourced from srcInfo to dstPath.
-func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
+func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
     // The destination path need not exist, but CopyInfoDestinationPath will
     // ensure that at least the parent directory exists.
     dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
@@ -19,7 +19,7 @@ import (
 // UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be
 // compressed or uncompressed.
 // Returns the size in bytes of the contents of the layer.
-func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) {
+func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
     tr := tar.NewReader(layer)
     trBuf := pools.BufioReader32KPool.Get(tr)
     defer pools.BufioReader32KPool.Put(trBuf)
@@ -246,7 +246,7 @@ func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, er
 // and applies it to the directory `dest`. The stream `layer` can be
 // compressed or uncompressed.
 // Returns the size in bytes of the contents of the layer.
-func ApplyLayer(dest string, layer Reader) (int64, error) {
+func ApplyLayer(dest string, layer io.Reader) (int64, error) {
     return applyLayerHandler(dest, layer, &TarOptions{}, true)
 }
 
@@ -254,12 +254,12 @@ func ApplyLayer(dest string, layer Reader) (int64, error) {
 // `layer`, and applies it to the directory `dest`. The stream `layer`
 // can only be uncompressed.
 // Returns the size in bytes of the contents of the layer.
-func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) {
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
     return applyLayerHandler(dest, layer, options, false)
 }
 
 // do the bulk load of ApplyLayer, but allow for not calling DecompressStream
-func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) {
+func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
     dest = filepath.Clean(dest)
 
     // We need to be able to set any perms
@@ -3,7 +3,7 @@ package archive
 import (
     "archive/tar"
     "bytes"
-    "io/ioutil"
+    "io"
 )
 
 // Generate generates a new archive from the content provided
@@ -22,7 +22,7 @@ import (
 //
 // FIXME: stream content instead of buffering
 // FIXME: specify permissions and other archive metadata
-func Generate(input ...string) (Archive, error) {
+func Generate(input ...string) (io.Reader, error) {
     files := parseStringPairs(input...)
     buf := new(bytes.Buffer)
     tw := tar.NewWriter(buf)
@@ -42,7 +42,7 @@ func Generate(input ...string) (Archive, error) {
     if err := tw.Close(); err != nil {
         return nil, err
     }
-    return ioutil.NopCloser(buf), nil
+    return buf, nil
 }
 
 func parseStringPairs(input ...string) (output [][2]string) {
@@ -1,12 +1,16 @@
 package chrootarchive
 
-import "github.com/docker/docker/pkg/archive"
+import (
+    "io"
+
+    "github.com/docker/docker/pkg/archive"
+)
 
 // ApplyLayer parses a diff in the standard layer format from `layer`,
 // and applies it to the directory `dest`. The stream `layer` can only be
 // uncompressed.
 // Returns the size in bytes of the contents of the layer.
-func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) {
+func ApplyLayer(dest string, layer io.Reader) (size int64, err error) {
     return applyLayerHandler(dest, layer, &archive.TarOptions{}, true)
 }
 
@@ -14,6 +18,6 @@ func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) {
 // `layer`, and applies it to the directory `dest`. The stream `layer`
 // can only be uncompressed.
 // Returns the size in bytes of the contents of the layer.
-func ApplyUncompressedLayer(dest string, layer archive.Reader, options *archive.TarOptions) (int64, error) {
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) {
     return applyLayerHandler(dest, layer, options, false)
 }
@@ -7,6 +7,7 @@ import (
     "encoding/json"
     "flag"
     "fmt"
+    "io"
     "io/ioutil"
     "os"
     "path/filepath"
@@ -81,7 +82,7 @@ func applyLayer() {
 // applyLayerHandler parses a diff in the standard layer format from `layer`, and
 // applies it to the directory `dest`. Returns the size in bytes of the
 // contents of the layer.
-func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
     dest = filepath.Clean(dest)
     if decompress {
         decompressed, err := archive.DecompressStream(layer)
@@ -2,6 +2,7 @@ package chrootarchive
 
 import (
     "fmt"
+    "io"
     "io/ioutil"
     "os"
     "path/filepath"
@@ -13,7 +14,7 @@ import (
 // applyLayerHandler parses a diff in the standard layer format from `layer`, and
 // applies it to the directory `dest`. Returns the size in bytes of the
 // contents of the layer.
-func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
     dest = filepath.Clean(dest)
 
     // Ensure it is a Windows-style volume path