Browse code

Enable golint in pkg/archive

Signed-off-by: Lei Jitang <leijitang@huawei.com>

Lei Jitang authored on 2015/08/04 10:52:54
Showing 19 changed files
... ...
@@ -326,7 +326,7 @@ func (a *Driver) Diff(id, parent string) (archive.Archive, error) {
326 326
 	})
327 327
 }
328 328
 
329
-func (a *Driver) applyDiff(id string, diff archive.ArchiveReader) error {
329
+func (a *Driver) applyDiff(id string, diff archive.Reader) error {
330 330
 	return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), nil)
331 331
 }
332 332
 
... ...
@@ -341,7 +341,7 @@ func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
341 341
 // ApplyDiff extracts the changeset from the given diff into the
342 342
 // layer with the specified id and parent, returning the size of the
343 343
 // new layer in bytes.
344
-func (a *Driver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error) {
344
+func (a *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) {
345 345
 	// AUFS doesn't need the parent id to apply the diff.
346 346
 	if err = a.applyDiff(id, diff); err != nil {
347 347
 		return
... ...
@@ -77,8 +77,8 @@ type Driver interface {
77 77
 	// ApplyDiff extracts the changeset from the given diff into the
78 78
 	// layer with the specified id and parent, returning the size of the
79 79
 	// new layer in bytes.
80
-	// The archive.ArchiveReader must be an uncompressed stream.
81
-	ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error)
80
+	// The archive.Reader must be an uncompressed stream.
81
+	ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
82 82
 	// DiffSize calculates the changes between the specified id
83 83
 	// and its parent and returns the size in bytes of the changes
84 84
 	// relative to its base filesystem directory.
... ...
@@ -11,7 +11,7 @@ type WindowsGraphDriver interface {
11 11
 	LayerIdsToPaths(ids []string) []string
12 12
 	Info() hcsshim.DriverInfo
13 13
 	Export(id string, parentLayerPaths []string) (archive.Archive, error)
14
-	Import(id string, layerData archive.ArchiveReader, parentLayerPaths []string) (int64, error)
14
+	Import(id string, layerData archive.Reader, parentLayerPaths []string) (int64, error)
15 15
 }
16 16
 
17 17
 var (
... ...
@@ -25,7 +25,7 @@ type naiveDiffDriver struct {
25 25
 // it may or may not support on its own:
26 26
 //     Diff(id, parent string) (archive.Archive, error)
27 27
 //     Changes(id, parent string) ([]archive.Change, error)
28
-//     ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error)
28
+//     ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
29 29
 //     DiffSize(id, parent string) (size int64, err error)
30 30
 func NaiveDiffDriver(driver ProtoDriver) Driver {
31 31
 	return &naiveDiffDriver{ProtoDriver: driver}
... ...
@@ -109,7 +109,7 @@ func (gdw *naiveDiffDriver) Changes(id, parent string) ([]archive.Change, error)
109 109
 // ApplyDiff extracts the changeset from the given diff into the
110 110
 // layer with the specified id and parent, returning the size of the
111 111
 // new layer in bytes.
112
-func (gdw *naiveDiffDriver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error) {
112
+func (gdw *naiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) {
113 113
 	driver := gdw.ProtoDriver
114 114
 
115 115
 	// Mount the root filesystem so we can apply the diff/layer.
... ...
@@ -28,7 +28,7 @@ var (
28 28
 
29 29
 type ApplyDiffProtoDriver interface {
30 30
 	graphdriver.ProtoDriver
31
-	ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error)
31
+	ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
32 32
 }
33 33
 
34 34
 type naiveDiffDriverWithApply struct {
... ...
@@ -43,7 +43,7 @@ func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver) graphdriver.Driver {
43 43
 	}
44 44
 }
45 45
 
46
-func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.ArchiveReader) (int64, error) {
46
+func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) {
47 47
 	b, err := d.applyDiff.ApplyDiff(id, parent, diff)
48 48
 	if err == ErrApplyDiffFallback {
49 49
 		return d.Driver.ApplyDiff(id, parent, diff)
... ...
@@ -373,7 +373,7 @@ func (d *Driver) Put(id string) error {
373 373
 	return nil
374 374
 }
375 375
 
376
-func (d *Driver) ApplyDiff(id string, parent string, diff archive.ArchiveReader) (size int64, err error) {
376
+func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) {
377 377
 	dir := d.dir(id)
378 378
 
379 379
 	if parent == "" {
... ...
@@ -171,7 +171,7 @@ func (d *WindowsGraphDriver) Changes(id, parent string) ([]archive.Change, error
171 171
 // ApplyDiff extracts the changeset from the given diff into the
172 172
 // layer with the specified id and parent, returning the size of the
173 173
 // new layer in bytes.
174
-func (d *WindowsGraphDriver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error) {
174
+func (d *WindowsGraphDriver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) {
175 175
 	start := time.Now().UTC()
176 176
 	logrus.Debugf("WindowsGraphDriver ApplyDiff: Start untar layer")
177 177
 
... ...
@@ -289,7 +289,7 @@ func (d *WindowsGraphDriver) Export(id string, parentLayerPaths []string) (arch
289 289
 
290 290
 }
291 291
 
292
-func (d *WindowsGraphDriver) Import(id string, layerData archive.ArchiveReader, parentLayerPaths []string) (size int64, err error) {
292
+func (d *WindowsGraphDriver) Import(id string, layerData archive.Reader, parentLayerPaths []string) (size int64, err error) {
293 293
 	layerFs, err := d.Get(id, "")
294 294
 	if err != nil {
295 295
 		return
... ...
@@ -198,7 +198,7 @@ func (graph *Graph) Get(name string) (*image.Image, error) {
198 198
 }
199 199
 
200 200
 // Create creates a new image and registers it in the graph.
201
-func (graph *Graph) Create(layerData archive.ArchiveReader, containerID, containerImage, comment, author string, containerConfig, config *runconfig.Config) (*image.Image, error) {
201
+func (graph *Graph) Create(layerData archive.Reader, containerID, containerImage, comment, author string, containerConfig, config *runconfig.Config) (*image.Image, error) {
202 202
 	img := &image.Image{
203 203
 		ID:            stringid.GenerateRandomID(),
204 204
 		Comment:       comment,
... ...
@@ -223,7 +223,7 @@ func (graph *Graph) Create(layerData archive.ArchiveReader, containerID, contain
223 223
 }
224 224
 
225 225
 // Register imports a pre-existing image into the graph.
226
-func (graph *Graph) Register(img *image.Image, layerData archive.ArchiveReader) (err error) {
226
+func (graph *Graph) Register(img *image.Image, layerData archive.Reader) (err error) {
227 227
 
228 228
 	if err := image.ValidateID(img.ID); err != nil {
229 229
 		return err
... ...
@@ -535,7 +535,7 @@ func jsonPath(root string) string {
535 535
 	return filepath.Join(root, jsonFileName)
536 536
 }
537 537
 
538
-func (graph *Graph) disassembleAndApplyTarLayer(img *image.Image, layerData archive.ArchiveReader, root string) error {
538
+func (graph *Graph) disassembleAndApplyTarLayer(img *image.Image, layerData archive.Reader, root string) error {
539 539
 	// this is saving the tar-split metadata
540 540
 	mf, err := os.OpenFile(filepath.Join(root, tarDataFileName), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600))
541 541
 	if err != nil {
... ...
@@ -558,7 +558,7 @@ func (graph *Graph) disassembleAndApplyTarLayer(img *image.Image, layerData arch
558 558
 		return err
559 559
 	}
560 560
 
561
-	if img.Size, err = graph.driver.ApplyDiff(img.ID, img.Parent, archive.ArchiveReader(rdr)); err != nil {
561
+	if img.Size, err = graph.driver.ApplyDiff(img.ID, img.Parent, archive.Reader(rdr)); err != nil {
562 562
 		return err
563 563
 	}
564 564
 
... ...
@@ -73,7 +73,7 @@ func SetupInitLayer(initLayer string) error {
73 73
 	return nil
74 74
 }
75 75
 
76
-func createRootFilesystemInDriver(graph *Graph, img *image.Image, layerData archive.ArchiveReader) error {
76
+func createRootFilesystemInDriver(graph *Graph, img *image.Image, layerData archive.Reader) error {
77 77
 	if err := graph.driver.Create(img.ID, img.Parent); err != nil {
78 78
 		return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, img.ID, err)
79 79
 	}
... ...
@@ -87,7 +87,7 @@ func (graph *Graph) restoreBaseImages() ([]string, error) {
87 87
 // storeImage stores file system layer data for the given image to the
88 88
 // graph's storage driver. Image metadata is stored in a file
89 89
 // at the specified root directory.
90
-func (graph *Graph) storeImage(img *image.Image, layerData archive.ArchiveReader, root string) (err error) {
90
+func (graph *Graph) storeImage(img *image.Image, layerData archive.Reader, root string) (err error) {
91 91
 	// Store the layer. If layerData is not nil, unpack it into the new layer
92 92
 	if layerData != nil {
93 93
 		if err := graph.disassembleAndApplyTarLayer(img, layerData, root); err != nil {
... ...
@@ -19,7 +19,7 @@ func SetupInitLayer(initLayer string) error {
19 19
 	return nil
20 20
 }
21 21
 
22
-func createRootFilesystemInDriver(graph *Graph, img *image.Image, layerData archive.ArchiveReader) error {
22
+func createRootFilesystemInDriver(graph *Graph, img *image.Image, layerData archive.Reader) error {
23 23
 	if wd, ok := graph.driver.(*windows.WindowsGraphDriver); ok {
24 24
 		if img.Container != "" && layerData == nil {
25 25
 			logrus.Debugf("Copying from container %s.", img.Container)
... ...
@@ -71,7 +71,7 @@ func (graph *Graph) ParentLayerIds(img *image.Image) (ids []string, err error) {
71 71
 // storeImage stores file system layer data for the given image to the
72 72
 // graph's storage driver. Image metadata is stored in a file
73 73
 // at the specified root directory.
74
-func (graph *Graph) storeImage(img *image.Image, layerData archive.ArchiveReader, root string) (err error) {
74
+func (graph *Graph) storeImage(img *image.Image, layerData archive.Reader, root string) (err error) {
75 75
 
76 76
 	if wd, ok := graph.driver.(*windows.WindowsGraphDriver); ok {
77 77
 		// Store the layer. If layerData is not nil and this isn't a base image,
... ...
@@ -20,7 +20,7 @@ import (
20 20
 func (s *TagStore) Import(src string, repo string, tag string, inConfig io.ReadCloser, outStream io.Writer, containerConfig *runconfig.Config) error {
21 21
 	var (
22 22
 		sf      = streamformatter.NewJSONStreamFormatter()
23
-		archive archive.ArchiveReader
23
+		archive archive.Reader
24 24
 		resp    *http.Response
25 25
 	)
26 26
 
... ...
@@ -37,6 +37,7 @@ packages=(
37 37
 	graph
38 38
 	image
39 39
 	integration-cli
40
+	pkg/archive
40 41
 	pkg/broadcastwriter
41 42
 	pkg/chrootarchive
42 43
 	pkg/directory
... ...
@@ -25,12 +25,17 @@ import (
25 25
 )
26 26
 
27 27
 type (
28
-	Archive         io.ReadCloser
29
-	ArchiveReader   io.Reader
30
-	Compression     int
28
+	// Archive is a type of io.ReadCloser, combining the io.Reader and io.Closer interfaces.
29
+	Archive io.ReadCloser
30
+	// Reader is a type of io.Reader.
31
+	Reader io.Reader
32
+	// Compression is the state that represents whether a stream is compressed or not.
33
+	Compression int
34
+	// TarChownOptions wraps the chown options UID and GID.
31 35
 	TarChownOptions struct {
32 36
 		UID, GID int
33 37
 	}
38
+	// TarOptions wraps the tar options.
34 39
 	TarOptions struct {
35 40
 		IncludeFiles     []string
36 41
 		ExcludePatterns  []string
... ...
@@ -59,17 +64,23 @@ type (
59 59
 )
60 60
 
61 61
 var (
62
+	// ErrNotImplemented is the error returned when a function is not implemented.
62 63
 	ErrNotImplemented = errors.New("Function not implemented")
63 64
 	defaultArchiver   = &Archiver{Untar}
64 65
 )
65 66
 
66 67
 const (
68
+	// Uncompressed represents an uncompressed stream.
67 69
 	Uncompressed Compression = iota
70
+	// Bzip2 is bzip2 compression algorithm.
68 71
 	Bzip2
72
+	// Gzip is gzip compression algorithm.
69 73
 	Gzip
74
+	// Xz is xz compression algorithm.
70 75
 	Xz
71 76
 )
72 77
 
78
+// IsArchive checks whether the given header bytes identify an archive.
73 79
 func IsArchive(header []byte) bool {
74 80
 	compression := DetectCompression(header)
75 81
 	if compression != Uncompressed {
... ...
@@ -80,6 +91,7 @@ func IsArchive(header []byte) bool {
80 80
 	return err == nil
81 81
 }
82 82
 
83
+// DetectCompression detects the compression algorithm of the source.
83 84
 func DetectCompression(source []byte) Compression {
84 85
 	for compression, m := range map[Compression][]byte{
85 86
 		Bzip2: {0x42, 0x5A, 0x68},
... ...
@@ -103,6 +115,7 @@ func xzDecompress(archive io.Reader) (io.ReadCloser, error) {
103 103
 	return CmdStream(exec.Command(args[0], args[1:]...), archive)
104 104
 }
105 105
 
106
+// DecompressStream decompresses the archive and returns an io.ReadCloser with the decompressed archive.
106 107
 func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
107 108
 	p := pools.BufioReader32KPool
108 109
 	buf := p.Get(archive)
... ...
@@ -139,6 +152,7 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
139 139
 	}
140 140
 }
141 141
 
142
+// CompressStream compresses dest with the specified compression algorithm.
142 143
 func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
143 144
 	p := pools.BufioWriter32KPool
144 145
 	buf := p.Get(dest)
... ...
@@ -159,6 +173,7 @@ func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteClose
159 159
 	}
160 160
 }
161 161
 
162
+// Extension returns the extension of a file that uses the specified compression algorithm.
162 163
 func (compression *Compression) Extension() string {
163 164
 	switch *compression {
164 165
 	case Uncompressed:
... ...
@@ -530,6 +545,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
530 530
 	return pipeReader, nil
531 531
 }
532 532
 
533
+// Unpack unpacks the decompressedArchive to dest with options.
533 534
 func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
534 535
 	tr := tar.NewReader(decompressedArchive)
535 536
 	trBuf := pools.BufioReader32KPool.Get(nil)
... ...
@@ -643,7 +659,7 @@ func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
643 643
 	return untarHandler(tarArchive, dest, options, true)
644 644
 }
645 645
 
646
-// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
646
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
647 647
 // and unpacks it into the directory at `dest`.
648 648
 // The archive must be an uncompressed stream.
649 649
 func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
... ...
@@ -663,7 +679,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp
663 663
 		options.ExcludePatterns = []string{}
664 664
 	}
665 665
 
666
-	var r io.Reader = tarArchive
666
+	r := tarArchive
667 667
 	if decompress {
668 668
 		decompressedArchive, err := DecompressStream(tarArchive)
669 669
 		if err != nil {
... ...
@@ -676,6 +692,8 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp
676 676
 	return Unpack(r, dest, options)
677 677
 }
678 678
 
679
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
680
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
679 681
 func (archiver *Archiver) TarUntar(src, dst string) error {
680 682
 	logrus.Debugf("TarUntar(%s %s)", src, dst)
681 683
 	archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
... ...
@@ -692,6 +710,7 @@ func TarUntar(src, dst string) error {
692 692
 	return defaultArchiver.TarUntar(src, dst)
693 693
 }
694 694
 
695
+// UntarPath untars the tar file at the source path src into the destination dst.
695 696
 func (archiver *Archiver) UntarPath(src, dst string) error {
696 697
 	archive, err := os.Open(src)
697 698
 	if err != nil {
... ...
@@ -710,6 +729,10 @@ func UntarPath(src, dst string) error {
710 710
 	return defaultArchiver.UntarPath(src, dst)
711 711
 }
712 712
 
713
+// CopyWithTar creates a tar archive of filesystem path `src`, and
714
+// unpacks it at filesystem path `dst`.
715
+// The archive is streamed directly with fixed buffering and no
716
+// intermediary disk IO.
713 717
 func (archiver *Archiver) CopyWithTar(src, dst string) error {
714 718
 	srcSt, err := os.Stat(src)
715 719
 	if err != nil {
... ...
@@ -735,6 +758,9 @@ func CopyWithTar(src, dst string) error {
735 735
 	return defaultArchiver.CopyWithTar(src, dst)
736 736
 }
737 737
 
738
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
739
+// for a single file. It copies a regular file from path `src` to
740
+// path `dst`, and preserves all its metadata.
738 741
 func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
739 742
 	logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
740 743
 	srcSt, err := os.Stat(src)
... ...
@@ -878,6 +904,8 @@ func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
878 878
 	return &TempArchive{File: f, Size: size}, nil
879 879
 }
880 880
 
881
+// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
882
+// the file will be deleted.
881 883
 type TempArchive struct {
882 884
 	*os.File
883 885
 	Size   int64 // Pre-computed from Stat().Size() as a convenience
... ...
@@ -18,14 +18,22 @@ import (
18 18
 	"github.com/docker/docker/pkg/system"
19 19
 )
20 20
 
21
+// ChangeType represents the change type.
21 22
 type ChangeType int
22 23
 
23 24
 const (
25
+	// ChangeModify represents the modify operation.
24 26
 	ChangeModify = iota
27
+	// ChangeAdd represents the add operation.
25 28
 	ChangeAdd
29
+	// ChangeDelete represents the delete operation.
26 30
 	ChangeDelete
27 31
 )
28 32
 
33
+// Change represents a change, it wraps the change type and path.
34
+// It describes changes of the files in the path with respect to the
35
+// parent layers. The change could be modify, add, delete.
36
+// This is used for layer diff.
29 37
 type Change struct {
30 38
 	Path string
31 39
 	Kind ChangeType
... ...
@@ -161,6 +169,7 @@ func Changes(layers []string, rw string) ([]Change, error) {
161 161
 	return changes, nil
162 162
 }
163 163
 
164
+// FileInfo describes the information of a file.
164 165
 type FileInfo struct {
165 166
 	parent     *FileInfo
166 167
 	name       string
... ...
@@ -170,11 +179,12 @@ type FileInfo struct {
170 170
 	added      bool
171 171
 }
172 172
 
173
-func (root *FileInfo) LookUp(path string) *FileInfo {
173
+// LookUp looks up the file information of a file.
174
+func (info *FileInfo) LookUp(path string) *FileInfo {
174 175
 	// As this runs on the daemon side, file paths are OS specific.
175
-	parent := root
176
+	parent := info
176 177
 	if path == string(os.PathSeparator) {
177
-		return root
178
+		return info
178 179
 	}
179 180
 
180 181
 	pathElements := strings.Split(path, string(os.PathSeparator))
... ...
@@ -275,6 +285,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
275 275
 
276 276
 }
277 277
 
278
+// Changes returns the list of changes between info and oldInfo.
278 279
 func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
279 280
 	var changes []Change
280 281
 
... ...
@@ -246,7 +246,7 @@ func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
246 246
 // contain the archived resource described by srcInfo, to the destination
247 247
 // described by dstInfo. Returns the possibly modified content archive along
248 248
 // with the path to the destination directory which it should be extracted to.
249
-func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
249
+func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
250 250
 	// Separate the destination path between its directory and base
251 251
 	// components in case the source archive contents need to be rebased.
252 252
 	dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
... ...
@@ -296,7 +296,7 @@ func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (ds
296 296
 
297 297
 // rebaseArchiveEntries rewrites the given srcContent archive replacing
298 298
 // an occurance of oldBase with newBase at the beginning of entry names.
299
-func rebaseArchiveEntries(srcContent ArchiveReader, oldBase, newBase string) Archive {
299
+func rebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
300 300
 	if oldBase == "/" {
301 301
 		// If oldBase specifies the root directory, use an empty string as
302 302
 		// oldBase instead so that newBase doesn't replace the path separator
... ...
@@ -368,7 +368,7 @@ func CopyResource(srcPath, dstPath string) error {
368 368
 
369 369
 // CopyTo handles extracting the given content whose
370 370
 // entries should be sourced from srcInfo to dstPath.
371
-func CopyTo(content ArchiveReader, srcInfo CopyInfo, dstPath string) error {
371
+func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
372 372
 	// The destination path need not exist, but CopyInfoDestinationPath will
373 373
 	// ensure that at least the parent directory exists.
374 374
 	dstInfo, err := CopyInfoDestinationPath(dstPath)
... ...
@@ -16,7 +16,10 @@ import (
16 16
 	"github.com/docker/docker/pkg/system"
17 17
 )
18 18
 
19
-func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
19
+// UnpackLayer unpacks `layer` into `dest`. The stream `layer` can be
20
+// compressed or uncompressed.
21
+// Returns the size in bytes of the contents of the layer.
22
+func UnpackLayer(dest string, layer Reader) (size int64, err error) {
20 23
 	tr := tar.NewReader(layer)
21 24
 	trBuf := pools.BufioReader32KPool.Get(tr)
22 25
 	defer pools.BufioReader32KPool.Put(trBuf)
... ...
@@ -177,7 +180,7 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
177 177
 // and applies it to the directory `dest`. The stream `layer` can be
178 178
 // compressed or uncompressed.
179 179
 // Returns the size in bytes of the contents of the layer.
180
-func ApplyLayer(dest string, layer ArchiveReader) (int64, error) {
180
+func ApplyLayer(dest string, layer Reader) (int64, error) {
181 181
 	return applyLayerHandler(dest, layer, true)
182 182
 }
183 183
 
... ...
@@ -185,12 +188,12 @@ func ApplyLayer(dest string, layer ArchiveReader) (int64, error) {
185 185
 // `layer`, and applies it to the directory `dest`. The stream `layer`
186 186
 // can only be uncompressed.
187 187
 // Returns the size in bytes of the contents of the layer.
188
-func ApplyUncompressedLayer(dest string, layer ArchiveReader) (int64, error) {
188
+func ApplyUncompressedLayer(dest string, layer Reader) (int64, error) {
189 189
 	return applyLayerHandler(dest, layer, false)
190 190
 }
191 191
 
192 192
 // do the bulk load of ApplyLayer, but allow for not calling DecompressStream
193
-func applyLayerHandler(dest string, layer ArchiveReader, decompress bool) (int64, error) {
193
+func applyLayerHandler(dest string, layer Reader, decompress bool) (int64, error) {
194 194
 	dest = filepath.Clean(dest)
195 195
 
196 196
 	// We need to be able to set any perms
... ...
@@ -16,7 +16,7 @@ var testUntarFns = map[string]func(string, io.Reader) error{
16 16
 		return Untar(r, dest, nil)
17 17
 	},
18 18
 	"applylayer": func(dest string, r io.Reader) error {
19
-		_, err := ApplyLayer(dest, ArchiveReader(r))
19
+		_, err := ApplyLayer(dest, Reader(r))
20 20
 		return err
21 21
 	},
22 22
 }
... ...
@@ -6,7 +6,7 @@ import "github.com/docker/docker/pkg/archive"
6 6
 // and applies it to the directory `dest`. The stream `layer` can only be
7 7
 // uncompressed.
8 8
 // Returns the size in bytes of the contents of the layer.
9
-func ApplyLayer(dest string, layer archive.ArchiveReader) (size int64, err error) {
9
+func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) {
10 10
 	return applyLayerHandler(dest, layer, true)
11 11
 }
12 12
 
... ...
@@ -14,6 +14,6 @@ func ApplyLayer(dest string, layer archive.ArchiveReader) (size int64, err error
14 14
 // `layer`, and applies it to the directory `dest`. The stream `layer`
15 15
 // can only be uncompressed.
16 16
 // Returns the size in bytes of the contents of the layer.
17
-func ApplyUncompressedLayer(dest string, layer archive.ArchiveReader) (int64, error) {
17
+func ApplyUncompressedLayer(dest string, layer archive.Reader) (int64, error) {
18 18
 	return applyLayerHandler(dest, layer, false)
19 19
 }
... ...
@@ -68,7 +68,7 @@ func applyLayer() {
68 68
 // applyLayerHandler parses a diff in the standard layer format from `layer`, and
69 69
 // applies it to the directory `dest`. Returns the size in bytes of the
70 70
 // contents of the layer.
71
-func applyLayerHandler(dest string, layer archive.ArchiveReader, decompress bool) (size int64, err error) {
71
+func applyLayerHandler(dest string, layer archive.Reader, decompress bool) (size int64, err error) {
72 72
 	dest = filepath.Clean(dest)
73 73
 	if decompress {
74 74
 		decompressed, err := archive.DecompressStream(layer)
... ...
@@ -12,7 +12,7 @@ import (
12 12
 // applyLayerHandler parses a diff in the standard layer format from `layer`, and
13 13
 // applies it to the directory `dest`. Returns the size in bytes of the
14 14
 // contents of the layer.
15
-func applyLayerHandler(dest string, layer archive.ArchiveReader, decompress bool) (size int64, err error) {
15
+func applyLayerHandler(dest string, layer archive.Reader, decompress bool) (size int64, err error) {
16 16
 	dest = filepath.Clean(dest)
17 17
 	if decompress {
18 18
 		decompressed, err := archive.DecompressStream(layer)