
Properly close archives

All archives that are created from somewhere generally have to be closed, because
at some point a file, a pipe, or something similar backs them. So we make
archive.Archive a ReadCloser. However, code consuming archives does not
typically close them, so we add an archive.ArchiveReader and use that when we're
only reading.
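
To make the split concrete, here is a small hypothetical producer/consumer pair
(exportDir and importLayer are made-up names, and the import path is assumed to
be the dotcloud one in use at the time): the producer returns an archive.Archive
because a pipe or file backs the stream and someone has to Close it, while the
consumer only reads and therefore takes an archive.ArchiveReader.

package example

import "github.com/dotcloud/docker/archive"

// exportDir produces an archive; the returned Archive is an io.ReadCloser,
// so the caller owns the Close.
func exportDir(path string) (archive.Archive, error) {
	return archive.Tar(path, archive.Uncompressed)
}

// importLayer only consumes the stream, so it takes an ArchiveReader
// (a plain io.Reader) and never closes it.
func importLayer(dest string, layer archive.ArchiveReader) error {
	return archive.Untar(layer, dest, nil)
}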

We then change all the Tar/Archive call sites to create ReadClosers and to close
them properly everywhere.
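
As a minimal sketch of that call-site pattern (tarThenUntar is a made-up name;
it mirrors the TarUntar change below): whoever obtains the ReadCloser defers
Close, so the backing pipe is released even on early returns.

package example

import "github.com/dotcloud/docker/archive"

func tarThenUntar(src, dst string) error {
	a, err := archive.TarFilter(src, &archive.TarOptions{Compression: archive.Uncompressed})
	if err != nil {
		return err
	}
	// Close releases the pipe (and any compressor) backing the tar stream.
	defer a.Close()
	return archive.Untar(a, dst, nil)
}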

As an added bonus we can use ReadCloserWrapper rather than EofReader in several places,
which is good because EofReader doesn't always work right. For instance, many compression
schemes such as gzip know the data has ended before reading an EOF from the underlying
stream, so the EofReader never sees an EOF.
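
For reference, a wrapper in the spirit of utils.NewReadCloserWrapper (the real
helper lives in the utils package; this is only an illustrative sketch): reads
pass straight through, and the cleanup runs exactly when Close is called rather
than whenever an io.EOF happens to be observed.

package example

import "io"

// readCloserWrapper forwards reads to the inner reader and runs a
// caller-supplied closer when Close is called. Unlike an EOF-triggered
// callback, this fires even if the consumer (e.g. a gzip reader) stops
// reading before the underlying stream returns io.EOF.
type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (w *readCloserWrapper) Close() error {
	return w.closer()
}

func newReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
	return &readCloserWrapper{Reader: r, closer: closer}
}

In the hunks below the closer both closes the wrapped archive and releases the
mount or driver reference, so teardown no longer depends on the reader ever
reaching EOF.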

Docker-DCO-1.1-Signed-off-by: Alexander Larsson <alexl@redhat.com> (github: alexlarsson)

Alexander Larsson authored on 2014/02/14 20:41:46
Showing 13 changed files
... ...
@@ -19,9 +19,10 @@ import (
 )
 
 type (
-	Archive     io.Reader
-	Compression int
-	TarOptions  struct {
+	Archive       io.ReadCloser
+	ArchiveReader io.Reader
+	Compression   int
+	TarOptions    struct {
 		Includes    []string
 		Compression Compression
 	}
... ...
@@ -269,7 +270,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader *tar.Reader)
 
 // Tar creates an archive from the directory at `path`, and returns it as a
 // stream of bytes.
-func Tar(path string, compression Compression) (io.Reader, error) {
+func Tar(path string, compression Compression) (io.ReadCloser, error) {
 	return TarFilter(path, &TarOptions{Compression: compression})
 }
 
... ...
@@ -291,7 +292,7 @@ func escapeName(name string) string {
 
 // Tar creates an archive from the directory at `path`, only including files whose relative
 // paths are included in `filter`. If `filter` is nil, then all files are included.
-func TarFilter(srcPath string, options *TarOptions) (io.Reader, error) {
+func TarFilter(srcPath string, options *TarOptions) (io.ReadCloser, error) {
 	pipeReader, pipeWriter := io.Pipe()
 
 	compressWriter, err := CompressStream(pipeWriter, options.Compression)
... ...
@@ -436,15 +437,19 @@ func TarUntar(src string, dst string) error {
 	if err != nil {
 		return err
 	}
+	defer archive.Close()
 	return Untar(archive, dst, nil)
 }
 
 // UntarPath is a convenience function which looks for an archive
 // at filesystem path `src`, and unpacks it at `dst`.
 func UntarPath(src, dst string) error {
-	if archive, err := os.Open(src); err != nil {
+	archive, err := os.Open(src)
+	if err != nil {
 		return err
-	} else if err := Untar(archive, dst, nil); err != nil {
+	}
+	defer archive.Close()
+	if err := Untar(archive, dst, nil); err != nil {
 		return err
 	}
 	return nil
... ...
@@ -67,12 +67,13 @@ func tarUntar(t *testing.T, origin string, compression Compression) error {
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer archive.Close()
 
 	buf := make([]byte, 10)
 	if _, err := archive.Read(buf); err != nil {
 		return err
 	}
-	archive = io.MultiReader(bytes.NewReader(buf), archive)
+	wrap := io.MultiReader(bytes.NewReader(buf), archive)
 
 	detectedCompression := DetectCompression(buf)
 	if detectedCompression.Extension() != compression.Extension() {
... ...
@@ -84,7 +85,7 @@ func tarUntar(t *testing.T, origin string, compression Compression) error {
 		return err
 	}
 	defer os.RemoveAll(tmp)
-	if err := Untar(archive, tmp, nil); err != nil {
+	if err := Untar(wrap, tmp, nil); err != nil {
 		return err
 	}
 	if _, err := os.Stat(tmp); err != nil {
... ...
@@ -28,7 +28,7 @@ func timeToTimespec(time time.Time) (ts syscall.Timespec) {
 
 // ApplyLayer parses a diff in the standard layer format from `layer`, and
 // applies it to the directory `dest`.
-func ApplyLayer(dest string, layer Archive) error {
+func ApplyLayer(dest string, layer ArchiveReader) error {
 	// We need to be able to set any perms
 	oldmask := syscall.Umask(0)
 	defer syscall.Umask(oldmask)
... ...
@@ -464,6 +464,7 @@ func (b *buildFile) CmdAdd(args string) error {
 		}
 		tarSum := utils.TarSum{Reader: r, DisableCompression: true}
 		remoteHash = tarSum.Sum(nil)
+		r.Close()
 
 		// If the destination is a directory, figure out the filename.
 		if strings.HasSuffix(dest, "/") {
... ...
@@ -158,7 +158,7 @@ func MkBuildContext(dockerfile string, files [][2]string) (archive.Archive, erro
 	if err := tw.Close(); err != nil {
 		return nil, err
 	}
-	return buf, nil
+	return ioutil.NopCloser(buf), nil
 }
 
 func (cli *DockerCli) CmdBuild(args ...string) error {
... ...
@@ -206,7 +206,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 	// FIXME: ProgressReader shouldn't be this annoying to use
 	if context != nil {
 		sf := utils.NewStreamFormatter(false)
-		body = utils.ProgressReader(ioutil.NopCloser(context), 0, cli.err, sf, true, "", "Uploading context")
+		body = utils.ProgressReader(context, 0, cli.err, sf, true, "", "Uploading context")
 	}
 	// Upload the build context
 	v := &url.Values{}
... ...
@@ -1288,7 +1288,11 @@ func (container *Container) ExportRw() (archive.Archive, error) {
 		container.Unmount()
 		return nil, err
 	}
-	return EofReader(archive, func() { container.Unmount() }), nil
+	return utils.NewReadCloserWrapper(archive, func() error {
+		err := archive.Close()
+		container.Unmount()
+		return err
+	}), nil
 }
 
 func (container *Container) Export() (archive.Archive, error) {
... ...
@@ -1301,7 +1305,11 @@ func (container *Container) Export() (archive.Archive, error) {
 		container.Unmount()
 		return nil, err
 	}
-	return EofReader(archive, func() { container.Unmount() }), nil
+	return utils.NewReadCloserWrapper(archive, func() error {
+		err := archive.Close()
+		container.Unmount()
+		return err
+	}), nil
 }
 
 func (container *Container) WaitTimeout(timeout time.Duration) error {
... ...
@@ -1455,7 +1463,11 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
 	if err != nil {
 		return nil, err
 	}
-	return utils.NewReadCloserWrapper(archive, container.Unmount), nil
+	return utils.NewReadCloserWrapper(archive, func() error {
+		err := archive.Close()
+		container.Unmount()
+		return err
+	}), nil
 }
 
 // Returns true if the container exposes a certain port
... ...
@@ -127,7 +127,7 @@ func (graph *Graph) Get(name string) (*Image, error) {
 }
 
 // Create creates a new image and registers it in the graph.
-func (graph *Graph) Create(layerData archive.Archive, container *Container, comment, author string, config *runconfig.Config) (*Image, error) {
+func (graph *Graph) Create(layerData archive.ArchiveReader, container *Container, comment, author string, config *runconfig.Config) (*Image, error) {
 	img := &Image{
 		ID:            GenerateID(),
 		Comment:       comment,
... ...
@@ -151,7 +151,7 @@ func (graph *Graph) Create(layerData archive.Archive, container *Container, comm
 
 // Register imports a pre-existing image into the graph.
 // FIXME: pass img as first argument
-func (graph *Graph) Register(jsonData []byte, layerData archive.Archive, img *Image) (err error) {
+func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, img *Image) (err error) {
 	defer func() {
 		// If any error occurs, remove the new dir from the driver.
 		// Don't check for errors since the dir might not have been created.
... ...
@@ -226,7 +226,9 @@ func (graph *Graph) TempLayerArchive(id string, compression archive.Compression,
 	if err != nil {
 		return nil, err
 	}
-	return archive.NewTempArchive(utils.ProgressReader(ioutil.NopCloser(a), 0, output, sf, false, utils.TruncateID(id), "Buffering to disk"), tmp)
+	progress := utils.ProgressReader(a, 0, output, sf, false, utils.TruncateID(id), "Buffering to disk")
+	defer progress.Close()
+	return archive.NewTempArchive(progress, tmp)
 }
 
 // Mktemp creates a temporary sub-directory inside the graph's filesystem.
... ...
@@ -271,7 +271,7 @@ func (a *Driver) Diff(id string) (archive.Archive, error) {
 	})
 }
 
-func (a *Driver) ApplyDiff(id string, diff archive.Archive) error {
+func (a *Driver) ApplyDiff(id string, diff archive.ArchiveReader) error {
 	return archive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil)
 }
 
... ...
@@ -28,7 +28,7 @@ type Driver interface {
 type Differ interface {
 	Diff(id string) (archive.Archive, error)
 	Changes(id string) ([]archive.Change, error)
-	ApplyDiff(id string, diff archive.Archive) error
+	ApplyDiff(id string, diff archive.ArchiveReader) error
 	DiffSize(id string) (bytes int64, err error)
 }
 
... ...
@@ -67,7 +67,7 @@ func LoadImage(root string) (*Image, error) {
 	return img, nil
 }
 
-func StoreImage(img *Image, jsonData []byte, layerData archive.Archive, root, layer string) error {
+func StoreImage(img *Image, jsonData []byte, layerData archive.ArchiveReader, root, layer string) error {
 	// Store the layer
 	var (
 		size   int64
... ...
@@ -174,7 +174,11 @@ func (img *Image) TarLayer() (arch archive.Archive, err error) {
 		if err != nil {
 			return nil, err
 		}
-		return EofReader(archive, func() { driver.Put(img.ID) }), nil
+		return utils.NewReadCloserWrapper(archive, func() error {
+			err := archive.Close()
+			driver.Put(img.ID)
+			return err
+		}), nil
 	}
 
 	parentFs, err := driver.Get(img.Parent)
... ...
@@ -190,7 +194,11 @@ func (img *Image) TarLayer() (arch archive.Archive, err error) {
 	if err != nil {
 		return nil, err
 	}
-	return EofReader(archive, func() { driver.Put(img.ID) }), nil
+	return utils.NewReadCloserWrapper(archive, func() error {
+		err := archive.Close()
+		driver.Put(img.ID)
+		return err
+	}), nil
 }
 
 func ValidateID(id string) error {
... ...
@@ -319,7 +319,7 @@ func runContainer(eng *engine.Engine, r *docker.Runtime, args []string, t *testi
 }
 
 // FIXME: this is duplicated from graph_test.go in the docker package.
-func fakeTar() (io.Reader, error) {
+func fakeTar() (io.ReadCloser, error) {
 	content := []byte("Hello world!\n")
 	buf := new(bytes.Buffer)
 	tw := tar.NewWriter(buf)
... ...
@@ -333,7 +333,7 @@ func fakeTar() (io.Reader, error) {
 		tw.Write([]byte(content))
 	}
 	tw.Close()
-	return buf, nil
+	return ioutil.NopCloser(buf), nil
 }
 
 func getAllImages(eng *engine.Engine, t *testing.T) *engine.Table {
... ...
@@ -531,6 +531,8 @@ func (runtime *Runtime) Commit(container *Container, repository, tag, comment, a
 	if err != nil {
 		return nil, err
 	}
+	defer rwTar.Close()
+
 	// Create a new image from the container's base layers + a new layer from container changes
 	img, err := runtime.graph.Create(rwTar, container, comment, author, config)
 	if err != nil {
... ...
@@ -817,7 +819,11 @@ func (runtime *Runtime) Diff(container *Container) (archive.Archive, error) {
 	if err != nil {
 		return nil, err
 	}
-	return EofReader(archive, func() { runtime.driver.Put(container.ID) }), nil
+	return utils.NewReadCloserWrapper(archive, func() error {
+		err := archive.Close()
+		runtime.driver.Put(container.ID)
+		return err
+	}), nil
 }
 
 func (runtime *Runtime) Run(c *Container, startCallback execdriver.StartCallback) (int, error) {
... ...
@@ -292,6 +292,7 @@ func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
 		if err != nil {
 			return job.Errorf("%s: %s", name, err)
 		}
+		defer data.Close()
 
 		// Stream the entire contents of the container (basically a volatile snapshot)
 		if _, err := io.Copy(job.Stdout, data); err != nil {
... ...
@@ -361,6 +362,7 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status {
 	if err != nil {
 		return job.Error(err)
 	}
+	defer fs.Close()
 
 	if _, err := io.Copy(job.Stdout, fs); err != nil {
 		return job.Error(err)
... ...
@@ -400,6 +402,7 @@ func (srv *Server) exportImage(image *Image, tempdir string) error {
 		if err != nil {
 			return err
 		}
+		defer fs.Close()
 
 		fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar"))
 		if err != nil {
... ...
@@ -436,14 +439,14 @@ func (srv *Server) Build(job *engine.Job) engine.Status {
 		authConfig     = &auth.AuthConfig{}
 		configFile     = &auth.ConfigFile{}
 		tag            string
-		context        io.Reader
+		context        io.ReadCloser
 	)
 	job.GetenvJson("authConfig", authConfig)
 	job.GetenvJson("configFile", configFile)
 	repoName, tag = utils.ParseRepositoryTag(repoName)
 
 	if remoteURL == "" {
-		context = job.Stdin
+		context = ioutil.NopCloser(job.Stdin)
 	} else if utils.IsGIT(remoteURL) {
 		if !strings.HasPrefix(remoteURL, "git://") {
 			remoteURL = "https://" + remoteURL
... ...
@@ -479,6 +482,7 @@ func (srv *Server) Build(job *engine.Job) engine.Status {
 		}
 		context = c
 	}
+	defer context.Close()
 
 	sf := utils.NewStreamFormatter(job.GetenvBool("json"))
 	b := NewBuildFile(srv,
... ...
@@ -1575,7 +1579,7 @@ func (srv *Server) ImageImport(job *engine.Job) engine.Status {
 		repo    = job.Args[1]
 		tag     string
 		sf      = utils.NewStreamFormatter(job.GetenvBool("json"))
-		archive io.Reader
+		archive archive.ArchiveReader
 		resp    *http.Response
 	)
 	if len(job.Args) > 2 {
... ...
@@ -1601,7 +1605,9 @@ func (srv *Server) ImageImport(job *engine.Job) engine.Status {
 		if err != nil {
 			return job.Error(err)
 		}
-		archive = utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing")
+		progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing")
+		defer progressReader.Close()
+		archive = progressReader
 	}
 	img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "", nil)
 	if err != nil {