add defer file.Close() calls to avoid potential fd leaks
| ... | ... |
@@ -255,9 +255,9 @@ func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) {
|
| 255 | 255 |
// ignoring error because the file was already opened successfully |
| 256 | 256 |
tmpFileSt, err := tmpFile.Stat() |
| 257 | 257 |
if err != nil {
|
| 258 |
+ tmpFile.Close() |
|
| 258 | 259 |
return |
| 259 | 260 |
} |
| 260 |
- tmpFile.Close() |
|
| 261 | 261 |
|
| 262 | 262 |
// Set the mtime to the Last-Modified header value if present |
| 263 | 263 |
// Otherwise just remove atime and mtime |
| ... | ... |
@@ -272,6 +272,8 @@ func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) {
|
| 272 | 272 |
} |
| 273 | 273 |
} |
| 274 | 274 |
|
| 275 |
+ tmpFile.Close() |
|
| 276 |
+ |
|
| 275 | 277 |
if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
|
| 276 | 278 |
return |
| 277 | 279 |
} |
| ... | ... |
@@ -38,6 +38,7 @@ func TestTestNegative(t *testing.T) {
|
| 38 | 38 |
if err != nil {
|
| 39 | 39 |
t.Fatalf("Dockerfile missing for %s: %v", dir, err)
|
| 40 | 40 |
} |
| 41 |
+ defer df.Close() |
|
| 41 | 42 |
|
| 42 | 43 |
d := Directive{LookingForDirectives: true}
|
| 43 | 44 |
SetEscapeToken(DefaultEscapeToken, &d) |
| ... | ... |
@@ -45,8 +46,6 @@ func TestTestNegative(t *testing.T) {
|
| 45 | 45 |
if err == nil {
|
| 46 | 46 |
t.Fatalf("No error parsing broken dockerfile for %s", dir)
|
| 47 | 47 |
} |
| 48 |
- |
|
| 49 |
- df.Close() |
|
| 50 | 48 |
} |
| 51 | 49 |
} |
| 52 | 50 |
|
| ... | ... |
@@ -36,6 +36,7 @@ func (c DockerIgnoreContext) Process(filesToRemove []string) error {
|
| 36 | 36 |
return err |
| 37 | 37 |
} |
| 38 | 38 |
excludes, _ := dockerignore.ReadAll(f) |
| 39 |
+ f.Close() |
|
| 39 | 40 |
filesToRemove = append([]string{".dockerignore"}, filesToRemove...)
|
| 40 | 41 |
for _, fileToRemove := range filesToRemove {
|
| 41 | 42 |
rm, _ := fileutils.Matches(fileToRemove, excludes) |
| ... | ... |
@@ -12,11 +12,11 @@ import ( |
| 12 | 12 |
// ReadAll reads a .dockerignore file and returns the list of file patterns |
| 13 | 13 |
// to ignore. Note this will trim whitespace from each line as well |
| 14 | 14 |
// as use GO's "clean" func to get the shortest/cleanest path for each. |
| 15 |
-func ReadAll(reader io.ReadCloser) ([]string, error) {
|
|
| 15 |
+func ReadAll(reader io.Reader) ([]string, error) {
|
|
| 16 | 16 |
if reader == nil {
|
| 17 | 17 |
return nil, nil |
| 18 | 18 |
} |
| 19 |
- defer reader.Close() |
|
| 19 |
+ |
|
| 20 | 20 |
scanner := bufio.NewScanner(reader) |
| 21 | 21 |
var excludes []string |
| 22 | 22 |
currentLine := 0 |
| ... | ... |
@@ -484,6 +484,8 @@ func TestJsonSaveWithNoFile(t *testing.T) {
|
| 484 | 484 |
|
| 485 | 485 |
fn := filepath.Join(tmpHome, ConfigFileName) |
| 486 | 486 |
f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) |
| 487 |
+ defer f.Close() |
|
| 488 |
+ |
|
| 487 | 489 |
err = config.SaveToWriter(f) |
| 488 | 490 |
if err != nil {
|
| 489 | 491 |
t.Fatalf("Failed saving to file: %q", err)
|
| ... | ... |
@@ -522,6 +524,8 @@ func TestLegacyJsonSaveWithNoFile(t *testing.T) {
|
| 522 | 522 |
|
| 523 | 523 |
fn := filepath.Join(tmpHome, ConfigFileName) |
| 524 | 524 |
f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) |
| 525 |
+ defer f.Close() |
|
| 526 |
+ |
|
| 525 | 527 |
if err = config.SaveToWriter(f); err != nil {
|
| 526 | 528 |
t.Fatalf("Failed saving to file: %q", err)
|
| 527 | 529 |
} |
| ... | ... |
@@ -55,6 +55,8 @@ func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.R |
| 55 | 55 |
} |
| 56 | 56 |
continue |
| 57 | 57 |
} |
| 58 |
+ defer f.Close() |
|
| 59 |
+ |
|
| 58 | 60 |
files = append(files, f) |
| 59 | 61 |
} |
| 60 | 62 |
|
| ... | ... |
@@ -63,6 +65,7 @@ func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.R |
| 63 | 63 |
logWatcher.Err <- err |
| 64 | 64 |
return |
| 65 | 65 |
} |
| 66 |
+ defer latestFile.Close() |
|
| 66 | 67 |
|
| 67 | 68 |
if config.Tail != 0 {
|
| 68 | 69 |
tailer := ioutils.MultiReadSeeker(append(files, latestFile)...) |
| ... | ... |
@@ -174,17 +174,18 @@ func (s *saveSession) save(outStream io.Writer) error {
|
| 174 | 174 |
|
| 175 | 175 |
if len(reposLegacy) > 0 {
|
| 176 | 176 |
reposFile := filepath.Join(tempDir, legacyRepositoriesFileName) |
| 177 |
- f, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) |
|
| 177 |
+ rf, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) |
|
| 178 | 178 |
if err != nil {
|
| 179 |
- f.Close() |
|
| 180 | 179 |
return err |
| 181 | 180 |
} |
| 182 |
- if err := json.NewEncoder(f).Encode(reposLegacy); err != nil {
|
|
| 183 |
- return err |
|
| 184 |
- } |
|
| 185 |
- if err := f.Close(); err != nil {
|
|
| 181 |
+ |
|
| 182 |
+ if err := json.NewEncoder(rf).Encode(reposLegacy); err != nil {
|
|
| 183 |
+ rf.Close() |
|
| 186 | 184 |
return err |
| 187 | 185 |
} |
| 186 |
+ |
|
| 187 |
+ rf.Close() |
|
| 188 |
+ |
|
| 188 | 189 |
if err := system.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
|
| 189 | 190 |
return err |
| 190 | 191 |
} |
| ... | ... |
@@ -193,15 +194,16 @@ func (s *saveSession) save(outStream io.Writer) error {
|
| 193 | 193 |
manifestFileName := filepath.Join(tempDir, manifestFileName) |
| 194 | 194 |
f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) |
| 195 | 195 |
if err != nil {
|
| 196 |
- f.Close() |
|
| 197 | 196 |
return err |
| 198 | 197 |
} |
| 198 |
+ |
|
| 199 | 199 |
if err := json.NewEncoder(f).Encode(manifest); err != nil {
|
| 200 |
+ f.Close() |
|
| 200 | 201 |
return err |
| 201 | 202 |
} |
| 202 |
- if err := f.Close(); err != nil {
|
|
| 203 |
- return err |
|
| 204 |
- } |
|
| 203 |
+ |
|
| 204 |
+ f.Close() |
|
| 205 |
+ |
|
| 205 | 206 |
if err := system.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
|
| 206 | 207 |
return err |
| 207 | 208 |
} |
| ... | ... |
@@ -1151,6 +1151,8 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) {
|
| 1151 | 1151 |
if err != nil {
|
| 1152 | 1152 |
c.Fatal(err) |
| 1153 | 1153 |
} |
| 1154 |
+ defer f.Close() |
|
| 1155 |
+ |
|
| 1154 | 1156 |
var res struct {
|
| 1155 | 1157 |
Log string `json:"log"` |
| 1156 | 1158 |
Stream string `json:"stream"` |
| ... | ... |
@@ -1229,6 +1231,8 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *check.C) {
|
| 1229 | 1229 |
if err != nil {
|
| 1230 | 1230 |
c.Fatal(err) |
| 1231 | 1231 |
} |
| 1232 |
+ defer f.Close() |
|
| 1233 |
+ |
|
| 1232 | 1234 |
var res struct {
|
| 1233 | 1235 |
Log string `json:"log"` |
| 1234 | 1236 |
Stream string `json:"stream"` |
| ... | ... |
@@ -131,6 +131,7 @@ func testPushEmptyLayer(c *check.C) {
|
| 131 | 131 |
|
| 132 | 132 |
freader, err := os.Open(emptyTarball.Name()) |
| 133 | 133 |
c.Assert(err, check.IsNil, check.Commentf("Could not open test tarball"))
|
| 134 |
+ defer freader.Close() |
|
| 134 | 135 |
|
| 135 | 136 |
importCmd := exec.Command(dockerBinary, "import", "-", repoName) |
| 136 | 137 |
importCmd.Stdin = freader |
| ... | ... |
@@ -283,6 +283,7 @@ func (s *DockerSuite) TestSaveDirectoryPermissions(c *check.C) {
|
| 283 | 283 |
|
| 284 | 284 |
f, err := os.Open(layerPath) |
| 285 | 285 |
c.Assert(err, checker.IsNil, check.Commentf("failed to open %s: %s", layerPath, err))
|
| 286 |
+ defer f.Close() |
|
| 286 | 287 |
|
| 287 | 288 |
entries, err := listTar(f) |
| 288 | 289 |
for _, e := range entries {
|
| ... | ... |
@@ -61,10 +61,10 @@ func newTestNotary(c *check.C) (*testNotary, error) {
|
| 61 | 61 |
} |
| 62 | 62 |
confPath := filepath.Join(tmp, "config.json") |
| 63 | 63 |
config, err := os.Create(confPath) |
| 64 |
- defer config.Close() |
|
| 65 | 64 |
if err != nil {
|
| 66 | 65 |
return nil, err |
| 67 | 66 |
} |
| 67 |
+ defer config.Close() |
|
| 68 | 68 |
|
| 69 | 69 |
workingDir, err := os.Getwd() |
| 70 | 70 |
if err != nil {
|
| ... | ... |
@@ -78,10 +78,11 @@ func newTestNotary(c *check.C) (*testNotary, error) {
|
| 78 | 78 |
// generate client config |
| 79 | 79 |
clientConfPath := filepath.Join(tmp, "client-config.json") |
| 80 | 80 |
clientConfig, err := os.Create(clientConfPath) |
| 81 |
- defer clientConfig.Close() |
|
| 82 | 81 |
if err != nil {
|
| 83 | 82 |
return nil, err |
| 84 | 83 |
} |
| 84 |
+ defer clientConfig.Close() |
|
| 85 |
+ |
|
| 85 | 86 |
template = `{
|
| 86 | 87 |
"trust_dir" : "%s", |
| 87 | 88 |
"remote_server": {
|
| ... | ... |
@@ -741,18 +741,20 @@ func TestTarStreamVerification(t *testing.T) {
|
| 741 | 741 |
if err != nil {
|
| 742 | 742 |
t.Fatal(err) |
| 743 | 743 |
} |
| 744 |
+ defer src.Close() |
|
| 744 | 745 |
|
| 745 | 746 |
dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Hex(), "tar-split.json.gz")) |
| 746 | 747 |
if err != nil {
|
| 747 | 748 |
t.Fatal(err) |
| 748 | 749 |
} |
| 750 |
+ defer dst.Close() |
|
| 749 | 751 |
|
| 750 | 752 |
if _, err := io.Copy(dst, src); err != nil {
|
| 751 | 753 |
t.Fatal(err) |
| 752 | 754 |
} |
| 753 | 755 |
|
| 754 |
- src.Close() |
|
| 755 |
- dst.Close() |
|
| 756 |
+ src.Sync() |
|
| 757 |
+ dst.Sync() |
|
| 756 | 758 |
|
| 757 | 759 |
ts, err := layer2.TarStream() |
| 758 | 760 |
if err != nil {
|
| ... | ... |
@@ -216,11 +216,11 @@ func (r *remote) Client(b Backend) (Client, error) {
|
| 216 | 216 |
|
| 217 | 217 |
func (r *remote) updateEventTimestamp(t time.Time) {
|
| 218 | 218 |
f, err := os.OpenFile(r.eventTsPath, syscall.O_CREAT|syscall.O_WRONLY|syscall.O_TRUNC, 0600) |
| 219 |
- defer f.Close() |
|
| 220 | 219 |
if err != nil {
|
| 221 | 220 |
logrus.Warnf("libcontainerd: failed to open event timestamp file: %v", err)
|
| 222 | 221 |
return |
| 223 | 222 |
} |
| 223 |
+ defer f.Close() |
|
| 224 | 224 |
|
| 225 | 225 |
b, err := t.MarshalText() |
| 226 | 226 |
if err != nil {
|
| ... | ... |
@@ -245,11 +245,11 @@ func (r *remote) getLastEventTimestamp() time.Time {
|
| 245 | 245 |
} |
| 246 | 246 |
|
| 247 | 247 |
f, err := os.Open(r.eventTsPath) |
| 248 |
- defer f.Close() |
|
| 249 | 248 |
if err != nil {
|
| 250 | 249 |
logrus.Warnf("libcontainerd: Unable to access last event ts: %v", err)
|
| 251 | 250 |
return t |
| 252 | 251 |
} |
| 252 |
+ defer f.Close() |
|
| 253 | 253 |
|
| 254 | 254 |
b := make([]byte, fi.Size()) |
| 255 | 255 |
n, err := f.Read(b) |
| ... | ... |
@@ -329,10 +329,10 @@ func (r *remote) handleEventStream(events containerd.API_EventsClient) {
|
| 329 | 329 |
func (r *remote) runContainerdDaemon() error {
|
| 330 | 330 |
pidFilename := filepath.Join(r.stateDir, containerdPidFilename) |
| 331 | 331 |
f, err := os.OpenFile(pidFilename, os.O_RDWR|os.O_CREATE, 0600) |
| 332 |
- defer f.Close() |
|
| 333 | 332 |
if err != nil {
|
| 334 | 333 |
return err |
| 335 | 334 |
} |
| 335 |
+ defer f.Close() |
|
| 336 | 336 |
|
| 337 | 337 |
// File exist, check if the daemon is alive |
| 338 | 338 |
b := make([]byte, 8) |
| ... | ... |
@@ -101,6 +101,11 @@ func TestDecompressStreamGzip(t *testing.T) {
|
| 101 | 101 |
t.Fatalf("Fail to create an archive file for test : %s.", output)
|
| 102 | 102 |
} |
| 103 | 103 |
archive, err := os.Open(tmp + "archive.gz") |
| 104 |
+ if err != nil {
|
|
| 105 |
+ t.Fatalf("Fail to open file archive.gz")
|
|
| 106 |
+ } |
|
| 107 |
+ defer archive.Close() |
|
| 108 |
+ |
|
| 104 | 109 |
_, err = DecompressStream(archive) |
| 105 | 110 |
if err != nil {
|
| 106 | 111 |
t.Fatalf("Failed to decompress a gzip file.")
|
| ... | ... |
@@ -114,6 +119,11 @@ func TestDecompressStreamBzip2(t *testing.T) {
|
| 114 | 114 |
t.Fatalf("Fail to create an archive file for test : %s.", output)
|
| 115 | 115 |
} |
| 116 | 116 |
archive, err := os.Open(tmp + "archive.bz2") |
| 117 |
+ if err != nil {
|
|
| 118 |
+ t.Fatalf("Fail to open file archive.bz2")
|
|
| 119 |
+ } |
|
| 120 |
+ defer archive.Close() |
|
| 121 |
+ |
|
| 117 | 122 |
_, err = DecompressStream(archive) |
| 118 | 123 |
if err != nil {
|
| 119 | 124 |
t.Fatalf("Failed to decompress a bzip2 file.")
|
| ... | ... |
@@ -130,6 +140,10 @@ func TestDecompressStreamXz(t *testing.T) {
|
| 130 | 130 |
t.Fatalf("Fail to create an archive file for test : %s.", output)
|
| 131 | 131 |
} |
| 132 | 132 |
archive, err := os.Open(tmp + "archive.xz") |
| 133 |
+ if err != nil {
|
|
| 134 |
+ t.Fatalf("Fail to open file archive.xz")
|
|
| 135 |
+ } |
|
| 136 |
+ defer archive.Close() |
|
| 133 | 137 |
_, err = DecompressStream(archive) |
| 134 | 138 |
if err != nil {
|
| 135 | 139 |
t.Fatalf("Failed to decompress an xz file.")
|
| ... | ... |
@@ -141,6 +155,8 @@ func TestCompressStreamXzUnsuported(t *testing.T) {
|
| 141 | 141 |
if err != nil {
|
| 142 | 142 |
t.Fatalf("Fail to create the destination file")
|
| 143 | 143 |
} |
| 144 |
+ defer dest.Close() |
|
| 145 |
+ |
|
| 144 | 146 |
_, err = CompressStream(dest, Xz) |
| 145 | 147 |
if err == nil {
|
| 146 | 148 |
t.Fatalf("Should fail as xz is unsupported for compression format.")
|
| ... | ... |
@@ -152,6 +168,8 @@ func TestCompressStreamBzip2Unsupported(t *testing.T) {
|
| 152 | 152 |
if err != nil {
|
| 153 | 153 |
t.Fatalf("Fail to create the destination file")
|
| 154 | 154 |
} |
| 155 |
+ defer dest.Close() |
|
| 156 |
+ |
|
| 155 | 157 |
_, err = CompressStream(dest, Xz) |
| 156 | 158 |
if err == nil {
|
| 157 | 159 |
t.Fatalf("Should fail as xz is unsupported for compression format.")
|
| ... | ... |
@@ -163,6 +181,8 @@ func TestCompressStreamInvalid(t *testing.T) {
|
| 163 | 163 |
if err != nil {
|
| 164 | 164 |
t.Fatalf("Fail to create the destination file")
|
| 165 | 165 |
} |
| 166 |
+ defer dest.Close() |
|
| 167 |
+ |
|
| 166 | 168 |
_, err = CompressStream(dest, -1) |
| 167 | 169 |
if err == nil {
|
| 168 | 170 |
t.Fatalf("Should fail as xz is unsupported for compression format.")
|
| ... | ... |
@@ -795,6 +815,8 @@ func TestUntarUstarGnuConflict(t *testing.T) {
|
| 795 | 795 |
if err != nil {
|
| 796 | 796 |
t.Fatal(err) |
| 797 | 797 |
} |
| 798 |
+ defer f.Close() |
|
| 799 |
+ |
|
| 798 | 800 |
found := false |
| 799 | 801 |
tr := tar.NewReader(f) |
| 800 | 802 |
// Iterate through the files in the archive. |
| ... | ... |
@@ -14,6 +14,8 @@ func TestTarSumRemoveNonExistent(t *testing.T) {
|
| 14 | 14 |
if err != nil {
|
| 15 | 15 |
t.Fatal(err) |
| 16 | 16 |
} |
| 17 |
+ defer reader.Close() |
|
| 18 |
+ |
|
| 17 | 19 |
ts, err := NewTarSum(reader, false, Version0) |
| 18 | 20 |
if err != nil {
|
| 19 | 21 |
t.Fatal(err) |
| ... | ... |
@@ -42,6 +44,8 @@ func TestTarSumRemove(t *testing.T) {
|
| 42 | 42 |
if err != nil {
|
| 43 | 43 |
t.Fatal(err) |
| 44 | 44 |
} |
| 45 |
+ defer reader.Close() |
|
| 46 |
+ |
|
| 45 | 47 |
ts, err := NewTarSum(reader, false, Version0) |
| 46 | 48 |
if err != nil {
|
| 47 | 49 |
t.Fatal(err) |
| ... | ... |
@@ -200,6 +200,8 @@ func TestNewTarSumForLabel(t *testing.T) {
|
| 200 | 200 |
if err != nil {
|
| 201 | 201 |
t.Fatal(err) |
| 202 | 202 |
} |
| 203 |
+ defer reader.Close() |
|
| 204 |
+ |
|
| 203 | 205 |
label := strings.Split(layer.tarsum, ":")[0] |
| 204 | 206 |
ts, err := NewTarSumForLabel(reader, false, label) |
| 205 | 207 |
if err != nil {
|
| ... | ... |
@@ -302,6 +304,8 @@ func TestTarSumsReadSize(t *testing.T) {
|
| 302 | 302 |
if err != nil {
|
| 303 | 303 |
t.Fatal(err) |
| 304 | 304 |
} |
| 305 |
+ defer reader.Close() |
|
| 306 |
+ |
|
| 305 | 307 |
ts, err := NewTarSum(reader, false, layer.version) |
| 306 | 308 |
if err != nil {
|
| 307 | 309 |
t.Fatal(err) |
| ... | ... |
@@ -380,6 +384,8 @@ func TestTarSums(t *testing.T) {
|
| 380 | 380 |
t.Errorf("failed to open %s: %s", layer.jsonfile, err)
|
| 381 | 381 |
continue |
| 382 | 382 |
} |
| 383 |
+ defer jfh.Close() |
|
| 384 |
+ |
|
| 383 | 385 |
buf, err := ioutil.ReadAll(jfh) |
| 384 | 386 |
if err != nil {
|
| 385 | 387 |
t.Errorf("failed to readAll %s: %s", layer.jsonfile, err)
|
| ... | ... |
@@ -559,12 +565,13 @@ func Benchmark9kTar(b *testing.B) {
|
| 559 | 559 |
b.Error(err) |
| 560 | 560 |
return |
| 561 | 561 |
} |
| 562 |
+ defer fh.Close() |
|
| 563 |
+ |
|
| 562 | 564 |
n, err := io.Copy(buf, fh) |
| 563 | 565 |
if err != nil {
|
| 564 | 566 |
b.Error(err) |
| 565 | 567 |
return |
| 566 | 568 |
} |
| 567 |
- fh.Close() |
|
| 568 | 569 |
|
| 569 | 570 |
reader := bytes.NewReader(buf.Bytes()) |
| 570 | 571 |
|
| ... | ... |
@@ -589,12 +596,13 @@ func Benchmark9kTarGzip(b *testing.B) {
|
| 589 | 589 |
b.Error(err) |
| 590 | 590 |
return |
| 591 | 591 |
} |
| 592 |
+ defer fh.Close() |
|
| 593 |
+ |
|
| 592 | 594 |
n, err := io.Copy(buf, fh) |
| 593 | 595 |
if err != nil {
|
| 594 | 596 |
b.Error(err) |
| 595 | 597 |
return |
| 596 | 598 |
} |
| 597 |
- fh.Close() |
|
| 598 | 599 |
|
| 599 | 600 |
reader := bytes.NewReader(buf.Bytes()) |
| 600 | 601 |
|
| ... | ... |
@@ -124,6 +124,8 @@ func (pm *Manager) Push(name string, metaHeader http.Header, authConfig *types.A |
| 124 | 124 |
if err != nil {
|
| 125 | 125 |
return err |
| 126 | 126 |
} |
| 127 |
+ defer rootfs.Close() |
|
| 128 |
+ |
|
| 127 | 129 |
_, err = distribution.Push(name, pm.registryService, metaHeader, authConfig, config, rootfs) |
| 128 | 130 |
// XXX: Ignore returning digest for now. |
| 129 | 131 |
// Since digest needs to be written to the ProgressWriter. |