Now that the archive package does not depend on any docker-specific
packages, only those in pkg and vendor, it can be safely moved into pkg.
Signed-off-by: Rafe Colton <rafael.colton@gmail.com>
... | ... |
@@ -23,12 +23,12 @@ import ( |
23 | 23 |
"time" |
24 | 24 |
|
25 | 25 |
"github.com/docker/docker/api" |
26 |
- "github.com/docker/docker/archive" |
|
27 | 26 |
"github.com/docker/docker/dockerversion" |
28 | 27 |
"github.com/docker/docker/engine" |
29 | 28 |
"github.com/docker/docker/graph" |
30 | 29 |
"github.com/docker/docker/nat" |
31 | 30 |
"github.com/docker/docker/opts" |
31 |
+ "github.com/docker/docker/pkg/archive" |
|
32 | 32 |
"github.com/docker/docker/pkg/log" |
33 | 33 |
flag "github.com/docker/docker/pkg/mflag" |
34 | 34 |
"github.com/docker/docker/pkg/parsers" |
4 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,705 +0,0 @@ |
1 |
-package archive |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "bufio" |
|
5 |
- "bytes" |
|
6 |
- "compress/bzip2" |
|
7 |
- "compress/gzip" |
|
8 |
- "errors" |
|
9 |
- "fmt" |
|
10 |
- "io" |
|
11 |
- "io/ioutil" |
|
12 |
- "os" |
|
13 |
- "os/exec" |
|
14 |
- "path" |
|
15 |
- "path/filepath" |
|
16 |
- "strings" |
|
17 |
- "syscall" |
|
18 |
- |
|
19 |
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" |
|
20 |
- |
|
21 |
- "github.com/docker/docker/pkg/log" |
|
22 |
- "github.com/docker/docker/pkg/pools" |
|
23 |
- "github.com/docker/docker/pkg/system" |
|
24 |
- "github.com/docker/docker/utils" |
|
25 |
-) |
|
26 |
- |
|
27 |
-type ( |
|
28 |
- Archive io.ReadCloser |
|
29 |
- ArchiveReader io.Reader |
|
30 |
- Compression int |
|
31 |
- TarOptions struct { |
|
32 |
- Includes []string |
|
33 |
- Excludes []string |
|
34 |
- Compression Compression |
|
35 |
- NoLchown bool |
|
36 |
- } |
|
37 |
-) |
|
38 |
- |
|
39 |
-var ( |
|
40 |
- ErrNotImplemented = errors.New("Function not implemented") |
|
41 |
-) |
|
42 |
- |
|
43 |
-const ( |
|
44 |
- Uncompressed Compression = iota |
|
45 |
- Bzip2 |
|
46 |
- Gzip |
|
47 |
- Xz |
|
48 |
-) |
|
49 |
- |
|
50 |
-func IsArchive(header []byte) bool { |
|
51 |
- compression := DetectCompression(header) |
|
52 |
- if compression != Uncompressed { |
|
53 |
- return true |
|
54 |
- } |
|
55 |
- r := tar.NewReader(bytes.NewBuffer(header)) |
|
56 |
- _, err := r.Next() |
|
57 |
- return err == nil |
|
58 |
-} |
|
59 |
- |
|
60 |
-func DetectCompression(source []byte) Compression { |
|
61 |
- for compression, m := range map[Compression][]byte{ |
|
62 |
- Bzip2: {0x42, 0x5A, 0x68}, |
|
63 |
- Gzip: {0x1F, 0x8B, 0x08}, |
|
64 |
- Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, |
|
65 |
- } { |
|
66 |
- if len(source) < len(m) { |
|
67 |
- log.Debugf("Len too short") |
|
68 |
- continue |
|
69 |
- } |
|
70 |
- if bytes.Compare(m, source[:len(m)]) == 0 { |
|
71 |
- return compression |
|
72 |
- } |
|
73 |
- } |
|
74 |
- return Uncompressed |
|
75 |
-} |
|
76 |
- |
|
77 |
-func xzDecompress(archive io.Reader) (io.ReadCloser, error) { |
|
78 |
- args := []string{"xz", "-d", "-c", "-q"} |
|
79 |
- |
|
80 |
- return CmdStream(exec.Command(args[0], args[1:]...), archive) |
|
81 |
-} |
|
82 |
- |
|
83 |
-func DecompressStream(archive io.Reader) (io.ReadCloser, error) { |
|
84 |
- p := pools.BufioReader32KPool |
|
85 |
- buf := p.Get(archive) |
|
86 |
- bs, err := buf.Peek(10) |
|
87 |
- if err != nil { |
|
88 |
- return nil, err |
|
89 |
- } |
|
90 |
- log.Debugf("[tar autodetect] n: %v", bs) |
|
91 |
- |
|
92 |
- compression := DetectCompression(bs) |
|
93 |
- switch compression { |
|
94 |
- case Uncompressed: |
|
95 |
- readBufWrapper := p.NewReadCloserWrapper(buf, buf) |
|
96 |
- return readBufWrapper, nil |
|
97 |
- case Gzip: |
|
98 |
- gzReader, err := gzip.NewReader(buf) |
|
99 |
- if err != nil { |
|
100 |
- return nil, err |
|
101 |
- } |
|
102 |
- readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) |
|
103 |
- return readBufWrapper, nil |
|
104 |
- case Bzip2: |
|
105 |
- bz2Reader := bzip2.NewReader(buf) |
|
106 |
- readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) |
|
107 |
- return readBufWrapper, nil |
|
108 |
- case Xz: |
|
109 |
- xzReader, err := xzDecompress(buf) |
|
110 |
- if err != nil { |
|
111 |
- return nil, err |
|
112 |
- } |
|
113 |
- readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) |
|
114 |
- return readBufWrapper, nil |
|
115 |
- default: |
|
116 |
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) |
|
117 |
- } |
|
118 |
-} |
|
119 |
- |
|
120 |
-func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { |
|
121 |
- p := pools.BufioWriter32KPool |
|
122 |
- buf := p.Get(dest) |
|
123 |
- switch compression { |
|
124 |
- case Uncompressed: |
|
125 |
- writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) |
|
126 |
- return writeBufWrapper, nil |
|
127 |
- case Gzip: |
|
128 |
- gzWriter := gzip.NewWriter(dest) |
|
129 |
- writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) |
|
130 |
- return writeBufWrapper, nil |
|
131 |
- case Bzip2, Xz: |
|
132 |
- // archive/bzip2 does not support writing, and there is no xz support at all |
|
133 |
- // However, this is not a problem as docker only currently generates gzipped tars |
|
134 |
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) |
|
135 |
- default: |
|
136 |
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) |
|
137 |
- } |
|
138 |
-} |
|
139 |
- |
|
140 |
-func (compression *Compression) Extension() string { |
|
141 |
- switch *compression { |
|
142 |
- case Uncompressed: |
|
143 |
- return "tar" |
|
144 |
- case Bzip2: |
|
145 |
- return "tar.bz2" |
|
146 |
- case Gzip: |
|
147 |
- return "tar.gz" |
|
148 |
- case Xz: |
|
149 |
- return "tar.xz" |
|
150 |
- } |
|
151 |
- return "" |
|
152 |
-} |
|
153 |
- |
|
154 |
-func addTarFile(path, name string, tw *tar.Writer, twBuf *bufio.Writer) error { |
|
155 |
- fi, err := os.Lstat(path) |
|
156 |
- if err != nil { |
|
157 |
- return err |
|
158 |
- } |
|
159 |
- |
|
160 |
- link := "" |
|
161 |
- if fi.Mode()&os.ModeSymlink != 0 { |
|
162 |
- if link, err = os.Readlink(path); err != nil { |
|
163 |
- return err |
|
164 |
- } |
|
165 |
- } |
|
166 |
- |
|
167 |
- hdr, err := tar.FileInfoHeader(fi, link) |
|
168 |
- if err != nil { |
|
169 |
- return err |
|
170 |
- } |
|
171 |
- |
|
172 |
- if fi.IsDir() && !strings.HasSuffix(name, "/") { |
|
173 |
- name = name + "/" |
|
174 |
- } |
|
175 |
- |
|
176 |
- hdr.Name = name |
|
177 |
- |
|
178 |
- stat, ok := fi.Sys().(*syscall.Stat_t) |
|
179 |
- if ok { |
|
180 |
- // Currently go does not fill in the major/minors |
|
181 |
- if stat.Mode&syscall.S_IFBLK == syscall.S_IFBLK || |
|
182 |
- stat.Mode&syscall.S_IFCHR == syscall.S_IFCHR { |
|
183 |
- hdr.Devmajor = int64(major(uint64(stat.Rdev))) |
|
184 |
- hdr.Devminor = int64(minor(uint64(stat.Rdev))) |
|
185 |
- } |
|
186 |
- |
|
187 |
- } |
|
188 |
- |
|
189 |
- capability, _ := system.Lgetxattr(path, "security.capability") |
|
190 |
- if capability != nil { |
|
191 |
- hdr.Xattrs = make(map[string]string) |
|
192 |
- hdr.Xattrs["security.capability"] = string(capability) |
|
193 |
- } |
|
194 |
- |
|
195 |
- if err := tw.WriteHeader(hdr); err != nil { |
|
196 |
- return err |
|
197 |
- } |
|
198 |
- |
|
199 |
- if hdr.Typeflag == tar.TypeReg { |
|
200 |
- file, err := os.Open(path) |
|
201 |
- if err != nil { |
|
202 |
- return err |
|
203 |
- } |
|
204 |
- |
|
205 |
- twBuf.Reset(tw) |
|
206 |
- _, err = io.Copy(twBuf, file) |
|
207 |
- file.Close() |
|
208 |
- if err != nil { |
|
209 |
- return err |
|
210 |
- } |
|
211 |
- err = twBuf.Flush() |
|
212 |
- if err != nil { |
|
213 |
- return err |
|
214 |
- } |
|
215 |
- twBuf.Reset(nil) |
|
216 |
- } |
|
217 |
- |
|
218 |
- return nil |
|
219 |
-} |
|
220 |
- |
|
221 |
-func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool) error { |
|
222 |
- // hdr.Mode is in linux format, which we can use for sycalls, |
|
223 |
- // but for os.Foo() calls we need the mode converted to os.FileMode, |
|
224 |
- // so use hdrInfo.Mode() (they differ for e.g. setuid bits) |
|
225 |
- hdrInfo := hdr.FileInfo() |
|
226 |
- |
|
227 |
- switch hdr.Typeflag { |
|
228 |
- case tar.TypeDir: |
|
229 |
- // Create directory unless it exists as a directory already. |
|
230 |
- // In that case we just want to merge the two |
|
231 |
- if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { |
|
232 |
- if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { |
|
233 |
- return err |
|
234 |
- } |
|
235 |
- } |
|
236 |
- |
|
237 |
- case tar.TypeReg, tar.TypeRegA: |
|
238 |
- // Source is regular file |
|
239 |
- file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) |
|
240 |
- if err != nil { |
|
241 |
- return err |
|
242 |
- } |
|
243 |
- if _, err := io.Copy(file, reader); err != nil { |
|
244 |
- file.Close() |
|
245 |
- return err |
|
246 |
- } |
|
247 |
- file.Close() |
|
248 |
- |
|
249 |
- case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: |
|
250 |
- mode := uint32(hdr.Mode & 07777) |
|
251 |
- switch hdr.Typeflag { |
|
252 |
- case tar.TypeBlock: |
|
253 |
- mode |= syscall.S_IFBLK |
|
254 |
- case tar.TypeChar: |
|
255 |
- mode |= syscall.S_IFCHR |
|
256 |
- case tar.TypeFifo: |
|
257 |
- mode |= syscall.S_IFIFO |
|
258 |
- } |
|
259 |
- |
|
260 |
- if err := syscall.Mknod(path, mode, int(mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { |
|
261 |
- return err |
|
262 |
- } |
|
263 |
- |
|
264 |
- case tar.TypeLink: |
|
265 |
- if err := os.Link(filepath.Join(extractDir, hdr.Linkname), path); err != nil { |
|
266 |
- return err |
|
267 |
- } |
|
268 |
- |
|
269 |
- case tar.TypeSymlink: |
|
270 |
- if err := os.Symlink(hdr.Linkname, path); err != nil { |
|
271 |
- return err |
|
272 |
- } |
|
273 |
- |
|
274 |
- case tar.TypeXGlobalHeader: |
|
275 |
- log.Debugf("PAX Global Extended Headers found and ignored") |
|
276 |
- return nil |
|
277 |
- |
|
278 |
- default: |
|
279 |
- return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) |
|
280 |
- } |
|
281 |
- |
|
282 |
- if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown { |
|
283 |
- return err |
|
284 |
- } |
|
285 |
- |
|
286 |
- for key, value := range hdr.Xattrs { |
|
287 |
- if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { |
|
288 |
- return err |
|
289 |
- } |
|
290 |
- } |
|
291 |
- |
|
292 |
- // There is no LChmod, so ignore mode for symlink. Also, this |
|
293 |
- // must happen after chown, as that can modify the file mode |
|
294 |
- if hdr.Typeflag != tar.TypeSymlink { |
|
295 |
- if err := os.Chmod(path, hdrInfo.Mode()); err != nil { |
|
296 |
- return err |
|
297 |
- } |
|
298 |
- } |
|
299 |
- |
|
300 |
- ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} |
|
301 |
- // syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and |
|
302 |
- if hdr.Typeflag != tar.TypeSymlink { |
|
303 |
- if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { |
|
304 |
- return err |
|
305 |
- } |
|
306 |
- } else { |
|
307 |
- if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { |
|
308 |
- return err |
|
309 |
- } |
|
310 |
- } |
|
311 |
- return nil |
|
312 |
-} |
|
313 |
- |
|
314 |
-// Tar creates an archive from the directory at `path`, and returns it as a |
|
315 |
-// stream of bytes. |
|
316 |
-func Tar(path string, compression Compression) (io.ReadCloser, error) { |
|
317 |
- return TarWithOptions(path, &TarOptions{Compression: compression}) |
|
318 |
-} |
|
319 |
- |
|
320 |
-func escapeName(name string) string { |
|
321 |
- escaped := make([]byte, 0) |
|
322 |
- for i, c := range []byte(name) { |
|
323 |
- if i == 0 && c == '/' { |
|
324 |
- continue |
|
325 |
- } |
|
326 |
- // all printable chars except "-" which is 0x2d |
|
327 |
- if (0x20 <= c && c <= 0x7E) && c != 0x2d { |
|
328 |
- escaped = append(escaped, c) |
|
329 |
- } else { |
|
330 |
- escaped = append(escaped, fmt.Sprintf("\\%03o", c)...) |
|
331 |
- } |
|
332 |
- } |
|
333 |
- return string(escaped) |
|
334 |
-} |
|
335 |
- |
|
336 |
-// TarWithOptions creates an archive from the directory at `path`, only including files whose relative |
|
337 |
-// paths are included in `options.Includes` (if non-nil) or not in `options.Excludes`. |
|
338 |
-func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { |
|
339 |
- pipeReader, pipeWriter := io.Pipe() |
|
340 |
- |
|
341 |
- compressWriter, err := CompressStream(pipeWriter, options.Compression) |
|
342 |
- if err != nil { |
|
343 |
- return nil, err |
|
344 |
- } |
|
345 |
- |
|
346 |
- tw := tar.NewWriter(compressWriter) |
|
347 |
- |
|
348 |
- go func() { |
|
349 |
- // In general we log errors here but ignore them because |
|
350 |
- // during e.g. a diff operation the container can continue |
|
351 |
- // mutating the filesystem and we can see transient errors |
|
352 |
- // from this |
|
353 |
- |
|
354 |
- if options.Includes == nil { |
|
355 |
- options.Includes = []string{"."} |
|
356 |
- } |
|
357 |
- |
|
358 |
- twBuf := pools.BufioWriter32KPool.Get(nil) |
|
359 |
- defer pools.BufioWriter32KPool.Put(twBuf) |
|
360 |
- |
|
361 |
- for _, include := range options.Includes { |
|
362 |
- filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error { |
|
363 |
- if err != nil { |
|
364 |
- log.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) |
|
365 |
- return nil |
|
366 |
- } |
|
367 |
- |
|
368 |
- relFilePath, err := filepath.Rel(srcPath, filePath) |
|
369 |
- if err != nil { |
|
370 |
- return nil |
|
371 |
- } |
|
372 |
- |
|
373 |
- skip, err := utils.Matches(relFilePath, options.Excludes) |
|
374 |
- if err != nil { |
|
375 |
- log.Debugf("Error matching %s", relFilePath, err) |
|
376 |
- return err |
|
377 |
- } |
|
378 |
- |
|
379 |
- if skip { |
|
380 |
- if f.IsDir() { |
|
381 |
- return filepath.SkipDir |
|
382 |
- } |
|
383 |
- return nil |
|
384 |
- } |
|
385 |
- |
|
386 |
- if err := addTarFile(filePath, relFilePath, tw, twBuf); err != nil { |
|
387 |
- log.Debugf("Can't add file %s to tar: %s", srcPath, err) |
|
388 |
- } |
|
389 |
- return nil |
|
390 |
- }) |
|
391 |
- } |
|
392 |
- |
|
393 |
- // Make sure to check the error on Close. |
|
394 |
- if err := tw.Close(); err != nil { |
|
395 |
- log.Debugf("Can't close tar writer: %s", err) |
|
396 |
- } |
|
397 |
- if err := compressWriter.Close(); err != nil { |
|
398 |
- log.Debugf("Can't close compress writer: %s", err) |
|
399 |
- } |
|
400 |
- if err := pipeWriter.Close(); err != nil { |
|
401 |
- log.Debugf("Can't close pipe writer: %s", err) |
|
402 |
- } |
|
403 |
- }() |
|
404 |
- |
|
405 |
- return pipeReader, nil |
|
406 |
-} |
|
407 |
- |
|
408 |
-// Untar reads a stream of bytes from `archive`, parses it as a tar archive, |
|
409 |
-// and unpacks it into the directory at `path`. |
|
410 |
-// The archive may be compressed with one of the following algorithms: |
|
411 |
-// identity (uncompressed), gzip, bzip2, xz. |
|
412 |
-// FIXME: specify behavior when target path exists vs. doesn't exist. |
|
413 |
-func Untar(archive io.Reader, dest string, options *TarOptions) error { |
|
414 |
- if options == nil { |
|
415 |
- options = &TarOptions{} |
|
416 |
- } |
|
417 |
- |
|
418 |
- if archive == nil { |
|
419 |
- return fmt.Errorf("Empty archive") |
|
420 |
- } |
|
421 |
- |
|
422 |
- if options.Excludes == nil { |
|
423 |
- options.Excludes = []string{} |
|
424 |
- } |
|
425 |
- |
|
426 |
- decompressedArchive, err := DecompressStream(archive) |
|
427 |
- if err != nil { |
|
428 |
- return err |
|
429 |
- } |
|
430 |
- defer decompressedArchive.Close() |
|
431 |
- |
|
432 |
- tr := tar.NewReader(decompressedArchive) |
|
433 |
- trBuf := pools.BufioReader32KPool.Get(nil) |
|
434 |
- defer pools.BufioReader32KPool.Put(trBuf) |
|
435 |
- |
|
436 |
- var dirs []*tar.Header |
|
437 |
- |
|
438 |
- // Iterate through the files in the archive. |
|
439 |
-loop: |
|
440 |
- for { |
|
441 |
- hdr, err := tr.Next() |
|
442 |
- if err == io.EOF { |
|
443 |
- // end of tar archive |
|
444 |
- break |
|
445 |
- } |
|
446 |
- if err != nil { |
|
447 |
- return err |
|
448 |
- } |
|
449 |
- |
|
450 |
- // Normalize name, for safety and for a simple is-root check |
|
451 |
- hdr.Name = filepath.Clean(hdr.Name) |
|
452 |
- |
|
453 |
- for _, exclude := range options.Excludes { |
|
454 |
- if strings.HasPrefix(hdr.Name, exclude) { |
|
455 |
- continue loop |
|
456 |
- } |
|
457 |
- } |
|
458 |
- |
|
459 |
- if !strings.HasSuffix(hdr.Name, "/") { |
|
460 |
- // Not the root directory, ensure that the parent directory exists |
|
461 |
- parent := filepath.Dir(hdr.Name) |
|
462 |
- parentPath := filepath.Join(dest, parent) |
|
463 |
- if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { |
|
464 |
- err = os.MkdirAll(parentPath, 0777) |
|
465 |
- if err != nil { |
|
466 |
- return err |
|
467 |
- } |
|
468 |
- } |
|
469 |
- } |
|
470 |
- |
|
471 |
- path := filepath.Join(dest, hdr.Name) |
|
472 |
- |
|
473 |
- // If path exits we almost always just want to remove and replace it |
|
474 |
- // The only exception is when it is a directory *and* the file from |
|
475 |
- // the layer is also a directory. Then we want to merge them (i.e. |
|
476 |
- // just apply the metadata from the layer). |
|
477 |
- if fi, err := os.Lstat(path); err == nil { |
|
478 |
- if fi.IsDir() && hdr.Name == "." { |
|
479 |
- continue |
|
480 |
- } |
|
481 |
- if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { |
|
482 |
- if err := os.RemoveAll(path); err != nil { |
|
483 |
- return err |
|
484 |
- } |
|
485 |
- } |
|
486 |
- } |
|
487 |
- trBuf.Reset(tr) |
|
488 |
- if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown); err != nil { |
|
489 |
- return err |
|
490 |
- } |
|
491 |
- |
|
492 |
- // Directory mtimes must be handled at the end to avoid further |
|
493 |
- // file creation in them to modify the directory mtime |
|
494 |
- if hdr.Typeflag == tar.TypeDir { |
|
495 |
- dirs = append(dirs, hdr) |
|
496 |
- } |
|
497 |
- } |
|
498 |
- |
|
499 |
- for _, hdr := range dirs { |
|
500 |
- path := filepath.Join(dest, hdr.Name) |
|
501 |
- ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} |
|
502 |
- if err := syscall.UtimesNano(path, ts); err != nil { |
|
503 |
- return err |
|
504 |
- } |
|
505 |
- } |
|
506 |
- |
|
507 |
- return nil |
|
508 |
-} |
|
509 |
- |
|
510 |
-// TarUntar is a convenience function which calls Tar and Untar, with |
|
511 |
-// the output of one piped into the other. If either Tar or Untar fails, |
|
512 |
-// TarUntar aborts and returns the error. |
|
513 |
-func TarUntar(src string, dst string) error { |
|
514 |
- log.Debugf("TarUntar(%s %s)", src, dst) |
|
515 |
- archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) |
|
516 |
- if err != nil { |
|
517 |
- return err |
|
518 |
- } |
|
519 |
- defer archive.Close() |
|
520 |
- return Untar(archive, dst, nil) |
|
521 |
-} |
|
522 |
- |
|
523 |
-// UntarPath is a convenience function which looks for an archive |
|
524 |
-// at filesystem path `src`, and unpacks it at `dst`. |
|
525 |
-func UntarPath(src, dst string) error { |
|
526 |
- archive, err := os.Open(src) |
|
527 |
- if err != nil { |
|
528 |
- return err |
|
529 |
- } |
|
530 |
- defer archive.Close() |
|
531 |
- if err := Untar(archive, dst, nil); err != nil { |
|
532 |
- return err |
|
533 |
- } |
|
534 |
- return nil |
|
535 |
-} |
|
536 |
- |
|
537 |
-// CopyWithTar creates a tar archive of filesystem path `src`, and |
|
538 |
-// unpacks it at filesystem path `dst`. |
|
539 |
-// The archive is streamed directly with fixed buffering and no |
|
540 |
-// intermediary disk IO. |
|
541 |
-// |
|
542 |
-func CopyWithTar(src, dst string) error { |
|
543 |
- srcSt, err := os.Stat(src) |
|
544 |
- if err != nil { |
|
545 |
- return err |
|
546 |
- } |
|
547 |
- if !srcSt.IsDir() { |
|
548 |
- return CopyFileWithTar(src, dst) |
|
549 |
- } |
|
550 |
- // Create dst, copy src's content into it |
|
551 |
- log.Debugf("Creating dest directory: %s", dst) |
|
552 |
- if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) { |
|
553 |
- return err |
|
554 |
- } |
|
555 |
- log.Debugf("Calling TarUntar(%s, %s)", src, dst) |
|
556 |
- return TarUntar(src, dst) |
|
557 |
-} |
|
558 |
- |
|
559 |
-// CopyFileWithTar emulates the behavior of the 'cp' command-line |
|
560 |
-// for a single file. It copies a regular file from path `src` to |
|
561 |
-// path `dst`, and preserves all its metadata. |
|
562 |
-// |
|
563 |
-// If `dst` ends with a trailing slash '/', the final destination path |
|
564 |
-// will be `dst/base(src)`. |
|
565 |
-func CopyFileWithTar(src, dst string) (err error) { |
|
566 |
- log.Debugf("CopyFileWithTar(%s, %s)", src, dst) |
|
567 |
- srcSt, err := os.Stat(src) |
|
568 |
- if err != nil { |
|
569 |
- return err |
|
570 |
- } |
|
571 |
- if srcSt.IsDir() { |
|
572 |
- return fmt.Errorf("Can't copy a directory") |
|
573 |
- } |
|
574 |
- // Clean up the trailing / |
|
575 |
- if dst[len(dst)-1] == '/' { |
|
576 |
- dst = path.Join(dst, filepath.Base(src)) |
|
577 |
- } |
|
578 |
- // Create the holding directory if necessary |
|
579 |
- if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) { |
|
580 |
- return err |
|
581 |
- } |
|
582 |
- |
|
583 |
- r, w := io.Pipe() |
|
584 |
- errC := utils.Go(func() error { |
|
585 |
- defer w.Close() |
|
586 |
- |
|
587 |
- srcF, err := os.Open(src) |
|
588 |
- if err != nil { |
|
589 |
- return err |
|
590 |
- } |
|
591 |
- defer srcF.Close() |
|
592 |
- |
|
593 |
- hdr, err := tar.FileInfoHeader(srcSt, "") |
|
594 |
- if err != nil { |
|
595 |
- return err |
|
596 |
- } |
|
597 |
- hdr.Name = filepath.Base(dst) |
|
598 |
- tw := tar.NewWriter(w) |
|
599 |
- defer tw.Close() |
|
600 |
- if err := tw.WriteHeader(hdr); err != nil { |
|
601 |
- return err |
|
602 |
- } |
|
603 |
- if _, err := io.Copy(tw, srcF); err != nil { |
|
604 |
- return err |
|
605 |
- } |
|
606 |
- return nil |
|
607 |
- }) |
|
608 |
- defer func() { |
|
609 |
- if er := <-errC; err != nil { |
|
610 |
- err = er |
|
611 |
- } |
|
612 |
- }() |
|
613 |
- return Untar(r, filepath.Dir(dst), nil) |
|
614 |
-} |
|
615 |
- |
|
616 |
-// CmdStream executes a command, and returns its stdout as a stream. |
|
617 |
-// If the command fails to run or doesn't complete successfully, an error |
|
618 |
-// will be returned, including anything written on stderr. |
|
619 |
-func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { |
|
620 |
- if input != nil { |
|
621 |
- stdin, err := cmd.StdinPipe() |
|
622 |
- if err != nil { |
|
623 |
- return nil, err |
|
624 |
- } |
|
625 |
- // Write stdin if any |
|
626 |
- go func() { |
|
627 |
- io.Copy(stdin, input) |
|
628 |
- stdin.Close() |
|
629 |
- }() |
|
630 |
- } |
|
631 |
- stdout, err := cmd.StdoutPipe() |
|
632 |
- if err != nil { |
|
633 |
- return nil, err |
|
634 |
- } |
|
635 |
- stderr, err := cmd.StderrPipe() |
|
636 |
- if err != nil { |
|
637 |
- return nil, err |
|
638 |
- } |
|
639 |
- pipeR, pipeW := io.Pipe() |
|
640 |
- errChan := make(chan []byte) |
|
641 |
- // Collect stderr, we will use it in case of an error |
|
642 |
- go func() { |
|
643 |
- errText, e := ioutil.ReadAll(stderr) |
|
644 |
- if e != nil { |
|
645 |
- errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")") |
|
646 |
- } |
|
647 |
- errChan <- errText |
|
648 |
- }() |
|
649 |
- // Copy stdout to the returned pipe |
|
650 |
- go func() { |
|
651 |
- _, err := io.Copy(pipeW, stdout) |
|
652 |
- if err != nil { |
|
653 |
- pipeW.CloseWithError(err) |
|
654 |
- } |
|
655 |
- errText := <-errChan |
|
656 |
- if err := cmd.Wait(); err != nil { |
|
657 |
- pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText)) |
|
658 |
- } else { |
|
659 |
- pipeW.Close() |
|
660 |
- } |
|
661 |
- }() |
|
662 |
- // Run the command and return the pipe |
|
663 |
- if err := cmd.Start(); err != nil { |
|
664 |
- return nil, err |
|
665 |
- } |
|
666 |
- return pipeR, nil |
|
667 |
-} |
|
668 |
- |
|
669 |
-// NewTempArchive reads the content of src into a temporary file, and returns the contents |
|
670 |
-// of that file as an archive. The archive can only be read once - as soon as reading completes, |
|
671 |
-// the file will be deleted. |
|
672 |
-func NewTempArchive(src Archive, dir string) (*TempArchive, error) { |
|
673 |
- f, err := ioutil.TempFile(dir, "") |
|
674 |
- if err != nil { |
|
675 |
- return nil, err |
|
676 |
- } |
|
677 |
- if _, err := io.Copy(f, src); err != nil { |
|
678 |
- return nil, err |
|
679 |
- } |
|
680 |
- if err = f.Sync(); err != nil { |
|
681 |
- return nil, err |
|
682 |
- } |
|
683 |
- if _, err := f.Seek(0, 0); err != nil { |
|
684 |
- return nil, err |
|
685 |
- } |
|
686 |
- st, err := f.Stat() |
|
687 |
- if err != nil { |
|
688 |
- return nil, err |
|
689 |
- } |
|
690 |
- size := st.Size() |
|
691 |
- return &TempArchive{f, size}, nil |
|
692 |
-} |
|
693 |
- |
|
694 |
-type TempArchive struct { |
|
695 |
- *os.File |
|
696 |
- Size int64 // Pre-computed from Stat().Size() as a convenience |
|
697 |
-} |
|
698 |
- |
|
699 |
-func (archive *TempArchive) Read(data []byte) (int, error) { |
|
700 |
- n, err := archive.File.Read(data) |
|
701 |
- if err != nil { |
|
702 |
- os.Remove(archive.File.Name()) |
|
703 |
- } |
|
704 |
- return n, err |
|
705 |
-} |
706 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,244 +0,0 @@ |
1 |
-package archive |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "bytes" |
|
5 |
- "fmt" |
|
6 |
- "io" |
|
7 |
- "io/ioutil" |
|
8 |
- "os" |
|
9 |
- "os/exec" |
|
10 |
- "path" |
|
11 |
- "testing" |
|
12 |
- "time" |
|
13 |
- |
|
14 |
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" |
|
15 |
-) |
|
16 |
- |
|
17 |
-func TestCmdStreamLargeStderr(t *testing.T) { |
|
18 |
- cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") |
|
19 |
- out, err := CmdStream(cmd, nil) |
|
20 |
- if err != nil { |
|
21 |
- t.Fatalf("Failed to start command: %s", err) |
|
22 |
- } |
|
23 |
- errCh := make(chan error) |
|
24 |
- go func() { |
|
25 |
- _, err := io.Copy(ioutil.Discard, out) |
|
26 |
- errCh <- err |
|
27 |
- }() |
|
28 |
- select { |
|
29 |
- case err := <-errCh: |
|
30 |
- if err != nil { |
|
31 |
- t.Fatalf("Command should not have failed (err=%.100s...)", err) |
|
32 |
- } |
|
33 |
- case <-time.After(5 * time.Second): |
|
34 |
- t.Fatalf("Command did not complete in 5 seconds; probable deadlock") |
|
35 |
- } |
|
36 |
-} |
|
37 |
- |
|
38 |
-func TestCmdStreamBad(t *testing.T) { |
|
39 |
- badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") |
|
40 |
- out, err := CmdStream(badCmd, nil) |
|
41 |
- if err != nil { |
|
42 |
- t.Fatalf("Failed to start command: %s", err) |
|
43 |
- } |
|
44 |
- if output, err := ioutil.ReadAll(out); err == nil { |
|
45 |
- t.Fatalf("Command should have failed") |
|
46 |
- } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { |
|
47 |
- t.Fatalf("Wrong error value (%s)", err) |
|
48 |
- } else if s := string(output); s != "hello\n" { |
|
49 |
- t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) |
|
50 |
- } |
|
51 |
-} |
|
52 |
- |
|
53 |
-func TestCmdStreamGood(t *testing.T) { |
|
54 |
- cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0") |
|
55 |
- out, err := CmdStream(cmd, nil) |
|
56 |
- if err != nil { |
|
57 |
- t.Fatal(err) |
|
58 |
- } |
|
59 |
- if output, err := ioutil.ReadAll(out); err != nil { |
|
60 |
- t.Fatalf("Command should not have failed (err=%s)", err) |
|
61 |
- } else if s := string(output); s != "hello\n" { |
|
62 |
- t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) |
|
63 |
- } |
|
64 |
-} |
|
65 |
- |
|
66 |
-func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { |
|
67 |
- archive, err := TarWithOptions(origin, options) |
|
68 |
- if err != nil { |
|
69 |
- t.Fatal(err) |
|
70 |
- } |
|
71 |
- defer archive.Close() |
|
72 |
- |
|
73 |
- buf := make([]byte, 10) |
|
74 |
- if _, err := archive.Read(buf); err != nil { |
|
75 |
- return nil, err |
|
76 |
- } |
|
77 |
- wrap := io.MultiReader(bytes.NewReader(buf), archive) |
|
78 |
- |
|
79 |
- detectedCompression := DetectCompression(buf) |
|
80 |
- compression := options.Compression |
|
81 |
- if detectedCompression.Extension() != compression.Extension() { |
|
82 |
- return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) |
|
83 |
- } |
|
84 |
- |
|
85 |
- tmp, err := ioutil.TempDir("", "docker-test-untar") |
|
86 |
- if err != nil { |
|
87 |
- return nil, err |
|
88 |
- } |
|
89 |
- defer os.RemoveAll(tmp) |
|
90 |
- if err := Untar(wrap, tmp, nil); err != nil { |
|
91 |
- return nil, err |
|
92 |
- } |
|
93 |
- if _, err := os.Stat(tmp); err != nil { |
|
94 |
- return nil, err |
|
95 |
- } |
|
96 |
- |
|
97 |
- return ChangesDirs(origin, tmp) |
|
98 |
-} |
|
99 |
- |
|
100 |
-func TestTarUntar(t *testing.T) { |
|
101 |
- origin, err := ioutil.TempDir("", "docker-test-untar-origin") |
|
102 |
- if err != nil { |
|
103 |
- t.Fatal(err) |
|
104 |
- } |
|
105 |
- defer os.RemoveAll(origin) |
|
106 |
- if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { |
|
107 |
- t.Fatal(err) |
|
108 |
- } |
|
109 |
- if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { |
|
110 |
- t.Fatal(err) |
|
111 |
- } |
|
112 |
- if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { |
|
113 |
- t.Fatal(err) |
|
114 |
- } |
|
115 |
- |
|
116 |
- for _, c := range []Compression{ |
|
117 |
- Uncompressed, |
|
118 |
- Gzip, |
|
119 |
- } { |
|
120 |
- changes, err := tarUntar(t, origin, &TarOptions{ |
|
121 |
- Compression: c, |
|
122 |
- Excludes: []string{"3"}, |
|
123 |
- }) |
|
124 |
- |
|
125 |
- if err != nil { |
|
126 |
- t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) |
|
127 |
- } |
|
128 |
- |
|
129 |
- if len(changes) != 1 || changes[0].Path != "/3" { |
|
130 |
- t.Fatalf("Unexpected differences after tarUntar: %v", changes) |
|
131 |
- } |
|
132 |
- } |
|
133 |
-} |
|
134 |
- |
|
135 |
-func TestTarWithOptions(t *testing.T) { |
|
136 |
- origin, err := ioutil.TempDir("", "docker-test-untar-origin") |
|
137 |
- if err != nil { |
|
138 |
- t.Fatal(err) |
|
139 |
- } |
|
140 |
- defer os.RemoveAll(origin) |
|
141 |
- if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { |
|
142 |
- t.Fatal(err) |
|
143 |
- } |
|
144 |
- if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { |
|
145 |
- t.Fatal(err) |
|
146 |
- } |
|
147 |
- |
|
148 |
- cases := []struct { |
|
149 |
- opts *TarOptions |
|
150 |
- numChanges int |
|
151 |
- }{ |
|
152 |
- {&TarOptions{Includes: []string{"1"}}, 1}, |
|
153 |
- {&TarOptions{Excludes: []string{"2"}}, 1}, |
|
154 |
- } |
|
155 |
- for _, testCase := range cases { |
|
156 |
- changes, err := tarUntar(t, origin, testCase.opts) |
|
157 |
- if err != nil { |
|
158 |
- t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) |
|
159 |
- } |
|
160 |
- if len(changes) != testCase.numChanges { |
|
161 |
- t.Errorf("Expected %d changes, got %d for %+v:", |
|
162 |
- testCase.numChanges, len(changes), testCase.opts) |
|
163 |
- } |
|
164 |
- } |
|
165 |
-} |
|
166 |
- |
|
167 |
-// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz |
|
168 |
-// use PAX Global Extended Headers. |
|
169 |
-// Failing prevents the archives from being uncompressed during ADD |
|
170 |
-func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { |
|
171 |
- hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} |
|
172 |
- err := createTarFile("pax_global_header", "some_dir", &hdr, nil, true) |
|
173 |
- if err != nil { |
|
174 |
- t.Fatal(err) |
|
175 |
- } |
|
176 |
-} |
|
177 |
- |
|
178 |
-// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. |
|
179 |
-// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. |
|
180 |
-func TestUntarUstarGnuConflict(t *testing.T) { |
|
181 |
- f, err := os.Open("testdata/broken.tar") |
|
182 |
- if err != nil { |
|
183 |
- t.Fatal(err) |
|
184 |
- } |
|
185 |
- found := false |
|
186 |
- tr := tar.NewReader(f) |
|
187 |
- // Iterate through the files in the archive. |
|
188 |
- for { |
|
189 |
- hdr, err := tr.Next() |
|
190 |
- if err == io.EOF { |
|
191 |
- // end of tar archive |
|
192 |
- break |
|
193 |
- } |
|
194 |
- if err != nil { |
|
195 |
- t.Fatal(err) |
|
196 |
- } |
|
197 |
- if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { |
|
198 |
- found = true |
|
199 |
- break |
|
200 |
- } |
|
201 |
- } |
|
202 |
- if !found { |
|
203 |
- t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") |
|
204 |
- } |
|
205 |
-} |
|
206 |
- |
|
207 |
-func prepareUntarSourceDirectory(numberOfFiles int, targetPath string) (int, error) { |
|
208 |
- fileData := []byte("fooo") |
|
209 |
- for n := 0; n < numberOfFiles; n++ { |
|
210 |
- fileName := fmt.Sprintf("file-%d", n) |
|
211 |
- if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { |
|
212 |
- return 0, err |
|
213 |
- } |
|
214 |
- } |
|
215 |
- totalSize := numberOfFiles * len(fileData) |
|
216 |
- return totalSize, nil |
|
217 |
-} |
|
218 |
- |
|
219 |
-func BenchmarkTarUntar(b *testing.B) { |
|
220 |
- origin, err := ioutil.TempDir("", "docker-test-untar-origin") |
|
221 |
- if err != nil { |
|
222 |
- b.Fatal(err) |
|
223 |
- } |
|
224 |
- tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") |
|
225 |
- if err != nil { |
|
226 |
- b.Fatal(err) |
|
227 |
- } |
|
228 |
- target := path.Join(tempDir, "dest") |
|
229 |
- n, err := prepareUntarSourceDirectory(100, origin) |
|
230 |
- if err != nil { |
|
231 |
- b.Fatal(err) |
|
232 |
- } |
|
233 |
- b.ResetTimer() |
|
234 |
- b.SetBytes(int64(n)) |
|
235 |
- defer os.RemoveAll(origin) |
|
236 |
- defer os.RemoveAll(tempDir) |
|
237 |
- for n := 0; n < b.N; n++ { |
|
238 |
- err := TarUntar(origin, target) |
|
239 |
- if err != nil { |
|
240 |
- b.Fatal(err) |
|
241 |
- } |
|
242 |
- os.RemoveAll(target) |
|
243 |
- } |
|
244 |
-} |
245 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,411 +0,0 @@ |
1 |
-package archive |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "bytes" |
|
5 |
- "fmt" |
|
6 |
- "io" |
|
7 |
- "os" |
|
8 |
- "path/filepath" |
|
9 |
- "strings" |
|
10 |
- "syscall" |
|
11 |
- "time" |
|
12 |
- |
|
13 |
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" |
|
14 |
- |
|
15 |
- "github.com/docker/docker/pkg/log" |
|
16 |
- "github.com/docker/docker/pkg/pools" |
|
17 |
- "github.com/docker/docker/pkg/system" |
|
18 |
-) |
|
19 |
- |
|
20 |
-type ChangeType int |
|
21 |
- |
|
22 |
-const ( |
|
23 |
- ChangeModify = iota |
|
24 |
- ChangeAdd |
|
25 |
- ChangeDelete |
|
26 |
-) |
|
27 |
- |
|
28 |
-type Change struct { |
|
29 |
- Path string |
|
30 |
- Kind ChangeType |
|
31 |
-} |
|
32 |
- |
|
33 |
-func (change *Change) String() string { |
|
34 |
- var kind string |
|
35 |
- switch change.Kind { |
|
36 |
- case ChangeModify: |
|
37 |
- kind = "C" |
|
38 |
- case ChangeAdd: |
|
39 |
- kind = "A" |
|
40 |
- case ChangeDelete: |
|
41 |
- kind = "D" |
|
42 |
- } |
|
43 |
- return fmt.Sprintf("%s %s", kind, change.Path) |
|
44 |
-} |
|
45 |
- |
|
46 |
-// Gnu tar and the go tar writer don't have sub-second mtime |
|
47 |
-// precision, which is problematic when we apply changes via tar |
|
48 |
-// files, we handle this by comparing for exact times, *or* same |
|
49 |
-// second count and either a or b having exactly 0 nanoseconds |
|
50 |
-func sameFsTime(a, b time.Time) bool { |
|
51 |
- return a == b || |
|
52 |
- (a.Unix() == b.Unix() && |
|
53 |
- (a.Nanosecond() == 0 || b.Nanosecond() == 0)) |
|
54 |
-} |
|
55 |
- |
|
56 |
-func sameFsTimeSpec(a, b syscall.Timespec) bool { |
|
57 |
- return a.Sec == b.Sec && |
|
58 |
- (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) |
|
59 |
-} |
|
60 |
- |
|
61 |
-// Changes walks the path rw and determines changes for the files in the path, |
|
62 |
-// with respect to the parent layers |
|
63 |
-func Changes(layers []string, rw string) ([]Change, error) { |
|
64 |
- var changes []Change |
|
65 |
- err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { |
|
66 |
- if err != nil { |
|
67 |
- return err |
|
68 |
- } |
|
69 |
- |
|
70 |
- // Rebase path |
|
71 |
- path, err = filepath.Rel(rw, path) |
|
72 |
- if err != nil { |
|
73 |
- return err |
|
74 |
- } |
|
75 |
- path = filepath.Join("/", path) |
|
76 |
- |
|
77 |
- // Skip root |
|
78 |
- if path == "/" { |
|
79 |
- return nil |
|
80 |
- } |
|
81 |
- |
|
82 |
- // Skip AUFS metadata |
|
83 |
- if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched { |
|
84 |
- return err |
|
85 |
- } |
|
86 |
- |
|
87 |
- change := Change{ |
|
88 |
- Path: path, |
|
89 |
- } |
|
90 |
- |
|
91 |
- // Find out what kind of modification happened |
|
92 |
- file := filepath.Base(path) |
|
93 |
- // If there is a whiteout, then the file was removed |
|
94 |
- if strings.HasPrefix(file, ".wh.") { |
|
95 |
- originalFile := file[len(".wh."):] |
|
96 |
- change.Path = filepath.Join(filepath.Dir(path), originalFile) |
|
97 |
- change.Kind = ChangeDelete |
|
98 |
- } else { |
|
99 |
- // Otherwise, the file was added |
|
100 |
- change.Kind = ChangeAdd |
|
101 |
- |
|
102 |
- // ...Unless it already existed in a top layer, in which case, it's a modification |
|
103 |
- for _, layer := range layers { |
|
104 |
- stat, err := os.Stat(filepath.Join(layer, path)) |
|
105 |
- if err != nil && !os.IsNotExist(err) { |
|
106 |
- return err |
|
107 |
- } |
|
108 |
- if err == nil { |
|
109 |
- // The file existed in the top layer, so that's a modification |
|
110 |
- |
|
111 |
- // However, if it's a directory, maybe it wasn't actually modified. |
|
112 |
- // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar |
|
113 |
- if stat.IsDir() && f.IsDir() { |
|
114 |
- if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { |
|
115 |
- // Both directories are the same, don't record the change |
|
116 |
- return nil |
|
117 |
- } |
|
118 |
- } |
|
119 |
- change.Kind = ChangeModify |
|
120 |
- break |
|
121 |
- } |
|
122 |
- } |
|
123 |
- } |
|
124 |
- |
|
125 |
- // Record change |
|
126 |
- changes = append(changes, change) |
|
127 |
- return nil |
|
128 |
- }) |
|
129 |
- if err != nil && !os.IsNotExist(err) { |
|
130 |
- return nil, err |
|
131 |
- } |
|
132 |
- return changes, nil |
|
133 |
-} |
|
134 |
- |
|
135 |
-type FileInfo struct { |
|
136 |
- parent *FileInfo |
|
137 |
- name string |
|
138 |
- stat syscall.Stat_t |
|
139 |
- children map[string]*FileInfo |
|
140 |
- capability []byte |
|
141 |
- added bool |
|
142 |
-} |
|
143 |
- |
|
144 |
-func (root *FileInfo) LookUp(path string) *FileInfo { |
|
145 |
- parent := root |
|
146 |
- if path == "/" { |
|
147 |
- return root |
|
148 |
- } |
|
149 |
- |
|
150 |
- pathElements := strings.Split(path, "/") |
|
151 |
- for _, elem := range pathElements { |
|
152 |
- if elem != "" { |
|
153 |
- child := parent.children[elem] |
|
154 |
- if child == nil { |
|
155 |
- return nil |
|
156 |
- } |
|
157 |
- parent = child |
|
158 |
- } |
|
159 |
- } |
|
160 |
- return parent |
|
161 |
-} |
|
162 |
- |
|
163 |
-func (info *FileInfo) path() string { |
|
164 |
- if info.parent == nil { |
|
165 |
- return "/" |
|
166 |
- } |
|
167 |
- return filepath.Join(info.parent.path(), info.name) |
|
168 |
-} |
|
169 |
- |
|
170 |
-func (info *FileInfo) isDir() bool { |
|
171 |
- return info.parent == nil || info.stat.Mode&syscall.S_IFDIR == syscall.S_IFDIR |
|
172 |
-} |
|
173 |
- |
|
174 |
-func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { |
|
175 |
- |
|
176 |
- sizeAtEntry := len(*changes) |
|
177 |
- |
|
178 |
- if oldInfo == nil { |
|
179 |
- // add |
|
180 |
- change := Change{ |
|
181 |
- Path: info.path(), |
|
182 |
- Kind: ChangeAdd, |
|
183 |
- } |
|
184 |
- *changes = append(*changes, change) |
|
185 |
- info.added = true |
|
186 |
- } |
|
187 |
- |
|
188 |
- // We make a copy so we can modify it to detect additions |
|
189 |
- // also, we only recurse on the old dir if the new info is a directory |
|
190 |
- // otherwise any previous delete/change is considered recursive |
|
191 |
- oldChildren := make(map[string]*FileInfo) |
|
192 |
- if oldInfo != nil && info.isDir() { |
|
193 |
- for k, v := range oldInfo.children { |
|
194 |
- oldChildren[k] = v |
|
195 |
- } |
|
196 |
- } |
|
197 |
- |
|
198 |
- for name, newChild := range info.children { |
|
199 |
- oldChild, _ := oldChildren[name] |
|
200 |
- if oldChild != nil { |
|
201 |
- // change? |
|
202 |
- oldStat := &oldChild.stat |
|
203 |
- newStat := &newChild.stat |
|
204 |
- // Note: We can't compare inode or ctime or blocksize here, because these change |
|
205 |
- // when copying a file into a container. However, that is not generally a problem |
|
206 |
- // because any content change will change mtime, and any status change should |
|
207 |
- // be visible when actually comparing the stat fields. The only time this |
|
208 |
- // breaks down is if some code intentionally hides a change by setting |
|
209 |
- // back mtime |
|
210 |
- if oldStat.Mode != newStat.Mode || |
|
211 |
- oldStat.Uid != newStat.Uid || |
|
212 |
- oldStat.Gid != newStat.Gid || |
|
213 |
- oldStat.Rdev != newStat.Rdev || |
|
214 |
- // Don't look at size for dirs, its not a good measure of change |
|
215 |
- (oldStat.Size != newStat.Size && oldStat.Mode&syscall.S_IFDIR != syscall.S_IFDIR) || |
|
216 |
- !sameFsTimeSpec(system.GetLastModification(oldStat), system.GetLastModification(newStat)) || |
|
217 |
- bytes.Compare(oldChild.capability, newChild.capability) != 0 { |
|
218 |
- change := Change{ |
|
219 |
- Path: newChild.path(), |
|
220 |
- Kind: ChangeModify, |
|
221 |
- } |
|
222 |
- *changes = append(*changes, change) |
|
223 |
- newChild.added = true |
|
224 |
- } |
|
225 |
- |
|
226 |
- // Remove from copy so we can detect deletions |
|
227 |
- delete(oldChildren, name) |
|
228 |
- } |
|
229 |
- |
|
230 |
- newChild.addChanges(oldChild, changes) |
|
231 |
- } |
|
232 |
- for _, oldChild := range oldChildren { |
|
233 |
- // delete |
|
234 |
- change := Change{ |
|
235 |
- Path: oldChild.path(), |
|
236 |
- Kind: ChangeDelete, |
|
237 |
- } |
|
238 |
- *changes = append(*changes, change) |
|
239 |
- } |
|
240 |
- |
|
241 |
- // If there were changes inside this directory, we need to add it, even if the directory |
|
242 |
- // itself wasn't changed. This is needed to properly save and restore filesystem permissions. |
|
243 |
- if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != "/" { |
|
244 |
- change := Change{ |
|
245 |
- Path: info.path(), |
|
246 |
- Kind: ChangeModify, |
|
247 |
- } |
|
248 |
- // Let's insert the directory entry before the recently added entries located inside this dir |
|
249 |
- *changes = append(*changes, change) // just to resize the slice, will be overwritten |
|
250 |
- copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) |
|
251 |
- (*changes)[sizeAtEntry] = change |
|
252 |
- } |
|
253 |
- |
|
254 |
-} |
|
255 |
- |
|
256 |
-func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { |
|
257 |
- var changes []Change |
|
258 |
- |
|
259 |
- info.addChanges(oldInfo, &changes) |
|
260 |
- |
|
261 |
- return changes |
|
262 |
-} |
|
263 |
- |
|
264 |
-func newRootFileInfo() *FileInfo { |
|
265 |
- root := &FileInfo{ |
|
266 |
- name: "/", |
|
267 |
- children: make(map[string]*FileInfo), |
|
268 |
- } |
|
269 |
- return root |
|
270 |
-} |
|
271 |
- |
|
272 |
-func collectFileInfo(sourceDir string) (*FileInfo, error) { |
|
273 |
- root := newRootFileInfo() |
|
274 |
- |
|
275 |
- err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { |
|
276 |
- if err != nil { |
|
277 |
- return err |
|
278 |
- } |
|
279 |
- |
|
280 |
- // Rebase path |
|
281 |
- relPath, err := filepath.Rel(sourceDir, path) |
|
282 |
- if err != nil { |
|
283 |
- return err |
|
284 |
- } |
|
285 |
- relPath = filepath.Join("/", relPath) |
|
286 |
- |
|
287 |
- if relPath == "/" { |
|
288 |
- return nil |
|
289 |
- } |
|
290 |
- |
|
291 |
- parent := root.LookUp(filepath.Dir(relPath)) |
|
292 |
- if parent == nil { |
|
293 |
- return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) |
|
294 |
- } |
|
295 |
- |
|
296 |
- info := &FileInfo{ |
|
297 |
- name: filepath.Base(relPath), |
|
298 |
- children: make(map[string]*FileInfo), |
|
299 |
- parent: parent, |
|
300 |
- } |
|
301 |
- |
|
302 |
- if err := syscall.Lstat(path, &info.stat); err != nil { |
|
303 |
- return err |
|
304 |
- } |
|
305 |
- |
|
306 |
- info.capability, _ = system.Lgetxattr(path, "security.capability") |
|
307 |
- |
|
308 |
- parent.children[info.name] = info |
|
309 |
- |
|
310 |
- return nil |
|
311 |
- }) |
|
312 |
- if err != nil { |
|
313 |
- return nil, err |
|
314 |
- } |
|
315 |
- return root, nil |
|
316 |
-} |
|
317 |
- |
|
318 |
-// ChangesDirs compares two directories and generates an array of Change objects describing the changes. |
|
319 |
-// If oldDir is "", then all files in newDir will be Add-Changes. |
|
320 |
-func ChangesDirs(newDir, oldDir string) ([]Change, error) { |
|
321 |
- var ( |
|
322 |
- oldRoot, newRoot *FileInfo |
|
323 |
- err1, err2 error |
|
324 |
- errs = make(chan error, 2) |
|
325 |
- ) |
|
326 |
- go func() { |
|
327 |
- if oldDir != "" { |
|
328 |
- oldRoot, err1 = collectFileInfo(oldDir) |
|
329 |
- } |
|
330 |
- errs <- err1 |
|
331 |
- }() |
|
332 |
- go func() { |
|
333 |
- newRoot, err2 = collectFileInfo(newDir) |
|
334 |
- errs <- err2 |
|
335 |
- }() |
|
336 |
- for i := 0; i < 2; i++ { |
|
337 |
- if err := <-errs; err != nil { |
|
338 |
- return nil, err |
|
339 |
- } |
|
340 |
- } |
|
341 |
- |
|
342 |
- return newRoot.Changes(oldRoot), nil |
|
343 |
-} |
|
344 |
- |
|
345 |
-// ChangesSize calculates the size in bytes of the provided changes, based on newDir. |
|
346 |
-func ChangesSize(newDir string, changes []Change) int64 { |
|
347 |
- var size int64 |
|
348 |
- for _, change := range changes { |
|
349 |
- if change.Kind == ChangeModify || change.Kind == ChangeAdd { |
|
350 |
- file := filepath.Join(newDir, change.Path) |
|
351 |
- fileInfo, _ := os.Lstat(file) |
|
352 |
- if fileInfo != nil && !fileInfo.IsDir() { |
|
353 |
- size += fileInfo.Size() |
|
354 |
- } |
|
355 |
- } |
|
356 |
- } |
|
357 |
- return size |
|
358 |
-} |
|
359 |
- |
|
360 |
-func major(device uint64) uint64 { |
|
361 |
- return (device >> 8) & 0xfff |
|
362 |
-} |
|
363 |
- |
|
364 |
-func minor(device uint64) uint64 { |
|
365 |
- return (device & 0xff) | ((device >> 12) & 0xfff00) |
|
366 |
-} |
|
367 |
- |
|
368 |
-// ExportChanges produces an Archive from the provided changes, relative to dir. |
|
369 |
-func ExportChanges(dir string, changes []Change) (Archive, error) { |
|
370 |
- reader, writer := io.Pipe() |
|
371 |
- tw := tar.NewWriter(writer) |
|
372 |
- |
|
373 |
- go func() { |
|
374 |
- twBuf := pools.BufioWriter32KPool.Get(nil) |
|
375 |
- defer pools.BufioWriter32KPool.Put(twBuf) |
|
376 |
- // In general we log errors here but ignore them because |
|
377 |
- // during e.g. a diff operation the container can continue |
|
378 |
- // mutating the filesystem and we can see transient errors |
|
379 |
- // from this |
|
380 |
- for _, change := range changes { |
|
381 |
- if change.Kind == ChangeDelete { |
|
382 |
- whiteOutDir := filepath.Dir(change.Path) |
|
383 |
- whiteOutBase := filepath.Base(change.Path) |
|
384 |
- whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase) |
|
385 |
- timestamp := time.Now() |
|
386 |
- hdr := &tar.Header{ |
|
387 |
- Name: whiteOut[1:], |
|
388 |
- Size: 0, |
|
389 |
- ModTime: timestamp, |
|
390 |
- AccessTime: timestamp, |
|
391 |
- ChangeTime: timestamp, |
|
392 |
- } |
|
393 |
- if err := tw.WriteHeader(hdr); err != nil { |
|
394 |
- log.Debugf("Can't write whiteout header: %s", err) |
|
395 |
- } |
|
396 |
- } else { |
|
397 |
- path := filepath.Join(dir, change.Path) |
|
398 |
- if err := addTarFile(path, change.Path[1:], tw, twBuf); err != nil { |
|
399 |
- log.Debugf("Can't add file %s to tar: %s", path, err) |
|
400 |
- } |
|
401 |
- } |
|
402 |
- } |
|
403 |
- |
|
404 |
- // Make sure to check the error on Close. |
|
405 |
- if err := tw.Close(); err != nil { |
|
406 |
- log.Debugf("Can't close layer: %s", err) |
|
407 |
- } |
|
408 |
- writer.Close() |
|
409 |
- }() |
|
410 |
- return reader, nil |
|
411 |
-} |
412 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,301 +0,0 @@ |
1 |
-package archive |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "io/ioutil" |
|
5 |
- "os" |
|
6 |
- "os/exec" |
|
7 |
- "path" |
|
8 |
- "sort" |
|
9 |
- "testing" |
|
10 |
- "time" |
|
11 |
-) |
|
12 |
- |
|
13 |
-func max(x, y int) int { |
|
14 |
- if x >= y { |
|
15 |
- return x |
|
16 |
- } |
|
17 |
- return y |
|
18 |
-} |
|
19 |
- |
|
20 |
-func copyDir(src, dst string) error { |
|
21 |
- cmd := exec.Command("cp", "-a", src, dst) |
|
22 |
- if err := cmd.Run(); err != nil { |
|
23 |
- return err |
|
24 |
- } |
|
25 |
- return nil |
|
26 |
-} |
|
27 |
- |
|
28 |
-// Helper to sort []Change by path |
|
29 |
-type byPath struct{ changes []Change } |
|
30 |
- |
|
31 |
-func (b byPath) Less(i, j int) bool { return b.changes[i].Path < b.changes[j].Path } |
|
32 |
-func (b byPath) Len() int { return len(b.changes) } |
|
33 |
-func (b byPath) Swap(i, j int) { b.changes[i], b.changes[j] = b.changes[j], b.changes[i] } |
|
34 |
- |
|
35 |
-type FileType uint32 |
|
36 |
- |
|
37 |
-const ( |
|
38 |
- Regular FileType = iota |
|
39 |
- Dir |
|
40 |
- Symlink |
|
41 |
-) |
|
42 |
- |
|
43 |
-type FileData struct { |
|
44 |
- filetype FileType |
|
45 |
- path string |
|
46 |
- contents string |
|
47 |
- permissions os.FileMode |
|
48 |
-} |
|
49 |
- |
|
50 |
-func createSampleDir(t *testing.T, root string) { |
|
51 |
- files := []FileData{ |
|
52 |
- {Regular, "file1", "file1\n", 0600}, |
|
53 |
- {Regular, "file2", "file2\n", 0666}, |
|
54 |
- {Regular, "file3", "file3\n", 0404}, |
|
55 |
- {Regular, "file4", "file4\n", 0600}, |
|
56 |
- {Regular, "file5", "file5\n", 0600}, |
|
57 |
- {Regular, "file6", "file6\n", 0600}, |
|
58 |
- {Regular, "file7", "file7\n", 0600}, |
|
59 |
- {Dir, "dir1", "", 0740}, |
|
60 |
- {Regular, "dir1/file1-1", "file1-1\n", 01444}, |
|
61 |
- {Regular, "dir1/file1-2", "file1-2\n", 0666}, |
|
62 |
- {Dir, "dir2", "", 0700}, |
|
63 |
- {Regular, "dir2/file2-1", "file2-1\n", 0666}, |
|
64 |
- {Regular, "dir2/file2-2", "file2-2\n", 0666}, |
|
65 |
- {Dir, "dir3", "", 0700}, |
|
66 |
- {Regular, "dir3/file3-1", "file3-1\n", 0666}, |
|
67 |
- {Regular, "dir3/file3-2", "file3-2\n", 0666}, |
|
68 |
- {Dir, "dir4", "", 0700}, |
|
69 |
- {Regular, "dir4/file3-1", "file4-1\n", 0666}, |
|
70 |
- {Regular, "dir4/file3-2", "file4-2\n", 0666}, |
|
71 |
- {Symlink, "symlink1", "target1", 0666}, |
|
72 |
- {Symlink, "symlink2", "target2", 0666}, |
|
73 |
- } |
|
74 |
- |
|
75 |
- now := time.Now() |
|
76 |
- for _, info := range files { |
|
77 |
- p := path.Join(root, info.path) |
|
78 |
- if info.filetype == Dir { |
|
79 |
- if err := os.MkdirAll(p, info.permissions); err != nil { |
|
80 |
- t.Fatal(err) |
|
81 |
- } |
|
82 |
- } else if info.filetype == Regular { |
|
83 |
- if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { |
|
84 |
- t.Fatal(err) |
|
85 |
- } |
|
86 |
- } else if info.filetype == Symlink { |
|
87 |
- if err := os.Symlink(info.contents, p); err != nil { |
|
88 |
- t.Fatal(err) |
|
89 |
- } |
|
90 |
- } |
|
91 |
- |
|
92 |
- if info.filetype != Symlink { |
|
93 |
- // Set a consistent ctime, atime for all files and dirs |
|
94 |
- if err := os.Chtimes(p, now, now); err != nil { |
|
95 |
- t.Fatal(err) |
|
96 |
- } |
|
97 |
- } |
|
98 |
- } |
|
99 |
-} |
|
100 |
- |
|
101 |
-// Create an directory, copy it, make sure we report no changes between the two |
|
102 |
-func TestChangesDirsEmpty(t *testing.T) { |
|
103 |
- src, err := ioutil.TempDir("", "docker-changes-test") |
|
104 |
- if err != nil { |
|
105 |
- t.Fatal(err) |
|
106 |
- } |
|
107 |
- createSampleDir(t, src) |
|
108 |
- dst := src + "-copy" |
|
109 |
- if err := copyDir(src, dst); err != nil { |
|
110 |
- t.Fatal(err) |
|
111 |
- } |
|
112 |
- changes, err := ChangesDirs(dst, src) |
|
113 |
- if err != nil { |
|
114 |
- t.Fatal(err) |
|
115 |
- } |
|
116 |
- |
|
117 |
- if len(changes) != 0 { |
|
118 |
- t.Fatalf("Reported changes for identical dirs: %v", changes) |
|
119 |
- } |
|
120 |
- os.RemoveAll(src) |
|
121 |
- os.RemoveAll(dst) |
|
122 |
-} |
|
123 |
- |
|
124 |
-func mutateSampleDir(t *testing.T, root string) { |
|
125 |
- // Remove a regular file |
|
126 |
- if err := os.RemoveAll(path.Join(root, "file1")); err != nil { |
|
127 |
- t.Fatal(err) |
|
128 |
- } |
|
129 |
- |
|
130 |
- // Remove a directory |
|
131 |
- if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { |
|
132 |
- t.Fatal(err) |
|
133 |
- } |
|
134 |
- |
|
135 |
- // Remove a symlink |
|
136 |
- if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { |
|
137 |
- t.Fatal(err) |
|
138 |
- } |
|
139 |
- |
|
140 |
- // Rewrite a file |
|
141 |
- if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { |
|
142 |
- t.Fatal(err) |
|
143 |
- } |
|
144 |
- |
|
145 |
- // Replace a file |
|
146 |
- if err := os.RemoveAll(path.Join(root, "file3")); err != nil { |
|
147 |
- t.Fatal(err) |
|
148 |
- } |
|
149 |
- if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { |
|
150 |
- t.Fatal(err) |
|
151 |
- } |
|
152 |
- |
|
153 |
- // Touch file |
|
154 |
- if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { |
|
155 |
- t.Fatal(err) |
|
156 |
- } |
|
157 |
- |
|
158 |
- // Replace file with dir |
|
159 |
- if err := os.RemoveAll(path.Join(root, "file5")); err != nil { |
|
160 |
- t.Fatal(err) |
|
161 |
- } |
|
162 |
- if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { |
|
163 |
- t.Fatal(err) |
|
164 |
- } |
|
165 |
- |
|
166 |
- // Create new file |
|
167 |
- if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { |
|
168 |
- t.Fatal(err) |
|
169 |
- } |
|
170 |
- |
|
171 |
- // Create new dir |
|
172 |
- if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { |
|
173 |
- t.Fatal(err) |
|
174 |
- } |
|
175 |
- |
|
176 |
- // Create a new symlink |
|
177 |
- if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { |
|
178 |
- t.Fatal(err) |
|
179 |
- } |
|
180 |
- |
|
181 |
- // Change a symlink |
|
182 |
- if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { |
|
183 |
- t.Fatal(err) |
|
184 |
- } |
|
185 |
- if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { |
|
186 |
- t.Fatal(err) |
|
187 |
- } |
|
188 |
- |
|
189 |
- // Replace dir with file |
|
190 |
- if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { |
|
191 |
- t.Fatal(err) |
|
192 |
- } |
|
193 |
- if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { |
|
194 |
- t.Fatal(err) |
|
195 |
- } |
|
196 |
- |
|
197 |
- // Touch dir |
|
198 |
- if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { |
|
199 |
- t.Fatal(err) |
|
200 |
- } |
|
201 |
-} |
|
202 |
- |
|
203 |
-func TestChangesDirsMutated(t *testing.T) { |
|
204 |
- src, err := ioutil.TempDir("", "docker-changes-test") |
|
205 |
- if err != nil { |
|
206 |
- t.Fatal(err) |
|
207 |
- } |
|
208 |
- createSampleDir(t, src) |
|
209 |
- dst := src + "-copy" |
|
210 |
- if err := copyDir(src, dst); err != nil { |
|
211 |
- t.Fatal(err) |
|
212 |
- } |
|
213 |
- defer os.RemoveAll(src) |
|
214 |
- defer os.RemoveAll(dst) |
|
215 |
- |
|
216 |
- mutateSampleDir(t, dst) |
|
217 |
- |
|
218 |
- changes, err := ChangesDirs(dst, src) |
|
219 |
- if err != nil { |
|
220 |
- t.Fatal(err) |
|
221 |
- } |
|
222 |
- |
|
223 |
- sort.Sort(byPath{changes}) |
|
224 |
- |
|
225 |
- expectedChanges := []Change{ |
|
226 |
- {"/dir1", ChangeDelete}, |
|
227 |
- {"/dir2", ChangeModify}, |
|
228 |
- {"/dir3", ChangeModify}, |
|
229 |
- {"/dirnew", ChangeAdd}, |
|
230 |
- {"/file1", ChangeDelete}, |
|
231 |
- {"/file2", ChangeModify}, |
|
232 |
- {"/file3", ChangeModify}, |
|
233 |
- {"/file4", ChangeModify}, |
|
234 |
- {"/file5", ChangeModify}, |
|
235 |
- {"/filenew", ChangeAdd}, |
|
236 |
- {"/symlink1", ChangeDelete}, |
|
237 |
- {"/symlink2", ChangeModify}, |
|
238 |
- {"/symlinknew", ChangeAdd}, |
|
239 |
- } |
|
240 |
- |
|
241 |
- for i := 0; i < max(len(changes), len(expectedChanges)); i++ { |
|
242 |
- if i >= len(expectedChanges) { |
|
243 |
- t.Fatalf("unexpected change %s\n", changes[i].String()) |
|
244 |
- } |
|
245 |
- if i >= len(changes) { |
|
246 |
- t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) |
|
247 |
- } |
|
248 |
- if changes[i].Path == expectedChanges[i].Path { |
|
249 |
- if changes[i] != expectedChanges[i] { |
|
250 |
- t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) |
|
251 |
- } |
|
252 |
- } else if changes[i].Path < expectedChanges[i].Path { |
|
253 |
- t.Fatalf("unexpected change %s\n", changes[i].String()) |
|
254 |
- } else { |
|
255 |
- t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) |
|
256 |
- } |
|
257 |
- } |
|
258 |
-} |
|
259 |
- |
|
260 |
-func TestApplyLayer(t *testing.T) { |
|
261 |
- src, err := ioutil.TempDir("", "docker-changes-test") |
|
262 |
- if err != nil { |
|
263 |
- t.Fatal(err) |
|
264 |
- } |
|
265 |
- createSampleDir(t, src) |
|
266 |
- defer os.RemoveAll(src) |
|
267 |
- dst := src + "-copy" |
|
268 |
- if err := copyDir(src, dst); err != nil { |
|
269 |
- t.Fatal(err) |
|
270 |
- } |
|
271 |
- mutateSampleDir(t, dst) |
|
272 |
- defer os.RemoveAll(dst) |
|
273 |
- |
|
274 |
- changes, err := ChangesDirs(dst, src) |
|
275 |
- if err != nil { |
|
276 |
- t.Fatal(err) |
|
277 |
- } |
|
278 |
- |
|
279 |
- layer, err := ExportChanges(dst, changes) |
|
280 |
- if err != nil { |
|
281 |
- t.Fatal(err) |
|
282 |
- } |
|
283 |
- |
|
284 |
- layerCopy, err := NewTempArchive(layer, "") |
|
285 |
- if err != nil { |
|
286 |
- t.Fatal(err) |
|
287 |
- } |
|
288 |
- |
|
289 |
- if err := ApplyLayer(src, layerCopy); err != nil { |
|
290 |
- t.Fatal(err) |
|
291 |
- } |
|
292 |
- |
|
293 |
- changes2, err := ChangesDirs(src, dst) |
|
294 |
- if err != nil { |
|
295 |
- t.Fatal(err) |
|
296 |
- } |
|
297 |
- |
|
298 |
- if len(changes2) != 0 { |
|
299 |
- t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) |
|
300 |
- } |
|
301 |
-} |
302 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,156 +0,0 @@ |
1 |
-package archive |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "fmt" |
|
5 |
- "io" |
|
6 |
- "io/ioutil" |
|
7 |
- "os" |
|
8 |
- "path/filepath" |
|
9 |
- "strings" |
|
10 |
- "syscall" |
|
11 |
- |
|
12 |
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" |
|
13 |
- |
|
14 |
- "github.com/docker/docker/pkg/pools" |
|
15 |
-) |
|
16 |
- |
|
17 |
-// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. |
|
18 |
-// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, |
|
19 |
-// then the top 12 bits of the minor |
|
20 |
-func mkdev(major int64, minor int64) uint32 { |
|
21 |
- return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) |
|
22 |
-} |
|
23 |
- |
|
24 |
-// ApplyLayer parses a diff in the standard layer format from `layer`, and |
|
25 |
-// applies it to the directory `dest`. |
|
26 |
-func ApplyLayer(dest string, layer ArchiveReader) error { |
|
27 |
- // We need to be able to set any perms |
|
28 |
- oldmask := syscall.Umask(0) |
|
29 |
- defer syscall.Umask(oldmask) |
|
30 |
- |
|
31 |
- layer, err := DecompressStream(layer) |
|
32 |
- if err != nil { |
|
33 |
- return err |
|
34 |
- } |
|
35 |
- |
|
36 |
- tr := tar.NewReader(layer) |
|
37 |
- trBuf := pools.BufioReader32KPool.Get(tr) |
|
38 |
- defer pools.BufioReader32KPool.Put(trBuf) |
|
39 |
- |
|
40 |
- var dirs []*tar.Header |
|
41 |
- |
|
42 |
- aufsTempdir := "" |
|
43 |
- aufsHardlinks := make(map[string]*tar.Header) |
|
44 |
- |
|
45 |
- // Iterate through the files in the archive. |
|
46 |
- for { |
|
47 |
- hdr, err := tr.Next() |
|
48 |
- if err == io.EOF { |
|
49 |
- // end of tar archive |
|
50 |
- break |
|
51 |
- } |
|
52 |
- if err != nil { |
|
53 |
- return err |
|
54 |
- } |
|
55 |
- |
|
56 |
- // Normalize name, for safety and for a simple is-root check |
|
57 |
- hdr.Name = filepath.Clean(hdr.Name) |
|
58 |
- |
|
59 |
- if !strings.HasSuffix(hdr.Name, "/") { |
|
60 |
- // Not the root directory, ensure that the parent directory exists. |
|
61 |
- // This happened in some tests where an image had a tarfile without any |
|
62 |
- // parent directories. |
|
63 |
- parent := filepath.Dir(hdr.Name) |
|
64 |
- parentPath := filepath.Join(dest, parent) |
|
65 |
- if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { |
|
66 |
- err = os.MkdirAll(parentPath, 0600) |
|
67 |
- if err != nil { |
|
68 |
- return err |
|
69 |
- } |
|
70 |
- } |
|
71 |
- } |
|
72 |
- |
|
73 |
- // Skip AUFS metadata dirs |
|
74 |
- if strings.HasPrefix(hdr.Name, ".wh..wh.") { |
|
75 |
- // Regular files inside /.wh..wh.plnk can be used as hardlink targets |
|
76 |
- // We don't want this directory, but we need the files in them so that |
|
77 |
- // such hardlinks can be resolved. |
|
78 |
- if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg { |
|
79 |
- basename := filepath.Base(hdr.Name) |
|
80 |
- aufsHardlinks[basename] = hdr |
|
81 |
- if aufsTempdir == "" { |
|
82 |
- if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { |
|
83 |
- return err |
|
84 |
- } |
|
85 |
- defer os.RemoveAll(aufsTempdir) |
|
86 |
- } |
|
87 |
- if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true); err != nil { |
|
88 |
- return err |
|
89 |
- } |
|
90 |
- } |
|
91 |
- continue |
|
92 |
- } |
|
93 |
- |
|
94 |
- path := filepath.Join(dest, hdr.Name) |
|
95 |
- base := filepath.Base(path) |
|
96 |
- if strings.HasPrefix(base, ".wh.") { |
|
97 |
- originalBase := base[len(".wh."):] |
|
98 |
- originalPath := filepath.Join(filepath.Dir(path), originalBase) |
|
99 |
- if err := os.RemoveAll(originalPath); err != nil { |
|
100 |
- return err |
|
101 |
- } |
|
102 |
- } else { |
|
103 |
- // If path exits we almost always just want to remove and replace it. |
|
104 |
- // The only exception is when it is a directory *and* the file from |
|
105 |
- // the layer is also a directory. Then we want to merge them (i.e. |
|
106 |
- // just apply the metadata from the layer). |
|
107 |
- if fi, err := os.Lstat(path); err == nil { |
|
108 |
- if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { |
|
109 |
- if err := os.RemoveAll(path); err != nil { |
|
110 |
- return err |
|
111 |
- } |
|
112 |
- } |
|
113 |
- } |
|
114 |
- |
|
115 |
- trBuf.Reset(tr) |
|
116 |
- srcData := io.Reader(trBuf) |
|
117 |
- srcHdr := hdr |
|
118 |
- |
|
119 |
- // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so |
|
120 |
- // we manually retarget these into the temporary files we extracted them into |
|
121 |
- if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") { |
|
122 |
- linkBasename := filepath.Base(hdr.Linkname) |
|
123 |
- srcHdr = aufsHardlinks[linkBasename] |
|
124 |
- if srcHdr == nil { |
|
125 |
- return fmt.Errorf("Invalid aufs hardlink") |
|
126 |
- } |
|
127 |
- tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) |
|
128 |
- if err != nil { |
|
129 |
- return err |
|
130 |
- } |
|
131 |
- defer tmpFile.Close() |
|
132 |
- srcData = tmpFile |
|
133 |
- } |
|
134 |
- |
|
135 |
- if err := createTarFile(path, dest, srcHdr, srcData, true); err != nil { |
|
136 |
- return err |
|
137 |
- } |
|
138 |
- |
|
139 |
- // Directory mtimes must be handled at the end to avoid further |
|
140 |
- // file creation in them to modify the directory mtime |
|
141 |
- if hdr.Typeflag == tar.TypeDir { |
|
142 |
- dirs = append(dirs, hdr) |
|
143 |
- } |
|
144 |
- } |
|
145 |
- } |
|
146 |
- |
|
147 |
- for _, hdr := range dirs { |
|
148 |
- path := filepath.Join(dest, hdr.Name) |
|
149 |
- ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} |
|
150 |
- if err := syscall.UtimesNano(path, ts); err != nil { |
|
151 |
- return err |
|
152 |
- } |
|
153 |
- } |
|
154 |
- |
|
155 |
- return nil |
|
156 |
-} |
159 | 3 |
deleted file mode 100644 |
... | ... |
@@ -1,16 +0,0 @@ |
1 |
-package archive |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "syscall" |
|
5 |
- "time" |
|
6 |
-) |
|
7 |
- |
|
8 |
-func timeToTimespec(time time.Time) (ts syscall.Timespec) { |
|
9 |
- if time.IsZero() { |
|
10 |
- // Return UTIME_OMIT special value |
|
11 |
- ts.Sec = 0 |
|
12 |
- ts.Nsec = ((1 << 30) - 2) |
|
13 |
- return |
|
14 |
- } |
|
15 |
- return syscall.NsecToTimespec(time.UnixNano()) |
|
16 |
-} |
17 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,16 +0,0 @@ |
1 |
-// +build !linux |
|
2 |
- |
|
3 |
-package archive |
|
4 |
- |
|
5 |
-import ( |
|
6 |
- "syscall" |
|
7 |
- "time" |
|
8 |
-) |
|
9 |
- |
|
10 |
-func timeToTimespec(time time.Time) (ts syscall.Timespec) { |
|
11 |
- nsec := int64(0) |
|
12 |
- if !time.IsZero() { |
|
13 |
- nsec = time.UnixNano() |
|
14 |
- } |
|
15 |
- return syscall.NsecToTimespec(nsec) |
|
16 |
-} |
17 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,59 +0,0 @@ |
1 |
-package archive |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "bytes" |
|
5 |
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" |
|
6 |
- "io/ioutil" |
|
7 |
-) |
|
8 |
- |
|
9 |
-// Generate generates a new archive from the content provided |
|
10 |
-// as input. |
|
11 |
-// |
|
12 |
-// `files` is a sequence of path/content pairs. A new file is |
|
13 |
-// added to the archive for each pair. |
|
14 |
-// If the last pair is incomplete, the file is created with an |
|
15 |
-// empty content. For example: |
|
16 |
-// |
|
17 |
-// Generate("foo.txt", "hello world", "emptyfile") |
|
18 |
-// |
|
19 |
-// The above call will return an archive with 2 files: |
|
20 |
-// * ./foo.txt with content "hello world" |
|
21 |
-// * ./empty with empty content |
|
22 |
-// |
|
23 |
-// FIXME: stream content instead of buffering |
|
24 |
-// FIXME: specify permissions and other archive metadata |
|
25 |
-func Generate(input ...string) (Archive, error) { |
|
26 |
- files := parseStringPairs(input...) |
|
27 |
- buf := new(bytes.Buffer) |
|
28 |
- tw := tar.NewWriter(buf) |
|
29 |
- for _, file := range files { |
|
30 |
- name, content := file[0], file[1] |
|
31 |
- hdr := &tar.Header{ |
|
32 |
- Name: name, |
|
33 |
- Size: int64(len(content)), |
|
34 |
- } |
|
35 |
- if err := tw.WriteHeader(hdr); err != nil { |
|
36 |
- return nil, err |
|
37 |
- } |
|
38 |
- if _, err := tw.Write([]byte(content)); err != nil { |
|
39 |
- return nil, err |
|
40 |
- } |
|
41 |
- } |
|
42 |
- if err := tw.Close(); err != nil { |
|
43 |
- return nil, err |
|
44 |
- } |
|
45 |
- return ioutil.NopCloser(buf), nil |
|
46 |
-} |
|
47 |
- |
|
48 |
-func parseStringPairs(input ...string) (output [][2]string) { |
|
49 |
- output = make([][2]string, 0, len(input)/2+1) |
|
50 |
- for i := 0; i < len(input); i += 2 { |
|
51 |
- var pair [2]string |
|
52 |
- pair[0] = input[i] |
|
53 |
- if i+1 < len(input) { |
|
54 |
- pair[1] = input[i+1] |
|
55 |
- } |
|
56 |
- output = append(output, pair) |
|
57 |
- } |
|
58 |
- return |
|
59 |
-} |
... | ... |
@@ -18,9 +18,9 @@ import ( |
18 | 18 |
"syscall" |
19 | 19 |
"time" |
20 | 20 |
|
21 |
- "github.com/docker/docker/archive" |
|
22 | 21 |
"github.com/docker/docker/daemon" |
23 | 22 |
imagepkg "github.com/docker/docker/image" |
23 |
+ "github.com/docker/docker/pkg/archive" |
|
24 | 24 |
"github.com/docker/docker/pkg/log" |
25 | 25 |
"github.com/docker/docker/pkg/parsers" |
26 | 26 |
"github.com/docker/docker/pkg/promise" |
... | ... |
@@ -7,9 +7,9 @@ import ( |
7 | 7 |
"os/exec" |
8 | 8 |
"strings" |
9 | 9 |
|
10 |
- "github.com/docker/docker/archive" |
|
11 | 10 |
"github.com/docker/docker/daemon" |
12 | 11 |
"github.com/docker/docker/engine" |
12 |
+ "github.com/docker/docker/pkg/archive" |
|
13 | 13 |
"github.com/docker/docker/pkg/parsers" |
14 | 14 |
"github.com/docker/docker/registry" |
15 | 15 |
"github.com/docker/docker/utils" |
... | ... |
@@ -16,12 +16,12 @@ import ( |
16 | 16 |
"github.com/docker/libcontainer/devices" |
17 | 17 |
"github.com/docker/libcontainer/label" |
18 | 18 |
|
19 |
- "github.com/docker/docker/archive" |
|
20 | 19 |
"github.com/docker/docker/daemon/execdriver" |
21 | 20 |
"github.com/docker/docker/engine" |
22 | 21 |
"github.com/docker/docker/image" |
23 | 22 |
"github.com/docker/docker/links" |
24 | 23 |
"github.com/docker/docker/nat" |
24 |
+ "github.com/docker/docker/pkg/archive" |
|
25 | 25 |
"github.com/docker/docker/pkg/broadcastwriter" |
26 | 26 |
"github.com/docker/docker/pkg/ioutils" |
27 | 27 |
"github.com/docker/docker/pkg/log" |
... | ... |
@@ -15,7 +15,6 @@ import ( |
15 | 15 |
|
16 | 16 |
"github.com/docker/libcontainer/label" |
17 | 17 |
|
18 |
- "github.com/docker/docker/archive" |
|
19 | 18 |
"github.com/docker/docker/daemon/execdriver" |
20 | 19 |
"github.com/docker/docker/daemon/execdriver/execdrivers" |
21 | 20 |
"github.com/docker/docker/daemon/execdriver/lxc" |
... | ... |
@@ -27,6 +26,7 @@ import ( |
27 | 27 |
"github.com/docker/docker/engine" |
28 | 28 |
"github.com/docker/docker/graph" |
29 | 29 |
"github.com/docker/docker/image" |
30 |
+ "github.com/docker/docker/pkg/archive" |
|
30 | 31 |
"github.com/docker/docker/pkg/broadcastwriter" |
31 | 32 |
"github.com/docker/docker/pkg/graphdb" |
32 | 33 |
"github.com/docker/docker/pkg/ioutils" |
... | ... |
@@ -30,8 +30,8 @@ import ( |
30 | 30 |
"sync" |
31 | 31 |
"syscall" |
32 | 32 |
|
33 |
- "github.com/docker/docker/archive" |
|
34 | 33 |
"github.com/docker/docker/daemon/graphdriver" |
34 |
+ "github.com/docker/docker/pkg/archive" |
|
35 | 35 |
"github.com/docker/docker/pkg/log" |
36 | 36 |
mountpk "github.com/docker/docker/pkg/mount" |
37 | 37 |
"github.com/docker/docker/utils" |
... | ... |
@@ -9,8 +9,8 @@ import ( |
9 | 9 |
"strings" |
10 | 10 |
"syscall" |
11 | 11 |
|
12 |
- "github.com/docker/docker/archive" |
|
13 | 12 |
"github.com/docker/docker/daemon/execdriver" |
13 |
+ "github.com/docker/docker/pkg/archive" |
|
14 | 14 |
"github.com/docker/docker/pkg/log" |
15 | 15 |
"github.com/docker/docker/pkg/symlink" |
16 | 16 |
"github.com/docker/docker/volumes" |
... | ... |
@@ -12,10 +12,10 @@ import ( |
12 | 12 |
"syscall" |
13 | 13 |
"time" |
14 | 14 |
|
15 |
- "github.com/docker/docker/archive" |
|
16 | 15 |
"github.com/docker/docker/daemon/graphdriver" |
17 | 16 |
"github.com/docker/docker/dockerversion" |
18 | 17 |
"github.com/docker/docker/image" |
18 |
+ "github.com/docker/docker/pkg/archive" |
|
19 | 19 |
"github.com/docker/docker/pkg/log" |
20 | 20 |
"github.com/docker/docker/pkg/truncindex" |
21 | 21 |
"github.com/docker/docker/runconfig" |
... | ... |
@@ -7,8 +7,8 @@ import ( |
7 | 7 |
"os" |
8 | 8 |
"path" |
9 | 9 |
|
10 |
- "github.com/docker/docker/archive" |
|
11 | 10 |
"github.com/docker/docker/engine" |
11 |
+ "github.com/docker/docker/pkg/archive" |
|
12 | 12 |
"github.com/docker/docker/pkg/log" |
13 | 13 |
"github.com/docker/docker/registry" |
14 | 14 |
"github.com/docker/docker/utils" |
... | ... |
@@ -2,11 +2,11 @@ package docker |
2 | 2 |
|
3 | 3 |
import ( |
4 | 4 |
"errors" |
5 |
- "github.com/docker/docker/archive" |
|
6 | 5 |
"github.com/docker/docker/daemon/graphdriver" |
7 | 6 |
"github.com/docker/docker/dockerversion" |
8 | 7 |
"github.com/docker/docker/graph" |
9 | 8 |
"github.com/docker/docker/image" |
9 |
+ "github.com/docker/docker/pkg/archive" |
|
10 | 10 |
"github.com/docker/docker/utils" |
11 | 11 |
"io" |
12 | 12 |
"io/ioutil" |
0 | 1 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,706 @@ |
0 |
+package archive |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "bufio" |
|
4 |
+ "bytes" |
|
5 |
+ "compress/bzip2" |
|
6 |
+ "compress/gzip" |
|
7 |
+ "errors" |
|
8 |
+ "fmt" |
|
9 |
+ "io" |
|
10 |
+ "io/ioutil" |
|
11 |
+ "os" |
|
12 |
+ "os/exec" |
|
13 |
+ "path" |
|
14 |
+ "path/filepath" |
|
15 |
+ "strings" |
|
16 |
+ "syscall" |
|
17 |
+ |
|
18 |
+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" |
|
19 |
+ |
|
20 |
+ "github.com/docker/docker/pkg/fileutils" |
|
21 |
+ "github.com/docker/docker/pkg/log" |
|
22 |
+ "github.com/docker/docker/pkg/pools" |
|
23 |
+ "github.com/docker/docker/pkg/promise" |
|
24 |
+ "github.com/docker/docker/pkg/system" |
|
25 |
+) |
|
26 |
+ |
|
27 |
+type ( |
|
28 |
+ Archive io.ReadCloser |
|
29 |
+ ArchiveReader io.Reader |
|
30 |
+ Compression int |
|
31 |
+ TarOptions struct { |
|
32 |
+ Includes []string |
|
33 |
+ Excludes []string |
|
34 |
+ Compression Compression |
|
35 |
+ NoLchown bool |
|
36 |
+ } |
|
37 |
+) |
|
38 |
+ |
|
39 |
+var ( |
|
40 |
+ ErrNotImplemented = errors.New("Function not implemented") |
|
41 |
+) |
|
42 |
+ |
|
43 |
+const ( |
|
44 |
+ Uncompressed Compression = iota |
|
45 |
+ Bzip2 |
|
46 |
+ Gzip |
|
47 |
+ Xz |
|
48 |
+) |
|
49 |
+ |
|
50 |
+func IsArchive(header []byte) bool { |
|
51 |
+ compression := DetectCompression(header) |
|
52 |
+ if compression != Uncompressed { |
|
53 |
+ return true |
|
54 |
+ } |
|
55 |
+ r := tar.NewReader(bytes.NewBuffer(header)) |
|
56 |
+ _, err := r.Next() |
|
57 |
+ return err == nil |
|
58 |
+} |
|
59 |
+ |
|
60 |
+func DetectCompression(source []byte) Compression { |
|
61 |
+ for compression, m := range map[Compression][]byte{ |
|
62 |
+ Bzip2: {0x42, 0x5A, 0x68}, |
|
63 |
+ Gzip: {0x1F, 0x8B, 0x08}, |
|
64 |
+ Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, |
|
65 |
+ } { |
|
66 |
+ if len(source) < len(m) { |
|
67 |
+ log.Debugf("Len too short") |
|
68 |
+ continue |
|
69 |
+ } |
|
70 |
+ if bytes.Compare(m, source[:len(m)]) == 0 { |
|
71 |
+ return compression |
|
72 |
+ } |
|
73 |
+ } |
|
74 |
+ return Uncompressed |
|
75 |
+} |
|
76 |
+ |
|
77 |
+func xzDecompress(archive io.Reader) (io.ReadCloser, error) { |
|
78 |
+ args := []string{"xz", "-d", "-c", "-q"} |
|
79 |
+ |
|
80 |
+ return CmdStream(exec.Command(args[0], args[1:]...), archive) |
|
81 |
+} |
|
82 |
+ |
|
83 |
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) { |
|
84 |
+ p := pools.BufioReader32KPool |
|
85 |
+ buf := p.Get(archive) |
|
86 |
+ bs, err := buf.Peek(10) |
|
87 |
+ if err != nil { |
|
88 |
+ return nil, err |
|
89 |
+ } |
|
90 |
+ log.Debugf("[tar autodetect] n: %v", bs) |
|
91 |
+ |
|
92 |
+ compression := DetectCompression(bs) |
|
93 |
+ switch compression { |
|
94 |
+ case Uncompressed: |
|
95 |
+ readBufWrapper := p.NewReadCloserWrapper(buf, buf) |
|
96 |
+ return readBufWrapper, nil |
|
97 |
+ case Gzip: |
|
98 |
+ gzReader, err := gzip.NewReader(buf) |
|
99 |
+ if err != nil { |
|
100 |
+ return nil, err |
|
101 |
+ } |
|
102 |
+ readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) |
|
103 |
+ return readBufWrapper, nil |
|
104 |
+ case Bzip2: |
|
105 |
+ bz2Reader := bzip2.NewReader(buf) |
|
106 |
+ readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) |
|
107 |
+ return readBufWrapper, nil |
|
108 |
+ case Xz: |
|
109 |
+ xzReader, err := xzDecompress(buf) |
|
110 |
+ if err != nil { |
|
111 |
+ return nil, err |
|
112 |
+ } |
|
113 |
+ readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) |
|
114 |
+ return readBufWrapper, nil |
|
115 |
+ default: |
|
116 |
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) |
|
117 |
+ } |
|
118 |
+} |
|
119 |
+ |
|
120 |
+func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { |
|
121 |
+ p := pools.BufioWriter32KPool |
|
122 |
+ buf := p.Get(dest) |
|
123 |
+ switch compression { |
|
124 |
+ case Uncompressed: |
|
125 |
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) |
|
126 |
+ return writeBufWrapper, nil |
|
127 |
+ case Gzip: |
|
128 |
+ gzWriter := gzip.NewWriter(dest) |
|
129 |
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) |
|
130 |
+ return writeBufWrapper, nil |
|
131 |
+ case Bzip2, Xz: |
|
132 |
+ // archive/bzip2 does not support writing, and there is no xz support at all |
|
133 |
+ // However, this is not a problem as docker only currently generates gzipped tars |
|
134 |
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) |
|
135 |
+ default: |
|
136 |
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) |
|
137 |
+ } |
|
138 |
+} |
|
139 |
+ |
|
140 |
+func (compression *Compression) Extension() string { |
|
141 |
+ switch *compression { |
|
142 |
+ case Uncompressed: |
|
143 |
+ return "tar" |
|
144 |
+ case Bzip2: |
|
145 |
+ return "tar.bz2" |
|
146 |
+ case Gzip: |
|
147 |
+ return "tar.gz" |
|
148 |
+ case Xz: |
|
149 |
+ return "tar.xz" |
|
150 |
+ } |
|
151 |
+ return "" |
|
152 |
+} |
|
153 |
+ |
|
154 |
+func addTarFile(path, name string, tw *tar.Writer, twBuf *bufio.Writer) error { |
|
155 |
+ fi, err := os.Lstat(path) |
|
156 |
+ if err != nil { |
|
157 |
+ return err |
|
158 |
+ } |
|
159 |
+ |
|
160 |
+ link := "" |
|
161 |
+ if fi.Mode()&os.ModeSymlink != 0 { |
|
162 |
+ if link, err = os.Readlink(path); err != nil { |
|
163 |
+ return err |
|
164 |
+ } |
|
165 |
+ } |
|
166 |
+ |
|
167 |
+ hdr, err := tar.FileInfoHeader(fi, link) |
|
168 |
+ if err != nil { |
|
169 |
+ return err |
|
170 |
+ } |
|
171 |
+ |
|
172 |
+ if fi.IsDir() && !strings.HasSuffix(name, "/") { |
|
173 |
+ name = name + "/" |
|
174 |
+ } |
|
175 |
+ |
|
176 |
+ hdr.Name = name |
|
177 |
+ |
|
178 |
+ stat, ok := fi.Sys().(*syscall.Stat_t) |
|
179 |
+ if ok { |
|
180 |
+ // Currently go does not fill in the major/minors |
|
181 |
+ if stat.Mode&syscall.S_IFBLK == syscall.S_IFBLK || |
|
182 |
+ stat.Mode&syscall.S_IFCHR == syscall.S_IFCHR { |
|
183 |
+ hdr.Devmajor = int64(major(uint64(stat.Rdev))) |
|
184 |
+ hdr.Devminor = int64(minor(uint64(stat.Rdev))) |
|
185 |
+ } |
|
186 |
+ |
|
187 |
+ } |
|
188 |
+ |
|
189 |
+ capability, _ := system.Lgetxattr(path, "security.capability") |
|
190 |
+ if capability != nil { |
|
191 |
+ hdr.Xattrs = make(map[string]string) |
|
192 |
+ hdr.Xattrs["security.capability"] = string(capability) |
|
193 |
+ } |
|
194 |
+ |
|
195 |
+ if err := tw.WriteHeader(hdr); err != nil { |
|
196 |
+ return err |
|
197 |
+ } |
|
198 |
+ |
|
199 |
+ if hdr.Typeflag == tar.TypeReg { |
|
200 |
+ file, err := os.Open(path) |
|
201 |
+ if err != nil { |
|
202 |
+ return err |
|
203 |
+ } |
|
204 |
+ |
|
205 |
+ twBuf.Reset(tw) |
|
206 |
+ _, err = io.Copy(twBuf, file) |
|
207 |
+ file.Close() |
|
208 |
+ if err != nil { |
|
209 |
+ return err |
|
210 |
+ } |
|
211 |
+ err = twBuf.Flush() |
|
212 |
+ if err != nil { |
|
213 |
+ return err |
|
214 |
+ } |
|
215 |
+ twBuf.Reset(nil) |
|
216 |
+ } |
|
217 |
+ |
|
218 |
+ return nil |
|
219 |
+} |
|
220 |
+ |
|
221 |
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool) error { |
|
222 |
+ // hdr.Mode is in linux format, which we can use for sycalls, |
|
223 |
+ // but for os.Foo() calls we need the mode converted to os.FileMode, |
|
224 |
+ // so use hdrInfo.Mode() (they differ for e.g. setuid bits) |
|
225 |
+ hdrInfo := hdr.FileInfo() |
|
226 |
+ |
|
227 |
+ switch hdr.Typeflag { |
|
228 |
+ case tar.TypeDir: |
|
229 |
+ // Create directory unless it exists as a directory already. |
|
230 |
+ // In that case we just want to merge the two |
|
231 |
+ if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { |
|
232 |
+ if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { |
|
233 |
+ return err |
|
234 |
+ } |
|
235 |
+ } |
|
236 |
+ |
|
237 |
+ case tar.TypeReg, tar.TypeRegA: |
|
238 |
+ // Source is regular file |
|
239 |
+ file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) |
|
240 |
+ if err != nil { |
|
241 |
+ return err |
|
242 |
+ } |
|
243 |
+ if _, err := io.Copy(file, reader); err != nil { |
|
244 |
+ file.Close() |
|
245 |
+ return err |
|
246 |
+ } |
|
247 |
+ file.Close() |
|
248 |
+ |
|
249 |
+ case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: |
|
250 |
+ mode := uint32(hdr.Mode & 07777) |
|
251 |
+ switch hdr.Typeflag { |
|
252 |
+ case tar.TypeBlock: |
|
253 |
+ mode |= syscall.S_IFBLK |
|
254 |
+ case tar.TypeChar: |
|
255 |
+ mode |= syscall.S_IFCHR |
|
256 |
+ case tar.TypeFifo: |
|
257 |
+ mode |= syscall.S_IFIFO |
|
258 |
+ } |
|
259 |
+ |
|
260 |
+ if err := syscall.Mknod(path, mode, int(mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { |
|
261 |
+ return err |
|
262 |
+ } |
|
263 |
+ |
|
264 |
+ case tar.TypeLink: |
|
265 |
+ if err := os.Link(filepath.Join(extractDir, hdr.Linkname), path); err != nil { |
|
266 |
+ return err |
|
267 |
+ } |
|
268 |
+ |
|
269 |
+ case tar.TypeSymlink: |
|
270 |
+ if err := os.Symlink(hdr.Linkname, path); err != nil { |
|
271 |
+ return err |
|
272 |
+ } |
|
273 |
+ |
|
274 |
+ case tar.TypeXGlobalHeader: |
|
275 |
+ log.Debugf("PAX Global Extended Headers found and ignored") |
|
276 |
+ return nil |
|
277 |
+ |
|
278 |
+ default: |
|
279 |
+ return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) |
|
280 |
+ } |
|
281 |
+ |
|
282 |
+ if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown { |
|
283 |
+ return err |
|
284 |
+ } |
|
285 |
+ |
|
286 |
+ for key, value := range hdr.Xattrs { |
|
287 |
+ if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { |
|
288 |
+ return err |
|
289 |
+ } |
|
290 |
+ } |
|
291 |
+ |
|
292 |
+ // There is no LChmod, so ignore mode for symlink. Also, this |
|
293 |
+ // must happen after chown, as that can modify the file mode |
|
294 |
+ if hdr.Typeflag != tar.TypeSymlink { |
|
295 |
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil { |
|
296 |
+ return err |
|
297 |
+ } |
|
298 |
+ } |
|
299 |
+ |
|
300 |
+ ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} |
|
301 |
+ // syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and |
|
302 |
+ if hdr.Typeflag != tar.TypeSymlink { |
|
303 |
+ if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { |
|
304 |
+ return err |
|
305 |
+ } |
|
306 |
+ } else { |
|
307 |
+ if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { |
|
308 |
+ return err |
|
309 |
+ } |
|
310 |
+ } |
|
311 |
+ return nil |
|
312 |
+} |
|
313 |
+ |
|
314 |
+// Tar creates an archive from the directory at `path`, and returns it as a |
|
315 |
+// stream of bytes. |
|
316 |
+func Tar(path string, compression Compression) (io.ReadCloser, error) { |
|
317 |
+ return TarWithOptions(path, &TarOptions{Compression: compression}) |
|
318 |
+} |
|
319 |
+ |
|
320 |
+func escapeName(name string) string { |
|
321 |
+ escaped := make([]byte, 0) |
|
322 |
+ for i, c := range []byte(name) { |
|
323 |
+ if i == 0 && c == '/' { |
|
324 |
+ continue |
|
325 |
+ } |
|
326 |
+ // all printable chars except "-" which is 0x2d |
|
327 |
+ if (0x20 <= c && c <= 0x7E) && c != 0x2d { |
|
328 |
+ escaped = append(escaped, c) |
|
329 |
+ } else { |
|
330 |
+ escaped = append(escaped, fmt.Sprintf("\\%03o", c)...) |
|
331 |
+ } |
|
332 |
+ } |
|
333 |
+ return string(escaped) |
|
334 |
+} |
|
335 |
+ |
|
336 |
+// TarWithOptions creates an archive from the directory at `path`, only including files whose relative |
|
337 |
+// paths are included in `options.Includes` (if non-nil) or not in `options.Excludes`. |
|
338 |
+func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { |
|
339 |
+ pipeReader, pipeWriter := io.Pipe() |
|
340 |
+ |
|
341 |
+ compressWriter, err := CompressStream(pipeWriter, options.Compression) |
|
342 |
+ if err != nil { |
|
343 |
+ return nil, err |
|
344 |
+ } |
|
345 |
+ |
|
346 |
+ tw := tar.NewWriter(compressWriter) |
|
347 |
+ |
|
348 |
+ go func() { |
|
349 |
+ // In general we log errors here but ignore them because |
|
350 |
+ // during e.g. a diff operation the container can continue |
|
351 |
+ // mutating the filesystem and we can see transient errors |
|
352 |
+ // from this |
|
353 |
+ |
|
354 |
+ if options.Includes == nil { |
|
355 |
+ options.Includes = []string{"."} |
|
356 |
+ } |
|
357 |
+ |
|
358 |
+ twBuf := pools.BufioWriter32KPool.Get(nil) |
|
359 |
+ defer pools.BufioWriter32KPool.Put(twBuf) |
|
360 |
+ |
|
361 |
+ for _, include := range options.Includes { |
|
362 |
+ filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error { |
|
363 |
+ if err != nil { |
|
364 |
+ log.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) |
|
365 |
+ return nil |
|
366 |
+ } |
|
367 |
+ |
|
368 |
+ relFilePath, err := filepath.Rel(srcPath, filePath) |
|
369 |
+ if err != nil { |
|
370 |
+ return nil |
|
371 |
+ } |
|
372 |
+ |
|
373 |
+ skip, err := fileutils.Matches(relFilePath, options.Excludes) |
|
374 |
+ if err != nil { |
|
375 |
+ log.Debugf("Error matching %s", relFilePath, err) |
|
376 |
+ return err |
|
377 |
+ } |
|
378 |
+ |
|
379 |
+ if skip { |
|
380 |
+ if f.IsDir() { |
|
381 |
+ return filepath.SkipDir |
|
382 |
+ } |
|
383 |
+ return nil |
|
384 |
+ } |
|
385 |
+ |
|
386 |
+ if err := addTarFile(filePath, relFilePath, tw, twBuf); err != nil { |
|
387 |
+ log.Debugf("Can't add file %s to tar: %s", srcPath, err) |
|
388 |
+ } |
|
389 |
+ return nil |
|
390 |
+ }) |
|
391 |
+ } |
|
392 |
+ |
|
393 |
+ // Make sure to check the error on Close. |
|
394 |
+ if err := tw.Close(); err != nil { |
|
395 |
+ log.Debugf("Can't close tar writer: %s", err) |
|
396 |
+ } |
|
397 |
+ if err := compressWriter.Close(); err != nil { |
|
398 |
+ log.Debugf("Can't close compress writer: %s", err) |
|
399 |
+ } |
|
400 |
+ if err := pipeWriter.Close(); err != nil { |
|
401 |
+ log.Debugf("Can't close pipe writer: %s", err) |
|
402 |
+ } |
|
403 |
+ }() |
|
404 |
+ |
|
405 |
+ return pipeReader, nil |
|
406 |
+} |
|
407 |
+ |
|
408 |
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive, |
|
409 |
+// and unpacks it into the directory at `path`. |
|
410 |
+// The archive may be compressed with one of the following algorithms: |
|
411 |
+// identity (uncompressed), gzip, bzip2, xz. |
|
412 |
+// FIXME: specify behavior when target path exists vs. doesn't exist. |
|
413 |
+func Untar(archive io.Reader, dest string, options *TarOptions) error { |
|
414 |
+ if options == nil { |
|
415 |
+ options = &TarOptions{} |
|
416 |
+ } |
|
417 |
+ |
|
418 |
+ if archive == nil { |
|
419 |
+ return fmt.Errorf("Empty archive") |
|
420 |
+ } |
|
421 |
+ |
|
422 |
+ if options.Excludes == nil { |
|
423 |
+ options.Excludes = []string{} |
|
424 |
+ } |
|
425 |
+ |
|
426 |
+ decompressedArchive, err := DecompressStream(archive) |
|
427 |
+ if err != nil { |
|
428 |
+ return err |
|
429 |
+ } |
|
430 |
+ defer decompressedArchive.Close() |
|
431 |
+ |
|
432 |
+ tr := tar.NewReader(decompressedArchive) |
|
433 |
+ trBuf := pools.BufioReader32KPool.Get(nil) |
|
434 |
+ defer pools.BufioReader32KPool.Put(trBuf) |
|
435 |
+ |
|
436 |
+ var dirs []*tar.Header |
|
437 |
+ |
|
438 |
+ // Iterate through the files in the archive. |
|
439 |
+loop: |
|
440 |
+ for { |
|
441 |
+ hdr, err := tr.Next() |
|
442 |
+ if err == io.EOF { |
|
443 |
+ // end of tar archive |
|
444 |
+ break |
|
445 |
+ } |
|
446 |
+ if err != nil { |
|
447 |
+ return err |
|
448 |
+ } |
|
449 |
+ |
|
450 |
+ // Normalize name, for safety and for a simple is-root check |
|
451 |
+ hdr.Name = filepath.Clean(hdr.Name) |
|
452 |
+ |
|
453 |
+ for _, exclude := range options.Excludes { |
|
454 |
+ if strings.HasPrefix(hdr.Name, exclude) { |
|
455 |
+ continue loop |
|
456 |
+ } |
|
457 |
+ } |
|
458 |
+ |
|
459 |
+ if !strings.HasSuffix(hdr.Name, "/") { |
|
460 |
+ // Not the root directory, ensure that the parent directory exists |
|
461 |
+ parent := filepath.Dir(hdr.Name) |
|
462 |
+ parentPath := filepath.Join(dest, parent) |
|
463 |
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { |
|
464 |
+ err = os.MkdirAll(parentPath, 0777) |
|
465 |
+ if err != nil { |
|
466 |
+ return err |
|
467 |
+ } |
|
468 |
+ } |
|
469 |
+ } |
|
470 |
+ |
|
471 |
+ path := filepath.Join(dest, hdr.Name) |
|
472 |
+ |
|
473 |
+ // If path exits we almost always just want to remove and replace it |
|
474 |
+ // The only exception is when it is a directory *and* the file from |
|
475 |
+ // the layer is also a directory. Then we want to merge them (i.e. |
|
476 |
+ // just apply the metadata from the layer). |
|
477 |
+ if fi, err := os.Lstat(path); err == nil { |
|
478 |
+ if fi.IsDir() && hdr.Name == "." { |
|
479 |
+ continue |
|
480 |
+ } |
|
481 |
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { |
|
482 |
+ if err := os.RemoveAll(path); err != nil { |
|
483 |
+ return err |
|
484 |
+ } |
|
485 |
+ } |
|
486 |
+ } |
|
487 |
+ trBuf.Reset(tr) |
|
488 |
+ if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown); err != nil { |
|
489 |
+ return err |
|
490 |
+ } |
|
491 |
+ |
|
492 |
+ // Directory mtimes must be handled at the end to avoid further |
|
493 |
+ // file creation in them to modify the directory mtime |
|
494 |
+ if hdr.Typeflag == tar.TypeDir { |
|
495 |
+ dirs = append(dirs, hdr) |
|
496 |
+ } |
|
497 |
+ } |
|
498 |
+ |
|
499 |
+ for _, hdr := range dirs { |
|
500 |
+ path := filepath.Join(dest, hdr.Name) |
|
501 |
+ ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} |
|
502 |
+ if err := syscall.UtimesNano(path, ts); err != nil { |
|
503 |
+ return err |
|
504 |
+ } |
|
505 |
+ } |
|
506 |
+ |
|
507 |
+ return nil |
|
508 |
+} |
|
509 |
+ |
|
510 |
+// TarUntar is a convenience function which calls Tar and Untar, with |
|
511 |
+// the output of one piped into the other. If either Tar or Untar fails, |
|
512 |
+// TarUntar aborts and returns the error. |
|
513 |
+func TarUntar(src string, dst string) error { |
|
514 |
+ log.Debugf("TarUntar(%s %s)", src, dst) |
|
515 |
+ archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) |
|
516 |
+ if err != nil { |
|
517 |
+ return err |
|
518 |
+ } |
|
519 |
+ defer archive.Close() |
|
520 |
+ return Untar(archive, dst, nil) |
|
521 |
+} |
|
522 |
+ |
|
523 |
+// UntarPath is a convenience function which looks for an archive |
|
524 |
+// at filesystem path `src`, and unpacks it at `dst`. |
|
525 |
+func UntarPath(src, dst string) error { |
|
526 |
+ archive, err := os.Open(src) |
|
527 |
+ if err != nil { |
|
528 |
+ return err |
|
529 |
+ } |
|
530 |
+ defer archive.Close() |
|
531 |
+ if err := Untar(archive, dst, nil); err != nil { |
|
532 |
+ return err |
|
533 |
+ } |
|
534 |
+ return nil |
|
535 |
+} |
|
536 |
+ |
|
537 |
+// CopyWithTar creates a tar archive of filesystem path `src`, and |
|
538 |
+// unpacks it at filesystem path `dst`. |
|
539 |
+// The archive is streamed directly with fixed buffering and no |
|
540 |
+// intermediary disk IO. |
|
541 |
+// |
|
542 |
+func CopyWithTar(src, dst string) error { |
|
543 |
+ srcSt, err := os.Stat(src) |
|
544 |
+ if err != nil { |
|
545 |
+ return err |
|
546 |
+ } |
|
547 |
+ if !srcSt.IsDir() { |
|
548 |
+ return CopyFileWithTar(src, dst) |
|
549 |
+ } |
|
550 |
+ // Create dst, copy src's content into it |
|
551 |
+ log.Debugf("Creating dest directory: %s", dst) |
|
552 |
+ if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) { |
|
553 |
+ return err |
|
554 |
+ } |
|
555 |
+ log.Debugf("Calling TarUntar(%s, %s)", src, dst) |
|
556 |
+ return TarUntar(src, dst) |
|
557 |
+} |
|
558 |
+ |
|
559 |
+// CopyFileWithTar emulates the behavior of the 'cp' command-line |
|
560 |
+// for a single file. It copies a regular file from path `src` to |
|
561 |
+// path `dst`, and preserves all its metadata. |
|
562 |
+// |
|
563 |
+// If `dst` ends with a trailing slash '/', the final destination path |
|
564 |
+// will be `dst/base(src)`. |
|
565 |
+func CopyFileWithTar(src, dst string) (err error) { |
|
566 |
+ log.Debugf("CopyFileWithTar(%s, %s)", src, dst) |
|
567 |
+ srcSt, err := os.Stat(src) |
|
568 |
+ if err != nil { |
|
569 |
+ return err |
|
570 |
+ } |
|
571 |
+ if srcSt.IsDir() { |
|
572 |
+ return fmt.Errorf("Can't copy a directory") |
|
573 |
+ } |
|
574 |
+ // Clean up the trailing / |
|
575 |
+ if dst[len(dst)-1] == '/' { |
|
576 |
+ dst = path.Join(dst, filepath.Base(src)) |
|
577 |
+ } |
|
578 |
+ // Create the holding directory if necessary |
|
579 |
+ if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) { |
|
580 |
+ return err |
|
581 |
+ } |
|
582 |
+ |
|
583 |
+ r, w := io.Pipe() |
|
584 |
+ errC := promise.Go(func() error { |
|
585 |
+ defer w.Close() |
|
586 |
+ |
|
587 |
+ srcF, err := os.Open(src) |
|
588 |
+ if err != nil { |
|
589 |
+ return err |
|
590 |
+ } |
|
591 |
+ defer srcF.Close() |
|
592 |
+ |
|
593 |
+ hdr, err := tar.FileInfoHeader(srcSt, "") |
|
594 |
+ if err != nil { |
|
595 |
+ return err |
|
596 |
+ } |
|
597 |
+ hdr.Name = filepath.Base(dst) |
|
598 |
+ tw := tar.NewWriter(w) |
|
599 |
+ defer tw.Close() |
|
600 |
+ if err := tw.WriteHeader(hdr); err != nil { |
|
601 |
+ return err |
|
602 |
+ } |
|
603 |
+ if _, err := io.Copy(tw, srcF); err != nil { |
|
604 |
+ return err |
|
605 |
+ } |
|
606 |
+ return nil |
|
607 |
+ }) |
|
608 |
+ defer func() { |
|
609 |
+ if er := <-errC; err != nil { |
|
610 |
+ err = er |
|
611 |
+ } |
|
612 |
+ }() |
|
613 |
+ return Untar(r, filepath.Dir(dst), nil) |
|
614 |
+} |
|
615 |
+ |
|
616 |
+// CmdStream executes a command, and returns its stdout as a stream. |
|
617 |
+// If the command fails to run or doesn't complete successfully, an error |
|
618 |
+// will be returned, including anything written on stderr. |
|
619 |
+func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { |
|
620 |
+ if input != nil { |
|
621 |
+ stdin, err := cmd.StdinPipe() |
|
622 |
+ if err != nil { |
|
623 |
+ return nil, err |
|
624 |
+ } |
|
625 |
+ // Write stdin if any |
|
626 |
+ go func() { |
|
627 |
+ io.Copy(stdin, input) |
|
628 |
+ stdin.Close() |
|
629 |
+ }() |
|
630 |
+ } |
|
631 |
+ stdout, err := cmd.StdoutPipe() |
|
632 |
+ if err != nil { |
|
633 |
+ return nil, err |
|
634 |
+ } |
|
635 |
+ stderr, err := cmd.StderrPipe() |
|
636 |
+ if err != nil { |
|
637 |
+ return nil, err |
|
638 |
+ } |
|
639 |
+ pipeR, pipeW := io.Pipe() |
|
640 |
+ errChan := make(chan []byte) |
|
641 |
+ // Collect stderr, we will use it in case of an error |
|
642 |
+ go func() { |
|
643 |
+ errText, e := ioutil.ReadAll(stderr) |
|
644 |
+ if e != nil { |
|
645 |
+ errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")") |
|
646 |
+ } |
|
647 |
+ errChan <- errText |
|
648 |
+ }() |
|
649 |
+ // Copy stdout to the returned pipe |
|
650 |
+ go func() { |
|
651 |
+ _, err := io.Copy(pipeW, stdout) |
|
652 |
+ if err != nil { |
|
653 |
+ pipeW.CloseWithError(err) |
|
654 |
+ } |
|
655 |
+ errText := <-errChan |
|
656 |
+ if err := cmd.Wait(); err != nil { |
|
657 |
+ pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText)) |
|
658 |
+ } else { |
|
659 |
+ pipeW.Close() |
|
660 |
+ } |
|
661 |
+ }() |
|
662 |
+ // Run the command and return the pipe |
|
663 |
+ if err := cmd.Start(); err != nil { |
|
664 |
+ return nil, err |
|
665 |
+ } |
|
666 |
+ return pipeR, nil |
|
667 |
+} |
|
668 |
+ |
|
669 |
+// NewTempArchive reads the content of src into a temporary file, and returns the contents |
|
670 |
+// of that file as an archive. The archive can only be read once - as soon as reading completes, |
|
671 |
+// the file will be deleted. |
|
672 |
+func NewTempArchive(src Archive, dir string) (*TempArchive, error) { |
|
673 |
+ f, err := ioutil.TempFile(dir, "") |
|
674 |
+ if err != nil { |
|
675 |
+ return nil, err |
|
676 |
+ } |
|
677 |
+ if _, err := io.Copy(f, src); err != nil { |
|
678 |
+ return nil, err |
|
679 |
+ } |
|
680 |
+ if err = f.Sync(); err != nil { |
|
681 |
+ return nil, err |
|
682 |
+ } |
|
683 |
+ if _, err := f.Seek(0, 0); err != nil { |
|
684 |
+ return nil, err |
|
685 |
+ } |
|
686 |
+ st, err := f.Stat() |
|
687 |
+ if err != nil { |
|
688 |
+ return nil, err |
|
689 |
+ } |
|
690 |
+ size := st.Size() |
|
691 |
+ return &TempArchive{f, size}, nil |
|
692 |
+} |
|
693 |
+ |
|
694 |
+type TempArchive struct { |
|
695 |
+ *os.File |
|
696 |
+ Size int64 // Pre-computed from Stat().Size() as a convenience |
|
697 |
+} |
|
698 |
+ |
|
699 |
+func (archive *TempArchive) Read(data []byte) (int, error) { |
|
700 |
+ n, err := archive.File.Read(data) |
|
701 |
+ if err != nil { |
|
702 |
+ os.Remove(archive.File.Name()) |
|
703 |
+ } |
|
704 |
+ return n, err |
|
705 |
+} |
0 | 706 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,244 @@ |
0 |
+package archive |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "bytes" |
|
4 |
+ "fmt" |
|
5 |
+ "io" |
|
6 |
+ "io/ioutil" |
|
7 |
+ "os" |
|
8 |
+ "os/exec" |
|
9 |
+ "path" |
|
10 |
+ "testing" |
|
11 |
+ "time" |
|
12 |
+ |
|
13 |
+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" |
|
14 |
+) |
|
15 |
+ |
|
16 |
+func TestCmdStreamLargeStderr(t *testing.T) { |
|
17 |
+ cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") |
|
18 |
+ out, err := CmdStream(cmd, nil) |
|
19 |
+ if err != nil { |
|
20 |
+ t.Fatalf("Failed to start command: %s", err) |
|
21 |
+ } |
|
22 |
+ errCh := make(chan error) |
|
23 |
+ go func() { |
|
24 |
+ _, err := io.Copy(ioutil.Discard, out) |
|
25 |
+ errCh <- err |
|
26 |
+ }() |
|
27 |
+ select { |
|
28 |
+ case err := <-errCh: |
|
29 |
+ if err != nil { |
|
30 |
+ t.Fatalf("Command should not have failed (err=%.100s...)", err) |
|
31 |
+ } |
|
32 |
+ case <-time.After(5 * time.Second): |
|
33 |
+ t.Fatalf("Command did not complete in 5 seconds; probable deadlock") |
|
34 |
+ } |
|
35 |
+} |
|
36 |
+ |
|
37 |
+func TestCmdStreamBad(t *testing.T) { |
|
38 |
+ badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") |
|
39 |
+ out, err := CmdStream(badCmd, nil) |
|
40 |
+ if err != nil { |
|
41 |
+ t.Fatalf("Failed to start command: %s", err) |
|
42 |
+ } |
|
43 |
+ if output, err := ioutil.ReadAll(out); err == nil { |
|
44 |
+ t.Fatalf("Command should have failed") |
|
45 |
+ } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { |
|
46 |
+ t.Fatalf("Wrong error value (%s)", err) |
|
47 |
+ } else if s := string(output); s != "hello\n" { |
|
48 |
+ t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) |
|
49 |
+ } |
|
50 |
+} |
|
51 |
+ |
|
52 |
+func TestCmdStreamGood(t *testing.T) { |
|
53 |
+ cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0") |
|
54 |
+ out, err := CmdStream(cmd, nil) |
|
55 |
+ if err != nil { |
|
56 |
+ t.Fatal(err) |
|
57 |
+ } |
|
58 |
+ if output, err := ioutil.ReadAll(out); err != nil { |
|
59 |
+ t.Fatalf("Command should not have failed (err=%s)", err) |
|
60 |
+ } else if s := string(output); s != "hello\n" { |
|
61 |
+ t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) |
|
62 |
+ } |
|
63 |
+} |
|
64 |
+ |
|
65 |
+func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { |
|
66 |
+ archive, err := TarWithOptions(origin, options) |
|
67 |
+ if err != nil { |
|
68 |
+ t.Fatal(err) |
|
69 |
+ } |
|
70 |
+ defer archive.Close() |
|
71 |
+ |
|
72 |
+ buf := make([]byte, 10) |
|
73 |
+ if _, err := archive.Read(buf); err != nil { |
|
74 |
+ return nil, err |
|
75 |
+ } |
|
76 |
+ wrap := io.MultiReader(bytes.NewReader(buf), archive) |
|
77 |
+ |
|
78 |
+ detectedCompression := DetectCompression(buf) |
|
79 |
+ compression := options.Compression |
|
80 |
+ if detectedCompression.Extension() != compression.Extension() { |
|
81 |
+ return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) |
|
82 |
+ } |
|
83 |
+ |
|
84 |
+ tmp, err := ioutil.TempDir("", "docker-test-untar") |
|
85 |
+ if err != nil { |
|
86 |
+ return nil, err |
|
87 |
+ } |
|
88 |
+ defer os.RemoveAll(tmp) |
|
89 |
+ if err := Untar(wrap, tmp, nil); err != nil { |
|
90 |
+ return nil, err |
|
91 |
+ } |
|
92 |
+ if _, err := os.Stat(tmp); err != nil { |
|
93 |
+ return nil, err |
|
94 |
+ } |
|
95 |
+ |
|
96 |
+ return ChangesDirs(origin, tmp) |
|
97 |
+} |
|
98 |
+ |
|
99 |
+func TestTarUntar(t *testing.T) { |
|
100 |
+ origin, err := ioutil.TempDir("", "docker-test-untar-origin") |
|
101 |
+ if err != nil { |
|
102 |
+ t.Fatal(err) |
|
103 |
+ } |
|
104 |
+ defer os.RemoveAll(origin) |
|
105 |
+ if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { |
|
106 |
+ t.Fatal(err) |
|
107 |
+ } |
|
108 |
+ if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { |
|
109 |
+ t.Fatal(err) |
|
110 |
+ } |
|
111 |
+ if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { |
|
112 |
+ t.Fatal(err) |
|
113 |
+ } |
|
114 |
+ |
|
115 |
+ for _, c := range []Compression{ |
|
116 |
+ Uncompressed, |
|
117 |
+ Gzip, |
|
118 |
+ } { |
|
119 |
+ changes, err := tarUntar(t, origin, &TarOptions{ |
|
120 |
+ Compression: c, |
|
121 |
+ Excludes: []string{"3"}, |
|
122 |
+ }) |
|
123 |
+ |
|
124 |
+ if err != nil { |
|
125 |
+ t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) |
|
126 |
+ } |
|
127 |
+ |
|
128 |
+ if len(changes) != 1 || changes[0].Path != "/3" { |
|
129 |
+ t.Fatalf("Unexpected differences after tarUntar: %v", changes) |
|
130 |
+ } |
|
131 |
+ } |
|
132 |
+} |
|
133 |
+ |
|
134 |
+func TestTarWithOptions(t *testing.T) { |
|
135 |
+ origin, err := ioutil.TempDir("", "docker-test-untar-origin") |
|
136 |
+ if err != nil { |
|
137 |
+ t.Fatal(err) |
|
138 |
+ } |
|
139 |
+ defer os.RemoveAll(origin) |
|
140 |
+ if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { |
|
141 |
+ t.Fatal(err) |
|
142 |
+ } |
|
143 |
+ if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { |
|
144 |
+ t.Fatal(err) |
|
145 |
+ } |
|
146 |
+ |
|
147 |
+ cases := []struct { |
|
148 |
+ opts *TarOptions |
|
149 |
+ numChanges int |
|
150 |
+ }{ |
|
151 |
+ {&TarOptions{Includes: []string{"1"}}, 1}, |
|
152 |
+ {&TarOptions{Excludes: []string{"2"}}, 1}, |
|
153 |
+ } |
|
154 |
+ for _, testCase := range cases { |
|
155 |
+ changes, err := tarUntar(t, origin, testCase.opts) |
|
156 |
+ if err != nil { |
|
157 |
+ t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) |
|
158 |
+ } |
|
159 |
+ if len(changes) != testCase.numChanges { |
|
160 |
+ t.Errorf("Expected %d changes, got %d for %+v:", |
|
161 |
+ testCase.numChanges, len(changes), testCase.opts) |
|
162 |
+ } |
|
163 |
+ } |
|
164 |
+} |
|
165 |
+ |
|
166 |
+// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz |
|
167 |
+// use PAX Global Extended Headers. |
|
168 |
+// Failing prevents the archives from being uncompressed during ADD |
|
169 |
+func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { |
|
170 |
+ hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} |
|
171 |
+ err := createTarFile("pax_global_header", "some_dir", &hdr, nil, true) |
|
172 |
+ if err != nil { |
|
173 |
+ t.Fatal(err) |
|
174 |
+ } |
|
175 |
+} |
|
176 |
+ |
|
177 |
+// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. |
|
178 |
+// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. |
|
179 |
+func TestUntarUstarGnuConflict(t *testing.T) { |
|
180 |
+ f, err := os.Open("testdata/broken.tar") |
|
181 |
+ if err != nil { |
|
182 |
+ t.Fatal(err) |
|
183 |
+ } |
|
184 |
+ found := false |
|
185 |
+ tr := tar.NewReader(f) |
|
186 |
+ // Iterate through the files in the archive. |
|
187 |
+ for { |
|
188 |
+ hdr, err := tr.Next() |
|
189 |
+ if err == io.EOF { |
|
190 |
+ // end of tar archive |
|
191 |
+ break |
|
192 |
+ } |
|
193 |
+ if err != nil { |
|
194 |
+ t.Fatal(err) |
|
195 |
+ } |
|
196 |
+ if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { |
|
197 |
+ found = true |
|
198 |
+ break |
|
199 |
+ } |
|
200 |
+ } |
|
201 |
+ if !found { |
|
202 |
+ t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") |
|
203 |
+ } |
|
204 |
+} |
|
205 |
+ |
|
206 |
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string) (int, error) { |
|
207 |
+ fileData := []byte("fooo") |
|
208 |
+ for n := 0; n < numberOfFiles; n++ { |
|
209 |
+ fileName := fmt.Sprintf("file-%d", n) |
|
210 |
+ if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { |
|
211 |
+ return 0, err |
|
212 |
+ } |
|
213 |
+ } |
|
214 |
+ totalSize := numberOfFiles * len(fileData) |
|
215 |
+ return totalSize, nil |
|
216 |
+} |
|
217 |
+ |
|
218 |
+func BenchmarkTarUntar(b *testing.B) { |
|
219 |
+ origin, err := ioutil.TempDir("", "docker-test-untar-origin") |
|
220 |
+ if err != nil { |
|
221 |
+ b.Fatal(err) |
|
222 |
+ } |
|
223 |
+ tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") |
|
224 |
+ if err != nil { |
|
225 |
+ b.Fatal(err) |
|
226 |
+ } |
|
227 |
+ target := path.Join(tempDir, "dest") |
|
228 |
+ n, err := prepareUntarSourceDirectory(100, origin) |
|
229 |
+ if err != nil { |
|
230 |
+ b.Fatal(err) |
|
231 |
+ } |
|
232 |
+ b.ResetTimer() |
|
233 |
+ b.SetBytes(int64(n)) |
|
234 |
+ defer os.RemoveAll(origin) |
|
235 |
+ defer os.RemoveAll(tempDir) |
|
236 |
+ for n := 0; n < b.N; n++ { |
|
237 |
+ err := TarUntar(origin, target) |
|
238 |
+ if err != nil { |
|
239 |
+ b.Fatal(err) |
|
240 |
+ } |
|
241 |
+ os.RemoveAll(target) |
|
242 |
+ } |
|
243 |
+} |
0 | 244 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,411 @@ |
0 |
+package archive |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "bytes" |
|
4 |
+ "fmt" |
|
5 |
+ "io" |
|
6 |
+ "os" |
|
7 |
+ "path/filepath" |
|
8 |
+ "strings" |
|
9 |
+ "syscall" |
|
10 |
+ "time" |
|
11 |
+ |
|
12 |
+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" |
|
13 |
+ |
|
14 |
+ "github.com/docker/docker/pkg/log" |
|
15 |
+ "github.com/docker/docker/pkg/pools" |
|
16 |
+ "github.com/docker/docker/pkg/system" |
|
17 |
+) |
|
18 |
+ |
|
19 |
+type ChangeType int |
|
20 |
+ |
|
21 |
+const ( |
|
22 |
+ ChangeModify = iota |
|
23 |
+ ChangeAdd |
|
24 |
+ ChangeDelete |
|
25 |
+) |
|
26 |
+ |
|
27 |
+type Change struct { |
|
28 |
+ Path string |
|
29 |
+ Kind ChangeType |
|
30 |
+} |
|
31 |
+ |
|
32 |
+func (change *Change) String() string { |
|
33 |
+ var kind string |
|
34 |
+ switch change.Kind { |
|
35 |
+ case ChangeModify: |
|
36 |
+ kind = "C" |
|
37 |
+ case ChangeAdd: |
|
38 |
+ kind = "A" |
|
39 |
+ case ChangeDelete: |
|
40 |
+ kind = "D" |
|
41 |
+ } |
|
42 |
+ return fmt.Sprintf("%s %s", kind, change.Path) |
|
43 |
+} |
|
44 |
+ |
|
45 |
+// Gnu tar and the go tar writer don't have sub-second mtime |
|
46 |
+// precision, which is problematic when we apply changes via tar |
|
47 |
+// files, we handle this by comparing for exact times, *or* same |
|
48 |
+// second count and either a or b having exactly 0 nanoseconds |
|
49 |
+func sameFsTime(a, b time.Time) bool { |
|
50 |
+ return a == b || |
|
51 |
+ (a.Unix() == b.Unix() && |
|
52 |
+ (a.Nanosecond() == 0 || b.Nanosecond() == 0)) |
|
53 |
+} |
|
54 |
+ |
|
55 |
+func sameFsTimeSpec(a, b syscall.Timespec) bool { |
|
56 |
+ return a.Sec == b.Sec && |
|
57 |
+ (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) |
|
58 |
+} |
|
59 |
+ |
|
60 |
+// Changes walks the path rw and determines changes for the files in the path, |
|
61 |
+// with respect to the parent layers |
|
62 |
+func Changes(layers []string, rw string) ([]Change, error) { |
|
63 |
+ var changes []Change |
|
64 |
+ err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { |
|
65 |
+ if err != nil { |
|
66 |
+ return err |
|
67 |
+ } |
|
68 |
+ |
|
69 |
+ // Rebase path |
|
70 |
+ path, err = filepath.Rel(rw, path) |
|
71 |
+ if err != nil { |
|
72 |
+ return err |
|
73 |
+ } |
|
74 |
+ path = filepath.Join("/", path) |
|
75 |
+ |
|
76 |
+ // Skip root |
|
77 |
+ if path == "/" { |
|
78 |
+ return nil |
|
79 |
+ } |
|
80 |
+ |
|
81 |
+ // Skip AUFS metadata |
|
82 |
+ if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched { |
|
83 |
+ return err |
|
84 |
+ } |
|
85 |
+ |
|
86 |
+ change := Change{ |
|
87 |
+ Path: path, |
|
88 |
+ } |
|
89 |
+ |
|
90 |
+ // Find out what kind of modification happened |
|
91 |
+ file := filepath.Base(path) |
|
92 |
+ // If there is a whiteout, then the file was removed |
|
93 |
+ if strings.HasPrefix(file, ".wh.") { |
|
94 |
+ originalFile := file[len(".wh."):] |
|
95 |
+ change.Path = filepath.Join(filepath.Dir(path), originalFile) |
|
96 |
+ change.Kind = ChangeDelete |
|
97 |
+ } else { |
|
98 |
+ // Otherwise, the file was added |
|
99 |
+ change.Kind = ChangeAdd |
|
100 |
+ |
|
101 |
+ // ...Unless it already existed in a top layer, in which case, it's a modification |
|
102 |
+ for _, layer := range layers { |
|
103 |
+ stat, err := os.Stat(filepath.Join(layer, path)) |
|
104 |
+ if err != nil && !os.IsNotExist(err) { |
|
105 |
+ return err |
|
106 |
+ } |
|
107 |
+ if err == nil { |
|
108 |
+ // The file existed in the top layer, so that's a modification |
|
109 |
+ |
|
110 |
+ // However, if it's a directory, maybe it wasn't actually modified. |
|
111 |
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar |
|
112 |
+ if stat.IsDir() && f.IsDir() { |
|
113 |
+ if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { |
|
114 |
+ // Both directories are the same, don't record the change |
|
115 |
+ return nil |
|
116 |
+ } |
|
117 |
+ } |
|
118 |
+ change.Kind = ChangeModify |
|
119 |
+ break |
|
120 |
+ } |
|
121 |
+ } |
|
122 |
+ } |
|
123 |
+ |
|
124 |
+ // Record change |
|
125 |
+ changes = append(changes, change) |
|
126 |
+ return nil |
|
127 |
+ }) |
|
128 |
+ if err != nil && !os.IsNotExist(err) { |
|
129 |
+ return nil, err |
|
130 |
+ } |
|
131 |
+ return changes, nil |
|
132 |
+} |
|
133 |
+ |
|
134 |
+type FileInfo struct { |
|
135 |
+ parent *FileInfo |
|
136 |
+ name string |
|
137 |
+ stat syscall.Stat_t |
|
138 |
+ children map[string]*FileInfo |
|
139 |
+ capability []byte |
|
140 |
+ added bool |
|
141 |
+} |
|
142 |
+ |
|
143 |
+func (root *FileInfo) LookUp(path string) *FileInfo { |
|
144 |
+ parent := root |
|
145 |
+ if path == "/" { |
|
146 |
+ return root |
|
147 |
+ } |
|
148 |
+ |
|
149 |
+ pathElements := strings.Split(path, "/") |
|
150 |
+ for _, elem := range pathElements { |
|
151 |
+ if elem != "" { |
|
152 |
+ child := parent.children[elem] |
|
153 |
+ if child == nil { |
|
154 |
+ return nil |
|
155 |
+ } |
|
156 |
+ parent = child |
|
157 |
+ } |
|
158 |
+ } |
|
159 |
+ return parent |
|
160 |
+} |
|
161 |
+ |
|
162 |
+func (info *FileInfo) path() string { |
|
163 |
+ if info.parent == nil { |
|
164 |
+ return "/" |
|
165 |
+ } |
|
166 |
+ return filepath.Join(info.parent.path(), info.name) |
|
167 |
+} |
|
168 |
+ |
|
169 |
+func (info *FileInfo) isDir() bool { |
|
170 |
+ return info.parent == nil || info.stat.Mode&syscall.S_IFDIR == syscall.S_IFDIR |
|
171 |
+} |
|
172 |
+ |
|
173 |
+func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { |
|
174 |
+ |
|
175 |
+ sizeAtEntry := len(*changes) |
|
176 |
+ |
|
177 |
+ if oldInfo == nil { |
|
178 |
+ // add |
|
179 |
+ change := Change{ |
|
180 |
+ Path: info.path(), |
|
181 |
+ Kind: ChangeAdd, |
|
182 |
+ } |
|
183 |
+ *changes = append(*changes, change) |
|
184 |
+ info.added = true |
|
185 |
+ } |
|
186 |
+ |
|
187 |
+ // We make a copy so we can modify it to detect additions |
|
188 |
+ // also, we only recurse on the old dir if the new info is a directory |
|
189 |
+ // otherwise any previous delete/change is considered recursive |
|
190 |
+ oldChildren := make(map[string]*FileInfo) |
|
191 |
+ if oldInfo != nil && info.isDir() { |
|
192 |
+ for k, v := range oldInfo.children { |
|
193 |
+ oldChildren[k] = v |
|
194 |
+ } |
|
195 |
+ } |
|
196 |
+ |
|
197 |
+ for name, newChild := range info.children { |
|
198 |
+ oldChild, _ := oldChildren[name] |
|
199 |
+ if oldChild != nil { |
|
200 |
+ // change? |
|
201 |
+ oldStat := &oldChild.stat |
|
202 |
+ newStat := &newChild.stat |
|
203 |
+ // Note: We can't compare inode or ctime or blocksize here, because these change |
|
204 |
+ // when copying a file into a container. However, that is not generally a problem |
|
205 |
+ // because any content change will change mtime, and any status change should |
|
206 |
+ // be visible when actually comparing the stat fields. The only time this |
|
207 |
+ // breaks down is if some code intentionally hides a change by setting |
|
208 |
+ // back mtime |
|
209 |
+ if oldStat.Mode != newStat.Mode || |
|
210 |
+ oldStat.Uid != newStat.Uid || |
|
211 |
+ oldStat.Gid != newStat.Gid || |
|
212 |
+ oldStat.Rdev != newStat.Rdev || |
|
213 |
+ // Don't look at size for dirs, its not a good measure of change |
|
214 |
+ (oldStat.Size != newStat.Size && oldStat.Mode&syscall.S_IFDIR != syscall.S_IFDIR) || |
|
215 |
+ !sameFsTimeSpec(system.GetLastModification(oldStat), system.GetLastModification(newStat)) || |
|
216 |
+ bytes.Compare(oldChild.capability, newChild.capability) != 0 { |
|
217 |
+ change := Change{ |
|
218 |
+ Path: newChild.path(), |
|
219 |
+ Kind: ChangeModify, |
|
220 |
+ } |
|
221 |
+ *changes = append(*changes, change) |
|
222 |
+ newChild.added = true |
|
223 |
+ } |
|
224 |
+ |
|
225 |
+ // Remove from copy so we can detect deletions |
|
226 |
+ delete(oldChildren, name) |
|
227 |
+ } |
|
228 |
+ |
|
229 |
+ newChild.addChanges(oldChild, changes) |
|
230 |
+ } |
|
231 |
+ for _, oldChild := range oldChildren { |
|
232 |
+ // delete |
|
233 |
+ change := Change{ |
|
234 |
+ Path: oldChild.path(), |
|
235 |
+ Kind: ChangeDelete, |
|
236 |
+ } |
|
237 |
+ *changes = append(*changes, change) |
|
238 |
+ } |
|
239 |
+ |
|
240 |
+ // If there were changes inside this directory, we need to add it, even if the directory |
|
241 |
+ // itself wasn't changed. This is needed to properly save and restore filesystem permissions. |
|
242 |
+ if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != "/" { |
|
243 |
+ change := Change{ |
|
244 |
+ Path: info.path(), |
|
245 |
+ Kind: ChangeModify, |
|
246 |
+ } |
|
247 |
+ // Let's insert the directory entry before the recently added entries located inside this dir |
|
248 |
+ *changes = append(*changes, change) // just to resize the slice, will be overwritten |
|
249 |
+ copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) |
|
250 |
+ (*changes)[sizeAtEntry] = change |
|
251 |
+ } |
|
252 |
+ |
|
253 |
+} |
|
254 |
+ |
|
255 |
+func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { |
|
256 |
+ var changes []Change |
|
257 |
+ |
|
258 |
+ info.addChanges(oldInfo, &changes) |
|
259 |
+ |
|
260 |
+ return changes |
|
261 |
+} |
|
262 |
+ |
|
263 |
+func newRootFileInfo() *FileInfo { |
|
264 |
+ root := &FileInfo{ |
|
265 |
+ name: "/", |
|
266 |
+ children: make(map[string]*FileInfo), |
|
267 |
+ } |
|
268 |
+ return root |
|
269 |
+} |
|
270 |
+ |
|
271 |
+func collectFileInfo(sourceDir string) (*FileInfo, error) { |
|
272 |
+ root := newRootFileInfo() |
|
273 |
+ |
|
274 |
+ err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { |
|
275 |
+ if err != nil { |
|
276 |
+ return err |
|
277 |
+ } |
|
278 |
+ |
|
279 |
+ // Rebase path |
|
280 |
+ relPath, err := filepath.Rel(sourceDir, path) |
|
281 |
+ if err != nil { |
|
282 |
+ return err |
|
283 |
+ } |
|
284 |
+ relPath = filepath.Join("/", relPath) |
|
285 |
+ |
|
286 |
+ if relPath == "/" { |
|
287 |
+ return nil |
|
288 |
+ } |
|
289 |
+ |
|
290 |
+ parent := root.LookUp(filepath.Dir(relPath)) |
|
291 |
+ if parent == nil { |
|
292 |
+ return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) |
|
293 |
+ } |
|
294 |
+ |
|
295 |
+ info := &FileInfo{ |
|
296 |
+ name: filepath.Base(relPath), |
|
297 |
+ children: make(map[string]*FileInfo), |
|
298 |
+ parent: parent, |
|
299 |
+ } |
|
300 |
+ |
|
301 |
+ if err := syscall.Lstat(path, &info.stat); err != nil { |
|
302 |
+ return err |
|
303 |
+ } |
|
304 |
+ |
|
305 |
+ info.capability, _ = system.Lgetxattr(path, "security.capability") |
|
306 |
+ |
|
307 |
+ parent.children[info.name] = info |
|
308 |
+ |
|
309 |
+ return nil |
|
310 |
+ }) |
|
311 |
+ if err != nil { |
|
312 |
+ return nil, err |
|
313 |
+ } |
|
314 |
+ return root, nil |
|
315 |
+} |
|
316 |
+ |
|
317 |
+// ChangesDirs compares two directories and generates an array of Change objects describing the changes. |
|
318 |
+// If oldDir is "", then all files in newDir will be Add-Changes. |
|
319 |
+func ChangesDirs(newDir, oldDir string) ([]Change, error) { |
|
320 |
+ var ( |
|
321 |
+ oldRoot, newRoot *FileInfo |
|
322 |
+ err1, err2 error |
|
323 |
+ errs = make(chan error, 2) |
|
324 |
+ ) |
|
325 |
+ go func() { |
|
326 |
+ if oldDir != "" { |
|
327 |
+ oldRoot, err1 = collectFileInfo(oldDir) |
|
328 |
+ } |
|
329 |
+ errs <- err1 |
|
330 |
+ }() |
|
331 |
+ go func() { |
|
332 |
+ newRoot, err2 = collectFileInfo(newDir) |
|
333 |
+ errs <- err2 |
|
334 |
+ }() |
|
335 |
+ for i := 0; i < 2; i++ { |
|
336 |
+ if err := <-errs; err != nil { |
|
337 |
+ return nil, err |
|
338 |
+ } |
|
339 |
+ } |
|
340 |
+ |
|
341 |
+ return newRoot.Changes(oldRoot), nil |
|
342 |
+} |
|
343 |
+ |
|
344 |
+// ChangesSize calculates the size in bytes of the provided changes, based on newDir. |
|
345 |
+func ChangesSize(newDir string, changes []Change) int64 { |
|
346 |
+ var size int64 |
|
347 |
+ for _, change := range changes { |
|
348 |
+ if change.Kind == ChangeModify || change.Kind == ChangeAdd { |
|
349 |
+ file := filepath.Join(newDir, change.Path) |
|
350 |
+ fileInfo, _ := os.Lstat(file) |
|
351 |
+ if fileInfo != nil && !fileInfo.IsDir() { |
|
352 |
+ size += fileInfo.Size() |
|
353 |
+ } |
|
354 |
+ } |
|
355 |
+ } |
|
356 |
+ return size |
|
357 |
+} |
|
358 |
+ |
|
359 |
+func major(device uint64) uint64 { |
|
360 |
+ return (device >> 8) & 0xfff |
|
361 |
+} |
|
362 |
+ |
|
363 |
+func minor(device uint64) uint64 { |
|
364 |
+ return (device & 0xff) | ((device >> 12) & 0xfff00) |
|
365 |
+} |
|
366 |
+ |
|
367 |
+// ExportChanges produces an Archive from the provided changes, relative to dir. |
|
368 |
+func ExportChanges(dir string, changes []Change) (Archive, error) { |
|
369 |
+ reader, writer := io.Pipe() |
|
370 |
+ tw := tar.NewWriter(writer) |
|
371 |
+ |
|
372 |
+ go func() { |
|
373 |
+ twBuf := pools.BufioWriter32KPool.Get(nil) |
|
374 |
+ defer pools.BufioWriter32KPool.Put(twBuf) |
|
375 |
+ // In general we log errors here but ignore them because |
|
376 |
+ // during e.g. a diff operation the container can continue |
|
377 |
+ // mutating the filesystem and we can see transient errors |
|
378 |
+ // from this |
|
379 |
+ for _, change := range changes { |
|
380 |
+ if change.Kind == ChangeDelete { |
|
381 |
+ whiteOutDir := filepath.Dir(change.Path) |
|
382 |
+ whiteOutBase := filepath.Base(change.Path) |
|
383 |
+ whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase) |
|
384 |
+ timestamp := time.Now() |
|
385 |
+ hdr := &tar.Header{ |
|
386 |
+ Name: whiteOut[1:], |
|
387 |
+ Size: 0, |
|
388 |
+ ModTime: timestamp, |
|
389 |
+ AccessTime: timestamp, |
|
390 |
+ ChangeTime: timestamp, |
|
391 |
+ } |
|
392 |
+ if err := tw.WriteHeader(hdr); err != nil { |
|
393 |
+ log.Debugf("Can't write whiteout header: %s", err) |
|
394 |
+ } |
|
395 |
+ } else { |
|
396 |
+ path := filepath.Join(dir, change.Path) |
|
397 |
+ if err := addTarFile(path, change.Path[1:], tw, twBuf); err != nil { |
|
398 |
+ log.Debugf("Can't add file %s to tar: %s", path, err) |
|
399 |
+ } |
|
400 |
+ } |
|
401 |
+ } |
|
402 |
+ |
|
403 |
+ // Make sure to check the error on Close. |
|
404 |
+ if err := tw.Close(); err != nil { |
|
405 |
+ log.Debugf("Can't close layer: %s", err) |
|
406 |
+ } |
|
407 |
+ writer.Close() |
|
408 |
+ }() |
|
409 |
+ return reader, nil |
|
410 |
+} |
0 | 411 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,301 @@ |
0 |
+package archive |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "io/ioutil" |
|
4 |
+ "os" |
|
5 |
+ "os/exec" |
|
6 |
+ "path" |
|
7 |
+ "sort" |
|
8 |
+ "testing" |
|
9 |
+ "time" |
|
10 |
+) |
|
11 |
+ |
|
12 |
+func max(x, y int) int { |
|
13 |
+ if x >= y { |
|
14 |
+ return x |
|
15 |
+ } |
|
16 |
+ return y |
|
17 |
+} |
|
18 |
+ |
|
19 |
+func copyDir(src, dst string) error { |
|
20 |
+ cmd := exec.Command("cp", "-a", src, dst) |
|
21 |
+ if err := cmd.Run(); err != nil { |
|
22 |
+ return err |
|
23 |
+ } |
|
24 |
+ return nil |
|
25 |
+} |
|
26 |
+ |
|
27 |
+// Helper to sort []Change by path |
|
28 |
+type byPath struct{ changes []Change } |
|
29 |
+ |
|
30 |
+func (b byPath) Less(i, j int) bool { return b.changes[i].Path < b.changes[j].Path } |
|
31 |
+func (b byPath) Len() int { return len(b.changes) } |
|
32 |
+func (b byPath) Swap(i, j int) { b.changes[i], b.changes[j] = b.changes[j], b.changes[i] } |
|
33 |
+ |
|
34 |
+type FileType uint32 |
|
35 |
+ |
|
36 |
+const ( |
|
37 |
+ Regular FileType = iota |
|
38 |
+ Dir |
|
39 |
+ Symlink |
|
40 |
+) |
|
41 |
+ |
|
42 |
+type FileData struct { |
|
43 |
+ filetype FileType |
|
44 |
+ path string |
|
45 |
+ contents string |
|
46 |
+ permissions os.FileMode |
|
47 |
+} |
|
48 |
+ |
|
49 |
+func createSampleDir(t *testing.T, root string) { |
|
50 |
+ files := []FileData{ |
|
51 |
+ {Regular, "file1", "file1\n", 0600}, |
|
52 |
+ {Regular, "file2", "file2\n", 0666}, |
|
53 |
+ {Regular, "file3", "file3\n", 0404}, |
|
54 |
+ {Regular, "file4", "file4\n", 0600}, |
|
55 |
+ {Regular, "file5", "file5\n", 0600}, |
|
56 |
+ {Regular, "file6", "file6\n", 0600}, |
|
57 |
+ {Regular, "file7", "file7\n", 0600}, |
|
58 |
+ {Dir, "dir1", "", 0740}, |
|
59 |
+ {Regular, "dir1/file1-1", "file1-1\n", 01444}, |
|
60 |
+ {Regular, "dir1/file1-2", "file1-2\n", 0666}, |
|
61 |
+ {Dir, "dir2", "", 0700}, |
|
62 |
+ {Regular, "dir2/file2-1", "file2-1\n", 0666}, |
|
63 |
+ {Regular, "dir2/file2-2", "file2-2\n", 0666}, |
|
64 |
+ {Dir, "dir3", "", 0700}, |
|
65 |
+ {Regular, "dir3/file3-1", "file3-1\n", 0666}, |
|
66 |
+ {Regular, "dir3/file3-2", "file3-2\n", 0666}, |
|
67 |
+ {Dir, "dir4", "", 0700}, |
|
68 |
+ {Regular, "dir4/file3-1", "file4-1\n", 0666}, |
|
69 |
+ {Regular, "dir4/file3-2", "file4-2\n", 0666}, |
|
70 |
+ {Symlink, "symlink1", "target1", 0666}, |
|
71 |
+ {Symlink, "symlink2", "target2", 0666}, |
|
72 |
+ } |
|
73 |
+ |
|
74 |
+ now := time.Now() |
|
75 |
+ for _, info := range files { |
|
76 |
+ p := path.Join(root, info.path) |
|
77 |
+ if info.filetype == Dir { |
|
78 |
+ if err := os.MkdirAll(p, info.permissions); err != nil { |
|
79 |
+ t.Fatal(err) |
|
80 |
+ } |
|
81 |
+ } else if info.filetype == Regular { |
|
82 |
+ if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { |
|
83 |
+ t.Fatal(err) |
|
84 |
+ } |
|
85 |
+ } else if info.filetype == Symlink { |
|
86 |
+ if err := os.Symlink(info.contents, p); err != nil { |
|
87 |
+ t.Fatal(err) |
|
88 |
+ } |
|
89 |
+ } |
|
90 |
+ |
|
91 |
+ if info.filetype != Symlink { |
|
92 |
+ // Set a consistent ctime, atime for all files and dirs |
|
93 |
+ if err := os.Chtimes(p, now, now); err != nil { |
|
94 |
+ t.Fatal(err) |
|
95 |
+ } |
|
96 |
+ } |
|
97 |
+ } |
|
98 |
+} |
|
99 |
+ |
|
100 |
+// Create an directory, copy it, make sure we report no changes between the two |
|
101 |
+func TestChangesDirsEmpty(t *testing.T) { |
|
102 |
+ src, err := ioutil.TempDir("", "docker-changes-test") |
|
103 |
+ if err != nil { |
|
104 |
+ t.Fatal(err) |
|
105 |
+ } |
|
106 |
+ createSampleDir(t, src) |
|
107 |
+ dst := src + "-copy" |
|
108 |
+ if err := copyDir(src, dst); err != nil { |
|
109 |
+ t.Fatal(err) |
|
110 |
+ } |
|
111 |
+ changes, err := ChangesDirs(dst, src) |
|
112 |
+ if err != nil { |
|
113 |
+ t.Fatal(err) |
|
114 |
+ } |
|
115 |
+ |
|
116 |
+ if len(changes) != 0 { |
|
117 |
+ t.Fatalf("Reported changes for identical dirs: %v", changes) |
|
118 |
+ } |
|
119 |
+ os.RemoveAll(src) |
|
120 |
+ os.RemoveAll(dst) |
|
121 |
+} |
|
122 |
+ |
|
123 |
+func mutateSampleDir(t *testing.T, root string) { |
|
124 |
+ // Remove a regular file |
|
125 |
+ if err := os.RemoveAll(path.Join(root, "file1")); err != nil { |
|
126 |
+ t.Fatal(err) |
|
127 |
+ } |
|
128 |
+ |
|
129 |
+ // Remove a directory |
|
130 |
+ if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { |
|
131 |
+ t.Fatal(err) |
|
132 |
+ } |
|
133 |
+ |
|
134 |
+ // Remove a symlink |
|
135 |
+ if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { |
|
136 |
+ t.Fatal(err) |
|
137 |
+ } |
|
138 |
+ |
|
139 |
+ // Rewrite a file |
|
140 |
+ if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { |
|
141 |
+ t.Fatal(err) |
|
142 |
+ } |
|
143 |
+ |
|
144 |
+ // Replace a file |
|
145 |
+ if err := os.RemoveAll(path.Join(root, "file3")); err != nil { |
|
146 |
+ t.Fatal(err) |
|
147 |
+ } |
|
148 |
+ if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { |
|
149 |
+ t.Fatal(err) |
|
150 |
+ } |
|
151 |
+ |
|
152 |
+ // Touch file |
|
153 |
+ if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { |
|
154 |
+ t.Fatal(err) |
|
155 |
+ } |
|
156 |
+ |
|
157 |
+ // Replace file with dir |
|
158 |
+ if err := os.RemoveAll(path.Join(root, "file5")); err != nil { |
|
159 |
+ t.Fatal(err) |
|
160 |
+ } |
|
161 |
+ if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { |
|
162 |
+ t.Fatal(err) |
|
163 |
+ } |
|
164 |
+ |
|
165 |
+ // Create new file |
|
166 |
+ if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { |
|
167 |
+ t.Fatal(err) |
|
168 |
+ } |
|
169 |
+ |
|
170 |
+ // Create new dir |
|
171 |
+ if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { |
|
172 |
+ t.Fatal(err) |
|
173 |
+ } |
|
174 |
+ |
|
175 |
+ // Create a new symlink |
|
176 |
+ if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { |
|
177 |
+ t.Fatal(err) |
|
178 |
+ } |
|
179 |
+ |
|
180 |
+ // Change a symlink |
|
181 |
+ if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { |
|
182 |
+ t.Fatal(err) |
|
183 |
+ } |
|
184 |
+ if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { |
|
185 |
+ t.Fatal(err) |
|
186 |
+ } |
|
187 |
+ |
|
188 |
+ // Replace dir with file |
|
189 |
+ if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { |
|
190 |
+ t.Fatal(err) |
|
191 |
+ } |
|
192 |
+ if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { |
|
193 |
+ t.Fatal(err) |
|
194 |
+ } |
|
195 |
+ |
|
196 |
+ // Touch dir |
|
197 |
+ if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { |
|
198 |
+ t.Fatal(err) |
|
199 |
+ } |
|
200 |
+} |
|
201 |
+ |
|
202 |
+func TestChangesDirsMutated(t *testing.T) { |
|
203 |
+ src, err := ioutil.TempDir("", "docker-changes-test") |
|
204 |
+ if err != nil { |
|
205 |
+ t.Fatal(err) |
|
206 |
+ } |
|
207 |
+ createSampleDir(t, src) |
|
208 |
+ dst := src + "-copy" |
|
209 |
+ if err := copyDir(src, dst); err != nil { |
|
210 |
+ t.Fatal(err) |
|
211 |
+ } |
|
212 |
+ defer os.RemoveAll(src) |
|
213 |
+ defer os.RemoveAll(dst) |
|
214 |
+ |
|
215 |
+ mutateSampleDir(t, dst) |
|
216 |
+ |
|
217 |
+ changes, err := ChangesDirs(dst, src) |
|
218 |
+ if err != nil { |
|
219 |
+ t.Fatal(err) |
|
220 |
+ } |
|
221 |
+ |
|
222 |
+ sort.Sort(byPath{changes}) |
|
223 |
+ |
|
224 |
+ expectedChanges := []Change{ |
|
225 |
+ {"/dir1", ChangeDelete}, |
|
226 |
+ {"/dir2", ChangeModify}, |
|
227 |
+ {"/dir3", ChangeModify}, |
|
228 |
+ {"/dirnew", ChangeAdd}, |
|
229 |
+ {"/file1", ChangeDelete}, |
|
230 |
+ {"/file2", ChangeModify}, |
|
231 |
+ {"/file3", ChangeModify}, |
|
232 |
+ {"/file4", ChangeModify}, |
|
233 |
+ {"/file5", ChangeModify}, |
|
234 |
+ {"/filenew", ChangeAdd}, |
|
235 |
+ {"/symlink1", ChangeDelete}, |
|
236 |
+ {"/symlink2", ChangeModify}, |
|
237 |
+ {"/symlinknew", ChangeAdd}, |
|
238 |
+ } |
|
239 |
+ |
|
240 |
+ for i := 0; i < max(len(changes), len(expectedChanges)); i++ { |
|
241 |
+ if i >= len(expectedChanges) { |
|
242 |
+ t.Fatalf("unexpected change %s\n", changes[i].String()) |
|
243 |
+ } |
|
244 |
+ if i >= len(changes) { |
|
245 |
+ t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) |
|
246 |
+ } |
|
247 |
+ if changes[i].Path == expectedChanges[i].Path { |
|
248 |
+ if changes[i] != expectedChanges[i] { |
|
249 |
+ t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) |
|
250 |
+ } |
|
251 |
+ } else if changes[i].Path < expectedChanges[i].Path { |
|
252 |
+ t.Fatalf("unexpected change %s\n", changes[i].String()) |
|
253 |
+ } else { |
|
254 |
+ t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) |
|
255 |
+ } |
|
256 |
+ } |
|
257 |
+} |
|
258 |
+ |
|
259 |
+func TestApplyLayer(t *testing.T) { |
|
260 |
+ src, err := ioutil.TempDir("", "docker-changes-test") |
|
261 |
+ if err != nil { |
|
262 |
+ t.Fatal(err) |
|
263 |
+ } |
|
264 |
+ createSampleDir(t, src) |
|
265 |
+ defer os.RemoveAll(src) |
|
266 |
+ dst := src + "-copy" |
|
267 |
+ if err := copyDir(src, dst); err != nil { |
|
268 |
+ t.Fatal(err) |
|
269 |
+ } |
|
270 |
+ mutateSampleDir(t, dst) |
|
271 |
+ defer os.RemoveAll(dst) |
|
272 |
+ |
|
273 |
+ changes, err := ChangesDirs(dst, src) |
|
274 |
+ if err != nil { |
|
275 |
+ t.Fatal(err) |
|
276 |
+ } |
|
277 |
+ |
|
278 |
+ layer, err := ExportChanges(dst, changes) |
|
279 |
+ if err != nil { |
|
280 |
+ t.Fatal(err) |
|
281 |
+ } |
|
282 |
+ |
|
283 |
+ layerCopy, err := NewTempArchive(layer, "") |
|
284 |
+ if err != nil { |
|
285 |
+ t.Fatal(err) |
|
286 |
+ } |
|
287 |
+ |
|
288 |
+ if err := ApplyLayer(src, layerCopy); err != nil { |
|
289 |
+ t.Fatal(err) |
|
290 |
+ } |
|
291 |
+ |
|
292 |
+ changes2, err := ChangesDirs(src, dst) |
|
293 |
+ if err != nil { |
|
294 |
+ t.Fatal(err) |
|
295 |
+ } |
|
296 |
+ |
|
297 |
+ if len(changes2) != 0 { |
|
298 |
+ t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) |
|
299 |
+ } |
|
300 |
+} |
0 | 301 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,156 @@ |
0 |
+package archive |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "fmt" |
|
4 |
+ "io" |
|
5 |
+ "io/ioutil" |
|
6 |
+ "os" |
|
7 |
+ "path/filepath" |
|
8 |
+ "strings" |
|
9 |
+ "syscall" |
|
10 |
+ |
|
11 |
+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" |
|
12 |
+ |
|
13 |
+ "github.com/docker/docker/pkg/pools" |
|
14 |
+) |
|
15 |
+ |
|
16 |
// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
// then the top 12 bits of the minor.
func mkdev(major int64, minor int64) uint32 {
	minorLow := uint32(minor & 0xff)
	majorBits := uint32(major&0xfff) << 8
	minorHigh := uint32(minor&0xfff00) << 12
	return minorHigh | majorBits | minorLow
}
|
22 |
+ |
|
23 |
// ApplyLayer parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. The layer may be compressed; it is
// transparently decompressed via DecompressStream. AUFS whiteout entries
// (".wh.<name>") delete the corresponding path, and AUFS metadata under
// ".wh..wh." is consumed but not extracted.
func ApplyLayer(dest string, layer ArchiveReader) error {
	// We need to be able to set any perms
	oldmask := syscall.Umask(0)
	defer syscall.Umask(oldmask) // restore the process umask on return

	layer, err := DecompressStream(layer)
	if err != nil {
		return err
	}

	tr := tar.NewReader(layer)
	trBuf := pools.BufioReader32KPool.Get(tr)
	defer pools.BufioReader32KPool.Put(trBuf)

	// Directory headers are collected and applied last so that creating
	// files inside them does not disturb the directory mtimes we set.
	var dirs []*tar.Header

	// Scratch dir + header index used to resolve AUFS pseudo-hardlinks
	// whose targets live under /.wh..wh.plnk (see below).
	aufsTempdir := ""
	aufsHardlinks := make(map[string]*tar.Header)

	// Iterate through the files in the archive.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// Normalize name, for safety and for a simple is-root check
		hdr.Name = filepath.Clean(hdr.Name)

		if !strings.HasSuffix(hdr.Name, "/") {
			// Not the root directory, ensure that the parent directory exists.
			// This happened in some tests where an image had a tarfile without any
			// parent directories.
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				// NOTE(review): 0600 on a directory carries no execute bit, so
				// the created parent is not traversable until a later header
				// sets its mode — confirm this is intended.
				err = os.MkdirAll(parentPath, 0600)
				if err != nil {
					return err
				}
			}
		}

		// Skip AUFS metadata dirs
		if strings.HasPrefix(hdr.Name, ".wh..wh.") {
			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
			// We don't want this directory, but we need the files in them so that
			// such hardlinks can be resolved.
			if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg {
				basename := filepath.Base(hdr.Name)
				aufsHardlinks[basename] = hdr
				if aufsTempdir == "" {
					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
						return err
					}
					defer os.RemoveAll(aufsTempdir)
				}
				// Extract the link target into the scratch dir so that a
				// later TypeLink entry can be re-pointed at it.
				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true); err != nil {
					return err
				}
			}
			continue
		}

		path := filepath.Join(dest, hdr.Name)
		base := filepath.Base(path)
		// A whiteout entry (".wh.<name>") marks <name> as deleted in this
		// layer: remove it from dest instead of extracting anything.
		if strings.HasPrefix(base, ".wh.") {
			originalBase := base[len(".wh."):]
			originalPath := filepath.Join(filepath.Dir(path), originalBase)
			if err := os.RemoveAll(originalPath); err != nil {
				return err
			}
		} else {
			// If path exits we almost always just want to remove and replace it.
			// The only exception is when it is a directory *and* the file from
			// the layer is also a directory. Then we want to merge them (i.e.
			// just apply the metadata from the layer).
			if fi, err := os.Lstat(path); err == nil {
				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
					if err := os.RemoveAll(path); err != nil {
						return err
					}
				}
			}

			trBuf.Reset(tr)
			srcData := io.Reader(trBuf)
			srcHdr := hdr

			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
			// we manually retarget these into the temporary files we extracted them into
			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") {
				linkBasename := filepath.Base(hdr.Linkname)
				srcHdr = aufsHardlinks[linkBasename]
				if srcHdr == nil {
					return fmt.Errorf("Invalid aufs hardlink")
				}
				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
				if err != nil {
					return err
				}
				// NOTE(review): this defer fires at function return, not at the
				// end of the loop iteration, so one descriptor stays open per
				// retargeted link until ApplyLayer finishes.
				defer tmpFile.Close()
				srcData = tmpFile
			}

			if err := createTarFile(path, dest, srcHdr, srcData, true); err != nil {
				return err
			}

			// Directory mtimes must be handled at the end to avoid further
			// file creation in them to modify the directory mtime
			if hdr.Typeflag == tar.TypeDir {
				dirs = append(dirs, hdr)
			}
		}
	}

	// Second pass: restore directory access/modification times now that no
	// further entries will be created inside them. A zero tar time maps to
	// UTIME_OMIT via timeToTimespec, leaving that timestamp untouched.
	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
		if err := syscall.UtimesNano(path, ts); err != nil {
			return err
		}
	}

	return nil
}
2 | 158 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,16 @@ |
0 |
+package archive |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "syscall" |
|
4 |
+ "time" |
|
5 |
+) |
|
6 |
+ |
|
7 |
+func timeToTimespec(time time.Time) (ts syscall.Timespec) { |
|
8 |
+ if time.IsZero() { |
|
9 |
+ // Return UTIME_OMIT special value |
|
10 |
+ ts.Sec = 0 |
|
11 |
+ ts.Nsec = ((1 << 30) - 2) |
|
12 |
+ return |
|
13 |
+ } |
|
14 |
+ return syscall.NsecToTimespec(time.UnixNano()) |
|
15 |
+} |
0 | 16 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,16 @@ |
0 |
+// +build !linux |
|
1 |
+ |
|
2 |
+package archive |
|
3 |
+ |
|
4 |
+import ( |
|
5 |
+ "syscall" |
|
6 |
+ "time" |
|
7 |
+) |
|
8 |
+ |
|
9 |
+func timeToTimespec(time time.Time) (ts syscall.Timespec) { |
|
10 |
+ nsec := int64(0) |
|
11 |
+ if !time.IsZero() { |
|
12 |
+ nsec = time.UnixNano() |
|
13 |
+ } |
|
14 |
+ return syscall.NsecToTimespec(nsec) |
|
15 |
+} |
0 | 16 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,59 @@ |
0 |
+package archive |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "bytes" |
|
4 |
+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" |
|
5 |
+ "io/ioutil" |
|
6 |
+) |
|
7 |
+ |
|
8 |
+// Generate generates a new archive from the content provided |
|
9 |
+// as input. |
|
10 |
+// |
|
11 |
+// `files` is a sequence of path/content pairs. A new file is |
|
12 |
+// added to the archive for each pair. |
|
13 |
+// If the last pair is incomplete, the file is created with an |
|
14 |
+// empty content. For example: |
|
15 |
+// |
|
16 |
+// Generate("foo.txt", "hello world", "emptyfile") |
|
17 |
+// |
|
18 |
+// The above call will return an archive with 2 files: |
|
19 |
+// * ./foo.txt with content "hello world" |
|
20 |
+// * ./empty with empty content |
|
21 |
+// |
|
22 |
+// FIXME: stream content instead of buffering |
|
23 |
+// FIXME: specify permissions and other archive metadata |
|
24 |
+func Generate(input ...string) (Archive, error) { |
|
25 |
+ files := parseStringPairs(input...) |
|
26 |
+ buf := new(bytes.Buffer) |
|
27 |
+ tw := tar.NewWriter(buf) |
|
28 |
+ for _, file := range files { |
|
29 |
+ name, content := file[0], file[1] |
|
30 |
+ hdr := &tar.Header{ |
|
31 |
+ Name: name, |
|
32 |
+ Size: int64(len(content)), |
|
33 |
+ } |
|
34 |
+ if err := tw.WriteHeader(hdr); err != nil { |
|
35 |
+ return nil, err |
|
36 |
+ } |
|
37 |
+ if _, err := tw.Write([]byte(content)); err != nil { |
|
38 |
+ return nil, err |
|
39 |
+ } |
|
40 |
+ } |
|
41 |
+ if err := tw.Close(); err != nil { |
|
42 |
+ return nil, err |
|
43 |
+ } |
|
44 |
+ return ioutil.NopCloser(buf), nil |
|
45 |
+} |
|
46 |
+ |
|
47 |
// parseStringPairs groups a flat argument list into consecutive
// [path, content] pairs. A trailing unpaired element is completed with an
// empty second component.
func parseStringPairs(input ...string) (output [][2]string) {
	output = make([][2]string, 0, len(input)/2+1)
	for len(input) > 0 {
		pair := [2]string{input[0], ""}
		if len(input) > 1 {
			pair[1] = input[1]
			input = input[2:]
		} else {
			input = nil
		}
		output = append(output, pair)
	}
	return
}