96d1e9bb |
package archive |
97a82094 |
import ( |
576985a1 |
"archive/tar" |
77c2b76e |
"bufio" |
4a3aefbb |
"bytes" |
a96a26c6 |
"compress/bzip2" |
444a087a |
"compress/gzip" |
54db1862 |
"fmt" |
97a82094 |
"io"
"io/ioutil" |
baacae83 |
"os" |
97a82094 |
"os/exec" |
36d610a3 |
"path/filepath" |
8228ee4b |
"runtime" |
a4868e23 |
"strings" |
710d5a48 |
"syscall" |
77c2b76e |
|
ee7dd44c |
"github.com/docker/docker/pkg/fileutils" |
442b4562 |
"github.com/docker/docker/pkg/idtools" |
b48f4bf5 |
"github.com/docker/docker/pkg/ioutils" |
84d76e55 |
"github.com/docker/docker/pkg/pools" |
30d5a42c |
"github.com/docker/docker/pkg/promise" |
b3ee9ac7 |
"github.com/docker/docker/pkg/system" |
1009e6a4 |
"github.com/sirupsen/logrus" |
97a82094 |
)
|
3dfc910d |
type (
	// Compression identifies the compression algorithm applied to a tar
	// stream (or Uncompressed for none).
	Compression int
	// WhiteoutFormat is the format of whiteouts unpacked
	WhiteoutFormat int

	// TarOptions wraps the tar options.
	TarOptions struct {
		// IncludeFiles lists paths, relative to the source directory, to
		// include in the archive; empty means "." (everything).
		IncludeFiles []string
		// ExcludePatterns holds patterns for entries to leave out of the
		// archive (and to skip on unpack).
		ExcludePatterns []string
		// Compression selects the stream compression used when packing.
		Compression Compression
		// NoLchown, when true, skips the chown of created entries on unpack.
		NoLchown bool
		// UIDMaps/GIDMaps remap ownership between host and container IDs.
		UIDMaps []idtools.IDMap
		GIDMaps []idtools.IDMap
		// ChownOpts, when non-nil, forces this owner on all entries.
		ChownOpts *idtools.IDPair
		// IncludeSourceDir, when true, also emits an entry for the source
		// directory itself instead of only its contents.
		IncludeSourceDir bool
		// WhiteoutFormat is the expected on disk format for whiteout files.
		// This format will be converted to the standard format on pack
		// and from the standard format on unpack.
		WhiteoutFormat WhiteoutFormat
		// When unpacking, specifies whether overwriting a directory with a
		// non-directory is allowed and vice versa.
		NoOverwriteDirNonDir bool
		// For each include when creating an archive, the included name will be
		// replaced with the matching name from this map.
		RebaseNames map[string]string
		// InUserNS indicates the daemon runs in a user namespace; device
		// nodes cannot be created there, so they are skipped on unpack.
		InUserNS bool
	}
)
2c7f50a7 |
|
7a7357da |
// Archiver implements the Archiver interface and allows the reuse of most utility functions of
// this package with a pluggable Untar function. Also, to facilitate the passing of specific id
// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
type Archiver struct {
	// Untar performs the unpacking; NewDefaultArchiver wires it to this
	// package's Untar function.
	Untar func(io.Reader, string, *TarOptions) error
	// IDMappingsVar supplies the UID/GID maps forwarded to Untar operations.
	IDMappingsVar *idtools.IDMappings
}
97a82094 |
|
967ef7e6 |
// NewDefaultArchiver returns a new Archiver without any IDMappings
func NewDefaultArchiver() *Archiver { |
7a7357da |
return &Archiver{Untar: Untar, IDMappingsVar: &idtools.IDMappings{}} |
967ef7e6 |
}
// breakoutError is used to differentiate errors related to breaking out
// of the extraction target directory (e.g. via hardlink or symlink targets).
// When testing archive breakout in the unit tests, this error is expected
// in order for the test to pass.
type breakoutError error
3243e504 |
const (
	// Uncompressed represents the uncompressed (identity) state.
	Uncompressed Compression = iota
	// Bzip2 is bzip2 compression algorithm.
	Bzip2
	// Gzip is gzip compression algorithm.
	Gzip
	// Xz is xz compression algorithm.
	Xz
)
|
8222c863 |
const (
	// AUFSWhiteoutFormat is the default format for whiteouts
	AUFSWhiteoutFormat WhiteoutFormat = iota
	// OverlayWhiteoutFormat formats whiteout according to the overlay
	// standard.
	OverlayWhiteoutFormat
)
|
1a451d9a |
// File-type bits of the Unix mode field, in octal. These were removed from
// Go's archive/tar in Go 1.9, so they are redeclared here for
// fillGo18FileTypeBits.
const (
	modeISDIR  = 040000  // Directory
	modeISFIFO = 010000  // FIFO
	modeISREG  = 0100000 // Regular file
	modeISLNK  = 0120000 // Symbolic link
	modeISBLK  = 060000  // Block special file
	modeISCHR  = 020000  // Character special file
	modeISSOCK = 0140000 // Socket
)
|
3243e504 |
// IsArchivePath checks if the (possibly compressed) file at the given path
// starts with a tar file header.
func IsArchivePath(path string) bool {
	file, err := os.Open(path)
	if err != nil {
		return false
	}
	defer file.Close()
	rdr, err := DecompressStream(file)
	if err != nil {
		return false
	}
	// Fix: the decompressed stream was previously never closed. For the xz
	// path this leaks the decompression subprocess wrapper; for gzip it
	// leaves the reader open. Always close it before returning.
	defer rdr.Close()
	r := tar.NewReader(rdr)
	_, err = r.Next()
	return err == nil
}
|
ba332b7d |
// DetectCompression detects the compression algorithm of the source. |
0425f65e |
func DetectCompression(source []byte) Compression {
for compression, m := range map[Compression][]byte{
Bzip2: {0x42, 0x5A, 0x68},
Gzip: {0x1F, 0x8B, 0x08},
Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
} { |
67ce59a8 |
if len(source) < len(m) { |
a72b45db |
logrus.Debug("Len too short") |
67ce59a8 |
continue
} |
087f7307 |
if bytes.Equal(m, source[:len(m)]) { |
0425f65e |
return compression
}
}
return Uncompressed
}
|
b48f4bf5 |
func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) { |
b8a4f570 |
args := []string{"xz", "-d", "-c", "-q"}
|
b48f4bf5 |
return cmdStream(exec.Command(args[0], args[1:]...), archive) |
b8a4f570 |
}
|
3f77baf2 |
// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
	// Wrap the input in a pooled 32K buffered reader so we can peek at the
	// magic bytes without consuming them.
	p := pools.BufioReader32KPool
	buf := p.Get(archive)
	// 10 bytes is enough to cover the longest magic signature checked by
	// DetectCompression (xz uses 6 bytes).
	bs, err := buf.Peek(10)
	if err != nil && err != io.EOF {
		// Note: we'll ignore any io.EOF error because there are some odd
		// cases where the layer.tar file will be empty (zero bytes) and
		// that results in an io.EOF from the Peek() call. So, in those
		// cases we'll just treat it as a non-compressed stream and
		// that means just create an empty layer.
		// See Issue 18170
		return nil, err
	}
	compression := DetectCompression(bs)
	switch compression {
	case Uncompressed:
		// Pass-through; the wrapper returns the buffer to the pool on Close.
		readBufWrapper := p.NewReadCloserWrapper(buf, buf)
		return readBufWrapper, nil
	case Gzip:
		gzReader, err := gzip.NewReader(buf)
		if err != nil {
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
		return readBufWrapper, nil
	case Bzip2:
		bz2Reader := bzip2.NewReader(buf)
		readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
		return readBufWrapper, nil
	case Xz:
		xzReader, chdone, err := xzDecompress(buf)
		if err != nil {
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
		// Wait for the xz subprocess to finish before releasing the buffer,
		// otherwise the pooled reader could be reused while still in use.
		return ioutils.NewReadCloserWrapper(readBufWrapper, func() error {
			<-chdone
			return readBufWrapper.Close()
		}), nil
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}
|
39bcaee4 |
// CompressStream compresses the dest with specified compression algorithm. |
9b2c3aa8 |
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { |
84d76e55 |
p := pools.BufioWriter32KPool
buf := p.Get(dest) |
5ea48aa7 |
switch compression {
case Uncompressed: |
84d76e55 |
writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
return writeBufWrapper, nil |
97a82094 |
case Gzip: |
84d76e55 |
gzWriter := gzip.NewWriter(dest)
writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
return writeBufWrapper, nil |
5ea48aa7 |
case Bzip2, Xz:
// archive/bzip2 does not support writing, and there is no xz support at all
// However, this is not a problem as docker only currently generates gzipped tars
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
default:
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) |
97a82094 |
}
}
|
3f6dc81e |
// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to
// modify the contents or header of an entry in the archive. If the file already
// exists in the archive the TarModifierFunc will be called with the Header and
// a reader which will return the files content. If the file does not exist both
// header and content will be nil. Returning a nil header drops the entry.
type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error)
|
56bf6de9 |
// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the
// tar stream are modified if they match any of the keys in mods.
func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser {
	pipeReader, pipeWriter := io.Pipe()
	go func() {
		tarReader := tar.NewReader(inputTarStream)
		tarWriter := tar.NewWriter(pipeWriter)
		defer inputTarStream.Close()
		defer tarWriter.Close()

		// modify runs a single modifier and writes its result to the output
		// tar. A nil returned header means the entry is dropped entirely.
		modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error {
			header, data, err := modifier(name, original, tarReader)
			switch {
			case err != nil:
				return err
			case header == nil:
				return nil
			}

			// The modifier fully determines the content, so the size is
			// always that of the returned data.
			header.Name = name
			header.Size = int64(len(data))
			if err := tarWriter.WriteHeader(header); err != nil {
				return err
			}
			if len(data) != 0 {
				if _, err := tarWriter.Write(data); err != nil {
					return err
				}
			}
			return nil
		}

		var err error
		var originalHeader *tar.Header
		for {
			originalHeader, err = tarReader.Next()
			if err == io.EOF {
				break
			}
			if err != nil {
				pipeWriter.CloseWithError(err)
				return
			}

			modifier, ok := mods[originalHeader.Name]
			if !ok {
				// No modifiers for this file, copy the header and data
				if err := tarWriter.WriteHeader(originalHeader); err != nil {
					pipeWriter.CloseWithError(err)
					return
				}
				if _, err := pools.Copy(tarWriter, tarReader); err != nil {
					pipeWriter.CloseWithError(err)
					return
				}
				continue
			}
			// Each modifier fires at most once; remove it so the leftover
			// pass below only sees modifiers that never matched.
			delete(mods, originalHeader.Name)
			if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil {
				pipeWriter.CloseWithError(err)
				return
			}
		}

		// Apply the modifiers that haven't matched any files in the archive
		for name, modifier := range mods {
			if err := modify(name, nil, modifier, nil); err != nil {
				pipeWriter.CloseWithError(err)
				return
			}
		}

		pipeWriter.Close()

	}()
	return pipeReader
}
|
ba332b7d |
// Extension returns the extension of a file that uses the specified compression algorithm. |
54db1862 |
func (compression *Compression) Extension() string {
switch *compression {
case Uncompressed:
return "tar"
case Bzip2:
return "tar.bz2"
case Gzip:
return "tar.gz"
case Xz:
return "tar.xz"
}
return ""
}
|
f95f5828 |
// FileInfoHeader creates a populated Header from fi.
// Compared to archive pkg this function fills in more information. |
1a451d9a |
// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
// which have been deleted since Go 1.9 archive/tar. |
5c3d2d55 |
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { |
f95f5828 |
hdr, err := tar.FileInfoHeader(fi, link)
if err != nil {
return nil, err
} |
1a451d9a |
hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) |
f95f5828 |
name, err = canonicalTarName(name, fi.IsDir())
if err != nil {
return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err)
}
hdr.Name = name
if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
return nil, err
} |
5c3d2d55 |
return hdr, nil
}
|
1a451d9a |
// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar
// https://github.com/golang/go/commit/66b5a2f
func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
	switch fm := fi.Mode(); {
	case fm.IsRegular():
		return mode | modeISREG
	case fi.IsDir():
		return mode | modeISDIR
	case fm&os.ModeSymlink != 0:
		return mode | modeISLNK
	case fm&os.ModeDevice != 0:
		// Character devices carry both ModeDevice and ModeCharDevice.
		if fm&os.ModeCharDevice != 0 {
			return mode | modeISCHR
		}
		return mode | modeISBLK
	case fm&os.ModeNamedPipe != 0:
		return mode | modeISFIFO
	case fm&os.ModeSocket != 0:
		return mode | modeISSOCK
	}
	return mode
}
|
5c3d2d55 |
// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
// to a tar header
func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
	// NOTE(review): the error from Lgetxattr is deliberately discarded —
	// a missing xattr or an unsupported filesystem simply leaves the header
	// untouched. Confirm this best-effort behavior is intended; as written
	// this function can never return a non-nil error.
	capability, _ := system.Lgetxattr(path, "security.capability")
	if capability != nil {
		hdr.Xattrs = make(map[string]string)
		hdr.Xattrs["security.capability"] = string(capability)
	}
	return nil
}
|
8222c863 |
// tarWhiteoutConverter converts whiteout entries between the on-disk format
// (e.g. overlay) and the AUFS tar standard, in both directions.
type tarWhiteoutConverter interface {
	// ConvertWrite is used on pack; it may return an extra whiteout header
	// to be written after the original (see addTarFile).
	ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
	// ConvertRead is used on unpack; the returned bool reports whether the
	// entry should still be written to disk.
	ConvertRead(*tar.Header, string) (bool, error)
}
|
f9f80443 |
// tarAppender bundles the state needed to add files to a tar stream:
// the writer, a reusable copy buffer, hardlink bookkeeping and ID mapping.
type tarAppender struct {
	TarWriter *tar.Writer
	Buffer    *bufio.Writer

	// for hardlink mapping: inode -> first name seen with that inode
	SeenFiles  map[uint64]string
	IDMappings *idtools.IDMappings
	// ChownOpts, when non-nil, overrides the UID/GID of every written header.
	ChownOpts *idtools.IDPair

	// For packing and unpacking whiteout files in the
	// non standard format. The whiteout files defined
	// by the AUFS standard are used as the tar whiteout
	// standard.
	WhiteoutConverter tarWhiteoutConverter
}
|
8a34c67a |
func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender { |
5672eeb5 |
return &tarAppender{
SeenFiles: make(map[uint64]string),
TarWriter: tar.NewWriter(writer),
Buffer: pools.BufioWriter32KPool.Get(nil),
IDMappings: idMapping, |
8a34c67a |
ChownOpts: chownOpts, |
5672eeb5 |
}
}
|
28842d3f |
// canonicalTarName provides a platform-independent and consistent posix-style
//path for files and directories to be archived regardless of the platform.
func canonicalTarName(name string, isDir bool) (string, error) { |
5ecab9e8 |
name, err := CanonicalTarNameForPath(name) |
28842d3f |
if err != nil {
return "", err
}
// suffix with '/' for directories
if isDir && !strings.HasSuffix(name, "/") {
name += "/"
}
return name, nil
}
|
b03d3232 |
// addTarFile adds to the tar archive a file from `path` as `name`
func (ta *tarAppender) addTarFile(path, name string) error {
	fi, err := os.Lstat(path)
	if err != nil {
		return err
	}

	// Resolve the symlink target (if any) so the header can record it.
	var link string
	if fi.Mode()&os.ModeSymlink != 0 {
		var err error
		link, err = os.Readlink(path)
		if err != nil {
			return err
		}
	}
	hdr, err := FileInfoHeader(name, fi, link)
	if err != nil {
		return err
	}
	if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
		return err
	}

	// if it's not a directory and has more than 1 link,
	// it's hard linked, so set the type flag accordingly
	if !fi.IsDir() && hasHardlinks(fi) {
		inode, err := getInodeFromStat(fi.Sys())
		if err != nil {
			return err
		}
		// a link should have a name that it links too
		// and that linked name should be first in the tar archive
		if oldpath, ok := ta.SeenFiles[inode]; ok {
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = oldpath
			hdr.Size = 0 // This Must be here for the writer math to add up!
		} else {
			ta.SeenFiles[inode] = name
		}
	}

	//handle re-mapping container ID mappings back to host ID mappings before
	//writing tar headers/files. We skip whiteout files because they were written
	//by the kernel and already have proper ownership relative to the host
	if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() {
		fileIDPair, err := getFileUIDGID(fi.Sys())
		if err != nil {
			return err
		}
		hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair)
		if err != nil {
			return err
		}
	}

	// explicitly override with ChownOpts
	if ta.ChownOpts != nil {
		hdr.Uid = ta.ChownOpts.UID
		hdr.Gid = ta.ChownOpts.GID
	}

	if ta.WhiteoutConverter != nil {
		wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
		if err != nil {
			return err
		}
		// If a new whiteout file exists, write original hdr, then
		// replace hdr with wo to be written after. Whiteouts should
		// always be written after the original. Note the original
		// hdr may have been updated to be a whiteout with returning
		// a whiteout header
		if wo != nil {
			if err := ta.TarWriter.WriteHeader(hdr); err != nil {
				return err
			}
			if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
				return fmt.Errorf("tar: cannot use whiteout for non-empty file")
			}
			hdr = wo
		}
	}

	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
		return err
	}

	// Only regular files with content carry a data section.
	if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
		// We use system.OpenSequential to ensure we use sequential file
		// access on Windows to avoid depleting the standby list.
		// On Linux, this equates to a regular os.Open.
		file, err := system.OpenSequential(path)
		if err != nil {
			return err
		}

		ta.Buffer.Reset(ta.TarWriter)
		defer ta.Buffer.Reset(nil)
		_, err = io.Copy(ta.Buffer, file)
		file.Close()
		if err != nil {
			return err
		}
		err = ta.Buffer.Flush()
		if err != nil {
			return err
		}
	}
	return nil
}
|
967ef7e6 |
// createTarFile creates the filesystem entry described by hdr at path,
// reading any file content from reader. extractDir is the extraction root
// used for hardlink/symlink breakout checks; Lchown/chownOpts control the
// ownership applied, and inUserns suppresses device-node creation.
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error {
	// hdr.Mode is in linux format, which we can use for sycalls,
	// but for os.Foo() calls we need the mode converted to os.FileMode,
	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
	hdrInfo := hdr.FileInfo()

	switch hdr.Typeflag {
	case tar.TypeDir:
		// Create directory unless it exists as a directory already.
		// In that case we just want to merge the two
		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}
	case tar.TypeReg, tar.TypeRegA:
		// Source is regular file. We use system.OpenFileSequential to use sequential
		// file access to avoid depleting the standby list on Windows.
		// On Linux, this equates to a regular os.OpenFile
		file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
		if err != nil {
			return err
		}
		if _, err := io.Copy(file, reader); err != nil {
			file.Close()
			return err
		}
		file.Close()

	case tar.TypeBlock, tar.TypeChar:
		if inUserns { // cannot create devices in a userns
			return nil
		}
		// Handle this is an OS-specific way
		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
			return err
		}
	case tar.TypeFifo:
		// Handle this is an OS-specific way
		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
			return err
		}
	case tar.TypeLink:
		targetPath := filepath.Join(extractDir, hdr.Linkname)
		// check for hardlink breakout
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
		}
		if err := os.Link(targetPath, path); err != nil {
			return err
		}
	case tar.TypeSymlink:
		// path -> hdr.Linkname = targetPath
		// e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
		// that symlink would first have to be created, which would be caught earlier, at this very check:
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
		}
		if err := os.Symlink(hdr.Linkname, path); err != nil {
			return err
		}

	case tar.TypeXGlobalHeader:
		logrus.Debug("PAX Global Extended Headers found and ignored")
		return nil

	default:
		return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
	}

	// Lchown is not supported on Windows.
	if Lchown && runtime.GOOS != "windows" {
		if chownOpts == nil {
			// Default to the ownership recorded in the header.
			chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
		}
		if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
			return err
		}
	}

	var errors []string
	for key, value := range hdr.Xattrs {
		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
			if err == syscall.ENOTSUP {
				// We ignore errors here because not all graphdrivers support
				// xattrs *cough* old versions of AUFS *cough*. However only
				// ENOTSUP should be emitted in that case, otherwise we still
				// bail.
				errors = append(errors, err.Error())
				continue
			}
			return err
		}
	}
	if len(errors) > 0 {
		logrus.WithFields(logrus.Fields{
			"errors": errors,
		}).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
	}

	// There is no LChmod, so ignore mode for symlink. Also, this
	// must happen after chown, as that can modify the file mode
	if err := handleLChmod(hdr, path, hdrInfo); err != nil {
		return err
	}

	aTime := hdr.AccessTime
	if aTime.Before(hdr.ModTime) {
		// Last access time should never be before last modified time.
		aTime = hdr.ModTime
	}

	// system.Chtimes doesn't support a NOFOLLOW flag atm
	if hdr.Typeflag == tar.TypeLink {
		// NOTE(review): this stats hdr.Linkname as given, not joined with
		// extractDir as the link-creation path above does — confirm intended.
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
			return err
		}
	} else {
		ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
		if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
			return err
		}
	}
	return nil
}
|
5be7b9af |
// Tar creates an archive from the directory at `path`, and returns it as a
// stream of bytes. |
f198ee52 |
func Tar(path string, compression Compression) (io.ReadCloser, error) { |
111ab125 |
return TarWithOptions(path, &TarOptions{Compression: compression}) |
223280f3 |
}
|
111ab125 |
// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
// The archive is produced by a background goroutine and streamed through a pipe.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {

	// Fix the source path to work with long path names. This is a no-op
	// on platforms other than Windows.
	srcPath = fixVolumePathPrefix(srcPath)

	pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
	if err != nil {
		return nil, err
	}

	pipeReader, pipeWriter := io.Pipe()

	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	go func() {
		ta := newTarAppender(
			idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
			compressWriter,
			options.ChownOpts,
		)
		ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat)

		defer func() {
			// Make sure to check the error on Close.
			if err := ta.TarWriter.Close(); err != nil {
				logrus.Errorf("Can't close tar writer: %s", err)
			}
			if err := compressWriter.Close(); err != nil {
				logrus.Errorf("Can't close compress writer: %s", err)
			}
			if err := pipeWriter.Close(); err != nil {
				logrus.Errorf("Can't close pipe writer: %s", err)
			}
		}()

		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this

		stat, err := os.Lstat(srcPath)
		if err != nil {
			return
		}
		if !stat.IsDir() {
			// We can't later join a non-dir with any includes because the
			// 'walk' will error if "file/." is stat-ed and "file" is not a
			// directory. So, we must split the source path and use the
			// basename as the include.
			if len(options.IncludeFiles) > 0 {
				logrus.Warn("Tar: Can't archive a file with includes")
			}
			dir, base := SplitPathDirEntry(srcPath)
			srcPath = dir
			options.IncludeFiles = []string{base}
		}
		if len(options.IncludeFiles) == 0 {
			options.IncludeFiles = []string{"."}
		}

		// seen de-duplicates entries reachable via multiple includes.
		seen := make(map[string]bool)
		for _, include := range options.IncludeFiles {
			rebaseName := options.RebaseNames[include]

			walkRoot := getWalkRoot(srcPath, include)
			filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
					return nil
				}
				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
					// Error getting relative path OR we are looking
					// at the source directory path. Skip in both situations.
					return nil
				}

				if options.IncludeSourceDir && include == "." && relFilePath != "." {
					relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
				}

				skip := false
				// If "include" is an exact match for the current file
				// then even if there's an "excludePatterns" pattern that
				// matches it, don't skip it. IOW, assume an explicit 'include'
				// is asking for that file no matter what - which is true
				// for some files, like .dockerignore and Dockerfile (sometimes)
				if include != relFilePath {
					skip, err = pm.Matches(relFilePath)
					if err != nil {
						logrus.Errorf("Error matching %s: %v", relFilePath, err)
						return err
					}
				}

				if skip {
					// If we want to skip this file and its a directory
					// then we should first check to see if there's an
					// excludes pattern (e.g. !dir/file) that starts with this
					// dir. If so then we can't skip this dir.
					// Its not a dir then so we can just return/skip.
					if !f.IsDir() {
						return nil
					}
					// No exceptions (!...) in patterns so just skip dir
					if !pm.Exclusions() {
						return filepath.SkipDir
					}
					dirSlash := relFilePath + string(filepath.Separator)

					for _, pat := range pm.Patterns() {
						if !pat.Exclusion() {
							continue
						}
						if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
							// found a match - so can't skip this dir
							return nil
						}
					}
					// No matching exclusion dir so just skip dir
					return filepath.SkipDir
				}

				if seen[relFilePath] {
					return nil
				}
				seen[relFilePath] = true

				// Rename the base resource.
				if rebaseName != "" {
					var replacement string
					if rebaseName != string(filepath.Separator) {
						// Special case the root directory to replace with an
						// empty string instead so that we don't end up with
						// double slashes in the paths.
						replacement = rebaseName
					}
					relFilePath = strings.Replace(relFilePath, include, replacement, 1)
				}

				if err := ta.addTarFile(filePath, relFilePath); err != nil {
					logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
					// if pipe is broken, stop writing tar stream to it
					if err == io.ErrClosedPipe {
						return err
					}
				}
				return nil
			})
		}
	}()
	return pipeReader, nil
}
|
ba332b7d |
// Unpack unpacks the decompressedArchive to dest with options.
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
	tr := tar.NewReader(decompressedArchive)
	trBuf := pools.BufioReader32KPool.Get(nil)
	defer pools.BufioReader32KPool.Put(trBuf)

	// dirs collects directory headers so their mtimes can be applied last.
	var dirs []*tar.Header
	idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
	rootIDs := idMappings.RootPair()
	whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)

	// Iterate through the files in the archive.
loop:
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// Normalize name, for safety and for a simple is-root check
		// This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
		// This keeps "..\" as-is, but normalizes "\..\" to "\".
		hdr.Name = filepath.Clean(hdr.Name)

		for _, exclude := range options.ExcludePatterns {
			if strings.HasPrefix(hdr.Name, exclude) {
				continue loop
			}
		}

		// After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
		// the filepath format for the OS on which the daemon is running. Hence
		// the check for a slash-suffix MUST be done in an OS-agnostic way.
		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
			// Not the root directory, ensure that the parent directory exists
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs)
				if err != nil {
					return err
				}
			}
		}

		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return err
		}
		// Breakout check: the joined path must stay inside dest.
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}

		// If path exits we almost always just want to remove and replace it
		// The only exception is when it is a directory *and* the file from
		// the layer is also a directory. Then we want to merge them (i.e.
		// just apply the metadata from the layer).
		if fi, err := os.Lstat(path); err == nil {
			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing directory with a non-directory from the archive.
				return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
			}
			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing non-directory with a directory from the archive.
				return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
			}

			if fi.IsDir() && hdr.Name == "." {
				continue
			}

			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
				if err := os.RemoveAll(path); err != nil {
					return err
				}
			}
		}
		trBuf.Reset(tr)

		if err := remapIDs(idMappings, hdr); err != nil {
			return err
		}

		if whiteoutConverter != nil {
			writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
			if err != nil {
				return err
			}
			if !writeFile {
				continue
			}
		}

		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil {
			return err
		}

		// Directory mtimes must be handled at the end to avoid further
		// file creation in them to modify the directory mtime
		if hdr.Typeflag == tar.TypeDir {
			dirs = append(dirs, hdr)
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return err
		}
	}
	return nil
}
|
001ac15b |
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist. |
56bf275e |
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
return untarHandler(tarArchive, dest, options, true)
}
|
ba332b7d |
// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, |
56bf275e |
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
return untarHandler(tarArchive, dest, options, false)
}
// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
if tarArchive == nil { |
001ac15b |
return fmt.Errorf("Empty archive")
}
dest = filepath.Clean(dest)
if options == nil {
options = &TarOptions{}
} |
6d801a3c |
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{} |
001ac15b |
} |
56bf275e |
|
ba332b7d |
r := tarArchive |
56bf275e |
if decompress {
decompressedArchive, err := DecompressStream(tarArchive)
if err != nil {
return err
}
defer decompressedArchive.Close()
r = decompressedArchive |
001ac15b |
} |
56bf275e |
return Unpack(r, dest, options) |
001ac15b |
}
|
ba332b7d |
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
// If either Tar or Untar fails, TarUntar aborts and returns the error. |
14692d0d |
func (archiver *Archiver) TarUntar(src, dst string) error { |
6f4d8470 |
logrus.Debugf("TarUntar(%s %s)", src, dst) |
111ab125 |
archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) |
5be7b9af |
if err != nil {
return err
} |
f198ee52 |
defer archive.Close() |
967ef7e6 |
options := &TarOptions{ |
7a7357da |
UIDMaps: archiver.IDMappingsVar.UIDs(),
GIDMaps: archiver.IDMappingsVar.GIDs(), |
442b4562 |
}
return archiver.Untar(archive, dst, options) |
5be7b9af |
}
|
ba332b7d |
// UntarPath untar a file from path to a destination, src is the source tar file path. |
14692d0d |
func (archiver *Archiver) UntarPath(src, dst string) error { |
f198ee52 |
archive, err := os.Open(src)
if err != nil { |
5b828761 |
return err |
f198ee52 |
}
defer archive.Close() |
967ef7e6 |
options := &TarOptions{ |
7a7357da |
UIDMaps: archiver.IDMappingsVar.UIDs(),
GIDMaps: archiver.IDMappingsVar.GIDs(), |
442b4562 |
} |
3243e504 |
return archiver.Untar(archive, dst, options) |
5b828761 |
}
|
ba332b7d |
// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO. |
14692d0d |
func (archiver *Archiver) CopyWithTar(src, dst string) error { |
5be7b9af |
srcSt, err := os.Stat(src) |
5b828761 |
if err != nil {
return err
} |
36d610a3 |
if !srcSt.IsDir() { |
14692d0d |
return archiver.CopyFileWithTar(src, dst) |
36d610a3 |
} |
06d95003 |
|
7a7357da |
// if this Archiver is set up with ID mapping we need to create |
06d95003 |
// the new destination directory with the remapped root UID/GID pair
// as owner |
7a7357da |
rootIDs := archiver.IDMappingsVar.RootPair() |
36d610a3 |
// Create dst, copy src's content into it |
6f4d8470 |
logrus.Debugf("Creating dest directory: %s", dst) |
967ef7e6 |
if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { |
36d610a3 |
return err
} |
6f4d8470 |
logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) |
14692d0d |
return archiver.TarUntar(src, dst) |
36d610a3 |
}
|
ba332b7d |
// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
//
// Internally the file is packed into a one-entry tar stream by a
// goroutine and unpacked on the other end of an in-memory pipe, so
// ownership remapping and mode translation reuse the normal untar path.
func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
	logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}

	if srcSt.IsDir() {
		return fmt.Errorf("Can't copy a directory")
	}

	// Clean up the trailing slash. This must be done in an operating
	// system specific manner.
	// NOTE(review): this indexes dst[len(dst)-1], so an empty dst would
	// panic here — confirm callers never pass "".
	if dst[len(dst)-1] == os.PathSeparator {
		dst = filepath.Join(dst, filepath.Base(src))
	}
	// Create the holding directory if necessary
	if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil {
		return err
	}

	// Writer side: a goroutine that tars the single file into the pipe.
	r, w := io.Pipe()
	errC := promise.Go(func() error {
		defer w.Close()

		srcF, err := os.Open(src)
		if err != nil {
			return err
		}
		defer srcF.Close()

		hdr, err := tar.FileInfoHeader(srcSt, "")
		if err != nil {
			return err
		}
		// The entry is named after the destination so Untar materializes
		// it directly at dst inside filepath.Dir(dst).
		hdr.Name = filepath.Base(dst)
		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))

		// Translate container IDs to host IDs per the archiver's mappings.
		if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil {
			return err
		}

		tw := tar.NewWriter(w)
		defer tw.Close()
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if _, err := io.Copy(tw, srcF); err != nil {
			return err
		}
		return nil
	})
	// Join with the writer goroutine on return; its error wins only if the
	// untar side succeeded (named return `err` is still nil).
	defer func() {
		if er := <-errC; err == nil && er != nil {
			err = er
		}
	}()

	// Reader side: unpack the one-entry stream. On failure, poison the pipe
	// so the writer goroutine unblocks and errC is eventually delivered.
	err = archiver.Untar(r, filepath.Dir(dst), nil)
	if err != nil {
		r.CloseWithError(err)
	}
	return err
}
|
7a7357da |
// IDMappings returns the IDMappings of the archiver.
func (archiver *Archiver) IDMappings() *idtools.IDMappings {
	// Plain accessor over the exported field.
	return archiver.IDMappingsVar
}
|
df248d31 |
// remapIDs rewrites hdr's UID/GID in place through the given mappings,
// translating container-side IDs to their host-side equivalents.
func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error {
	pair := idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
	// The header is updated even when ToHost errors, matching ToHost's
	// returned pair; the caller decides what to do with the error.
	mapped, err := idMappings.ToHost(pair)
	hdr.Uid = mapped.UID
	hdr.Gid = mapped.GID
	return err
}
|
b48f4bf5 |
// cmdStream executes a command, and returns its stdout as a stream. |
f85e6548 |
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr. |
b48f4bf5 |
func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) {
chdone := make(chan struct{})
cmd.Stdin = input |
97a82094 |
pipeR, pipeW := io.Pipe() |
b48f4bf5 |
cmd.Stdout = pipeW
var errBuf bytes.Buffer
cmd.Stderr = &errBuf
// Run the command and return the pipe
if err := cmd.Start(); err != nil {
return nil, nil, err
}
|
6ede6bc8 |
// Copy stdout to the returned pipe |
58befe30 |
go func() { |
97a82094 |
if err := cmd.Wait(); err != nil { |
b48f4bf5 |
pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) |
97a82094 |
} else {
pipeW.Close()
} |
b48f4bf5 |
close(chdone) |
97a82094 |
}() |
b48f4bf5 |
return pipeR, chdone, nil |
97a82094 |
} |
baacae83 |
// NewTempArchive reads the content of src into a temporary file, and returns the contents
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted. |
aa2cc187 |
func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { |
baacae83 |
f, err := ioutil.TempFile(dir, "")
if err != nil {
return nil, err
}
if _, err := io.Copy(f, src); err != nil {
return nil, err
}
if _, err := f.Seek(0, 0); err != nil {
return nil, err
}
st, err := f.Stat()
if err != nil {
return nil, err
}
size := st.Size() |
48ec176c |
return &TempArchive{File: f, Size: size}, nil |
baacae83 |
}
|
ba332b7d |
// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted. |
baacae83 |
type TempArchive struct {
*os.File |
48ec176c |
Size int64 // Pre-computed from Stat().Size() as a convenience
read int64
closed bool
}
// Close closes the underlying file if it's still open, or does a no-op
// to allow callers to try to close the TempArchive multiple times safely.
func (archive *TempArchive) Close() error {
if archive.closed {
return nil
}
archive.closed = true
return archive.File.Close() |
baacae83 |
}
func (archive *TempArchive) Read(data []byte) (int, error) {
n, err := archive.File.Read(data) |
32ba6ab8 |
archive.read += int64(n)
if err != nil || archive.read == archive.Size { |
48ec176c |
archive.Close() |
baacae83 |
os.Remove(archive.File.Name())
}
return n, err
} |