Signed-off-by: John Howard <jhoward@microsoft.com>
@@ -10,6 +10,7 @@ package builder
 import (
     "fmt"
     "io/ioutil"
+    "path"
     "path/filepath"
     "regexp"
     "runtime"
@@ -39,9 +40,6 @@ func nullDispatch(b *Builder, args []string, attributes map[string]bool, origina
 // in the dockerfile available from the next statement on via ${foo}.
 //
 func env(b *Builder, args []string, attributes map[string]bool, original string) error {
-    if runtime.GOOS == "windows" {
-        return fmt.Errorf("ENV is not supported on Windows.")
-    }
     if len(args) == 0 {
         return fmt.Errorf("ENV requires at least one argument")
     }
@@ -269,12 +267,39 @@ func workdir(b *Builder, args []string, attributes map[string]bool, original str
         return err
     }

+    // Note that the workdir passed in comes from the Dockerfile. Hence it is in
+    // Linux format, using forward slashes, even on Windows. However,
+    // b.Config.WorkingDir is in platform-specific notation (in other words, on
+    // Windows it will use `\`).
     workdir := args[0]

-    if !filepath.IsAbs(workdir) {
-        workdir = filepath.Join("/", b.Config.WorkingDir, workdir)
+    isAbs := false
+    if runtime.GOOS == "windows" {
+        // Alternate processing for Windows is necessary here as we can't call
+        // filepath.IsAbs(workdir), which would verify Windows-style paths
+        // along with drive letters (e.g. c:\pathto\file.txt). We (arguably
+        // correctly or not) check for both forward and back slashes, as this
+        // is what the Go 1.4.2 implementation of IsAbs() does in its
+        // isSlash() function.
+        isAbs = workdir[0] == '\\' || workdir[0] == '/'
+    } else {
+        isAbs = filepath.IsAbs(workdir)
+    }
+
+    if !isAbs {
+        current := b.Config.WorkingDir
+        if runtime.GOOS == "windows" {
+            // Convert to Linux format before the join
+            current = strings.Replace(current, "\\", "/", -1)
+        }
+        // Must use path.Join (not filepath.Join) so that this works correctly on Windows
+        workdir = path.Join("/", current, workdir)
     }

+    // Convert to platform-specific format
+    if runtime.GOOS == "windows" {
+        workdir = strings.Replace(workdir, "/", "\\", -1)
+    }
     b.Config.WorkingDir = workdir

     return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
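Why path.Join rather than filepath.Join in the hunk above: Dockerfile values are always slash-separated, while filepath on a Windows daemon works in backslash terms. Below is a minimal, self-contained sketch of the same normalization (illustrative only; normalizeWorkdir is a made-up name, not code from this patch):

package main

import (
    "fmt"
    "path"
    "path/filepath"
    "runtime"
    "strings"
)

// normalizeWorkdir joins a slash-separated Dockerfile WORKDIR onto the
// current working directory in slash form, then converts the result to the
// platform's separator, following the same shape as the hunk above.
func normalizeWorkdir(current, workdir string) string {
    isAbs := filepath.IsAbs(workdir)
    if runtime.GOOS == "windows" {
        // filepath.IsAbs on Windows expects a drive letter; a Dockerfile
        // WORKDIR such as "/foo" or "\foo" should still count as absolute.
        isAbs = len(workdir) > 0 && (workdir[0] == '\\' || workdir[0] == '/')
    }
    if !isAbs {
        if runtime.GOOS == "windows" {
            current = strings.Replace(current, "\\", "/", -1)
        }
        // path.Join always works on forward slashes, regardless of GOOS.
        workdir = path.Join("/", current, workdir)
    }
    if runtime.GOOS == "windows" {
        workdir = strings.Replace(workdir, "/", "\\", -1)
    }
    return workdir
}

func main() {
    // Prints /a/b/c on Linux and \a\b\c on Windows.
    fmt.Println(normalizeWorkdir("/a/b", "c"))
}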
@@ -489,7 +489,8 @@ func (b *Builder) processImageFrom(img *imagepkg.Image) error {
         b.Config = img.Config
     }

-    if len(b.Config.Env) == 0 {
+    // The default path will be blank on Windows (set by HCS)
+    if len(b.Config.Env) == 0 && daemon.DefaultPathEnv != "" {
         b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
     }

@@ -201,8 +201,11 @@ func (container *Container) LogEvent(action string) {
 // symlinking to a different path) between using this method and using the
 // path. See symlink.FollowSymlinkInScope for more details.
 func (container *Container) GetResourcePath(path string) (string, error) {
-    cleanPath := filepath.Join("/", path)
-    return symlink.FollowSymlinkInScope(filepath.Join(container.basefs, cleanPath), container.basefs)
+    // IMPORTANT - These are paths on the OS where the daemon is running, hence
+    // any filepath operations must be done in an OS-agnostic way.
+    cleanPath := filepath.Join(string(os.PathSeparator), path)
+    r, e := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, cleanPath), container.basefs)
+    return r, e
 }

 // Evaluates `path` in the scope of the container's root, with proper path
@@ -218,7 +221,9 @@ func (container *Container) GetResourcePath(path string) (string, error) {
 // symlinking to a different path) between using this method and using the
 // path. See symlink.FollowSymlinkInScope for more details.
 func (container *Container) GetRootResourcePath(path string) (string, error) {
-    cleanPath := filepath.Join("/", path)
+    // IMPORTANT - These are paths on the OS where the daemon is running, hence
+    // any filepath operations must be done in an OS-agnostic way.
+    cleanPath := filepath.Join(string(os.PathSeparator), path)
     return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root)
 }

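Rooting the user-supplied path at string(os.PathSeparator) matters because filepath.Join also runs filepath.Clean, which only strips leading ".." elements when they are joined against the native root. A small sketch of just that step (illustrative only; the real methods then hand the result to symlink.FollowSymlinkInScope):

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// cleanResourcePath roots a user-supplied path at the OS-specific separator
// so that filepath.Clean strips any leading ".." elements before the result
// is joined under the container's base filesystem.
func cleanResourcePath(p string) string {
    return filepath.Join(string(os.PathSeparator), p)
}

func main() {
    // Prints /etc/passwd on Linux and \etc\passwd on Windows.
    fmt.Println(cleanResourcePath("../../etc/passwd"))
}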
@@ -10,8 +10,9 @@ import (
     "github.com/docker/docker/pkg/archive"
 )

-// TODO Windows. A reasonable default at the moment.
-const DefaultPathEnv = `c:\windows\system32;c:\windows\system32\WindowsPowerShell\v1.0`
+// This is deliberately empty on Windows as the default path will be set by
+// the container. Docker has no context of what the default path should be.
+const DefaultPathEnv = ""

 type Container struct {
     CommonContainer
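Combined with the builder hunk earlier (the len(b.Config.Env) == 0 && daemon.DefaultPathEnv != "" guard), an empty constant simply means PATH is never seeded on Windows. A tiny sketch of that interaction (illustrative only; seedEnv and the lower-case constant are made up for the example):

package main

import "fmt"

// defaultPathEnv mirrors the per-platform constant: populated on Linux,
// deliberately empty on Windows (the Windows value is shown here).
const defaultPathEnv = ""

// seedEnv only injects a PATH entry when the platform defines a default.
func seedEnv(env []string) []string {
    if len(env) == 0 && defaultPathEnv != "" {
        env = append(env, "PATH="+defaultPathEnv)
    }
    return env
}

func main() {
    fmt.Println(seedEnv(nil)) // [] with the Windows value; [PATH=...] on Linux
}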
@@ -48,7 +49,8 @@ func (container *Container) setupLinkedContainers() ([]string, error) {
 }

 func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
-    return nil
+    // On Windows, nothing to link. Just return the container environment.
+    return container.Config.Env
 }

 func (container *Container) initializeNetworking() error {
@@ -258,6 +258,7 @@ func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runc
     return nil
 }

+// TODO Windows. Factor this out, as it is not relevant here (Windows daemon support was not in pre-1.7).
 // verifyVolumesInfo ports volumes configured for the containers pre docker 1.7.
 // It reads the container configuration and creates valid mount points for the old volumes.
 func (daemon *Daemon) verifyVolumesInfo(container *Container) error {
@@ -452,6 +452,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
         }
         seen[relFilePath] = true

+        // TODO Windows: Verify whether this needs to be os.PathSeparator
         // Rename the base resource
         if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) {
             renamedRelFilePath = relFilePath
@@ -503,7 +504,8 @@ loop:
         }

         // Normalize name, for safety and for a simple is-root check
-        // This keeps "../" as-is, but normalizes "/../" to "/"
+        // This keeps "../" as-is, but normalizes "/../" to "/". On Windows:
+        // this keeps "..\" as-is, but normalizes "\..\" to "\".
         hdr.Name = filepath.Clean(hdr.Name)

         for _, exclude := range options.ExcludePatterns {
@@ -512,7 +514,10 @@ loop:
             }
         }

-        if !strings.HasSuffix(hdr.Name, "/") {
+        // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
+        // the filepath format for the OS on which the daemon is running. Hence
+        // the check for a slash-suffix MUST be done in an OS-agnostic way.
+        if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
             // Not the root directory, ensure that the parent directory exists
             parent := filepath.Dir(hdr.Name)
             parentPath := filepath.Join(dest, parent)
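The separator-suffix test above works because filepath.Clean strips every trailing separator except on the root itself, and the separator it leaves behind is the native one. A short sketch (illustrative only):

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "strings"
)

// isRootEntry reports whether a cleaned tar entry name is the root directory:
// after filepath.Clean, only the root keeps a trailing separator, and that
// separator is OS-specific.
func isRootEntry(name string) bool {
    name = filepath.Clean(name)
    return strings.HasSuffix(name, string(os.PathSeparator))
}

func main() {
    fmt.Println(isRootEntry("/"))        // true
    fmt.Println(isRootEntry("etc/motd")) // false
    fmt.Println(isRootEntry("etc/"))     // false - Clean strips the trailing slash
}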
@@ -529,7 +534,7 @@ loop:
         if err != nil {
             return err
         }
-        if strings.HasPrefix(rel, "../") {
+        if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
             return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
         }

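The ".."-prefix test above is the layer-breakout guard. filepath.Rel reports its result using the native separator, so the prefix must be built from os.PathSeparator too. A compact sketch (illustrative only; isOutside is a made-up helper):

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "strings"
)

// isOutside mirrors the breakout check above: an entry escapes dest exactly
// when its path relative to dest starts with ".." followed by the native
// separator.
func isOutside(dest, name string) (bool, error) {
    target := filepath.Join(dest, name)
    rel, err := filepath.Rel(dest, target)
    if err != nil {
        return false, err
    }
    return strings.HasPrefix(rel, ".."+string(os.PathSeparator)), nil
}

func main() {
    inside, _ := isOutside("/data", "docs/readme.md")
    outside, _ := isOutside("/data", "../etc/passwd")
    fmt.Println(inside, outside) // false true
}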
@@ -658,10 +663,13 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
     if err != nil {
         return err
     }
+
     if srcSt.IsDir() {
         return fmt.Errorf("Can't copy a directory")
     }
-    // Clean up the trailing slash
+
+    // Clean up the trailing slash. This must be done in an
+    // operating-system-specific manner.
     if dst[len(dst)-1] == os.PathSeparator {
         dst = filepath.Join(dst, filepath.Base(src))
     }
@@ -709,8 +717,10 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
 // for a single file. It copies a regular file from path `src` to
 // path `dst`, and preserves all its metadata.
 //
-// If `dst` ends with a trailing slash '/', the final destination path
-// will be `dst/base(src)`.
+// Destination handling is operating-system-specific, depending on where the
+// daemon is running. If `dst` ends with a trailing slash, the final
+// destination path will be `dst/base(src)` (Linux) or `dst\base(src)` (Windows).
 func CopyFileWithTar(src, dst string) (err error) {
     return defaultArchiver.CopyFileWithTar(src, dst)
 }
@@ -84,15 +84,17 @@ func Changes(layers []string, rw string) ([]Change, error) {
         if err != nil {
             return err
         }
-        path = filepath.Join("/", path)
+
+        // As this runs on the daemon side, file paths are OS-specific.
+        path = filepath.Join(string(os.PathSeparator), path)

         // Skip root
-        if path == "/" {
+        if path == string(os.PathSeparator) {
             return nil
         }

         // Skip AUFS metadata
-        if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched {
+        if matched, err := filepath.Match(string(os.PathSeparator)+".wh..wh.*", path); err != nil || matched {
             return err
         }

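The AUFS-metadata pattern is likewise assembled from os.PathSeparator because the paths it is matched against were joined with the native separator just above. A small sketch (illustrative only; that filepath.Match treats a leading backslash as a literal separator on Windows is an assumption here):

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// isAufsMetadata matches the whiteout bookkeeping entries AUFS keeps at the
// root of the rw layer (".wh..wh.plnk" and friends), using the native
// separator so the same pattern works on either daemon platform.
func isAufsMetadata(path string) (bool, error) {
    return filepath.Match(string(os.PathSeparator)+".wh..wh.*", path)
}

func main() {
    sep := string(os.PathSeparator)
    m, _ := isAufsMetadata(sep + ".wh..wh.plnk")
    n, _ := isAufsMetadata(sep + "etc" + sep + "motd")
    fmt.Println(m, n) // true false
}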
@@ -169,12 +171,13 @@ type FileInfo struct {
 }

 func (root *FileInfo) LookUp(path string) *FileInfo {
+    // As this runs on the daemon side, file paths are OS-specific.
     parent := root
-    if path == "/" {
+    if path == string(os.PathSeparator) {
         return root
     }

-    pathElements := strings.Split(path, "/")
+    pathElements := strings.Split(path, string(os.PathSeparator))
     for _, elem := range pathElements {
         if elem != "" {
             child := parent.children[elem]
@@ -189,7 +192,8 @@ func (root *FileInfo) LookUp(path string) *FileInfo {

 func (info *FileInfo) path() string {
     if info.parent == nil {
-        return "/"
+        // As this runs on the daemon side, file paths are OS-specific.
+        return string(os.PathSeparator)
     }
     return filepath.Join(info.parent.path(), info.name)
 }
@@ -257,7 +261,8 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {

     // If there were changes inside this directory, we need to add it, even if the directory
     // itself wasn't changed. This is needed to properly save and restore filesystem permissions.
-    if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != "/" {
+    // As this runs on the daemon side, file paths are OS-specific.
+    if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
         change := Change{
             Path: info.path(),
             Kind: ChangeModify,
@@ -279,8 +284,9 @@ func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
 }

 func newRootFileInfo() *FileInfo {
+    // As this runs on the daemon side, file paths are OS-specific.
     root := &FileInfo{
-        name:     "/",
+        name:     string(os.PathSeparator),
         children: make(map[string]*FileInfo),
     }
     return root
@@ -6,6 +6,8 @@ import (
     "fmt"
     "os"
     "path/filepath"
+    "runtime"
+    "strings"

     "github.com/docker/docker/pkg/system"
 )
@@ -48,9 +50,20 @@ func collectFileInfo(sourceDir string) (*FileInfo, error) {
         if err != nil {
             return err
         }
-        relPath = filepath.Join("/", relPath)

-        if relPath == "/" {
+        // As this runs on the daemon side, file paths are OS-specific.
+        relPath = filepath.Join(string(os.PathSeparator), relPath)
+
+        // See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
+        // Temporary workaround: if the returned path starts with two backslashes,
+        // trim it down to a single backslash. Only relevant on Windows.
+        if runtime.GOOS == "windows" {
+            if strings.HasPrefix(relPath, `\\`) {
+                relPath = relPath[1:]
+            }
+        }
+
+        if relPath == string(os.PathSeparator) {
             return nil
         }

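For context on the workaround above: on the Go releases this patch targets, filepath.Join(`\`, rel) on Windows could yield a result starting with two backslashes, which then reads as a UNC prefix (the bug tracked as golang/go#9168). A hedged sketch of the trim (illustrative only; rootRelPath is a made-up name):

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "runtime"
    "strings"
)

// rootRelPath roots a walk-relative path at the native separator and, on
// Windows with an affected Go release, trims an accidental leading double
// backslash back to a single one.
func rootRelPath(rel string) string {
    rel = filepath.Join(string(os.PathSeparator), rel)
    if runtime.GOOS == "windows" && strings.HasPrefix(rel, `\\`) {
        rel = rel[1:]
    }
    return rel
}

func main() {
    fmt.Println(rootRelPath("foo/bar")) // /foo/bar on Linux, \foo\bar on Windows
}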
@@ -7,9 +7,11 @@ import (
     "io/ioutil"
     "os"
     "path/filepath"
+    "runtime"
     "strings"
     "syscall"

+    "github.com/Sirupsen/logrus"
     "github.com/docker/docker/pkg/pools"
     "github.com/docker/docker/pkg/system"
 )
@@ -40,12 +42,35 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
         // Normalize name, for safety and for a simple is-root check
         hdr.Name = filepath.Clean(hdr.Name)

-        if !strings.HasSuffix(hdr.Name, "/") {
+        // Windows does not support filenames with colons in them. Ignore
+        // these files. This is not a problem though (although it might
+        // appear that it is). Let's suppose a client is running docker pull.
+        // The daemon it points to is Windows. Would it make sense for the
+        // client to be doing a docker pull of Ubuntu, for example (which has
+        // files with colons in their names under /usr/share/man/man3)? No,
+        // absolutely not, as it would really only make sense that they were
+        // pulling a Windows image. However, for development, it is necessary
+        // to be able to pull Linux images which are in the repository.
+        //
+        // TODO Windows. Once the registry is aware of what images are Windows-
+        // specific or Linux-specific, this warning should be changed to an error
+        // to cater for the situation where someone does manage to upload a Linux
+        // image but have it tagged as Windows inadvertently.
+        if runtime.GOOS == "windows" {
+            if strings.Contains(hdr.Name, ":") {
+                logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+                continue
+            }
+        }
+
+        // Note that as these operations are platform-specific, so must the slash be.
+        if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
             // Not the root directory, ensure that the parent directory exists.
             // This happened in some tests where an image had a tarfile without any
             // parent directories.
             parent := filepath.Dir(hdr.Name)
             parentPath := filepath.Join(dest, parent)
+
             if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
                 err = system.MkdirAll(parentPath, 0600)
                 if err != nil {
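The colon check above runs per tar entry, before anything is written to disk. A stand-alone sketch of the same skip-and-warn policy over a tar stream read from stdin (illustrative only; it uses the standard library's archive/tar rather than the daemon's unpack path):

package main

import (
    "archive/tar"
    "fmt"
    "io"
    "log"
    "os"
    "runtime"
    "strings"
)

func main() {
    tr := tar.NewReader(os.Stdin)
    for {
        hdr, err := tr.Next()
        if err == io.EOF {
            break
        }
        if err != nil {
            log.Fatal(err)
        }
        // NTFS cannot carry ':' in plain file names, so on Windows such
        // entries are warned about and skipped rather than failing the pull.
        if runtime.GOOS == "windows" && strings.Contains(hdr.Name, ":") {
            fmt.Fprintf(os.Stderr, "ignoring %s (is this a Linux image?)\n", hdr.Name)
            continue
        }
        fmt.Println(hdr.Name) // a real unpacker would extract the entry here
    }
}

Feed it any tarball to try it, e.g. tar -cf - /usr/share/man | go run main.go.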
@@ -74,13 +99,14 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
             }
             continue
         }
-
         path := filepath.Join(dest, hdr.Name)
         rel, err := filepath.Rel(dest, path)
         if err != nil {
             return 0, err
         }
-        if strings.HasPrefix(rel, "../") {
+
+        // Note that as these operations are platform-specific, so must the slash be.
+        if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
             return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
         }
         base := filepath.Base(path)
@@ -1,68 +1,23 @@
 package chrootarchive

 import (
-    "bytes"
-    "encoding/json"
-    "flag"
     "fmt"
     "io"
     "os"
     "path/filepath"
-    "runtime"

     "github.com/docker/docker/pkg/archive"
-    "github.com/docker/docker/pkg/reexec"
     "github.com/docker/docker/pkg/system"
 )

 var chrootArchiver = &archive.Archiver{Untar: Untar}

-func untar() {
-    runtime.LockOSThread()
-    flag.Parse()
-
-    var options *archive.TarOptions
-
-    if runtime.GOOS != "windows" {
-        //read the options from the pipe "ExtraFiles"
-        if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
-            fatal(err)
-        }
-    } else {
-        if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
-            fatal(err)
-        }
-    }
-
-    if err := chroot(flag.Arg(0)); err != nil {
-        fatal(err)
-    }
-
-    // Explanation of Windows difference. Windows does not support chroot.
-    // untar() is a helper function for the command line in the format
-    // "docker docker-untar directory input". In Windows, directory will be
-    // something like <pathto>\docker-buildnnnnnnnnn. So, just use that directory
-    // directly instead.
-    //
-    // One example of where this is used is in the docker build command where the
-    // dockerfile will be unpacked to the machine on which the daemon runs.
-    rootPath := "/"
-    if runtime.GOOS == "windows" {
-        rootPath = flag.Arg(0)
-    }
-    if err := archive.Unpack(os.Stdin, rootPath, options); err != nil {
-        fatal(err)
-    }
-    // fully consume stdin in case it is zero padded
-    flush(os.Stdin)
-    os.Exit(0)
-}
-
 // Untar reads a stream of bytes from `archive`, parses it as a tar archive,
 // and unpacks it into the directory at `dest`.
 // The archive may be compressed with one of the following algorithms:
 // identity (uncompressed), gzip, bzip2, xz.
 func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+
     if tarArchive == nil {
         return fmt.Errorf("Empty archive")
     }
@@ -84,67 +39,9 @@ func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error
     if err != nil {
         return err
     }
-
-    var data []byte
-    var r, w *os.File
     defer decompressedArchive.Close()

-    if runtime.GOOS != "windows" {
-        // We can't pass a potentially large exclude list directly via cmd line
-        // because we easily overrun the kernel's max argument/environment size
-        // when the full image list is passed (e.g. when this is used by
-        // `docker load`). We will marshall the options via a pipe to the
-        // child
-
-        // This solution won't work on Windows as it will fail in golang
-        // exec_windows.go as at the lowest layer because attr.Files > 3
-        r, w, err = os.Pipe()
-        if err != nil {
-            return fmt.Errorf("Untar pipe failure: %v", err)
-        }
-    } else {
-        // We can't pass the exclude list directly via cmd line
-        // because we easily overrun the shell max argument list length
-        // when the full image list is passed (e.g. when this is used
-        // by `docker load`). Instead we will add the JSON marshalled
-        // and placed in the env, which has significantly larger
-        // max size
-        data, err = json.Marshal(options)
-        if err != nil {
-            return fmt.Errorf("Untar json encode: %v", err)
-        }
-    }
-
-    cmd := reexec.Command("docker-untar", dest)
-    cmd.Stdin = decompressedArchive
-
-    if runtime.GOOS != "windows" {
-        cmd.ExtraFiles = append(cmd.ExtraFiles, r)
-        output := bytes.NewBuffer(nil)
-        cmd.Stdout = output
-        cmd.Stderr = output
-
-        if err := cmd.Start(); err != nil {
-            return fmt.Errorf("Untar error on re-exec cmd: %v", err)
-        }
-        //write the options to the pipe for the untar exec to read
-        if err := json.NewEncoder(w).Encode(options); err != nil {
-            return fmt.Errorf("Untar json encode to pipe failed: %v", err)
-        }
-        w.Close()
-
-        if err := cmd.Wait(); err != nil {
-            return fmt.Errorf("Untar re-exec error: %v: output: %s", err, output)
-        }
-        return nil
-    }
-    cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))
-    out, err := cmd.CombinedOutput()
-    if err != nil {
-        return fmt.Errorf("Untar %s %s", err, out)
-    }
-    return nil
-
+    return invokeUnpack(decompressedArchive, dest, options)
 }

 // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
@@ -165,8 +62,8 @@ func CopyWithTar(src, dst string) error {
 // for a single file. It copies a regular file from path `src` to
 // path `dst`, and preserves all its metadata.
 //
-// If `dst` ends with a trailing slash '/', the final destination path
-// will be `dst/base(src)`.
+// If `dst` ends with a trailing slash '/' ('\' on Windows), the final
+// destination path will be `dst/base(src)` or `dst\base(src)`.
 func CopyFileWithTar(src, dst string) (err error) {
     return chrootArchiver.CopyFileWithTar(src, dst)
 }
@@ -3,7 +3,17 @@
 package chrootarchive

 import (
+    "bytes"
+    "encoding/json"
+    "flag"
+    "fmt"
+    "io"
+    "os"
+    "runtime"
     "syscall"
+
+    "github.com/docker/docker/pkg/archive"
+    "github.com/docker/docker/pkg/reexec"
 )

 func chroot(path string) error {
@@ -12,3 +22,64 @@ func chroot(path string) error {
     }
     return syscall.Chdir("/")
 }
+
+// untar is the entry-point for docker-untar on re-exec. This is not used on
+// Windows as it does not support chroot, hence there is no point sandboxing
+// through chroot and re-exec.
+func untar() {
+    runtime.LockOSThread()
+    flag.Parse()
+
+    var options *archive.TarOptions
+
+    // Read the options from the pipe "ExtraFiles"
+    if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
+        fatal(err)
+    }
+
+    if err := chroot(flag.Arg(0)); err != nil {
+        fatal(err)
+    }
+
+    if err := archive.Unpack(os.Stdin, "/", options); err != nil {
+        fatal(err)
+    }
+    // Fully consume stdin in case it is zero padded
+    flush(os.Stdin)
+    os.Exit(0)
+}
+
+func invokeUnpack(decompressedArchive io.ReadCloser, dest string, options *archive.TarOptions) error {
+
+    // We can't pass a potentially large exclude list directly via the command
+    // line because we easily overrun the kernel's max argument/environment size
+    // when the full image list is passed (e.g. when this is used by
+    // `docker load`). We will marshal the options via a pipe to the child.
+    r, w, err := os.Pipe()
+    if err != nil {
+        return fmt.Errorf("Untar pipe failure: %v", err)
+    }
+
+    cmd := reexec.Command("docker-untar", dest)
+    cmd.Stdin = decompressedArchive
+
+    cmd.ExtraFiles = append(cmd.ExtraFiles, r)
+    output := bytes.NewBuffer(nil)
+    cmd.Stdout = output
+    cmd.Stderr = output
+
+    if err := cmd.Start(); err != nil {
+        return fmt.Errorf("Untar error on re-exec cmd: %v", err)
+    }
+    // Write the options to the pipe for the untar exec to read
+    if err := json.NewEncoder(w).Encode(options); err != nil {
+        return fmt.Errorf("Untar json encode to pipe failed: %v", err)
+    }
+    w.Close()
+
+    if err := cmd.Wait(); err != nil {
+        return fmt.Errorf("Untar re-exec error: %v: output: %s", err, output)
+    }
+    return nil
+}
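The fd 3 pipe in invokeUnpack and untar above exists because the exclude list can be far larger than the command line or environment allows. The same pattern using plain os/exec (illustrative only; /bin/cat stands in for the re-exec'd docker-untar helper, which reads the options with os.NewFile(3, "options") as shown above):

package main

import (
    "encoding/json"
    "log"
    "os"
    "os/exec"
)

type tarOptions struct {
    ExcludePatterns []string
}

// runWithOptions starts a child process, hands it the read end of a pipe as
// fd 3 via ExtraFiles, and streams the (potentially huge) options to it as
// JSON instead of putting them on the command line.
func runWithOptions(opts *tarOptions) error {
    r, w, err := os.Pipe()
    if err != nil {
        return err
    }
    cmd := exec.Command("/bin/cat") // stand-in child; the real code uses reexec.Command
    cmd.ExtraFiles = []*os.File{r}  // the child sees this as fd 3
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    if err := cmd.Start(); err != nil {
        return err
    }
    if err := json.NewEncoder(w).Encode(opts); err != nil {
        return err
    }
    w.Close()
    return cmd.Wait()
}

func main() {
    if err := runWithOptions(&tarOptions{ExcludePatterns: []string{"*.tmp"}}); err != nil {
        log.Fatal(err)
    }
}

ExtraFiles is also why this path is Unix-only: as the comment removed from Untar noted, exec on Windows cannot pass descriptors beyond stdin/stdout/stderr (the attr.Files > 3 failure), which is why the Windows invokeUnpack below unpacks inline instead.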
@@ -1,6 +1,21 @@
 package chrootarchive

+import (
+    "io"
+
+    "github.com/docker/docker/pkg/archive"
+)
+
 // chroot is not supported by Windows
 func chroot(path string) error {
     return nil
 }
+
+func invokeUnpack(decompressedArchive io.ReadCloser,
+    dest string,
+    options *archive.TarOptions) error {
+    // Windows is different from Linux here because Windows does not support
+    // chroot. Hence there is no point sandboxing a chrooted process to do
+    // the unpack. We call it inline instead, within the daemon process.
+    return archive.Unpack(decompressedArchive, dest, options)
+}

deleted file mode 100644
@@ -1,102 +0,0 @@
-package chrootarchive
-
-import (
-    "bytes"
-    "encoding/json"
-    "flag"
-    "fmt"
-    "io/ioutil"
-    "os"
-    "path/filepath"
-    "runtime"
-
-    "github.com/docker/docker/pkg/archive"
-    "github.com/docker/docker/pkg/reexec"
-    "github.com/docker/docker/pkg/system"
-)
-
-type applyLayerResponse struct {
-    LayerSize int64 `json:"layerSize"`
-}
-
-func applyLayer() {
-
-    var (
-        root   = "/"
-        tmpDir = ""
-        err    error
-    )
-
-    runtime.LockOSThread()
-    flag.Parse()
-
-    if runtime.GOOS != "windows" {
-        if err := chroot(flag.Arg(0)); err != nil {
-            fatal(err)
-        }
-
-        // We need to be able to set any perms
-        oldmask, err := system.Umask(0)
-        defer system.Umask(oldmask)
-        if err != nil {
-            fatal(err)
-        }
-    } else {
-        // As Windows does not support chroot or umask, we use the directory
-        // passed in which will be <pathto>\docker-buildnnnnnnnn instead of
-        // the 'chroot-root', "/"
-        root = flag.Arg(0)
-    }
-
-    if tmpDir, err = ioutil.TempDir(root, "temp-docker-extract"); err != nil {
-        fatal(err)
-    }
-
-    os.Setenv("TMPDIR", tmpDir)
-    size, err := archive.UnpackLayer(root, os.Stdin)
-    os.RemoveAll(tmpDir)
-    if err != nil {
-        fatal(err)
-    }
-
-    encoder := json.NewEncoder(os.Stdout)
-    if err := encoder.Encode(applyLayerResponse{size}); err != nil {
-        fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err))
-    }
-
-    flush(os.Stdout)
-    flush(os.Stdin)
-    os.Exit(0)
-}
-
-// ApplyLayer parses a diff in the standard layer format from `layer`, and
-// applies it to the directory `dest`. Returns the size in bytes of the
-// contents of the layer.
-func ApplyLayer(dest string, layer archive.ArchiveReader) (size int64, err error) {
-    dest = filepath.Clean(dest)
-    decompressed, err := archive.DecompressStream(layer)
-    if err != nil {
-        return 0, err
-    }
-
-    defer decompressed.Close()
-
-    cmd := reexec.Command("docker-applyLayer", dest)
-    cmd.Stdin = decompressed
-
-    outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
-    cmd.Stdout, cmd.Stderr = outBuf, errBuf
-
-    if err = cmd.Run(); err != nil {
-        return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf)
-    }
-
-    // Stdout should be a valid JSON struct representing an applyLayerResponse.
-    response := applyLayerResponse{}
-    decoder := json.NewDecoder(outBuf)
-    if err = decoder.Decode(&response); err != nil {
-        return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err)
-    }
-
-    return response.LayerSize, nil
-}

new file mode 100644
@@ -0,0 +1,98 @@
+// +build !windows
+
+package chrootarchive
+
+import (
+    "bytes"
+    "encoding/json"
+    "flag"
+    "fmt"
+    "io/ioutil"
+    "os"
+    "path/filepath"
+    "runtime"
+
+    "github.com/docker/docker/pkg/archive"
+    "github.com/docker/docker/pkg/reexec"
+    "github.com/docker/docker/pkg/system"
+)
+
+type applyLayerResponse struct {
+    LayerSize int64 `json:"layerSize"`
+}
+
+// applyLayer is the entry-point for docker-applyLayer on re-exec. This is not
+// used on Windows as it does not support chroot, hence there is no point
+// sandboxing through chroot and re-exec.
+func applyLayer() {
+
+    var (
+        tmpDir = ""
+        err    error
+    )
+    runtime.LockOSThread()
+    flag.Parse()
+
+    if err := chroot(flag.Arg(0)); err != nil {
+        fatal(err)
+    }
+
+    // We need to be able to set any perms
+    oldmask, err := system.Umask(0)
+    defer system.Umask(oldmask)
+    if err != nil {
+        fatal(err)
+    }
+
+    if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil {
+        fatal(err)
+    }
+
+    os.Setenv("TMPDIR", tmpDir)
+    size, err := archive.UnpackLayer("/", os.Stdin)
+    os.RemoveAll(tmpDir)
+    if err != nil {
+        fatal(err)
+    }
+
+    encoder := json.NewEncoder(os.Stdout)
+    if err := encoder.Encode(applyLayerResponse{size}); err != nil {
+        fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err))
+    }
+
+    flush(os.Stdout)
+    flush(os.Stdin)
+    os.Exit(0)
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`. Returns the size in bytes of the
+// contents of the layer.
+func ApplyLayer(dest string, layer archive.ArchiveReader) (size int64, err error) {
+    dest = filepath.Clean(dest)
+    decompressed, err := archive.DecompressStream(layer)
+    if err != nil {
+        return 0, err
+    }
+
+    defer decompressed.Close()
+
+    cmd := reexec.Command("docker-applyLayer", dest)
+    cmd.Stdin = decompressed
+
+    outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
+    cmd.Stdout, cmd.Stderr = outBuf, errBuf
+
+    if err = cmd.Run(); err != nil {
+        return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf)
+    }
+
+    // Stdout should be a valid JSON struct representing an applyLayerResponse.
+    response := applyLayerResponse{}
+    decoder := json.NewDecoder(outBuf)
+    if err = decoder.Decode(&response); err != nil {
+        return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err)
+    }
+
+    return response.LayerSize, nil
+}

new file mode 100644
@@ -0,0 +1,32 @@
+package chrootarchive
+
+import (
+    "fmt"
+    "io/ioutil"
+    "os"
+    "path/filepath"
+
+    "github.com/docker/docker/pkg/archive"
+)
+
+func ApplyLayer(dest string, layer archive.ArchiveReader) (size int64, err error) {
+    dest = filepath.Clean(dest)
+    decompressed, err := archive.DecompressStream(layer)
+    if err != nil {
+        return 0, err
+    }
+    defer decompressed.Close()
+
+    tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract")
+    if err != nil {
+        return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err)
+    }
+
+    s, err := archive.UnpackLayer(dest, decompressed)
+    os.RemoveAll(tmpDir)
+    if err != nil {
+        return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest)
+    }
+
+    return s, nil
+}

deleted file mode 100644
@@ -1,26 +0,0 @@
-package chrootarchive
-
-import (
-    "fmt"
-    "io"
-    "io/ioutil"
-    "os"
-
-    "github.com/docker/docker/pkg/reexec"
-)
-
-func init() {
-    reexec.Register("docker-untar", untar)
-    reexec.Register("docker-applyLayer", applyLayer)
-}
-
-func fatal(err error) {
-    fmt.Fprint(os.Stderr, err)
-    os.Exit(1)
-}
-
-// flush consumes all the bytes from the reader discarding
-// any errors
-func flush(r io.Reader) {
-    io.Copy(ioutil.Discard, r)
-}

new file mode 100644
@@ -0,0 +1,28 @@
+// +build !windows
+
+package chrootarchive
+
+import (
+    "fmt"
+    "io"
+    "io/ioutil"
+    "os"
+
+    "github.com/docker/docker/pkg/reexec"
+)
+
+func init() {
+    reexec.Register("docker-applyLayer", applyLayer)
+    reexec.Register("docker-untar", untar)
+}
+
+func fatal(err error) {
+    fmt.Fprint(os.Stderr, err)
+    os.Exit(1)
+}
+
+// flush consumes all the bytes from the reader discarding
+// any errors
+func flush(r io.Reader) {
+    io.Copy(ioutil.Discard, r)
+}
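A note on how the per-platform split above is selected at build time: the _windows filename suffix alone restricts a file to GOOS=windows, while _unix is not a suffix the Go toolchain recognizes, which is why the Unix-only files carry an explicit // +build !windows constraint. A minimal illustration of the mechanism (made-up package and file names, not from this patch):

// File greet_unix.go - "_unix" is not a recognized GOOS suffix, so the build
// tag below does the restriction. The blank line after the tag is required
// for it to be treated as a constraint.

// +build !windows

package greet

// Platform reports which implementation was compiled in.
func Platform() string { return "unix-like" }

// File greet_windows.go - the "_windows" suffix restricts this file to
// GOOS=windows on its own; no build tag is needed.

package greet

func Platform() string { return "windows" }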