
Add chroot for tar packing operations

Previously, only unpack operations were supported with chroot. This adds
chroot support for packing operations as well, which prevents potential
breakouts when copying data from a container.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
(cherry picked from commit 3029e765e241ea2b5249868705dbf9095bc4d529)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>

Brian Goff authored on 2019/05/31 06:55:52
Showing 7 changed files
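
For context, a minimal sketch of how a caller is expected to use the new packing entry point. The copyFromContainer helper, the rootPath/srcPath names, and the main wrapper are illustrative only and are not part of this commit; the diff below shows the actual call sites in the daemon.

package main

import (
	"io"
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
)

// copyFromContainer is an illustrative helper (not part of this commit): it
// archives srcPath, a path inside a container's mounted root filesystem,
// while the packing process is chrooted to rootPath, so symlinks under
// srcPath cannot resolve to files outside that root filesystem.
func copyFromContainer(rootPath, srcPath string, dst io.Writer) error {
	opts := &archive.TarOptions{Compression: archive.Uncompressed}

	// Before this commit the daemon called archive.TarWithOptions(srcPath, opts)
	// here; the change routes packing through chrootarchive.Tar instead.
	rdr, err := chrootarchive.Tar(srcPath, opts, rootPath)
	if err != nil {
		return err
	}
	defer rdr.Close()

	_, err = io.Copy(dst, rdr)
	return err
}

func main() {
	if len(os.Args) != 3 {
		log.Fatal("usage: example <rootfs-path> <src-path>")
	}
	if err := copyFromContainer(os.Args[1], os.Args[2], os.Stdout); err != nil {
		log.Fatal(err)
	}
}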
... ...
@@ -39,11 +39,11 @@ func extractArchive(i interface{}, src io.Reader, dst string, opts *archive.TarO
 	return chrootarchive.UntarWithRoot(src, dst, opts, root)
 }
 
-func archivePath(i interface{}, src string, opts *archive.TarOptions) (io.ReadCloser, error) {
+func archivePath(i interface{}, src string, opts *archive.TarOptions, root string) (io.ReadCloser, error) {
 	if ap, ok := i.(archiver); ok {
 		return ap.ArchivePath(src, opts)
 	}
-	return archive.TarWithOptions(src, opts)
+	return chrootarchive.Tar(src, opts, root)
 }
 
 // ContainerCopy performs a deprecated operation of archiving the resource at
... ...
@@ -239,7 +239,7 @@ func (daemon *Daemon) containerArchivePath(container *container.Container, path
 	sourceDir, sourceBase := driver.Dir(resolvedPath), driver.Base(resolvedPath)
 	opts := archive.TarResourceRebaseOpts(sourceBase, driver.Base(absPath))
 
-	data, err := archivePath(driver, sourceDir, opts)
+	data, err := archivePath(driver, sourceDir, opts, container.BaseFS.Path())
 	if err != nil {
 		return nil, nil, err
 	}
... ...
@@ -433,7 +433,7 @@ func (daemon *Daemon) containerCopy(container *container.Container, resource str
 	archive, err := archivePath(driver, basePath, &archive.TarOptions{
 		Compression:  archive.Uncompressed,
 		IncludeFiles: filter,
-	})
+	}, container.BaseFS.Path())
 	if err != nil {
 		return nil, err
 	}
... ...
@@ -70,7 +70,7 @@ func (daemon *Daemon) containerExport(container *container.Container) (arch io.R
 		Compression: archive.Uncompressed,
 		UIDMaps:     daemon.idMapping.UIDs(),
 		GIDMaps:     daemon.idMapping.GIDs(),
-	})
+	}, basefs.Path())
 	if err != nil {
 		rwlayer.Unmount()
 		return nil, err
... ...
@@ -87,3 +87,11 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
 
 	return invokeUnpack(r, dest, options, root)
 }
+
+// Tar tars the requested path while chrooted to the specified root.
+func Tar(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+	if options == nil {
+		options = &archive.TarOptions{}
+	}
+	return invokePack(srcPath, options, root)
+}
... ...
@@ -12,9 +12,11 @@ import (
 	"os"
 	"path/filepath"
 	"runtime"
+	"strings"
 
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/reexec"
+	"github.com/pkg/errors"
 )
 
 // untar is the entry-point for docker-untar on re-exec. This is not used on
... ...
@@ -24,7 +26,7 @@ func untar() {
 	runtime.LockOSThread()
 	flag.Parse()
 
-	var options *archive.TarOptions
+	var options archive.TarOptions
 
 	//read the options from the pipe "ExtraFiles"
 	if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
... ...
@@ -45,7 +47,7 @@ func untar() {
 		fatal(err)
 	}
 
-	if err := archive.Unpack(os.Stdin, dst, options); err != nil {
+	if err := archive.Unpack(os.Stdin, dst, &options); err != nil {
 		fatal(err)
 	}
 	// fully consume stdin in case it is zero padded
... ...
@@ -57,6 +59,9 @@ func untar() {
 }
 
 func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+	if root == "" {
+		return errors.New("must specify a root to chroot to")
+	}
 
 	// We can't pass a potentially large exclude list directly via cmd line
 	// because we easily overrun the kernel's max argument/environment size
... ...
@@ -112,3 +117,92 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
 	}
 	return nil
 }
+
+func tar() {
+	runtime.LockOSThread()
+	flag.Parse()
+
+	src := flag.Arg(0)
+	var root string
+	if len(flag.Args()) > 1 {
+		root = flag.Arg(1)
+	}
+
+	if root == "" {
+		root = src
+	}
+
+	if err := realChroot(root); err != nil {
+		fatal(err)
+	}
+
+	var options archive.TarOptions
+	if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
+		fatal(err)
+	}
+
+	rdr, err := archive.TarWithOptions(src, &options)
+	if err != nil {
+		fatal(err)
+	}
+	defer rdr.Close()
+
+	if _, err := io.Copy(os.Stdout, rdr); err != nil {
+		fatal(err)
+	}
+
+	os.Exit(0)
+}
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+	if root == "" {
+		return nil, errors.New("root path must not be empty")
+	}
+
+	relSrc, err := filepath.Rel(root, srcPath)
+	if err != nil {
+		return nil, err
+	}
+	if relSrc == "." {
+		relSrc = "/"
+	}
+	if relSrc[0] != '/' {
+		relSrc = "/" + relSrc
+	}
+
+	// make sure we didn't trim a trailing slash with the call to `Rel`
+	if strings.HasSuffix(srcPath, "/") && !strings.HasSuffix(relSrc, "/") {
+		relSrc += "/"
+	}
+
+	cmd := reexec.Command("docker-tar", relSrc, root)
+
+	errBuff := bytes.NewBuffer(nil)
+	cmd.Stderr = errBuff
+
+	tarR, tarW := io.Pipe()
+	cmd.Stdout = tarW
+
+	stdin, err := cmd.StdinPipe()
+	if err != nil {
+		return nil, errors.Wrap(err, "error getting options pipe for tar process")
+	}
+
+	if err := cmd.Start(); err != nil {
+		return nil, errors.Wrap(err, "tar error on re-exec cmd")
+	}
+
+	go func() {
+		err := cmd.Wait()
+		err = errors.Wrapf(err, "error processing tar file: %s", errBuff)
+		tarW.CloseWithError(err)
+	}()
+
+	if err := json.NewEncoder(stdin).Encode(options); err != nil {
+		stdin.Close()
+		return nil, errors.Wrap(err, "tar json encode to pipe failed")
+	}
+	stdin.Close()
+
+	return tarR, nil
+}
... ...
@@ -3,11 +3,14 @@
 package chrootarchive
 
 import (
+	gotar "archive/tar"
 	"bytes"
 	"io"
 	"io/ioutil"
 	"os"
+	"path"
 	"path/filepath"
+	"strings"
 	"testing"
 
 	"github.com/docker/docker/pkg/archive"
... ...
@@ -75,3 +78,94 @@ func TestUntarWithMaliciousSymlinks(t *testing.T) {
 	assert.NilError(t, err)
 	assert.Equal(t, string(hostData), "pwn3d")
 }
+
+// Test for CVE-2018-15664
+// Assures that in the case where an "attacker" controlled path is a symlink to
+// some path outside of a container's rootfs that we do not unwittingly leak
+// host data into the archive.
+func TestTarWithMaliciousSymlinks(t *testing.T) {
+	dir, err := ioutil.TempDir("", t.Name())
+	assert.NilError(t, err)
+	// defer os.RemoveAll(dir)
+	t.Log(dir)
+
+	root := filepath.Join(dir, "root")
+
+	err = os.MkdirAll(root, 0755)
+	assert.NilError(t, err)
+
+	hostFileData := []byte("I am a host file")
+
+	// Add a file into a directory above root
+	// Ensure that we can't access this file while tarring.
+	err = ioutil.WriteFile(filepath.Join(dir, "host-file"), hostFileData, 0644)
+	assert.NilError(t, err)
+
+	safe := filepath.Join(root, "safe")
+	err = unix.Symlink(dir, safe)
+	assert.NilError(t, err)
+
+	data := filepath.Join(dir, "data")
+	err = os.MkdirAll(data, 0755)
+	assert.NilError(t, err)
+
+	type testCase struct {
+		p        string
+		includes []string
+	}
+
+	cases := []testCase{
+		{p: safe, includes: []string{"host-file"}},
+		{p: safe + "/", includes: []string{"host-file"}},
+		{p: safe, includes: nil},
+		{p: safe + "/", includes: nil},
+		{p: root, includes: []string{"safe/host-file"}},
+		{p: root, includes: []string{"/safe/host-file"}},
+		{p: root, includes: nil},
+	}
+
+	maxBytes := len(hostFileData)
+
+	for _, tc := range cases {
+		t.Run(path.Join(tc.p+"_"+strings.Join(tc.includes, "_")), func(t *testing.T) {
+			// Here if we use archive.TarWithOptions directly or change the "root" parameter
+			// to be the same as "safe", data from the host will be leaked into the archive
+			var opts *archive.TarOptions
+			if tc.includes != nil {
+				opts = &archive.TarOptions{
+					IncludeFiles: tc.includes,
+				}
+			}
+			rdr, err := Tar(tc.p, opts, root)
+			assert.NilError(t, err)
+			defer rdr.Close()
+
+			tr := gotar.NewReader(rdr)
+			assert.Assert(t, !isDataInTar(t, tr, hostFileData, int64(maxBytes)), "host data leaked to archive")
+		})
+	}
+}
+
+func isDataInTar(t *testing.T, tr *gotar.Reader, compare []byte, maxBytes int64) bool {
+	for {
+		h, err := tr.Next()
+		if err == io.EOF {
+			break
+		}
+		assert.NilError(t, err)
+
+		if h.Size == 0 {
+			continue
+		}
+		assert.Assert(t, h.Size <= maxBytes, "%s: file size exceeds max expected size %d: %d", h.Name, maxBytes, h.Size)
+
+		data := make([]byte, int(h.Size))
+		_, err = io.ReadFull(tr, data)
+		assert.NilError(t, err)
+		if bytes.Contains(data, compare) {
+			return true
+		}
+	}
+
+	return false
+}
... ...
@@ -20,3 +20,10 @@ func invokeUnpack(decompressedArchive io.ReadCloser,
 	// do the unpack. We call inline instead within the daemon process.
 	return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
 }
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+	// Windows is different to Linux here because Windows does not support
+	// chroot. Hence there is no point sandboxing a chrooted process to
+	// do the pack. We call inline instead within the daemon process.
+	return archive.TarWithOptions(srcPath, options)
+}
... ...
@@ -14,6 +14,7 @@ import (
 func init() {
 	reexec.Register("docker-applyLayer", applyLayer)
 	reexec.Register("docker-untar", untar)
+	reexec.Register("docker-tar", tar)
 }
 
 func fatal(err error) {
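
The "docker-tar" name registered above is dispatched through Docker's reexec package: invokePack re-executes the daemon's own binary under that name, and the registered tar function then runs in the child, where it can chroot before packing. A minimal, self-contained sketch of that Register/Init/Command pattern follows; the "example-worker" name and the printed message are made up for illustration.

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Mirrors how "docker-tar" is registered above: when this binary is
	// re-executed under the registered name, the function below runs
	// instead of main().
	reexec.Register("example-worker", func() {
		fmt.Println("running in the re-exec'd child")
		os.Exit(0)
	})
}

func main() {
	// Must run first: if this process was started via reexec.Command,
	// Init dispatches to the registered function and returns true.
	if reexec.Init() {
		return
	}

	// Parent side: re-execute ourselves as "example-worker".
	cmd := reexec.Command("example-worker")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}

In the commit itself, invokePack plays the parent's role: it builds the child with reexec.Command("docker-tar", relSrc, root), writes the TarOptions as JSON to the child's stdin, and hands the read side of a pipe attached to the child's stdout back to the caller.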